From 7d81d2209b2f136e3c200d8efc106897c26c0021 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 20:46:53 -0400 Subject: [PATCH 001/148] feat: access current burn chain state in epoch 3 In epoch 2, a Stacks block can only access the burn block associated with its parent, since the block is built before its burn block is known. In epoch 3, all Nakamoto blocks can access the current burn block. --- clarity/src/vm/database/clarity_db.rs | 131 +++++--- clarity/src/vm/test_util/mod.rs | 8 + stackslib/src/clarity_vm/database/mod.rs | 19 ++ .../src/tests/nakamoto_integrations.rs | 280 +++++++++++++++++- 4 files changed, 376 insertions(+), 62 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 7a1aa3e3bc..5394842a1c 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -102,6 +102,11 @@ pub trait HeadersDB { } pub trait BurnStateDB { + /// Get the burn chain height at the current tip. + fn get_tip_burn_block_height(&self) -> Option<u32>; + /// Get the sortition id for the current tip. 
+ fn get_tip_sortition_id(&self) -> Option; + fn get_v1_unlock_height(&self) -> u32; fn get_v2_unlock_height(&self) -> u32; fn get_v3_unlock_height(&self) -> u32; @@ -187,6 +192,14 @@ impl HeadersDB for &dyn HeadersDB { } impl BurnStateDB for &dyn BurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + (*self).get_tip_burn_block_height() + } + + fn get_tip_sortition_id(&self) -> Option { + (*self).get_tip_sortition_id() + } + fn get_v1_unlock_height(&self) -> u32 { (*self).get_v1_unlock_height() } @@ -339,6 +352,14 @@ impl HeadersDB for NullHeadersDB { #[allow(clippy::panic)] impl BurnStateDB for NullBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } @@ -964,26 +985,33 @@ impl<'a> ClarityDatabase<'a> { /// `get_current_block_height`). pub fn get_current_burnchain_block_height(&mut self) -> Result { let cur_stacks_height = self.store.get_current_block_height(); - let last_mined_bhh = if cur_stacks_height == 0 { - return Ok(self.burn_state_db.get_burn_start_height()); - } else { - self.get_index_block_header_hash(cur_stacks_height.checked_sub(1).ok_or_else( - || { - InterpreterError::Expect( - "BUG: cannot eval burn-block-height in boot code".into(), - ) - }, - )?)? - }; - self.get_burnchain_block_height(&last_mined_bhh) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "Block header hash '{}' must return for provided stacks block height {}", - &last_mined_bhh, cur_stacks_height - )) - .into() - }) + // In epoch 2, we can only access the burn block associated with the last block + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + let last_mined_bhh = if cur_stacks_height == 0 { + return Ok(self.burn_state_db.get_burn_start_height()); + } else { + // Safety note: normal subtraction is safe here, because we've already checked + // that cur_stacks_height > 0. 
+ self.get_index_block_header_hash(cur_stacks_height - 1)? + }; + + self.get_burnchain_block_height(&last_mined_bhh) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "Block header hash '{}' must return for provided stacks block height {}", + &last_mined_bhh, cur_stacks_height + )) + .into() + }) + } else { + // In epoch 3+, we can access the current burnchain block + self.burn_state_db + .get_tip_burn_block_height() + .ok_or_else(|| { + InterpreterError::Expect("Failed to get burnchain tip height.".into()).into() + }) + } } pub fn get_block_header_hash(&mut self, block_height: u32) -> Result { @@ -1010,46 +1038,53 @@ impl<'a> ClarityDatabase<'a> { .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } + /// In Epoch 2.x: /// 1. Get the current Stacks tip height (which is in the process of being evaluated) /// 2. Get the parent block's StacksBlockId, which is SHA512-256(consensus_hash, block_hash). /// This is the highest Stacks block in this fork whose consensus hash is known. /// 3. Resolve the parent StacksBlockId to its consensus hash /// 4. Resolve the consensus hash to the associated SortitionId + /// In Epoch 3+: + /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { - let current_stacks_height = self.get_current_block_height(); + if self.get_clarity_epoch_version()? 
< StacksEpochId::Epoch30 { + let current_stacks_height = self.get_current_block_height(); - if current_stacks_height < 1 { - // we are in the Stacks genesis block - return Ok(None); - } + if current_stacks_height < 1 { + // we are in the Stacks genesis block + return Ok(None); + } - // this is the StacksBlockId of the last block evaluated in this fork - let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; + // this is the StacksBlockId of the last block evaluated in this fork + let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; - // infallible, since we always store the consensus hash with the StacksBlockId in the - // headers DB - let consensus_hash = self - .headers_db - .get_consensus_hash_for_block(&parent_id_bhh) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "FATAL: no consensus hash found for StacksBlockId {}", - &parent_id_bhh - )) - })?; + // infallible, since we always store the consensus hash with the StacksBlockId in the + // headers DB + let consensus_hash = self + .headers_db + .get_consensus_hash_for_block(&parent_id_bhh) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no consensus hash found for StacksBlockId {}", + &parent_id_bhh + )) + })?; - // infallible, since every sortition has a consensus hash - let sortition_id = self - .burn_state_db - .get_sortition_id_from_consensus_hash(&consensus_hash) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "FATAL: no SortitionID found for consensus hash {}", - &consensus_hash - )) - })?; + // infallible, since every sortition has a consensus hash + let sortition_id = self + .burn_state_db + .get_sortition_id_from_consensus_hash(&consensus_hash) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no SortitionID found for consensus hash {}", + &consensus_hash + )) + })?; - Ok(Some(sortition_id)) + Ok(Some(sortition_id)) + } else { + Ok(self.burn_state_db.get_tip_sortition_id()) + } } /// Fetch the 
burnchain block header hash for a given burnchain height. diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index b7e58919aa..e566f5013d 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -199,6 +199,14 @@ impl HeadersDB for UnitTestHeaderDB { } impl BurnStateDB for UnitTestBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index c9c21957f3..410c59ba81 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -19,6 +19,7 @@ use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, get_ancestor_sort_id_tx, SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, SortitionHandleTx, }; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::{ @@ -448,6 +449,14 @@ impl SortitionDBRef for SortitionDBConn<'_> { } impl BurnStateDB for SortitionHandleTx<'_> { + fn get_tip_burn_block_height(&self) -> Option { + self.get_burn_block_height(&self.context.chain_tip) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(self.context.chain_tip.clone()) + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { match SortitionDB::get_block_snapshot(self.tx(), sortition_id) { Ok(Some(x)) => Some(x.block_height as u32), @@ -570,6 +579,16 @@ impl BurnStateDB for SortitionHandleTx<'_> { } impl BurnStateDB for SortitionDBConn<'_> { + fn get_tip_burn_block_height(&self) -> Option { + let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + tip.block_height.try_into().ok() + } + + fn get_tip_sortition_id(&self) -> Option { 
+ let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + Some(tip.sortition_id) + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { match SortitionDB::get_block_snapshot(self.conn(), sortition_id) { Ok(Some(x)) => Some(x.block_height as u32), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..d417d00476 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -24,7 +24,7 @@ use std::{env, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v1::messages::SignerMessage; @@ -44,7 +44,9 @@ use stacks::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; +use stacks::chainstate::stacks::miner::{ + BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, TransactionSuccessEvent, +}; use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, @@ -3501,13 +3503,6 @@ fn check_block_heights() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - let mut btcd_controller = 
BitcoinCoreController::new(naka_conf.clone()); btcd_controller .start_bitcoind() @@ -3604,11 +3599,9 @@ fn check_block_heights() { }) .unwrap(); - let info = get_chain_info_result(&naka_conf).unwrap(); - println!("Chain info: {:?}", info); - let mut last_burn_block_height = info.burn_block_height as u128; - let mut last_stacks_block_height = info.stacks_tip_height as u128; - let mut last_tenure_height = last_stacks_block_height as u128; + let mut last_burn_block_height = 0; + let mut last_stacks_block_height = 0; + let mut last_tenure_height = 0; let heights0_value = call_read_only( &naka_conf, @@ -3895,3 +3888,262 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test is testing the burn state of the Stacks blocks. In Stacks 2.x, +/// the burn block state accessed in a Clarity contract is the burn block of +/// the block's parent, since the block is built before its burn block is +/// mined. In Nakamoto, there is no longer this race condition, so Clarity +/// contracts access the state of the current burn block. 
+/// We should verify: +/// - `burn-block-height` in epoch 3.x is the burn block of the Stacks block +/// - `get-burn-block-info` is able to access info of the current burn block +/// in epoch 3.x +fn clarity_burn_state() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let tx_fee = 1000; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::MinedBlocks], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = 
run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let mut sender_nonce = 0; + + // This version uses the Clarity 1 / 2 keywords + let contract_name = "test-contract"; + let contract = r#" + (define-read-only (foo (expected-height uint)) + (begin + (asserts! (is-eq expected-height burn-block-height) (err burn-block-height)) + (asserts! (is-some (get-burn-block-info? 
header-hash burn-block-height)) (err u0)) + (ok true) + ) + ) + (define-public (bar (expected-height uint)) + (foo expected-height) + ) + "#; + + let contract_tx = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract_name, + contract, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx); + + let mut burn_block_height = 0; + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + + // Don't submit this tx on the first iteration, because the contract is not published yet. + if tenure_ix > 0 { + // Call the read-only function and see if we see the correct burn block height + let expected_height = Value::UInt(burn_block_height); + let arg = expected_height.serialize_to_hex().unwrap(); + let result = call_read_only(&naka_conf, &sender_addr, contract_name, "foo", vec![&arg]); + result.expect_result_ok().expect("Read-only call failed"); + + // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[Value::UInt(burn_block_height + 1)], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); + } + + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info(&naka_conf); + burn_block_height = info.burn_block_height as u128; + info!("Expecting burn block height to be {}", burn_block_height); + + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. 
}) => { + // Ignore coinbase and tenure transactions + if *fee == 0 { + return; + } + + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // Call the read-only function and see if we see the correct burn block height + let expected_height = Value::UInt(burn_block_height); + let arg = expected_height.serialize_to_hex().unwrap(); + let result = call_read_only(&naka_conf, &sender_addr, contract_name, "foo", vec![&arg]); + info!("Read-only result: {:?}", result); + result.expect_result_ok().expect("Read-only call failed"); + + // Submit a tx to trigger the next block + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[expected_height], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, .. 
}) => { + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From cbff7f0904099ca7ed2d0a0eed53b63aa46dad68 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 21:02:44 -0400 Subject: [PATCH 002/148] test: update `check_block_heights` for new behavior --- .../src/tests/nakamoto_integrations.rs | 35 ++++++------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d417d00476..7804781c11 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3599,9 +3599,11 @@ fn check_block_heights() { }) .unwrap(); - let mut last_burn_block_height = 0; - let mut last_stacks_block_height = 0; - let mut last_tenure_height = 0; + let info = get_chain_info_result(&naka_conf).unwrap(); + println!("Chain info: {:?}", info); + let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height as u128; let heights0_value = call_read_only( &naka_conf, @@ -3702,15 +3704,7 @@ fn check_block_heights() { .expect_u128() .unwrap(); assert_eq!(bbh1, bbh3, "Burn block heights should match"); - if tenure_ix == 0 { - // Add two for the 2 blocks with no tenure 
during Nakamoto bootup - last_burn_block_height = bbh1 + 2; - } else { - assert_eq!( - bbh1, last_burn_block_height, - "Burn block height should not have changed yet" - ); - } + last_burn_block_height = bbh1; let bh1 = heights1 .get("block-height") @@ -3805,19 +3799,10 @@ fn check_block_heights() { .expect_u128() .unwrap(); assert_eq!(bbh1, bbh3, "Burn block heights should match"); - if interim_block_ix == 0 { - assert_eq!( - bbh1, - last_burn_block_height + 1, - "Burn block heights should have incremented" - ); - last_burn_block_height = bbh1; - } else { - assert_eq!( - bbh1, last_burn_block_height, - "Burn block heights should not have incremented" - ); - } + assert_eq!( + bbh1, last_burn_block_height, + "Burn block heights should not have incremented" + ); let bh1 = heights1 .get("block-height") From ecf176309ccd3743217fb39199fa0054e924b603 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 21:06:03 -0400 Subject: [PATCH 003/148] docs: update readme --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d03444994..0068086840 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,10 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Downgraded log messages about transactions from warning to info (#4697) +### Fixed + +- Allow Nakamoto blocks to access the burn block associated with the current tenure (#4333) + ## [2.5.0.0.3] This release fixes a regression in `2.5.0.0.0` from `2.4.0.1.0` caused by git merge From b5e90694c98548bda5f7163848b501250686940d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 21:06:56 -0400 Subject: [PATCH 004/148] tests: add new integration test to bitcoin-tests.yml --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 2680a3194f..81c5afb752 100644 --- a/.github/workflows/bitcoin-tests.yml +++ 
b/.github/workflows/bitcoin-tests.yml @@ -92,6 +92,7 @@ jobs: - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::signer::stackerdb_delayed_dkg - tests::nakamoto_integrations::check_block_heights + - tests::nakamoto_integrations::clarity_burn_state # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From f3bed349f5c81339f24201f14bc8ff43d4f24f7d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 07:27:42 -0400 Subject: [PATCH 005/148] fix: impl missing methods in docs --- clarity/src/vm/docs/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 940b2f2f6a..e009b5c117 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2766,6 +2766,14 @@ mod test { const DOC_POX_STATE_DB: DocBurnStateDB = DocBurnStateDB {}; impl BurnStateDB for DocBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0x9abc) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { Some(5678) } From 18bac17865fb47b68dcafdbaeecb174feddad566 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 07:49:00 -0400 Subject: [PATCH 006/148] test: fill in methods in test structs --- .../src/chainstate/stacks/boot/contract_tests.rs | 8 ++++++++ .../src/chainstate/stacks/db/transactions.rs | 16 ++++++++++++++++ stackslib/src/clarity_vm/clarity.rs | 8 ++++++++ 3 files changed, 32 insertions(+) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 5d8588836e..b00eec7244 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -347,6 +347,14 @@ fn cost_2_contract_is_arithmetic_only() 
{ } impl BurnStateDB for TestSimBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + panic!("Not implemented in TestSim"); + } + + fn get_tip_sortition_id(&self) -> Option { + panic!("Not implemented in TestSim"); + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { panic!("Not implemented in TestSim"); } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e0cd93d9dc..0ba335afca 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -8620,6 +8620,14 @@ pub mod test { struct MockedBurnDB {} impl BurnStateDB for MockedBurnDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_v1_unlock_height(&self) -> u32 { 2 } @@ -8842,6 +8850,14 @@ pub mod test { struct MockedBurnDB {} impl BurnStateDB for MockedBurnDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_v1_unlock_height(&self) -> u32 { 2 } diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index ac764e0e91..be8a1c12c0 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -2691,6 +2691,14 @@ mod tests { pub struct BlockLimitBurnStateDB {} impl BurnStateDB for BlockLimitBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } From 228179b60e30f5b8049f5dfe41c21cd3f75e1f8d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 11:42:48 -0400 Subject: [PATCH 007/148] test: update `test_block_heights` --- .../chainstate/stacks/boot/contract_tests.rs | 32 ++++--- 
stackslib/src/clarity_vm/tests/contracts.rs | 83 +++++++++++++++---- 2 files changed, 86 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index b00eec7244..a308e5b339 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -142,6 +142,10 @@ impl ClarityTestSim { } } + pub fn burn_block_height(&self) -> u64 { + self.tenure_height + 100 + } + pub fn execute_next_block_as_conn_with_tenure(&mut self, new_tenure: bool, f: F) -> R where F: FnOnce(&mut ClarityBlockConnection) -> R, @@ -152,8 +156,13 @@ impl ClarityTestSim { &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), ); + self.block_height += 1; + if new_tenure { + self.tenure_height += 1; + } + let headers_db = TestSimHeadersDB { - height: self.block_height + 1, + height: self.block_height, }; let burn_db = TestSimBurnStateDB { epoch_bounds: self.epoch_bounds.clone(), @@ -166,7 +175,7 @@ impl ClarityTestSim { let mut db = store.as_clarity_db(&headers_db, &burn_db); if cur_epoch >= StacksEpochId::Epoch30 { db.begin(); - db.set_tenure_height(self.tenure_height as u32 + if new_tenure { 1 } else { 0 }) + db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); db.commit() .expect("FAIL: unable to commit tenure height in Clarity database"); @@ -180,10 +189,6 @@ impl ClarityTestSim { r }; - self.block_height += 1; - if new_tenure { - self.tenure_height += 1; - } r } @@ -203,9 +208,14 @@ impl ClarityTestSim { &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), ); + self.block_height += 1; + if new_tenure { + self.tenure_height += 1; + } + let r = { let headers_db = TestSimHeadersDB { - height: self.block_height + 1, + height: self.block_height, }; let burn_db = TestSimBurnStateDB { epoch_bounds: self.epoch_bounds.clone(), @@ -219,7 +229,7 
@@ impl ClarityTestSim { let mut db = store.as_clarity_db(&headers_db, &burn_db); if cur_epoch >= StacksEpochId::Epoch30 { db.begin(); - db.set_tenure_height(self.tenure_height as u32 + if new_tenure { 1 } else { 0 }) + db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); db.commit() .expect("FAIL: unable to commit tenure height in Clarity database"); @@ -229,10 +239,6 @@ impl ClarityTestSim { }; store.test_commit(); - self.block_height += 1; - if new_tenure { - self.tenure_height += 1; - } r } @@ -348,7 +354,7 @@ fn cost_2_contract_is_arithmetic_only() { impl BurnStateDB for TestSimBurnStateDB { fn get_tip_burn_block_height(&self) -> Option { - panic!("Not implemented in TestSim"); + Some(self.height as u32) } fn get_tip_sortition_id(&self) -> Option { diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 0cdc1ad8bf..017662d93c 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -898,6 +898,8 @@ fn test_block_heights() { } let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); assert_eq!(epoch, StacksEpochId::Epoch30); @@ -1010,17 +1012,17 @@ fn test_block_heights() { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple(TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height + 1)) + ("burn-block-height".into(), Value::UInt(burn_block_height + 1)), + ("block-height".into(), Value::UInt(tenure_height + 1)) ]).unwrap()), tx.eval_read_only(&contract_identifier1, "(test-func)") .unwrap() ); assert_eq!( Value::Tuple(TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ("burn-block-height".into(), 
Value::UInt(burn_block_height + 1)), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height + 1)) + ("tenure-height".into(), Value::UInt(tenure_height + 1)) ]).unwrap()), tx.eval_read_only(&contract_identifier2, "(test-func)") .unwrap() @@ -1029,13 +1031,18 @@ fn test_block_heights() { // Call the contracts in the next block and validate the results let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn(|conn| { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height + 1)), + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), + ("block-height".into(), Value::UInt(tenure_height + 1)), ]) .unwrap() ), @@ -1045,9 +1052,12 @@ fn test_block_heights() { assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height + 1)) + ("tenure-height".into(), Value::UInt(tenure_height + 1)) ]) .unwrap() ), @@ -1058,13 +1068,15 @@ fn test_block_heights() { // Call the contracts in the next block with no new tenure and validate the results let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn_with_tenure(false, |conn| { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height)) + 
("burn-block-height".into(), Value::UInt(burn_block_height)), + ("block-height".into(), Value::UInt(tenure_height)) ]) .unwrap() ), @@ -1074,9 +1086,9 @@ fn test_block_heights() { assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ("burn-block-height".into(), Value::UInt(burn_block_height)), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height)) + ("tenure-height".into(), Value::UInt(tenure_height)) ]) .unwrap() ), @@ -1087,13 +1099,49 @@ fn test_block_heights() { // Call the contracts in the next block with no new tenure and validate the results let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; + sim.execute_next_block_as_conn_with_tenure(false, |conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(burn_block_height)), + ("block-height".into(), Value::UInt(tenure_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(burn_block_height)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(tenure_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block with a new tenure and validate the results + let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn(|conn| { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), 
Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height)) + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), + ("block-height".into(), Value::UInt(tenure_height + 1)) ]) .unwrap() ), @@ -1103,9 +1151,12 @@ fn test_block_heights() { assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height)) + ("tenure-height".into(), Value::UInt(tenure_height + 1)) ]) .unwrap() ), From a316dac4d1d46a5703287e554972e5ff10a6b8a4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 13:45:23 -0400 Subject: [PATCH 008/148] fix: update `test_get_burn_block_info_eval` --- stackslib/src/clarity_vm/tests/contracts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 017662d93c..a14d47b906 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -169,7 +169,7 @@ fn test_get_burn_block_info_eval() { // burnchain is 100 blocks ahead of stacks chain in this sim assert_eq!( Value::Optional(OptionalData { data: None }), - tx.eval_read_only(&contract_identifier, "(test-func u103)") + tx.eval_read_only(&contract_identifier, "(test-func u203)") .unwrap() ); }); From 029d5ef454d7ef65e50c3e3b9bca72fcf8aca031 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 15:54:51 -0400 Subject: [PATCH 009/148] refactor: simplify test setup --- clarity/src/vm/tests/mod.rs | 17 ++++++++++++----- clarity/src/vm/tests/variables.rs | 18 ------------------ 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index c60377ba3d..9a21596ca9 100644 --- a/clarity/src/vm/tests/mod.rs 
+++ b/clarity/src/vm/tests/mod.rs @@ -162,7 +162,15 @@ pub fn tl_env_factory() -> TopLevelMemoryEnvironmentGenerator { pub struct MemoryEnvironmentGenerator(MemoryBackingStore); impl MemoryEnvironmentGenerator { fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { - let mut owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + let mut db = self.0.as_clarity_db(); + db.begin(); + db.set_clarity_epoch_version(epoch).unwrap(); + db.set_tenure_height(0).unwrap(); + if epoch >= StacksEpochId::Epoch30 { + db.set_tenure_height(1).unwrap(); + } + db.commit().unwrap(); + let mut owned_env = OwnedEnvironment::new(db, epoch); // start an initial transaction. owned_env.begin(); owned_env @@ -175,12 +183,11 @@ impl TopLevelMemoryEnvironmentGenerator { let mut db = self.0.as_clarity_db(); db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); - db.commit().unwrap(); - let mut owned_env = OwnedEnvironment::new(db, epoch); if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); + db.set_tenure_height(1).unwrap(); } - owned_env + db.commit().unwrap(); + OwnedEnvironment::new(db, epoch) } } diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 41b880afe9..5b392bb678 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -58,12 +58,6 @@ fn test_block_height( assert!(analysis.is_ok()); } - // If we're testing epoch 3, we need to simulate the tenure height being - // set at the transition. - if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); - } - // Initialize the contract // Note that we're ignoring the analysis failure here so that we can test // the runtime behavior. In Clarity 3, if this case somehow gets past the @@ -122,12 +116,6 @@ fn test_stacks_block_height( assert!(analysis.is_ok()); } - // If we're testing epoch 3, we need to simulate the tenure height being - // set at the transition. 
- if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); - } - // Initialize the contract // Note that we're ignoring the analysis failure here so that we can test // the runtime behavior. In Clarity 3, if this case somehow gets past the @@ -188,12 +176,6 @@ fn test_tenure_height( assert!(analysis.is_ok()); } - // If we're testing epoch 3, we need to simulate the tenure height being - // set at the transition. - if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); - } - // Initialize the contract // Note that we're ignoring the analysis failure here so that we can test // the runtime behavior. In Clarity 3, if this case somehow gets past the From 9439cdc525f5a31dede5b6a16ae35acf36a71ea2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 16:13:03 -0400 Subject: [PATCH 010/148] fix: set default block height in test implementation --- clarity/src/vm/database/clarity_db.rs | 2 +- clarity/src/vm/tests/mod.rs | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 5394842a1c..4ba8fc097f 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -353,7 +353,7 @@ impl HeadersDB for NullHeadersDB { #[allow(clippy::panic)] impl BurnStateDB for NullBurnStateDB { fn get_tip_burn_block_height(&self) -> Option { - None + Some(0) } fn get_tip_sortition_id(&self) -> Option { diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 9a21596ca9..715c205475 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -165,11 +165,12 @@ impl MemoryEnvironmentGenerator { let mut db = self.0.as_clarity_db(); db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); - db.set_tenure_height(0).unwrap(); + db.commit().unwrap(); if epoch >= StacksEpochId::Epoch30 { + db.begin(); db.set_tenure_height(1).unwrap(); + db.commit().unwrap(); } - db.commit().unwrap(); let 
mut owned_env = OwnedEnvironment::new(db, epoch); // start an initial transaction. owned_env.begin(); @@ -183,10 +184,12 @@ impl TopLevelMemoryEnvironmentGenerator { let mut db = self.0.as_clarity_db(); db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); + db.commit().unwrap(); if epoch >= StacksEpochId::Epoch30 { + db.begin(); db.set_tenure_height(1).unwrap(); + db.commit().unwrap(); } - db.commit().unwrap(); OwnedEnvironment::new(db, epoch) } } From 9e270b7934eaa35522e88349e9d7608e39346fd7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 22:34:49 -0400 Subject: [PATCH 011/148] refactor: remove unnecessary `impl`s --- clarity/src/vm/database/clarity_db.rs | 120 ------------------ .../src/chainstate/stacks/db/transactions.rs | 42 +++--- stackslib/src/clarity_vm/clarity.rs | 21 ++- 3 files changed, 31 insertions(+), 152 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4ba8fc097f..67f4209e00 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -155,126 +155,6 @@ pub trait BurnStateDB { ) -> Option<(Vec, u128)>; } -impl HeadersDB for &dyn HeadersDB { - fn get_stacks_block_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - (*self).get_stacks_block_header_hash_for_block(id_bhh) - } - fn get_burn_header_hash_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_header_hash_for_block(bhh) - } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_consensus_hash_for_block(id_bhh) - } - fn get_vrf_seed_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_vrf_seed_for_block(bhh) - } - fn get_burn_block_time_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_block_time_for_block(bhh) - } - fn get_burn_block_height_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_block_height_for_block(bhh) - } - fn get_miner_address(&self, 
bhh: &StacksBlockId) -> Option { - (*self).get_miner_address(bhh) - } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_burnchain_tokens_spent_for_block(id_bhh) - } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_burnchain_tokens_spent_for_winning_block(id_bhh) - } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_tokens_earned_for_block(id_bhh) - } -} - -impl BurnStateDB for &dyn BurnStateDB { - fn get_tip_burn_block_height(&self) -> Option { - (*self).get_tip_burn_block_height() - } - - fn get_tip_sortition_id(&self) -> Option { - (*self).get_tip_sortition_id() - } - - fn get_v1_unlock_height(&self) -> u32 { - (*self).get_v1_unlock_height() - } - - fn get_v2_unlock_height(&self) -> u32 { - (*self).get_v2_unlock_height() - } - - fn get_v3_unlock_height(&self) -> u32 { - (*self).get_v3_unlock_height() - } - - fn get_pox_3_activation_height(&self) -> u32 { - (*self).get_pox_3_activation_height() - } - - fn get_pox_4_activation_height(&self) -> u32 { - (*self).get_pox_4_activation_height() - } - - fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { - (*self).get_burn_block_height(sortition_id) - } - - fn get_sortition_id_from_consensus_hash( - &self, - consensus_hash: &ConsensusHash, - ) -> Option { - (*self).get_sortition_id_from_consensus_hash(consensus_hash) - } - - fn get_burn_start_height(&self) -> u32 { - (*self).get_burn_start_height() - } - - fn get_burn_header_hash( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option { - (*self).get_burn_header_hash(height, sortition_id) - } - - fn get_stacks_epoch(&self, height: u32) -> Option { - (*self).get_stacks_epoch(height) - } - - fn get_pox_prepare_length(&self) -> u32 { - (*self).get_pox_prepare_length() - } - - fn get_pox_reward_cycle_length(&self) -> u32 { - (*self).get_pox_reward_cycle_length() - } - - fn 
get_pox_rejection_fraction(&self) -> u64 { - (*self).get_pox_rejection_fraction() - } - fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { - (*self).get_stacks_epoch_by_epoch_id(epoch_id) - } - - fn get_ast_rules(&self, height: u32) -> ASTRules { - (*self).get_ast_rules(height) - } - - fn get_pox_payout_addrs( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option<(Vec, u128)> { - (*self).get_pox_payout_addrs(height, sortition_id) - } -} - pub struct NullHeadersDB {} pub struct NullBurnStateDB { epoch: StacksEpochId, diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 0ba335afca..d80792a822 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1716,7 +1716,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -1946,7 +1946,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2060,7 +2060,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2151,7 +2151,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2214,7 +2214,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, 
&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2322,7 +2322,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2413,7 +2413,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2532,7 +2532,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2646,7 +2646,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // process both let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2785,7 +2785,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // process both let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2895,7 +2895,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3020,7 +3020,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3130,7 +3130,7 @@ pub mod test { for (dbi, burn_db) in PRE_21_DBS.iter().enumerate() { let mut 
conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3343,7 +3343,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3886,7 +3886,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // make sure costs-3 is instantiated, so as-contract works in 2.1 let mut conn = chainstate.test_genesis_block_begin_2_1( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -4609,7 +4609,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // make sure costs-3 is installed so as-contract will work in epoch 2.1 let mut conn = chainstate.test_genesis_block_begin_2_1( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -4984,7 +4984,7 @@ pub mod test { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8116,7 +8116,7 @@ pub mod test { // which leads to an InvalidFee error for (dbi, burn_db) in PRE_21_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8273,7 +8273,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8394,7 +8394,7 @@ pub mod test { 
for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8488,7 +8488,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index be8a1c12c0..812d39bb97 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -638,8 +638,7 @@ impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { where F: FnOnce(ClarityDatabase) -> (R, ClarityDatabase), { - let mut db = - ClarityDatabase::new(&mut self.datastore, &self.header_db, &self.burn_state_db); + let mut db = ClarityDatabase::new(&mut self.datastore, self.header_db, self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); db.roll_back() @@ -672,7 +671,7 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { { let mut db = self .datastore - .as_clarity_db(&self.header_db, &self.burn_state_db); + .as_clarity_db(self.header_db, self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); db.roll_back() @@ -1528,8 +1527,8 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; - let header_db = &self.header_db; - let burn_state_db = &self.burn_state_db; + let header_db = self.header_db; + let burn_state_db = self.burn_state_db; let mainnet = self.mainnet; let chain_id = self.chain_id; let mut log = RollbackWrapperPersistedLog::new(); @@ -1608,8 +1607,8 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = 
RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); db.begin(); let (r, mut db) = to_do(db); @@ -1673,8 +1672,8 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); // wrap the whole contract-call in a claritydb transaction, @@ -1741,8 +1740,8 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); db.begin(); From 44dd3dc754dfb165f2126956f16aeea9abd12e8c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 22:36:36 -0400 Subject: [PATCH 012/148] refactor: simplify expression Co-authored-by: Jeff Bencin --- clarity/src/vm/database/clarity_db.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 67f4209e00..4c419dcb2a 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -868,13 +868,12 @@ impl<'a> ClarityDatabase<'a> { // In epoch 2, we can only access the burn block associated with the last block if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { - let last_mined_bhh = if cur_stacks_height == 0 { + if cur_stacks_height == 0 { return Ok(self.burn_state_db.get_burn_start_height()); - } else { - // Safety note: normal subtraction is safe here, because we've already checked - // that cur_stacks_height > 0. 
- self.get_index_block_header_hash(cur_stacks_height - 1)? }; + // Safety note: normal subtraction is safe here, because we've already checked + // that cur_stacks_height > 0. + let last_mined_bhh = self.get_index_block_header_hash(cur_stacks_height - 1)?; self.get_burnchain_block_height(&last_mined_bhh) .ok_or_else(|| { From 8ac11939512a3da8d5ca8a332409d3375ed7f909 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 12 May 2024 20:02:32 -0700 Subject: [PATCH 013/148] wip: modify nakamoto block header to use `Vec` --- stackslib/src/chainstate/nakamoto/mod.rs | 100 ++++++++++++++---- .../src/chainstate/nakamoto/test_signers.rs | 29 ++--- .../src/chainstate/nakamoto/tests/mod.rs | 20 ++-- .../download/nakamoto/tenure_downloader.rs | 70 ++++++------ .../nakamoto/tenure_downloader_unconfirmed.rs | 63 ++++++----- stackslib/src/net/relay.rs | 49 ++++++--- stackslib/src/net/tests/download/nakamoto.rs | 6 +- testnet/stacks-node/src/event_dispatcher.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 62 ++++++++++- .../src/nakamoto_node/sign_coordinator.rs | 4 +- .../src/tests/nakamoto_integrations.rs | 23 ++-- testnet/stacks-node/src/tests/signer/v1.rs | 4 +- 12 files changed, 294 insertions(+), 138 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 17b0bed358..ed64a3f217 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -29,6 +29,7 @@ use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; +use serde_json::Value as SerdeValue; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -178,8 +179,8 @@ lazy_static! 
{ state_index_root TEXT NOT NULL, -- miner's signature over the block miner_signature TEXT NOT NULL, - -- signers' signature over the block - signer_signature TEXT NOT NULL, + -- signers' signatures over the block + signer_signature BLOB NOT NULL, -- bitvec capturing stacker participation in signature signer_bitvec TEXT NOT NULL, -- The following fields are not part of either the StacksHeaderInfo struct @@ -305,8 +306,10 @@ pub struct NakamotoBlockHeader { pub state_index_root: TrieHash, /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, - /// Schnorr signature over the block header from the signer set active during the tenure. - pub signer_signature: ThresholdSignature, + /// The set of recoverable ECDSA signatures over + /// the block header from the signer set active during the tenure. + /// (ordered by reward set order) + pub signer_signature: Vec, /// A bitvec which represents the signers that participated in this block signature. /// The maximum number of entries in the bitvec is 4000. 
pub signer_bitvec: BitVec<4000>, @@ -325,9 +328,19 @@ impl FromRow for NakamotoBlockHeader { let parent_block_id = row.get("parent_block_id")?; let tx_merkle_root = row.get("tx_merkle_root")?; let state_index_root = row.get("state_index_root")?; - let signer_signature = row.get("signer_signature")?; let miner_signature = row.get("miner_signature")?; let signer_bitvec = row.get("signer_bitvec")?; + let signer_signature: SerdeValue = row.get_unwrap("signer_signature"); + let signer_signature = signer_signature + .as_array() + .map(|values| { + values + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, serde_json::Error>>() + }) + .ok_or_else(|| DBError::Corruption)??; Ok(NakamotoBlockHeader { version, @@ -490,10 +503,34 @@ impl NakamotoBlockHeader { } /// Verify the block header against an aggregate public key - pub fn verify_signer(&self, signer_aggregate: &Point) -> bool { - let schnorr_signature = &self.signer_signature.0; + pub fn verify_threshold_signer( + &self, + signer_aggregate: &Point, + signature: &ThresholdSignature, + ) -> bool { let message = self.signer_signature_hash().0; - schnorr_signature.verify(signer_aggregate, &message) + signature.verify(signer_aggregate, &message) + } + + /// Verify the block header against the list of signer signatures + /// + /// TODO: ingest the list of signer pubkeys + /// + /// TODO: validate against: + /// - Any invalid signatures + /// - Any duplicate signatures + /// - At least the minimum number of signatures + pub fn verify_signer_signatures(&self, _reward_set: &RewardSet) -> Result<(), ChainstateError> { + // TODO: verify each signature in the block + let _sig_hash = self.signer_signature_hash(); + + let _signatures = self + .signer_signature + .iter() + .map(|sig| sig.clone()) + .collect::>(); + + return Ok(()); } /// Make an "empty" header whose block data needs to be filled in. 
@@ -514,7 +551,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), signer_bitvec: BitVec::ones(bitvec_len) .expect("BUG: bitvec of length-1 failed to construct"), } @@ -531,7 +568,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -547,7 +584,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -1690,13 +1727,16 @@ impl NakamotoChainState { /// Does nothing if: /// * we already have the block /// Returns true if we stored the block; false if not. 
+ /// + /// TODO: ingest the list of signer keys (instead of aggregate key) pub fn accept_block( config: &ChainstateConfig, block: NakamotoBlock, db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - aggregate_public_key: &Point, + _aggregate_public_key: &Point, + reward_set: RewardSet, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block @@ -1743,17 +1783,28 @@ impl NakamotoChainState { return Ok(false); }; - let schnorr_signature = &block.header.signer_signature.0; - if !db_handle.expects_signer_signature( - &block.header.consensus_hash, - schnorr_signature, - &block.header.signer_signature_hash().0, - aggregate_public_key, - )? { - let msg = format!( - "Received block, but the signer signature does not match the active stacking cycle" + // TODO: epoch gate to verify aggregate signature + // let schnorr_signature = &block.header.signer_signature.0; + // if !db_handle.expects_signer_signature( + // &block.header.consensus_hash, + // schnorr_signature, + // &block.header.signer_signature_hash().0, + // aggregate_public_key, + // )? 
{ + // let msg = format!( + // "Received block, but the signer signature does not match the active stacking cycle" + // ); + // warn!("{}", msg; "aggregate_key" => %aggregate_public_key); + // return Err(ChainstateError::InvalidStacksBlock(msg)); + // } + + // TODO: epoch gate to verify signatures vec + if let Err(e) = block.header.verify_signer_signatures(&reward_set) { + warn!("Received block, but the signer signatures are invalid"; + "block_id" => %block.block_id(), + "error" => ?e ); - warn!("{}", msg; "aggregate_key" => %aggregate_public_key); + let msg = format!("Received block, but the signer signatures are invalid"); return Err(ChainstateError::InvalidStacksBlock(msg)); } @@ -2236,6 +2287,9 @@ impl NakamotoChainState { let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex()); + let signer_signature = serde_json::to_string(&header.signer_signature) + .expect("Unable to serialize signer signatures"); + let args: &[&dyn ToSql] = &[ &u64_to_sql(*stacks_block_height)?, &index_root, @@ -2249,7 +2303,7 @@ impl NakamotoChainState { &u64_to_sql(header.chain_length)?, &u64_to_sql(header.burn_spent)?, &header.miner_signature, - &header.signer_signature, + &signer_signature, &header.tx_merkle_root, &header.state_index_root, &block_hash, diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 30a1ba8120..7179664fac 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -19,6 +19,7 @@ use std::collections::{HashSet, VecDeque}; use std::path::{Path, PathBuf}; use std::{fs, io}; +use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; @@ -77,6 +78,8 @@ pub struct TestSigners { pub party_key_ids: Vec>, /// The cycle for which the signers are valid pub cycle: u64, + /// The signer's private keys + 
pub signer_keys: Vec, } impl Default for TestSigners { @@ -104,6 +107,11 @@ impl Default for TestSigners { }) .collect(); + let mut signer_keys = Vec::::new(); + for _ in 0..num_keys { + signer_keys.push(Secp256k1PrivateKey::default()); + } + // Generate an aggregate public key let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { Ok(poly_commitments) => poly_commitments, @@ -124,29 +132,24 @@ impl Default for TestSigners { threshold, party_key_ids, cycle: 0, + signer_keys, } } } impl TestSigners { + // TODO: sign using vec of signatures pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock, cycle: u64) { // Update the aggregate public key if the cycle has changed if self.cycle != cycle { self.generate_aggregate_key(cycle); } - - let mut rng = rand_core::OsRng; let msg = block.header.signer_signature_hash().0; - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); - - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - let signature = sig_aggregator - .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); + let signer_signature = self + .signer_keys + .iter() + .map(|key| key.sign(&msg).unwrap()) + .collect::>(); test_debug!( "Signed Nakamoto block {} with {} (rc {})", @@ -154,7 +157,7 @@ impl TestSigners { &self.aggregate_public_key, cycle ); - block.header.signer_signature = ThresholdSignature(signature); + block.header.signer_signature = signer_signature; } // Generate and assign a new aggregate public key diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index f8d048aaf1..533c339115 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -154,7 +154,7 @@ fn codec_nakamoto_header() { 
tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(8).unwrap(), }; @@ -204,7 +204,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -761,7 +761,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -805,7 +805,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_2, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -844,7 +844,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -1519,7 +1519,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header.sign_miner(&private_key).unwrap(); @@ -1538,7 +1538,7 @@ fn 
test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_ch, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_ch.sign_miner(&private_key).unwrap(); @@ -1557,7 +1557,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_miner_sig, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_miner_sig @@ -1711,7 +1711,7 @@ pub fn test_get_highest_nakamoto_tenure() { tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), state_index_root: TrieHash([0x00; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let tenure_change = TenureChangePayload { @@ -2012,7 +2012,7 @@ fn test_make_miners_stackerdb_config() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let block = NakamotoBlock { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index c5ea7ba345..340fa717fd 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -243,18 +243,19 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !tenure_start_block - .header - .verify_signer(&self.start_aggregate_public_key) - { - // signature verification failed - warn!("Invalid tenure-start block: bad 
signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !tenure_start_block + // .header + // .verify_threshold_signer(&self.start_aggregate_public_key) + // { + // // signature verification failed + // warn!("Invalid tenure-start block: bad signer signature"; + // "tenure_id" => %self.tenure_id_consensus_hash, + // "block.header.block_id" => %tenure_start_block.header.block_id(), + // "start_aggregate_public_key" => %self.start_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } debug!( "Accepted tenure-start block for tenure {} block={}", @@ -369,18 +370,19 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !tenure_end_block - .header - .verify_signer(&self.end_aggregate_public_key) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "end_aggregate_public_key" => %self.end_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !tenure_end_block + // .header + // .verify_threshold_signer(&self.end_aggregate_public_key) + // { + // // bad signature + // warn!("Invalid tenure-end block: bad signer signature"; + // "tenure_id" => %self.tenure_id_consensus_hash, + // "block.header.block_id" => %tenure_end_block.header.block_id(), + // "end_aggregate_public_key" => %self.end_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } // extract the needful -- need the tenure-change payload (which proves that the 
tenure-end // block is the tenure-start block for the next tenure) and the parent block ID (which is @@ -470,14 +472,18 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !block.header.verify_signer(&self.start_aggregate_public_key) { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !block + // .header + // .verify_threshold_signer(&self.start_aggregate_public_key) + // { + // warn!("Invalid block: bad signer signature"; + // "tenure_id" => %self.tenure_id_consensus_hash, + // "block.header.block_id" => %block.header.block_id(), + // "start_aggregate_public_key" => %self.start_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } expected_block_id = &block.header.parent_block_id; count += 1; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4c48a5762f..7a22b4ef2b 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -369,23 +369,26 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(tenure_tip) = self.tenure_tip.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { - return Err(NetError::InvalidState); - }; + + // TODO: epoch-gated loading of aggregate key + // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() + // else { + // return Err(NetError::InvalidState); + // }; // stacker signature has to match the current aggregate 
public key - if !unconfirmed_tenure_start_block - .header - .verify_signer(unconfirmed_aggregate_public_key) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !unconfirmed_tenure_start_block + // .header + // .verify_threshold_signer(unconfirmed_aggregate_public_key) + // { + // warn!("Invalid tenure-start block: bad signer signature"; + // "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + // "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } // block has to match the expected hash if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { @@ -433,10 +436,12 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(tenure_tip) = self.tenure_tip.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { - return Err(NetError::InvalidState); - }; + + // TODO: epoch-gated load aggregate key + // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() + // else { + // return Err(NetError::InvalidState); + // }; if tenure_blocks.is_empty() { // nothing to do @@ -454,14 +459,18 @@ impl NakamotoUnconfirmedTenureDownloader { "block_id" => %block.header.block_id()); return Err(NetError::InvalidMessage); } - if 
!block.header.verify_signer(unconfirmed_aggregate_public_key) { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !block + // .header + // .verify_threshold_signer(unconfirmed_aggregate_public_key) + // { + // warn!("Invalid block: bad signer signature"; + // "tenure_id" => %tenure_tip.consensus_hash, + // "block.header.block_id" => %block.header.block_id(), + // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } // we may or may not need the tenure-start block for the unconfirmed tenure. But if we // do, make sure it's valid, and it's the last block we receive. diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7236ef76e4..a4506e67e1 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -33,7 +33,9 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleConn}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, +}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::BlockEventDispatcher; @@ -721,17 +723,38 @@ impl Relayer { ); let config = chainstate.config(); - let Ok(aggregate_public_key) = - NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) - else { - warn!("Failed to get aggregate public key. 
Will not store or relay"; - "stacks_block_hash" => %block.header.block_hash(), - "consensus_hash" => %block.header.consensus_hash, - "burn_height" => block.header.chain_length, - "sortition_height" => block_sn.block_height, - ); - return Ok(false); + + // TODO: epoch gate to verify with aggregate key + // let Ok(aggregate_public_key) = + // NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) + // else { + // warn!("Failed to get aggregate public key. Will not store or relay"; + // "stacks_block_hash" => %block.header.block_hash(), + // "consensus_hash" => %block.header.consensus_hash, + // "burn_height" => block.header.chain_length, + // "sortition_height" => block_sn.block_height, + // ); + // return Ok(false); + // }; + + // TODO: epoch gate to use signatures vec + let tip = sort_handle.tip(); + + let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { + Ok(Some(x)) => x, + Ok(None) => { + return Err(chainstate_error::PoxNoRewardCycle); + } + Err(e) => { + return Err(chainstate_error::DBError(e)); + } }; + let reward_cycle = reward_info.reward_cycle; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(chainstate_error::NoRegisteredSigners(reward_cycle)); + }; + let (headers_conn, staging_db_tx) = chainstate.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &config, @@ -739,7 +762,9 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - &aggregate_public_key, + // &aggregate_public_key, + &Point::new(), + reward_set, )?; staging_db_tx.commit()?; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 31c42c8afb..8528ac8f4c 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -103,7 +103,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), 
miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -171,7 +171,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -192,7 +192,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index f9d4a4b4fb..0e799ceec4 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -658,7 +658,7 @@ impl EventObserver { ); as_object_mut.insert( "signer_signature".into(), - format!("0x{}", &header.signer_signature).into(), + serde_json::to_value(&header.signer_signature).unwrap_or_default(), ); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d6edd79963..6ee63ef0fe 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -28,6 +28,7 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::boot::RewardSet; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ CoinbasePayload, Error 
as ChainstateError, StacksTransaction, StacksTransactionSigner, @@ -35,6 +36,7 @@ use stacks::chainstate::stacks::{ TransactionPayload, TransactionVersion, }; use stacks::net::stackerdb::StackerDBs; +use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -180,7 +182,7 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - let (aggregate_public_key, signers_signature) = match self.coordinate_signature( + let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, &mut stackerdbs, @@ -188,13 +190,15 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Unrecoverable error while proposing block to signer set: {e:?}. Ending tenure."); + error!( + "Unrecoverable error while gathering signatures: {e:?}. Ending tenure." + ); return; } }; - new_block.header.signer_signature = signers_signature; - if let Err(e) = self.broadcast(new_block.clone(), &aggregate_public_key) { + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), &Point::new(), reward_set) { warn!("Error accepting own block: {e:?}. 
Will try mining again."); continue; } else { @@ -233,6 +237,7 @@ impl BlockMinerThread { } } + #[allow(dead_code)] fn coordinate_signature( &mut self, new_block: &mut NakamotoBlock, @@ -330,6 +335,51 @@ impl BlockMinerThread { Ok((aggregate_public_key, signature)) } + /// Gather signatures from the signers for the block + fn gather_signatures( + &mut self, + new_block: &mut NakamotoBlock, + _burn_block_height: u64, + _stackerdbs: &mut StackerDBs, + _attempts: &mut u64, + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let tip = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &new_block.header.consensus_hash, + ) + .expect("FATAL: could not retrieve chain tip") + .expect("FATAL: could not retrieve chain tip"); + + let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { + Ok(Some(x)) => x, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set found. Cannot initialize miner coordinator.".into(), + )); + } + Err(e) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" + ))); + } + }; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Current reward cycle did not select a reward set. Cannot mine!".into(), + )); + }; + + // TODO: collect signatures from signers + return Ok((reward_set, vec![])); + } + fn get_stackerdb_contract_and_slots( &self, stackerdbs: &StackerDBs, @@ -443,10 +493,13 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + /// TODO: update to utilize `signer_signature` vec instead of the aggregate + /// public key. 
fn broadcast( &self, block: NakamotoBlock, aggregate_public_key: &Point, + reward_set: RewardSet, ) -> Result<(), ChainstateError> { #[cfg(test)] { @@ -484,6 +537,7 @@ impl BlockMinerThread { &staging_tx, headers_conn, &aggregate_public_key, + reward_set, )?; staging_tx.commit()?; Ok(()) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 4667958911..764ae60c3c 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -413,11 +413,11 @@ impl SignCoordinator { // In test mode, short-circuit waiting for the signers if the TEST_SIGNING // channel has been created. This allows integration tests for the stacks-node // independent of the stacks-signer. - if let Some(signature) = + if let Some(_signatures) = crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() { debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signature); + return Ok(ThresholdSignature::empty()); } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..6169ee524d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -45,7 +45,7 @@ use stacks::chainstate::stacks::boot::{ }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; +use stacks::chainstate::stacks::{StacksTransaction, TransactionPayload}; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -59,6 +59,7 @@ use stacks::net::api::postblock_proposal::{ 
BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; use stacks::util::hash::hex_bytes; +use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -167,8 +168,10 @@ lazy_static! { pub static TEST_SIGNING: Mutex> = Mutex::new(None); pub struct TestSigningChannel { - pub recv: Option>, - pub send: Sender, + // pub recv: Option>, + pub recv: Option>>, + // pub send: Sender, + pub send: Sender>, } impl TestSigningChannel { @@ -177,14 +180,16 @@ impl TestSigningChannel { /// Returns None if the singleton isn't instantiated and the miner should coordinate /// a real signer set signature. /// Panics if the blind-signer times out. - pub fn get_signature() -> Option { + /// + /// TODO: update to use signatures vec + pub fn get_signature() -> Option> { let mut signer = TEST_SIGNING.lock().unwrap(); let Some(sign_channels) = signer.as_mut() else { return None; }; let recv = sign_channels.recv.take().unwrap(); drop(signer); // drop signer so we don't hold the lock while receiving. - let signature = recv.recv_timeout(Duration::from_secs(30)).unwrap(); + let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap(); let overwritten = TEST_SIGNING .lock() .unwrap() @@ -193,12 +198,12 @@ impl TestSigningChannel { .recv .replace(recv); assert!(overwritten.is_none()); - Some(signature) + Some(signatures) } /// Setup the TestSigningChannel as a singleton using TEST_SIGNING, /// returning an owned Sender to the channel. 
- pub fn instantiate() -> Sender { + pub fn instantiate() -> Sender> { let (send, recv) = channel(); let existed = TEST_SIGNING.lock().unwrap().replace(Self { recv: Some(recv), @@ -335,7 +340,7 @@ pub fn read_and_sign_block_proposal( conf: &Config, signers: &TestSigners, signed_blocks: &HashSet, - channel: &Sender, + channel: &Sender>, ) -> Result { let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -2216,7 +2221,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); - proposed_zero_block.header.signer_signature = ThresholdSignature::empty(); + proposed_zero_block.header.signer_signature = Vec::::new(); let proposed_zero_block_hash = format!("0x{}", proposed_zero_block.header.block_hash()); coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 83a03fee00..b8d20ea76d 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -529,7 +529,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block1 = NakamotoBlock { @@ -556,7 +556,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block2 = NakamotoBlock { From fd0407bf94ace42529caa29f69b015ac738250ef Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 13 May 2024 15:31:56 -0700 Subject: [PATCH 014/148] feat: verify signatures in nakamoto 
block header --- stackslib/src/chainstate/nakamoto/mod.rs | 84 ++++- .../src/chainstate/nakamoto/tests/mod.rs | 304 +++++++++++++++++- 2 files changed, 371 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ed64a3f217..8752118118 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -19,6 +19,8 @@ use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; +use clarity::types::PublicKey; +use clarity::util::secp256k1::{secp256k1_recover, Secp256k1PublicKey}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::{BurnStateDB, ClarityDatabase}; @@ -58,8 +60,9 @@ use super::burn::db::sortdb::{ }; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp}; use super::stacks::boot::{ - PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, - BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, SIGNERS_NAME, SIGNERS_PK_LEN, + NakamotoSignerEntry, PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, + SIGNERS_NAME, SIGNERS_PK_LEN, }; use super::stacks::db::accounts::MinerReward; use super::stacks::db::{ @@ -514,21 +517,72 @@ impl NakamotoBlockHeader { /// Verify the block header against the list of signer signatures /// - /// TODO: ingest the list of signer pubkeys - /// - /// TODO: validate against: - /// - Any invalid signatures + /// Validate against: + /// - Any invalid signatures (eg not recoverable or not from a signer) /// - Any duplicate signatures - /// - At least the minimum number of signatures - pub fn verify_signer_signatures(&self, _reward_set: &RewardSet) -> Result<(), ChainstateError> { - // TODO: verify each signature in the block - let _sig_hash = self.signer_signature_hash(); 
+ /// - At least the minimum number of signatures (based on total signer weight + /// and a 70% threshold) + /// - Order of signatures is maintained vs signer set + pub fn verify_signer_signatures(&self, reward_set: &RewardSet) -> Result<(), ChainstateError> { + let message = self.signer_signature_hash(); + let Some(signers) = &reward_set.signers else { + return Err(ChainstateError::InvalidStacksBlock( + "No signers in the reward set".to_string(), + )); + }; - let _signatures = self - .signer_signature + let mut total_weight_signed: u32 = 0; + // `last_index` is used to prevent out-of-order signatures + let mut last_index = None; + + let total_weight = signers.iter().map(|s| s.weight).sum::<u32>(); + + // HashMap of <public key bytes, (signer, index)> + let signers_by_pk = signers .iter() - .map(|sig| sig.clone()) - .collect::<Vec<_>>(); + .enumerate() + .map(|(i, signer)| (signer.signing_key, (signer.clone(), i))) + .collect::<HashMap<_, _>>(); + + for signature in &self.signer_signature { + let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature) + .map_err(|_| { + ChainstateError::InvalidStacksBlock(format!( + "Unable to recover public key from signature {}", + signature.to_hex() + )) + })?; + + let (signer, signer_index) = signers_by_pk + .get(public_key.to_bytes().as_slice()) + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock(format!( + "Public key {} not found in the reward set", + public_key.to_hex() + )) + })?; + + // Enforce order of signatures + match last_index { + Some(index) if index >= *signer_index => { + return Err(ChainstateError::InvalidStacksBlock( + "Signatures are out of order".to_string(), + )); + } + _ => last_index = Some(*signer_index), + } + + total_weight_signed += signer.weight; + } + + // Calculate 70% of total weight as the threshold + let threshold = (total_weight as f64 * 7_f64 / 10_f64).ceil() as u32; + + if total_weight_signed < threshold { + return Err(ChainstateError::InvalidStacksBlock( + "Not enough signatures".to_string(), + )); + } return Ok(()); } @@
-1727,8 +1781,6 @@ impl NakamotoChainState { /// Does nothing if: /// * we already have the block /// Returns true if we stored the block; false if not. - /// - /// TODO: ingest the list of signer keys (instead of aggregate key) pub fn accept_block( config: &ChainstateConfig, block: NakamotoBlock, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 533c339115..2aa9a6a19a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use std::fs; use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; +use clarity::util::secp256k1::Secp256k1PrivateKey; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; @@ -70,7 +71,7 @@ use crate::chainstate::nakamoto::{ FIRST_STACKS_BLOCK_ID, }; use crate::chainstate::stacks::boot::{ - MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use crate::chainstate::stacks::db::{ ChainStateBootData, ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName, @@ -2839,3 +2840,304 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { assert_eq!(filtered_txs.len(), 1); assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); } + +#[cfg(test)] +pub mod nakamoto_block_signatures { + use super::*; + + /// Helper function make a reward set with (PrivateKey, weight) tuples + fn make_reward_set(signers: Vec<(Secp256k1PrivateKey, u32)>) -> RewardSet { + let mut reward_set = RewardSet::empty(); + reward_set.signers = Some( + signers + .iter() + .map(|(s, w)| { + let mut signing_key = [0u8; 33]; + signing_key.copy_from_slice( + &Secp256k1PublicKey::from_private(s) + .to_bytes_compressed() + .as_slice(), + ); + NakamotoSignerEntry { + signing_key, + 
stacked_amt: 100_u128, + weight: *w, + } + }) + .collect::>(), + ); + reward_set + } + + #[test] + /// Base success case - 3 signers of equal weight, all signing the block + pub fn test_nakamoto_block_verify_signatures() { + let signers = vec![ + Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::default(), + ]; + + let reward_set = make_reward_set(signers.iter().map(|s| (s.clone(), 100)).collect()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block sighash for each signer + + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .map(|s| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + // assert!(&header.verify_signer_signatures(&reward_set).is_ok()); + } + + #[test] + /// Fully signed block, but not in order + fn test_out_of_order_signer_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block for each signer, but in reverse + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .rev() + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected out of order signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("out of order")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test with 3 equal signers, and only two sign + fn test_insufficient_signatures() { + let signers = vec![ + 
(Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with just the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected insufficient signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Not enough signatures")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test with 4 signers, but one has 75% weight. Only the whale signs + // and the block is valid + fn test_single_signature_threshold() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 75), + (Secp256k1PrivateKey::default(), 10), + (Secp256k1PrivateKey::default(), 5), + (Secp256k1PrivateKey::default(), 10), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with just the whale + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(1) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + } + + #[test] + // Test with a signature that didn't come from the signer set + fn test_invalid_signer() { + let signers = vec![(Secp256k1PrivateKey::default(), 100)]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + // Sign with all signers + let mut 
signer_signature = signers + .iter() + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + let invalid_signature = Secp256k1PrivateKey::default() + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(invalid_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid signature to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("not found in the reward set")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + fn test_duplicate_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + // First, sign with the first 2 signers + let mut signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // Sign again with the first signer + let duplicate_signature = signers[0] + .0 + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(duplicate_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected duplicate signature to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Signatures are out of order")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test where a signature used a different message + fn test_signature_invalid_message() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; 
+ + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + let mut signer_signature = signers + .iter() + .take(3) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // With the 4th signer, use a junk message + let message = [0u8; 32]; + + let bad_signature = signers[3] + .0 + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(bad_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid message to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => {} + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test where a signature is not recoverable + fn test_unrecoverable_signature() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + let mut signer_signature = signers + .iter() + .take(3) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // Now append an unrecoverable signature + signer_signature.push(MessageSignature::empty()); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid message to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + if !msg.contains("Unable to recover public key") { + panic!("Unexpected error msg: {}", msg); + } + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } +} From 80e6853a3164b9e9ca26c2b303b508e61016d424 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 15 May 2024 12:12:54 -0700 
Subject: [PATCH 015/148] feat: refactor test_signers for new signer_signature type --- .../chainstate/nakamoto/coordinator/tests.rs | 18 +- stackslib/src/chainstate/nakamoto/mod.rs | 23 ++- .../src/chainstate/nakamoto/test_signers.rs | 174 +++++++++++++++++- .../src/chainstate/nakamoto/tests/mod.rs | 43 +++-- .../src/chainstate/nakamoto/tests/node.rs | 24 ++- .../src/chainstate/stacks/boot/pox_4_tests.rs | 4 +- .../chainstate/stacks/boot/signers_tests.rs | 6 +- .../stacks/boot/signers_voting_tests.rs | 39 ++-- stackslib/src/net/relay.rs | 3 +- stackslib/src/net/tests/download/nakamoto.rs | 6 +- stackslib/src/net/tests/mod.rs | 4 +- testnet/stacks-node/src/event_dispatcher.rs | 72 +++++++- .../src/tests/nakamoto_integrations.rs | 59 +++--- testnet/stacks-node/src/tests/signer/v1.rs | 5 +- 14 files changed, 361 insertions(+), 119 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0f3abe5c29..14ba87292f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -426,8 +426,7 @@ fn replay_reward_cycle( /// Mine a single Nakamoto tenure with a single Nakamoto block #[test] fn test_simple_nakamoto_coordinator_bootup() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], @@ -491,8 +490,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -617,8 +615,7 @@ fn test_nakamoto_chainstate_getters() { 
&vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1126,8 +1123,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1527,8 +1523,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1865,8 +1860,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8752118118..79e8cc81ee 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -336,14 +336,11 @@ impl FromRow for NakamotoBlockHeader { let signer_signature: SerdeValue = row.get_unwrap("signer_signature"); let 
signer_signature = signer_signature .as_array() - .map(|values| { - values - .iter() - .cloned() - .map(serde_json::from_value::<MessageSignature>) - .collect::<Result<Vec<MessageSignature>, serde_json::Error>>() - }) - .ok_or_else(|| DBError::Corruption)??; + .ok_or(DBError::Corruption)? + .iter() + .cloned() + .map(serde_json::from_value::<MessageSignature>) + .collect::<Result<Vec<_>, _>>()?; Ok(NakamotoBlockHeader { version, @@ -1852,9 +1849,17 @@ impl NakamotoChainState { // TODO: epoch gate to verify signatures vec if let Err(e) = block.header.verify_signer_signatures(&reward_set) { + let reward_set_keys = reward_set + .clone() + .signers + .unwrap() + .iter() + .map(|s| to_hex(&s.signing_key)) + .collect::<Vec<_>>(); warn!("Received block, but the signer signatures are invalid"; "block_id" => %block.block_id(), - "error" => ?e + "error" => ?e, + "signer_keys" => ?reward_set_keys ); let msg = format!("Received block, but the signer signatures are invalid"); return Err(ChainstateError::InvalidStacksBlock(msg)); diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 7179664fac..f5c61b2f7d 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -19,7 +19,8 @@ use std::collections::{HashSet, VecDeque}; use std::path::{Path, PathBuf}; use std::{fs, io}; -use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use clarity::util::hash::MerkleHashFunc; +use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; @@ -36,6 +37,7 @@ use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use wsts::curve::point::Point; use wsts::traits::Aggregator; +use self::boot::RewardSet; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; @@ -138,28 +140,180 @@ impl Default for
TestSigners { } impl TestSigners { - // TODO: sign using vec of signatures + /// Generate TestSigners using a list of signer keys + pub fn new(signer_keys: Vec) -> Self { + TestSigners::default_with_signers(signer_keys) + } + + /// Internal function to generate aggregate key information + fn default_with_signers(signer_keys: Vec) -> Self { + let mut rng = rand_core::OsRng::default(); + let num_keys = 10; + let threshold = 7; + let party_key_ids: Vec> = + vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; + let num_parties = party_key_ids.len().try_into().unwrap(); + + // Create the parties + let mut signer_parties: Vec = party_key_ids + .iter() + .enumerate() + .map(|(pid, pkids)| { + wsts::v2::Party::new( + pid.try_into().unwrap(), + pkids, + num_parties, + num_keys, + threshold, + &mut rng, + ) + }) + .collect(); + + // Generate an aggregate public key + let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { + Ok(poly_commitments) => poly_commitments, + Err(secret_errors) => { + panic!("Got secret errors from DKG: {:?}", secret_errors); + } + }; + let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); + sig_aggregator + .init(&poly_commitments) + .expect("aggregator init failed"); + let aggregate_public_key = sig_aggregator.poly[0]; + Self { + signer_parties, + aggregate_public_key, + poly_commitments, + num_keys, + threshold, + party_key_ids, + cycle: 0, + signer_keys, + } + } + + /// Sign a Nakamoto block using [`Self::signer_keys`]. + /// + /// N.B. If any of [`Self::signer_keys`] are not in the reward set, the resulting + /// signatures will be invalid. Use [`Self::sign_block_with_reward_set()`] to ensure + /// that any signer keys not in the reward set are not included. 
pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock, cycle: u64) { // Update the aggregate public key if the cycle has changed if self.cycle != cycle { self.generate_aggregate_key(cycle); } - let msg = block.header.signer_signature_hash().0; - let signer_signature = self - .signer_keys - .iter() - .map(|key| key.sign(&msg).unwrap()) - .collect::>(); + + // TODO: epoch gate for aggregated signatures + // let signer_signature = self.sign_block_with_aggregate_key(&block); + + let signer_signature = self.generate_block_signatures(&block); test_debug!( - "Signed Nakamoto block {} with {} (rc {})", + "Signed Nakamoto block {} with {} signatures (rc {})", block.block_id(), - &self.aggregate_public_key, + signer_signature.len(), cycle ); block.header.signer_signature = signer_signature; } + /// Sign a NakamotoBlock and maintain the order + /// of the reward set signers in the resulting signatures. + /// + /// If any of [`Self::signer_keys`] are not in the reward set, their signatures + /// will be ignored. 
+ pub fn sign_block_with_reward_set(&self, block: &mut NakamotoBlock, reward_set: &RewardSet) { + let signatures = self.generate_block_signatures(block); + let reordered_signatures = self.reorder_signatures(signatures, reward_set); + block.header.signer_signature = reordered_signatures; + } + + /// Sign a Nakamoto block and generate a vec of signatures + fn generate_block_signatures(&self, block: &NakamotoBlock) -> Vec { + let msg = block.header.signer_signature_hash().0; + self.signer_keys + .iter() + .map(|key| key.sign(&msg).unwrap()) + .collect::>() + } + + fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { + let mut rng = rand_core::OsRng::default(); + let msg = block.header.signer_signature_hash().0; + let (nonces, sig_shares, key_ids) = + wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); + + let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); + sig_aggregator + .init(&self.poly_commitments) + .expect("aggregator init failed"); + let signature = sig_aggregator + .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) + .expect("aggregator sig failed"); + ThresholdSignature(signature) + } + + /// Reorder a list of signatures to match the order of the reward set. 
+ pub fn reorder_signatures( + &self, + signatures: Vec, + reward_set: &RewardSet, + ) -> Vec { + let test_signer_keys = &self + .signer_keys + .iter() + .cloned() + .map(|key| Secp256k1PublicKey::from_private(&key).to_bytes_compressed()) + .collect::>(); + + let reward_set_keys = &reward_set + .clone() + .signers + .unwrap() + .iter() + .map(|s| s.signing_key.to_vec()) + .collect::>(); + + let signature_keys_map = test_signer_keys + .iter() + .cloned() + .zip(signatures.iter().cloned()) + .collect::>(); + + let mut reordered_signatures = Vec::with_capacity(reward_set_keys.len()); + + let mut missing_keys = 0; + + for key in reward_set_keys { + if let Some(signature) = signature_keys_map.get(key) { + reordered_signatures.push(signature.clone()); + } else { + missing_keys += 1; + } + } + if missing_keys > 0 { + warn!( + "TestSigners: {} keys are in the reward set but not in signer_keys", + missing_keys + ); + } + + reordered_signatures + } + + // Sort [`Self::signer_keys`] by their compressed public key + pub fn sorted_signer_keys(&self) -> Vec { + let mut keys = self.signer_keys.clone(); + keys.sort_by(|a, b| { + let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed(); + let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed(); + a.cmp(&b) + }); + keys + } + // Generate and assign a new aggregate public key pub fn generate_aggregate_key(&mut self, cycle: u64) -> Point { // If the key is already generated for this cycle, return it diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 2aa9a6a19a..99f68fadf6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -155,7 +155,7 @@ fn codec_nakamoto_header() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![MessageSignature::from_bytes(&[0x01; 
65]).unwrap()], signer_bitvec: BitVec::zeros(8).unwrap(), }; @@ -179,12 +179,13 @@ fn codec_nakamoto_header() { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, // stacker signature (mocked) - 0x02, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, - 0x0b, 0x07, 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, - 0xf8, 0x17, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, // signatures length + 0x00, 0x00, 0x00, 0x01, // stacker signature (mocked) + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, ]; let signer_bitvec_serialization = "00080000000100"; @@ -753,6 +754,11 @@ pub fn test_load_store_update_nakamoto_blocks() { MerkleTree::::new(&txid_vecs).root() }; + let header_signatures = vec![ + MessageSignature::from_bytes(&[0x01; 65]).unwrap(), + MessageSignature::from_bytes(&[0x02; 65]).unwrap(), + ]; + let nakamoto_header = NakamotoBlockHeader { version: 1, chain_length: 457, @@ -762,7 +768,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: 
header_signatures.clone(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -1037,13 +1043,16 @@ pub fn test_load_store_update_nakamoto_blocks() { // can load Nakamoto block, but only the Nakamoto block let nakamoto_blocks_db = chainstate.nakamoto_blocks_db(); + let first_nakamoto_block = nakamoto_blocks_db + .get_nakamoto_block(&nakamoto_header.block_id()) + .unwrap() + .unwrap() + .0; + assert_eq!(first_nakamoto_block, nakamoto_block,); + // Double check that the signatures match assert_eq!( - nakamoto_blocks_db - .get_nakamoto_block(&nakamoto_header.block_id()) - .unwrap() - .unwrap() - .0, - nakamoto_block + first_nakamoto_block.header.signer_signature, + header_signatures ); assert_eq!( nakamoto_blocks_db @@ -1675,8 +1684,7 @@ fn make_fork_run_with_arrivals( /// Tests that getting the highest nakamoto tenure works in the presence of forks #[test] pub fn test_get_highest_nakamoto_tenure() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], @@ -1824,8 +1832,7 @@ pub fn test_get_highest_nakamoto_tenure() { /// to have slot i in subsequent sortitions. 
#[test] fn test_make_miners_stackerdb_config() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index b2b275a0e1..354bacb7af 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -95,17 +95,21 @@ impl TestStacker { /// make a set of stackers who will share a single signing key and stack with /// `Self::DEFAULT_STACKER_AMOUNT` - pub fn common_signing_set(test_signers: &TestSigners) -> Vec { - let mut signing_key_seed = test_signers.num_keys.to_be_bytes().to_vec(); + pub fn common_signing_set() -> (TestSigners, Vec) { + let num_keys: u32 = 10; + let mut signing_key_seed = num_keys.to_be_bytes().to_vec(); signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); - (0..test_signers.num_keys) + let stackers = (0..num_keys) .map(|index| TestStacker { signer_private_key: signing_key.clone(), stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), amount: Self::DEFAULT_STACKER_AMOUNT, }) - .collect() + .collect::>(); + + let test_signers = TestSigners::new(vec![signing_key]); + (test_signers, stackers) } } @@ -574,13 +578,23 @@ impl TestStacksNode { .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) .unwrap(); + // Get the reward set + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let reward_set = sortdb + .get_preprocessed_reward_set_of(&sort_tip) + .expect("Failed to get reward cycle info") + .expect("Failed to get reward cycle info") + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); + test_debug!( "Signing Nakamoto block {} 
in tenure {} with key in cycle {}", nakamoto_block.block_id(), tenure_id_consensus_hash, cycle ); - signers.sign_nakamoto_block(&mut nakamoto_block, cycle); + + signers.sign_block_with_reward_set(&mut nakamoto_block, &reward_set); let block_id = nakamoto_block.block_id(); debug!( diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index be7675c700..16f7312ef4 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -4254,7 +4254,7 @@ fn stack_agg_increase() { let default_initial_balances = 1_000_000_000_000_000_000; let observer = TestEventObserver::new(); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let mut initial_balances = vec![ (alice.principal.clone(), default_initial_balances), (bob.principal.clone(), default_initial_balances), @@ -6464,7 +6464,7 @@ pub fn pox_4_scenario_test_setup<'a>( TestPeerConfig, ) { // Setup code extracted from your original test - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); let mut peer_config = TestPeerConfig::new(function_name!(), 0, 0); let private_key = peer_config.private_key.clone(); diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index a97a0c1e09..0cd1df3577 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -349,7 +349,11 @@ pub fn prepare_signers_test<'a>( stackers: &[TestStacker], observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, TestSigners, StacksBlockId, u128) { - let mut test_signers = TestSigners::default(); + let signer_keys = stackers + .iter() + .map(|s| s.signer_private_key.clone()) + .collect::>(); + let mut test_signers = TestSigners::new(signer_keys); let 
mut peer = boot_nakamoto( test_name, diff --git a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs index 039e96f597..9a97e88b5e 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs @@ -178,8 +178,7 @@ fn vote_for_aggregate_public_key_success() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -334,8 +333,7 @@ fn vote_for_aggregate_public_key_with_errors() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -621,8 +619,7 @@ fn vote_for_aggregate_public_key_out_of_window() { let stacker3_index = get_signer_index(&mut peer, latest_block_id, stacker3_address, cycle_id); let stacker4_index = get_signer_index(&mut peer, latest_block_id, stacker4_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = 
Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -825,8 +822,7 @@ fn vote_for_aggregate_public_key_in_first_block() { let signer_index = get_signer_index(&mut peer, latest_block_id, signer_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let txs = vec![ // cast a vote for the aggregate public key @@ -911,9 +907,8 @@ fn vote_for_aggregate_public_key_in_last_block() { ); let cycle_id: u128 = current_reward_cycle; - let mut signers = TestSigners::default(); - let aggregate_public_key_1 = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key_2 = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_1 = test_signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key_2 = test_signers.generate_aggregate_key(cycle_id as u64 + 2); // create vote txs for alice let signer_1_nonce = 1; // Start at 1 because the signer has already voted once @@ -1056,8 +1051,7 @@ fn vote_for_duplicate_aggregate_public_key() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1105,7 +1099,7 @@ fn vote_for_duplicate_aggregate_public_key() { let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let 
aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); let aggregate_public_key_2 = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1206,12 +1200,11 @@ fn vote_for_aggregate_public_key_two_rounds() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key_0_point = signers.generate_aggregate_key(0); + let aggregate_public_key_0_point = test_signers.generate_aggregate_key(0); let aggregate_public_key_0 = Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key_1_point = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key_1 = Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1402,11 +1395,11 @@ fn vote_for_aggregate_public_key_two_rounds() { // In this cycle, the two rounds are in separate tenures. 
- let aggregate_public_key_0_point = signers.generate_aggregate_key(1); + let aggregate_public_key_0_point = test_signers.generate_aggregate_key(1); let aggregate_public_key_0 = Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_1_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); let aggregate_public_key_1 = Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1643,8 +1636,7 @@ fn vote_for_aggregate_public_key_early() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1688,7 +1680,7 @@ fn vote_for_aggregate_public_key_early() { let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); // In this tenure, signers have not been set yet, so the vote should fail - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); let aggregate_public_key = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1872,8 +1864,7 @@ fn vote_for_aggregate_public_key_mixed_rounds() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); 
- let mut signers = TestSigners::default(); - let aggregate_public_key_point = signers.generate_aggregate_key(0); + let aggregate_public_key_point = test_signers.generate_aggregate_key(0); let aggregate_public_key = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index a4506e67e1..33fa3ebc12 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -738,11 +738,12 @@ impl Relayer { // }; // TODO: epoch gate to use signatures vec - let tip = sort_handle.tip(); + let tip = block_sn.sortition_id; let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { Ok(Some(x)) => x, Ok(None) => { + error!("No RewardCycleInfo found for tip {}", tip); return Err(chainstate_error::PoxNoRewardCycle); } Err(e) => { diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 8528ac8f4c..02aafdfa1b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -90,7 +90,7 @@ impl NakamotoDownloadStateMachine { fn test_nakamoto_tenure_downloader() { let ch = ConsensusHash([0x11; 20]); let private_key = StacksPrivateKey::new(); - let mut test_signers = TestSigners::default(); + let mut test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); @@ -352,7 +352,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(tip.block_height, 51); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let naddr = NeighborAddress { addrbytes: PeerAddress([0xff; 16]), @@ -1090,7 +1090,7 @@ fn test_make_tenure_downloaders() { assert_eq!(tip.block_height, 51); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let agg_pubkeys = peer.network.aggregate_public_keys.clone(); // test 
load_wanted_tenures() diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 82e1b8b814..a9534b6d29 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -90,13 +90,13 @@ pub struct NakamotoBootPlan { impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { - let test_signers = TestSigners::default(); + let (test_signers, test_stackers) = TestStacker::common_signing_set(); Self { test_name: test_name.to_string(), pox_constants: TestPeerConfig::default().burnchain.pox_constants, private_key: StacksPrivateKey::from_seed(&[2]), initial_balances: vec![], - test_stackers: TestStacker::common_signing_set(&test_signers), + test_stackers, test_signers, observer: Some(TestEventObserver::new()), num_peers: 0, diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 0e799ceec4..43258fc048 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -648,10 +648,6 @@ impl EventObserver { "signer_signature_hash".into(), format!("0x{}", header.signer_signature_hash()).into(), ); - as_object_mut.insert( - "signer_signature".into(), - format!("0x{}", header.signer_signature_hash()).into(), - ); as_object_mut.insert( "miner_signature".into(), format!("0x{}", &header.miner_signature).into(), @@ -1435,8 +1431,12 @@ impl EventDispatcher { mod test { use clarity::vm::costs::ExecutionCost; use stacks::burnchains::{PoxConstants, Txid}; - use stacks::chainstate::stacks::db::StacksHeaderInfo; + use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; + use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; + use stacks::types::chainstate::BlockHeaderHash; + use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use 
stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; @@ -1499,4 +1499,66 @@ mod test { expected_bitvec_str ); } + + #[test] + fn test_block_processed_event_nakamoto() { + let observer = EventObserver { + endpoint: "nowhere".to_string(), + }; + + let filtered_events = vec![]; + let mut block_header = NakamotoBlockHeader::empty(); + let signer_signature = vec![ + MessageSignature::from_bytes(&[0; 65]).unwrap(), + MessageSignature::from_bytes(&[1; 65]).unwrap(), + ]; + block_header.signer_signature = signer_signature.clone(); + let block = NakamotoBlock { + header: block_header.clone(), + txs: vec![], + }; + let mut metadata = StacksHeaderInfo::regtest_genesis(); + metadata.anchored_header = StacksBlockHeaderTypes::Nakamoto(block_header.clone()); + let receipts = vec![]; + let parent_index_hash = StacksBlockId([0; 32]); + let winner_txid = Txid([0; 32]); + let mature_rewards = serde_json::Value::Array(vec![]); + let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); + let parent_burn_block_height = 0; + let parent_burn_block_timestamp = 0; + let anchored_consumed = ExecutionCost::zero(); + let mblock_confirmed_consumed = ExecutionCost::zero(); + let pox_constants = PoxConstants::testnet_default(); + let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + + let payload = observer.make_new_block_processed_payload( + filtered_events, + &StacksBlockEventData::from((block, BlockHeaderHash([0; 32]))), + &metadata, + &receipts, + &parent_index_hash, + &winner_txid, + &mature_rewards, + parent_burn_block_hash, + parent_burn_block_height, + parent_burn_block_timestamp, + &anchored_consumed, + &mblock_confirmed_consumed, + &pox_constants, + &None, + &Some(signer_bitvec.clone()), + ); + + let event_signer_signature = payload + .get("signer_signature") + .unwrap() + .as_array() + .expect("Expected signer_signature to be an array") + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>() + .expect("Unable to 
deserialize array of MessageSignature"); + assert_eq!(event_signer_signature, signer_signature); + } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6169ee524d..54dfbd28ec 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -345,9 +345,13 @@ pub fn read_and_sign_block_proposal( let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); + + let reward_set = sortdb + .get_preprocessed_reward_set_of(&tip.sortition_id) + .expect("Failed to get reward cycle info") + .expect("Failed to get reward cycle info") + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); @@ -364,9 +368,7 @@ pub fn read_and_sign_block_proposal( "signer_sig_hash" => &signer_sig_hash.to_hex(), ); - signers - .clone() - .sign_nakamoto_block(&mut proposed_block, reward_cycle); + signers.sign_block_with_reward_set(&mut proposed_block, &reward_set); channel .send(proposed_block.header.signer_signature) @@ -562,7 +564,7 @@ pub fn boot_to_epoch_3( blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], - self_signing: Option<&TestSigners>, + self_signing: &mut Option<&mut TestSigners>, btc_regtest_controller: &mut BitcoinRegtestController, ) { assert_eq!(stacker_sks.len(), signer_sks.len()); @@ -632,6 +634,11 @@ pub fn boot_to_epoch_3( submit_tx(&http_origin, &stacking_tx); } + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + let 
prepare_phase_start = btc_regtest_controller .get_burnchain() .pox_constants @@ -982,7 +989,6 @@ fn simple_neon_integration() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -998,6 +1004,7 @@ fn simple_neon_integration() { ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), 100000, @@ -1038,7 +1045,7 @@ fn simple_neon_integration() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1221,7 +1228,6 @@ fn mine_multiple_per_tenure_integration() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -1277,12 +1283,13 @@ fn mine_multiple_per_tenure_integration() { .spawn(move || run_loop.start(None, 0)) .unwrap(); wait_for_runloop(&blocks_processed); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); boot_to_epoch_3( &naka_conf, &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1404,7 +1411,6 @@ fn correct_burn_outs() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.burnchain.pox_reward_length = Some(10); naka_conf.burnchain.pox_prepare_length = Some(3); @@ -1441,6 +1447,8 @@ fn correct_burn_outs() { 100000, ); + let signers = TestSigners::new(vec![sender_signer_sk]); + test_observer::spawn(); 
let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -1769,7 +1777,6 @@ fn block_proposal_api_endpoint() { return; } - let signers = TestSigners::default(); let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); conf.connection_options.block_proposal_token = Some(password.clone()); @@ -1810,13 +1817,14 @@ fn block_proposal_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2131,7 +2139,6 @@ fn miner_writes_proposed_block_to_stackerdb() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); let sender_sk = Secp256k1PrivateKey::new(); @@ -2152,6 +2159,8 @@ fn miner_writes_proposed_block_to_stackerdb() { 100000, ); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -2185,7 +2194,7 @@ fn miner_writes_proposed_block_to_stackerdb() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2267,13 +2276,14 @@ fn vote_for_aggregate_key_burn_op() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let _http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let signer_sk = Secp256k1PrivateKey::new(); let 
signer_addr = tests::to_addr(&signer_sk); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); @@ -2313,7 +2323,7 @@ fn vote_for_aggregate_key_burn_op() { &blocks_processed, &[stacker_sk], &[signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2512,13 +2522,13 @@ fn follower_bootup() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -2573,7 +2583,7 @@ fn follower_bootup() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2768,7 +2778,6 @@ fn stack_stx_burn_op_integration_test() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.burnchain.satoshis_per_byte = 2; naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -2779,6 +2788,8 @@ fn stack_stx_burn_op_integration_test() { let signer_sk_2 = Secp256k1PrivateKey::new(); let signer_addr_2 = tests::to_addr(&signer_sk_2); + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); + let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -2819,7 +2830,7 @@ fn stack_stx_burn_op_integration_test() { &blocks_processed, &[stacker_sk], &[signer_sk_1], - Some(&signers), + &mut Some(&mut signers), 
&mut btc_regtest_controller, ); @@ -3203,7 +3214,6 @@ fn forked_tenure_is_ignored() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); let sender_sk = Secp256k1PrivateKey::new(); @@ -3217,6 +3227,7 @@ fn forked_tenure_is_ignored() { ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), @@ -3259,7 +3270,7 @@ fn forked_tenure_is_ignored() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index b8d20ea76d..615df08d24 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -30,9 +30,8 @@ use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOT use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ - StacksPrivateKey, StacksTransaction, ThresholdSignature, TransactionAnchorMode, - TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, - TransactionVersion, + StacksPrivateKey, StacksTransaction, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use stacks::util_lib::strings::StacksString; use stacks_common::bitvec::BitVec; From 57c0db2e07acd4a62f72e5e8236d7fdc4abaa23e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 15 May 2024 13:09:26 -0700 Subject: [PATCH 
016/148] feat: update sign_coordinator and blind_signer for new message types --- .../stacks-node/src/nakamoto_node/miner.rs | 52 +++++++- .../src/nakamoto_node/sign_coordinator.rs | 113 ++++++++++++++++-- .../src/tests/nakamoto_integrations.rs | 38 ++++-- 3 files changed, 174 insertions(+), 29 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6ee63ef0fe..42009d5380 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -321,7 +321,7 @@ impl BlockMinerThread { })?; *attempts += 1; - let signature = coordinator.begin_sign( + let signature = coordinator.begin_sign_v1( new_block, burn_block_height, *attempts, @@ -339,10 +339,15 @@ impl BlockMinerThread { fn gather_signatures( &mut self, new_block: &mut NakamotoBlock, - _burn_block_height: u64, - _stackerdbs: &mut StackerDBs, - _attempts: &mut u64, + burn_block_height: u64, + stackerdbs: &mut StackerDBs, + attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, @@ -356,6 +361,15 @@ impl BlockMinerThread { .expect("FATAL: could not retrieve chain tip") .expect("FATAL: could not retrieve chain tip"); + let reward_cycle = self + .burnchain + .pox_constants + .block_height_to_reward_cycle( + self.burnchain.first_block_height, + self.burn_block.block_height, + ) + .expect("FATAL: building on a burn block that is before the first burn block"); + let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { Ok(Some(x)) => x, Ok(None) => { @@ -376,8 +390,34 @@ impl BlockMinerThread { )); }; - // TODO: collect signatures from signers - return Ok((reward_set, vec![])); + let miner_privkey_as_scalar = 
Scalar::from(miner_privkey.as_slice().clone()); + let mut coordinator = SignCoordinator::new( + &reward_set, + reward_cycle, + miner_privkey_as_scalar, + Point::new(), + &stackerdbs, + &self.config, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + })?; + + *attempts += 1; + let signature = coordinator.begin_sign_v0( + new_block, + burn_block_height, + *attempts, + &tip, + &self.burnchain, + &sort_db, + &stackerdbs, + &self.globals.counters, + )?; + + return Ok((reward_set, signature)); } fn get_stackerdb_contract_and_slots( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 764ae60c3c..49204166ab 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -17,7 +17,8 @@ use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage}; +use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -28,6 +29,7 @@ use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; +use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -140,10 +142,10 @@ fn get_signer_commitments( ); continue; }; - let Ok(SignerMessage::DkgResults { + let Ok(SignerMessageV1::DkgResults { 
aggregate_key, party_polynomials, - }) = SignerMessage::consensus_deserialize(&mut signer_data.as_slice()) + }) = SignerMessageV1::consensus_deserialize(&mut signer_data.as_slice()) else { warn!( "Failed to parse DKG result, will look for results from other signers."; @@ -314,12 +316,12 @@ impl SignCoordinator { .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") } - fn send_signers_message( + fn send_signers_message( message_key: &Scalar, sortdb: &SortitionDB, tip: &BlockSnapshot, stackerdbs: &StackerDBs, - message: SignerMessage, + message: M, is_mainnet: bool, miners_session: &mut StackerDBSession, ) -> Result<(), String> { @@ -363,7 +365,7 @@ impl SignCoordinator { } #[cfg_attr(test, mutants::skip)] - pub fn begin_sign( + pub fn begin_sign_v1( &mut self, block: &NakamotoBlock, burn_block_height: u64, @@ -397,7 +399,7 @@ impl SignCoordinator { "Failed to start signing round in FIRE coordinator: {e:?}" )) })?; - Self::send_signers_message( + Self::send_signers_message::( &self.message_key, sortdb, burn_tip, @@ -483,11 +485,11 @@ impl SignCoordinator { let packets: Vec<_> = messages .into_iter() .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::EncryptedSignerState(_) - | SignerMessage::Transactions(_) => None, - SignerMessage::Packet(packet) => { + SignerMessageV1::DkgResults { .. 
} + | SignerMessageV1::BlockResponse(_) + | SignerMessageV1::EncryptedSignerState(_) + | SignerMessageV1::Transactions(_) => None, + SignerMessageV1::Packet(packet) => { debug!("Received signers packet: {packet:?}"); if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { warn!("Failed to verify StackerDB packet: {packet:?}"); @@ -548,7 +550,7 @@ impl SignCoordinator { } } for msg in outbound_msgs { - match Self::send_signers_message( + match Self::send_signers_message::( &self.message_key, sortdb, burn_tip, @@ -573,4 +575,89 @@ impl SignCoordinator { "Timed out waiting for group signature".into(), )) } + + pub fn begin_sign_v0( + &mut self, + block: &NakamotoBlock, + burn_block_height: u64, + block_attempt: u64, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + sortdb: &SortitionDB, + stackerdbs: &StackerDBs, + counters: &Counters, + ) -> Result, NakamotoNodeError> { + let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); + let sign_iter_id = block_attempt; + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + self.coordinator.current_sign_id = sign_id; + self.coordinator.current_sign_iter_id = sign_iter_id; + + let block_proposal = BlockProposal { + block: block.clone(), + burn_height: burn_block_height, + reward_cycle: reward_cycle_id, + }; + + let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + Self::send_signers_message::( + &self.message_key, + sortdb, + burn_tip, + &stackerdbs, + block_proposal_message, + self.is_mainnet, + &mut self.miners_session, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + #[cfg(test)] + { + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. 
+ if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } + } + + let Some(ref mut receiver) = self.receiver else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Failed to obtain the StackerDB event receiver".into(), + )); + }; + + let start_ts = Instant::now(); + while start_ts.elapsed() <= self.signing_round_timeout { + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )) + } + }; + + let is_signer_event = + event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + if !is_signer_event { + debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + continue; + } + + // TODO: get messages from signers + } + + Err(NakamotoNodeError::SignerSignatureError( + "Timed out waiting for group signature".into(), + )) + } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 54dfbd28ec..66a0627b67 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -27,7 +27,8 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::ClarityVersion; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; -use libsigner::v1::messages::SignerMessage; +use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v1::messages::SignerMessage as SignerMessageV1; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, 
Txid}; @@ -318,24 +319,41 @@ pub fn get_latest_block_proposal( let proposed_block = { let miner_contract_id = boot_code_id(MINERS_NAME, false); let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); - let message: SignerMessage = miners_stackerdb + let message: SignerMessageV0 = miners_stackerdb .get_latest(miner_slot_id.start) .expect("Failed to get latest chunk from the miner slot ID") .expect("No chunk found"); - let SignerMessage::Packet(packet) = message else { - panic!("Expected a signer message packet. Got {message:?}"); + let SignerMessageV0::BlockProposal(block_proposal) = message else { + panic!("Expected a signer message block proposal. Got {message:?}"); }; - let Message::NonceRequest(nonce_request) = packet.msg else { - panic!("Expected a nonce request. Got {:?}", packet.msg); - }; - let block_proposal = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) - .expect("Failed to deserialize block proposal"); + // TODO: use v1 message types behind epoch gate + // get_block_proposal_msg_v1(&mut miners_stackerdb, miner_slot_id.start); block_proposal.block }; Ok(proposed_block) } +#[allow(dead_code)] +fn get_block_proposal_msg_v1( + miners_stackerdb: &mut StackerDBSession, + slot_id: u32, +) -> NakamotoBlock { + let message: SignerMessageV1 = miners_stackerdb + .get_latest(slot_id) + .expect("Failed to get latest chunk from the miner slot ID") + .expect("No chunk found"); + let SignerMessageV1::Packet(packet) = message else { + panic!("Expected a signer message packet. Got {message:?}"); + }; + let Message::NonceRequest(nonce_request) = packet.msg else { + panic!("Expected a nonce request. 
Got {:?}", packet.msg); + }; + let block_proposal = + BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) + .expect("Failed to deserialize block proposal"); + block_proposal.block +} + pub fn read_and_sign_block_proposal( conf: &Config, signers: &TestSigners, From da6dad1d54c69622f98e426f6737401ad664a1eb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 16 May 2024 13:28:16 -0700 Subject: [PATCH 017/148] fix: code review comments - use `vec![]` instead of `Vec::new()` or `Vec::with_capacity` - Add a constant for the signer threshold - Update threshold calculation - Fix some unsafe unwraps / math - Refactor how `TestSigner` signs a block using a reward set --- stackslib/src/chainstate/nakamoto/mod.rs | 105 +++++++++-------- .../src/chainstate/nakamoto/test_signers.rs | 70 ++++++------ .../src/chainstate/nakamoto/tests/mod.rs | 106 ++++++++++++++++-- stackslib/src/core/mod.rs | 3 + stackslib/src/net/relay.rs | 3 +- stackslib/src/net/tests/download/nakamoto.rs | 6 +- .../stacks-node/src/nakamoto_node/miner.rs | 6 +- .../src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v1.rs | 4 +- 9 files changed, 199 insertions(+), 106 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 79e8cc81ee..d8e67b3bd4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -31,7 +31,6 @@ use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; -use serde_json::Value as SerdeValue; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -93,7 +92,7 @@ use crate::clarity_vm::clarity::{ ClarityInstance, ClarityTransactionConnection, Error as ClarityError, PreCommitClarityBlock, }; use crate::clarity_vm::database::SortitionDBRef; -use 
crate::core::BOOT_BLOCK_HASH; +use crate::core::{BOOT_BLOCK_HASH, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD}; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; use crate::util_lib::boot; @@ -183,7 +182,7 @@ lazy_static! { -- miner's signature over the block miner_signature TEXT NOT NULL, -- signers' signatures over the block - signer_signature BLOB NOT NULL, + signer_signature TEXT NOT NULL, -- bitvec capturing stacker participation in signature signer_bitvec TEXT NOT NULL, -- The following fields are not part of either the StacksHeaderInfo struct @@ -333,14 +332,9 @@ impl FromRow for NakamotoBlockHeader { let state_index_root = row.get("state_index_root")?; let miner_signature = row.get("miner_signature")?; let signer_bitvec = row.get("signer_bitvec")?; - let signer_signature: SerdeValue = row.get_unwrap("signer_signature"); - let signer_signature = signer_signature - .as_array() - .ok_or(DBError::Corruption)? - .iter() - .cloned() - .map(serde_json::from_value::) - .collect::, _>>()?; + let signer_signature_json: String = row.get("signer_signature")?; + let signer_signature: Vec = + serde_json::from_str(&signer_signature_json).map_err(|_e| DBError::ParseError)?; Ok(NakamotoBlockHeader { version, @@ -532,16 +526,19 @@ impl NakamotoBlockHeader { // `last_index` is used to prevent out-of-order signatures let mut last_index = None; - let total_weight = signers.iter().map(|s| s.weight).sum::(); + let total_weight = signers.iter().map(|s| s.weight).fold(0, |w, acc| { + acc.checked_add(w) + .expect("FATAL: Total signer weight > u32::MAX") + }); // HashMap of let signers_by_pk = signers .iter() .enumerate() - .map(|(i, signer)| (signer.signing_key, (signer.clone(), i))) + .map(|(i, signer)| (&signer.signing_key, (signer, i))) .collect::>(); - for signature in &self.signer_signature { + for signature in self.signer_signature.iter() { let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature) 
.map_err(|_| { ChainstateError::InvalidStacksBlock(format!( @@ -550,40 +547,58 @@ impl NakamotoBlockHeader { )) })?; - let (signer, signer_index) = signers_by_pk - .get(public_key.to_bytes().as_slice()) - .ok_or_else(|| { - ChainstateError::InvalidStacksBlock(format!( - "Public key {} not found in the reward set", - public_key.to_hex() - )) - })?; + let mut public_key_bytes = [0u8; 33]; + public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]); + + let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| { + ChainstateError::InvalidStacksBlock(format!( + "Public key {} not found in the reward set", + public_key.to_hex() + )) + })?; // Enforce order of signatures - match last_index { - Some(index) if index >= *signer_index => { + if let Some(index) = last_index.as_ref() { + if *index >= *signer_index { return Err(ChainstateError::InvalidStacksBlock( "Signatures are out of order".to_string(), )); } - _ => last_index = Some(*signer_index), + } else { + last_index = Some(*signer_index); } - total_weight_signed += signer.weight; + total_weight_signed = total_weight_signed + .checked_add(signer.weight) + .expect("FATAL: overflow while computing signer set threshold"); } - // Calculate 70% of total weight as the threshold - let threshold = (total_weight as f64 * 7_f64 / 10_f64).ceil() as u32; + let threshold = Self::compute_voting_weight_threshold(total_weight)?; if total_weight_signed < threshold { - return Err(ChainstateError::InvalidStacksBlock( - "Not enough signatures".to_string(), - )); + return Err(ChainstateError::InvalidStacksBlock(format!( + "Not enough signatures. 
Needed at least {} but got {}", + threshold, total_weight_signed + ))); } return Ok(()); } + pub fn compute_voting_weight_threshold(total_weight: u32) -> Result { + let ceil = if (total_weight as u64 * 7) % 10 == 0 { + 0 + } else { + 1 + }; + u32::try_from((total_weight as u64 * NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD) / 10 + ceil) + .map_err(|_| { + ChainstateError::InvalidStacksBlock( + "Overflow when computing nakamoto block approval threshold".to_string(), + ) + }) + } + /// Make an "empty" header whose block data needs to be filled in. /// This is used by the miner code. pub fn from_parent_empty( @@ -602,7 +617,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), + signer_signature: vec![], signer_bitvec: BitVec::ones(bitvec_len) .expect("BUG: bitvec of length-1 failed to construct"), } @@ -619,7 +634,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -635,7 +650,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -1784,7 +1799,7 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - _aggregate_public_key: &Point, + _aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result { test_debug!("Consider Nakamoto block 
{}", &block.block_id()); @@ -1847,22 +1862,12 @@ impl NakamotoChainState { // return Err(ChainstateError::InvalidStacksBlock(msg)); // } - // TODO: epoch gate to verify signatures vec if let Err(e) = block.header.verify_signer_signatures(&reward_set) { - let reward_set_keys = reward_set - .clone() - .signers - .unwrap() - .iter() - .map(|s| to_hex(&s.signing_key)) - .collect::>(); warn!("Received block, but the signer signatures are invalid"; "block_id" => %block.block_id(), "error" => ?e, - "signer_keys" => ?reward_set_keys ); - let msg = format!("Received block, but the signer signatures are invalid"); - return Err(ChainstateError::InvalidStacksBlock(msg)); + return Err(e); } // if we pass all the tests, then along the way, we will have verified (in @@ -2344,8 +2349,12 @@ impl NakamotoChainState { let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex()); - let signer_signature = serde_json::to_string(&header.signer_signature) - .expect("Unable to serialize signer signatures"); + let signer_signature = serde_json::to_string(&header.signer_signature).map_err(|_| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to serialize signer signature for block {}", + block_hash + )) + })?; let args: &[&dyn ToSql] = &[ &u64_to_sql(*stacks_block_height)?, diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index f5c61b2f7d..a7e521c155 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -219,26 +219,32 @@ impl TestSigners { block.header.signer_signature = signer_signature; } - /// Sign a NakamotoBlock and maintain the order + /// Sign a NakamotoBlock and maintain the order and membership /// of the reward set signers in the resulting signatures. /// /// If any of [`Self::signer_keys`] are not in the reward set, their signatures - /// will be ignored. + /// will not be included. 
pub fn sign_block_with_reward_set(&self, block: &mut NakamotoBlock, reward_set: &RewardSet) { - let signatures = self.generate_block_signatures(block); - let reordered_signatures = self.reorder_signatures(signatures, reward_set); - block.header.signer_signature = reordered_signatures; + let signatures = self.generate_ordered_signatures(block, reward_set); + block.header.signer_signature = signatures; } - /// Sign a Nakamoto block and generate a vec of signatures + /// Sign a Nakamoto block and generate a vec of signatures. The signatures will + /// be ordered by the signer's public keys, but will not be checked against the + /// reward set. fn generate_block_signatures(&self, block: &NakamotoBlock) -> Vec { let msg = block.header.signer_signature_hash().0; - self.signer_keys - .iter() - .map(|key| key.sign(&msg).unwrap()) - .collect::>() + let mut keys = self.signer_keys.clone(); + keys.sort_by(|a, b| { + let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed(); + let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed(); + a.cmp(&b) + }); + keys.iter().map(|key| key.sign(&msg).unwrap()).collect() } + /// Sign a Nakamoto block using the aggregate key. + /// NB: this function is current unused. fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { let mut rng = rand_core::OsRng::default(); let msg = block.header.signer_signature_hash().0; @@ -255,18 +261,24 @@ impl TestSigners { ThresholdSignature(signature) } - /// Reorder a list of signatures to match the order of the reward set. - pub fn reorder_signatures( + /// Generate an list of signatures for a block. Only + /// signers in the reward set will be included. 
+ pub fn generate_ordered_signatures( &self, - signatures: Vec, + block: &NakamotoBlock, reward_set: &RewardSet, ) -> Vec { - let test_signer_keys = &self + let msg = block.header.signer_signature_hash().0; + + let test_signers_by_pk = self .signer_keys .iter() .cloned() - .map(|key| Secp256k1PublicKey::from_private(&key).to_bytes_compressed()) - .collect::>(); + .map(|s| { + let pk = Secp256k1PublicKey::from_private(&s); + (pk.to_bytes_compressed(), s) + }) + .collect::>(); let reward_set_keys = &reward_set .clone() @@ -276,19 +288,14 @@ impl TestSigners { .map(|s| s.signing_key.to_vec()) .collect::>(); - let signature_keys_map = test_signer_keys - .iter() - .cloned() - .zip(signatures.iter().cloned()) - .collect::>(); - - let mut reordered_signatures = Vec::with_capacity(reward_set_keys.len()); + let mut signatures = Vec::with_capacity(reward_set_keys.len()); let mut missing_keys = 0; for key in reward_set_keys { - if let Some(signature) = signature_keys_map.get(key) { - reordered_signatures.push(signature.clone()); + if let Some(signer_key) = test_signers_by_pk.get(key) { + let signature = signer_key.sign(&msg).unwrap(); + signatures.push(signature); } else { missing_keys += 1; } @@ -300,18 +307,7 @@ impl TestSigners { ); } - reordered_signatures - } - - // Sort [`Self::signer_keys`] by their compressed public key - pub fn sorted_signer_keys(&self) -> Vec { - let mut keys = self.signer_keys.clone(); - keys.sort_by(|a, b| { - let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed(); - let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed(); - a.cmp(&b) - }); - keys + signatures } // Generate and assign a new aggregate public key diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 99f68fadf6..dd36004ff4 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -206,7 +206,7 @@ pub fn 
test_nakamoto_first_tenure_block_syntactic_validation() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -812,7 +812,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_2, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -851,7 +851,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -1529,7 +1529,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header.sign_miner(&private_key).unwrap(); @@ -1548,7 +1548,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_ch, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_ch.sign_miner(&private_key).unwrap(); @@ -1567,7 +1567,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_miner_sig, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_miner_sig @@ -1720,7 +1720,7 @@ pub fn 
test_get_highest_nakamoto_tenure() { tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), state_index_root: TrieHash([0x00; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let tenure_change = TenureChangePayload { @@ -2020,7 +2020,7 @@ fn test_make_miners_stackerdb_config() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let block = NakamotoBlock { @@ -2848,7 +2848,6 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); } -#[cfg(test)] pub mod nakamoto_block_signatures { use super::*; @@ -2871,11 +2870,69 @@ pub mod nakamoto_block_signatures { weight: *w, } }) - .collect::>(), + .collect(), ); reward_set } + #[test] + // Test that signatures succeed with exactly 70% of the votes + pub fn test_exactly_enough_votes() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 35), + (Secp256k1PrivateKey::default(), 35), + (Secp256k1PrivateKey::default(), 30), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + } + + #[test] + /// Test that signatures fail with just under 70% of the votes + pub fn test_just_not_enough_votes() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 3500), + (Secp256k1PrivateKey::default(), 3499), + 
(Secp256k1PrivateKey::default(), 3001), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected insufficient signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Not enough signatures")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + #[test] /// Base success case - 3 signers of equal weight, all signing the block pub fn test_nakamoto_block_verify_signatures() { @@ -3147,4 +3204,33 @@ pub mod nakamoto_block_signatures { _ => panic!("Expected InvalidStacksBlock error"), } } + + #[test] + pub fn test_compute_voting_weight_threshold() { + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(100_u32).unwrap(), + 70_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(10_u32).unwrap(), + 7_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(3000_u32).unwrap(), + 2100_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(4000_u32).unwrap(), + 2800_u32, + ); + + // Round-up check + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(511_u32).unwrap(), + 358_u32, + ); + } } diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 9aeb98eefe..d47edc75ec 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -193,6 +193,9 @@ pub const POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT as u32) + 1; +// The threshold % of weighted votes on a block to approve it in 
Nakamoto +pub const NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD: u64 = 7; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 33fa3ebc12..7f4f1847a9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -763,8 +763,7 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - // &aggregate_public_key, - &Point::new(), + None, reward_set, )?; staging_db_tx.commit()?; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 02aafdfa1b..1e76cd1853 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -103,7 +103,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -171,7 +171,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -192,7 +192,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 42009d5380..3ed642c9cd 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -198,7 +198,7 @@ impl 
BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), &Point::new(), reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), None, reward_set) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -538,7 +538,7 @@ impl BlockMinerThread { fn broadcast( &self, block: NakamotoBlock, - aggregate_public_key: &Point, + aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result<(), ChainstateError> { #[cfg(test)] @@ -576,7 +576,7 @@ impl BlockMinerThread { &mut sortition_handle, &staging_tx, headers_conn, - &aggregate_public_key, + aggregate_public_key, reward_set, )?; staging_tx.commit()?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 66a0627b67..11c78596f8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2248,7 +2248,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); - proposed_zero_block.header.signer_signature = Vec::::new(); + proposed_zero_block.header.signer_signature = vec![]; let proposed_zero_block_hash = format!("0x{}", proposed_zero_block.header.block_hash()); coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 615df08d24..f7e6674599 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -528,7 +528,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block1 = NakamotoBlock { @@ -555,7 
+555,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block2 = NakamotoBlock { From e29ab98bad462fcfc2a11d0c04d827eadad96258 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 17 May 2024 10:26:52 -0400 Subject: [PATCH 018/148] refactor: add method `clarity_uses_tip_burn_block` --- clarity/src/vm/database/clarity_db.rs | 12 +++++++++--- clarity/src/vm/tests/mod.rs | 4 ++-- stacks-common/src/types/mod.rs | 7 +++++++ .../src/chainstate/stacks/boot/contract_tests.rs | 4 ++-- 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4c419dcb2a..03e85ae176 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -866,8 +866,11 @@ impl<'a> ClarityDatabase<'a> { pub fn get_current_burnchain_block_height(&mut self) -> Result { let cur_stacks_height = self.store.get_current_block_height(); - // In epoch 2, we can only access the burn block associated with the last block - if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + // Before epoch 3.0, we can only access the burn block associated with the last block + if !self + .get_clarity_epoch_version()? + .clarity_uses_tip_burn_block() + { if cur_stacks_height == 0 { return Ok(self.burn_state_db.get_burn_start_height()); }; @@ -926,7 +929,10 @@ impl<'a> ClarityDatabase<'a> { /// In Epoch 3+: /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { - if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + if !self + .get_clarity_epoch_version()? 
+ .clarity_uses_tip_burn_block() + { let current_stacks_height = self.get_current_block_height(); if current_stacks_height < 1 { diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 715c205475..2c6f23ef42 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -166,7 +166,7 @@ impl MemoryEnvironmentGenerator { db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); db.commit().unwrap(); - if epoch >= StacksEpochId::Epoch30 { + if epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(1).unwrap(); db.commit().unwrap(); @@ -185,7 +185,7 @@ impl TopLevelMemoryEnvironmentGenerator { db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); db.commit().unwrap(); - if epoch >= StacksEpochId::Epoch30 { + if epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(1).unwrap(); db.commit().unwrap(); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 0ed0187ad7..a1b90ce428 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -148,6 +148,13 @@ impl StacksEpochId { StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } + + /// Returns whether or not this epoch uses the tip for reading burn block + /// info in Clarity (3.0+ behavior) or should use the parent block's burn + /// block (behavior before 3.0). 
+ pub fn clarity_uses_tip_burn_block(&self) -> bool { + self >= &StacksEpochId::Epoch30 + } } impl std::fmt::Display for StacksEpochId { diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index a308e5b339..1c0e3f4299 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -173,7 +173,7 @@ impl ClarityTestSim { let cur_epoch = Self::check_and_bump_epoch(&mut store, &headers_db, &burn_db); let mut db = store.as_clarity_db(&headers_db, &burn_db); - if cur_epoch >= StacksEpochId::Epoch30 { + if cur_epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); @@ -227,7 +227,7 @@ impl ClarityTestSim { debug!("Execute block in epoch {}", &cur_epoch); let mut db = store.as_clarity_db(&headers_db, &burn_db); - if cur_epoch >= StacksEpochId::Epoch30 { + if cur_epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); From e0f905246aa070d2ba2d0765872824ecf8ec03da Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 17 May 2024 15:13:50 -0400 Subject: [PATCH 019/148] chore: fix compile-time issue --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 11c78596f8..f46d9a3878 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3509,7 +3509,7 @@ fn check_block_heights() { return; } - let signers = TestSigners::default(); + let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = 
format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -3589,7 +3589,7 @@ fn check_block_heights() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); From 9627fecf1d4f8ea0909c4a6dd0c1d49ed668b3ec Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 16 May 2024 09:13:20 -0500 Subject: [PATCH 020/148] feat: sortition state tracking in signer --- libsigner/src/events.rs | 7 +- .../src/deps_common/bitcoin/util/hash.rs | 19 + stacks-signer/src/chainstate.rs | 375 +++++++++++++ stacks-signer/src/client/mod.rs | 6 + stacks-signer/src/client/stacks_client.rs | 84 ++- stacks-signer/src/lib.rs | 3 + stacks-signer/src/signerdb.rs | 24 +- stackslib/src/burnchains/burnchain.rs | 2 +- .../src/net/api/get_tenures_fork_info.rs | 361 ++++++++++++ stackslib/src/net/api/getsortition.rs | 380 +++++++++++++ stackslib/src/net/api/mod.rs | 4 + .../net/api/tests/get_tenures_fork_info.rs | 63 +++ stackslib/src/net/api/tests/getsortition.rs | 88 +++ stackslib/src/net/api/tests/mod.rs | 2 + .../stacks-node/src/nakamoto_node/miner.rs | 1 + .../src/tests/nakamoto_integrations.rs | 521 +++++++++++++++++- 16 files changed, 1923 insertions(+), 17 deletions(-) create mode 100644 stacks-signer/src/chainstate.rs create mode 100644 stackslib/src/net/api/get_tenures_fork_info.rs create mode 100644 stackslib/src/net/api/getsortition.rs create mode 100644 stackslib/src/net/api/tests/get_tenures_fork_info.rs create mode 100644 stackslib/src/net/api/tests/getsortition.rs diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index c603db7f0b..4de6b80513 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -39,8 +39,11 @@ use stacks_common::codec::{ StacksMessageCodec, }; pub use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::types::chainstate::StacksPublicKey; -use stacks_common::util::hash::Sha512Trunc256Sum; +use 
stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksPublicKey, +}; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::HexError; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index 3e9186bd92..daa1de3360 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -29,6 +29,7 @@ use crate::deps_common::bitcoin::network::encodable::{ConsensusDecodable, Consen use crate::deps_common::bitcoin::network::serialize::{ self, BitcoinHash, RawEncoder, SimpleEncoder, }; +use crate::util::hash::bytes_to_hex; use crate::util::uint::Uint256; use crate::util::HexError; @@ -49,6 +50,24 @@ impl_array_newtype!(Ripemd160Hash, u8, 20); /// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x)) pub struct Hash160([u8; 20]); impl_array_newtype!(Hash160, u8, 20); +impl_byte_array_rusqlite_only!(Hash160); + +impl Hash160 { + /// Convert the Hash160 inner bytes to a non-prefixed hex string + pub fn to_hex(&self) -> String { + bytes_to_hex(&self.0) + } + + /// Try to instantiate a Hash160 using the exact inner bytes of the hash. 
+ pub fn from_bytes(bytes: &[u8]) -> Option { + let mut return_bytes = [0; 20]; + if bytes.len() != return_bytes.len() { + return None; + } + return_bytes.copy_from_slice(bytes); + Some(Self(return_bytes)) + } +} impl Default for Sha256dEncoder { fn default() -> Self { diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs new file mode 100644 index 0000000000..ee03b6df85 --- /dev/null +++ b/stacks-signer/src/chainstate.rs @@ -0,0 +1,375 @@ +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::TenureChangePayload; +use blockstack_lib::net::api::getsortition::SortitionInfo; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; +use stacks_common::util::hash::Hash160; +use stacks_common::{info, warn}; + +use crate::client::{ClientError, StacksClient}; +use crate::signerdb::SignerDb; + +/// Captures this signer's current view of a sortition's miner. +#[derive(PartialEq, Eq)] +pub enum SortitionMinerStatus { + /// The signer thinks this sortition's miner is invalid, and hasn't signed any blocks for them. + InvalidatedBeforeFirstBlock, + /// The signer thinks this sortition's miner is invalid, but already signed one or more blocks for them. + InvalidatedAfterFirstBlock, + /// The signer thinks this sortition's miner is valid + Valid, +} + +/// Captures the Stacks sortition related state for +/// a successful sortition +pub struct SortitionState { + /// The miner's pub key hash + pub miner_pkh: Hash160, + /// If known already, the public key which hashes to `miner_pkh` + pub miner_pubkey: Option, + /// the last burn block in this fork which had a sortition + pub prior_sortition: ConsensusHash, + /// the committed to parent tenure ID + pub parent_tenure_id: ConsensusHash, + /// this sortition's consensus hash + pub consensus_hash: ConsensusHash, + /// did the miner in this sortition do something + /// to become invalidated as a miner? 
+ pub invalidated: SortitionMinerStatus, +} + +/// The signer's current view of the stacks chain's sortition +/// state +pub struct SortitionsView { + /// the prior successful sortition (this corresponds to the "prior" miner slot) + pub last_sortition: Option, + /// the current successful sortition (this corresponds to the "current" miner slot) + pub cur_sortition: Option, + /// is the view fresh? + pub fresh: bool, + /// the hash at which the sortitions view was last fetched + pub latest_consensus_hash: Option, +} + +impl TryFrom for SortitionState { + type Error = ClientError; + fn try_from(value: SortitionInfo) -> Result { + Ok(Self { + miner_pkh: value + .miner_pk_hash160 + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + miner_pubkey: None, + prior_sortition: value + .last_sortition_ch + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + consensus_hash: value.consensus_hash, + parent_tenure_id: value + .stacks_parent_ch + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + invalidated: SortitionMinerStatus::Valid, + }) + } +} + +enum ProposedBy<'a> { + LastSortition(&'a SortitionState), + CurrentSortition(&'a SortitionState), +} + +impl<'a> ProposedBy<'a> { + pub fn state(&self) -> &SortitionState { + match self { + ProposedBy::LastSortition(ref x) => x, + ProposedBy::CurrentSortition(ref x) => x, + } + } +} + +impl SortitionsView { + /// Initialize an empty sortitions view struct -- it will refresh() before + /// checking any proposals. + pub fn new() -> Self { + Self { + last_sortition: None, + cur_sortition: None, + fresh: false, + latest_consensus_hash: None, + } + } + + /// Apply checks from the SortitionsView on the block proposal. 
+ /// + pub fn check_proposal( + &mut self, + client: &StacksClient, + signer_db: &SignerDb, + block: &NakamotoBlock, + block_pk: &StacksPublicKey, + ) -> Result { + self.refresh_view(client)?; + let block_pkh = Hash160::from_data(&block_pk.to_bytes_compressed()); + let Some(proposed_by) = self + .cur_sortition + .as_ref() + .and_then(|cur_sortition| { + if block.header.consensus_hash == cur_sortition.consensus_hash { + Some(ProposedBy::CurrentSortition(cur_sortition)) + } else { + None + } + }) + .or_else(|| { + self.last_sortition.as_ref().and_then(|last_sortition| { + if block.header.consensus_hash == last_sortition.consensus_hash { + Some(ProposedBy::LastSortition(last_sortition)) + } else { + None + } + }) + }) + else { + warn!( + "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.as_ref().map(|x| x.consensus_hash), + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + return Ok(false); + }; + + if proposed_by.state().miner_pkh != block_pkh { + warn!( + "Miner block proposal pubkey does not match the winning pubkey hash for its sortition. 
Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_pubkey" => &block_pk.to_hex(), + "proposed_block_pubkey_hash" => %block_pkh, + "sortition_winner_pubkey_hash" => %proposed_by.state().miner_pkh, + ); + return Ok(false); + } + + // check that this miner is the most recent sortition + match proposed_by { + ProposedBy::CurrentSortition(sortition) => { + if sortition.invalidated != SortitionMinerStatus::Valid { + warn!( + "Current miner behaved improperly, this signer views the miner as invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + ProposedBy::LastSortition(_last_sortition) => { + if let Some(cur_sortition) = &self.cur_sortition { + // should only consider blocks from the last sortition if the new sortition was invalidated + // before we signed their first block. + if cur_sortition.invalidated + != SortitionMinerStatus::InvalidatedBeforeFirstBlock + { + warn!( + "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + } + }; + + if let Some(tenure_change) = block.get_tenure_change_tx_payload() { + // in tenure changes, we need to check: + // (1) if the tenure change confirms the expected parent block (i.e., + // the last block we signed in the parent tenure) + // (2) if the parent tenure was a valid choice + let confirms_expected_parent = + Self::check_tenure_change_block_confirmation(tenure_change, block, signer_db)?; + if !confirms_expected_parent { + return Ok(false); + } + // now, we have to check if the parent tenure was a valid choice. 
+ let is_valid_parent_tenure = + Self::check_parent_tenure_choice(proposed_by.state(), block, client)?; + if !is_valid_parent_tenure { + return Ok(false); + } + let last_in_tenure = signer_db + .get_last_signed_block_in_tenure(&block.header.consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + if last_in_tenure.is_some() { + warn!( + "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } else { + // check if the new block confirms the last block in the current tenure + let confirms_latest_in_tenure = + Self::confirms_known_blocks_in(block, &block.header.consensus_hash, signer_db)?; + if !confirms_latest_in_tenure { + return Ok(false); + } + } + + if let Some(tenure_extend) = block.get_tenure_extend_tx_payload() { + // in tenure extends, we need to check: + // (1) if this is the most recent sortition, an extend is allowed if it changes the burnchain view + // (2) if this is the most recent sortition, an extend is allowed if enough time has passed to refresh the block limit + let changed_burn_view = + tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; + let enough_time_passed = Self::tenure_time_passed_block_lim()?; + if !changed_burn_view || !enough_time_passed { + warn!( + "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. 
Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + + Ok(true) + } + + fn check_parent_tenure_choice( + sortition_state: &SortitionState, + block: &NakamotoBlock, + client: &StacksClient, + ) -> Result { + // if the parent tenure is the last sortition, it is a valid choice. + // if the parent tenure is a reorg, then all of the reorged sortitions + // must either have produced zero blocks _or_ produced their first block + // very close to the burn block transition. + if sortition_state.prior_sortition == sortition_state.parent_tenure_id { + return Ok(true); + } + info!( + "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + + let tenures_reorged = client.get_tenure_forking_info( + &sortition_state.parent_tenure_id, + &sortition_state.prior_sortition, + )?; + if tenures_reorged.len() == 0 { + warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. Marking miner invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + for tenure in tenures_reorged.iter() { + if tenure.first_block_mined.is_some() { + // TODO: must check if the first block was poorly timed. 
+ warn!( + "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + ); + return Ok(false); + } + } + + return Ok(true); + } + + fn check_tenure_change_block_confirmation( + tenure_change: &TenureChangePayload, + block: &NakamotoBlock, + signer_db: &SignerDb, + ) -> Result { + // in tenure changes, we need to check: + // (1) if the tenure change confirms the expected parent block (i.e., + // the last block we signed in the parent tenure) + // (2) if the parent tenure was a valid choice + Self::confirms_known_blocks_in(block, &tenure_change.prev_tenure_consensus_hash, signer_db) + } + + fn confirms_known_blocks_in( + block: &NakamotoBlock, + tenure: &ConsensusHash, + signer_db: &SignerDb, + ) -> Result { + let Some(last_known_block) = signer_db + .get_last_signed_block_in_tenure(tenure) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))? 
+ else { + info!( + "Have not signed off on any blocks in the parent tenure, assuming block confirmation is correct"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "tenure" => %tenure, + ); + return Ok(true); + }; + if block.header.chain_length > last_known_block.block.header.chain_length { + return Ok(true); + } else { + warn!( + "Miner block proposal's tenure change transaction does not confirm as many blocks as we expect in the parent tenure"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_chain_length" => block.header.chain_length, + "expected_at_least" => last_known_block.block.header.chain_length + 1, + ); + return Ok(false); + } + } + + /// Has the current tenure lasted long enough to extend the block limit? + pub fn tenure_time_passed_block_lim() -> Result { + // TODO + return Ok(false); + } + + /// If necessary, fetch a new view of the recent sortitions + pub fn refresh_view(&mut self, client: &StacksClient) -> Result<(), ClientError> { + if self.fresh { + return Ok(()); + } + let latest_state = client.get_latest_sortition()?; + let latest_ch = latest_state.consensus_hash.clone(); + + // figure out what cur_sortition will be set to. + // if the latest sortition wasn't successful, query the last one that was. + let latest_success = if latest_state.was_sortition { + latest_state + } else { + info!("Latest state wasn't a sortition: {latest_state:?}"); + let last_sortition_ch = latest_state + .last_sortition_ch + .as_ref() + .ok_or_else(|| ClientError::NoSortitionOnChain)?; + client.get_sortition(last_sortition_ch)? + }; + + // now, figure out what `last_sortition` will be set to. 
+ let last_sortition = latest_success + .last_sortition_ch + .as_ref() + .map(|ch| client.get_sortition(ch)) + .transpose()?; + + self.cur_sortition = Some(SortitionState::try_from(latest_success)?); + self.last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten(); + self.fresh = true; + self.latest_consensus_hash = Some(latest_ch); + + Ok(()) + } +} diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 3ce771309e..26ce5f05f5 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -86,6 +86,12 @@ pub enum ClientError { /// Invalid response from the stacks node #[error("Invalid response from the stacks node: {0}")] InvalidResponse(String), + /// A successful sortition has not occurred yet + #[error("The Stacks chain has not processed any successful sortitions yet")] + NoSortitionOnChain, + /// A successful sortition's info response should be parseable into a SortitionState + #[error("A successful sortition's info response should be parseable into a SortitionState")] + UnexpectedSortitionInfo, } /// Retry a function F with an exponential backoff and notification on transient failure diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b4b5d8a3a1..38a8f78d1e 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -26,9 +26,13 @@ use blockstack_lib::chainstate::stacks::{ TransactionSpendingCondition, TransactionVersion, }; use blockstack_lib::net::api::callreadonly::CallReadOnlyResponse; +use blockstack_lib::net::api::get_tenures_fork_info::{ + TenureForkingInfo, RPC_TENURE_FORKING_INFO_PATH, +}; use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; +use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; use 
blockstack_lib::net::api::getstackers::GetStackersResponse; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; @@ -38,12 +42,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; use serde_json::json; -use slog::slog_debug; +use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; -use stacks_common::debug; -use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, +}; use stacks_common::types::StacksEpochId; +use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -358,6 +364,65 @@ impl StacksClient { self.get_account_entry(address).map(|entry| entry.nonce) } + /// Get information about the tenures between `chosen_parent` and `last_sortition` + pub fn get_tenure_forking_info( + &self, + chosen_parent: &ConsensusHash, + last_sortition: &ConsensusHash, + ) -> Result, ClientError> { + let send_request = || { + self.stacks_node_client + .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) + .send() + .map_err(backoff::Error::transient) + }; + let response = retry_with_exponential_backoff(send_request)?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let tenures = response.json()?; + Ok(tenures) + } + + /// Get the sortition information for the latest sortition + pub fn get_latest_sortition(&self) -> Result { + let send_request = || { + self.stacks_node_client + .get(self.sortition_info_path()) + .send() + .map_err(|e| { + warn!("Signer 
failed to request latest sortition"; "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) + } + + /// Get the sortition information for a given sortition + pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + let send_request = || { + self.stacks_node_client + .get(self.sortition_info_path()) + .query(&[("consensus", ch.to_hex().as_str())]) + .send() + .map_err(|e| { + warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) + } + /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { debug!("Getting stacks node info..."); @@ -649,6 +714,19 @@ impl StacksClient { format!("{}/v2/block_proposal", self.http_origin) } + fn sortition_info_path(&self) -> String { + format!("{}{RPC_SORTITION_INFO_PATH}", self.http_origin) + } + + fn tenure_forking_info_path(&self, start: &ConsensusHash, stop: &ConsensusHash) -> String { + format!( + "{}{RPC_TENURE_FORKING_INFO_PATH}/{}/{}", + self.http_origin, + start.to_hex(), + stop.to_hex() + ) + } + fn core_info_path(&self) -> String { format!("{}/v2/info", self.http_origin) } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index af0e8781a0..a6856bb732 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -20,6 +20,9 @@ Usage documentation can be found in the [README](https://github.com/Trust-Machin // You should have received a copy of the GNU General Public License // along with this program. If not, see . +/// This module stores chainstate information about Stacks, SortitionDB for +/// tracking by the signer. 
+pub mod chainstate; /// The cli module for the signer binary pub mod cli; /// The signer client for communicating with stackerdb/stacks nodes diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 8c6b3ba187..5ef24d1c87 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -25,6 +25,7 @@ use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; use serde::{Deserialize, Serialize}; use slog::slog_debug; use stacks_common::debug; +use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use wsts::net::NonceRequest; @@ -88,6 +89,9 @@ CREATE TABLE IF NOT EXISTS blocks ( reward_cycle INTEGER NOT NULL, signer_signature_hash TEXT NOT NULL, block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + stacks_height INTEGER NOT NULL, burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) )"; @@ -173,6 +177,17 @@ impl SignerDb { try_deserialize(result) } + /// Return the last signed block in a tenure (identified by its consensus hash) + pub fn get_last_signed_block_in_tenure( + &self, + tenure: &ConsensusHash, + ) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height DESC LIMIT 1"; + let result: Option = query_row(&self.db, query, &[tenure])?; + + try_deserialize(result) + } + /// Insert a block into the database. /// `hash` is the `signer_signature_hash` of the block. 
pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { @@ -196,8 +211,13 @@ impl SignerDb { ); self.db .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info) VALUES (?1, ?2, ?3, ?4)", - params![u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), &block_json], + "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + params![ + u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), &block_json, + signed_over, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.block.header.consensus_hash.to_hex(), + ], )?; Ok(()) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 0247a54512..ac2d6a6b8c 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -1078,7 +1078,7 @@ impl Burnchain { /// Hand off the block to the ChainsCoordinator _and_ process the sortition /// *only* to be used by legacy stacks node interfaces, like the Helium node - pub fn process_block_and_sortition_deprecated( + fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs new file mode 100644 index 0000000000..13ed91810e --- /dev/null +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -0,0 +1,361 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Hash160}; +use stacks_common::util::HexError; +use {serde, serde_json}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::http::{ + parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, + HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures_fork_info"; + +static DEPTH_LIMIT: usize = 10; + +/// Struct for information about a tenure that is used 
to determine whether +/// or not the tenure should have been validly forked. +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct TenureForkingInfo { + /// The burnchain header hash of the block that triggered this event. + #[serde(with = "prefix_hex")] + pub burn_block_hash: BurnchainHeaderHash, + /// The burn height of the block that triggered this event. + pub burn_block_height: u64, + /// This sortition ID of the block that triggered this event. This incorporates + /// PoX forking information and the burn block hash to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub sortition_id: SortitionId, + /// The parent of this burn block's Sortition ID + #[serde(with = "prefix_hex")] + pub parent_sortition_id: SortitionId, + /// The consensus hash of the block that triggered this event. This incorporates + /// PoX forking information and burn op information to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub consensus_hash: ConsensusHash, + /// Boolean indicating whether or not there was a successful sortition (i.e. a winning + /// block or miner was chosen). + pub was_sortition: bool, + /// If the sortition occurred, and a block was mined during the tenure, this is the + /// tenure's block. + #[serde(with = "prefix_opt_hex")] + pub first_block_mined: Option, +} + +mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) 
else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(StacksBlockId); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); + +#[derive(Clone, Default)] +pub struct GetTenuresForkInfo { + pub start_sortition: Option, + pub stop_sortition: Option, +} + +/// Decode the HTTP request +impl HttpRequest for GetTenuresForkInfo { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^{RPC_TENURE_FORKING_INFO_PATH}/(?P[0-9a-f]{{40}})/(?P[0-9a-f]{{40}})$"# + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let req_contents = HttpRequestContents::new().query_string(query); + + let start_str = captures + .name("start") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to start_sortition group".to_string()) + })? + .as_str(); + let stop_str = captures + .name("stop") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to stop_sortition group".to_string()) + })? + .as_str(); + let start_sortition = ConsensusHash::from_hex(start_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + let stop_sortition = ConsensusHash::from_hex(stop_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + self.start_sortition = Some(start_sortition); + self.stop_sortition = Some(stop_sortition); + + Ok(req_contents) + } + + fn metrics_identifier(&self) -> &str { + RPC_TENURE_FORKING_INFO_PATH + } +} + +impl TenureForkingInfo { + fn from_snapshot( + sn: &BlockSnapshot, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + ) -> Result { + let first_block_mined = if !sn.sortition { + None + } else { + // is this a nakamoto sortition? + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height)?.ok_or_else( + || { + warn!( + "Failed to lookup stacks epoch for processed snapshot height {}", + sn.block_height + ); + ChainError::InvalidChainstateDB + }, + )?; + if epoch.epoch_id < StacksEpochId::Epoch30 { + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chainstate.db(), + &sn.consensus_hash, + )? 
+ .map(|header| header.index_block_hash()) + } else { + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chainstate.db(), + &sn.consensus_hash, + )? + .map(|header| header.index_block_hash()) + } + }; + Ok(TenureForkingInfo { + burn_block_hash: sn.burn_header_hash.clone(), + burn_block_height: sn.block_height, + sortition_id: sn.sortition_id.clone(), + parent_sortition_id: sn.parent_sortition_id.clone(), + consensus_hash: sn.consensus_hash.clone(), + was_sortition: sn.sortition, + first_block_mined, + }) + } +} + +impl RPCRequestHandler for GetTenuresForkInfo { + /// Reset internal state + fn restart(&mut self) { + self.start_sortition = None; + self.stop_sortition = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let result = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let start_from = self + .stop_sortition + .clone() + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let recurse_end = self + .start_sortition + .clone() + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let mut results = vec![]; + let mut cursor = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &start_from)? 
+ .ok_or_else(|| ChainError::NoSuchBlockError)?; + results.push(TenureForkingInfo::from_snapshot( + &cursor, sortdb, chainstate, + )?); + let handle = sortdb.index_handle(&cursor.sortition_id); + let mut depth = 0; + while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { + depth += 1; + cursor = handle.get_last_snapshot_with_sortition(cursor.block_height)?; + results.push(TenureForkingInfo::from_snapshot( + &cursor, sortdb, chainstate, + )?); + } + + Ok(results) + }); + + let tenures = match result { + Ok(tenures) => tenures, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!( + "Could not find snapshot {:?}\n", + &self.stop_sortition + )), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!( + "Failed to load snapshots for range ({:?}, {:?}]: {:?}\n", + &self.start_sortition, &self.stop_sortition, &e + ); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::JSON, + ); + + Ok(( + resp_preamble, + HttpResponseContents::try_from_json(&tenures)?, + )) + } +} + +impl HttpResponse for GetTenuresForkInfo { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let tenures_info: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(tenures_info)?) 
+ } +} diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs new file mode 100644 index 0000000000..a4fba89fb5 --- /dev/null +++ b/stackslib/src/net/api/getsortition.rs @@ -0,0 +1,380 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::{to_hex, Hash160}; +use stacks_common::util::HexError; +use {serde, serde_json}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::http::{ + parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, + HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, 
HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Debug, Clone, PartialEq)] +pub enum QuerySpecifier { + ConsensusHash(ConsensusHash), + BurnchainHeaderHash(BurnchainHeaderHash), + BlockHeight(u64), + Latest, +} + +pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortition"; + +/// Struct for sortition information returned via the GetSortition API call +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct SortitionInfo { + /// The burnchain header hash of the block that triggered this event. + #[serde(with = "prefix_hex")] + pub burn_block_hash: BurnchainHeaderHash, + /// The burn height of the block that triggered this event. + pub burn_block_height: u64, + /// This sortition ID of the block that triggered this event. This incorporates + /// PoX forking information and the burn block hash to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub sortition_id: SortitionId, + /// The parent of this burn block's Sortition ID + #[serde(with = "prefix_hex")] + pub parent_sortition_id: SortitionId, + /// The consensus hash of the block that triggered this event. This incorporates + /// PoX forking information and burn op information to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub consensus_hash: ConsensusHash, + /// Boolean indicating whether or not there was a successful sortition (i.e. a winning + /// block or miner was chosen). + pub was_sortition: bool, + /// If sortition occurred, and the miner's VRF key registration + /// associated a nakamoto mining pubkey with their commit, this + /// will contain the Hash160 of that mining key.
+ #[serde(with = "prefix_opt_hex")] + pub miner_pk_hash160: Option, + /// If sortition occurred, this will be the consensus hash of the burn block corresponding + /// to the winning block commit's parent block ptr. In 3.x, this is the consensus hash of + /// the tenure that this new burn block's miner will be building off of. + #[serde(with = "prefix_opt_hex")] + pub stacks_parent_ch: Option, + /// If sortition occurred, this will be the consensus hash of the most recent sortition before + /// this one. + #[serde(with = "prefix_opt_hex")] + pub last_sortition_ch: Option, + #[serde(with = "prefix_opt_hex")] + /// In Stacks 2.x, this is the winning block. + /// In Stacks 3.x, this is the first block of the parent tenure. + pub committed_block_hash: Option, +} + +mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) 
else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); + +impl TryFrom<(&String, &String)> for QuerySpecifier { + type Error = Error; + + fn try_from(value: (&String, &String)) -> Result { + let hex_str = if value.1.starts_with("0x") { + &value.1[2..] + } else { + value.1.as_str() + }; + match value.0.as_str() { + "consensus" => Ok(Self::ConsensusHash( + ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, + )), + "burn" => Ok(Self::BurnchainHeaderHash( + BurnchainHeaderHash::from_hex(hex_str) + .map_err(|e| Error::DecodeError(e.to_string()))?, + )), + "burn_height" => Ok(Self::BlockHeight( + value + .1 + .parse::() + .map_err(|e| Error::DecodeError(e.to_string()))?, + )), + other => Err(Error::DecodeError(format!("Unknown query param: {other}"))), + } + } +} + +#[derive(Clone)] +pub struct GetSortitionHandler { + pub query: QuerySpecifier, +} + +impl GetSortitionHandler { + pub fn new() -> Self { + Self { + query: QuerySpecifier::Latest, + } + } +} +/// Decode the HTTP request +impl HttpRequest for GetSortitionHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!("^{RPC_SORTITION_INFO_PATH}$")).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let req_contents = HttpRequestContents::new().query_string(query); + if req_contents.get_query_args().len() > 1 { + return Err(Error::DecodeError( + "May only supply up to one query argument".into(), + )); + } + self.query = QuerySpecifier::Latest; + for (key, value) in req_contents.get_query_args().iter() { + self.query = QuerySpecifier::try_from((key, value))?; + } + + Ok(req_contents) + } + + fn metrics_identifier(&self) -> &str { + RPC_SORTITION_INFO_PATH + } +} + +impl RPCRequestHandler for GetSortitionHandler { + /// Reset internal state + fn restart(&mut self) { + self.query = QuerySpecifier::Latest; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let result = + node.with_node_state(|_network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map(Some) + }, + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + }, + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + }, + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + }, + }; + let sortition_sn = query_result? 
+ .ok_or_else(|| ChainError::NoSuchBlockError)?; + + let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { + (None, None, None, None) + } else { + let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? + .ok_or_else(|| { + error!( + "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? + .ok_or_else(|| { + warn!( + "Failed to load the snapshot of the winning block commits parent"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + + // try to figure out what the last snapshot in this fork was with a successful + // sortition. 
+ // optimization heuristic: short-circuit the load if it's just `stacks_parent_sn` + let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 { + stacks_parent_sn.consensus_hash.clone() + } else { + // we actually need to perform the marf lookup + let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + last_sortition.consensus_hash + }; + + (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash), + Some(last_sortition_ch)) + }; + + Ok(SortitionInfo { + burn_block_hash: sortition_sn.burn_header_hash, + burn_block_height: sortition_sn.block_height, + sortition_id: sortition_sn.sortition_id, + parent_sortition_id: sortition_sn.parent_sortition_id, + consensus_hash: sortition_sn.consensus_hash, + was_sortition: sortition_sn.sortition, + miner_pk_hash160, + stacks_parent_ch, + last_sortition_ch, + committed_block_hash, + }) + }); + + let block = match result { + Ok(block) => block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Could not find snapshot {:?}\n", &self.query)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load snapshot for {:?}: {:?}\n", &self.query, &e); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let result = HttpResponseContents::try_from_json(&block)?; + Ok((preamble, result)) + } +} + +impl HttpResponse for GetSortitionHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let sortition_info: SortitionInfo = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(sortition_info)?)
+ } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index f1af0a9e60..58425b4955 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -32,6 +32,7 @@ use crate::net::Error as NetError; use crate::stacks_common::codec::StacksMessageCodec; pub mod callreadonly; +pub mod get_tenures_fork_info; pub mod getaccount; pub mod getattachment; pub mod getattachmentsinv; @@ -50,6 +51,7 @@ pub mod getmicroblocks_indexed; pub mod getmicroblocks_unconfirmed; pub mod getneighbors; pub mod getpoxinfo; +pub mod getsortition; pub mod getstackerdbchunk; pub mod getstackerdbmetadata; pub mod getstackers; @@ -127,6 +129,8 @@ impl StacksHttp { self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); + self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); + self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); } } diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs new file mode 100644 index 0000000000..6c9e552759 --- /dev/null +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -0,0 +1,63 @@ +use std::collections::BTreeMap; +use std::fmt::Display; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::get_tenures_fork_info::GetTenuresForkInfo; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::Error as NetError; + +fn make_preamble(start: &T, stop: &R) -> 
HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/tenures_fork_info/{start}/{stop}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut handler = GetTenuresForkInfo::default(); + + let tests = vec![ + ( + make_preamble(&ConsensusHash([0; 20]), &ConsensusHash([255; 20])), + Ok((ConsensusHash([0; 20]), ConsensusHash([255; 20]))), + ), + ( + make_preamble(&BurnchainHeaderHash([0; 32]), &ConsensusHash([255; 20])), + Err(NetError::NotFoundError), + ), + ( + make_preamble(&ConsensusHash([255; 20]), &BurnchainHeaderHash([0; 32])), + Err(NetError::NotFoundError), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok((start, stop)) => { + assert!(parsed_request.is_ok()); + assert_eq!(&handler.start_sortition, &Some(start)); + assert_eq!(&handler.stop_sortition, &Some(stop)); + } + Err(e) => { + assert_eq!(e, parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs new file mode 100644 index 0000000000..40cfaf53cf --- /dev/null +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -0,0 +1,88 @@ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{Error as HttpError, 
HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::Error as NetError; + +fn make_preamble(query: &str) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/sortition{query}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut handler = GetSortitionHandler::new(); + + let tests = vec![ + (make_preamble(""), Ok(QuerySpecifier::Latest)), + ( + make_preamble("?consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), + Ok(QuerySpecifier::ConsensusHash( + ConsensusHash::from_hex("deadbeef00deadbeef01deadbeef02deadbeef03").unwrap(), + )), + ), + ( + make_preamble("?burn=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), + Ok(QuerySpecifier::BurnchainHeaderHash( + BurnchainHeaderHash::from_hex( + "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff", + ) + .unwrap(), + )), + ), + ( + make_preamble("?burn_height=100"), + Ok(QuerySpecifier::BlockHeight(100)), + ), + ( + make_preamble("?burn_height=a1be"), + Err(HttpError::DecodeError( + "invalid digit found in string".into(), + )), + ), + ( + make_preamble("?burn=a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into())), + ), + ( + make_preamble("?consensus=a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into())), + ), + ( + make_preamble("?burn_height=20&consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), + Err(HttpError::DecodeError( + "May only supply up to one query argument".into(), + )), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let 
parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok(query) => { + assert!(parsed_request.is_ok()); + assert_eq!(&handler.query, &query); + } + Err(e) => { + assert_eq!(NetError::Http(e), parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e..591a12131c 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -51,6 +51,7 @@ use crate::net::{ }; mod callreadonly; +mod get_tenures_fork_info; mod getaccount; mod getattachment; mod getattachmentsinv; @@ -69,6 +70,7 @@ mod getmicroblocks_indexed; mod getmicroblocks_unconfirmed; mod getneighbors; mod getpoxinfo; +mod getsortition; mod getstackerdbchunk; mod getstackerdbmetadata; mod getstxtransfercost; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..a757233e8e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -893,6 +893,7 @@ impl BlockMinerThread { block.header.block_hash(), block.txs.len(); "signer_sighash" => %block.header.signer_signature_hash(), + "consensus_hash" => %block.header.consensus_hash, ); self.event_dispatcher.process_mined_nakamoto_block_event( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f46d9a3878..caeee26fae 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use std::collections::{HashMap, HashSet}; +use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; @@ -39,14 +40,18 @@ use stacks::chainstate::burn::operations::{ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; use stacks::chainstate::nakamoto::test_signers::TestSigners; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use stacks::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, TransactionPayload}; +use stacks::chainstate::stacks::{ + SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, + TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, +}; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -60,7 +65,6 @@ use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; use stacks::util::hash::hex_bytes; -use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -71,11 +75,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_TESTNET, 
STACKS_EPOCH_MAX}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + TrieHash, }; use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; +use stacks_signer::client::ClientError; +use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; @@ -309,7 +316,7 @@ pub fn blind_signer( pub fn get_latest_block_proposal( conf: &Config, sortdb: &SortitionDB, -) -> Result { +) -> Result<(NakamotoBlock, StacksPublicKey), String> { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) @@ -330,7 +337,7 @@ pub fn get_latest_block_proposal( // get_block_proposal_msg_v1(&mut miners_stackerdb, miner_slot_id.start); block_proposal.block }; - Ok(proposed_block) + Ok((proposed_block, miner_pubkey)) } #[allow(dead_code)] @@ -371,7 +378,7 @@ pub fn read_and_sign_block_proposal( .known_selected_anchor_block_owned() .expect("Expected a reward set"); - let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; + let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let signer_sig_hash = proposed_block.header.signer_signature_hash(); @@ -2244,7 +2251,8 @@ fn miner_writes_proposed_block_to_stackerdb() { let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); let proposed_block = 
get_latest_block_proposal(&naka_conf, &sortdb) - .expect("Expected to find a proposed block in the StackerDB"); + .expect("Expected to find a proposed block in the StackerDB") + .0; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); @@ -3929,3 +3937,498 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +use stacks_signer::chainstate::SortitionsView; + +#[test] +#[ignore] +fn signer_chainstate() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 200; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * 20, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting 
bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let mut sortitions_view = SortitionsView::new(); + + // query for prometheus metrics + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + assert!(res.contains(&expected_result)); + } + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted.clone()); + + let socket = naka_conf + .node + .rpc_bind + .to_socket_addrs() + .unwrap() + .next() + .unwrap(); + let signer_client = stacks_signer::client::StacksClient::new( + StacksPrivateKey::from_seed(&[0, 1, 2, 3]), + socket, + naka_conf + .connection_options + .block_proposal_token + .clone() + .unwrap_or("".into()), + false, + ); + + // there hasn't been a successful nakamoto sortition yet, so expect an error + assert!( + 
matches!( + sortitions_view.refresh_view(&signer_client).unwrap_err(), + ClientError::UnexpectedSortitionInfo + ), + "Sortitions view should fail to refresh if there are no successful nakamoto sortitions yet", + ); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let mut signer_db = + SignerDb::new(format!("{}/signer_db_path", naka_conf.node.working_dir)).unwrap(); + + // Mine some nakamoto tenures + // track the last tenure's first block and subsequent blocks so we can + // check that they get rejected by the sortitions_view + let mut last_tenures_proposals: Option<(StacksPublicKey, NakamotoBlock, Vec)> = + None; + // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals + let mut first_tenure_blocks: Option> = None; + for i in 0..5 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + sortitions_view.fresh = false; + sortitions_view.refresh_view(&signer_client).unwrap(); + + // check the prior tenure's proposals again, confirming that the sortitions_view + // will reject them. 
+ if let Some((ref miner_pk, ref prior_tenure_first, ref prior_tenure_interims)) = + last_tenures_proposals + { + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, prior_tenure_first, miner_pk) + .unwrap(); + assert!( + !valid, + "Sortitions view should reject proposals from prior tenure" + ); + for block in prior_tenure_interims.iter() { + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, block, miner_pk) + .unwrap(); + assert!( + !valid, + "Sortitions view should reject proposals from prior tenure" + ); + } + } + + let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); + signer_db + .insert_block(&BlockInfo { + block: proposal.0.clone(), + burn_block_height, + reward_cycle, + vote: None, + valid: Some(true), + nonce_request: None, + signed_over: true, + }) + .unwrap(); + + let before = proposals_submitted.load(Ordering::SeqCst); + + // submit a tx to trigger an intermediate block + let sender_nonce = i; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + let timer = Instant::now(); + while proposals_submitted.load(Ordering::SeqCst) <= before { + thread::sleep(Duration::from_millis(5)); + if timer.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for nakamoto miner to produce intermediate block"); + } + } + + // an intermediate block was produced. 
check the proposed block + let proposal_interim = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + + let valid = sortitions_view + .check_proposal( + &signer_client, + &signer_db, + &proposal_interim.0, + &proposal_interim.1, + ) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + // force the view to refresh and check again + + sortitions_view.fresh = false; + let valid = sortitions_view + .check_proposal( + &signer_client, + &signer_db, + &proposal_interim.0, + &proposal_interim.1, + ) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + + signer_db + .insert_block(&BlockInfo { + block: proposal_interim.0.clone(), + burn_block_height, + reward_cycle, + vote: None, + valid: Some(true), + nonce_request: None, + signed_over: true, + }) + .unwrap(); + + if first_tenure_blocks.is_none() { + first_tenure_blocks = Some(vec![proposal.0.clone(), proposal_interim.0.clone()]); + } + last_tenures_proposals = Some((proposal.1, proposal.0, vec![proposal_interim.0])); + } + + // now we'll check some specific cases of invalid proposals + // Case: the block doesn't confirm the prior blocks that have been signed. 
+ let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); + let last_tenure_header = &last_tenure.header; + let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: last_tenure_header.chain_length, + burn_spent: last_tenure_header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: last_tenure_header.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header, + txs: vec![], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." 
+ ); + + // Case: the block contains a tenure change, but blocks have already + // been signed in this tenure + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: last_tenure_header.chain_length, + burn_spent: last_tenure_header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: last_tenure_header.parent_block_id.clone(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header, + txs: vec![ + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange( + last_tenure.get_tenure_change_tx_payload().unwrap().clone(), + ), + }, + last_tenure.txs[1].clone(), + ], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." 
+ ); + + // Case: the block contains a tenure change, but it doesn't confirm all the blocks of the parent tenure + let reorg_to_block = first_tenure_blocks.as_ref().unwrap().first().unwrap(); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: reorg_to_block.header.chain_length + 1, + burn_spent: reorg_to_block.header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: reorg_to_block.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header.clone(), + txs: vec![ + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), + burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + previous_tenure_end: reorg_to_block.block_id(), + previous_tenure_blocks: 1, + cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&miner_pk), + }), + }, + last_tenure.txs[1].clone(), + ], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + 
.unwrap(), + "A sibling of a previously approved block must be rejected." + ); + + // Case: the block contains a tenure change, but the parent tenure is a reorg + let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); + // make the sortition_view *think* that our block commit pointed at this old tenure + sortitions_view + .cur_sortition + .as_mut() + .map(|sortition_state| { + sortition_state.parent_tenure_id = reorg_to_block.header.consensus_hash.clone() + }); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: reorg_to_block.header.chain_length + 1, + burn_spent: reorg_to_block.header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: reorg_to_block.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header.clone(), + txs: vec![ + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), + burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + previous_tenure_end: reorg_to_block.block_id(), + 
previous_tenure_blocks: 1,
+                        cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound,
+                        pubkey_hash: Hash160::from_node_public_key(&miner_pk),
+                    }),
+                },
+                last_tenure.txs[1].clone(),
+            ],
+        };
+
+    assert!(
+        !sortitions_view
+            .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk)
+            .unwrap(),
+        "A block whose tenure-change transaction claims a reorged parent tenure must be rejected."
+    );
+
+    sortitions_view.fresh = false;
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}

From 401d2d3814f1fffd0d1252176805431a4989902f Mon Sep 17 00:00:00 2001
From: Hank Stoever
Date: Mon, 20 May 2024 10:35:59 -0700
Subject: [PATCH 021/148] crc: docstring, dead_code macro

---
 stackslib/src/chainstate/nakamoto/test_signers.rs         | 1 +
 testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs
index a7e521c155..50a8e063a6 100644
--- a/stackslib/src/chainstate/nakamoto/test_signers.rs
+++ b/stackslib/src/chainstate/nakamoto/test_signers.rs
@@ -245,6 +245,7 @@ impl TestSigners {
 
     /// Sign a Nakamoto block using the aggregate key.
     /// NB: this function is current unused.
+    #[allow(dead_code)]
     fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature {
         let mut rng = rand_core::OsRng::default();
         let msg = block.header.signer_signature_hash().0;
diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
index 49204166ab..c0f42e7820 100644
--- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
+++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
@@ -576,6 +576,10 @@ impl SignCoordinator {
         ))
     }
 
+    /// Start gathering signatures for a Nakamoto block.
+ /// This function begins by sending a `BlockProposal` message + /// to the signers, and then waits for the signers to respond + /// with their signatures. pub fn begin_sign_v0( &mut self, block: &NakamotoBlock, @@ -653,7 +657,7 @@ impl SignCoordinator { continue; } - // TODO: get messages from signers + // TODO: get messages from signers (#4775) } Err(NakamotoNodeError::SignerSignatureError( From 650c86cf8925a69f37432ea45b9f97522c7fc13d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 20 May 2024 16:59:36 -0700 Subject: [PATCH 022/148] feat: gather v0 block signatures from stackerdb --- .github/workflows/bitcoin-tests.yml | 1 + .../stacks-node/src/nakamoto_node/miner.rs | 4 +- .../src/nakamoto_node/sign_coordinator.rs | 168 ++++++++++++++++-- testnet/stacks-node/src/tests/signer/mod.rs | 71 ++++++-- testnet/stacks-node/src/tests/signer/v0.rs | 86 ++++++++- testnet/stacks-node/src/tests/signer/v1.rs | 7 +- 6 files changed, 294 insertions(+), 43 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 87fe5a8f09..74b4074b8e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -83,6 +83,7 @@ jobs: - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored - tests::signer::v0::block_proposal_rejection + - tests::signer::v0::miner_gather_signatures - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected - tests::signer::v1::filter_bad_transactions diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..29631cdec0 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -310,7 +310,7 @@ impl BlockMinerThread { &reward_set, reward_cycle, miner_privkey_as_scalar, - aggregate_public_key, + Some(aggregate_public_key), &stackerdbs, &self.config, ) @@ -395,7 +395,7 @@ impl BlockMinerThread { 
&reward_set, reward_cycle, miner_privkey_as_scalar, - Point::new(), + None, &stackerdbs, &self.config, ) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index c0f42e7820..3cf1c6d144 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -13,17 +13,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::BTreeMap; use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v0::messages::{BlockResponse, SignerMessage as SignerMessageV0}; use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; @@ -65,6 +66,8 @@ pub struct SignCoordinator { is_mainnet: bool, miners_session: StackerDBSession, signing_round_timeout: Duration, + signer_entries: HashMap, + weight_threshold: u32, pub next_signer_bitvec: BitVec<4000>, } @@ -122,6 +125,7 @@ impl NakamotoSigningParams { } } +#[allow(dead_code)] fn get_signer_commitments( is_mainnet: bool, reward_set: &[NakamotoSignerEntry], @@ -196,9 +200,10 @@ impl SignCoordinator { reward_set: &RewardSet, reward_cycle: u64, 
message_key: Scalar, - aggregate_public_key: Point, + aggregate_public_key: Option, stackerdb_conn: &StackerDBs, config: &Config, + // v1: bool, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -250,6 +255,32 @@ impl SignCoordinator { ..Default::default() }; + let total_weight = + reward_set_signers + .iter() + .cloned() + .map(|s| s.weight) + .fold(0, |w, acc| { + acc.checked_add(w) + .expect("FATAL: Total signer weight > u32::MAX") + }); + + let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; + + let signer_public_keys = reward_set_signers + .iter() + .cloned() + .enumerate() + .map(|(idx, signer)| { + let Ok(slot_id) = u32::try_from(idx) else { + return Err(ChainstateError::InvalidStacksBlock( + "Signer index exceeds u32".into(), + )); + }; + Ok((slot_id, signer)) + }) + .collect::, ChainstateError>>()?; + let mut coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { @@ -272,25 +303,31 @@ impl SignCoordinator { miners_session, signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, + signer_entries: signer_public_keys, + weight_threshold: threshold, }; - sign_coordinator - .coordinator - .set_aggregate_public_key(Some(aggregate_public_key)); + if let Some(aggregate_public_key) = aggregate_public_key { + sign_coordinator + .coordinator + .set_aggregate_public_key(Some(aggregate_public_key)); + } return Ok(sign_coordinator); } } - let party_polynomials = get_signer_commitments( - is_mainnet, - reward_set_signers.as_slice(), - stackerdb_conn, - reward_cycle, - &aggregate_public_key, - )?; - if let Err(e) = coordinator - .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) - { - warn!("Failed to set a valid set of party polynomials"; "error" => %e); - }; + if let Some(aggregate_public_key) = aggregate_public_key { + let party_polynomials = get_signer_commitments( + is_mainnet, + 
reward_set_signers.as_slice(), + stackerdb_conn, + reward_cycle, + &aggregate_public_key, + )?; + if let Err(e) = coordinator + .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) + { + warn!("Failed to set a valid set of party polynomials"; "error" => %e); + }; + } let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); if replaced_other { @@ -306,6 +343,8 @@ impl SignCoordinator { miners_session, signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, + signer_entries: signer_public_keys, + weight_threshold: threshold, }) } @@ -606,6 +645,9 @@ impl SignCoordinator { }; let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => ?&block.header.signer_signature_hash().0, + ); Self::send_signers_message::( &self.message_key, sortdb, @@ -636,6 +678,13 @@ impl SignCoordinator { )); }; + let mut total_weight_signed: u32 = 0; + let mut gathered_signatures = BTreeMap::new(); + + info!("SignCoordinator: beginning to watch for block signatures."; + "threshold" => self.weight_threshold, + ); + let start_ts = Instant::now(); while start_ts.elapsed() <= self.signing_round_timeout { let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { @@ -657,7 +706,88 @@ impl SignCoordinator { continue; } - // TODO: get messages from signers (#4775) + let modified_slots = &event.modified_slots.clone(); + + // Update `next_signers_bitvec` with the slots that were modified in the event + modified_slots.iter().for_each(|chunk| { + if let Ok(slot_id) = chunk.slot_id.try_into() { + match &self.next_signer_bitvec.set(slot_id, true) { + Err(e) => { + warn!("Failed to set bitvec for next signer: {e:?}"); + } + _ => (), + }; + } else { + error!("FATAL: slot_id greater than u16, which should never happen."); + } + }); + + let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { + warn!("Failure parsing 
StackerDB event into signer event. Ignoring message."; "err" => ?e); + }) else { + continue; + }; + let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + debug!("Received signer event other than a signer message. Ignoring."); + continue; + }; + if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { + debug!("Received signer event for other reward cycle. Ignoring."); + continue; + }; + let slot_ids = modified_slots + .iter() + .map(|chunk| chunk.slot_id) + .collect::>(); + + debug!("SignCoordinator: Received messages from signers"; + "count" => messages.len(), + "slot_ids" => ?slot_ids, + "threshold" => self.weight_threshold + ); + + for (message, slot_id) in messages.into_iter().zip(slot_ids) { + match message { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(( + response_hash, + signature, + ))) => { + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != response_hash { + warn!( + "Processed signature but didn't validate over the expected block. Returning error."; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + debug!("SignCoordinator: Total weight signed: {total_weight_signed}"); + gathered_signatures.insert(slot_id, signature); + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { + debug!("Received rejected block response. Ignoring."); + } + SignerMessageV0::BlockProposal(_) => { + debug!("Received block proposal message. 
Ignoring."); + } + } + } + + // After gathering all signatures, return them if we've hit the threshold + if total_weight_signed >= self.weight_threshold { + info!("SignCoordinator: Received enough signatures. Continuing."); + return Ok(gathered_signatures.values().cloned().collect()); + } } Err(NakamotoNodeError::SignerSignatureError( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index d40073bcbc..31d2dabc11 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -38,10 +38,11 @@ use clarity::boot_util::boot_code_id; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::SIGNERS_NAME; +use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; +use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; @@ -243,7 +244,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest MinedNakamotoBlockEvent { let new_block = self.mine_nakamoto_block(timeout); let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block(&signer_sighash, timeout); + let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); new_block } @@ -275,15 +276,51 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ThresholdSignature { + let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); + let signer_signature_hex = 
block_obj.get("signer_signature").unwrap().as_str().unwrap(); + let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); + let signer_signature = + ThresholdSignature::consensus_deserialize(&mut signer_signature_bytes.as_slice()) + .unwrap(); + signer_signature + } + + /// Wait for a confirmed block and return a list of individual + /// signer signatures + fn wait_for_confirmed_block_v0( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> Vec { + let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); + block_obj + .get("signer_signature") + .unwrap() + .as_array() + .expect("Expected signer_signature to be an array") + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>() + .expect("Unable to deserialize array of MessageSignature") + } + + /// Wait for a confirmed block and return a list of individual + /// signer signatures + fn wait_for_confirmed_block_with_hash( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> serde_json::Map { let t_start = Instant::now(); while t_start.elapsed() <= timeout { let blocks = test_observer::get_blocks(); - if let Some(signature) = blocks.iter().find_map(|block_json| { + if let Some(block) = blocks.iter().find_map(|block_json| { let block_obj = block_json.as_object().unwrap(); let sighash = block_obj // use the try operator because non-nakamoto blocks @@ -294,16 +331,9 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest PublicKeys { - let entries = self - .stacks_client - .get_reward_set_signers(reward_cycle) - .unwrap() - .unwrap(); + let entries = self.get_reward_set_signers(reward_cycle); let entries = SignerEntries::parse(false, &entries).unwrap(); entries.public_keys } + /// Get the signers for the given reward cycle + pub fn get_reward_set_signers(&self, reward_cycle: u64) -> Vec { + self.stacks_client + 
.get_reward_set_signers(reward_cycle) + .unwrap() + .unwrap() + } + #[allow(dead_code)] fn get_signer_metrics(&self) -> String { #[cfg(feature = "monitoring_prom")] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7d7bebcec5..55115c5f18 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -14,6 +14,7 @@ // along with this program. If not, see . use std::env; +use std::sync::atomic::Ordering; use std::time::Duration; use libsigner::v0::messages::{ @@ -25,6 +26,8 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::types::chainstate::StacksPrivateKey; +use stacks::types::PublicKey; +use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::util_lib::boot::boot_code_id; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; @@ -33,7 +36,7 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::tests::nakamoto_integrations::boot_to_epoch_3_reward_set; +use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::next_block_and_wait; use crate::BurnchainController; @@ -96,6 +99,27 @@ impl SignerTest { debug!("Singers initialized"); self.run_until_epoch_3_boundary(); + + let (vrfs_submitted, commits_submitted) = ( + self.running_nodes.vrfs_submitted.clone(), + self.running_nodes.commits_submitted.clone(), + ); + info!("Submitting 1 BTC block for miner VRF key registration"); + // first block wakes up the run loop, wait until a key registration has been submitted. 
+ next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + info!("Successfully triggered first block to wake up the miner runloop."); + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + info!("Ready to mine Nakamoto blocks!"); } } @@ -212,3 +236,63 @@ fn block_proposal_rejection() { } signer_test.shutdown(); } + +// Basic test to ensure that miners are able to gather block responses +// from signers and create blocks. +#[test] +#[ignore] +fn miner_gather_signatures() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new(num_signers); + signer_test.boot_to_epoch_3(); + let timeout = Duration::from_secs(30); + + info!("------------------------- Try mining one block -------------------------"); + signer_test.mine_nakamoto_block(timeout); + + // Verify that the signers accepted the proposed block, sending back a validate ok response + let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(timeout); + let message = proposed_signer_signature_hash.0; + + info!("------------------------- Test Block Signed -------------------------"); + // Verify that the signers signed the proposed block + let signature = + signer_test.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); + + info!("Got {} signatures", signature.len()); + + assert_eq!(signature.len(), num_signers); + + let reward_cycle = signer_test.get_current_reward_cycle(); + let 
signers = signer_test.get_reward_set_signers(reward_cycle); + + // Verify that the signers signed the proposed block + + let all_signed = signers.iter().zip(signature).all(|(signer, signature)| { + let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + + // let valid = stacks_public_key.verify(message, signature); + let valid = stacks_public_key + .verify(&message, &signature) + .expect("Failed to verify signature"); + if !valid { + error!( + "Failed to verify signature for signer: {:?}", + stacks_public_key + ); + } + valid + }); + assert!(all_signed); +} diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 66855514e7..30f499caae 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -879,7 +879,8 @@ fn block_proposal() { info!("------------------------- Test Block Signed -------------------------"); // Verify that the signers signed the proposed block - let signature = signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, timeout); + let signature = + signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, timeout); assert!(signature .0 .verify(&key, proposed_signer_signature_hash.as_bytes())); @@ -1098,7 +1099,7 @@ fn sign_after_signer_reboot() { signer_test.mine_nakamoto_block(timeout); let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); let signature = - signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout); + signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); assert!( signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), @@ -1119,7 +1120,7 @@ fn sign_after_signer_reboot() { let last_block = signer_test.mine_nakamoto_block(timeout); let proposed_signer_signature_hash = 
signer_test.wait_for_validate_ok_response(short_timeout); let frost_signature = - signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout); + signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); // Check that the latest block's bitvec is all 1's assert_eq!( From 6cfbb17664d9d4ad7a50a0a624bebd4c603bb092 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 20 May 2024 14:30:16 -0400 Subject: [PATCH 023/148] refactor: implement `BurnStateDB` for `SortitionHandleConn` Use `SortitionHandleConn` instead of `SortitionDBConn`. This change required propagation through many locations. --- stackslib/src/chainstate/burn/db/sortdb.rs | 15 ++- stackslib/src/chainstate/coordinator/mod.rs | 4 +- stackslib/src/chainstate/coordinator/tests.rs | 64 ++++----- .../chainstate/nakamoto/coordinator/mod.rs | 4 +- stackslib/src/chainstate/nakamoto/miner.rs | 10 +- .../src/chainstate/nakamoto/tests/mod.rs | 2 +- .../src/chainstate/nakamoto/tests/node.rs | 12 +- stackslib/src/chainstate/stacks/boot/mod.rs | 50 +++---- .../src/chainstate/stacks/boot/pox_2_tests.rs | 10 +- .../src/chainstate/stacks/boot/pox_3_tests.rs | 40 +++--- .../src/chainstate/stacks/boot/pox_4_tests.rs | 42 +++--- .../chainstate/stacks/boot/signers_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 12 +- .../src/chainstate/stacks/db/unconfirmed.rs | 92 +++++++------ stackslib/src/chainstate/stacks/miner.rs | 14 +- .../src/chainstate/stacks/tests/accounting.rs | 66 ++++----- .../stacks/tests/block_construction.rs | 76 ++++++----- .../stacks/tests/chain_histories.rs | 32 ++--- stackslib/src/chainstate/stacks/tests/mod.rs | 8 +- stackslib/src/clarity_vm/database/mod.rs | 17 ++- .../src/clarity_vm/tests/epoch_switch.rs | 2 +- stackslib/src/core/mempool.rs | 2 +- stackslib/src/main.rs | 6 +- stackslib/src/net/api/callreadonly.rs | 96 ++++++------- stackslib/src/net/api/getaccount.rs | 126 +++++++++--------- stackslib/src/net/api/getconstantval.rs | 32 
+++-- stackslib/src/net/api/getcontractabi.rs | 20 +-- stackslib/src/net/api/getcontractsrc.rs | 50 +++---- stackslib/src/net/api/getdatavar.rs | 44 +++--- .../src/net/api/getistraitimplemented.rs | 56 ++++---- stackslib/src/net/api/getmapentry.rs | 58 ++++---- stackslib/src/net/api/getpoxinfo.rs | 2 +- stackslib/src/net/api/postblock_proposal.rs | 4 +- stackslib/src/net/api/tests/mod.rs | 10 +- stackslib/src/net/mod.rs | 4 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 42 +++--- stackslib/src/net/stackerdb/config.rs | 2 +- stackslib/src/net/tests/download/epoch2x.rs | 6 +- .../stacks-node/src/nakamoto_node/miner.rs | 38 +++--- testnet/stacks-node/src/neon_node.rs | 8 +- testnet/stacks-node/src/node.rs | 2 +- testnet/stacks-node/src/run_loop/helium.rs | 16 +-- testnet/stacks-node/src/tenure.rs | 4 +- testnet/stacks-node/src/tests/epoch_21.rs | 4 +- testnet/stacks-node/src/tests/epoch_22.rs | 4 +- testnet/stacks-node/src/tests/epoch_24.rs | 4 +- .../src/tests/nakamoto_integrations.rs | 18 +-- .../src/tests/neon_integrations.rs | 21 ++- 49 files changed, 680 insertions(+), 575 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..eb49daa50a 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2595,6 +2595,19 @@ impl<'a> SortitionHandleConn<'a> { } } } + + pub fn get_reward_set_payouts_at( + &self, + sortition_id: &SortitionId, + ) -> Result<(Vec, u128), db_error> { + let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; + let args: &[&dyn ToSql] = &[sortition_id]; + let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; + + let pox_addrs: (Vec, u128) = + serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); + Ok(pox_addrs) + } } // Connection methods @@ -2616,7 +2629,7 @@ impl SortitionDB { Ok(index_tx) } - /// Make an indexed 
connectino + /// Make an indexed connection pub fn index_conn<'a>(&'a self) -> SortitionDBConn<'a> { SortitionDBConn::new( &self.marf, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 96eae44641..2836ec7b4c 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -3274,7 +3274,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -3288,7 +3288,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index d5073c8f85..a76f047725 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -667,7 +667,7 @@ fn make_genesis_block_with_recipients( ) .unwrap(); - let iconn = sort_db.index_conn(); + let iconn = sort_db.index_handle_at_tip(); let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = builder @@ -922,7 +922,7 @@ fn make_stacks_block_with_input( let total_burn = parents_sortition.total_burn; - let iconn = sort_db.index_conn(); + let iconn = sort_db.index_handle_at_tip(); let mut builder = StacksBlockBuilder::make_regtest_block_builder( burnchain, @@ -1286,7 +1286,7 @@ fn missed_block_commits_2_05() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn 
.with_readonly_clarity_env( @@ -1636,7 +1636,7 @@ fn missed_block_commits_2_1() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -1981,7 +1981,7 @@ fn late_block_commits_2_1() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2154,7 +2154,7 @@ fn test_simple_setup() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2464,7 +2464,7 @@ fn test_sortition_with_reward_set() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2709,7 +2709,7 @@ fn test_sortition_with_burner_reward_set() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2916,7 +2916,7 @@ fn test_pox_btc_ops() { let mut chainstate = get_chainstate(path); let (stacker_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3001,7 +3001,7 @@ fn test_pox_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3219,7 +3219,7 @@ fn test_stx_transfer_btc_ops() { let mut chainstate = get_chainstate(path); let (sender_balance, 
burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3234,7 +3234,7 @@ fn test_stx_transfer_btc_ops() { let (recipient_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3348,7 +3348,7 @@ fn test_stx_transfer_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3691,13 +3691,13 @@ fn test_delegate_stx_btc_ops() { ); let first_delegation_info = get_delegation_info_pox_2( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, &first_del, ); let second_delegation_info = get_delegation_info_pox_2( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, &second_del, ); @@ -3744,7 +3744,7 @@ fn test_delegate_stx_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3988,7 +3988,7 @@ fn test_initial_coinbase_reward_distributions() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -4156,7 +4156,7 @@ fn test_epoch_switch_cost_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4176,7 
+4176,7 @@ fn test_epoch_switch_cost_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4193,7 +4193,7 @@ fn test_epoch_switch_cost_contract_instantiation() { // check that costs-2 contract DNE before epoch 2.05, and that it does exist after let does_costs_2_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4360,7 +4360,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4381,7 +4381,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4398,7 +4398,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { // check that pox-2 contract DNE before epoch 2.1, and that it does exist after let does_pox_2_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4569,7 +4569,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4590,7 +4590,7 @@ 
fn test_epoch_switch_pox_3_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4607,7 +4607,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { // check that pox-3 contract DNE before epoch 2.4, and that it does exist after let does_pox_3_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4855,7 +4855,7 @@ fn atlas_stop_start() { // check that the bns contract exists let does_bns_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| db.get_contract(&boot_code_id("bns", false))) @@ -5180,7 +5180,7 @@ fn test_epoch_verify_active_pox_contract() { // Query the pox.clar contract to ensure the total stacked amount is as expected let amount_locked_pox_1_res = get_total_stacked_info( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, curr_reward_cycle, false, @@ -5214,7 +5214,7 @@ fn test_epoch_verify_active_pox_contract() { // Query the pox-2.clar contract to ensure the total stacked amount is as expected let amount_locked_pox_2_res = get_total_stacked_info( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, curr_reward_cycle, true, @@ -5516,7 +5516,7 @@ fn test_sortition_with_sunset() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -5864,7 +5864,7 @@ fn test_sortition_with_sunset_and_epoch_switch() { assert_eq!( chainstate 
.with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -6774,7 +6774,7 @@ fn eval_at_chain_tip(chainstate_path: &str, sort_db: &SortitionDB, eval: &str) - let mut chainstate = get_chainstate(chainstate_path); chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_readonly_clarity_env( diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..abb89e1839 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -656,7 +656,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -670,7 +670,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index ab9ae6a5f9..da1a7af570 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -41,7 +41,9 @@ use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use crate::burnchains::{PrivateKey, PublicKey}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use 
crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::nakamoto::{ @@ -221,7 +223,7 @@ impl NakamotoBlockBuilder { pub fn load_tenure_info<'a>( &self, chainstate: &'a mut StacksChainState, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, cause: Option, ) -> Result, Error> { debug!("Nakamoto miner tenure begin"); @@ -297,7 +299,7 @@ impl NakamotoBlockBuilder { /// yet known). pub fn tenure_begin<'a, 'b>( &mut self, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerTenureInfo<'a>, ) -> Result, Error> { let SetupBlockResult { @@ -396,7 +398,7 @@ impl NakamotoBlockBuilder { pub fn build_nakamoto_block( // not directly used; used as a handle to open other chainstates chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mempool: &mut MemPoolDB, // Stacks header we're building off of. 
parent_stacks_header: &StacksHeaderInfo, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index f8d048aaf1..3cd7d2dde5 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -128,7 +128,7 @@ pub fn get_account( chainstate .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip.index_block_hash(), |clarity_conn| { StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index b2b275a0e1..ab338cc90e 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -560,9 +560,13 @@ impl TestStacksNode { tenure_change = None; coinbase = None; - let (mut nakamoto_block, size, cost) = - Self::make_nakamoto_block_from_txs(builder, chainstate, &sortdb.index_conn(), txs) - .unwrap(); + let (mut nakamoto_block, size, cost) = Self::make_nakamoto_block_from_txs( + builder, + chainstate, + &sortdb.index_handle_at_tip(), + txs, + ) + .unwrap(); miner.sign_nakamoto_block(&mut nakamoto_block); let tenure_sn = @@ -638,7 +642,7 @@ impl TestStacksNode { pub fn make_nakamoto_block_from_txs( mut builder: NakamotoBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mut txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), ChainstateError> { use clarity::vm::ast::ASTRules; diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index d3e8a494de..e6e02eaab5 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -569,7 +569,7 @@ impl StacksChainState { boot_contract_name: &str, code: &str, ) -> Result { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let 
dbconn = self.state_index.sqlite_conn(); self.clarity_state .eval_read_only( @@ -631,7 +631,7 @@ impl StacksChainState { let cost_track = LimitedCostTracker::new_free(); let sender = PrincipalData::Standard(StandardPrincipalData::transient()); let result = self - .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( mainnet, chain_id, @@ -1668,7 +1668,7 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let value = peer.chainstate().clarity_eval_read_only( &iconn, &stacks_block_id, @@ -1696,7 +1696,7 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let value = peer.chainstate().clarity_eval_read_only( &iconn, &stacks_block_id, @@ -1842,9 +1842,11 @@ pub mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_account(clarity_tx, addr) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_account(clarity_tx, addr), + ) .unwrap() }); account @@ -1856,9 +1858,11 @@ pub mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, 
|clarity_tx| { - StacksChainState::get_contract(clarity_tx, addr).unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_contract(clarity_tx, addr).unwrap(), + ) .unwrap() }); contract_opt @@ -2799,7 +2803,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -2926,7 +2930,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3012,7 +3016,7 @@ pub mod test { let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -3118,7 +3122,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3229,7 +3233,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3447,7 +3451,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3705,7 +3709,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3980,7 +3984,7 
@@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4224,7 +4228,7 @@ pub mod test { let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -4397,7 +4401,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4696,7 +4700,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -5277,7 +5281,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -5646,7 +5650,7 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); if tenure_id == 2 { // block should be all the transactions diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs 
index 2c47f0ec0b..b5f1859355 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -667,7 +667,7 @@ where { with_sortdb(peer, |ref mut c, ref sortdb| { let headers_db = HeadersDBConn(c.state_index.sqlite_conn()); - let burn_db = sortdb.index_conn(); + let burn_db = sortdb.index_handle_at_tip(); let mut read_only_clar = c .clarity_state .read_only_connection(tip, &headers_db, &burn_db); @@ -3794,7 +3794,7 @@ fn test_get_pox_addrs() { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3896,7 +3896,7 @@ fn test_get_pox_addrs() { let addrs_and_payout = with_sortdb(&mut peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate .maybe_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip_index_block, |clarity_tx| { clarity_tx @@ -4091,7 +4091,7 @@ fn test_stack_with_segwit() { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4193,7 +4193,7 @@ fn test_stack_with_segwit() { let addrs_and_payout = with_sortdb(&mut peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate .maybe_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip_index_block, |clarity_tx| { clarity_tx diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index f0c7a9ef75..3134b4773a 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -3348,24 +3348,28 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate - 
.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { - clarity_tx - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::Standard(StandardPrincipalData::transient()), - None, - LimitedCostTracker::new_free(), - |env| { - env.eval_read_only( - &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), - ) - }, - ) - .unwrap() - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip_index_block, + |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }, + ) .unwrap(); addrs }) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index be7675c700..16143c98f2 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -833,24 +833,28 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { - clarity_tx - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::Standard(StandardPrincipalData::transient()), - None, - LimitedCostTracker::new_free(), - |env| { - env.eval_read_only( - &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? 
pox-addrs u{})", &burn_height), - ) - }, - ) - .unwrap() - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip_index_block, + |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }, + ) .unwrap(); addrs }) @@ -2945,7 +2949,7 @@ fn verify_signer_key_sig( ) -> Value { let result: Value = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &latest_block, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &latest_block, |clarity_tx| { clarity_tx .with_readonly_clarity_env( false, diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index a97a0c1e09..67fffd878a 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -483,7 +483,7 @@ pub fn readonly_call_with_sortdb( args: Vec, ) -> Value { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), tip, |connection| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |connection| { connection .with_readonly_clarity_env( false, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index dd70fcfb01..53e71b1321 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -10263,7 +10263,7 @@ pub mod test { ); let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -10515,7 +10515,7 @@ pub mod test { let anchored_block = 
StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11071,7 +11071,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11236,7 +11236,7 @@ pub mod test { let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); @@ -11394,7 +11394,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11919,7 +11919,7 @@ pub mod test { let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 92d32dd038..0e3ae3ae88 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -754,7 +754,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -777,7 +777,7 @@ mod test { // build 1-block microblock stream let microblocks = { let sortdb = peer.sortdb.take().unwrap(); - let sort_iconn = sortdb.index_conn(); + let 
sort_iconn = sortdb.index_handle_at_tip(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) @@ -852,18 +852,21 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_unconfirmed_clarity_tx( + &sortdb.index_handle_at_tip(), + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap() .unwrap(); peer.sortdb = Some(sortdb); @@ -876,13 +879,17 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &canonical_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap(); peer.sortdb = Some(sortdb); @@ -984,7 +991,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -1007,9 +1014,9 @@ mod test { // build microblock stream iteratively, and test balances at each additional microblock let sortdb = peer.sortdb.take().unwrap(); let microblocks = { - let sort_iconn = 
sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let mut microblock_builder = StacksMicroblockBuilder::new( @@ -1083,18 +1090,21 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_unconfirmed_clarity_tx( + &sortdb.index_handle_at_tip(), + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap() .unwrap(); peer.sortdb = Some(sortdb); @@ -1110,13 +1120,17 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &canonical_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap(); peer.sortdb = Some(sortdb); @@ -1270,7 +1284,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, anchored_tx], ) .unwrap(); @@ 
-1297,7 +1311,7 @@ mod test { Relayer::refresh_unconfirmed(&mut inner_node.chainstate, &mut sortdb); let microblock = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut microblock_builder = StacksMicroblockBuilder::resume_unconfirmed( &mut inner_node.chainstate, &sort_iconn, @@ -1386,12 +1400,12 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let db_recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { + .with_read_only_unconfirmed_clarity_tx(&sortdb.index_handle_at_tip(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f718a9fb36..450060d3a8 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -39,7 +39,9 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use stacks_common::util::vrf::*; use crate::burnchains::{Burnchain, PrivateKey, PublicKey}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::stacks::address::StacksAddressExtensions; @@ -1803,7 +1805,7 @@ impl StacksBlockBuilder { pub fn pre_epoch_begin<'a>( &mut self, chainstate: &'a mut StacksChainState, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, confirm_microblocks: bool, ) -> Result, Error> { debug!( @@ -1912,7 
+1914,7 @@ impl StacksBlockBuilder { /// returned ClarityTx object. pub fn epoch_begin<'a, 'b>( &mut self, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerEpochInfo<'a>, ) -> Result<(ClarityTx<'b, 'b>, ExecutionCost), Error> { let SetupBlockResult { @@ -1974,7 +1976,7 @@ impl StacksBlockBuilder { pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, txs: Vec, ) -> Result<(StacksBlock, u64, ExecutionCost), Error> { Self::make_anchored_block_and_microblock_from_txs( @@ -1993,7 +1995,7 @@ impl StacksBlockBuilder { pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mut txs: Vec, mut mblock_txs: Vec, ) -> Result<(StacksBlock, u64, ExecutionCost, Option), Error> { @@ -2385,7 +2387,7 @@ impl StacksBlockBuilder { /// returns the assembled block, and the consumed execution budget. pub fn build_anchored_block( chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mempool: &mut MemPoolDB, parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of total_burn: u64, // the burn so far on the burnchain (i.e. 
from the last burnchain block) diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 8d65e40a4e..69dddd742c 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -228,7 +228,7 @@ fn test_bad_microblock_fees_pre_v210() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -551,7 +551,7 @@ fn test_bad_microblock_fees_fix_transition() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -907,7 +907,7 @@ fn test_get_block_info_v210() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1029,7 +1029,7 @@ fn test_get_block_info_v210() { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1296,7 +1296,7 @@ fn test_get_block_info_v210_no_microblocks() { ) .unwrap(); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, @@ -1333,7 +1333,7 @@ fn test_get_block_info_v210_no_microblocks() { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1678,7 +1678,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { anchored_txs.push(stx_transfer); } - let sort_ic = 
sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1803,7 +1803,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1911,29 +1911,33 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { // reported correctly. let recipient_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - let recipient_balance_val = clarity_tx - .with_readonly_clarity_env( - false, - CHAIN_ID_TESTNET, - ClarityVersion::Clarity2, - PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(), - None, - LimitedCostTracker::new_free(), - |env| { - if pay_to_contract { - env.eval_raw(&format!( - "(stx-get-balance '{}.{})", - &addr_anchored, contract_name - )) - } else { - env.eval_raw(&format!("(stx-get-balance '{})", &addr_recipient)) - } - }, - ) - .unwrap(); - recipient_balance_val.expect_u128().unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| { + let recipient_balance_val = clarity_tx + .with_readonly_clarity_env( + false, + CHAIN_ID_TESTNET, + ClarityVersion::Clarity2, + PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(), + None, + LimitedCostTracker::new_free(), + |env| { + if pay_to_contract { + env.eval_raw(&format!( + "(stx-get-balance '{}.{})", + &addr_anchored, contract_name + )) + } else { + env.eval_raw(&format!("(stx-get-balance '{})", &addr_recipient)) + } + }, + ) + .unwrap(); + recipient_balance_val.expect_u128().unwrap() + }, + ) .unwrap(); // N.B. `stx-get-balance` will reflect one more block-reward than `get-block-info? 
diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index ae428af15f..9abd98acb1 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -130,7 +130,7 @@ fn test_build_anchored_blocks_empty() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -254,7 +254,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -391,7 +391,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -557,7 +557,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -712,7 +712,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let coinbase_tx = make_coinbase(miner, tenure_id); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -968,7 +968,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let coinbase_tx = make_coinbase(miner, tenure_id); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, 
mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1250,7 +1250,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1498,7 +1498,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1652,7 +1652,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), mempool_to_use, &parent_tip, tip.total_burn, @@ -1759,7 +1759,7 @@ fn test_build_anchored_blocks_empty_chaintips() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1966,7 +1966,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2133,7 +2133,7 @@ fn test_build_anchored_blocks_invalid() { let coinbase_tx = make_coinbase(miner, tenure_id as usize); let mut anchored_block = StacksBlockBuilder::build_anchored_block( - chainstate, &sortdb.index_conn(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, + chainstate, &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, ).unwrap(); if tenure_id == bad_block_tenure { @@ -2403,7 +2403,7 @@ fn test_build_anchored_blocks_bad_nonces() { let 
anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2531,7 +2531,7 @@ fn test_build_microblock_stream_forks() { // produce the microblock stream for the parent, which this tenure's anchor // block will confirm. - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) @@ -2654,7 +2654,7 @@ fn test_build_microblock_stream_forks() { let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2858,7 +2858,7 @@ fn test_build_microblock_stream_forks_with_descendants() { // produce the microblock stream for the parent, which this tenure's anchor // block will confirm. - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) @@ -3081,7 +3081,7 @@ fn test_build_microblock_stream_forks_with_descendants() { let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, parent_tip.anchored_header.as_stacks_epoch2().unwrap().total_work.burn + 1000, @@ -3186,15 +3186,19 @@ fn test_build_microblock_stream_forks_with_descendants() { test_debug!("Check {} in {} for report", &reporter_addr, &chain_tip); peer.with_db_state(|ref mut sortdb, ref mut chainstate, _, _| { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - // the key at height 1 should be reported as poisoned - let report = StacksChainState::get_poison_microblock_report(clarity_tx, 1) - .unwrap() - .unwrap(); - assert_eq!(report.0, reporter_addr); - assert_eq!(report.1, seq); - Ok(()) 
- }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &chain_tip, + |clarity_tx| { + // the key at height 1 should be reported as poisoned + let report = StacksChainState::get_poison_microblock_report(clarity_tx, 1) + .unwrap() + .unwrap(); + assert_eq!(report.0, reporter_addr); + assert_eq!(report.1, seq); + Ok(()) + }, + ) .unwrap() }) .unwrap(); @@ -3659,7 +3663,7 @@ fn test_contract_call_across_clarity_versions() { } } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let builder = StacksBlockBuilder::make_block_builder( &burnchain, @@ -3700,7 +3704,7 @@ fn test_contract_call_across_clarity_versions() { let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); peer.chainstate().with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { for tenure_id in 1..num_blocks { @@ -3919,7 +3923,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), contract_spends_too_much_tx.clone()] ) { assert_eq!(txid, contract_spends_too_much_txid); @@ -4096,7 +4100,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), spend_too_much.clone()] ) { assert_eq!(txid, spend_too_much.txid()); @@ -4146,7 +4150,7 @@ fn test_is_tx_problematic() { let err = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), runtime_checkerror_problematic.clone()] ); @@ -4198,7 +4202,7 @@ fn test_is_tx_problematic() { if let 
Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), runtime_checkerror_problematic.clone()] ) { assert_eq!(txid, runtime_checkerror_problematic.txid()); @@ -4229,7 +4233,7 @@ fn test_is_tx_problematic() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4392,7 +4396,7 @@ fn mempool_incorporate_pox_unlocks() { // this will be the height of the block that includes this new tenure let my_height = first_stacks_block_height + 1 + tenure_id; - let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_conn(), &parent_tip.index_block_hash(), |clarity_tx| { + let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &parent_tip.index_block_hash(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { let burn_block_height = db.get_current_burnchain_block_height().unwrap() as u64; let v1_unlock_height = db.get_v1_unlock_height(); @@ -4472,7 +4476,7 @@ fn mempool_incorporate_pox_unlocks() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4612,7 +4616,7 @@ fn test_fee_order_mismatch_nonce_order() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index cc2fe940b1..21671d00c0 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -150,7 +150,7 @@ where 
builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -336,7 +336,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -483,7 +483,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -531,7 +531,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -820,7 +820,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -868,7 +868,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1085,7 +1085,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1134,7 +1134,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = 
builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1433,7 +1433,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1478,7 +1478,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1680,7 +1680,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1728,7 +1728,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1988,7 +1988,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2033,7 +2033,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2235,7 +2235,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2283,7 +2283,7 @@ where builder.chain_tip.stacks_block_height, ); - 
let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 22a331b193..71d1a7c019 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1316,9 +1316,11 @@ pub fn get_stacks_account(peer: &mut TestPeer, addr: &PrincipalData) -> StacksAc let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let acct = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_account(clarity_tx, addr) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_account(clarity_tx, addr), + ) .unwrap(); Ok(acct) }) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 410c59ba81..b15324f96f 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -428,14 +428,21 @@ impl SortitionDBRef for SortitionHandleTx<'_> { } } -impl SortitionDBRef for SortitionDBConn<'_> { +impl SortitionDBRef for SortitionHandleConn<'_> { fn get_pox_start_cycle_info( &self, sortition_id: &SortitionId, parent_stacks_block_burn_ht: u64, cycle_index: u64, ) -> Result, ChainstateError> { - let mut handle = self.as_handle(sortition_id); + let readonly_marf = self + .index + .reopen_readonly() + .expect("BUG: failure trying to get a read-only interface into the sortition db."); + let mut context = self.context.clone(); + context.chain_tip = sortition_id.clone(); + let mut handle = SortitionHandleConn::new(&readonly_marf, context); + get_pox_start_cycle_info(&mut handle, parent_stacks_block_burn_ht, cycle_index) } @@ -578,7 +585,7 @@ impl BurnStateDB for 
SortitionHandleTx<'_> { } } -impl BurnStateDB for SortitionDBConn<'_> { +impl BurnStateDB for SortitionHandleConn<'_> { fn get_tip_burn_block_height(&self) -> Option { let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; tip.block_height.try_into().ok() @@ -601,8 +608,6 @@ impl BurnStateDB for SortitionDBConn<'_> { height: u32, sortition_id: &SortitionId, ) -> Option { - let db_handle = SortitionHandleConn::open_reader(self, &sortition_id).ok()?; - let current_height = match self.get_burn_block_height(sortition_id) { None => { return None; @@ -614,7 +619,7 @@ impl BurnStateDB for SortitionDBConn<'_> { return None; } - match db_handle.get_block_snapshot_by_height(height as u64) { + match self.get_block_snapshot_by_height(height as u64) { Ok(Some(x)) => Some(x.burn_header_hash), _ => return None, } diff --git a/stackslib/src/clarity_vm/tests/epoch_switch.rs b/stackslib/src/clarity_vm/tests/epoch_switch.rs index af305f1055..25d01c4905 100644 --- a/stackslib/src/clarity_vm/tests/epoch_switch.rs +++ b/stackslib/src/clarity_vm/tests/epoch_switch.rs @@ -130,7 +130,7 @@ fn test_vm_epoch_switch() { // impl BurnStateDB for SortitionHandleConn { - let burndb = db.index_conn(); + let burndb = db.index_handle_at_tip(); test_burnstatedb_epoch(&burndb, start_height, end_height, 8, 12, 16); } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index a1135989a4..f3d301b0dc 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -322,7 +322,7 @@ impl MemPoolAdmitter { tx_size: u64, ) -> Result<(), MemPoolRejection> { chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &self.cur_consensus_hash, &self.cur_block, tx, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index bd441cc023..9a643dd8b3 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -641,7 +641,7 @@ simulating a miner. 
let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &mut mempool_db, &parent_header, chain_tip.total_burn, @@ -1179,7 +1179,7 @@ simulating a miner. // simulate the p2p refreshing itself // update p2p's read-only view of the unconfirmed state p2p_chainstate - .refresh_unconfirmed_state(&p2p_new_sortition_db.index_conn()) + .refresh_unconfirmed_state(&p2p_new_sortition_db.index_handle_at_tip()) .expect("Failed to open unconfirmed Clarity state"); sleep_ms(100); @@ -1522,7 +1522,7 @@ simulating a miner. let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &mut mempool_db, &parent_header, chain_tip.total_burn, diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs index eb07206772..dc24de1ae4 100644 --- a/stackslib/src/net/api/callreadonly.rs +++ b/stackslib/src/net/api/callreadonly.rs @@ -234,52 +234,56 @@ impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { cost_limit.write_length = 0; cost_limit.write_count = 0; - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - let cost_track = clarity_tx - .with_clarity_db_readonly(|clarity_db| { - LimitedCostTracker::new_mid_block( - mainnet, chain_id, cost_limit, clarity_db, epoch, - ) - }) - .map_err(|_| { - ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) - })?; - - let clarity_version = clarity_tx - .with_analysis_db_readonly(|analysis_db| { - analysis_db.get_clarity_version(&contract_identifier) - }) - .map_err(|_| { - ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( - "{}", - &contract_identifier - ))) - })?; - - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - clarity_version, - sender, - sponsor, - cost_track, - |env| { - // we want to execute any function as long as no actual writes are made as - // opposed to be 
limited to purely calling `define-read-only` functions, - // so use `read_only = false`. This broadens the number of functions that - // can be called, and also circumvents limitations on `define-read-only` - // functions that can not use `contrac-call?`, even when calling other - // read-only functions - env.execute_contract( - &contract_identifier, - function.as_str(), - &args, - false, - ) - }, - ) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + let cost_track = clarity_tx + .with_clarity_db_readonly(|clarity_db| { + LimitedCostTracker::new_mid_block( + mainnet, chain_id, cost_limit, clarity_db, epoch, + ) + }) + .map_err(|_| { + ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) + })?; + + let clarity_version = clarity_tx + .with_analysis_db_readonly(|analysis_db| { + analysis_db.get_clarity_version(&contract_identifier) + }) + .map_err(|_| { + ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( + "{}", + &contract_identifier + ))) + })?; + + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + clarity_version, + sender, + sponsor, + cost_track, + |env| { + // we want to execute any function as long as no actual writes are made as + // opposed to be limited to purely calling `define-read-only` functions, + // so use `read_only = false`. 
This broadens the number of functions that + // can be called, and also circumvents limitations on `define-read-only` + // functions that can not use `contrac-call?`, even when calling other + // read-only functions + env.execute_contract( + &contract_identifier, + function.as_str(), + &args, + false, + ) + }, + ) + }, + ) }); // decode the response diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index 83a39f3031..cbd4338ac6 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -146,76 +146,80 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let account_opt_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_account_balance(&account); - let burn_block_height = - clarity_db.get_current_burnchain_block_height().ok()? as u64; - let v1_unlock_height = clarity_db.get_v1_unlock_height(); - let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; - let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; - let (balance, balance_proof) = if with_proof { - clarity_db - .get_data_with_proof::(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) - } else { - clarity_db - .get_data::(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| (STXBalance::zero(), None)) - }; + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_account_balance(&account); + let burn_block_height = + clarity_db.get_current_burnchain_block_height().ok()? 
as u64; + let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; + let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; + let (balance, balance_proof) = if with_proof { + clarity_db + .get_data_with_proof::(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) + } else { + clarity_db + .get_data::(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| (STXBalance::zero(), None)) + }; - let key = ClarityDatabase::make_key_for_account_nonce(&account); - let (nonce, nonce_proof) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (0, Some("".into()))) - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| (0, None)) - }; + let key = ClarityDatabase::make_key_for_account_nonce(&account); + let (nonce, nonce_proof) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (0, Some("".into()))) + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| (0, None)) + }; - let unlocked = balance - .get_available_balance_at_burn_block( + let unlocked = balance + .get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + v3_unlock_height, + ) + .ok()?; + + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height, - ) - .ok()?; - - let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( - burn_block_height, - v1_unlock_height, - v2_unlock_height, - v3_unlock_height, - ); + ); - let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); - let locked = 
format!("0x{}", to_hex(&locked.to_be_bytes())); + let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); + let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); - Some(AccountEntryResponse { - balance, - locked, - unlock_height, - nonce, - balance_proof, - nonce_proof, + Some(AccountEntryResponse { + balance, + locked, + unlock_height, + nonce, + balance_proof, + nonce_proof, + }) }) - }) - }) + }, + ) }); let account = if let Ok(Some(account)) = account_opt_res { diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs index f9b2881ac5..4b3068dd5d 100644 --- a/stackslib/src/net/api/getconstantval.rs +++ b/stackslib/src/net/api/getconstantval.rs @@ -144,20 +144,24 @@ impl RPCRequestHandler for RPCGetConstantValRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let contract = clarity_db.get_contract(&contract_identifier).ok()?; - - let cst = contract - .contract_context - .lookup_variable(constant_name.as_str())? - .serialize_to_hex() - .ok()?; - - let data = format!("0x{cst}"); - Some(ConstantValResponse { data }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let contract = clarity_db.get_contract(&contract_identifier).ok()?; + + let cst = contract + .contract_context + .lookup_variable(constant_name.as_str())? 
+ .serialize_to_hex() + .ok()?; + + let data = format!("0x{cst}"); + Some(ConstantValResponse { data }) + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs index 7fc38433e7..35914de9e9 100644 --- a/stackslib/src/net/api/getcontractabi.rs +++ b/stackslib/src/net/api/getcontractabi.rs @@ -132,14 +132,18 @@ impl RPCRequestHandler for RPCGetContractAbiRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - clarity_tx.with_analysis_db_readonly(|db| { - db.load_contract(&contract_identifier, &epoch) - .ok()? - .map(|contract| contract.contract_interface) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + clarity_tx.with_analysis_db_readonly(|db| { + db.load_contract(&contract_identifier, &epoch) + .ok()? 
+ .map(|contract| contract.contract_interface) + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index 32963f5319..1c20bffd1b 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -140,30 +140,34 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let source = db.get_contract_src(&contract_identifier)?; - let contract_commit_key = make_contract_hash_key(&contract_identifier); - let (contract_commit, proof) = if with_proof { - db.get_data_with_proof::(&contract_commit_key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - db.get_data::(&contract_commit_key) - .ok() - .flatten() - .map(|a| (a, None))? - }; - - let publish_height = contract_commit.block_height; - Some(ContractSrcResponse { - source, - publish_height, - marf_proof: proof, + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let source = db.get_contract_src(&contract_identifier)?; + let contract_commit_key = make_contract_hash_key(&contract_identifier); + let (contract_commit, proof) = if with_proof { + db.get_data_with_proof::(&contract_commit_key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + db.get_data::(&contract_commit_key) + .ok() + .flatten() + .map(|a| (a, None))? 
+ }; + + let publish_height = contract_commit.block_height; + Some(ContractSrcResponse { + source, + publish_height, + marf_proof: proof, + }) }) - }) - }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index f624f3ca58..124fb4856f 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ b/stackslib/src/net/api/getdatavar.rs @@ -154,26 +154,30 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { ); let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None))? - }; - - let data = format!("0x{}", value_hex); - Some(DataVarResponse { data, marf_proof }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None))? 
+ }; + + let data = format!("0x{}", value_hex); + Some(DataVarResponse { data, marf_proof }) + }) + }, + ) }); let data_resp = match data_opt { diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs index 16b1e2fd33..aac4079074 100644 --- a/stackslib/src/net/api/getistraitimplemented.rs +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -160,34 +160,38 @@ impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let analysis = db - .load_contract_analysis(&contract_identifier) - .ok() - .flatten()?; - if analysis.implemented_traits.contains(&trait_id) { - Some(GetIsTraitImplementedResponse { - is_implemented: true, - }) - } else { - let trait_defining_contract = db - .load_contract_analysis(&trait_id.contract_identifier) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let analysis = db + .load_contract_analysis(&contract_identifier) .ok() .flatten()?; - let trait_definition = - trait_defining_contract.get_defined_trait(&trait_id.name)?; - let is_implemented = analysis - .check_trait_compliance( - &db.get_clarity_epoch_version().ok()?, - &trait_id, - trait_definition, - ) - .is_ok(); - Some(GetIsTraitImplementedResponse { is_implemented }) - } - }) - }) + if analysis.implemented_traits.contains(&trait_id) { + Some(GetIsTraitImplementedResponse { + is_implemented: true, + }) + } else { + let trait_defining_contract = db + .load_contract_analysis(&trait_id.contract_identifier) + .ok() + .flatten()?; + let trait_definition = + trait_defining_contract.get_defined_trait(&trait_id.name)?; + let is_implemented = analysis + .check_trait_compliance( + &db.get_clarity_epoch_version().ok()?, + 
&trait_id, + trait_definition, + ) + .is_ok(); + Some(GetIsTraitImplementedResponse { is_implemented }) + } + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index b5db5af041..5d0cd7504f 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -183,34 +183,38 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (none_response, Some("".into())) - }) - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (none_response, None) - }) - }; + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, Some("".into())) + }) + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, None) + }) + }; - let data = format!("0x{}", value_hex); - MapEntryResponse { data, marf_proof } - }) - }) + let data = format!("0x{}", value_hex); + MapEntryResponse { data, marf_proof } + }) + }, + ) 
}); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index 9e3cd906d4..c3de3ab0da 100644 --- a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -190,7 +190,7 @@ impl RPCPoxInfoData { + 1; let data = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( mainnet, chain_id, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 2669c64356..0423d5c57b 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -36,7 +36,7 @@ use stacks_common::util::retry::BoundReader; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; @@ -206,7 +206,7 @@ impl NakamotoBlockProposal { }); } - let burn_dbconn = sortdb.index_conn(); + let burn_dbconn: SortitionHandleConn = sortdb.index_handle_at_tip(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let mut db_handle = sortdb.index_handle(&sort_tip); let expected_burn_opt = diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e..5f9dfef7f8 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -454,7 +454,7 @@ impl<'a> TestRPC<'a> { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![tx_coinbase_signed.clone(), 
tx_contract_signed.clone()], ) .unwrap(); @@ -477,7 +477,7 @@ impl<'a> TestRPC<'a> { let sortdb = peer_1.sortdb.take().unwrap(); Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); let mblock = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut microblock_builder = StacksMicroblockBuilder::new( stacks_block.block_hash(), consensus_hash.clone(), @@ -529,11 +529,11 @@ impl<'a> TestRPC<'a> { let sortdb2 = peer_2.sortdb.take().unwrap(); peer_1 .chainstate() - .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb1.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); peer_2 .chainstate() - .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb2.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); peer_1.sortdb = Some(sortdb1); peer_2.sortdb = Some(sortdb2); @@ -732,7 +732,7 @@ impl<'a> TestRPC<'a> { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![tx_coinbase_signed.clone()], ) .unwrap(); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..6ab6b7a302 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3531,7 +3531,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3742,7 +3742,7 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795a..c1318e6647 100644 
--- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5892,7 +5892,7 @@ impl PeerNetwork { return false; } let stacks_epoch = match sortdb - .index_conn() + .index_handle_at_tip() .get_stacks_epoch(burnchain_tip.block_height as u32) { Some(epoch) => epoch, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7236ef76e4..6f7ad30057 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1840,7 +1840,7 @@ impl Relayer { &canonical_consensus_hash, &canonical_block_hash ); let processed_unconfirmed_state = - chainstate.reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip)?; + chainstate.reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip)?; Ok(processed_unconfirmed_state) } @@ -4043,15 +4043,19 @@ pub mod test { StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); let cur_nonce = stacks_node .chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &chain_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }, + ) .unwrap(); test_debug!( @@ -5415,7 +5419,7 @@ pub mod test { let block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone()], ) .unwrap() @@ -5482,7 +5486,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), bad_tx.clone()], ) { @@ -5504,7 +5508,7 @@ pub mod test { let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( 
block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone()], ) .unwrap(); @@ -5521,7 +5525,7 @@ pub mod test { let merkle_tree = MerkleTree::::new(&txid_vecs); bad_block.header.tx_merkle_root = merkle_tree.root(); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) .unwrap(); @@ -5806,7 +5810,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -5985,7 +5989,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, versioned_contract], ) .unwrap(); @@ -6172,7 +6176,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -6212,7 +6216,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, @@ -6262,7 +6266,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index f2d8521ae4..de642b98bb 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ 
b/stackslib/src/net/stackerdb/config.rs @@ -493,7 +493,7 @@ impl StackerDBConfig { let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)? .expect("FATAL: no epoch defined"); - let dbconn = sortition_db.index_conn(); + let dbconn = sortition_db.index_handle_at_tip(); // check the target contract let res = chainstate.with_read_only_clarity_tx(&dbconn, &chain_tip_hash, |clarity_tx| { diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5e9ea0daf2..bc242dc246 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -624,7 +624,7 @@ fn make_contract_call_transaction( let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); let cur_nonce = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &chain_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_nonce(&spending_account.origin_address().unwrap().into()) @@ -807,7 +807,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { StacksBlockBuilder::make_anchored_block_and_microblock_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, stack_tx], vec![mblock_tx], ) @@ -1424,7 +1424,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, parent_tip diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d6edd79963..a7e78bc37d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -410,21 +410,25 @@ impl 
BlockMinerThread { // Get all nonces for the signers from clarity DB to use to validate transactions let account_nonces = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - addresses - .iter() - .map(|address| { - ( - address.clone(), - clarity_db - .get_account_nonce(&address.clone().into()) - .unwrap_or(0), - ) - }) - .collect::>() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + addresses + .iter() + .map(|address| { + ( + address.clone(), + clarity_db + .get_account_nonce(&address.clone().into()) + .unwrap_or(0), + ) + }) + .collect::>() + }) + }, + ) .unwrap_or_default(); let mut filtered_transactions: HashMap = HashMap::new(); for (_slot, signer_message) in signer_messages { @@ -757,7 +761,7 @@ impl BlockMinerThread { // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_block.consensus_hash, @@ -933,7 +937,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c4eff65ff0..ac379b86e7 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -753,7 +753,7 @@ impl MicroblockMinerThread { .epoch_id; let mint_result = { - let ic = sortdb.index_conn(); + let ic = sortdb.index_handle_at_tip(); let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( chainstate, &ic, @@ -2352,7 
+2352,7 @@ impl BlockMinerThread { } let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -2382,7 +2382,7 @@ impl BlockMinerThread { // try again match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -4047,7 +4047,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 77117a6822..ba5b7e204e 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -891,7 +891,7 @@ impl Node { let mut fee_estimator = self.config.make_fee_estimator(); let stacks_epoch = db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&processed_block.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Some(estimator) = cost_estimator.as_mut() { diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index c7212d4132..4c81867369 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -89,11 +89,11 @@ impl RunLoop { let _ = burnchain.sortdb_mut(); // Run the tenure, keep the artifacts - let artifacts_from_1st_tenure = match first_tenure.run(&burnchain.sortdb_ref().index_conn()) - { - Some(res) => res, - None => panic!("Error while running 1st tenure"), - }; + let artifacts_from_1st_tenure = + match 
first_tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) { + Some(res) => res, + None => panic!("Error while running 1st tenure"), + }; // Tenures are instantiating their own chainstate, so that nodes can keep a clean chainstate, // while having the option of running multiple tenures concurrently and try different strategies. @@ -136,7 +136,7 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_conn(), + &burnchain.sortdb_ref().index_handle_at_tip(), ); // If the node we're looping on won the sortition, initialize and configure the next tenure @@ -160,7 +160,7 @@ impl RunLoop { &chain_tip, &mut tenure, ); - tenure.run(&burnchain.sortdb_ref().index_conn()) + tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) } None => None, }; @@ -214,7 +214,7 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_conn(), + &burnchain.sortdb_ref().index_handle_at_tip(), ); } }; diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index fd7683f569..5dd67cddab 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant}; use stacks::burnchains::PoxConstants; #[cfg(test)] use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::db::sortdb::SortitionDBConn; +use stacks::chainstate::burn::db::sortdb::SortitionHandleConn; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::BlockBuilderSettings; use stacks::chainstate::stacks::{ @@ -72,7 +72,7 @@ impl<'a> Tenure { } } - pub fn run(&mut self, burn_dbconn: &SortitionDBConn) -> Option { + pub fn run(&mut self, burn_dbconn: &SortitionHandleConn) -> Option { info!("Node starting new tenure with VRF {:?}", self.vrf_seed); let duration_left: u128 = self.config.burnchain.commit_anchor_block_within as u128; diff --git 
a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 14db80f0b1..947eb633ee 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -5143,7 +5143,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -5423,7 +5423,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 4e387d6304..f3c48adc86 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -400,7 +400,7 @@ fn disable_pox() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1069,7 +1069,7 @@ fn pox_2_unlock_all() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 2cc9868dc6..2394e93621 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -493,7 +493,7 @@ fn fix_to_pox_contract() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let 
pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1213,7 +1213,7 @@ fn verify_auto_unlock_behavior() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7804781c11..c8fc6c80f3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1915,7 +1915,7 @@ fn block_proposal_api_endpoint() { ) .expect("Failed to build Nakamoto block"); - let burn_dbconn = btc_regtest_controller.sortdb_ref().index_conn(); + let burn_dbconn = btc_regtest_controller.sortdb_ref().index_handle_at_tip(); let mut miner_tenure_info = builder .load_tenure_info(&mut chainstate, &burn_dbconn, tenure_cause) .unwrap(); @@ -3582,8 +3582,8 @@ fn check_block_heights() { "get-heights", vec![], ); - let heights0 = heights0_value.expect_tuple().unwrap(); - info!("Heights from pre-epoch 3.0: {}", heights0); + let preheights = heights0_value.expect_tuple().unwrap(); + info!("Heights from pre-epoch 3.0: {}", preheights); // first block wakes up the run loop, wait until a key registration has been submitted. 
next_block_and(&mut btc_regtest_controller, 60, || { @@ -3601,7 +3601,7 @@ fn check_block_heights() { let info = get_chain_info_result(&naka_conf).unwrap(); println!("Chain info: {:?}", info); - let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; let mut last_tenure_height = last_stacks_block_height as u128; @@ -3615,14 +3615,8 @@ fn check_block_heights() { let heights0 = heights0_value.expect_tuple().unwrap(); info!("Heights from epoch 3.0 start: {}", heights0); assert_eq!( - heights0 - .get("burn-block-height") - .unwrap() - .clone() - .expect_u128() - .unwrap() - + 3, - last_burn_block_height, + heights0.get("burn-block-height"), + preheights.get("burn-block-height"), "Burn block height should match" ); assert_eq!( diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5c683f66fe..aecc0f9c12 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3463,12 +3463,15 @@ fn microblock_fork_poison_integration_test() { .unwrap(); chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + tip_hash, + ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_conn(), + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -3718,12 +3721,15 @@ fn microblock_integration_test() { .unwrap(); chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + tip_hash, + ) .unwrap(); let first_microblock = make_microblock( &privk, &mut 
chainstate, - &btc_regtest_controller.sortdb_ref().index_conn(), + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -9134,7 +9140,10 @@ fn use_latest_tip_integration_test() { // Initialize the unconfirmed state. chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + tip_hash, + ) .unwrap(); // Make microblock with two transactions. @@ -9157,7 +9166,7 @@ fn use_latest_tip_integration_test() { let mblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_conn(), + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), consensus_hash, stacks_block.clone(), vec_tx, From efdd01cdb96c8cf9fcd47c6fc221069da6b45fdd Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 21 May 2024 17:15:47 +0300 Subject: [PATCH 024/148] Add untilBurnHt check inside `RevokeDelegateStxCommand` --- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 1c30e3d569..17b0e3e3c1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -15,6 +15,7 @@ import { Cl, someCV, tupleCV } from "@stacks/transactions"; * * Constraints for running this command include: * - The `Stacker` has to currently be delegating. 
+ * - The `Stacker`'s delegation must not be expired */ export class RevokeDelegateStxCommand implements PoxCommand { readonly wallet: Wallet; @@ -31,10 +32,12 @@ export class RevokeDelegateStxCommand implements PoxCommand { check(model: Readonly): boolean { // Constraints for running this command include: // - The Stacker has to currently be delegating. - + // - The Stacker's delegation must not be expired + const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( model.stackingMinimum > 0 && - model.stackers.get(this.wallet.stxAddress)!.hasDelegated === true + stacker.hasDelegated === true && + stacker.delegatedUntilBurnHt > model.burnBlockHeight ); } From 194ad768d27be17bdd066677131fab528bc016cc Mon Sep 17 00:00:00 2001 From: BowTiedRadone <92028479+BowTiedRadone@users.noreply.github.com> Date: Tue, 21 May 2024 18:14:54 +0300 Subject: [PATCH 025/148] Update docs according to suggestion Co-authored-by: Nikos Baxevanis --- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 17b0e3e3c1..ca70eb7f0f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -15,7 +15,7 @@ import { Cl, someCV, tupleCV } from "@stacks/transactions"; * * Constraints for running this command include: * - The `Stacker` has to currently be delegating. - * - The `Stacker`'s delegation must not be expired + * - The `Stacker`'s delegation must not be expired. 
*/ export class RevokeDelegateStxCommand implements PoxCommand { readonly wallet: Wallet; From ca5a85301622f8cab833a4cce4a42e8625c284ee Mon Sep 17 00:00:00 2001 From: BowTiedRadone <92028479+BowTiedRadone@users.noreply.github.com> Date: Tue, 21 May 2024 18:15:09 +0300 Subject: [PATCH 026/148] Update comment according to suggestion Co-authored-by: Nikos Baxevanis --- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index ca70eb7f0f..54e4806757 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -32,7 +32,7 @@ export class RevokeDelegateStxCommand implements PoxCommand { check(model: Readonly): boolean { // Constraints for running this command include: // - The Stacker has to currently be delegating. - // - The Stacker's delegation must not be expired + // - The Stacker's delegation must not be expired. 
const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( model.stackingMinimum > 0 && From f72ecc8c30ffafb457d41fb1ffac0399253ae4a7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 21 May 2024 11:06:58 -0700 Subject: [PATCH 027/148] feat: update metrics in v0 signer, add tests --- stacks-signer/src/monitoring/server.rs | 7 ++++++- stacks-signer/src/v0/signer.rs | 13 ++++++++++--- testnet/stacks-node/Cargo.toml | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 17 +++++++++++++++++ 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index 9cecd41ed7..ffde008c9f 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -95,7 +95,12 @@ impl MonitoringServer { public_key, format!("http://{}", config.node_host), ); - server.update_metrics()?; + if let Err(e) = server.update_metrics() { + warn!( + "Monitoring: Error updating metrics when starting server: {:?}", + e + ); + }; server.main_loop() } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 682c1433c1..e5471053ae 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -239,6 +239,7 @@ impl Signer { "block_id" => %block_proposal.block.block_id(), ); let block_info = BlockInfo::from(block_proposal.clone()); + crate::monitoring::increment_block_proposals_received(); stacks_client .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { @@ -311,11 +312,17 @@ impl Signer { }; // Submit a proposal response to the .signers contract for miners debug!("{self}: Broadcasting a block response to stacks node: {response:?}"); - if let Err(e) = self + match self .stackerdb - .send_message_with_retry::(response.into()) + .send_message_with_retry::(response.clone().into()) { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + Ok(_) => { + let accepted = matches!(response, 
BlockResponse::Accepted(..)); + crate::monitoring::increment_block_responses_sent(accepted); + } + Err(e) => { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } } self.signer_db .insert_block(&block_info) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index bceb484cd7..42f4f858b7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -62,7 +62,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 55115c5f18..85b971a426 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -295,4 +295,21 @@ fn miner_gather_signatures() { valid }); assert!(all_signed); + + // Test prometheus metrics response + #[cfg(feature = "monitoring_prom")] + { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `5`, even though there is only one block proposed. 
+ let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); + assert!(metrics_response.contains(&expected_result)); + let expected_result = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers + ); + assert!(metrics_response.contains(&expected_result)); + } } From 690b9bcbbfa573d50702bb549176427d4c8255c4 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 21 May 2024 21:27:54 +0300 Subject: [PATCH 028/148] fix cause typo, add checker strict and drop table --- stackslib/src/chainstate/nakamoto/tenure.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index c9e5c0cf59..078e2514bf 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -119,6 +119,9 @@ use crate::util_lib::db::{ }; pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" + -- Drop the existing table if it exists + DROP TABLE IF EXISTS nakamoto_tenures; + CREATE TABLE nakamoto_tenures ( -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit -- was mined) @@ -129,7 +132,7 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" burn_view_consensus_hash TEXT NOT NULL, -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). -- this is equal to the `cause` field in a TenureChange - cause INETGER NOT NULL, + cause INTEGER NOT NULL, -- block hash of start-tenure block block_hash TEXT NOT NULL, -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) @@ -144,9 +147,10 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" num_blocks_confirmed INTEGER NOT NULL, -- this is the ith tenure transaction in its respective Nakamoto chain history. 
tenure_index INTEGER NOT NULL, - + -- schema version field + schema_version INTEGER NOT NULL DEFAULT 1, PRIMARY KEY(burn_view_consensus_hash,tenure_index) - ); + ) STRICT; CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); From aa9baf977c5edfa6b28e47d7f9896e1f3a84c2ef Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 21 May 2024 11:57:18 -0700 Subject: [PATCH 029/148] fix: move reward_set loading to top-level BlockMinerThread --- .../stacks-node/src/nakamoto_node/miner.rs | 57 +++++++++---------- 1 file changed, 27 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 29631cdec0..09a70b1178 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -155,6 +155,27 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + + let tip = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &self.burn_block.consensus_hash, + ) + .expect("FATAL: could not retrieve chain tip") + .expect("FATAL: could not retrieve chain tip"); + + let reward_set = sort_db + .get_preprocessed_reward_set_of(&tip.sortition_id) + .expect("FATAL: Error fetching reward set") + .expect("FATAL: No reward set found for miner") + .known_selected_anchor_block_owned() + .expect("FATAL: No reward set found for miner"); + let mut attempts = 0; // now, actually run this tenure loop { @@ -182,11 +203,12 @@ impl BlockMinerThread { }; if let 
Some(mut new_block) = new_block { - let (reward_set, signer_signature) = match self.gather_signatures( + let signer_signature = match self.gather_signatures( &mut new_block, self.burn_block.block_height, &mut stackerdbs, &mut attempts, + &reward_set, ) { Ok(x) => x, Err(e) => { @@ -198,7 +220,7 @@ impl BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), None, reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), None, reward_set.clone()) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -221,12 +243,6 @@ impl BlockMinerThread { self.mined_blocks.push(new_block); } - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); let wait_start = Instant::now(); while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -342,7 +358,8 @@ impl BlockMinerThread { burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + reward_set: &RewardSet, + ) -> Result, NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( "No mining key configured, cannot mine", @@ -370,26 +387,6 @@ impl BlockMinerThread { ) .expect("FATAL: building on a burn block that is before the first burn block"); - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failure while fetching reward set. Cannot initialize miner coordinator. 
{e:?}" - ))); - } - }; - - let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Current reward cycle did not select a reward set. Cannot mine!".into(), - )); - }; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( &reward_set, @@ -417,7 +414,7 @@ impl BlockMinerThread { &self.globals.counters, )?; - return Ok((reward_set, signature)); + return Ok(signature); } fn get_stackerdb_contract_and_slots( From 6cd331a44071b592c32d8bc91150dc2744e4274a Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 22 May 2024 20:11:01 +0300 Subject: [PATCH 030/148] migrate db to schema_2, fix typo and remove unused import --- stackslib/src/chainstate/nakamoto/mod.rs | 8 +++- .../src/chainstate/nakamoto/signer_set.rs | 1 - stackslib/src/chainstate/nakamoto/tenure.rs | 44 +++++++++++++++++-- stackslib/src/chainstate/stacks/db/mod.rs | 11 ++++- 4 files changed, 55 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 2cdf93eef5..bcc03cdeca 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -76,7 +76,7 @@ use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegister use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; -use crate::chainstate::nakamoto::tenure::NAKAMOTO_TENURES_SCHEMA; +use crate::chainstate::nakamoto::tenure::{NAKAMOTO_TENURES_SCHEMA_1, NAKAMOTO_TENURES_SCHEMA_2}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_4_NAME, SIGNERS_UPDATE_STATE}; use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState}; @@ -144,7 +144,7 @@ lazy_static! 
{ reward_set TEXT NOT NULL, PRIMARY KEY (index_block_hash) );"#.into(), - NAKAMOTO_TENURES_SCHEMA.into(), + NAKAMOTO_TENURES_SCHEMA_1.into(), r#" -- Table for Nakamoto block headers CREATE TABLE nakamoto_block_headers ( @@ -216,6 +216,10 @@ lazy_static! { UPDATE db_config SET version = "4"; "#.into(), ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_2: Vec = vec![ + NAKAMOTO_TENURES_SCHEMA_2.into() + ]; } /// Matured miner reward schedules diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index e776ca41db..a7e8df6ed0 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -58,7 +58,6 @@ use crate::chainstate::burn::operations::{ }; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; -use crate::chainstate::nakamoto::tenure::NAKAMOTO_TENURES_SCHEMA; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{ PoxVersions, RawRewardSetEntry, RewardSet, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 078e2514bf..fde669760d 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -118,8 +118,45 @@ use crate::util_lib::db::{ FromRow, }; -pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" - -- Drop the existing table if it exists +pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" + CREATE TABLE nakamoto_tenures ( + -- consensus hash of start-tenure block (i.e. 
the consensus hash of the sortition in which the miner's block-commit + -- was mined) + tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the previous tenure's start-tenure block + prev_tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the last-processed sortition + burn_view_consensus_hash TEXT NOT NULL, + -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). + -- this is equal to the `cause` field in a TenureChange + cause INETGER NOT NULL, + -- block hash of start-tenure block + block_hash TEXT NOT NULL, + -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) + block_id TEXT NOT NULL, + -- this field is the total number of _sortition-induced_ tenures in the chain history (including this tenure), + -- as of the _end_ of this block. A tenure can contain multiple TenureChanges; if so, then this + -- is the height of the _sortition-induced_ TenureChange that created it. + coinbase_height INTEGER NOT NULL, + -- number of blocks this tenure. + -- * for tenure-changes induced by sortitions, this is the number of blocks in the previous tenure + -- * for tenure-changes induced by extension, this is the number of blocks in the current tenure so far. + num_blocks_confirmed INTEGER NOT NULL, + -- this is the ith tenure transaction in its respective Nakamoto chain history. 
+ tenure_index INTEGER NOT NULL, + + PRIMARY KEY(burn_view_consensus_hash,tenure_index) + ); + CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); + CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); + CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); + CREATE INDEX nakamoto_tenures_by_burn_view_consensus_hash ON nakamoto_tenures(burn_view_consensus_hash); + CREATE INDEX nakamoto_tenures_by_tenure_index ON nakamoto_tenures(tenure_index); + CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); +"#; + +pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" + -- Drop the nakamoto_tenures table if it exists DROP TABLE IF EXISTS nakamoto_tenures; CREATE TABLE nakamoto_tenures ( @@ -147,8 +184,7 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" num_blocks_confirmed INTEGER NOT NULL, -- this is the ith tenure transaction in its respective Nakamoto chain history. 
tenure_index INTEGER NOT NULL, - -- schema version field - schema_version INTEGER NOT NULL DEFAULT 1, + PRIMARY KEY(burn_view_consensus_hash,tenure_index) ) STRICT; CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 374fc11ae1..f10a87dccc 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -53,7 +53,7 @@ use crate::chainstate::burn::operations::{ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, - NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, + NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -668,7 +668,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "4"; +pub const CHAINSTATE_VERSION: &'static str = "5"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1079,6 +1079,13 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "4" => { + // migrate to nakamoto 2 + info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", From 2416c4b78858ae7e43feb1a80c9439e194f769d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 15:39:03 -0400 Subject: [PATCH 031/148] feat: better API for querying preprocessed reward sets (e.g. by reward cycle ID, by sortition ID, etc.) 
--- stackslib/src/chainstate/burn/db/sortdb.rs | 190 +++++++++++++++++---- 1 file changed, 155 insertions(+), 35 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..eee027b72b 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -83,7 +83,6 @@ use crate::core::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_MAX, }; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; -use crate::net::Error as NetError; use crate::util_lib::db::{ db_mkdirs, get_ancestor_block_hash, opt_u64_to_sql, query_count, query_row, query_row_columns, query_row_panic, query_rows, sql_pragma, table_exists, tx_begin_immediate, tx_busy_handler, @@ -3542,20 +3541,104 @@ impl SortitionDB { sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { - let sql = "INSERT INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; + let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; sort_tx.execute(sql, args)?; Ok(()) } + /// Get the prepare phase start sortition ID of a reward cycle + fn inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + index_conn: &SortitionDBConn, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + let prepare_phase_start = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(pox_constants.prepare_length.into()); + + let first_sortition = get_ancestor_sort_id(index_conn, prepare_phase_start, tip)? 
+ .ok_or_else(|| { + error!( + "Could not find prepare phase start ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_height" => prepare_phase_start + ); + db_error::NotFoundError + })?; + Ok(first_sortition) + } + + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + &self.index_conn(), + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } + + /// Get the reward set for a reward cycle, given the reward cycle tip. + /// Return the reward cycle info for this reward cycle + fn inner_get_preprocessed_reward_set_for_reward_cycle( + index_conn: &SortitionDBConn, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + let first_sortition = Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + index_conn, + pox_constants, + first_block_height, + tip, + reward_cycle_id, + )?; + info!("Fetching preprocessed reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_sortition_id" => %first_sortition, + ); + + Ok(( + Self::get_preprocessed_reward_set(index_conn, &first_sortition)? 
+ .ok_or(db_error::NotFoundError)?, + first_sortition, + )) + } + + pub fn get_preprocessed_reward_set_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + Self::inner_get_preprocessed_reward_set_for_reward_cycle( + &self.index_conn(), + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } + /// Figure out the reward cycle for `tip` and lookup the preprocessed /// reward set (if it exists) for the active reward cycle during `tip` - pub fn get_preprocessed_reward_set_of( - &self, + fn inner_get_preprocessed_reward_set_of( + index_conn: &SortitionDBConn, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, - ) -> Result, db_error> { - let tip_sn = SortitionDB::get_block_snapshot(self.conn(), tip)?.ok_or_else(|| { + ) -> Result { + let tip_sn = SortitionDB::get_block_snapshot(index_conn, tip)?.ok_or_else(|| { error!( "Could not find snapshot for sortition while fetching reward set"; "tip_sortition_id" => %tip, @@ -3563,38 +3646,30 @@ impl SortitionDB { db_error::NotFoundError })?; - let reward_cycle_id = self - .pox_constants - .block_height_to_reward_cycle(self.first_block_height, tip_sn.block_height) + let reward_cycle_id = pox_constants + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) .expect("FATAL: stored snapshot with block height < first_block_height"); - let prepare_phase_start = self - .pox_constants - .reward_cycle_to_block_height(self.first_block_height, reward_cycle_id) - .saturating_sub(self.pox_constants.prepare_length.into()); + Self::inner_get_preprocessed_reward_set_for_reward_cycle( + index_conn, + pox_constants, + first_block_height, + tip, + reward_cycle_id, + ) + .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) + } - let first_sortition = get_ancestor_sort_id( + pub fn get_preprocessed_reward_set_of( + &self, + tip: &SortitionId, + ) -> Result { + 
Ok(Self::inner_get_preprocessed_reward_set_of( &self.index_conn(), - prepare_phase_start, - &tip_sn.sortition_id, - )? - .ok_or_else(|| { - error!( - "Could not find prepare phase start ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_height" => prepare_phase_start - ); - db_error::NotFoundError - })?; - - info!("Fetching preprocessed reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_sortition_id" => %first_sortition, - ); - - Self::get_preprocessed_reward_set(self.conn(), &first_sortition) + &self.pox_constants, + self.first_block_height, + tip, + )?) } /// Get a pre-processed reawrd set. @@ -3617,7 +3692,7 @@ impl SortitionDB { } pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option { - let Ok(Some(reward_info)) = &self.get_preprocessed_reward_set_of(&tip) else { + let Ok(reward_info) = &self.get_preprocessed_reward_set_of(&tip) else { return None; }; let Some(reward_set) = reward_info.known_selected_anchor_block() else { @@ -3842,6 +3917,46 @@ impl<'a> SortitionDBConn<'a> { serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); Ok(pox_addrs) } + + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + SortitionDB::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + self, + &self.context.pox_constants, + self.context.first_block_height, + tip, + reward_cycle_id, + ) + } + + pub fn get_preprocessed_reward_set_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + SortitionDB::inner_get_preprocessed_reward_set_for_reward_cycle( + self, + &self.context.pox_constants, + self.context.first_block_height, + tip, + reward_cycle_id, + ) + } + + pub fn get_preprocessed_reward_set_of( + &self, + tip: &SortitionId, + ) -> 
Result { + SortitionDB::inner_get_preprocessed_reward_set_of( + self, + &self.context.pox_constants, + self.context.first_block_height, + tip, + ) + } } // High-level functions used by ChainsCoordinator @@ -4559,12 +4674,14 @@ impl SortitionDB { Ok(ret) } + /// DO NOT CALL FROM CONSENSUS CODE pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.index_handle(&sortition_id) } /// Open a tx handle at the burn chain tip + /// DO NOT CALL FROM CONSENSUS CODE pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.tx_handle_begin(&sortition_id).unwrap() @@ -4574,6 +4691,7 @@ impl SortitionDB { /// Returns Ok(Some(tip info)) on success /// Returns Ok(None) if there are no Nakamoto blocks in this tip /// Returns Err(..) on other DB error + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_nakamoto_tip_hash_and_height( conn: &Connection, tip: &BlockSnapshot, @@ -4598,6 +4716,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_stacks_chain_tip_hash_and_height( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash, u64), db_error> { @@ -4625,6 +4744,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. 
+ /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_stacks_chain_tip_hash( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { From 753f9c97b7f605c76bb2517ffa59f2bf237ef45c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 15:39:38 -0400 Subject: [PATCH 032/148] chore: fix 4813 by re-trying to store a reward set if it has 'selected-and-unknown' status --- stackslib/src/chainstate/coordinator/mod.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 96eae44641..4c201bfb2c 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -120,7 +120,7 @@ impl NewBurnchainBlockStatus { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RewardCycleInfo { pub reward_cycle: u64, pub anchor_status: PoxAnchorBlockStatus, @@ -845,9 +845,21 @@ pub fn get_reward_cycle_info( .expect("FATAL: no start-of-prepare-phase sortition"); let mut tx = sort_db.tx_begin()?; - if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)? 
- .is_none() - { + let preprocessed_reward_set = + SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?; + let need_to_store = if let Some(reward_cycle_info) = preprocessed_reward_set { + // overwrite if we have an unknown anchor block + !reward_cycle_info.is_reward_info_known() + } else { + true + }; + if need_to_store { + test_debug!( + "Store preprocessed reward set for cycle {} (prepare start sortition {}): {:?}", + prev_reward_cycle, + &first_prepare_sn.sortition_id, + &reward_cycle_info + ); SortitionDB::store_preprocessed_reward_set( &mut tx, &first_prepare_sn.sortition_id, From ad1094bd0b377e1e69ddcefb24f880dd0024ccdd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 15:40:24 -0400 Subject: [PATCH 033/148] chore: blow away aggregate key verification code (it won't be used until a later date, and it's in the git history so we can fetch it later) --- stackslib/src/chainstate/nakamoto/mod.rs | 104 +---------------------- 1 file changed, 1 insertion(+), 103 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d8e67b3bd4..21e2022f95 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -496,16 +496,6 @@ impl NakamotoBlockHeader { Ok(()) } - /// Verify the block header against an aggregate public key - pub fn verify_threshold_signer( - &self, - signer_aggregate: &Point, - signature: &ThresholdSignature, - ) -> bool { - let message = self.signer_signature_hash().0; - signature.verify(signer_aggregate, &message) - } - /// Verify the block header against the list of signer signatures /// /// Validate against: @@ -1799,7 +1789,6 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - _aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); @@ -1847,21 
+1836,6 @@ impl NakamotoChainState { return Ok(false); }; - // TODO: epoch gate to verify aggregate signature - // let schnorr_signature = &block.header.signer_signature.0; - // if !db_handle.expects_signer_signature( - // &block.header.consensus_hash, - // schnorr_signature, - // &block.header.signer_signature_hash().0, - // aggregate_public_key, - // )? { - // let msg = format!( - // "Received block, but the signer signature does not match the active stacking cycle" - // ); - // warn!("{}", msg; "aggregate_key" => %aggregate_public_key); - // return Err(ChainstateError::InvalidStacksBlock(msg)); - // } - if let Err(e) = block.header.verify_signer_signatures(&reward_set) { warn!("Received block, but the signer signatures are invalid"; "block_id" => %block.block_id(), @@ -1881,83 +1855,6 @@ impl NakamotoChainState { Ok(true) } - /// Get the aggregate public key for the given block from the signers-voting contract - pub(crate) fn load_aggregate_public_key( - sortdb: &SortitionDB, - sort_handle: &SH, - chainstate: &mut StacksChainState, - for_burn_block_height: u64, - at_block_id: &StacksBlockId, - warn_if_not_found: bool, - ) -> Result { - // Get the current reward cycle - let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( - sort_handle.first_burn_block_height(), - for_burn_block_height, - ) else { - // This should be unreachable, but we'll return an error just in case. - let msg = format!( - "BUG: Failed to determine reward cycle of burn block height: {}.", - for_burn_block_height - ); - warn!("{msg}"); - return Err(ChainstateError::InvalidStacksBlock(msg)); - }; - - test_debug!( - "get-approved-aggregate-key at block {}, cycle {}", - at_block_id, - rc - ); - match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? 
{ - Some(key) => Ok(key), - None => { - // this can happen for a whole host of reasons - if warn_if_not_found { - warn!( - "Failed to get aggregate public key"; - "block_id" => %at_block_id, - "reward_cycle" => rc, - ); - } - Err(ChainstateError::InvalidStacksBlock( - "Failed to get aggregate public key".into(), - )) - } - } - } - - /// Get the aggregate public key for a block. - /// TODO: The block at which the aggregate public key is queried needs to be better defined. - /// See https://github.com/stacks-network/stacks-core/issues/4109 - pub fn get_aggregate_public_key( - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - sort_handle: &SH, - block: &NakamotoBlock, - ) -> Result { - let block_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)? - .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; - let aggregate_key_block_header = - Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? - .ok_or(ChainstateError::InvalidStacksBlock( - "Failed to get epoch ID".into(), - ))? 
- .epoch_id; - - let aggregate_public_key = Self::load_aggregate_public_key( - sortdb, - sort_handle, - chainstate, - block_sn.block_height, - &aggregate_key_block_header.index_block_hash(), - epoch_id >= StacksEpochId::Epoch30, - )?; - Ok(aggregate_public_key) - } - /// Return the total ExecutionCost consumed during the tenure up to and including /// `block` pub fn get_total_tenure_cost_at( @@ -2112,6 +2009,7 @@ impl NakamotoChainState { } /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_block_header( chainstate_conn: &Connection, sortdb: &SortitionDB, From da60d066825c06f8afe21bcb6e533a05e03b4fff Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:29:30 -0400 Subject: [PATCH 034/148] chore: API sync --- .../chainstate/nakamoto/coordinator/tests.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 14ba87292f..63651b3946 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -636,7 +636,7 @@ fn test_nakamoto_chainstate_getters() { // no tenures yet assert!( - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) .unwrap() .is_none() ); @@ -769,7 +769,7 @@ fn test_nakamoto_chainstate_getters() { // we now have a tenure, and it confirms the last epoch2 block let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) .unwrap() .unwrap(); assert_eq!(highest_tenure.coinbase_height, 12); @@ -797,7 +797,7 @@ fn test_nakamoto_chainstate_getters() { .is_some()); assert!(NakamotoChainState::check_tenure_continuity( 
chainstate.db(), - sort_tx.sqlite(), + &sort_tx, &blocks[0].header.consensus_hash, &blocks[1].header, ) @@ -969,7 +969,7 @@ fn test_nakamoto_chainstate_getters() { // we now have a new highest tenure let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) .unwrap() .unwrap(); assert_eq!(highest_tenure.coinbase_height, 13); @@ -994,14 +994,14 @@ fn test_nakamoto_chainstate_getters() { .is_none()); assert!(NakamotoChainState::check_tenure_continuity( chainstate.db(), - sort_tx.sqlite(), + &sort_tx, &new_blocks[0].header.consensus_hash, &new_blocks[1].header, ) .unwrap()); assert!(!NakamotoChainState::check_tenure_continuity( chainstate.db(), - sort_tx.sqlite(), + &sort_tx, &blocks[0].header.consensus_hash, &new_blocks[1].header, ) @@ -1613,7 +1613,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -1705,7 +1705,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -1800,7 +1800,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let sort_db = peer.sortdb.as_mut().unwrap(); let tip = 
SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -2001,7 +2001,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) From 67038a22890990c529b5df82dc168b0718765eb5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:29:40 -0400 Subject: [PATCH 035/148] chore: API sync --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 21e2022f95..bae7fd3436 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2826,7 +2826,7 @@ impl NakamotoChainState { // this block is mined in the ongoing tenure. if !Self::check_tenure_continuity( chainstate_tx, - burn_dbconn.sqlite(), + burn_dbconn, &parent_ch, &block.header, )? { From 127165ad7659550120e229692606d1b82b822310 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:29:50 -0400 Subject: [PATCH 036/148] fix: search for the highest tenure from the block-processor's given sortition tip. Do not attempt to get the canonical stacks or burnchain tips. 
--- stackslib/src/chainstate/nakamoto/tenure.rs | 27 +++++++++++++-------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index c9e5c0cf59..b68950a875 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -556,17 +556,24 @@ impl NakamotoChainState { } /// Get the highest non-empty processed tenure on the canonical sortition history. - pub fn get_highest_nakamoto_tenure( + pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, - sortdb_conn: &Connection, + sortdb_conn: &SH, ) -> Result, ChainstateError> { - // find the tenure for the Stacks chain tip - let (tip_ch, tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb_conn)?; - if tip_ch == FIRST_BURNCHAIN_CONSENSUS_HASH || tip_bhh == FIRST_STACKS_BLOCK_HASH { - // no chain tip, so no tenure - return Ok(None); + // NOTE: we do a *search* here in case the canonical Stacks pointer stored on the canonical + // sortition gets invalidated through a reorg. + let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? + .ok_or(ChainstateError::NoSuchBlockError)?; + loop { + if let Some(tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( + headers_conn, + &cursor.consensus_hash, + )? { + return Ok(Some(tenure)); + } + cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError)?; } - Self::get_nakamoto_tenure_change_by_tenure_id(headers_conn, &tip_ch) } /// Verify that a tenure change tx is a valid first-ever tenure change. It must connect to an @@ -857,9 +864,9 @@ impl NakamotoChainState { /// /// Returns Ok(bool) to indicate whether or not this block is in the same tenure as its parent. /// Returns Err(..) 
on DB error - pub(crate) fn check_tenure_continuity( + pub(crate) fn check_tenure_continuity( headers_conn: &Connection, - sortdb_conn: &Connection, + sortdb_conn: &SH, parent_ch: &ConsensusHash, block_header: &NakamotoBlockHeader, ) -> Result { From 9bc3d55b254a6cf0ea46c00f5730415c1307d548 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:30:18 -0400 Subject: [PATCH 037/148] feat: add a way to synthesize a reward set from a list of signers --- .../src/chainstate/nakamoto/test_signers.rs | 44 +++++++++++++++++-- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index a7e521c155..1a52b0a2c2 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -30,7 +30,9 @@ use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, +}; use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -52,6 +54,7 @@ use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::{NakamotoSignerEntry, PoxStartCycleInfo}; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{ @@ -205,9 +208,6 @@ impl TestSigners { self.generate_aggregate_key(cycle); } - // TODO: epoch gate for 
aggregated signatures - // let signer_signature = self.sign_block_with_aggregate_key(&block); - let signer_signature = self.generate_block_signatures(&block); test_debug!( @@ -229,6 +229,42 @@ impl TestSigners { block.header.signer_signature = signatures; } + /// Synthesize a reward set from the signer for the purposes of signing and verifying blocks + /// later on + pub fn synthesize_reward_set(&self) -> RewardSet { + let mut signer_entries = vec![]; + let mut pox_addrs = vec![]; + for key in self.signer_keys.iter() { + let signing_key_vec = Secp256k1PublicKey::from_private(key).to_bytes_compressed(); + let mut signing_key = [0u8; 33]; + signing_key[0..33].copy_from_slice(&signing_key_vec[0..33]); + + let nakamoto_signer_entry = NakamotoSignerEntry { + signing_key, + stacked_amt: 100_000_000_000, + weight: 1, + }; + let pox_addr = PoxAddress::Standard( + StacksAddress { + version: AddressHashMode::SerializeP2PKH.to_version_testnet(), + bytes: Hash160::from_data(&nakamoto_signer_entry.signing_key), + }, + Some(AddressHashMode::SerializeP2PKH), + ); + signer_entries.push(nakamoto_signer_entry); + pox_addrs.push(pox_addr); + } + + RewardSet { + rewarded_addresses: pox_addrs, + start_cycle_state: PoxStartCycleInfo { + missed_reward_slots: vec![], + }, + signers: Some(signer_entries), + pox_ustx_threshold: Some(100_000_000_000), + } + } + /// Sign a Nakamoto block and generate a vec of signatures. The signatures will /// be ordered by the signer's public keys, but will not be checked against the /// reward set. 
From b8e85e38e808e305b1ba73ec12a2d2aafec402ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:30:46 -0400 Subject: [PATCH 038/148] chore: move unused code for loading the aggregate public key into a test module, where it is still required for test coverage --- .../src/chainstate/nakamoto/tests/mod.rs | 87 ++++++++++++++++++- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index dd36004ff4..9aab60ab9b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -92,8 +92,89 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::StacksString; +/// WSTS aggregate public keys are not shipping immediately in Nakamoto, but there is still a lot +/// of test coverage for it. The code here is preserved to keep these tests working until WSTS's +/// coordinator implementation is ready. +impl NakamotoChainState { + /// Get the aggregate public key for the given block from the signers-voting contract + pub(crate) fn load_aggregate_public_key( + sortdb: &SortitionDB, + sort_handle: &SH, + chainstate: &mut StacksChainState, + for_burn_block_height: u64, + at_block_id: &StacksBlockId, + warn_if_not_found: bool, + ) -> Result { + // Get the current reward cycle + let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( + sort_handle.first_burn_block_height(), + for_burn_block_height, + ) else { + // This should be unreachable, but we'll return an error just in case. 
+ let msg = format!( + "BUG: Failed to determine reward cycle of burn block height: {}.", + for_burn_block_height + ); + warn!("{msg}"); + return Err(ChainstateError::InvalidStacksBlock(msg)); + }; + + test_debug!( + "get-approved-aggregate-key at block {}, cycle {}", + at_block_id, + rc + ); + match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { + Some(key) => Ok(key), + None => { + // this can happen for a whole host of reasons + if warn_if_not_found { + warn!( + "Failed to get aggregate public key"; + "block_id" => %at_block_id, + "reward_cycle" => rc, + ); + } + Err(ChainstateError::InvalidStacksBlock( + "Failed to get aggregate public key".into(), + )) + } + } + } + + /// Get the aggregate public key for a block. + /// TODO: The block at which the aggregate public key is queried needs to be better defined. + /// See https://github.com/stacks-network/stacks-core/issues/4109 + pub fn get_aggregate_public_key( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + sort_handle: &SH, + block: &NakamotoBlock, + ) -> Result { + let block_sn = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)? + .ok_or(ChainstateError::DBError(db_error::NotFoundError))?; + let aggregate_key_block_header = + Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? + .ok_or(ChainstateError::InvalidStacksBlock( + "Failed to get epoch ID".into(), + ))? 
+ .epoch_id; + + let aggregate_public_key = Self::load_aggregate_public_key( + sortdb, + sort_handle, + chainstate, + block_sn.block_height, + &aggregate_key_block_header.index_block_hash(), + epoch_id >= StacksEpochId::Epoch30, + )?; + Ok(aggregate_public_key) + } +} + impl<'a> NakamotoStagingBlocksConnRef<'a> { - #[cfg(test)] pub fn get_all_blocks_in_tenure( &self, tenure_id_consensus_hash: &ConsensusHash, @@ -1764,7 +1845,7 @@ pub fn test_get_highest_nakamoto_tenure() { &stacks_ch, &stacks_bhh, stacks_height ); let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); @@ -1802,7 +1883,7 @@ pub fn test_get_highest_nakamoto_tenure() { // new tip doesn't include the last two tenures let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); let last_tenure_change = &all_tenure_changes[2]; From 41c50fa3e8b631d2f325e6497f2e0f02683e048c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:13 -0400 Subject: [PATCH 039/148] chore: API sync --- stackslib/src/chainstate/nakamoto/tests/node.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 354bacb7af..fc425d0580 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -583,7 +583,6 @@ impl TestStacksNode { let reward_set = sortdb .get_preprocessed_reward_set_of(&sort_tip) .expect("Failed to get reward cycle info") - .expect("Failed to get reward cycle info") .known_selected_anchor_block_owned() .expect("Expected a reward set"); From f1d46570ae5e26d298bf437db6c4519cb7f63750 Mon Sep 17 00:00:00 2001 From: Jude Nelson 
Date: Wed, 22 May 2024 16:31:22 -0400 Subject: [PATCH 040/148] chore: fmt --- stackslib/src/chainstate/stacks/boot/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index d3e8a494de..01ca39be4a 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -40,7 +40,9 @@ use serde::Deserialize; use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; use stacks_common::types; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, StacksAddress, StacksBlockId, StacksPublicKey, +}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use wsts::curve::point::{Compressed, Point}; use wsts::curve::scalar::Scalar; From ffb572e7e6b176f0160744633372401d3f4496bf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:30 -0400 Subject: [PATCH 041/148] chore: doc epoch2-specific behavior --- stackslib/src/net/chat.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 52622d1e59..3037ac60d1 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1628,7 +1628,7 @@ impl ConversationP2P { .map_err(|e| net_error::from(e))?; if cfg!(test) { - // make *sure* the behavior stays the same + // make *sure* the behavior stays the same in epoch 2 let original_blocks_inv_data: BlocksInvData = chainstate.get_blocks_inventory(&block_hashes)?; From f2fac094da51753a6b452e742808a8e0a215e7eb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:42 -0400 Subject: [PATCH 042/148] chore: use reward sets instead of aggregate public keys --- .../net/download/nakamoto/download_state_machine.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git 
a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 77cf64dba6..c95dc6d5f3 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -38,9 +38,11 @@ use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -861,6 +863,7 @@ impl NakamotoDownloadStateMachine { "Peer {} has no inventory for reward cycle {}", naddr, reward_cycle ); + test_debug!("Peer {} has the following inventory data: {:?}", naddr, inv); continue; }; for (i, wt) in wanted_tenures.iter().enumerate() { @@ -1152,14 +1155,14 @@ impl NakamotoDownloadStateMachine { fn update_tenure_downloaders( &mut self, count: usize, - agg_public_keys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) { self.tenure_downloads.make_tenure_downloaders( &mut self.tenure_download_schedule, &mut self.available_tenures, &mut self.tenure_block_ids, count, - agg_public_keys, + current_reward_sets, ) } @@ -1435,7 +1438,7 @@ impl NakamotoDownloadStateMachine { sortdb, sort_tip, chainstate, - &network.aggregate_public_keys, + &network.current_reward_sets, ) else { neighbor_rpc.add_dead(network, &naddr); continue; @@ -1500,7 +1503,7 @@ impl NakamotoDownloadStateMachine { max_count: usize, ) -> HashMap> { // queue up more downloaders - self.update_tenure_downloaders(max_count, &network.aggregate_public_keys); + self.update_tenure_downloaders(max_count, &network.current_reward_sets); // run all 
downloaders let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); From 3fff20e2f36f95fd6b61f17e3d8cd75d8ac9418f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:01 -0400 Subject: [PATCH 043/148] chore: use reward set signature verification --- .../download/nakamoto/tenure_downloader.rs | 95 +++++++++---------- 1 file changed, 46 insertions(+), 49 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 340fa717fd..a3586602e6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ @@ -41,6 +40,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -129,8 +129,8 @@ impl fmt::Display for NakamotoTenureDownloadState { /// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); /// in this case, the end-block is the start-block of the ongoing tenure. /// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. 
As blocks are found, their signer signatures will be validated against the aggregate -/// public key for this tenure; their hash-chain continuity will be validated against the start +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start /// and end block hashes; their quantity will be validated against the tenure-change transaction /// in the end-block. /// @@ -149,10 +149,10 @@ pub struct NakamotoTenureDownloader { pub tenure_end_block_id: StacksBlockId, /// Address of who we're asking for blocks pub naddr: NeighborAddress, - /// Aggregate public key that signed the start-block of this tenure - pub start_aggregate_public_key: Point, - /// Aggregate public key that signed the end-block of this tenure - pub end_aggregate_public_key: Point, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with /// this state machine. 
pub idle: bool, @@ -178,8 +178,8 @@ impl NakamotoTenureDownloader { tenure_start_block_id: StacksBlockId, tenure_end_block_id: StacksBlockId, naddr: NeighborAddress, - start_aggregate_public_key: Point, - end_aggregate_public_key: Point, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, ) -> Self { test_debug!( "Instantiate downloader to {} for tenure {}", @@ -191,8 +191,8 @@ impl NakamotoTenureDownloader { tenure_start_block_id, tenure_end_block_id, naddr, - start_aggregate_public_key, - end_aggregate_public_key, + start_signer_keys, + end_signer_keys, idle: false, state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), tenure_start_block: None, @@ -243,19 +243,18 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !tenure_start_block - // .header - // .verify_threshold_signer(&self.start_aggregate_public_key) - // { - // // signature verification failed - // warn!("Invalid tenure-start block: bad signer signature"; - // "tenure_id" => %self.tenure_id_consensus_hash, - // "block.header.block_id" => %tenure_start_block.header.block_id(), - // "start_aggregate_public_key" => %self.start_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } debug!( "Accepted tenure-start block for tenure {} block={}", @@ -370,19 +369,18 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !tenure_end_block - // 
.header - // .verify_threshold_signer(&self.end_aggregate_public_key) - // { - // // bad signature - // warn!("Invalid tenure-end block: bad signer signature"; - // "tenure_id" => %self.tenure_id_consensus_hash, - // "block.header.block_id" => %tenure_end_block.header.block_id(), - // "end_aggregate_public_key" => %self.end_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } // extract the needful -- need the tenure-change payload (which proves that the tenure-end // block is the tenure-start block for the next tenure) and the parent block ID (which is @@ -472,18 +470,17 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !block - // .header - // .verify_threshold_signer(&self.start_aggregate_public_key) - // { - // warn!("Invalid block: bad signer signature"; - // "tenure_id" => %self.tenure_id_consensus_hash, - // "block.header.block_id" => %block.header.block_id(), - // "start_aggregate_public_key" => %self.start_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } expected_block_id = &block.header.parent_block_id; count += 1; From 0318fb28735bb562b6c45b778edf5b927a51d4b9 Mon 
Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:19 -0400 Subject: [PATCH 044/148] chore: use reward set signature verification --- .../nakamoto/tenure_downloader_set.rs | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 357b588e8a..0100eb0ecd 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -31,16 +31,17 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -418,7 +419,7 @@ impl NakamotoTenureDownloaderSet { available: &mut HashMap>, tenure_block_ids: &HashMap, count: usize, - agg_public_keys: &BTreeMap>, + current_reward_cycles: &BTreeMap, ) { test_debug!("schedule: {:?}", schedule); test_debug!("available: {:?}", &available); @@ -479,19 +480,25 @@ impl NakamotoTenureDownloaderSet { test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch); continue; }; - let Some(Some(start_agg_pubkey)) = agg_public_keys.get(&tenure_info.start_reward_cycle) + let 
Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { test_debug!( - "Cannot fetch tenure-start block due to no known aggregate public key: {:?}", + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, &tenure_info ); schedule.pop_front(); continue; }; - let Some(Some(end_agg_pubkey)) = agg_public_keys.get(&tenure_info.end_reward_cycle) + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { test_debug!( - "Cannot fetch tenure-end block due to no known aggregate public key: {:?}", + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, &tenure_info ); schedule.pop_front(); @@ -499,12 +506,10 @@ impl NakamotoTenureDownloaderSet { }; test_debug!( - "Download tenure {} (start={}, end={}) with aggregate keys {}, {} (rc {},{})", + "Download tenure {} (start={}, end={}) (rc {},{})", &ch, &tenure_info.start_block_id, &tenure_info.end_block_id, - &start_agg_pubkey, - &end_agg_pubkey, tenure_info.start_reward_cycle, tenure_info.end_reward_cycle ); @@ -513,8 +518,8 @@ impl NakamotoTenureDownloaderSet { tenure_info.start_block_id.clone(), tenure_info.end_block_id.clone(), naddr.clone(), - start_agg_pubkey.clone(), - end_agg_pubkey.clone(), + start_reward_set.clone(), + end_reward_set.clone(), ); test_debug!("Request tenure {} from neighbor {}", ch, &naddr); From 4f38a047a1c7a2ef4fab77339b70be0abb2d1c8b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:33 -0400 Subject: [PATCH 045/148] chore: use reward set signature verification --- .../nakamoto/tenure_downloader_unconfirmed.rs | 120 +++++++++--------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git 
a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 7a22b4ef2b..4d4d4dee47 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -31,16 +31,17 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -107,10 +108,10 @@ pub struct NakamotoUnconfirmedTenureDownloader { pub state: NakamotoUnconfirmedDownloadState, /// Address of who we're asking pub naddr: NeighborAddress, - /// Aggregate public key of the highest confirmed tenure - pub confirmed_aggregate_public_key: Option, - /// Aggregate public key of the unconfirmed (ongoing) tenure - pub unconfirmed_aggregate_public_key: Option, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, /// Block ID of this node's highest-processed block. /// We will not download any blocks lower than this, if it's set. 
pub highest_processed_block_id: Option, @@ -133,8 +134,8 @@ impl NakamotoUnconfirmedTenureDownloader { Self { state: NakamotoUnconfirmedDownloadState::GetTenureInfo, naddr, - confirmed_aggregate_public_key: None, - unconfirmed_aggregate_public_key: None, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, highest_processed_block_id, highest_processed_block_height: None, tenure_tip: None, @@ -185,7 +186,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, remote_tenure_tip: RPCGetTenureInfo, - agg_pubkeys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) -> Result<(), NetError> { if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { return Err(NetError::InvalidState); @@ -297,21 +298,24 @@ impl NakamotoUnconfirmedTenureDownloader { ) .expect("FATAL: sortition from before system start"); - // get aggregate public keys for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_aggregate_public_key)) = - agg_pubkeys.get(&parent_tenure_rc).cloned() + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No signer public keys for confirmed tenure {} (rc {})", &parent_local_tenure_sn.consensus_hash, parent_tenure_rc ); return Err(NetError::InvalidState); }; - let Some(Some(unconfirmed_aggregate_public_key)) = agg_pubkeys.get(&tenure_rc).cloned() + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No signer public keys for unconfirmed tenure {} (rc {})", &local_tenure_sn.consensus_hash, tenure_rc ); return Err(NetError::InvalidState); 
@@ -339,14 +343,12 @@ impl NakamotoUnconfirmedTenureDownloader { } test_debug!( - "Will validate unconfirmed blocks with ({},{}) and ({},{})", - &confirmed_aggregate_public_key, + "Will validate unconfirmed blocks with reward sets in ({},{})", parent_tenure_rc, - &unconfirmed_aggregate_public_key, tenure_rc ); - self.confirmed_aggregate_public_key = Some(confirmed_aggregate_public_key); - self.unconfirmed_aggregate_public_key = Some(unconfirmed_aggregate_public_key); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); self.tenure_tip = Some(remote_tenure_tip); Ok(()) @@ -370,25 +372,22 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; - // TODO: epoch-gated loading of aggregate key - // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - // else { - // return Err(NetError::InvalidState); - // }; - - // stacker signature has to match the current aggregate public key - // TODO: epoch-gated verify threshold or vec of signatures - // if !unconfirmed_tenure_start_block - // .header - // .verify_threshold_signer(unconfirmed_aggregate_public_key) - // { - // warn!("Invalid tenure-start block: bad signer signature"; - // "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - // "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer 
signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } // block has to match the expected hash if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { @@ -437,11 +436,9 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; - // TODO: epoch-gated load aggregate key - // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - // else { - // return Err(NetError::InvalidState); - // }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; if tenure_blocks.is_empty() { // nothing to do @@ -459,18 +456,17 @@ impl NakamotoUnconfirmedTenureDownloader { "block_id" => %block.header.block_id()); return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !block - // .header - // .verify_threshold_signer(unconfirmed_aggregate_public_key) - // { - // warn!("Invalid block: bad signer signature"; - // "tenure_id" => %tenure_tip.consensus_hash, - // "block.header.block_id" => %block.header.block_id(), - // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } // we may or may not need the tenure-start block for the unconfirmed tenure. But if we // do, make sure it's valid, and it's the last block we receive. 
@@ -616,12 +612,10 @@ impl NakamotoUnconfirmedTenureDownloader { else { return Err(NetError::InvalidState); }; - let Some(confirmed_aggregate_public_key) = self.confirmed_aggregate_public_key.as_ref() - else { + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { return Err(NetError::InvalidState); }; @@ -634,8 +628,8 @@ impl NakamotoUnconfirmedTenureDownloader { unconfirmed_tenure.winning_block_id.clone(), unconfirmed_tenure_start_block.header.block_id(), self.naddr.clone(), - confirmed_aggregate_public_key.clone(), - unconfirmed_aggregate_public_key.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), ) .with_tenure_end_block(unconfirmed_tenure_start_block.clone()); @@ -723,7 +717,7 @@ impl NakamotoUnconfirmedTenureDownloader { sortdb: &SortitionDB, local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - agg_pubkeys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) -> Result>, NetError> { match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { @@ -735,7 +729,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip, chainstate, remote_tenure_info, - agg_pubkeys, + current_reward_sets, )?; Ok(None) } From b0100bc078e28f0b5dac26075f3a468bbd76a3fc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:45 -0400 Subject: [PATCH 046/148] chore: document post-nakamoto usage of aggregate public key --- stackslib/src/net/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..1cead0306a 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2021,6 +2021,7 @@ pub mod test { /// What services should this peer support? 
pub services: u16, /// aggregate public key to use + /// (NOTE: will be used post-Nakamoto) pub aggregate_public_key: Option, pub test_stackers: Option>, pub test_signers: Option, From 3de7c89a3cd35d6412e9528576c94e3d1dbed22b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:58 -0400 Subject: [PATCH 047/148] feat: cache reward sets by reward cycle and sortition ID, instead of aggregate public keys --- stackslib/src/net/p2p.rs | 155 ++++++++++++++++++++++++--------------- 1 file changed, 95 insertions(+), 60 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795a..821d4dbc1d 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -36,7 +36,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use {mio, url}; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; @@ -45,7 +44,7 @@ use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::{ static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, + static_get_stacks_tip_affirmation_map, RewardCycleInfo, }; use crate::chainstate::stacks::boot::MINERS_NAME; use crate::chainstate::stacks::db::StacksChainState; @@ -259,13 +258,10 @@ pub struct PeerNetwork { /// In epoch 2.x, this is the same as the tip block ID /// In nakamoto, this is the block ID of the first block in the current tenure pub tenure_start_block_id: StacksBlockId, - /// The aggregate public keys of each witnessed reward cycle. - /// Only active during epoch 3.x and beyond. - /// Gets refreshed on each new Stacks block arrival, which deals with burnchain forks. 
- /// Stored in a BTreeMap because we often need to query the last or second-to-last reward cycle - /// aggregate public key, and we need to determine whether or not to load new reward cycles' - /// keys. - pub aggregate_public_keys: BTreeMap>, + /// The reward sets of the current and past reward cycle. + /// Needed to validate blocks, which are signed by a threshold of stackers + pub current_reward_sets: BTreeMap, + pub current_reward_set_ids: BTreeMap, // information about the state of the network's anchor blocks pub heaviest_affirmation_map: AffirmationMap, @@ -476,7 +472,8 @@ impl PeerNetwork { stacks_tip_sn: None, parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), tenure_start_block_id: StacksBlockId([0x00; 32]), - aggregate_public_keys: BTreeMap::new(), + current_reward_sets: BTreeMap::new(), + current_reward_set_ids: BTreeMap::new(), peerdb: peerdb, atlasdb: atlasdb, @@ -5430,58 +5427,100 @@ impl PeerNetwork { )) } - /// Refresh our view of the aggregate public keys - /// Returns a list of (reward-cycle, option(pubkey)) pairs. - /// An option(pubkey) is defined for all reward cycles, but for epochs 2.4 and earlier, it will - /// be None. 
- fn find_new_aggregate_public_keys( + /// Refresh our view of the last two reward cycles + fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, tip_sn: &BlockSnapshot, - chainstate: &mut StacksChainState, - stacks_tip_block_id: &StacksBlockId, - ) -> Result)>, net_error> { - let sort_tip_rc = self + ) -> Result<(), net_error> { + let cur_rc = self .burnchain .block_height_to_reward_cycle(tip_sn.block_height) .expect("FATAL: sortition from before system start"); - let next_agg_pubkey_rc = self - .aggregate_public_keys - .last_key_value() - .map(|(rc, _)| rc.saturating_add(1)) - .unwrap_or(0); - let mut new_agg_pubkeys: Vec<_> = (next_agg_pubkey_rc..=sort_tip_rc) - .filter_map(|key_rc| { - let ih = sortdb.index_handle(&tip_sn.sortition_id); - let agg_pubkey_opt = if self.get_current_epoch().epoch_id < StacksEpochId::Epoch25 { - None - } else { - test_debug!( - "Try to get aggregate public key for reward cycle {}", - key_rc + + let prev_rc = cur_rc.saturating_sub(1); + + // keyed by both rc and sortition ID in case there's a bitcoin fork -- we'd want the + // canonical reward set to be loaded + let cur_rc_sortition_id = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, cur_rc)?; + let prev_rc_sortition_id = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, prev_rc)?; + + for (rc, sortition_id) in [ + (prev_rc, prev_rc_sortition_id), + (cur_rc, cur_rc_sortition_id), + ] + .into_iter() + { + if let Some(sort_id) = self.current_reward_set_ids.get(&rc) { + if sort_id == &sortition_id { + continue; + } + } + let Ok((reward_cycle_info, reward_cycle_sort_id)) = sortdb + .get_preprocessed_reward_set_for_reward_cycle(&tip_sn.sortition_id, rc) + .map_err(|e| { + warn!( + "Failed to load reward set for cycle {} ({}): {:?}", + rc, &sortition_id, &e ); - NakamotoChainState::load_aggregate_public_key( - sortdb, - &ih, - chainstate, - self.burnchain.reward_cycle_to_block_height(key_rc), - 
&stacks_tip_block_id, - false, + e + }) + else { + // NOTE: this should never be reached + continue; + }; + if !reward_cycle_info.is_reward_info_known() { + // haven't yet processed the anchor block, so don't store + test_debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); + continue; + } + + test_debug!( + "Reward cycle info for cycle {} at sortition {} is {:?}", + rc, + &reward_cycle_sort_id, + &reward_cycle_info + ); + self.current_reward_sets.insert(rc, reward_cycle_info); + self.current_reward_set_ids.insert(rc, reward_cycle_sort_id); + } + + // free memory + if self.current_reward_sets.len() > 3 { + self.current_reward_sets.retain(|old_rc, _| { + if (*old_rc).saturating_add(1) < prev_rc { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { + // shouldn't happen + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + }; + let Ok(prepare_phase_sort_id) = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &tip_sn.sortition_id, + *old_rc, ) - .ok() + else { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; }; - if agg_pubkey_opt.is_none() { - return None; + if prepare_phase_sort_id != *old_sortition_id { + // non-canonical reward cycle info + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; } - Some((key_rc, agg_pubkey_opt)) - }) - .collect(); - - if new_agg_pubkeys.len() == 0 && self.aggregate_public_keys.len() == 0 { - // special case -- we're before epoch 3.0, so don't waste time doing this again - new_agg_pubkeys.push((sort_tip_rc, None)); + true + }); } - Ok(new_agg_pubkeys) + Ok(()) } /// Refresh 
view of burnchain, if needed. @@ -5511,14 +5550,13 @@ impl PeerNetwork { != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed || stacks_tip_changed; + + if stacks_tip_changed || burnchain_tip_changed { + self.refresh_reward_cycles(sortdb, &canonical_sn)?; + } + let mut ret: HashMap> = HashMap::new(); - let aggregate_public_keys = self.find_new_aggregate_public_keys( - sortdb, - &canonical_sn, - chainstate, - &new_stacks_tip_block_id, - )?; let (parent_stacks_tip, tenure_start_block_id, stacks_tip_sn) = if stacks_tip_changed { let stacks_tip_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_tip.0)?; @@ -5692,9 +5730,6 @@ impl PeerNetwork { self.stacks_tip = stacks_tip; self.stacks_tip_sn = stacks_tip_sn; self.parent_stacks_tip = parent_stacks_tip; - for (key_rc, agg_pubkey_opt) in aggregate_public_keys { - self.aggregate_public_keys.insert(key_rc, agg_pubkey_opt); - } self.tenure_start_block_id = tenure_start_block_id; Ok(ret) From dc43a273d5414b58dbc3d408a9cb2a57a16029bb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:33:28 -0400 Subject: [PATCH 048/148] chore: API sync; delete dead code (it's in the git history now) --- stackslib/src/net/relay.rs | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7f4f1847a9..0acc48244d 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -30,7 +30,6 @@ use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, SortitionId, use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView}; use crate::chainstate::burn::db::sortdb::{ @@ -723,26 +722,11 @@ impl Relayer { ); let config = chainstate.config(); - - // TODO: epoch gate to verify with aggregate key - // let 
Ok(aggregate_public_key) = - // NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) - // else { - // warn!("Failed to get aggregate public key. Will not store or relay"; - // "stacks_block_hash" => %block.header.block_hash(), - // "consensus_hash" => %block.header.consensus_hash, - // "burn_height" => block.header.chain_length, - // "sortition_height" => block_sn.block_height, - // ); - // return Ok(false); - // }; - - // TODO: epoch gate to use signatures vec let tip = block_sn.sortition_id; let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { - Ok(Some(x)) => x, - Ok(None) => { + Ok(x) => x, + Err(db_error::NotFoundError) => { error!("No RewardCycleInfo found for tip {}", tip); return Err(chainstate_error::PoxNoRewardCycle); } @@ -763,7 +747,6 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - None, reward_set, )?; staging_db_tx.commit()?; From 4d0a1a37e6a8aedb88f353141ac5f6b896a49b2d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:33:48 -0400 Subject: [PATCH 049/148] chore: use reward sets instead of aggregate public keys --- stackslib/src/net/tests/download/nakamoto.rs | 125 ++++++++++++++----- 1 file changed, 92 insertions(+), 33 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 1e76cd1853..47dabd176e 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -92,7 +92,7 @@ fn test_nakamoto_tenure_downloader() { let private_key = StacksPrivateKey::new(); let mut test_signers = TestSigners::new(vec![]); - let aggregate_public_key = test_signers.aggregate_public_key.clone(); + let reward_set = test_signers.synthesize_reward_set(); let tenure_start_header = NakamotoBlockHeader { version: 1, @@ -116,7 +116,6 @@ fn test_nakamoto_tenure_downloader() { cause: TenureChangeCause::BlockFound, pubkey_hash: Hash160([0x02; 20]), }; - use 
stacks_common::types::net::PeerAddress; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -231,8 +230,8 @@ fn test_nakamoto_tenure_downloader() { tenure_start_block.header.block_id(), next_tenure_start_block.header.block_id(), naddr.clone(), - aggregate_public_key.clone(), - aggregate_public_key.clone(), + reward_set.clone(), + reward_set.clone(), ); // must be first block @@ -365,7 +364,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let tip_ch = peer.network.stacks_tip.0.clone(); let parent_tip_ch = peer.network.parent_stacks_tip.0.clone(); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); + let current_reward_sets = peer.network.current_reward_sets.clone(); let unconfirmed_tenure = peer .chainstate() @@ -444,10 +443,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); let tenure_tip = RPCGetTenureInfo { consensus_hash: peer.network.stacks_tip.0.clone(), @@ -472,7 +483,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -507,10 +518,22 @@ fn 
test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -537,7 +560,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -596,10 +619,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -626,7 +661,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -684,10 +719,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // serve all of the unconfirmed blocks in one shot. 
{ let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -714,7 +761,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -751,10 +798,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // bad block signature { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -781,7 +840,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -1091,7 +1150,7 @@ fn test_make_tenure_downloaders() { assert_eq!(tip.block_height, 51); let test_signers = TestSigners::new(vec![]); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); + let current_reward_sets = 
peer.network.current_reward_sets.clone(); // test load_wanted_tenures() { @@ -1794,7 +1853,7 @@ fn test_make_tenure_downloaders() { &mut available, &tenure_block_ids, 6, - &agg_pubkeys, + ¤t_reward_sets, ); // made all 6 downloaders @@ -1832,7 +1891,7 @@ fn test_make_tenure_downloaders() { &mut available, &tenure_block_ids, 12, - &agg_pubkeys, + ¤t_reward_sets, ); // only made 4 downloaders got created From 004f3d61c48daa700edc6657aa34fb611504c92f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:33:57 -0400 Subject: [PATCH 050/148] chore: API sync --- stackslib/src/net/tests/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index a9534b6d29..45ad71590d 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -736,7 +736,7 @@ impl NakamotoBootPlan { let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -811,7 +811,7 @@ impl NakamotoBootPlan { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_highest_nakamoto_tenure( chainstate.db(), - sort_db.conn(), + &sort_db.index_handle_at_tip(), ) .unwrap() .unwrap(); From 6e301e3e77f71b2a1046247cc915276a6963d614 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:38:04 -0400 Subject: [PATCH 051/148] fix: load the signer reward cycle for the block based on the fact that the block in reward cycle N at reward cycle index 0 was signed by the signers of reward cycle N - 1 --- stackslib/src/chainstate/burn/db/sortdb.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index eee027b72b..7f8153ce84 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3646,8 +3646,12 @@ impl SortitionDB { db_error::NotFoundError })?; + // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch + // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last + // block is checked against the signers who were active just before the new reward set is + // calculated. let reward_cycle_id = pox_constants - .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) .expect("FATAL: stored snapshot with block height < first_block_height"); Self::inner_get_preprocessed_reward_set_for_reward_cycle( From 1f1c8dc5b7dec2e7e25c5e21934bee772a795ece Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:38:34 -0400 Subject: [PATCH 052/148] fix: check the *ongoing* tenure, not the last-started tenure --- .../chainstate/nakamoto/coordinator/tests.rs | 42 +++++++++++-------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 63651b3946..b7c0bb5ba9 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -406,7 +406,7 @@ fn replay_reward_cycle( block.clone(), None, ) - .unwrap(); + .unwrap_or(false); if accepted { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -1612,10 +1612,12 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = 
SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1704,10 +1706,12 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1799,10 +1803,12 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -2000,10 +2006,12 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = 
peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; From a99c09802c9fe306c313c73f607b8a471def2ade Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:38:51 -0400 Subject: [PATCH 053/148] feat: add query API for the ongoing tenure in a given sortition (be it an extension of a previously-started tenure, or a newly-started tenure), and clarify that the existing API for getting the highest Nakamoto tenure only pertains to the highest *started* tenure (not extended) --- stackslib/src/chainstate/nakamoto/tenure.rs | 52 +++++++++++++++++++-- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index b68950a875..6238e2905a 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -539,6 +539,17 @@ impl NakamotoChainState { Ok(tenure_opt) } + /// Get the nakamoto tenure by burn view + pub fn get_nakamoto_tenure_change_by_burn_view( + headers_conn: &Connection, + burn_view: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; + let args = rusqlite::params![burn_view]; + let tenure_opt: Option = query_row(headers_conn, sql, args)?; + Ok(tenure_opt) + } + /// Get a nakamoto tenure-change by its tenure ID consensus hash. /// Get the highest such record. It will be the last-processed BlockFound tenure /// for the given sortition consensus hash. 
@@ -555,7 +566,8 @@ impl NakamotoChainState { Ok(tenure_opt) } - /// Get the highest non-empty processed tenure on the canonical sortition history. + /// Get the highest non-empty processed tenure-change on the canonical sortition history. + /// It will be a BlockFound tenure. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, sortdb_conn: &SH, @@ -564,16 +576,48 @@ impl NakamotoChainState { // sortition gets invalidated through a reorg. let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? .ok_or(ChainstateError::NoSuchBlockError)?; - loop { + + // if there's been no activity for more than 2*reward_cycle_length sortitions, then the + // chain is dead anyway + for _ in 0..(2 * sortdb_conn.pox_constants().reward_cycle_length) { if let Some(tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( headers_conn, &cursor.consensus_hash, )? { return Ok(Some(tenure)); } - cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? - .ok_or(ChainstateError::NoSuchBlockError)?; + cursor = + SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError)?; + } + Ok(None) + } + + /// Get the ongoing tenure (i.e. last tenure-change tx record) from the sortition pointed to by + /// sortdb_conn. + /// It will be a BlockFound or an Extension tenure. + pub fn get_ongoing_nakamoto_tenure( + headers_conn: &Connection, + sortdb_conn: &SH, + ) -> Result, ChainstateError> { + // NOTE: we do a *search* here in case the canonical Stacks pointer stored on the canonical + // sortition gets invalidated through a reorg. + let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? 
+ .ok_or(ChainstateError::NoSuchBlockError)?; + + // if there's been no activity for more than 2*reward_cycle_length sortitions, then the + // chain is dead anyway + for _ in 0..(2 * sortdb_conn.pox_constants().reward_cycle_length) { + if let Some(tenure) = + Self::get_nakamoto_tenure_change_by_burn_view(headers_conn, &cursor.consensus_hash)? + { + return Ok(Some(tenure)); + } + cursor = + SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError)?; } + Ok(None) } /// Verify that a tenure change tx is a valid first-ever tenure change. It must connect to an From f930538634200ebe85173049750a60a5a6e293ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:39:48 -0400 Subject: [PATCH 054/148] chore: fmt --- .../src/chainstate/nakamoto/tests/mod.rs | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 9aab60ab9b..abc9cf203b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1844,10 +1844,12 @@ pub fn test_get_highest_nakamoto_tenure() { "stacks tip = {},{},{}", &stacks_ch, &stacks_bhh, stacks_height ); - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let highest_tenure = NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); let last_tenure_change = last_tenure_change.unwrap(); let last_header = last_header.unwrap(); @@ -1882,10 +1884,12 @@ pub fn test_get_highest_nakamoto_tenure() { ); // new tip doesn't include the last two tenures - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let highest_tenure = 
NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); let last_tenure_change = &all_tenure_changes[2]; let last_header = &all_headers[2]; assert_eq!( From 480f2fd3340e05508e4c7cf1c5f32569865001a8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:39:56 -0400 Subject: [PATCH 055/148] chore: document tenure-loading behavior --- stackslib/src/net/inv/nakamoto.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index de46d15744..867be5a507 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -72,7 +72,8 @@ pub(crate) struct InvTenureInfo { impl InvTenureInfo { /// Load up cacheable tenure state for a given tenure-ID consensus hash. - /// This only returns Ok(Some(..)) if there was a tenure-change tx for this consensus hash. + /// This only returns Ok(Some(..)) if there was a tenure-change tx for this consensus hash + /// (i.e. 
it was a BlockFound tenure, not an Extension tenure) pub fn load( chainstate: &StacksChainState, consensus_hash: &ConsensusHash, From d2c2847edb7f1efe2039dd7d610c28b11dbda1f5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:40:08 -0400 Subject: [PATCH 056/148] chore: fmt --- stackslib/src/net/tests/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 45ad71590d..5e2cb3e6cc 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -735,10 +735,12 @@ impl NakamotoBootPlan { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; From b24deed88f1f41276941639c6408570846cff604 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 18:12:20 -0400 Subject: [PATCH 057/148] chore: mock aggregate public key until the miner is upgraded to not need it --- .../stacks-node/src/nakamoto_node/miner.rs | 46 +++++++------------ .../src/tests/nakamoto_integrations.rs | 1 - 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..2be08d7c12 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -42,7 +42,8 @@ use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; -use 
wsts::curve::point::Point; +use wsts::curve::ecdsa; +use wsts::curve::point::{Compressed, Point}; use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; @@ -198,7 +199,7 @@ impl BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), None, reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), reward_set) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -272,12 +273,7 @@ impl BlockMinerThread { .expect("FATAL: building on a burn block that is before the first burn block"); let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } + Ok(x) => x, Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -291,18 +287,18 @@ impl BlockMinerThread { )); }; - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + let chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let sortition_handle = sort_db.index_handle_at_tip(); - let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( - &mut chain_state, - &sort_db, - &sortition_handle, - &new_block, - ) else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the active aggregate public key. 
Cannot mine!".into(), - )); + + // NOTE: this is a placeholder until the API can be fixed + let aggregate_public_key = { + let key_bytes = [ + 0x03, 0xd3, 0xe1, 0x5a, 0x36, 0xf3, 0x2a, 0x9e, 0x71, 0x31, 0x7f, 0xcb, 0x4a, 0x20, + 0x1b, 0x0c, 0x08, 0xb3, 0xbc, 0xfb, 0xdc, 0x8a, 0xee, 0x2e, 0xe4, 0xd2, 0x69, 0x23, + 0x00, 0x06, 0xb1, 0xa0, 0xcb, + ]; + let ecdsa_pk = ecdsa::PublicKey::try_from(key_bytes.as_slice()).unwrap(); + Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())).unwrap() }; let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); @@ -310,6 +306,7 @@ impl BlockMinerThread { &reward_set, reward_cycle, miner_privkey_as_scalar, + // TODO: placeholder until the signer is working aggregate_public_key, &stackerdbs, &self.config, @@ -371,12 +368,7 @@ impl BlockMinerThread { .expect("FATAL: building on a burn block that is before the first burn block"); let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } + Ok(x) => x, Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -533,12 +525,9 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } - /// TODO: update to utilize `signer_signature` vec instead of the aggregate - /// public key. 
fn broadcast( &self, block: NakamotoBlock, - aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result<(), ChainstateError> { #[cfg(test)] @@ -576,7 +565,6 @@ impl BlockMinerThread { &mut sortition_handle, &staging_tx, headers_conn, - aggregate_public_key, reward_set, )?; staging_tx.commit()?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f46d9a3878..3c7e422e8d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -367,7 +367,6 @@ pub fn read_and_sign_block_proposal( let reward_set = sortdb .get_preprocessed_reward_set_of(&tip.sortition_id) .expect("Failed to get reward cycle info") - .expect("Failed to get reward cycle info") .known_selected_anchor_block_owned() .expect("Expected a reward set"); From bb1aed130af53e820f98d0f130bcd22ce9c48389 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 18:13:20 -0400 Subject: [PATCH 058/148] fix: remove unused var --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 2be08d7c12..a938c4b7be 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -287,9 +287,6 @@ impl BlockMinerThread { )); }; - let chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - // NOTE: this is a placeholder until the API can be fixed let aggregate_public_key = { let key_bytes = [ From 65a934e454524acbe6f2d3d9db1e87923731dd95 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 23 May 2024 06:54:01 +0200 Subject: [PATCH 059/148] fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 
3f91b1a9f2..436d45705b 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ cd testnet/stacks-node cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml ``` -_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ +_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ Additional testnet documentation is available [here](./docs/testnet.md) and [here](https://docs.stacks.co/docs/nodes-and-miners/miner-testnet) From d79f31239726c181eef290acafc7f82d27176f1a Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 23 May 2024 06:54:50 +0200 Subject: [PATCH 060/148] fix typos --- docs/ci-release.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ci-release.md b/docs/ci-release.md index 4e21ed631d..f7881ba675 100644 --- a/docs/ci-release.md +++ b/docs/ci-release.md @@ -23,7 +23,7 @@ All releases are built via a Github Actions workflow named `CI` ([ci.yml](../.gi - `stacks-core:` - An untagged build of any branch will produce a single image built from source on Debian with glibc: - `stacks-core:` -- A tagged release on a non-default branch will produces: +- A tagged release on a non-default branch will produce: - Docker Alpine image for several architectures tagged with: - `stacks-core:` - Docker Debian image for several architectures tagged with: @@ -83,7 +83,7 @@ There are also 2 different methods in use with regard to running tests: A matrix is used when there are several known tests that need to be run. 
Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). There is also a workflow designed to run tests that are manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). -This workflow requires you to select which test(s) you want to run, which then triggers a reusbale workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. +This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. Files: From 66f9883858805c9191df8157156ffe8668d9e7d0 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 23 May 2024 06:55:40 +0200 Subject: [PATCH 061/148] fix typos --- docs/rpc-endpoints.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6815adfc61..6163f27b75 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -111,7 +111,7 @@ Returns a JSON list containing the following: ``` The `consensus_hash` field identifies the sortition in which the given block was -chosen. The `header` is the raw block header, a a hex string. The +chosen. The `header` is the raw block header, a hex string. The `parent_block_id` is the block ID hash of this block's parent, and can be used as a `?tip=` query parameter to page through deeper and deeper block headers. 
@@ -143,8 +143,8 @@ Returns JSON data in the form: } ``` -Where balance is the hex encoding of a unsigned 128-bit integer -(big-endian), nonce is a unsigned 64-bit integer, and the proofs are +Where balance is the hex encoding of an unsigned 128-bit integer +(big-endian), nonce is an unsigned 64-bit integer, and the proofs are provided as hex strings. For non-existent accounts, this _does not_ 404, rather it returns an @@ -212,7 +212,7 @@ JSON object _without_ the `proof` field. ### GET /v2/fees/transfer -Get an estimated fee rate for STX transfer transactions. This a a fee rate / byte, and is returned as a JSON integer. +Get an estimated fee rate for STX transfer transactions. This is a fee rate / byte, and is returned as a JSON integer. ### GET /v2/contracts/interface/[Stacks Address]/[Contract Name] @@ -530,6 +530,6 @@ Return metadata about the highest-known tenure, as the following JSON structure: Here, `consensus_hash` identifies the highest-known tenure (which may not be the highest sortition), `reward_cycle` identifies the reward cycle number of this -tenure, `tip_block_id` idenitifies the highest-known block in this tenure, and +tenure, `tip_block_id` identifies the highest-known block in this tenure, and `tip_height` identifies that block's height. From 7f5d3cd68615f8e9518ee19fea8dc7659c871fb0 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 12:00:56 +0300 Subject: [PATCH 062/148] Describe how the `delegatedUntilBurnHt` will be updated - This comment will be of help as we are updating the delegatedUntilBurnHt field to support delegations for an indefinite period. 
--- .../tests/pox-4/pox-4.stateful-prop.test.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 15f4d4ddc0..fe1f335176 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -124,6 +124,10 @@ it("statefully interacts with PoX-4", async () => { poolMembers: [], delegatedTo: "", delegatedMaxAmount: 0, + // We initialize delegatedUntilBurnHt to 0. It will be updated + // after successful delegate-stx calls. Its value will be either + // the unwrapped until-burn-ht uint passed to the delegate-stx, + // or undefined for indefinite delegations. delegatedUntilBurnHt: 0, delegatedPoxAddress: "", amountLocked: 0, From 0fe4b35f343d8ec50edab52b4a462e438f46c094 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 12:05:40 +0300 Subject: [PATCH 063/148] Enable the none branch for `delegatedUntilBurnHt` This commit: - updates the type of delegatedUntilBurnHt to number | none - updates the generators and the types of the untilBurnHt to support the none branch - updates the untilBurnHt param to support the none branch - adds the undefined check in the comparisons that involve delegatedUntilBurnHt - adds the undefined check in the state refresh comparison that involves delegatedUntilBurnHt --- .../tests/pox-4/pox_CommandModel.ts | 13 ++++++++---- .../tests/pox-4/pox_Commands.ts | 7 +++++-- .../pox-4/pox_DelegateStackExtendCommand.ts | 3 ++- .../pox-4/pox_DelegateStackStxCommand.ts | 3 ++- .../tests/pox-4/pox_DelegateStxCommand.ts | 21 ++++++++++++++----- .../pox-4/pox_RevokeDelegateStxCommand.ts | 10 +++++++-- 6 files changed, 42 insertions(+), 15 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts
b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index 6d4d582b58..cdf211c3ed 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -76,9 +76,14 @@ export class Stub { // Get the wallet's ex-delegators by comparing their delegatedUntilBurnHt // to the current burn block height (only if the wallet is a delegatee). - const expiredDelegators = wallet.poolMembers.filter((stackerAddress) => - this.stackers.get(stackerAddress)!.delegatedUntilBurnHt < - burnBlockHeight + // If the delegatedUntilBurnHt is undefined, the delegator is considered + // active for an indefinite period (until a revoke-delegate-stx call). + const expiredDelegators = wallet.poolMembers.filter( + (stackerAddress) => + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt !== + undefined && + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt as number < + burnBlockHeight, ); // Get the operator's pool stackers that no longer have partially commited @@ -180,7 +185,7 @@ export type Stacker = { poolMembers: StxAddress[]; delegatedTo: StxAddress; delegatedMaxAmount: number; - delegatedUntilBurnHt: number; + delegatedUntilBurnHt: number | undefined; delegatedPoxAddress: BtcAddress; amountLocked: number; amountUnlocked: number; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index ba7043d5ec..bafbe38a43 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -163,13 +163,16 @@ export function PoxCommands( fc.record({ wallet: fc.constantFrom(...wallets.values()), delegateTo: fc.constantFrom(...wallets.values()), - untilBurnHt: fc.integer({ min: 1 }), + untilBurnHt: fc.oneof( + fc.constant(Cl.none()), + fc.integer({ min: 1 }).map((value) => 
Cl.some(Cl.uint(value))), + ), amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), }).map(( r: { wallet: Wallet; delegateTo: Wallet; - untilBurnHt: number; + untilBurnHt: OptionalCV; amount: bigint; }, ) => diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts index cfd385cf5a..2875551342 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts @@ -83,7 +83,8 @@ export class DelegateStackExtendCommand implements PoxCommand { stackerWallet.hasDelegated === true && stackerWallet.isStacking === true && stackerWallet.delegatedTo === this.operator.stxAddress && - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight && + (stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && stackerWallet.delegatedMaxAmount >= stackedAmount && operatorWallet.poolMembers.includes(this.stacker.stxAddress) && operatorWallet.lockedAddresses.includes(this.stacker.stxAddress) && diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts index 456983807f..e3d9dd25c1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts @@ -94,7 +94,8 @@ export class DelegateStackStxCommand implements PoxCommand { Number(this.amountUstx) <= stackerWallet.ustxBalance && Number(this.amountUstx) >= model.stackingMinimum && operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + (stackerWallet.delegatedUntilBurnHt === undefined || + this.unlockBurnHt 
<= stackerWallet.delegatedUntilBurnHt) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts index 4a12b0140d..e70d466c9d 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts @@ -7,7 +7,15 @@ import { } from "./pox_CommandModel.ts"; import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; -import { boolCV, Cl } from "@stacks/transactions"; +import { + boolCV, + Cl, + ClarityType, + cvToValue, + isClarityType, + OptionalCV, + UIntCV, +} from "@stacks/transactions"; /** * The `DelegateStxCommand` delegates STX for stacking within PoX-4. This @@ -22,7 +30,7 @@ import { boolCV, Cl } from "@stacks/transactions"; export class DelegateStxCommand implements PoxCommand { readonly wallet: Wallet; readonly delegateTo: Wallet; - readonly untilBurnHt: number; + readonly untilBurnHt: OptionalCV; readonly amount: bigint; /** @@ -37,7 +45,7 @@ export class DelegateStxCommand implements PoxCommand { constructor( wallet: Wallet, delegateTo: Wallet, - untilBurnHt: number, + untilBurnHt: OptionalCV, amount: bigint, ) { this.wallet = wallet; @@ -74,7 +82,7 @@ export class DelegateStxCommand implements PoxCommand { // (delegate-to principal) Cl.principal(this.delegateTo.stxAddress), // (until-burn-ht (optional uint)) - Cl.some(Cl.uint(this.untilBurnHt)), + this.untilBurnHt, // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), ], @@ -93,7 +101,10 @@ export class DelegateStxCommand implements PoxCommand { wallet.hasDelegated = true; wallet.delegatedTo = this.delegateTo.stxAddress; wallet.delegatedMaxAmount = amountUstx; - wallet.delegatedUntilBurnHt = this.untilBurnHt; + wallet.delegatedUntilBurnHt = + isClarityType(this.untilBurnHt, 
ClarityType.OptionalNone) + ? undefined + : Number(cvToValue(this.untilBurnHt).value); wallet.delegatedPoxAddress = this.delegateTo.btcAddress; delegatedWallet.poolMembers.push(this.wallet.stxAddress); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 54e4806757..c39a1a5e42 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -37,7 +37,8 @@ export class RevokeDelegateStxCommand implements PoxCommand { return ( model.stackingMinimum > 0 && stacker.hasDelegated === true && - stacker.delegatedUntilBurnHt > model.burnBlockHeight + (stacker.delegatedUntilBurnHt === undefined || + stacker.delegatedUntilBurnHt > model.burnBlockHeight) ); } @@ -46,6 +47,9 @@ export class RevokeDelegateStxCommand implements PoxCommand { const wallet = model.stackers.get(this.wallet.stxAddress)!; const operatorWallet = model.stackers.get(wallet.delegatedTo)!; + const expectedUntilBurnHt = wallet.delegatedUntilBurnHt === undefined + ? Cl.none() + : Cl.some(Cl.uint(wallet.delegatedUntilBurnHt)); // Act const revokeDelegateStx = real.network.callPublicFn( @@ -66,7 +70,7 @@ export class RevokeDelegateStxCommand implements PoxCommand { "pox-addr": Cl.some( poxAddressToTuple(wallet.delegatedPoxAddress || ""), ), - "until-burn-ht": Cl.some(Cl.uint(wallet.delegatedUntilBurnHt)), + "until-burn-ht": expectedUntilBurnHt, }), ), ); @@ -76,6 +80,8 @@ export class RevokeDelegateStxCommand implements PoxCommand { // Update model so that we know this wallet is not delegating anymore. // This is important in order to prevent the test from revoking the // delegation multiple times with the same address. + // We update delegatedUntilBurnHt to 0, and not undefined. Undefined + // stands for indefinite delegation. 
wallet.hasDelegated = false; wallet.delegatedTo = ""; wallet.delegatedUntilBurnHt = 0; From 313431707c5088f0e330175bb61a42259c9633f9 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 12:10:25 +0300 Subject: [PATCH 064/148] Pass the incremented `start-burn-height` to the `stack-stx` calls --- .../tests/pox-4/pox_StackStxAuthCommand.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts index 108f0956b5..0b62f55c42 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -125,7 +125,7 @@ export class StackStxAuthCommand implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts index baa87015a1..100d84a6e0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts @@ -123,7 +123,7 @@ export class StackStxSigCommand implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) From 2b3a54e42cc384cae4136ff7d95654edc230aaaa Mon Sep 17 00:00:00 
2001 From: BowTiedRadone Date: Thu, 23 May 2024 13:06:55 +0300 Subject: [PATCH 065/148] Use simnet mineBlock inside `StackStxAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. --- .../tests/pox-4/pox_StackStxAuthCommand.ts | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts index 0b62f55c42..53f34ca0bb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -15,6 +15,7 @@ import { isClarityType, } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; /** * The `StackStxAuthCommand` locks STX for stacking within PoX-4. This self-service @@ -80,31 +81,6 @@ export class StackStxAuthCommand implements PoxCommand { // generated number passed to the constructor of this class. 
const maxAmount = model.stackingMinimum * this.margin; - const { result: setAuthorization } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.wallet.btcAddress), - // (period uint) - Cl.uint(this.period), - // (reward-cycle uint) - Cl.uint(currentRewCycle), - // (topic (string-ascii 14)) - Cl.stringAscii("stack-stx"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(maxAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - expect(setAuthorization).toBeOk(Cl.bool(true)); const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); const burnBlockHeight = Number( cvToValue(burnBlockHeightCV as ClarityValue), @@ -115,11 +91,35 @@ export class StackStxAuthCommand implements PoxCommand { // signer key. const amountUstx = maxAmount; - // Act - const stackStx = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-stx", - [ + // Include the authorization and the `stack-stx` transactions in a single + // block. This way we ensure both the authorization and the stack-stx + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-stx call. 
+ const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn("ST000000000000000000002AMW42H.pox-4", "stack-stx", [ // (amount-ustx uint) Cl.uint(amountUstx), // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -136,9 +136,10 @@ export class StackStxAuthCommand implements PoxCommand { Cl.uint(maxAmount), // (auth-id uint) Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); + ], this.wallet.stxAddress), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); const { result: rewardCycle } = real.network.callReadOnlyFn( "ST000000000000000000002AMW42H.pox-4", @@ -156,8 +157,7 @@ export class StackStxAuthCommand implements PoxCommand { ); assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); - // Assert - expect(stackStx.result).toBeOk( + expect(block[1].result).toBeOk( Cl.tuple({ "lock-amount": Cl.uint(amountUstx), "signer-key": Cl.bufferFromHex(this.wallet.signerPubKey), From aa52fa598cdf9276d9cdae767076ae1dadaed90d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 15:23:19 +0300 Subject: [PATCH 066/148] Use simnet `mineBlock` inside `StackExtendAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. 
--- .../tests/pox-4/pox_StackExtendAuthCommand.ts | 98 ++++++++++--------- 1 file changed, 52 insertions(+), 46 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts index a7dbf49cbb..fa796673ea 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -7,6 +7,7 @@ import { } from "./pox_Commands"; import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; import { assert, expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; export class StackExtendAuthCommand implements PoxCommand { readonly wallet: Wallet; @@ -77,51 +78,6 @@ export class StackExtendAuthCommand implements PoxCommand { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const { result: setAuthorization } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.wallet.btcAddress), - // (period uint) - Cl.uint(this.extendCount), - // (reward-cycle uint) - Cl.uint(currentRewCycle), - // (topic (string-ascii 14)) - Cl.stringAscii("stack-extend"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(stacker.amountLocked), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - expect(setAuthorization).toBeOk(Cl.bool(true)); - const stackExtend = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-extend", - [ - // (extend-count uint) - Cl.uint(this.extendCount), - // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) - poxAddressToTuple(this.wallet.btcAddress), - // (signer-sig (optional (buff 65))) - 
Cl.none(), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (max-amount uint) - Cl.uint(stacker.amountLocked), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - const { result: firstExtendCycle } = real.network.callReadOnlyFn( "ST000000000000000000002AMW42H.pox-4", "burn-height-to-reward-cycle", @@ -143,7 +99,57 @@ export class StackExtendAuthCommand implements PoxCommand { const newUnlockHeight = extendedUnlockHeight.value; - expect(stackExtend.result).toBeOk( + // Include the authorization and the `stack-extend` transactions in a single + // block. This way we ensure both the authorization and the stack-extend + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-extend call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + 
), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress), "unlock-burn-height": Cl.uint(newUnlockHeight), From f9b4a1af58a2cb52198294c593c37db0319d5cdb Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 16:04:41 +0300 Subject: [PATCH 067/148] Use simnet `mineBlock` inside `StackAggregationCommitIndexedAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. --- ...tackAggregationCommitIndexedAuthCommand.ts | 98 ++++++++++--------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts index dfe7f2beef..cfafccc674 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts @@ -9,6 +9,7 @@ import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; /** * The `StackAggregationCommitIndexedAuthCommand` allows an operator to @@ -65,54 +66,61 @@ export class StackAggregationCommitIndexedAuthCommand implements PoxCommand { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const committedAmount = operatorWallet.amountToCommit; - const { result: setSignature } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - 
poxAddressToTuple(this.operator.btcAddress), - // (period uint) - Cl.uint(1), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (topic (string-ascii 14)) - Cl.stringAscii("agg-commit"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); - expect(setSignature).toBeOk(Cl.bool(true)); - // Act - const stackAggregationCommitIndexed = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-aggregation-commit-indexed", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (signer-sig (optional (buff 65))) - Cl.none(), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); + + // Include the authorization and the `stack-aggregation-commit-indexed` + // transactions in a single block. This way we ensure both the authorization + // and the stack-aggregation-commit-indexed transactions are called during + // the same reward cycle, so the authorization currentRewCycle param is + // relevant for the upcoming stack-aggregation-commit-indexed call. 
+ const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); // Assert - expect(stackAggregationCommitIndexed.result).toBeOk( + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( Cl.uint(model.nextRewardSetIndex), ); From 9eada5d6aa2c5d8eb1a462ae0b503adde29e0709 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 16:10:23 +0300 Subject: [PATCH 068/148] Use simnet `mineBlock` inside `StackAggregationCommitAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. 
--- .../pox_StackAggregationCommitAuthCommand.ts | 98 ++++++++++--------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts index 5312679833..62622f4bd3 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts @@ -9,6 +9,7 @@ import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; /** * The `StackAggregationCommitAuthCommand` allows an operator to commit @@ -60,54 +61,61 @@ export class StackAggregationCommitAuthCommand implements PoxCommand { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const committedAmount = operatorWallet.amountToCommit; - const { result: setSignature } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (period uint) - Cl.uint(1), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (topic (string-ascii 14)) - Cl.stringAscii("agg-commit"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); - expect(setSignature).toBeOk(Cl.bool(true)); - // Act - const stackAggregationCommit = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-aggregation-commit", - [ - // (pox-addr (tuple (version (buff 1)) 
(hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (signer-sig (optional (buff 65))) - Cl.none(), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); + + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the + // upcoming stack-aggregation-commit call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); // Assert - expect(stackAggregationCommit.result).toBeOk(Cl.bool(true)); + 
expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk(Cl.bool(true)); operatorWallet.amountToCommit -= committedAmount; operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); From 72b0e2c51c8d21130dd33581f8f5925e32ffd393 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 23 May 2024 14:53:53 -0400 Subject: [PATCH 069/148] chore: address PR feedback --- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +++++----- stackslib/src/chainstate/coordinator/mod.rs | 10 +++++----- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 7f8153ce84..2916f3de3c 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -4678,14 +4678,14 @@ impl SortitionDB { Ok(ret) } - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.index_handle(&sortition_id) } /// Open a tx handle at the burn chain tip - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. 
pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.tx_handle_begin(&sortition_id).unwrap() @@ -4695,7 +4695,7 @@ impl SortitionDB { /// Returns Ok(Some(tip info)) on success /// Returns Ok(None) if there are no Nakamoto blocks in this tip /// Returns Err(..) on other DB error - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_nakamoto_tip_hash_and_height( conn: &Connection, tip: &BlockSnapshot, @@ -4720,7 +4720,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_stacks_chain_tip_hash_and_height( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash, u64), db_error> { @@ -4748,7 +4748,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. 
pub fn get_canonical_stacks_chain_tip_hash( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 4c201bfb2c..3d6487d526 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -854,11 +854,11 @@ pub fn get_reward_cycle_info( true }; if need_to_store { - test_debug!( - "Store preprocessed reward set for cycle {} (prepare start sortition {}): {:?}", - prev_reward_cycle, - &first_prepare_sn.sortition_id, - &reward_cycle_info + debug!( + "Store preprocessed reward set for cycle"; + "reward_cycle" => prev_reward_cycle, + "prepare-start sortition" => %first_prepare_sn.sortition_id, + "reward_cycle_info" => format!("{:?}", &reward_cycle_info) ); SortitionDB::store_preprocessed_reward_set( &mut tx, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c23551cb60..1656eab594 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2009,7 +2009,7 @@ impl NakamotoChainState { } /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. 
pub fn get_canonical_block_header( chainstate_conn: &Connection, sortdb: &SortitionDB, From 91f3545ad60c31d320d3e35ae39fac4c85e035db Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 23 May 2024 15:41:44 -0400 Subject: [PATCH 070/148] chore: comment on why we need to continuously re-check the preprocessed reward set of the anchor block is not yet known --- stackslib/src/chainstate/coordinator/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 3d6487d526..f34e21d1bd 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -847,6 +847,14 @@ pub fn get_reward_cycle_info( let mut tx = sort_db.tx_begin()?; let preprocessed_reward_set = SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?; + + // It's possible that we haven't processed the PoX anchor block at the time we have + // processed the burnchain block which commits to it. In this case, the PoX anchor block + // status would be SelectedAndUnknown. However, it's overwhelmingly likely (and in + // Nakamoto, _required_) that the PoX anchor block will be processed shortly thereafter. + // When this happens, we need to _update_ the sortition DB with the newly-processed reward + // set. This code performs this check to determine whether or not we need to store this + // calculated reward set. 
let need_to_store = if let Some(reward_cycle_info) = preprocessed_reward_set { // overwrite if we have an unknown anchor block !reward_cycle_info.is_reward_info_known() From 6b86b953a11d3d0bc3975f306165c7333ba1c980 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 12:56:09 -0700 Subject: [PATCH 071/148] crc: typecasting and returning errors instead of panic in `coordinate_signature` --- stackslib/src/chainstate/nakamoto/mod.rs | 21 ++++++++------- .../stacks-node/src/nakamoto_node/miner.rs | 27 ++++++++++++++++--- 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b5215ee6bc..e0dcd9a95c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -532,11 +532,11 @@ impl NakamotoBlockHeader { }); // HashMap of - let signers_by_pk = signers + let signers_by_pk: HashMap<_, _> = signers .iter() .enumerate() .map(|(i, signer)| (&signer.signing_key, (signer, i))) - .collect::>(); + .collect(); for signature in self.signer_signature.iter() { let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature) @@ -585,18 +585,21 @@ impl NakamotoBlockHeader { return Ok(()); } + /// Compute the threshold for the minimum number of signers (by weight) required + /// to approve a Nakamoto block. 
pub fn compute_voting_weight_threshold(total_weight: u32) -> Result { - let ceil = if (total_weight as u64 * 7) % 10 == 0 { + let threshold = NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD; + let total_weight = u64::from(total_weight); + let ceil = if (total_weight * threshold) % 10 == 0 { 0 } else { 1 }; - u32::try_from((total_weight as u64 * NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD) / 10 + ceil) - .map_err(|_| { - ChainstateError::InvalidStacksBlock( - "Overflow when computing nakamoto block approval threshold".to_string(), - ) - }) + u32::try_from((total_weight * threshold) / 10 + ceil).map_err(|_| { + ChainstateError::InvalidStacksBlock( + "Overflow when computing nakamoto block approval threshold".to_string(), + ) + }) } /// Make an "empty" header whose block data needs to be filled in. diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..8c8575b2d3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -255,13 +255,28 @@ impl BlockMinerThread { true, self.burnchain.pox_constants.clone(), ) - .expect("FATAL: could not open sortition DB"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! 
{e:?}" + )) + })?; + let tip = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &new_block.header.consensus_hash, ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; + let reward_cycle = self .burnchain .pox_constants @@ -269,7 +284,11 @@ impl BlockMinerThread { self.burnchain.first_block_height, self.burn_block.block_height, ) - .expect("FATAL: building on a burn block that is before the first burn block"); + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Building on a burn block that is before the first burn block".into(), + ) + })?; let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { Ok(Some(x)) => x, From 0658bb907345ed7d0baf2a091972a0d4e6ebe9c4 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 13:09:15 -0700 Subject: [PATCH 072/148] crc: return err instead of fatal when updating next_bitvec --- .../src/nakamoto_node/sign_coordinator.rs | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 3cf1c6d144..081852d783 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -709,18 +709,18 @@ impl SignCoordinator { let modified_slots = &event.modified_slots.clone(); // Update `next_signers_bitvec` with the slots that were modified in the event - modified_slots.iter().for_each(|chunk| { - if let Ok(slot_id) = chunk.slot_id.try_into() { - match &self.next_signer_bitvec.set(slot_id, true) { - Err(e) => { - warn!("Failed to set bitvec for 
next signer: {e:?}"); - } - _ => (), - }; - } else { - error!("FATAL: slot_id greater than u16, which should never happen."); - } - }); + for chunk in modified_slots.iter() { + let Ok(slot_id) = chunk.slot_id.try_into() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Unable to modify next_signer_bitvec: slot_id exceeds u16".into(), + )); + }; + if let Err(e) = &self.next_signer_bitvec.set(slot_id, true) { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to set bitvec for next signer: {e:?}" + ))); + }; + } let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); From 74f0cdddde3b9d75ad86306080f9ec5947d30dbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 23 May 2024 16:57:36 -0400 Subject: [PATCH 073/148] chore: address stackerdb audit --- stackslib/src/net/mod.rs | 1 + stackslib/src/net/p2p.rs | 1 + stackslib/src/net/stackerdb/config.rs | 35 ++++++++++- stackslib/src/net/stackerdb/mod.rs | 24 ++++--- stackslib/src/net/stackerdb/tests/config.rs | 70 ++++++++++++++++----- 5 files changed, 103 insertions(+), 28 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..a80ce8bb74 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2540,6 +2540,7 @@ pub mod test { &mut stacks_node.chainstate, &sortdb, old_stackerdb_configs, + config.connection_opts.num_neighbors, ) .expect("Failed to refresh stackerdb configs"); diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795a..b28bd3c4cd 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5363,6 +5363,7 @@ impl PeerNetwork { chainstate, sortdb, stacker_db_configs, + self.connection_opts.num_neighbors, )?; Ok(()) } diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index f2d8521ae4..3d2b7e87cc 100644 --- 
a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -292,6 +292,7 @@ impl StackerDBConfig { contract_id: &QualifiedContractIdentifier, tip: &StacksBlockId, signers: Vec<(StacksAddress, u32)>, + local_max_neighbors: u64, ) -> Result { let value = chainstate.eval_read_only(burn_dbconn, tip, contract_id, "(stackerdb-get-config)")?; @@ -365,11 +366,12 @@ impl StackerDBConfig { )); } - let max_neighbors = config_tuple + let mut max_neighbors = config_tuple .get("max-neighbors") .expect("FATAL: missing 'max-neighbors'") .clone() .expect_u128()?; + if max_neighbors > usize::MAX as u128 { let reason = format!( "Contract {} stipulates a maximum number of neighbors beyond usize::MAX", @@ -382,6 +384,16 @@ impl StackerDBConfig { )); } + if max_neighbors > u128::from(local_max_neighbors) { + warn!( + "Contract {} stipulates a maximum number of neighbors ({}) beyond locally-configured maximum {}; defaulting to locally-configured maximum", + contract_id, + max_neighbors, + local_max_neighbors, + ); + max_neighbors = u128::from(local_max_neighbors); + } + let hint_replicas_list = config_tuple .get("hint-replicas") .expect("FATAL: missing 'hint-replicas'") @@ -435,7 +447,7 @@ impl StackerDBConfig { )); } - if port < 1024 || port > ((u16::MAX - 1) as u128) { + if port < 1024 || port > u128::from(u16::MAX - 1) { let reason = format!( "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", contract_id @@ -446,11 +458,20 @@ impl StackerDBConfig { reason, )); } + // NOTE: port is now known to be in range [1024, 65535] let mut pubkey_hash_slice = [0u8; 20]; pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); + if peer_addr.is_in_private_range() { + debug!( + "Ignoring private IP address '{}' in hint-replias", + &peer_addr.to_socketaddr(port as u16) + ); + continue; + } + let naddr = NeighborAddress { addrbytes: peer_addr, port: port as u16, @@ 
-475,6 +496,7 @@ impl StackerDBConfig { chainstate: &mut StacksChainState, sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, + max_neighbors: u64, ) -> Result { let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? @@ -542,7 +564,14 @@ impl StackerDBConfig { // evaluate the contract for these two functions let signers = Self::eval_signer_slots(chainstate, &dbconn, contract_id, &chain_tip_hash)?; - let config = Self::eval_config(chainstate, &dbconn, contract_id, &chain_tip_hash, signers)?; + let config = Self::eval_config( + chainstate, + &dbconn, + contract_id, + &chain_tip_hash, + signers, + max_neighbors, + )?; Ok(config) } } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index da3ffa4555..5774ab4817 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -267,6 +267,7 @@ impl StackerDBs { chainstate: &mut StacksChainState, sortdb: &SortitionDB, stacker_db_configs: HashMap, + num_neighbors: u64, ) -> Result, net_error> { let existing_contract_ids = self.get_stackerdb_contract_ids()?; let mut new_stackerdb_configs = HashMap::new(); @@ -288,15 +289,20 @@ impl StackerDBs { }) } else { // attempt to load the config from the contract itself - StackerDBConfig::from_smart_contract(chainstate, &sortdb, &stackerdb_contract_id) - .unwrap_or_else(|e| { - warn!( - "Failed to load StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); - StackerDBConfig::noop() - }) + StackerDBConfig::from_smart_contract( + chainstate, + &sortdb, + &stackerdb_contract_id, + num_neighbors, + ) + .unwrap_or_else(|e| { + warn!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + StackerDBConfig::noop() + }) }; // Create the StackerDB replica if it does not exist already if !existing_contract_ids.contains(&stackerdb_contract_id) { diff --git a/stackslib/src/net/stackerdb/tests/config.rs 
b/stackslib/src/net/stackerdb/tests/config.rs index 9600ed79a8..a075d7b974 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ -133,7 +133,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -152,7 +152,7 @@ fn test_valid_and_invalid_stackerdb_configs() { write_freq: 4, max_writes: 56, hint_replicas: vec![NeighborAddress { - addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + addrbytes: PeerAddress::from_ipv4(142, 150, 80, 100), port: 8901, public_key_hash: Hash160::from_hex("0123456789abcdef0123456789abcdef01234567") .unwrap(), @@ -174,7 +174,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -193,7 +193,7 @@ fn test_valid_and_invalid_stackerdb_configs() { write_freq: 4, max_writes: 56, hint_replicas: vec![NeighborAddress { - addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + addrbytes: PeerAddress::from_ipv4(142, 150, 80, 100), port: 8901, public_key_hash: Hash160::from_hex("0123456789abcdef0123456789abcdef01234567") .unwrap(), @@ -212,7 +212,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -234,7 +234,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 
u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -256,7 +256,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -278,7 +278,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -300,7 +300,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -322,7 +322,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -344,7 +344,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -366,7 +366,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 
u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -388,7 +388,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u18446744073709551617, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -432,7 +432,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u1, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -454,7 +454,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u65537, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -462,6 +462,44 @@ fn test_valid_and_invalid_stackerdb_configs() { "#, None, ), + ( + // valid, but private IP and absurd max neighbors are both handled + r#" + (define-public (stackerdb-get-signer-slots) + (ok (list { signer: 'ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B, num-slots: u3 }))) + + (define-public (stackerdb-get-config) + (ok { + chunk-size: u123, + write-freq: u4, + max-writes: u56, + max-neighbors: u1024, + hint-replicas: (list + { + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u192 u168 u0 u1), + port: u8901, + public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 + }) + })) + "#, + Some(StackerDBConfig { + chunk_size: 123, + signers: vec![( + StacksAddress { + version: 26, + bytes: Hash160::from_hex("b4fdae98b64b9cd6c9436f3b965558966afe890b") + .unwrap(), + }, + 
3, + )], + write_freq: 4, + max_writes: 56, + // no neighbors + hint_replicas: vec![], + // max neighbors is truncated + max_neighbors: 32, + }), + ), ]; for (i, (code, _result)) in testcases.iter().enumerate() { @@ -490,7 +528,7 @@ fn test_valid_and_invalid_stackerdb_configs() { ContractName::try_from(format!("test-{}", i)).unwrap(), ); peer.with_db_state(|sortdb, chainstate, _, _| { - match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id) { + match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32) { Ok(config) => { let expected = result .clone() From 4cb4d15a1ddd71afcc56a9a56e62a82e79d94f36 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 14:42:29 -0700 Subject: [PATCH 074/148] feat: load reward set in gather_signatures instead of `run_miner` to prevent panics --- .../stacks-node/src/nakamoto_node/miner.rs | 86 ++++++++++++------- .../src/nakamoto_node/sign_coordinator.rs | 14 --- 2 files changed, 55 insertions(+), 45 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 09a70b1178..618eed7d6c 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -155,27 +155,6 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &self.burn_block.consensus_hash, - ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); - - let reward_set = sort_db - .get_preprocessed_reward_set_of(&tip.sortition_id) - .expect("FATAL: Error fetching reward set") - .expect("FATAL: No reward set 
found for miner") - .known_selected_anchor_block_owned() - .expect("FATAL: No reward set found for miner"); - let mut attempts = 0; // now, actually run this tenure loop { @@ -203,12 +182,11 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - let signer_signature = match self.gather_signatures( + let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, &mut stackerdbs, &mut attempts, - &reward_set, ) { Ok(x) => x, Err(e) => { @@ -243,6 +221,15 @@ impl BlockMinerThread { self.mined_blocks.push(new_block); } + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + continue; + }; + let wait_start = Instant::now(); while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -351,15 +338,14 @@ impl BlockMinerThread { Ok((aggregate_public_key, signature)) } - /// Gather signatures from the signers for the block + /// Gather a list of signatures from the signers for the block fn gather_signatures( &mut self, new_block: &mut NakamotoBlock, burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, - reward_set: &RewardSet, - ) -> Result, NakamotoNodeError> { + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( "No mining key configured, cannot mine", @@ -370,13 +356,47 @@ impl BlockMinerThread { true, self.burnchain.pox_constants.clone(), ) - .expect("FATAL: could not open sortition DB"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! 
{e:?}" + )) + })?; + let tip = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &new_block.header.consensus_hash, ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; + + let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { + Ok(Some(x)) => x, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set found. Cannot initialize miner coordinator.".into(), + )); + } + Err(e) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" + ))); + } + }; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Current reward cycle did not select a reward set. 
Cannot mine!".into(), + )); + }; let reward_cycle = self .burnchain @@ -385,7 +405,11 @@ impl BlockMinerThread { self.burnchain.first_block_height, self.burn_block.block_height, ) - .expect("FATAL: building on a burn block that is before the first burn block"); + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Building on a burn block that is before the first burn block".into(), + ) + })?; let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( @@ -414,7 +438,7 @@ impl BlockMinerThread { &self.globals.counters, )?; - return Ok(signature); + return Ok((reward_set, signature)); } fn get_stackerdb_contract_and_slots( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 081852d783..b0b4463d1d 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -708,20 +708,6 @@ impl SignCoordinator { let modified_slots = &event.modified_slots.clone(); - // Update `next_signers_bitvec` with the slots that were modified in the event - for chunk in modified_slots.iter() { - let Ok(slot_id) = chunk.slot_id.try_into() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unable to modify next_signer_bitvec: slot_id exceeds u16".into(), - )); - }; - if let Err(e) = &self.next_signer_bitvec.set(slot_id, true) { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to set bitvec for next signer: {e:?}" - ))); - }; - } - let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { warn!("Failure parsing StackerDB event into signer event. 
Ignoring message."; "err" => ?e); }) else { From 2275aa877d7524b83929d63ea3f894d37a1e4adb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 14:52:32 -0700 Subject: [PATCH 075/148] feat: remove aggregate key related stuff from sign coordinator --- .../stacks-node/src/nakamoto_node/miner.rs | 68 +++++-------------- .../src/nakamoto_node/sign_coordinator.rs | 26 +------ 2 files changed, 18 insertions(+), 76 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index f92f3fc60a..8f23bfc82d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -280,19 +280,6 @@ impl BlockMinerThread { }) })?; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { Ok(Some(x)) => x, Ok(None) => { @@ -328,19 +315,14 @@ impl BlockMinerThread { }; let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); - let mut coordinator = SignCoordinator::new( - &reward_set, - reward_cycle, - miner_privkey_as_scalar, - Some(aggregate_public_key), - &stackerdbs, - &self.config, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; + let mut coordinator = + SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( + |e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! 
{e:?}" + )) + }, + )?; *attempts += 1; let signature = coordinator.begin_sign_v1( @@ -417,33 +399,15 @@ impl BlockMinerThread { )); }; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); - let mut coordinator = SignCoordinator::new( - &reward_set, - reward_cycle, - miner_privkey_as_scalar, - None, - &stackerdbs, - &self.config, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; + let mut coordinator = + SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( + |e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! 
{e:?}" + )) + }, + )?; *attempts += 1; let signature = coordinator.begin_sign_v0( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b0b4463d1d..0db0ee9e04 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -198,10 +198,7 @@ impl SignCoordinator { /// * `aggregate_public_key` - the active aggregate key for this cycle pub fn new( reward_set: &RewardSet, - reward_cycle: u64, message_key: Scalar, - aggregate_public_key: Option, - stackerdb_conn: &StackerDBs, config: &Config, // v1: bool, ) -> Result { @@ -281,7 +278,7 @@ impl SignCoordinator { }) .collect::, ChainstateError>>()?; - let mut coordinator: FireCoordinator = FireCoordinator::new(coord_config); + let coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING @@ -294,7 +291,7 @@ impl SignCoordinator { if replaced_other { warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); } - let mut sign_coordinator = Self { + let sign_coordinator = Self { coordinator, message_key, receiver: Some(receiver), @@ -306,28 +303,9 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, }; - if let Some(aggregate_public_key) = aggregate_public_key { - sign_coordinator - .coordinator - .set_aggregate_public_key(Some(aggregate_public_key)); - } return Ok(sign_coordinator); } } - if let Some(aggregate_public_key) = aggregate_public_key { - let party_polynomials = get_signer_commitments( - is_mainnet, - reward_set_signers.as_slice(), - stackerdb_conn, - reward_cycle, - &aggregate_public_key, - )?; - if let Err(e) = coordinator - .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) - { - warn!("Failed to set a valid set of party polynomials"; "error" => %e); - }; - } let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); if replaced_other { From edb3aef02f9474fe5c76595ace7dcf5fb642c17d Mon Sep 17 00:00:00 2001 From: BowTiedRadone <92028479+BowTiedRadone@users.noreply.github.com> Date: Fri, 24 May 2024 12:09:09 +0300 Subject: [PATCH 076/148] Fix typo Co-authored-by: Nikos Baxevanis --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index fe1f335176..71d29086ef 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -125,7 +125,7 @@ it("statefully interacts with PoX-4", async () => { delegatedTo: "", delegatedMaxAmount: 0, // We initialize delegatedUntilBurnHt to 0. It will be updated - // after successful delegate-stx calls. 
It's value will be either + // after successful delegate-stx calls. Its value will be either // the unwrapped until-burn-ht uint passed to the delegate-stx, // or undefined for indefinite delegations. delegatedUntilBurnHt: 0, From 3dccf4023a9a9043b5894ce22bc0dcbfd377cfd3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 14:48:47 -0400 Subject: [PATCH 077/148] chore: address PR feedback --- .github/workflows/bitcoin-tests.yml | 3 +- stackslib/src/chainstate/burn/db/sortdb.rs | 227 ++++++++---------- stackslib/src/net/p2p.rs | 81 ++++--- .../stacks-node/src/nakamoto_node/miner.rs | 14 +- 4 files changed, 152 insertions(+), 173 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 87fe5a8f09..8e6997b5e1 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -85,7 +85,8 @@ jobs: - tests::signer::v0::block_proposal_rejection - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected - - tests::signer::v1::filter_bad_transactions + # TODO: enable these once v1 signer is fixed + # - tests::signer::v1::filter_bad_transactions - tests::signer::v1::delayed_dkg # TODO: enable these once v1 signer is fixed # - tests::signer::v1::mine_2_nakamoto_reward_cycles diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 2916f3de3c..8f416b4c39 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3548,128 +3548,45 @@ impl SortitionDB { Ok(()) } - /// Get the prepare phase start sortition ID of a reward cycle - fn inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - index_conn: &SortitionDBConn, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - let prepare_phase_start = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - 
.saturating_sub(pox_constants.prepare_length.into()); - - let first_sortition = get_ancestor_sort_id(index_conn, prepare_phase_start, tip)? - .ok_or_else(|| { - error!( - "Could not find prepare phase start ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_height" => prepare_phase_start - ); - db_error::NotFoundError - })?; - Ok(first_sortition) - } - + /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). + /// See that method for details. pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( &self, tip: &SortitionId, reward_cycle_id: u64, ) -> Result { - Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - &self.index_conn(), - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Get the reward set for a reward cycle, given the reward cycle tip. - /// Return the reward cycle info for this reward cycle - fn inner_get_preprocessed_reward_set_for_reward_cycle( - index_conn: &SortitionDBConn, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result<(RewardCycleInfo, SortitionId), db_error> { - let first_sortition = Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - index_conn, - pox_constants, - first_block_height, - tip, - reward_cycle_id, - )?; - info!("Fetching preprocessed reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_sortition_id" => %first_sortition, - ); - - Ok(( - Self::get_preprocessed_reward_set(index_conn, &first_sortition)? - .ok_or(db_error::NotFoundError)?, - first_sortition, - )) + self.index_conn() + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) } + /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_for_reward_cycle(). 
+ /// See that method for details. pub fn get_preprocessed_reward_set_for_reward_cycle( &self, tip: &SortitionId, reward_cycle_id: u64, ) -> Result<(RewardCycleInfo, SortitionId), db_error> { - Self::inner_get_preprocessed_reward_set_for_reward_cycle( - &self.index_conn(), - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Figure out the reward cycle for `tip` and lookup the preprocessed - /// reward set (if it exists) for the active reward cycle during `tip` - fn inner_get_preprocessed_reward_set_of( - index_conn: &SortitionDBConn, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - ) -> Result { - let tip_sn = SortitionDB::get_block_snapshot(index_conn, tip)?.ok_or_else(|| { - error!( - "Could not find snapshot for sortition while fetching reward set"; - "tip_sortition_id" => %tip, - ); - db_error::NotFoundError - })?; - - // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch - // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last - // block is checked against the signers who were active just before the new reward set is - // calculated. - let reward_cycle_id = pox_constants - .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) - .expect("FATAL: stored snapshot with block height < first_block_height"); - - Self::inner_get_preprocessed_reward_set_for_reward_cycle( - index_conn, - pox_constants, - first_block_height, - tip, - reward_cycle_id, - ) - .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) + self.index_conn() + .get_preprocessed_reward_set_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) } + /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_of(). + /// See that method for details. 
pub fn get_preprocessed_reward_set_of( &self, tip: &SortitionId, ) -> Result { - Ok(Self::inner_get_preprocessed_reward_set_of( - &self.index_conn(), + Ok(self.index_conn().get_preprocessed_reward_set_of( &self.pox_constants, self.first_block_height, tip, @@ -3695,6 +3612,8 @@ impl SortitionDB { Ok(rc_info) } + /// Get the number of entries in the reward set, given a sortition ID within the reward cycle + /// for which this set is active. pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option { let Ok(reward_info) = &self.get_preprocessed_reward_set_of(&tip) else { return None; @@ -3922,44 +3841,98 @@ impl<'a> SortitionDBConn<'a> { Ok(pox_addrs) } - pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. + pub fn get_preprocessed_reward_set_of( &self, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - SortitionDB::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - self, - &self.context.pox_constants, - self.context.first_block_height, + ) -> Result { + let tip_sn = SortitionDB::get_block_snapshot(self, tip)?.ok_or_else(|| { + error!( + "Could not find snapshot for sortition while fetching reward set"; + "tip_sortition_id" => %tip, + ); + db_error::NotFoundError + })?; + + // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch + // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last + // block is checked against the signers who were active just before the new reward set is + // calculated. 
+ let reward_cycle_id = pox_constants + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) + .expect("FATAL: stored snapshot with block height < first_block_height"); + + self.get_preprocessed_reward_set_for_reward_cycle( + pox_constants, + first_block_height, tip, reward_cycle_id, ) + .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } - pub fn get_preprocessed_reward_set_for_reward_cycle( + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( &self, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, reward_cycle_id: u64, - ) -> Result<(RewardCycleInfo, SortitionId), db_error> { - SortitionDB::inner_get_preprocessed_reward_set_for_reward_cycle( - self, - &self.context.pox_constants, - self.context.first_block_height, - tip, - reward_cycle_id, - ) + ) -> Result { + let prepare_phase_start = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(pox_constants.prepare_length.into()); + + let first_sortition = + get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { + error!( + "Could not find prepare phase start ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_height" => prepare_phase_start + ); + db_error::NotFoundError + })?; + Ok(first_sortition) } - pub fn get_preprocessed_reward_set_of( + /// Get the reward set for a reward cycle, given the reward cycle tip. The reward cycle info + /// will be returned for the reward set in which `tip` belongs (i.e. the reward set calculated + /// in the preceding reward cycle). 
+ /// Return the reward cycle info for this reward cycle, as well as the first prepare-phase + /// sortition ID under which this reward cycle info is stored. + /// Returns Error on DB Error, or if the reward cycle info is not processed yet. + pub fn get_preprocessed_reward_set_for_reward_cycle( &self, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, - ) -> Result { - SortitionDB::inner_get_preprocessed_reward_set_of( - self, - &self.context.pox_constants, - self.context.first_block_height, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + let first_sortition = self.get_prepare_phase_start_sortition_id_for_reward_cycle( + pox_constants, + first_block_height, tip, - ) + reward_cycle_id, + )?; + info!("Fetching preprocessed reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_sortition_id" => %first_sortition, + ); + + Ok(( + SortitionDB::get_preprocessed_reward_set(self, &first_sortition)? + .ok_or(db_error::NotFoundError)?, + first_sortition, + )) } } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 821d4dbc1d..d1168abe94 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -261,6 +261,10 @@ pub struct PeerNetwork { /// The reward sets of the current and past reward cycle. /// Needed to validate blocks, which are signed by a threshold of stackers pub current_reward_sets: BTreeMap, + /// The sortition IDs that began the prepare-phases for given reward cycles. This is used to + /// determine whether or not the reward cycle info in `current_reward_sets` is still valid -- a + /// burnchain fork may invalidate them, so the code must check that the sortition ID for the + /// start of the prepare-phase is still canonical. 
pub current_reward_set_ids: BTreeMap, // information about the state of the network's anchor blocks @@ -5427,6 +5431,47 @@ impl PeerNetwork { )) } + /// Clear out old reward cycles + fn free_old_reward_cycles( + &mut self, + sortdb: &SortitionDB, + tip_sortition_id: &SortitionId, + prev_rc: u64, + ) { + if self.current_reward_sets.len() > 3 { + self.current_reward_sets.retain(|old_rc, _| { + if (*old_rc).saturating_add(1) < prev_rc { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { + // shouldn't happen + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + }; + let Ok(prepare_phase_sort_id) = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &tip_sortition_id, + *old_rc, + ) + else { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + }; + if prepare_phase_sort_id != *old_sortition_id { + // non-canonical reward cycle info + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + true + }); + } + } + /// Refresh our view of the last two reward cycles fn refresh_reward_cycles( &mut self, @@ -5469,11 +5514,12 @@ impl PeerNetwork { }) else { // NOTE: this should never be reached + error!("Unreachable code (but not panicking): no reward cycle info for reward cycle {}", rc); continue; }; if !reward_cycle_info.is_reward_info_known() { // haven't yet processed the anchor block, so don't store - test_debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); + debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); continue; } @@ -5488,38 
+5534,7 @@ impl PeerNetwork { } // free memory - if self.current_reward_sets.len() > 3 { - self.current_reward_sets.retain(|old_rc, _| { - if (*old_rc).saturating_add(1) < prev_rc { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - } - let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { - // shouldn't happen - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - let Ok(prepare_phase_sort_id) = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &tip_sn.sortition_id, - *old_rc, - ) - else { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - if prepare_phase_sort_id != *old_sortition_id { - // non-canonical reward cycle info - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - } - true - }); - } + self.free_old_reward_cycles(sortdb, &tip_sn.sortition_id, prev_rc); Ok(()) } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a938c4b7be..929ed681c7 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -42,8 +42,7 @@ use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; -use wsts::curve::ecdsa; -use wsts::curve::point::{Compressed, Point}; +use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; @@ -288,16 +287,7 @@ impl BlockMinerThread { }; // NOTE: this is a placeholder until the API can be fixed - let aggregate_public_key = { - let key_bytes = [ - 0x03, 0xd3, 0xe1, 0x5a, 0x36, 0xf3, 0x2a, 0x9e, 0x71, 0x31, 0x7f, 
0xcb, 0x4a, 0x20, - 0x1b, 0x0c, 0x08, 0xb3, 0xbc, 0xfb, 0xdc, 0x8a, 0xee, 0x2e, 0xe4, 0xd2, 0x69, 0x23, - 0x00, 0x06, 0xb1, 0xa0, 0xcb, - ]; - let ecdsa_pk = ecdsa::PublicKey::try_from(key_bytes.as_slice()).unwrap(); - Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())).unwrap() - }; - + let aggregate_public_key = Point::new(); let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( &reward_set, From 3692d5bc62bdf1ad3795efea694c71a23de31201 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:28:35 -0400 Subject: [PATCH 078/148] fix: don't send more than MAX_PAYLOAD_LEN bytes --- stackslib/src/net/api/gettenure.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs index c3eb4493fe..24c3c87d71 100644 --- a/stackslib/src/net/api/gettenure.rs +++ b/stackslib/src/net/api/gettenure.rs @@ -19,7 +19,7 @@ use std::{fs, io}; use regex::{Captures, Regex}; use serde::de::Error as de_Error; -use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::codec::{StacksMessageCodec, MAX_PAYLOAD_LEN}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks_common::types::net::PeerHost; use stacks_common::util::hash::to_hex; @@ -46,7 +46,7 @@ pub struct RPCNakamotoTenureRequestHandler { /// Block to start streaming from. It and its ancestors will be incrementally streamed until one of /// hte following happens: /// * we reach the first block in the tenure - /// * we would exceed MAX_MESSAGE_LEN bytes transmitted if we started sending the next block + /// * we would exceed MAX_PAYLOAD_LEN bytes transmitted if we started sending the next block pub block_id: Option, /// What's the final block ID to stream from? 
/// Passed as `stop=` query parameter @@ -132,7 +132,7 @@ impl NakamotoTenureStream { self.total_sent = self .total_sent .saturating_add(self.block_stream.total_bytes); - if self.total_sent.saturating_add(parent_size) > MAX_MESSAGE_LEN.into() { + if self.total_sent.saturating_add(parent_size) > MAX_PAYLOAD_LEN.into() { // out of space to send this return Ok(false); } @@ -284,7 +284,7 @@ impl HttpResponse for RPCNakamotoTenureRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + let bytes = parse_bytes(preamble, body, MAX_PAYLOAD_LEN.into())?; Ok(HttpResponsePayload::Bytes(bytes)) } } From 9478e91c3057e212531f1131baa31af746c2befb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:29:13 -0400 Subject: [PATCH 079/148] fix: clear peers after finished downloaders, since it'll be more efficient --- stackslib/src/net/download/nakamoto/tenure_downloader_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 357b588e8a..468a0cf6a5 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -431,8 +431,8 @@ impl NakamotoTenureDownloaderSet { self.num_scheduled_downloaders() ); - self.clear_available_peers(); self.clear_finished_downloaders(); + self.clear_available_peers(); self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); while self.inflight() < count { let Some(ch) = schedule.front() else { From 0659e0ed27de7d28a735ec5cec8dbcb2fa891222 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:29:39 -0400 Subject: [PATCH 080/148] fix: avoid a potentially infinite loop by using .retain() to select scheduled downloads, and fix a last-block-height calculation to ensure that it's always one more than the highest 
desired block height --- .../nakamoto/download_state_machine.rs | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 77cf64dba6..71083b02b2 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -269,8 +269,7 @@ impl NakamotoDownloadStateMachine { .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc.saturating_add(1)) .saturating_sub(1) - .min(tip.block_height) - .saturating_add(1); + .min(tip.block_height.saturating_add(1)); test_debug!( "Load tip sortitions between {} and {} (loaded_so_far = {})", @@ -1229,6 +1228,8 @@ impl NakamotoDownloadStateMachine { .any(|(_, available)| available.contains_key(&wt.tenure_id_consensus_hash)); if is_available && !wt.processed { + // a tenure is available but not yet processed, so we can't yet transition to + // fetching unconfirmed tenures (we'd have no way to validate them). 
return false; } } @@ -1294,14 +1295,16 @@ impl NakamotoDownloadStateMachine { count: usize, downloaders: &mut HashMap, highest_processed_block_id: Option, - ) { - while downloaders.len() < count { - let Some(naddr) = schedule.front() else { - break; - }; + ) -> usize { + let mut added = 0; + schedule.retain(|naddr| { if downloaders.contains_key(naddr) { - continue; + return true; + } + if added >= count { + return true; } + let unconfirmed_tenure_download = NakamotoUnconfirmedTenureDownloader::new( naddr.clone(), highest_processed_block_id.clone(), @@ -1309,8 +1312,10 @@ impl NakamotoDownloadStateMachine { test_debug!("Request unconfirmed tenure state from neighbor {}", &naddr); downloaders.insert(naddr.clone(), unconfirmed_tenure_download); - schedule.pop_front(); - } + added += 1; + false + }); + added } /// Update our unconfirmed tenure download state machines From ba4b2911febcb23f992962f6674ef0995b6c56a3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:30:25 -0400 Subject: [PATCH 081/148] fix: only return blocks that we have validated --- .../nakamoto/tenure_downloader_unconfirmed.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4c48a5762f..e53ba5c2a2 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -311,7 +311,7 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(Some(unconfirmed_aggregate_public_key)) = agg_pubkeys.get(&tenure_rc).cloned() else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No aggregate public key for unconfirmed tenure {} (rc {})", &local_tenure_sn.consensus_hash, tenure_rc ); return Err(NetError::InvalidState); @@ -447,6 +447,7 @@ impl NakamotoUnconfirmedTenureDownloader { // If there's a tenure-start 
block, it must be last. let mut expected_block_id = last_block_id; let mut finished_download = false; + let mut last_block_index = None; for (cnt, block) in tenure_blocks.iter().enumerate() { if &block.header.block_id() != expected_block_id { warn!("Unexpected Nakamoto block -- not part of tenure"; @@ -493,6 +494,7 @@ impl NakamotoUnconfirmedTenureDownloader { } finished_download = true; + last_block_index = Some(cnt); break; } @@ -501,7 +503,9 @@ impl NakamotoUnconfirmedTenureDownloader { if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { if expected_block_id == highest_processed_block_id { // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); finished_download = true; + last_block_index = Some(cnt); break; } } @@ -511,15 +515,22 @@ impl NakamotoUnconfirmedTenureDownloader { if let Some(highest_processed_block_height) = self.highest_processed_block_height.as_ref() { - if &block.header.chain_length < highest_processed_block_height { + if &block.header.chain_length <= highest_processed_block_height { // no need to continue this download debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); finished_download = true; + last_block_index = Some(cnt); break; } } expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); } if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { From 51961521adbe453983a228cab16fac5c0b34eeb2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:30:45 -0400 Subject: [PATCH 082/148] fix: remove unused NakamotoInvState enum --- stackslib/src/net/inv/nakamoto.rs | 14 
++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index de46d15744..c173de16b6 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -247,8 +247,6 @@ impl InvGenerator { #[derive(Debug, PartialEq, Clone)] pub struct NakamotoTenureInv { - /// What state is the machine in? - pub state: NakamotoInvState, /// Bitmap of which tenures a peer has. /// Maps reward cycle to bitmap. pub tenures_inv: BTreeMap>, @@ -279,7 +277,6 @@ impl NakamotoTenureInv { neighbor_address: NeighborAddress, ) -> Self { Self { - state: NakamotoInvState::GetNakamotoInvBegin, tenures_inv: BTreeMap::new(), last_updated_at: 0, first_block_height, @@ -335,7 +332,8 @@ impl NakamotoTenureInv { /// Add in a newly-discovered inventory. /// NOTE: inventories are supposed to be aligned to the reward cycle - /// Returns true if we learned about at least one new tenure-start block + /// Returns true if the tenure bitvec has changed -- we either learned about a new tenure-start + /// block, or the remote peer "un-learned" it (e.g. due to a reorg). /// Returns false if not. 
pub fn merge_tenure_inv(&mut self, tenure_inv: BitVec<2100>, reward_cycle: u64) -> bool { // populate the tenures bitmap to we can fit this tenures inv @@ -367,7 +365,6 @@ impl NakamotoTenureInv { && (self.cur_reward_cycle >= cur_rc || !self.online) { test_debug!("Reset inv comms for {}", &self.neighbor_address); - self.state = NakamotoInvState::GetNakamotoInvBegin; self.online = true; self.start_sync_time = now; self.cur_reward_cycle = start_rc; @@ -474,13 +471,6 @@ impl NakamotoTenureInv { } } -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum NakamotoInvState { - GetNakamotoInvBegin, - GetNakamotoInvFinish, - Done, -} - /// Nakamoto inventory state machine pub struct NakamotoInvStateMachine { /// Communications links From 137651db7f973bd87a6903758a2e04284d46e594 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:31:11 -0400 Subject: [PATCH 083/148] feat: test the absence of infinite loops when making tenure downloaders, and test that we only accept unconfirmed tenure blocks we've validated --- stackslib/src/net/tests/download/nakamoto.rs | 126 ++++++++++++++++++- 1 file changed, 125 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 31c42c8afb..3ef0469022 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::sync::mpsc::sync_channel; use std::thread; @@ -439,6 +439,62 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { burn_height: peer.network.burnchain_tip.block_height, }; + // we can make unconfirmed tenure downloaders + { + let mut empty_schedule = VecDeque::new(); + let mut full_schedule = { + let mut sched = VecDeque::new(); + sched.push_back(naddr.clone()); + sched + }; + let mut empty_downloaders = HashMap::new(); + let mut full_downloaders = { + let mut dl = HashMap::new(); + let utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); + dl.insert(naddr.clone(), utd); + dl + }; + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut empty_schedule, + 10, + &mut empty_downloaders, + None + ), + 0 + ); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut empty_schedule, + 10, + &mut full_downloaders, + None + ), + 0 + ); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut full_schedule, + 10, + &mut full_downloaders, + None + ), + 0 + ); + assert_eq!(full_schedule.len(), 1); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut full_schedule, + 10, + &mut empty_downloaders, + None + ), + 1 + ); + assert_eq!(full_schedule.len(), 0); + assert_eq!(empty_downloaders.len(), 1); + } + // we've processed the tip already, so we transition straight to the Done state { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); @@ -796,6 +852,74 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .try_accept_unconfirmed_tenure_blocks(vec![bad_block]) .is_err()); } + + // Does not consume blocks beyond the highest processed block ID + { + let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); + utd.confirmed_aggregate_public_key = + 
Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.unconfirmed_aggregate_public_key = + Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + + assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); + + let tenure_tip = RPCGetTenureInfo { + consensus_hash: peer.network.stacks_tip.0.clone(), + tenure_start_block_id: peer.network.tenure_start_block_id.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_tenure_start_block_id: StacksBlockId::new( + &peer.network.parent_stacks_tip.0, + &peer.network.parent_stacks_tip.1, + ), + tip_block_id: StacksBlockId::new( + &peer.network.stacks_tip.0, + &peer.network.stacks_tip.1, + ), + tip_height: peer.network.stacks_tip.2, + reward_cycle: tip_rc, + }; + + let sortdb = peer.sortdb.take().unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + utd.try_accept_tenure_info( + &sortdb, + &sort_tip, + peer.chainstate(), + tenure_tip.clone(), + &agg_pubkeys, + ) + .unwrap(); + + peer.sortdb = Some(sortdb); + + assert!(utd.unconfirmed_tenure_start_block.is_some()); + + utd.highest_processed_block_id = Some(unconfirmed_tenure[1].header.block_id()); + let res = utd + .try_accept_unconfirmed_tenure_blocks( + unconfirmed_tenure.clone().into_iter().rev().collect(), + ) + .unwrap(); + assert_eq!(res.unwrap().as_slice(), &unconfirmed_tenure[1..]); + + assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); + + // we can request the highest-complete tenure + assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); + + let ntd = utd + .make_highest_complete_tenure_downloader( + &highest_confirmed_wanted_tenure, + &unconfirmed_wanted_tenure, + ) + .unwrap(); + assert_eq!( + ntd.state, + NakamotoTenureDownloadState::GetTenureStartBlock( + unconfirmed_wanted_tenure.winning_block_id.clone() + ) + ); + } } #[test] From 812f70fda4fdd6eaaa55b6d46b83dfcfcd5452d7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 
2024 15:42:36 -0400 Subject: [PATCH 084/148] fix: compile error --- testnet/stacks-node/src/neon_node.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b6ac17e51e..a6b3035938 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4610,7 +4610,12 @@ impl StacksNode { stackerdb_configs.insert(contract.clone(), StackerDBConfig::noop()); } let stackerdb_configs = stackerdbs - .create_or_reconfigure_stackerdbs(&mut chainstate, &sortdb, stackerdb_configs) + .create_or_reconfigure_stackerdbs( + &mut chainstate, + &sortdb, + stackerdb_configs, + config.connection_options.num_neighbors, + ) .unwrap(); let stackerdb_contract_ids: Vec = From b2c65e965dc0157b5423af88a0f57f91795d4d18 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:43:13 -0400 Subject: [PATCH 085/148] fix: typo --- stackslib/src/net/stackerdb/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 3d2b7e87cc..5545aa46cd 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -466,7 +466,7 @@ impl StackerDBConfig { let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); if peer_addr.is_in_private_range() { debug!( - "Ignoring private IP address '{}' in hint-replias", + "Ignoring private IP address '{}' in hint-replicas", &peer_addr.to_socketaddr(port as u16) ); continue; From b4c5e64a0312452d679ba00fc391fea5f5093532 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 27 May 2024 22:25:13 +0300 Subject: [PATCH 086/148] add output to print sqlite version --- stacks-signer/src/signerdb.rs | 18 ++++++++++++++++++ stackslib/src/chainstate/nakamoto/mod.rs | 21 ++++++++++++--------- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- 3 files changed, 31 insertions(+), 10 deletions(-) 
diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 8c6b3ba187..04dc1f65bb 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -144,6 +144,17 @@ impl SignerDb { ) } + /// Get the sqlite version from the database + pub fn get_sqlite_version( + &self, + ) -> Result, DBError> { + query_row( + &self.db, + "SELECT sqlite_version()", + NO_PARAMS, + ) + } + /// Insert the given state in the `signer_states` table for the given reward cycle pub fn insert_encrypted_signer_state( &self, @@ -391,4 +402,11 @@ mod tests { .expect("Failed to get signer state") .is_none()); } + + #[test] + fn test_display_sqlite_version() { + let db_path = tmp_db_path(); + let db = SignerDb::new(db_path).expect("Failed to create signer db"); + println!("sqlite version is: {:#?}", db.get_sqlite_version()); + } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index bcc03cdeca..d3541fae22 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -207,18 +207,21 @@ lazy_static! 
{ ); CREATE INDEX nakamoto_block_headers_by_consensus_hash ON nakamoto_block_headers(consensus_hash); "#.into(), - format!( - r#"ALTER TABLE payments - ADD COLUMN schedule_type TEXT NOT NULL DEFAULT "{}"; - "#, - HeaderTypeNames::Epoch2.get_name_str()), - r#" - UPDATE db_config SET version = "4"; - "#.into(), + format!( + r#"ALTER TABLE payments + ADD COLUMN schedule_type TEXT NOT NULL DEFAULT "{}"; + "#, + HeaderTypeNames::Epoch2.get_name_str()), + r#" + UPDATE db_config SET version = "4"; + "#.into(), ]; pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_2: Vec = vec![ - NAKAMOTO_TENURES_SCHEMA_2.into() + NAKAMOTO_TENURES_SCHEMA_2.into(), + r#" + UPDATE db_config SET version = "5"; + "#.into(), ]; } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index fde669760d..2edf9f1e87 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -186,7 +186,7 @@ pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" tenure_index INTEGER NOT NULL, PRIMARY KEY(burn_view_consensus_hash,tenure_index) - ) STRICT; + ); CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); From 7b423bdfea2a2dbdbf22352f204f6a45fb3a0194 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 27 May 2024 22:42:22 +0300 Subject: [PATCH 087/148] update format --- stacks-signer/src/signerdb.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 04dc1f65bb..7b6c40745f 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -144,15 +144,9 @@ impl SignerDb { ) } - /// Get the sqlite version from the database - pub fn get_sqlite_version( - &self, - ) -> Result, DBError> { - 
query_row( - &self.db, - "SELECT sqlite_version()", - NO_PARAMS, - ) + /// Get the sqlite version from the database + pub fn get_sqlite_version(&self) -> Result, DBError> { + query_row(&self.db, "SELECT sqlite_version()", NO_PARAMS) } /// Insert the given state in the `signer_states` table for the given reward cycle From 9df9ca604440e98878e8d59bb5eb5991f98ef86d Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 28 May 2024 16:44:15 +0300 Subject: [PATCH 088/148] display nakamoto db sqlite version version is "3.33.0" --- stackslib/src/chainstate/stacks/db/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index f10a87dccc..1a6e59f4c0 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1031,6 +1031,14 @@ impl StacksChainState { Ok(config.expect("BUG: no db_config installed")) } + pub fn get_db_version(conn: &DBConn) -> Result { + let option_version = query_row::(conn, "SELECT sqlite_version()", NO_PARAMS)?; + if let Some(version) = option_version { + return Ok(version); + } + Ok("no version".into()) + } + fn apply_schema_migrations<'a>( tx: &DBTx<'a>, mainnet: bool, @@ -1054,6 +1062,9 @@ impl StacksChainState { ); return Err(Error::InvalidChainstateDB); } + + println!("This is the sqlite version: {:#?}", Self::get_db_version(tx)); + if db_config.version != CHAINSTATE_VERSION { while db_config.version != CHAINSTATE_VERSION { From 37809a42f2c3c0b0632c8d12be38c146287512b5 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 28 May 2024 17:11:58 +0300 Subject: [PATCH 089/148] remove sqlite version info --- stacks-signer/src/signerdb.rs | 12 ------------ stackslib/src/chainstate/stacks/db/mod.rs | 11 ----------- 2 files changed, 23 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 7b6c40745f..8c6b3ba187 100644 --- a/stacks-signer/src/signerdb.rs +++ 
b/stacks-signer/src/signerdb.rs @@ -144,11 +144,6 @@ impl SignerDb { ) } - /// Get the sqlite version from the database - pub fn get_sqlite_version(&self) -> Result, DBError> { - query_row(&self.db, "SELECT sqlite_version()", NO_PARAMS) - } - /// Insert the given state in the `signer_states` table for the given reward cycle pub fn insert_encrypted_signer_state( &self, @@ -396,11 +391,4 @@ mod tests { .expect("Failed to get signer state") .is_none()); } - - #[test] - fn test_display_sqlite_version() { - let db_path = tmp_db_path(); - let db = SignerDb::new(db_path).expect("Failed to create signer db"); - println!("sqlite version is: {:#?}", db.get_sqlite_version()); - } } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 1a6e59f4c0..f10a87dccc 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1031,14 +1031,6 @@ impl StacksChainState { Ok(config.expect("BUG: no db_config installed")) } - pub fn get_db_version(conn: &DBConn) -> Result { - let option_version = query_row::(conn, "SELECT sqlite_version()", NO_PARAMS)?; - if let Some(version) = option_version { - return Ok(version); - } - Ok("no version".into()) - } - fn apply_schema_migrations<'a>( tx: &DBTx<'a>, mainnet: bool, @@ -1062,9 +1054,6 @@ impl StacksChainState { ); return Err(Error::InvalidChainstateDB); } - - println!("This is the sqlite version: {:#?}", Self::get_db_version(tx)); - if db_config.version != CHAINSTATE_VERSION { while db_config.version != CHAINSTATE_VERSION { From c24aee1ef314230d05fcfcda95e2be45d1585fd4 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 17:30:07 +0300 Subject: [PATCH 090/148] Update the model to support multiple allowed contract callers This commit: - updates the old way of updating the model after successful `allow-contract-caller` function calls. Before it was updating the allowed contract caller. 
The current approach pushes the allowed contract caller to a list (only if needed). - updates the way of updating the model after successful disallow-contract-caller function calls. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 6 ++-- .../pox-4/pox_AllowContractCallerCommand.ts | 29 +++++-------------- .../tests/pox-4/pox_CommandModel.ts | 2 +- .../pox_DisallowContractCallerCommand.ts | 11 +++++-- 4 files changed, 20 insertions(+), 28 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 71d29086ef..bf8b63ffe7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -125,8 +125,8 @@ it("statefully interacts with PoX-4", async () => { delegatedTo: "", delegatedMaxAmount: 0, // We initialize delegatedUntilBurnHt to 0. It will be updated - // after successful delegate-stx calls. Its value will be either - // the unwrapped until-burn-ht uint passed to the delegate-stx, + // after successful delegate-stx calls. It's value will be either + // the unwrapped until-burn-ht uint passed to the delegate-stx, // or undefined for indefinite delegations. 
delegatedUntilBurnHt: 0, delegatedPoxAddress: "", @@ -134,7 +134,7 @@ it("statefully interacts with PoX-4", async () => { amountUnlocked: 100_000_000_000_000, unlockHeight: 0, firstLockedRewardCycle: 0, - allowedContractCaller: "", + allowedContractCallers: [], callerAllowedBy: [], committedRewCycleIndexes: [], }])), diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts index dad1a381a5..141676cdae 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts @@ -74,32 +74,17 @@ export class AllowContractCallerCommand implements PoxCommand { // Get the wallets involved from the model and update it with the new state. const wallet = model.stackers.get(this.wallet.stxAddress)!; - const callerAllowedBefore = wallet.allowedContractCaller; - - const callerAllowedBeforeState = model.stackers.get(callerAllowedBefore) || - null; - - if (callerAllowedBeforeState) { - // Remove the allower from the ex-allowed caller's allowance list. - - const walletIndexInsideAllowedByList = callerAllowedBeforeState - .callerAllowedBy.indexOf( - this.wallet.stxAddress, - ); - - expect(walletIndexInsideAllowedByList).toBeGreaterThan(-1); - - callerAllowedBeforeState.callerAllowedBy.splice( - walletIndexInsideAllowedByList, - 1, - ); - } const callerToAllow = model.stackers.get(this.allowanceTo.stxAddress)!; // Update model so that we know this wallet has authorized a contract-caller. + // If the caller is already allowed, there's no need to add it again. 
+ const callerToAllowIndexInAllowedList = wallet.allowedContractCallers + .indexOf(this.allowanceTo.stxAddress); - wallet.allowedContractCaller = this.allowanceTo.stxAddress; - callerToAllow.callerAllowedBy.push(this.wallet.stxAddress); + if (callerToAllowIndexInAllowedList == -1) { + wallet.allowedContractCallers.push(this.allowanceTo.stxAddress); + callerToAllow.callerAllowedBy.push(this.wallet.stxAddress); + } // Log to console for debugging purposes. This is not necessary for the // test to pass but it is useful for debugging and eyeballing the test. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index cdf211c3ed..ce1d2a28b4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -191,7 +191,7 @@ export type Stacker = { amountUnlocked: number; unlockHeight: number; firstLockedRewardCycle: number; - allowedContractCaller: StxAddress; + allowedContractCallers: StxAddress[]; callerAllowedBy: StxAddress[]; committedRewCycleIndexes: number[]; }; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts index 09618db49c..16b830b5fb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts @@ -42,7 +42,9 @@ export class DisallowContractCallerCommand implements PoxCommand { this.callerToDisallow.stxAddress, )!; return ( - stacker.allowedContractCaller === this.callerToDisallow.stxAddress && + stacker.allowedContractCallers.includes( + this.callerToDisallow.stxAddress, + ) && callerToDisallow.callerAllowedBy.includes( this.stacker.stxAddress, ) === @@ -76,7 
+78,12 @@ export class DisallowContractCallerCommand implements PoxCommand { // Update model so that we know that the stacker has revoked stacking // allowance. const stacker = model.stackers.get(this.stacker.stxAddress)!; - stacker.allowedContractCaller = ""; + const callerToDisallowIndex = stacker.allowedContractCallers.indexOf( + this.callerToDisallow.stxAddress, + ); + + expect(callerToDisallowIndex).toBeGreaterThan(-1); + stacker.allowedContractCallers.splice(callerToDisallowIndex, 1); // Remove the operator from the caller to disallow's allowance list. const walletIndexAllowedByList = callerToDisallow.callerAllowedBy.indexOf( From 0e68198516e6c90a5a4bf61b714c104a881aa201 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 28 May 2024 13:49:52 -0400 Subject: [PATCH 091/148] chore: indicate in the docs that the reward set must correspond to the sortition ID --- stackslib/src/net/p2p.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d1168abe94..fbb6c375ed 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -265,6 +265,8 @@ pub struct PeerNetwork { /// determine whether or not the reward cycle info in `current_reward_sets` is still valid -- a /// burnchain fork may invalidate them, so the code must check that the sortition ID for the /// start of the prepare-phase is still canonical. + /// This needs to be in 1-to-1 correspondence with `current_reward_sets` -- the sortition IDs + /// that make up the values need to correspond to the reward sets computed as of the sortition. 
pub current_reward_set_ids: BTreeMap, // information about the state of the network's anchor blocks From 9920ce5e8f44a13a0a993d5ce25bc01a677675ac Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 May 2024 22:52:05 -0400 Subject: [PATCH 092/148] fix: use sortition handle at correct tip --- stackslib/src/chainstate/burn/db/sortdb.rs | 14 +++++ stackslib/src/chainstate/coordinator/mod.rs | 4 +- .../chainstate/nakamoto/coordinator/mod.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 50 ++++++++------- .../chainstate/stacks/boot/signers_tests.rs | 44 +++++++------ stackslib/src/chainstate/stacks/db/blocks.rs | 5 +- .../src/chainstate/stacks/db/unconfirmed.rs | 61 ++++++++++--------- stackslib/src/clarity_vm/database/mod.rs | 15 +++-- stackslib/src/core/mempool.rs | 9 ++- stackslib/src/main.rs | 2 +- stackslib/src/net/api/callreadonly.rs | 2 +- stackslib/src/net/api/getaccount.rs | 2 +- stackslib/src/net/api/getconstantval.rs | 2 +- stackslib/src/net/api/getcontractabi.rs | 2 +- stackslib/src/net/api/getcontractsrc.rs | 2 +- stackslib/src/net/api/getdatavar.rs | 2 +- .../src/net/api/getistraitimplemented.rs | 2 +- stackslib/src/net/api/getmapentry.rs | 2 +- stackslib/src/net/api/getpoxinfo.rs | 26 ++++---- stackslib/src/net/api/postblock_proposal.rs | 2 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 6 +- stackslib/src/net/stackerdb/config.rs | 2 +- stackslib/src/util_lib/db.rs | 12 ++++ .../stacks-node/src/nakamoto_node/miner.rs | 20 ++++-- testnet/stacks-node/src/neon_node.rs | 17 +++--- testnet/stacks-node/src/node.rs | 3 +- testnet/stacks-node/src/run_loop/helium.rs | 23 ++++--- testnet/stacks-node/src/tests/epoch_21.rs | 4 +- testnet/stacks-node/src/tests/epoch_22.rs | 4 +- testnet/stacks-node/src/tests/epoch_24.rs | 4 +- .../src/tests/neon_integrations.rs | 30 +++++++-- 32 files changed, 239 insertions(+), 140 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 
eb49daa50a..3fa528995a 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2653,6 +2653,20 @@ impl SortitionDB { ) } + pub fn index_handle_at_block<'a>( + &'a self, + chainstate: &StacksChainState, + stacks_block_id: &StacksBlockId, + ) -> Result, db_error> { + let (consensus_hash, bhh) = match chainstate.get_block_header_hashes(stacks_block_id) { + Ok(Some(x)) => x, + _ => return Err(db_error::NotFoundError), + }; + let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &consensus_hash)? + .ok_or(db_error::NotFoundError)?; + Ok(self.index_handle(&snapshot.sortition_id)) + } + pub fn tx_handle_begin<'a>( &'a mut self, chain_tip: &SortitionId, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 2836ec7b4c..96eae44641 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -3274,7 +3274,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -3288,7 +3288,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index abb89e1839..f399615c80 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -656,7 +656,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() 
.get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -670,7 +670,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index e6e02eaab5..9c2262d958 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -569,7 +569,7 @@ impl StacksChainState { boot_contract_name: &str, code: &str, ) -> Result { - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(self, stacks_block_id)?; let dbconn = self.state_index.sqlite_conn(); self.clarity_state .eval_read_only( @@ -631,24 +631,28 @@ impl StacksChainState { let cost_track = LimitedCostTracker::new_free(); let sender = PrincipalData::Standard(StandardPrincipalData::transient()); let result = self - .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity1, - sender, - None, - cost_track, - |env| { - env.execute_contract( - &contract_identifier, - function, - &[SymbolicExpression::atom_value(Value::UInt(reward_cycle))], - true, - ) - }, - ) - })? + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(self, tip)?, + tip, + |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity1, + sender, + None, + cost_track, + |env| { + env.execute_contract( + &contract_identifier, + function, + &[SymbolicExpression::atom_value(Value::UInt(reward_cycle))], + true, + ) + }, + ) + }, + )? .ok_or_else(|| Error::NoSuchBlockError)?? 
.expect_u128() .expect("FATAL: unexpected PoX structure"); @@ -1843,7 +1847,9 @@ pub mod test { let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb + .index_handle_at_block(&chainstate, &stacks_block_id) + .unwrap(), &stacks_block_id, |clarity_tx| StacksChainState::get_account(clarity_tx, addr), ) @@ -1859,7 +1865,9 @@ pub mod test { let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb + .index_handle_at_block(chainstate, &stacks_block_id) + .unwrap(), &stacks_block_id, |clarity_tx| StacksChainState::get_contract(clarity_tx, addr).unwrap(), ) diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 67fffd878a..ad4a8ae2db 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -483,26 +483,30 @@ pub fn readonly_call_with_sortdb( args: Vec, ) -> Value { chainstate - .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |connection| { - connection - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::from(boot_code_addr(false)), - None, - LimitedCostTracker::new_free(), - |env| { - env.execute_contract_allow_private( - &boot_code_id(&boot_contract, false), - &function_name, - &symbols_from_values(args), - true, - ) - }, - ) - .unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, tip).unwrap(), + tip, + |connection| { + connection + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::from(boot_code_addr(false)), + None, + LimitedCostTracker::new_free(), + |env| { + env.execute_contract_allow_private( + &boot_code_id(&boot_contract, false), + &function_name, + &symbols_from_values(args), + true, + ) 
+ }, + ) + .unwrap() + }, + ) .unwrap() } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 19c7f59969..dfff8c52a1 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -11926,9 +11926,12 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); + let iconn = sortdb + .index_handle_at_block(peer.chainstate(), &tip_hash) + .unwrap(); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&iconn, &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 0e3ae3ae88..fc928fa196 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -777,8 +777,9 @@ mod test { // build 1-block microblock stream let microblocks = { let sortdb = peer.sortdb.take().unwrap(); - let sort_iconn = sortdb.index_handle_at_tip(); - + let sort_iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) .unwrap(); @@ -851,22 +852,22 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) + .reload_unconfirmed_state(&iconn, canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx( - 
&sortdb.index_handle_at_tip(), - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }, - ) + .with_read_only_unconfirmed_clarity_tx(&iconn, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }) .unwrap() .unwrap(); peer.sortdb = Some(sortdb); @@ -877,19 +878,18 @@ mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), - &canonical_tip, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }, - ) + .with_read_only_clarity_tx(&iconn, &canonical_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }) .unwrap(); peer.sortdb = Some(sortdb); @@ -1014,9 +1014,11 @@ mod test { // build microblock stream iteratively, and test balances at each additional microblock let sortdb = peer.sortdb.take().unwrap(); let microblocks = { - let sort_iconn = sortdb.index_handle_at_tip(); + let sort_iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) + .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) .unwrap(); let mut microblock_builder = StacksMicroblockBuilder::new( @@ -1399,13 +1401,16 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + 
.unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) + .reload_unconfirmed_state(&iconn, canonical_tip.clone()) .unwrap(); let db_recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_handle_at_tip(), |clarity_tx| { + .with_read_only_unconfirmed_clarity_tx(&iconn, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 8282a5fd8d..1aff287eaf 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -440,10 +440,7 @@ impl SortitionDBRef for SortitionHandleConn<'_> { parent_stacks_block_burn_ht: u64, cycle_index: u64, ) -> Result, ChainstateError> { - let readonly_marf = self - .index - .reopen_readonly() - .expect("BUG: failure trying to get a read-only interface into the sortition db."); + let readonly_marf = self.index.reopen_readonly()?; let mut context = self.context.clone(); context.chain_tip = sortition_id.clone(); let mut handle = SortitionHandleConn::new(&readonly_marf, context); @@ -592,12 +589,18 @@ impl BurnStateDB for SortitionHandleTx<'_> { impl BurnStateDB for SortitionHandleConn<'_> { fn get_tip_burn_block_height(&self) -> Option { - let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + let tip = match SortitionDB::get_block_snapshot(self.conn(), &self.context.chain_tip) { + Ok(Some(x)) => x, + _ => return None, + }; tip.block_height.try_into().ok() } fn get_tip_sortition_id(&self) -> Option { - let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + let tip = match SortitionDB::get_block_snapshot(self.conn(), &self.context.chain_tip) { + Ok(Some(x)) => x, + _ => return None, + }; Some(tip.sortition_id) } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 078e25586d..dc67539573 
100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -321,8 +321,15 @@ impl MemPoolAdmitter { tx: &StacksTransaction, tx_size: u64, ) -> Result<(), MemPoolRejection> { + let sortition_id = match SortitionDB::get_sortition_id_by_consensus( + &sortdb.conn(), + &self.cur_consensus_hash, + ) { + Ok(Some(x)) => x, + _ => return Err(MemPoolRejection::DBError(db_error::NotFoundError)), + }; chainstate.will_admit_mempool_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&sortition_id), &self.cur_consensus_hash, &self.cur_block, tx, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index e2f3110d67..a10353ced3 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -641,7 +641,7 @@ simulating a miner. let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_handle_at_tip(), + &sort_db.index_handle(&chain_tip.sortition_id), &mut mempool_db, &parent_header, chain_tip.total_burn, diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs index dc24de1ae4..150ed1ca1e 100644 --- a/stackslib/src/net/api/callreadonly.rs +++ b/stackslib/src/net/api/callreadonly.rs @@ -235,7 +235,7 @@ impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { cost_limit.write_count = 0; chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { let epoch = clarity_tx.get_epoch(); diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index cbd4338ac6..7cbf0a8210 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -147,7 +147,7 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let account_opt_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { 
clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs index 4b3068dd5d..b08d1c6835 100644 --- a/stackslib/src/net/api/getconstantval.rs +++ b/stackslib/src/net/api/getconstantval.rs @@ -145,7 +145,7 @@ impl RPCRequestHandler for RPCGetConstantValRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs index 35914de9e9..d98c2c6623 100644 --- a/stackslib/src/net/api/getcontractabi.rs +++ b/stackslib/src/net/api/getcontractabi.rs @@ -133,7 +133,7 @@ impl RPCRequestHandler for RPCGetContractAbiRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { let epoch = clarity_tx.get_epoch(); diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index 1c20bffd1b..139995988e 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -141,7 +141,7 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index 124fb4856f..f3a4acb7d3 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ 
b/stackslib/src/net/api/getdatavar.rs @@ -155,7 +155,7 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs index aac4079074..3b8e07ad1a 100644 --- a/stackslib/src/net/api/getistraitimplemented.rs +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -161,7 +161,7 @@ impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index 5d0cd7504f..cb318b5996 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -184,7 +184,7 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index c3de3ab0da..81868c81f8 100644 --- a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -190,17 +190,21 @@ impl RPCPoxInfoData { + 1; let data = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { - clarity_tx.with_readonly_clarity_env( - 
mainnet, - chain_id, - ClarityVersion::Clarity2, - sender, - None, - cost_track, - |env| env.execute_contract(&contract_identifier, function, &[], true), - ) - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, tip)?, + tip, + |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + sender, + None, + cost_track, + |env| env.execute_contract(&contract_identifier, function, &[], true), + ) + }, + ) .map_err(|_| NetError::NotFoundError)?; let res = match data { diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 0423d5c57b..43189b6847 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -206,8 +206,8 @@ impl NakamotoBlockProposal { }); } - let burn_dbconn: SortitionHandleConn = sortdb.index_handle_at_tip(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); let mut db_handle = sortdb.index_handle(&sort_tip); let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index c1318e6647..f853bb795a 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5892,7 +5892,7 @@ impl PeerNetwork { return false; } let stacks_epoch = match sortdb - .index_handle_at_tip() + .index_conn() .get_stacks_epoch(burnchain_tip.block_height as u32) { Some(epoch) => epoch, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index cb8605ee41..fa11b575d4 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1840,8 +1840,10 @@ impl Relayer { "Reload unconfirmed state off of {}/{}", &canonical_consensus_hash, &canonical_block_hash ); - let processed_unconfirmed_state = - chainstate.reload_unconfirmed_state(&sortdb.index_handle_at_tip(), 
canonical_tip)?; + let processed_unconfirmed_state = chainstate.reload_unconfirmed_state( + &sortdb.index_handle_at_block(chainstate, &canonical_tip)?, + canonical_tip, + )?; Ok(processed_unconfirmed_state) } diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index de642b98bb..284d3d52b4 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -493,7 +493,7 @@ impl StackerDBConfig { let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)? .expect("FATAL: no epoch defined"); - let dbconn = sortition_db.index_handle_at_tip(); + let dbconn = sortition_db.index_handle_at_block(chainstate, &chain_tip_hash)?; // check the target contract let res = chainstate.with_read_only_clarity_tx(&dbconn, &chain_tip_hash, |clarity_tx| { diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 940d79bafe..aa947046f4 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -34,8 +34,10 @@ use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::index::marf::{MarfConnection, MarfTransaction, MARF}; use crate::chainstate::stacks::index::{Error as MARFError, MARFValue, MarfTrieId}; +use crate::core::{StacksEpoch, StacksEpochId}; pub type DBConn = rusqlite::Connection; pub type DBTx<'a> = rusqlite::Transaction<'a>; @@ -630,6 +632,16 @@ impl<'a, C, T: MarfTrieId> IndexDBConn<'a, C, T> { pub fn conn(&self) -> &DBConn { self.index.sqlite_conn() } + + pub fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { + SortitionDB::get_stacks_epoch_by_epoch_id(self.conn(), epoch_id) + .expect("BUG: failed to get epoch for epoch id") + } + + pub fn get_stacks_epoch(&self, height: u32) -> Option { + SortitionDB::get_stacks_epoch(self.conn(), 
height as u64) + .expect("BUG: failed to get epoch for burn block height") + } } impl<'a, C, T: MarfTrieId> Deref for IndexDBConn<'a, C, T> { diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a7e78bc37d..40799bafa9 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -288,7 +288,7 @@ impl BlockMinerThread { let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let sortition_handle = sort_db.index_handle_at_tip(); + let sortition_handle = sort_db.index_handle(&tip.sortition_id); let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( &mut chain_state, &sort_db, @@ -411,7 +411,9 @@ impl BlockMinerThread { // Get all nonces for the signers from clarity DB to use to validate transactions let account_nonces = chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb + .index_handle_at_block(chainstate, &stacks_block_id) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_block_id, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { @@ -479,7 +481,8 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = sort_db.index_handle_at_tip(); + let mut sortition_handle = + sort_db.index_handle_at_block(&chain_state, &block.block_id())?; let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, @@ -726,9 +729,10 @@ impl BlockMinerThread { } } + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + // create our coinbase if this is the first block we've mined this tenure let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { - let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let 
current_miner_nonce = parent_block_info.coinbase_nonce; let tenure_change_tx = self.generate_tenure_change_tx( current_miner_nonce, @@ -761,7 +765,9 @@ impl BlockMinerThread { // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, - &burn_db.index_handle_at_tip(), + &burn_db + .index_handle_at_block(&chain_state, &parent_block_id) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_block.consensus_hash, @@ -937,7 +943,9 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_handle_at_tip(), + &burn_db + .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 7a82b7ce80..e84e69fc85 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -727,7 +727,7 @@ impl MicroblockMinerThread { .unwrap_or(0) ); - let burn_height = + let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { error!("Failed to find block snapshot for mined block: {}", e); @@ -736,8 +736,8 @@ impl MicroblockMinerThread { .ok_or_else(|| { error!("Failed to find block snapshot for mined block"); ChainstateError::NoSuchBlockError - })? 
- .block_height; + })?; + let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { error!("Failed to get AST rules for microblock: {}", e); @@ -753,7 +753,10 @@ impl MicroblockMinerThread { .epoch_id; let mint_result = { - let ic = sortdb.index_handle_at_tip(); + let ic = sortdb.index_handle_at_block( + &chainstate, + &block_snapshot.get_canonical_stacks_block_id(), + )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( chainstate, &ic, @@ -2352,7 +2355,7 @@ impl BlockMinerThread { } let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_handle_at_tip(), + &burn_db.index_handle(&burn_tip.sortition_id), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -2382,7 +2385,7 @@ impl BlockMinerThread { // try again match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_handle_at_tip(), + &burn_db.index_handle(&burn_tip.sortition_id), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -4066,7 +4069,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_handle_at_tip(), + &burn_db.index_handle(&burn_chain_tip.sortition_id), &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index ba5b7e204e..4c1d8d39cb 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -3,7 +3,6 @@ use std::net::SocketAddr; use std::thread::JoinHandle; use std::{env, thread, time}; -use clarity::vm::database::BurnStateDB; use rand::RngCore; use stacks::burnchains::bitcoin::BitcoinNetworkType; use stacks::burnchains::db::BurnchainDB; @@ -891,7 +890,7 @@ impl 
Node { let mut fee_estimator = self.config.make_fee_estimator(); let stacks_epoch = db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&processed_block.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Some(estimator) = cost_estimator.as_mut() { diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 4c81867369..2db7a3a090 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -89,11 +89,14 @@ impl RunLoop { let _ = burnchain.sortdb_mut(); // Run the tenure, keep the artifacts - let artifacts_from_1st_tenure = - match first_tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) { - Some(res) => res, - None => panic!("Error while running 1st tenure"), - }; + let artifacts_from_1st_tenure = match first_tenure.run( + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), + ) { + Some(res) => res, + None => panic!("Error while running 1st tenure"), + }; // Tenures are instantiating their own chainstate, so that nodes can keep a clean chainstate, // while having the option of running multiple tenures concurrently and try different strategies. 
@@ -136,7 +139,9 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_handle_at_tip(), + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), ); // If the node we're looping on won the sortition, initialize and configure the next tenure @@ -160,7 +165,11 @@ impl RunLoop { &chain_tip, &mut tenure, ); - tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) + tenure.run( + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), + ) } None => None, }; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 947eb633ee..ea9fe27569 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -5143,7 +5143,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -5423,7 +5423,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index f3c48adc86..289d09be64 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -400,7 +400,7 @@ fn disable_pox() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate 
.clarity_eval_read_only( &iconn, @@ -1069,7 +1069,7 @@ fn pox_2_unlock_all() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index d2e2258a1a..3fc3b3d590 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -493,7 +493,7 @@ fn fix_to_pox_contract() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1213,7 +1213,7 @@ fn verify_auto_unlock_behavior() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 236a14d94b..157daf0bde 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3464,14 +3464,20 @@ fn microblock_fork_poison_integration_test() { chainstate .reload_unconfirmed_state( - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), tip_hash, ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -3722,14 
+3728,20 @@ fn microblock_integration_test() { chainstate .reload_unconfirmed_state( - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), tip_hash, ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -9141,7 +9153,10 @@ fn use_latest_tip_integration_test() { // Initialize the unconfirmed state. chainstate .reload_unconfirmed_state( - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), tip_hash, ) .unwrap(); @@ -9166,7 +9181,10 @@ fn use_latest_tip_integration_test() { let mblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), consensus_hash, stacks_block.clone(), vec_tx, From 4804c17908b34b6c9be1dc4d6a50d17967da9d65 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 15 May 2024 21:56:51 -0400 Subject: [PATCH 093/148] test: Check that Nakamoto miner produces blocks using `nakamoto_attempt_time_ms` --- docs/mining.md | 2 +- testnet/stacks-node/src/config.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 301 +++++++++++++++++- 3 files changed, 302 insertions(+), 3 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index 891358af03..e113f12d93 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -26,7 +26,7 @@ subsequent_attempt_time_ms = 60000 # Time to spend mining a microblock, in milliseconds. microblock_attempt_time_ms = 30000 # Time to spend mining a Nakamoto block, in milliseconds. 
-nakamoto_attempt_time_ms = 10000 +nakamoto_attempt_time_ms = 20000 ``` You can verify that your node is operating as a miner by checking its log output diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index ad02341343..c101da090d 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2320,7 +2320,7 @@ impl Default for MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, - nakamoto_attempt_time_ms: 10_000, + nakamoto_attempt_time_ms: 20_000, probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..c9bf52aa43 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -45,7 +45,10 @@ use stacks::chainstate::stacks::boot::{ }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; +use stacks::chainstate::stacks::{ + StacksTransaction, ThresholdSignature, TransactionPayload, MAX_BLOCK_LEN, +}; +use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -3895,3 +3898,299 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +/// Test config parameter `nakamoto_attempt_time_ms` +#[test] +#[ignore] +fn test_nakamoto_attempt_time() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let password = 
"12345".to_string(); + naka_conf.connection_options.block_proposal_token = Some(password.clone()); + // Use fixed timing params for this test + let nakamoto_attempt_time_ms = 20_000; + naka_conf.miner.nakamoto_attempt_time_ms = nakamoto_attempt_time_ms; + let stacker_sk = setup_stacker(&mut naka_conf); + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 1_000_000_000, + ); + + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100_000, + ); + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + // We'll need a lot of accounts for one subtest to avoid MAXIMUM_MEMPOOL_TX_CHAINING + struct Account { + nonce: u64, + privk: Secp256k1PrivateKey, + _address: StacksAddress, + } + let num_accounts = 1_000; + let init_account_balance = 1_000_000_000; + let account_keys = add_initial_balances(&mut naka_conf, num_accounts, init_account_balance); + let mut account = account_keys + .into_iter() + .map(|privk| { + let _address = tests::to_addr(&privk); + Account { + nonce: 0, + privk, + _address, + } + }) + .collect::>(); + + // only subscribe to the block proposal events + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::BlockProposal], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + 
btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let _block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + + // first block wakes up the run loop, wait until a key registration has been submitted. 
+ next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // Mine 3 nakamoto tenures + for _ in 0..3 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + // ----- Setup boilerplate finished, test block proposal API endpoint ----- + + let mut sender_nonce = 0; + let tenure_count = 3; + let inter_blocks_per_tenure = 10; + + // Subtest 1 + // Mine nakamoto tenures with a few transactions + // Blocks should be produced at least every 20 seconds + for _ in 0..tenure_count { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for _ in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let txs_per_block = 3; + let tx_fee = 500; + let amount = 500; + + for _ in 0..txs_per_block { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, tx_fee, &recipient, amount); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + } + + // Sleep a bit longer than what our max block time should be + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms + 100)); + + // Miner should have made a new block by now + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + 
.get_stacks_blocks_processed(); + + assert!(blocks_processed > blocks_processed_before); + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // Subtest 2 + // Confirm that no blocks are mined if there are no transactions + for _ in 0..2 { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let info_before = get_chain_info_result(&naka_conf).unwrap(); + + // Wait long enough for a block to be mined + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2)); + + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + + // Assert that no block was mined while waiting + assert_eq!(blocks_processed, blocks_processed_before); + assert_eq!(info.stacks_tip, info_before.stacks_tip); + assert_eq!(info.stacks_tip_height, info_before.stacks_tip_height); + } + + // Subtest 3 + // Add more than `nakamoto_attempt_time_ms` worth of transactions into mempool + // Multiple blocks should be mined + for _ in 0..tenure_count { + let info_before = get_chain_info_result(&naka_conf).unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let tx_limit = 10000; + let tx_fee = 500; + let amount = 500; + let mut tx_total_size = 0; + let mut tx_count = 0; + let mut acct_idx = 0; + + // Submit max # of txs from each account to reach tx_limit + 'submit_txs: loop { + let acct = &mut 
account[acct_idx]; + for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { + let transfer_tx = + make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + submit_tx(&http_origin, &transfer_tx); + tx_total_size += transfer_tx.len(); + tx_count += 1; + acct.nonce += 1; + if tx_count >= tx_limit { + break 'submit_txs; + } + } + acct_idx += 1; + } + + // Make sure that these transactions *could* fit into a single block + assert!(tx_total_size < MAX_BLOCK_LEN as usize); + + // Wait long enough for 2 blocks to be made + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); + + // Check that 2 blocks were made + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let blocks_mined = blocks_processed - blocks_processed_before; + assert!(blocks_mined > 2); + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, info_before.stacks_tip); + assert_ne!(info.stacks_tip_height, info_before.stacks_tip_height); + } + + // ----- Clean up ----- + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From b379a0a55e8c3f47b46cf8dadcf67ae8f8196ed7 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 29 May 2024 13:05:19 -0400 Subject: [PATCH 094/148] test: Add `nakamoto_attempt_time` to `bitcoin-tests.yml` --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 87fe5a8f09..b0262313c2 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -82,6 +82,7 @@ jobs: - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored 
+ - tests::nakamoto_integrations::nakamoto_attempt_time - tests::signer::v0::block_proposal_rejection - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c9bf52aa43..e76387868f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3902,7 +3902,7 @@ fn check_block_heights() { /// Test config parameter `nakamoto_attempt_time_ms` #[test] #[ignore] -fn test_nakamoto_attempt_time() { +fn nakamoto_attempt_time() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From fa482af24f123efe598d6c6407f15028a28026f4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 29 May 2024 13:30:23 -0400 Subject: [PATCH 095/148] fix: `index_handle_at_block` for Nakamoto blocks --- stackslib/src/chainstate/burn/db/sortdb.rs | 14 ++++++---- .../src/chainstate/nakamoto/tests/mod.rs | 5 +++- .../src/tests/neon_integrations.rs | 27 ++++++++++--------- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3fa528995a..bba8257cbe 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -67,7 +67,7 @@ use crate::chainstate::burn::{ use crate::chainstate::coordinator::{ Error as CoordinatorError, PoxAnchorBlockStatus, RewardCycleInfo, SortitionDBMigrator, }; -use crate::chainstate::nakamoto::NakamotoBlockHeader; +use crate::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; @@ -2658,12 +2658,16 @@ impl SortitionDB { chainstate: &StacksChainState, stacks_block_id: &StacksBlockId, ) -> Result, 
db_error> { - let (consensus_hash, bhh) = match chainstate.get_block_header_hashes(stacks_block_id) { + let header = match NakamotoChainState::get_block_header(chainstate.db(), stacks_block_id) { Ok(Some(x)) => x, - _ => return Err(db_error::NotFoundError), + x => { + debug!("Failed to get block header: {:?}", x); + return Err(db_error::NotFoundError); + } }; - let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &consensus_hash)? - .ok_or(db_error::NotFoundError)?; + let snapshot = + SortitionDB::get_block_snapshot_consensus(&self.conn(), &header.consensus_hash)? + .ok_or(db_error::NotFoundError)?; Ok(self.index_handle(&snapshot.sortition_id)) } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 3cd7d2dde5..4a1b0ad714 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -126,9 +126,12 @@ pub fn get_account( &tip ); + let snapshot = SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash) + .unwrap() + .unwrap(); chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&snapshot.sortition_id), &tip.index_block_hash(), |clarity_conn| { StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 157daf0bde..b81e82b57c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3471,13 +3471,14 @@ fn microblock_fork_poison_integration_test() { tip_hash, ) .unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), + &iconn, 
consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -3735,13 +3736,14 @@ fn microblock_integration_test() { tip_hash, ) .unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), + &iconn, consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -9178,13 +9180,14 @@ fn use_latest_tip_integration_test() { let vec_tx = vec![tx_1, tx_2]; let privk = find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(); let mblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), + &iconn, consensus_hash, stacks_block.clone(), vec_tx, From fdcaea8b91972ad721d9d9e0579ac89fb531ddc0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 May 2024 13:11:02 -0500 Subject: [PATCH 096/148] first round of addressing PR review --- stacks-common/src/util/macros.rs | 5 + stacks-signer/src/chainstate.rs | 135 +++++++++--------- stacks-signer/src/client/stacks_client.rs | 44 +++++- stacks-signer/src/signerdb.rs | 7 + stackslib/src/chainstate/burn/db/sortdb.rs | 22 +++ stackslib/src/chainstate/stacks/mod.rs | 7 + .../src/net/api/get_tenures_fork_info.rs | 104 ++++---------- stackslib/src/net/api/getsortition.rs | 98 ++----------- stackslib/src/net/api/mod.rs | 92 +++++++++++- .../net/api/tests/get_tenures_fork_info.rs | 15 ++ stackslib/src/net/api/tests/getsortition.rs | 45 +++--- stackslib/src/net/api/tests/mod.rs | 98 ++++++++++++- .../src/tests/nakamoto_integrations.rs | 31 ++-- 13 files changed, 426 insertions(+), 277 deletions(-) diff --git a/stacks-common/src/util/macros.rs 
b/stacks-common/src/util/macros.rs index 845b3b452b..5009e984cb 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -574,6 +574,11 @@ macro_rules! impl_byte_array_newtype { to_hex(&self.0) } } + impl std::fmt::LowerHex for $thing { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_hex()) + } + } impl std::fmt::Display for $thing { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}", self.to_hex()) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index ee03b6df85..540d714bc8 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; @@ -10,7 +25,7 @@ use crate::client::{ClientError, StacksClient}; use crate::signerdb::SignerDb; /// Captures this signer's current view of a sortition's miner. -#[derive(PartialEq, Eq)] +#[derive(PartialEq, Eq, Debug)] pub enum SortitionMinerStatus { /// The signer thinks this sortition's miner is invalid, and hasn't signed any blocks for them. 
InvalidatedBeforeFirstBlock, @@ -21,7 +36,13 @@ pub enum SortitionMinerStatus { } /// Captures the Stacks sortition related state for -/// a successful sortition +/// a successful sortition. +/// +/// Sortition state in this struct is +/// is indexed using consensus hashes, and fetched from a single "get latest" RPC call +/// to the stacks node. This ensures that the state in this struct is consistent with itself +/// (i.e., it does not span a bitcoin fork) and up to date. +#[derive(Debug)] pub struct SortitionState { /// The miner's pub key hash pub miner_pkh: Hash160, @@ -33,22 +54,20 @@ pub struct SortitionState { pub parent_tenure_id: ConsensusHash, /// this sortition's consensus hash pub consensus_hash: ConsensusHash, - /// did the miner in this sortition do something - /// to become invalidated as a miner? - pub invalidated: SortitionMinerStatus, + /// what is this signer's view of the this sortition's miner? did they misbehave? + pub miner_status: SortitionMinerStatus, } /// The signer's current view of the stacks chain's sortition /// state +#[derive(Debug)] pub struct SortitionsView { /// the prior successful sortition (this corresponds to the "prior" miner slot) pub last_sortition: Option, /// the current successful sortition (this corresponds to the "current" miner slot) - pub cur_sortition: Option, - /// is the view fresh? 
- pub fresh: bool, - /// the hash at which the sortitions view was last fetched - pub latest_consensus_hash: Option, + pub cur_sortition: SortitionState, + /// the hash at which the sortitions view was fetched + pub latest_consensus_hash: ConsensusHash, } impl TryFrom for SortitionState { @@ -66,7 +85,7 @@ impl TryFrom for SortitionState { parent_tenure_id: value .stacks_parent_ch .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, - invalidated: SortitionMinerStatus::Valid, + miner_status: SortitionMinerStatus::Valid, }) } } @@ -79,44 +98,27 @@ enum ProposedBy<'a> { impl<'a> ProposedBy<'a> { pub fn state(&self) -> &SortitionState { match self { - ProposedBy::LastSortition(ref x) => x, - ProposedBy::CurrentSortition(ref x) => x, + ProposedBy::LastSortition(x) => x, + ProposedBy::CurrentSortition(x) => x, } } } impl SortitionsView { - /// Initialize an empty sortitions view struct -- it will refresh() before - /// checking any proposals. - pub fn new() -> Self { - Self { - last_sortition: None, - cur_sortition: None, - fresh: false, - latest_consensus_hash: None, - } - } - /// Apply checks from the SortitionsView on the block proposal. 
- /// pub fn check_proposal( - &mut self, + &self, client: &StacksClient, signer_db: &SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { - self.refresh_view(client)?; let block_pkh = Hash160::from_data(&block_pk.to_bytes_compressed()); - let Some(proposed_by) = self - .cur_sortition - .as_ref() - .and_then(|cur_sortition| { - if block.header.consensus_hash == cur_sortition.consensus_hash { - Some(ProposedBy::CurrentSortition(cur_sortition)) - } else { - None - } + let Some(proposed_by) = + (if block.header.consensus_hash == self.cur_sortition.consensus_hash { + Some(ProposedBy::CurrentSortition(&self.cur_sortition)) + } else { + None }) .or_else(|| { self.last_sortition.as_ref().and_then(|last_sortition| { @@ -132,7 +134,7 @@ impl SortitionsView { "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "current_sortition_consensus_hash" => ?self.cur_sortition.as_ref().map(|x| x.consensus_hash), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); return Ok(false); @@ -153,7 +155,7 @@ impl SortitionsView { // check that this miner is the most recent sortition match proposed_by { ProposedBy::CurrentSortition(sortition) => { - if sortition.invalidated != SortitionMinerStatus::Valid { + if sortition.miner_status != SortitionMinerStatus::Valid { warn!( "Current miner behaved improperly, this signer views the miner as invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, @@ -163,19 +165,17 @@ impl SortitionsView { } } ProposedBy::LastSortition(_last_sortition) => { - if let Some(cur_sortition) = &self.cur_sortition { - // should only consider blocks from the last sortition if the new sortition was invalidated - // 
before we signed their first block. - if cur_sortition.invalidated - != SortitionMinerStatus::InvalidatedBeforeFirstBlock - { - warn!( - "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - ); - return Ok(false); - } + // should only consider blocks from the last sortition if the new sortition was invalidated + // before we signed their first block. + if self.cur_sortition.miner_status + != SortitionMinerStatus::InvalidatedBeforeFirstBlock + { + warn!( + "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); } } }; @@ -258,7 +258,7 @@ impl SortitionsView { &sortition_state.parent_tenure_id, &sortition_state.prior_sortition, )?; - if tenures_reorged.len() == 0 { + if tenures_reorged.is_empty() { warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. 
Marking miner invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), @@ -281,7 +281,7 @@ impl SortitionsView { } } - return Ok(true); + Ok(true) } fn check_tenure_change_block_confirmation( @@ -314,7 +314,7 @@ impl SortitionsView { return Ok(true); }; if block.header.chain_length > last_known_block.block.header.chain_length { - return Ok(true); + Ok(true) } else { warn!( "Miner block proposal's tenure change transaction does not confirm as many blocks as we expect in the parent tenure"; @@ -323,23 +323,20 @@ impl SortitionsView { "proposed_chain_length" => block.header.chain_length, "expected_at_least" => last_known_block.block.header.chain_length + 1, ); - return Ok(false); + Ok(false) } } /// Has the current tenure lasted long enough to extend the block limit? pub fn tenure_time_passed_block_lim() -> Result { // TODO - return Ok(false); + Ok(false) } - /// If necessary, fetch a new view of the recent sortitions - pub fn refresh_view(&mut self, client: &StacksClient) -> Result<(), ClientError> { - if self.fresh { - return Ok(()); - } + /// Fetch a new view of the recent sortitions + pub fn fetch_view(client: &StacksClient) -> Result { let latest_state = client.get_latest_sortition()?; - let latest_ch = latest_state.consensus_hash.clone(); + let latest_ch = latest_state.consensus_hash; // figure out what cur_sortition will be set to. // if the latest sortition wasn't successful, query the last one that was. 
@@ -361,15 +358,19 @@ impl SortitionsView { .map(|ch| client.get_sortition(ch)) .transpose()?; - self.cur_sortition = Some(SortitionState::try_from(latest_success)?); - self.last_sortition = last_sortition + let cur_sortition = SortitionState::try_from(latest_success)?; + let last_sortition = last_sortition .map(SortitionState::try_from) .transpose() .ok() .flatten(); - self.fresh = true; - self.latest_consensus_hash = Some(latest_ch); - Ok(()) + let latest_consensus_hash = latest_ch; + + Ok(Self { + cur_sortition, + last_sortition, + latest_consensus_hash, + }) } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 38a8f78d1e..7e082558f9 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -42,14 +42,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; use serde_json::json; -use slog::{slog_debug, slog_warn}; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::StacksEpochId; -use stacks_common::{debug, warn}; +use stacks_common::{debug, info, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -369,6 +369,45 @@ impl StacksClient { &self, chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, + ) -> Result, ClientError> { + let mut tenures = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; + if tenures.is_empty() { + return Ok(tenures); + } + while tenures.last().map(|x| &x.consensus_hash) != Some(chosen_parent) { + let new_start = tenures.last().ok_or_else(|| { + ClientError::InvalidResponse( + "Should 
have tenure data in forking info response".into(), + ) + })?; + let mut next_results = + self.get_tenure_forking_info_step(chosen_parent, &new_start.consensus_hash)?; + if next_results.is_empty() { + return Err(ClientError::InvalidResponse( + "Could not fetch forking info all the way back to the requested chosen_parent" + .into(), + )); + } + // SAFETY check: next_results isn't empty, because of the above check. otherwise, remove(0) could panic. + next_results.remove(0); + let info_log: Vec<_> = tenures.iter().map(|t| t.consensus_hash).collect(); + info!("Current tenures = {:?}", info_log); + if next_results.is_empty() { + return Err(ClientError::InvalidResponse( + "Could not fetch forking info all the way back to the requested chosen_parent" + .into(), + )); + } + tenures.extend(next_results.into_iter()); + } + + Ok(tenures) + } + + fn get_tenure_forking_info_step( + &self, + chosen_parent: &ConsensusHash, + last_sortition: &ConsensusHash, ) -> Result, ClientError> { let send_request = || { self.stacks_node_client @@ -381,6 +420,7 @@ impl StacksClient { return Err(ClientError::RequestFailure(response.status())); } let tenures = response.json()?; + Ok(tenures) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5ef24d1c87..7450476397 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -96,6 +96,11 @@ CREATE TABLE IF NOT EXISTS blocks ( PRIMARY KEY (reward_cycle, signer_signature_hash) )"; +const CREATE_INDEXES: &str = " +CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); +"; + const CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -125,6 +130,8 @@ impl SignerDb { self.db.execute(CREATE_SIGNER_STATE_TABLE, NO_PARAMS)?; } + self.db.execute_batch(CREATE_INDEXES)?; + Ok(()) } diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs 
b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..08f78ec6cf 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2226,6 +2226,28 @@ impl<'a> SortitionHandleConn<'a> { }) } + /// Get the latest block snapshot on this fork where a sortition occured. + pub fn get_last_snapshot_with_sortition_from_tip(&self) -> Result { + let ancestor_hash = + match self.get_indexed(&self.context.chain_tip, &db_keys::last_sortition())? { + Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { + panic!( + "FATAL: corrupt database: failed to parse {} into a hex string", + &hex_str + ) + }), + None => { + // no prior sortitions, so get the first + return self.get_first_block_snapshot(); + } + }; + + self.get_block_snapshot(&ancestor_hash).map(|snapshot_opt| { + snapshot_opt + .unwrap_or_else(|| panic!("FATAL: corrupt index: no snapshot {}", ancestor_hash)) + }) + } + pub fn get_leader_key_at( &self, key_block_height: u64, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f9ad4fff3f..fa5572e536 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -99,6 +99,8 @@ pub enum Error { StacksTransactionSkipped(String), PostConditionFailed(String), NoSuchBlockError, + /// The supplied Sortition IDs, consensus hashes, or stacks blocks are not in the same fork. 
+ NotInSameFork, InvalidChainstateDB, BlockTooBigError, TransactionTooBigError, @@ -224,6 +226,9 @@ impl fmt::Display for Error { Error::NoRegisteredSigners(reward_cycle) => { write!(f, "No registered signers for reward cycle {reward_cycle}") } + Error::NotInSameFork => { + write!(f, "The supplied block identifiers are not in the same fork") + } } } } @@ -268,6 +273,7 @@ impl error::Error for Error { Error::InvalidChildOfNakomotoBlock => None, Error::ExpectedTenureChange => None, Error::NoRegisteredSigners(_) => None, + Error::NotInSameFork => None, } } } @@ -312,6 +318,7 @@ impl Error { Error::InvalidChildOfNakomotoBlock => "InvalidChildOfNakomotoBlock", Error::ExpectedTenureChange => "ExpectedTenureChange", Error::NoRegisteredSigners(_) => "NoRegisteredSigners", + Error::NotInSameFork => "NotInSameFork", } } diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 13ed91810e..da2b1cd3d9 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -34,6 +34,7 @@ use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoSta use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::http::{ parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, @@ -46,7 +47,7 @@ use crate::net::httpcore::{ use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; use crate::util_lib::db::{DBConn, Error as DBError}; -pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures_fork_info"; +pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures/fork_info"; static DEPTH_LIMIT: usize = 10; @@ -81,81 +82,6 @@ pub struct TenureForkingInfo { pub 
first_block_mined: Option, } -mod prefix_opt_hex { - pub fn serialize( - val: &Option, - s: S, - ) -> Result { - match val { - Some(ref some_val) => { - let val_str = format!("0x{some_val}"); - s.serialize_some(&val_str) - } - None => s.serialize_none(), - } - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result, D::Error> { - let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; - let Some(inst_str) = opt_inst_str else { - return Ok(None); - }; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; - Ok(Some(val)) - } -} - -mod prefix_hex { - pub fn serialize( - val: &T, - s: S, - ) -> Result { - s.serialize_str(&format!("0x{val}")) - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result { - let inst_str: String = serde::Deserialize::deserialize(d)?; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - T::try_from(&hex_str).map_err(serde::de::Error::custom) - } -} - -trait HexDeser: Sized { - fn try_from(hex: &str) -> Result; -} - -macro_rules! 
impl_hex_deser { - ($thing:ident) => { - impl HexDeser for $thing { - fn try_from(hex: &str) -> Result { - $thing::from_hex(hex) - } - } - }; -} - -impl_hex_deser!(BurnchainHeaderHash); -impl_hex_deser!(StacksBlockId); -impl_hex_deser!(SortitionId); -impl_hex_deser!(ConsensusHash); -impl_hex_deser!(BlockHeaderHash); -impl_hex_deser!(Hash160); - #[derive(Clone, Default)] pub struct GetTenuresForkInfo { pub start_sortition: Option, @@ -289,6 +215,11 @@ impl RPCRequestHandler for GetTenuresForkInfo { .start_sortition .clone() .ok_or_else(|| ChainError::NoSuchBlockError)?; + let recurse_end_snapshot = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &recurse_end)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let height_bound = recurse_end_snapshot.block_height; + let mut results = vec![]; let mut cursor = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &start_from)? .ok_or_else(|| ChainError::NoSuchBlockError)?; @@ -299,7 +230,16 @@ impl RPCRequestHandler for GetTenuresForkInfo { let mut depth = 0; while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { depth += 1; - cursor = handle.get_last_snapshot_with_sortition(cursor.block_height)?; + info!("Handling fork info request"; + "cursor.consensus_hash" => %cursor.consensus_hash, + "cursor.block_height" => cursor.block_height, + "recurse_end" => %recurse_end, + "height_bound" => height_bound + ); + if height_bound >= cursor.block_height { + return Err(ChainError::NotInSameFork); + } + cursor = handle.get_last_snapshot_with_sortition(cursor.block_height - 1)?; results.push(TenureForkingInfo::from_snapshot( &cursor, sortdb, chainstate, )?); @@ -310,6 +250,16 @@ impl RPCRequestHandler for GetTenuresForkInfo { let tenures = match result { Ok(tenures) => tenures, + Err(ChainError::NotInSameFork) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new_json(serde_json::json!( + "Supplied start and end sortitions are not in the same sortition fork" + )), + ) + 
.try_into_contents() + .map_err(NetError::from); + } Err(ChainError::NoSuchBlockError) => { return StacksHttpResponse::new_error( &preamble, diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index a4fba89fb5..ed084a084d 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -32,6 +32,7 @@ use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoSta use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::http::{ parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, @@ -53,6 +54,7 @@ pub enum QuerySpecifier { } pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortition"; +static PATH_REGEX: &str = "^/v3/sortition(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -98,90 +100,16 @@ pub struct SortitionInfo { pub committed_block_hash: Option, } -mod prefix_opt_hex { - pub fn serialize( - val: &Option, - s: S, - ) -> Result { - match val { - Some(ref some_val) => { - let val_str = format!("0x{some_val}"); - s.serialize_some(&val_str) - } - None => s.serialize_none(), - } - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result, D::Error> { - let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; - let Some(inst_str) = opt_inst_str else { - return Ok(None); - }; - let Some(hex_str) = inst_str.get(2..) 
else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; - Ok(Some(val)) - } -} - -mod prefix_hex { - pub fn serialize( - val: &T, - s: S, - ) -> Result { - s.serialize_str(&format!("0x{val}")) - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result { - let inst_str: String = serde::Deserialize::deserialize(d)?; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - T::try_from(&hex_str).map_err(serde::de::Error::custom) - } -} - -trait HexDeser: Sized { - fn try_from(hex: &str) -> Result; -} - -macro_rules! impl_hex_deser { - ($thing:ident) => { - impl HexDeser for $thing { - fn try_from(hex: &str) -> Result { - $thing::from_hex(hex) - } - } - }; -} - -impl_hex_deser!(BurnchainHeaderHash); -impl_hex_deser!(SortitionId); -impl_hex_deser!(ConsensusHash); -impl_hex_deser!(BlockHeaderHash); -impl_hex_deser!(Hash160); - -impl TryFrom<(&String, &String)> for QuerySpecifier { +impl TryFrom<(&str, &str)> for QuerySpecifier { type Error = Error; - fn try_from(value: (&String, &String)) -> Result { + fn try_from(value: (&str, &str)) -> Result { let hex_str = if value.1.starts_with("0x") { &value.1[2..] } else { - value.1.as_str() + value.1 }; - match value.0.as_str() { + match value.0 { "consensus" => Ok(Self::ConsensusHash( ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, )), @@ -219,7 +147,7 @@ impl HttpRequest for GetSortitionHandler { } fn path_regex(&self) -> Regex { - Regex::new(&format!("^{RPC_SORTITION_INFO_PATH}$")).unwrap() + Regex::new(PATH_REGEX).unwrap() } /// Try to decode this request. 
@@ -227,7 +155,7 @@ impl HttpRequest for GetSortitionHandler { fn try_parse_request( &mut self, preamble: &HttpRequestPreamble, - _captures: &Captures, + captures: &Captures, query: Option<&str>, _body: &[u8], ) -> Result { @@ -238,14 +166,10 @@ impl HttpRequest for GetSortitionHandler { } let req_contents = HttpRequestContents::new().query_string(query); - if req_contents.get_query_args().len() > 1 { - return Err(Error::DecodeError( - "May only supply up to one query argument".into(), - )); - } self.query = QuerySpecifier::Latest; - for (key, value) in req_contents.get_query_args().iter() { - self.query = QuerySpecifier::try_from((key, value))?; + eprintln!("{captures:?}"); + if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; } Ok(req_contents) diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 58425b4955..34fa1ec4c3 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -16,7 +16,11 @@ use clarity::vm::costs::ExecutionCost; use stacks_common::codec::read_next; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::util::hash::Hash160; +use stacks_common::util::HexError; use crate::burnchains::Txid; use crate::chainstate::stacks::{StacksMicroblock, StacksTransaction}; @@ -111,8 +115,10 @@ impl StacksHttp { getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(), ); self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); + self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new()); self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new()); + 
self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), ); @@ -128,9 +134,6 @@ impl StacksHttp { self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); - self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); - self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); - self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); } } @@ -143,3 +146,84 @@ impl From for Error { } } } + +/// This module serde encodes and decodes optional byte fields in RPC +/// responses as Some(String) where the String is a `0x` prefixed +/// hex string. +pub mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val:x}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +/// This module serde encodes and decodes byte fields in RPC +/// responses as a String where the String is a `0x` prefixed +/// hex string. 
+pub mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val:x}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +pub trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(StacksBlockId); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs index 6c9e552759..88e3d875ff 100644 --- a/stackslib/src/net/api/tests/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ use std::collections::BTreeMap; use std::fmt::Display; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 40cfaf53cf..d48bc54a3a 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ use std::collections::BTreeMap; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -32,13 +47,13 @@ fn test_parse_request() { let tests = vec![ (make_preamble(""), Ok(QuerySpecifier::Latest)), ( - make_preamble("?consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), + make_preamble("/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"), Ok(QuerySpecifier::ConsensusHash( ConsensusHash::from_hex("deadbeef00deadbeef01deadbeef02deadbeef03").unwrap(), )), ), ( - make_preamble("?burn=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), + make_preamble("/burn/00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), Ok(QuerySpecifier::BurnchainHeaderHash( BurnchainHeaderHash::from_hex( "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff", @@ -47,41 +62,39 @@ fn test_parse_request() { )), ), ( - make_preamble("?burn_height=100"), + make_preamble("/burn_height/100"), Ok(QuerySpecifier::BlockHeight(100)), ), ( - make_preamble("?burn_height=a1be"), - Err(HttpError::DecodeError( - "invalid digit found in string".into(), - )), + make_preamble("/burn_height/a1be"), + Err(HttpError::DecodeError("invalid digit found in string".into()).into()), ), ( - make_preamble("?burn=a1be0000"), - Err(HttpError::DecodeError("bad length 8 for hex string".into())), + make_preamble("/burn/a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into()).into()), ), ( - make_preamble("?consensus=a1be0000"), - Err(HttpError::DecodeError("bad length 8 for hex string".into())), + make_preamble("/consensus/a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into()).into()), ), ( - make_preamble("?burn_height=20&consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), - Err(HttpError::DecodeError( - "May only supply up to one query argument".into(), - )), + make_preamble("/burn_height/20/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"), + Err(NetError::NotFoundError), ), ]; for (inp, expected_result) in tests.into_iter() { 
handler.restart(); let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + eprintln!("{}", &inp.path_and_query_str); + eprintln!("{parsed_request:?}"); match expected_result { Ok(query) => { assert!(parsed_request.is_ok()); assert_eq!(&handler.query, &query); } Err(e) => { - assert_eq!(NetError::Http(e), parsed_request.unwrap_err()); + assert_eq!(e, parsed_request.unwrap_err()); } } } diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 591a12131c..de26412fcd 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -22,10 +22,11 @@ use libstackerdb::SlotMetadata; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, + StacksPrivateKey, StacksPublicKey, }; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::pipe::Pipe; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; @@ -40,6 +41,7 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, }; use crate::core::MemPoolDB; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::db::PeerDB; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; @@ -1075,3 +1077,95 @@ pub fn test_rpc(test_name: &str, requests: Vec) -> Vec = + prefix_opt_hex::deserialize(&mut deserializer).unwrap(); + + assert_eq!(out, inp); + if test.is_some() { + assert_eq!( + hex_str, + format!("\"0x{}\"", to_hex(&inp.as_ref().unwrap().0)) + ); + } else { + 
assert_eq!(hex_str, "null"); + } + } +} + +#[test] +fn prefixed_hex_bad_desers() { + let inp = "\"1\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "invalid length 1, expected at least length 2 string".to_string(), + ); + let inp = "\"0x\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "bad length 0 for hex string".to_string(), + ); + let inp = "\"0x00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff00\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "bad length 66 for hex string".to_string(), + ); +} + +#[test] +fn prefixed_hex_serialization() { + let tests_32b = [ + [0u8; 32], + [1; 32], + [15; 32], + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, + ], + ]; + + for test in tests_32b.iter() { + let inp = BurnchainHeaderHash(test.clone()); + let mut out_buff = Vec::new(); + let mut serializer = serde_json::Serializer::new(&mut out_buff); + prefix_hex::serialize(&inp, &mut serializer).unwrap(); + let hex_str = String::from_utf8(out_buff).unwrap(); + eprintln!("{hex_str}"); + + let mut deserializer = serde_json::Deserializer::from_str(&hex_str); + let out: BurnchainHeaderHash = prefix_hex::deserialize(&mut deserializer).unwrap(); + + assert_eq!(out, inp); + assert_eq!(hex_str, format!("\"0x{}\"", to_hex(&inp.0))); + } +} diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index caeee26fae..d09c81bcba 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs 
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4013,8 +4013,6 @@ fn signer_chainstate() { let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - let mut sortitions_view = SortitionsView::new(); - // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { @@ -4051,15 +4049,6 @@ fn signer_chainstate() { false, ); - // there hasn't been a successful nakamoto sortition yet, so expect an error - assert!( - matches!( - sortitions_view.refresh_view(&signer_client).unwrap_err(), - ClientError::UnexpectedSortitionInfo - ), - "Sortitions view should fail to refresh if there are no successful nakamoto sortitions yet", - ); - // first block wakes up the run loop, wait until a key registration has been submitted. next_block_and(&mut btc_regtest_controller, 60, || { let vrf_count = vrfs_submitted.load(Ordering::SeqCst); @@ -4084,7 +4073,7 @@ fn signer_chainstate() { None; // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals let mut first_tenure_blocks: Option> = None; - for i in 0..5 { + for i in 0..15 { next_block_and_mine_commit( &mut btc_regtest_controller, 60, @@ -4093,8 +4082,7 @@ fn signer_chainstate() { ) .unwrap(); - sortitions_view.fresh = false; - sortitions_view.refresh_view(&signer_client).unwrap(); + let sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); // check the prior tenure's proposals again, confirming that the sortitions_view // will reject them. 
@@ -4188,7 +4176,7 @@ fn signer_chainstate() { ); // force the view to refresh and check again - sortitions_view.fresh = false; + let sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, @@ -4246,6 +4234,8 @@ fn signer_chainstate() { txs: vec![], }; + let mut sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); + assert!( !sortitions_view .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) @@ -4362,12 +4352,7 @@ fn signer_chainstate() { // Case: the block contains a tenure change, but the parent tenure is a reorg let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); // make the sortition_view *think* that our block commit pointed at this old tenure - sortitions_view - .cur_sortition - .as_mut() - .map(|sortition_state| { - sortition_state.parent_tenure_id = reorg_to_block.header.consensus_hash.clone() - }); + sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone(); let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: reorg_to_block.header.chain_length + 1, @@ -4422,7 +4407,9 @@ fn signer_chainstate() { "A sibling of a previously approved block must be rejected." ); - sortitions_view.fresh = false; + // view is stale, if we ever expand this test, sortitions_view should + // be fetched again, so drop it here. 
+ drop(sortitions_view); coord_channel .lock() From 1740bc4cff0061ada2396ebdf9214000200c3d65 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 May 2024 13:27:54 -0500 Subject: [PATCH 097/148] more PR reviews --- stackslib/src/net/api/get_tenures_fork_info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index da2b1cd3d9..055103dce4 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -77,7 +77,7 @@ pub struct TenureForkingInfo { /// block or miner was chosen). pub was_sortition: bool, /// If the sortition occurred, and a block was mined during the tenure, this is the - /// tenure's block. + /// tenure's first block. #[serde(with = "prefix_opt_hex")] pub first_block_mined: Option, } From 4d7cf4dc829bb89efe5b949a9542faf1c9962ac5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 May 2024 16:37:29 -0500 Subject: [PATCH 098/148] add unit tests for stacks-signer::chainstate --- stacks-signer/src/chainstate.rs | 2 +- stacks-signer/src/client/stacks_client.rs | 6 +- stacks-signer/src/lib.rs | 4 + stacks-signer/src/tests/chainstate.rs | 225 ++++++++++++++++++ stacks-signer/src/tests/mod.rs | 1 + .../src/net/api/get_tenures_fork_info.rs | 6 - 6 files changed, 233 insertions(+), 11 deletions(-) create mode 100644 stacks-signer/src/tests/chainstate.rs create mode 100644 stacks-signer/src/tests/mod.rs diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 540d714bc8..a79251f73f 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -223,7 +223,7 @@ impl SortitionsView { let changed_burn_view = tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; let enough_time_passed = Self::tenure_time_passed_block_lim()?; - if !changed_burn_view || !enough_time_passed { + if !changed_burn_view && 
!enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7e082558f9..8a7ade028c 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -42,14 +42,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; use serde_json::json; -use slog::{slog_debug, slog_info, slog_warn}; +use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::StacksEpochId; -use stacks_common::{debug, info, warn}; +use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -390,8 +390,6 @@ impl StacksClient { } // SAFETY check: next_results isn't empty, because of the above check. otherwise, remove(0) could panic. next_results.remove(0); - let info_log: Vec<_> = tenures.iter().map(|t| t.consensus_hash).collect(); - info!("Current tenures = {:?}", info_log); if next_results.is_empty() { return Err(ClientError::InvalidResponse( "Could not fetch forking info all the way back to the requested chosen_parent" diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index a6856bb732..0ff622896c 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -39,6 +39,10 @@ pub mod signerdb; pub mod v0; /// The v1 implementation of the singer. 
This includes WSTS support pub mod v1; + +#[cfg(test)] +mod tests; + use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs new file mode 100644 index 0000000000..d12d941ecc --- /dev/null +++ b/stacks-signer/src/tests/chainstate.rs @@ -0,0 +1,225 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::fs; +use std::net::{Ipv4Addr, SocketAddrV4}; + +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::chainstate::stacks::{ + SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangeCause, + TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, + TransactionVersion, +}; +use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, +}; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::MessageSignature; + +use crate::chainstate::{SortitionMinerStatus, SortitionState, SortitionsView}; +use crate::client::StacksClient; +use crate::signerdb::SignerDb; + +fn setup_test_environment( + fn_name: &str, +) -> ( + StacksClient, + SignerDb, + StacksPublicKey, + SortitionsView, + NakamotoBlock, +) { + let block_sk = StacksPrivateKey::from_seed(&[0, 1]); + let block_pk = StacksPublicKey::from_private(&block_sk); + let block_pkh = Hash160::from_node_public_key(&block_pk); + + let cur_sortition = SortitionState { + miner_pkh: block_pkh, + miner_pubkey: None, + prior_sortition: ConsensusHash([0; 20]), + parent_tenure_id: ConsensusHash([0; 20]), + consensus_hash: ConsensusHash([1; 20]), + miner_status: SortitionMinerStatus::Valid, + }; + + let last_sortition = Some(SortitionState { + miner_pkh: block_pkh, + miner_pubkey: None, + prior_sortition: ConsensusHash([128; 20]), + parent_tenure_id: ConsensusHash([128; 20]), + consensus_hash: ConsensusHash([0; 20]), + miner_status: SortitionMinerStatus::Valid, + }); + + let view = SortitionsView { + latest_consensus_hash: cur_sortition.consensus_hash, + cur_sortition, + last_sortition, + }; + + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), + 
SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).into(), + "FOO".into(), + false, + ); + + let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; + let signer_db_path = format!("{signer_db_dir}/{fn_name}.{}.sqlite", get_epoch_time_secs()); + fs::create_dir_all(signer_db_dir).unwrap(); + let signer_db = SignerDb::new(signer_db_path).unwrap(); + + let block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([15; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + signer_bitvec: BitVec::zeros(1).unwrap(), + }, + txs: vec![], + }; + + (stacks_client, signer_db, block_pk, view, block) +} + +#[test] +fn check_proposal_units() { + let (stacks_client, signer_db, block_pk, mut view, block) = + setup_test_environment("check_proposal_units"); + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .unwrap()); + + view.last_sortition = None; + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .unwrap()); +} + +#[test] +fn check_proposal_miner_pkh_mismatch() { + let (stacks_client, signer_db, _block_pk, view, mut block) = + setup_test_environment("miner_pkh_mismatch"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3])); + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .unwrap()); + + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .unwrap()); +} + +#[test] +fn check_proposal_invalid_status() { + let (stacks_client, signer_db, block_pk, mut view, mut block) = + 
setup_test_environment("invalid_status"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + // this block passes the signer state checks, even though it doesn't have a tenure change tx. + // this is because the signer state does not perform the tenure change logic checks: it needs + // the stacks-node to do that (because the stacks-node actually knows whether or not their + // parent blocks have been seen before, while the signer state checks are only reasoning about + // stacks blocks seen by the signer, which may be a subset) + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); +} + +fn make_tenure_change_payload() -> TenureChangePayload { + TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0; 20]), + prev_tenure_consensus_hash: ConsensusHash([0; 20]), + burn_view_consensus_hash: ConsensusHash([0; 20]), + previous_tenure_end: StacksBlockId([0; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::Extended, + pubkey_hash: Hash160([0; 20]), + } +} + +fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: 
Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(payload), + } +} + +#[test] +fn check_proposal_tenure_extend_invalid_conditions() { + let (stacks_client, signer_db, block_pk, view, mut block) = + setup_test_environment("tenure_extend"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let mut extend_payload = make_tenure_change_payload(); + extend_payload.burn_view_consensus_hash = view.cur_sortition.consensus_hash; + extend_payload.tenure_consensus_hash = block.header.consensus_hash; + extend_payload.prev_tenure_consensus_hash = block.header.consensus_hash; + let tx = make_tenure_change_tx(extend_payload); + block.txs = vec![tx]; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + let mut extend_payload = make_tenure_change_payload(); + extend_payload.burn_view_consensus_hash = ConsensusHash([64; 20]); + extend_payload.tenure_consensus_hash = block.header.consensus_hash; + extend_payload.prev_tenure_consensus_hash = block.header.consensus_hash; + let tx = make_tenure_change_tx(extend_payload); + block.txs = vec![tx]; + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); +} diff --git a/stacks-signer/src/tests/mod.rs b/stacks-signer/src/tests/mod.rs new file mode 100644 index 0000000000..a92c85da71 --- /dev/null +++ b/stacks-signer/src/tests/mod.rs @@ -0,0 +1 @@ +mod chainstate; diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 055103dce4..4abc8ab6e1 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -230,12 +230,6 @@ impl RPCRequestHandler for 
GetTenuresForkInfo { let mut depth = 0; while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { depth += 1; - info!("Handling fork info request"; - "cursor.consensus_hash" => %cursor.consensus_hash, - "cursor.block_height" => cursor.block_height, - "recurse_end" => %recurse_end, - "height_bound" => height_bound - ); if height_bound >= cursor.block_height { return Err(ChainError::NotInSameFork); } From 4f4b0de732fe93b794eaa68a1a91d25a2d4fa39e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 30 May 2024 09:25:46 -0500 Subject: [PATCH 099/148] more assertions in test for get_forking_info --- .../src/tests/nakamoto_integrations.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d09c81bcba..c2e5566a20 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -60,6 +60,7 @@ use stacks::core::{ }; use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; +use stacks::net::api::get_tenures_fork_info::TenureForkingInfo; use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, @@ -81,7 +82,6 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; -use stacks_signer::client::ClientError; use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; @@ -4407,6 +4407,26 @@ fn signer_chainstate() { "A sibling of a previously approved block must be rejected." 
); + let start_sortition = &reorg_to_block.header.consensus_hash; + let stop_sortition = &sortitions_view.cur_sortition.prior_sortition; + // check that the get_tenure_forking_info response is sane + let fork_info = signer_client + .get_tenure_forking_info(start_sortition, stop_sortition) + .unwrap(); + + // it should start and stop with the given inputs (reversed!) + assert_eq!(fork_info.first().unwrap().consensus_hash, *stop_sortition); + assert_eq!(fork_info.last().unwrap().consensus_hash, *start_sortition); + + // every step of the return should be linked to the parent + let mut prior: Option<&TenureForkingInfo> = None; + for step in fork_info.iter().rev() { + if let Some(ref prior) = prior { + assert_eq!(prior.sortition_id, step.parent_sortition_id); + } + prior = Some(step); + } + // view is stale, if we ever expand this test, sortitions_view should // be fetched again, so drop it here. drop(sortitions_view); From a83cefb7ee09046b2d8e5a8bc5aaaf19939c31ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:53:58 -0400 Subject: [PATCH 100/148] chore: remove dead code, and add a way to get the last sortition in a prepare phase --- stackslib/src/chainstate/burn/db/sortdb.rs | 196 ++++++--------------- 1 file changed, 49 insertions(+), 147 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 8f416b4c39..4e8dd50f1e 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -43,8 +43,6 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::*; use stacks_common::util::{get_epoch_time_secs, log}; -use wsts::common::Signature as WSTSSignature; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::affirmation::{AffirmationMap, AffirmationMapEntry}; use 
crate::burnchains::bitcoin::BitcoinNetworkType; @@ -1860,80 +1858,6 @@ impl<'a> SortitionHandleConn<'a> { SortitionHandleConn::open_reader(connection, &sn.sortition_id) } - /// Does the sortition db expect to receive blocks - /// signed by this signer set? - /// - /// This only works if `consensus_hash` is within two reward cycles (4200 blocks) of the - /// sortition pointed to by this handle's sortiton tip. If it isn't, then this - /// method returns Ok(false). This is to prevent a DDoS vector whereby compromised stale - /// Signer keys can be used to blast out lots of Nakamoto blocks that will be accepted - /// but never processed. So, `consensus_hash` can be in the same reward cycle as - /// `self.context.chain_tip`, or the previous, but no earlier. - pub fn expects_signer_signature( - &self, - consensus_hash: &ConsensusHash, - signer_signature: &WSTSSignature, - message: &[u8], - aggregate_public_key: &Point, - ) -> Result { - let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No sortition for tip: {:?}", &self.context.chain_tip); - e - })?; - - let ch_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No sortition for consensus hash: {:?}", consensus_hash); - e - })?; - - if ch_sn.block_height - + u64::from(self.context.pox_constants.reward_cycle_length) - + u64::from(self.context.pox_constants.prepare_length) - < sn.block_height - { - // too far in the past - debug!("Block with consensus hash {} is too far in the past", consensus_hash; - "consensus_hash" => %consensus_hash, - "block_height" => ch_sn.block_height, - "tip_block_height" => sn.block_height - ); - return Ok(false); - } - - // this given consensus hash must be an ancestor of our chain tip - let ch_at = self - .get_consensus_at(ch_sn.block_height)? 
- .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No ancestor consensus hash"; - "tip" => %self.context.chain_tip, - "consensus_hash" => %consensus_hash, - "consensus_hash height" => %ch_sn.block_height - ); - e - })?; - - if ch_at != ch_sn.consensus_hash { - // not an ancestor - warn!("Consensus hash is not an ancestor of the sortition tip"; - "tip" => %self.context.chain_tip, - "consensus_hash" => %consensus_hash - ); - return Err(db_error::NotFoundError); - } - - // is this consensus hash in this fork? - if SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)?.is_none() { - return Ok(false); - } - - Ok(signer_signature.verify(aggregate_public_key, message)) - } - pub fn get_reward_set_size_at(&self, sortition_id: &SortitionId) -> Result { self.get_indexed(sortition_id, &db_keys::pox_reward_set_size()) .map(|x| { @@ -1984,32 +1908,6 @@ impl<'a> SortitionHandleConn<'a> { Ok(anchor_block_txid) } - /// Get the last processed reward cycle. - /// Since we always process a RewardSetInfo at the start of a reward cycle (anchor block or - /// no), this is simply the same as asking which reward cycle this SortitionHandleConn's - /// sortition tip is in. - pub fn get_last_processed_reward_cycle(&self) -> Result { - let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? 
- .ok_or(db_error::NotFoundError)?; - let rc = self - .context - .pox_constants - .block_height_to_reward_cycle(self.context.first_block_height, sn.block_height) - .expect("FATAL: sortition from before system start"); - let rc_start_block = self - .context - .pox_constants - .reward_cycle_to_block_height(self.context.first_block_height, rc); - let last_rc = if sn.block_height >= rc_start_block { - rc - } else { - // NOTE: the reward cycle is "processed" at reward cycle index 1, not index 0 - rc.saturating_sub(1) - }; - - Ok(last_rc) - } - pub fn get_reward_cycle_unlocks( &mut self, cycle: u64, @@ -3535,19 +3433,39 @@ impl SortitionDB { } /// Store a pre-processed reward set. - /// `sortition_id` is the first sortition ID of the prepare phase + /// `sortition_id` is the first sortition ID of the prepare phase. + /// No-op if the reward set is empty. pub fn store_preprocessed_reward_set( sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { + if rc_info.known_selected_anchor_block().is_none() { + return Ok(()); + } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; + let args = rusqlite::params![sortition_id, &rc_json]; sort_tx.execute(sql, args)?; Ok(()) } + /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_cycle(). + /// See that method for details. + pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + self.index_conn() + .get_prepare_phase_end_sortition_id_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } + /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). /// See that method for details. 
pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( @@ -3876,6 +3794,33 @@ impl<'a> SortitionDBConn<'a> { .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } + /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( + &self, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + let prepare_phase_end = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(1); + + let last_sortition = + get_ancestor_sort_id(self, prepare_phase_end, tip)?.ok_or_else(|| { + error!( + "Could not find prepare phase end ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_end_height" => prepare_phase_end + ); + db_error::NotFoundError + })?; + Ok(last_sortition) + } + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned /// sortition will be in the preceding reward cycle) @@ -6101,16 +6046,6 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_affirmation_map().to_string()); values.push(cur_affirmation_map.encode()); - if cfg!(test) { - // last reward cycle. - // NOTE: We keep this only for testing, since this is what the original (but - // unmigratable code) did, and we need to verify that the compatibility fix to - // SortitionDB::get_last_processed_reward_cycle() is semantically compatible - // with querying this key. 
- keys.push(db_keys::last_reward_cycle_key().to_string()); - values.push(db_keys::last_reward_cycle_to_string(_reward_cycle)); - } - pox_payout_addrs } else { // if this snapshot consumed some reward set entries AND @@ -6193,15 +6128,6 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_last_selected_anchor_txid().to_string()); values.push("".to_string()); - if cfg!(test) { - // NOTE: We keep this only for testing, since this is what the original (but - // unmigratable code) did, and we need to verify that the compatibility fix to - // SortitionDB::get_last_processed_reward_cycle() is semantically compatible - // with querying this key. - keys.push(db_keys::last_reward_cycle_key().to_string()); - values.push(db_keys::last_reward_cycle_to_string(0)); - } - // no payouts vec![] }; @@ -6543,30 +6469,6 @@ pub mod tests { use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; - impl<'a> SortitionHandleConn<'a> { - /// At one point in the development lifecycle, this code depended on a MARF key/value - /// pair to map the sortition tip to the last-processed reward cycle number. This data would - /// not have been present in epoch 2.4 chainstate and earlier, but would have been present in - /// epoch 2.5 and later, since at the time it was expected that all nodes would perform a - /// genesis sync when booting into epoch 2.5. However, that requirement changed at the last - /// minute, so this code was reworked to avoid the need for the MARF key. But to ensure that - /// this method is semantically consistent with the old code (which the Nakamoto chains - /// coordinator depends on), this code will test that the new reward cycle calculation matches - /// the old reward cycle calculation. 
- #[cfg(test)] - pub fn legacy_get_last_processed_reward_cycle(&self) -> Result { - // verify that this is semantically compatible with the older behavior, which shipped - // for epoch 2.5 but needed to be removed at the last minute in order to support a - // migration path from 2.4 chainstate to 2.5/3.0 chainstate. - let encoded_rc = self - .get_indexed(&self.context.chain_tip, &db_keys::last_reward_cycle_key())? - .expect("FATAL: no last-processed reward cycle"); - - let expected_rc = db_keys::last_reward_cycle_from_string(&encoded_rc); - Ok(expected_rc) - } - } - impl<'a> SortitionHandleTx<'a> { /// Update the canonical Stacks tip (testing only) pub fn test_update_canonical_stacks_tip( From 6ab82f41f24c74004f40048efe290217b7689374 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:54:21 -0400 Subject: [PATCH 101/148] chore: update docs on when nakamoto needs the preprocessed reward set --- stackslib/src/chainstate/coordinator/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index f34e21d1bd..973dd83b53 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -830,7 +830,8 @@ pub fn get_reward_cycle_info( }; // cache the reward cycle info as of the first sortition in the prepare phase, so that - // the Nakamoto epoch can go find it later + // the first Nakamoto epoch can go find it later. Subsequent Nakamoto epochs will use the + // reward set stored to the Nakamoto chain state. 
let ic = sort_db.index_handle(sortition_tip); let prev_reward_cycle = burnchain .block_height_to_reward_cycle(burn_height) From e69ef4a8102fbfc0225acc2183ef610ede47f261 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:54:47 -0400 Subject: [PATCH 102/148] feat: add `load_nakamoto_reward_set()` function, which will load a Nakamoto-epoch reward set from the Nakamoto chain state, except for the first-ever Nakamoto reward set which is necessarily a preprocessed reward set. Also, remove all calls to load a preprocessed sortition DB reward set from the Nakamoto coordinator --- .../chainstate/nakamoto/coordinator/mod.rs | 262 +++++++++++++----- 1 file changed, 189 insertions(+), 73 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..df4966da49 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -42,7 +42,7 @@ use crate::chainstate::coordinator::{ }; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{RewardSet, SIGNERS_NAME}; -use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use crate::chainstate::stacks::Error as ChainstateError; use crate::cost_estimates::{CostEstimator, FeeEstimator}; @@ -210,14 +210,9 @@ fn find_prepare_phase_sortitions( /// If this method returns None, the caller should try again when there are more Stacks blocks. In /// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. /// -/// N.B. this method assumes that the prepare phase is comprised _solely_ of Nakamoto tenures. It -/// will not work if any of the prepare-phase tenures are from epoch 2.x. 
-/// /// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase -/// Returns Err(Error::RewardCycleAlreadyProcessed) if the reward set for this reward cycle has -/// already been processed. pub fn get_nakamoto_reward_cycle_info( burn_height: u64, sortition_tip: &SortitionId, @@ -243,7 +238,6 @@ pub fn get_nakamoto_reward_cycle_info( let reward_cycle = burnchain .next_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); - let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); debug!("Processing reward set for Nakamoto reward cycle"; "burn_height" => burn_height, @@ -251,26 +245,144 @@ pub fn get_nakamoto_reward_cycle_info( "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); + let Some((rc_info, anchor_block_header)) = load_nakamoto_reward_set( + burn_height, + sortition_tip, + burnchain, + chain_state, + sort_db, + provider, + )? + else { + return Ok(None); + }; + + let block_id = match anchor_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(..) => anchor_block_header.index_block_hash(), + StacksBlockHeaderTypes::Nakamoto(ref header) => header.block_id(), + }; + + info!( + "Anchor block selected"; + "cycle" => reward_cycle, + "block_id" => %block_id, + "consensus_hash" => %anchor_block_header.consensus_hash, + "burn_height" => anchor_block_header.burn_header_height, + "anchor_chain_tip" => %anchor_block_header.index_block_hash(), + "anchor_chain_tip_height" => %anchor_block_header.burn_header_height, + ); + + return Ok(Some(rc_info)); +} + +/// Helper to get the Nakamoto reward set for a given reward cycle. 
+/// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the +/// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed +/// from epoch2 state, the reward set will be loaded from the sortition DB (which is the only place +/// it will be stored). +/// +/// Returns Ok(Some((reward set info, PoX anchor block header))) on success +/// Returns Ok(None) if the reward set is not yet known, but could be known by the time a +/// subsequent call is made. +pub fn load_nakamoto_reward_set( + burn_height: u64, + sortition_tip: &SortitionId, + burnchain: &Burnchain, + chain_state: &mut StacksChainState, + sort_db: &SortitionDB, + provider: &U, +) -> Result, Error> { + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); + + // calculating the reward set for the _next_ reward cycle + let reward_cycle = burnchain + .next_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height"); + + let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); + // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). - let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, sortition_tip)?; - - // did we already calculate the reward cycle info? If so, then return it. - let first_sortition_id = if let Some(first_sn) = prepare_phase_sortitions.first() { - if let Some(persisted_reward_cycle_info) = - SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? 
+ let first_epoch30_reward_cycle = burnchain + .next_reward_cycle(epoch_at_height.start_height) + .expect("FATAL: no reward cycle for epoch 3.0 start height"); + + if epoch_at_height.epoch_id < StacksEpochId::Epoch30 + || (epoch_at_height.epoch_id == StacksEpochId::Epoch30 + && reward_cycle == first_epoch30_reward_cycle) + { + // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. + // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward + // cycle info in the nakamoto chain state. + if let Ok(persisted_reward_cycle_info) = + sort_db.get_preprocessed_reward_set_of(sortition_tip) { - return Ok(Some(persisted_reward_cycle_info)); + if persisted_reward_cycle_info + .known_selected_anchor_block() + .is_none() + { + debug!("No reward set known yet for prepare phase"; + "sortition_tip" => %sortition_tip); + return Ok(None); + } + + // find the corresponding Stacks anchor block header + let Some((anchor_block_hash, _)) = persisted_reward_cycle_info.selected_anchor_block() + else { + // should be unreachable + error!("No anchor block known for persisted reward set"; + "sortition_tip" => %sortition_tip); + return Ok(None); + }; + + let ic = sort_db.index_conn(); + let Some(anchor_block_snapshot) = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + sortition_tip, + anchor_block_hash, + )? + else { + // should be unreachable + error!("No ancestor block snapshot for anchor block"; + "anchor_block_hash" => %anchor_block_hash, + "sortition_tip" => %sortition_tip); + + return Ok(None); + }; + + let Some(anchor_block_header) = + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chain_state.db(), + &anchor_block_snapshot.consensus_hash, + )? 
+ else { + // should be unreachable + error!("No block header for anchor block"; + "consensus_hash" => %anchor_block_snapshot.consensus_hash, + "anchor_block_hash" => %anchor_block_hash); + return Ok(None); + }; + + debug!("Loaded reward set calculated in epoch 2.5 for reward cycle {} (which is in epoch {})", reward_cycle, epoch_at_height.epoch_id); + return Ok(Some((persisted_reward_cycle_info, anchor_block_header))); } - first_sn.sortition_id.clone() - } else { - // can't do anything + + // no reward set known yet. It's possible that it simply hasn't been processed yet. + debug!("No pre-processed PoX reward set known for pre-Nakamoto cycle {reward_cycle}"); return Ok(None); - }; + } + + // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) + let sort_end = sort_db.get_prepare_phase_end_sortition_id_for_reward_cycle( + sortition_tip, + reward_cycle.saturating_sub(1), + )?; + let prepare_phase_sortitions = find_prepare_phase_sortitions(sort_db, burnchain, &sort_end)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -317,7 +429,7 @@ pub fn get_nakamoto_reward_cycle_info( .expect("FATAL: no snapshot for winning PoX anchor block"); // make sure the `anchor_block` field is the same as whatever goes into the block-commit, - // or PoX ancestry queries won't work + // or PoX ancestry queries won't work. 
let (block_id, stacks_block_hash) = match anchor_block_header.anchored_header { StacksBlockHeaderTypes::Epoch2(ref header) => ( StacksBlockId::new(&anchor_block_header.consensus_hash, &header.block_hash()), @@ -330,19 +442,18 @@ pub fn get_nakamoto_reward_cycle_info( let txid = anchor_block_sn.winning_block_txid; - info!( - "Anchor block selected"; - "cycle" => reward_cycle, - "block_id" => %block_id, - "consensus_hash" => %anchor_block_header.consensus_hash, - "burn_height" => anchor_block_header.burn_header_height, - "anchor_chain_tip" => %anchor_block_header.index_block_hash(), - "anchor_chain_tip_height" => %anchor_block_header.burn_header_height, - "first_prepare_sortition_id" => %first_sortition_id - ); + test_debug!("Stacks anchor block found"; + "block_id" => %block_id, + "block_hash" => %stacks_block_hash, + "consensus_hash" => %anchor_block_sn.consensus_hash, + "txid" => %txid, + "reward_start_height" => %reward_start_height, + "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - reward_start_height, + // NOTE: the .saturating_sub(2) is needed here because reward_start_height is at reward + // index 1, while we need the highest height in the last cycle. + reward_start_height.saturating_sub(2), chain_state, burnchain, sort_db, @@ -358,13 +469,7 @@ pub fn get_nakamoto_reward_cycle_info( reward_cycle, anchor_status, }; - - // persist this - let mut tx = sort_db.tx_begin()?; - SortitionDB::store_preprocessed_reward_set(&mut tx, &first_sortition_id, &rc_info)?; - tx.commit()?; - - return Ok(Some(rc_info)); + Ok(Some((rc_info, anchor_block_header))) } /// Get the next PoX recipients in the Nakamoto epoch. 
@@ -375,35 +480,21 @@ pub fn get_nakamoto_reward_cycle_info( pub fn get_nakamoto_next_recipients( sortition_tip: &BlockSnapshot, sort_db: &mut SortitionDB, + chain_state: &mut StacksChainState, burnchain: &Burnchain, ) -> Result, Error> { let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { - // load up new reward cycle info so we can start using *that* - let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, &sortition_tip.parent_sortition_id)?; - - // NOTE: this must panic because Nakamoto's first reward cycle has stackers - let first_sn = prepare_phase_sortitions - .first() - .expect("FATAL: unreachable: no prepare-phase sortitions at start of reward cycle"); - - debug!("Get pre-processed reward set"; - "sortition_id" => %first_sn.sortition_id); - - // NOTE: don't panic here. The only caller of this method is a stacks-node miner, - // and they *may* have invoked this before they've processed the prepare phase. - // That's recoverable by simply waiting to mine until they've processed those - // blocks. - let reward_set = - SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? - .ok_or_else(|| { - warn!( - "No preprocessed reward set found"; - "reward_cycle_start" => sortition_tip.block_height + 1, - "first_prepare_sortition_id" => %first_sn.sortition_id - ); - Error::PoXNotProcessedYet - })?; + let Some((reward_set, _)) = load_nakamoto_reward_set( + sortition_tip.block_height, + &sortition_tip.sortition_id, + burnchain, + chain_state, + sort_db, + &OnChainRewardSetProvider::new(), + )? 
+ else { + return Ok(None); + }; Some(reward_set) } else { None @@ -465,9 +556,18 @@ impl< .expect("FATAL: epoch3 block height has no reward cycle"); // only proceed if we have processed the _anchor block_ for this reward cycle - let handle_conn = self.sortition_db.index_handle(&canonical_sortition_tip); - let last_processed_rc = handle_conn.get_last_processed_reward_cycle()?; - Ok(last_processed_rc >= first_epoch3_reward_cycle) + let Some((rc_info, _)) = load_nakamoto_reward_set( + canonical_sn.block_height, + &canonical_sn.sortition_id, + &self.burnchain, + &mut self.chain_state_db, + &self.sortition_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(false); + }; + Ok(rc_info.reward_cycle >= first_epoch3_reward_cycle) } /// This is the main loop body for the coordinator in epoch 3. @@ -707,8 +807,24 @@ impl< }); let last_processed_reward_cycle = { - let ic = self.sortition_db.index_handle(&canonical_sortition_tip); - ic.get_last_processed_reward_cycle()? + let canonical_sn = SortitionDB::get_block_snapshot( + &self.sortition_db.conn(), + &canonical_sortition_tip, + )? + .ok_or(DBError::NotFoundError)?; + let Some((rc_info, _)) = load_nakamoto_reward_set( + canonical_sn.block_height, + &canonical_sn.sortition_id, + &self.burnchain, + &mut self.chain_state_db, + &self.sortition_db, + &OnChainRewardSetProvider::new(), + )? + else { + // no anchor block yet, so try processing another block + continue; + }; + rc_info.reward_cycle }; if last_processed_reward_cycle > current_reward_cycle { @@ -863,10 +979,10 @@ impl< let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; if let Some(rc_info) = reward_cycle_info { // in nakamoto, if we have any reward cycle info at all, it will be known. - assert!( - rc_info.known_selected_anchor_block().is_some(), - "FATAL: unknown PoX anchor block in Nakamoto" - ); + if rc_info.known_selected_anchor_block().is_none() { + warn!("Unknown PoX anchor block in Nakamoto (at height {}). 
Refusing to process more burnchain blocks until that changes.", header.block_height); + return Ok(false); + } } } From 35368887a3a81ff8d449a629e261c5f4eba1e4cf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:55:50 -0400 Subject: [PATCH 103/148] chore: API sync --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index b7c0bb5ba9..a95d968f3e 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -400,6 +400,7 @@ fn replay_reward_cycle( info!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( + &peer.config.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, From 201acbe2629a3c8a6719631d35d22606e68ccc0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:56:23 -0400 Subject: [PATCH 104/148] chore: fmt --- stackslib/src/chainstate/nakamoto/test_signers.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 4a2aa4f29c..13d7f2ff1e 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -50,7 +50,6 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; -use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; From 1d8bea0e639af485b5d3593415d664707e58f63f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:56:38 -0400 Subject: 
[PATCH 105/148] feat: test new reward-set load path by using it directly in the Nakamoto test infrastructure, instead of loading it from a preprocessed reward set in the sortition DB --- .../src/chainstate/nakamoto/tests/node.rs | 33 ++++++++++++++----- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index fc425d0580..1054f584b6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -46,7 +46,9 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; -use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::coordinator::{ + get_nakamoto_next_recipients, load_nakamoto_reward_set, +}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; @@ -579,12 +581,20 @@ impl TestStacksNode { .unwrap(); // Get the reward set - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let reward_set = sortdb - .get_preprocessed_reward_set_of(&sort_tip) - .expect("Failed to get reward cycle info") - .known_selected_anchor_block_owned() - .expect("Expected a reward set"); + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let reward_set = load_nakamoto_reward_set( + sort_tip_sn.block_height, + &sort_tip_sn.sortition_id, + &miner.burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to load reward set") + .expect("Expected a reward set") + .0 + .known_selected_anchor_block_owned() + .expect("Unknown reward set"); test_debug!( "Signing Nakamoto block {} in tenure {} with key in cycle {}", @@ -609,6 +619,7 @@ impl TestStacksNode { let mut sort_handle = 
sortdb.index_handle(&sort_tip); info!("Processing the new nakamoto block"); let accepted = match Relayer::process_new_nakamoto_block( + &miner.burnchain, sortdb, &mut sort_handle, chainstate, @@ -912,7 +923,12 @@ impl<'a> TestPeer<'a> { } // patch in reward set info - match get_nakamoto_next_recipients(&tip, &mut sortdb, &self.config.burnchain) { + match get_nakamoto_next_recipients( + &tip, + &mut sortdb, + &mut stacks_node.chainstate, + &self.config.burnchain, + ) { Ok(recipients) => { block_commit_op.commit_outs = match recipients { Some(info) => { @@ -1130,6 +1146,7 @@ impl<'a> TestPeer<'a> { let block_id = block.block_id(); debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( + &self.network.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, From 9be7365ae16045cadaffe2cfea2c21762d0da6ba Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:57:13 -0400 Subject: [PATCH 106/148] chore: API sync -- we now use a `CurrentRewardSet` struct in the PeerNetwork to reference the reward cycle and its reward set --- .../net/download/nakamoto/download_state_machine.rs | 4 ++-- .../src/net/download/nakamoto/tenure_downloader.rs | 2 +- .../src/net/download/nakamoto/tenure_downloader_set.rs | 8 ++++---- .../download/nakamoto/tenure_downloader_unconfirmed.rs | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index c95dc6d5f3..ae5c8e055c 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -63,7 +63,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use 
crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -1155,7 +1155,7 @@ impl NakamotoDownloadStateMachine { fn update_tenure_downloaders( &mut self, count: usize, - current_reward_sets: &BTreeMap, + current_reward_sets: &BTreeMap, ) { self.tenure_downloads.make_tenure_downloaders( &mut self.tenure_download_schedule, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index a3586602e6..9f261929b5 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -57,7 +57,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 0100eb0ecd..f075028589 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -62,7 +62,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use 
crate::util_lib::db::{DBConn, Error as DBError}; @@ -419,7 +419,7 @@ impl NakamotoTenureDownloaderSet { available: &mut HashMap>, tenure_block_ids: &HashMap, count: usize, - current_reward_cycles: &BTreeMap, + current_reward_cycles: &BTreeMap, ) { test_debug!("schedule: {:?}", schedule); test_debug!("available: {:?}", &available); @@ -482,7 +482,7 @@ impl NakamotoTenureDownloaderSet { }; let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { test_debug!( "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", @@ -494,7 +494,7 @@ impl NakamotoTenureDownloaderSet { }; let Some(Some(end_reward_set)) = current_reward_cycles .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { test_debug!( "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4d4d4dee47..97ccb2c389 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -62,7 +62,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -186,7 +186,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, 
remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, + current_reward_sets: &BTreeMap, ) -> Result<(), NetError> { if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { return Err(NetError::InvalidState); @@ -301,7 +301,7 @@ impl NakamotoUnconfirmedTenureDownloader { // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions let Some(Some(confirmed_reward_set)) = current_reward_sets .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { warn!( "No signer public keys for confirmed tenure {} (rc {})", @@ -312,7 +312,7 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(Some(unconfirmed_reward_set)) = current_reward_sets .get(&tenure_rc) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { warn!( "No signer public keys for unconfirmed tenure {} (rc {})", @@ -717,7 +717,7 @@ impl NakamotoUnconfirmedTenureDownloader { sortdb: &SortitionDB, local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, + current_reward_sets: &BTreeMap, ) -> Result>, NetError> { match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { From 99bce259cd8221afd2c75214c0a5f8d07280e709 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:57:38 -0400 Subject: [PATCH 107/148] chore: remove dead code --- stackslib/src/net/mod.rs | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 1cead0306a..b64a537e81 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2756,6 +2756,7 @@ pub mod test { let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), &mut net_result, + &self.network.burnchain, &mut sortdb, &mut stacks_node.chainstate, &mut mempool, @@ -3884,29 +3885,12 @@ pub mod test { } /// 
Verify that the sortition DB migration into Nakamoto worked correctly. - /// For now, it's sufficient to check that the `get_last_processed_reward_cycle()` calculation - /// works the same across both the original and migration-compatible implementations. pub fn check_nakamoto_migration(&mut self) { let mut sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); let chainstate = &mut node.chainstate; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - for height in 0..=tip.block_height { - let sns = - SortitionDB::get_all_snapshots_by_burn_height(sortdb.conn(), height).unwrap(); - for sn in sns { - let ih = sortdb.index_handle(&sn.sortition_id); - let highest_processed_rc = ih.get_last_processed_reward_cycle().unwrap(); - let expected_highest_processed_rc = - ih.legacy_get_last_processed_reward_cycle().unwrap(); - assert_eq!( - highest_processed_rc, expected_highest_processed_rc, - "BUG: at burn height {} the highest-processed reward cycles diverge", - height - ); - } - } let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); let epoch_3_idx = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap(); From e5d2d4840779d091999c3743eb3241f9b9d5245e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:57:51 -0400 Subject: [PATCH 108/148] feat: use the new reward-set loader path in the PeerNetwork to load the Nakamoto reward set from the Nakamoto chainstate instead of the sortition DB. 
Update the caching logic as well to key each cached reward set by anchor block ID --- stackslib/src/net/p2p.rs | 183 ++++++++++++++++++++------------------- 1 file changed, 93 insertions(+), 90 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index fbb6c375ed..a34c212e69 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -40,13 +40,14 @@ use {mio, url}; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Address, Burnchain, BurnchainView, PublicKey}; -use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; +use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, BlockHeaderCache, SortitionDB}; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::{ static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, RewardCycleInfo, + static_get_stacks_tip_affirmation_map, OnChainRewardSetProvider, RewardCycleInfo, }; -use crate::chainstate::stacks::boot::MINERS_NAME; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{StacksBlockHeader, MAX_BLOCK_LEN, MAX_TRANSACTION_LEN}; use crate::core::StacksEpoch; @@ -232,6 +233,24 @@ impl ConnectingPeer { } } +#[derive(Clone, Debug, PartialEq)] +pub struct CurrentRewardSet { + pub reward_cycle: u64, + pub reward_cycle_info: RewardCycleInfo, + pub anchor_block_consensus_hash: ConsensusHash, + pub anchor_block_hash: BlockHeaderHash, +} + +impl CurrentRewardSet { + pub fn reward_set(&self) -> Option<&RewardSet> { + self.reward_cycle_info.known_selected_anchor_block() + } + + pub fn anchor_block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.anchor_block_consensus_hash, &self.anchor_block_hash) + } +} + pub struct PeerNetwork { // constants pub peer_version: u32, @@ 
-258,16 +277,9 @@ pub struct PeerNetwork { /// In epoch 2.x, this is the same as the tip block ID /// In nakamoto, this is the block ID of the first block in the current tenure pub tenure_start_block_id: StacksBlockId, - /// The reward sets of the current and past reward cycle. + /// The reward sets of the past three reward cycles. /// Needed to validate blocks, which are signed by a threshold of stackers - pub current_reward_sets: BTreeMap, - /// The sortition IDs that began the prepare-phases for given reward cycles. This is used to - /// determine whether or not the reward cycle info in `current_reward_sets` is still valid -- a - /// burnchain fork may invalidate them, so the code must check that the sortition ID for the - /// start of the prepare-phase is still canonical. - /// This needs to be in 1-to-1 correspondence with `current_reward_sets` -- the sortition IDs - /// that make up the values need to correspond to the reward sets computed as of the sortition. - pub current_reward_set_ids: BTreeMap, + pub current_reward_sets: BTreeMap, // information about the state of the network's anchor blocks pub heaviest_affirmation_map: AffirmationMap, @@ -479,7 +491,6 @@ impl PeerNetwork { parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), tenure_start_block_id: StacksBlockId([0x00; 32]), current_reward_sets: BTreeMap::new(), - current_reward_set_ids: BTreeMap::new(), peerdb: peerdb, atlasdb: atlasdb, @@ -5434,38 +5445,10 @@ impl PeerNetwork { } /// Clear out old reward cycles - fn free_old_reward_cycles( - &mut self, - sortdb: &SortitionDB, - tip_sortition_id: &SortitionId, - prev_rc: u64, - ) { + fn free_old_reward_cycles(&mut self, rc: u64) { if self.current_reward_sets.len() > 3 { self.current_reward_sets.retain(|old_rc, _| { - if (*old_rc).saturating_add(1) < prev_rc { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - } - let Some(old_sortition_id) = 
self.current_reward_set_ids.get(old_rc) else { - // shouldn't happen - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - let Ok(prepare_phase_sort_id) = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &tip_sortition_id, - *old_rc, - ) - else { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - if prepare_phase_sort_id != *old_sortition_id { - // non-canonical reward cycle info - self.current_reward_set_ids.remove(old_rc); + if (*old_rc).saturating_add(2) < rc { test_debug!("Drop reward cycle info for cycle {}", old_rc); return false; } @@ -5474,10 +5457,11 @@ impl PeerNetwork { } } - /// Refresh our view of the last two reward cycles + /// Refresh our view of the last three reward cycles fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, + chainstate: &mut StacksChainState, tip_sn: &BlockSnapshot, ) -> Result<(), net_error> { let cur_rc = self @@ -5486,57 +5470,58 @@ impl PeerNetwork { .expect("FATAL: sortition from before system start"); let prev_rc = cur_rc.saturating_sub(1); + let prev_prev_rc = prev_rc.saturating_sub(1); + let ih = sortdb.index_handle(&tip_sn.sortition_id); - // keyed by both rc and sortition ID in case there's a bitcoin fork -- we'd want the - // canonical reward set to be loaded - let cur_rc_sortition_id = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, cur_rc)?; - let prev_rc_sortition_id = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, prev_rc)?; - - for (rc, sortition_id) in [ - (prev_rc, prev_rc_sortition_id), - (cur_rc, cur_rc_sortition_id), - ] - .into_iter() - { - if let Some(sort_id) = self.current_reward_set_ids.get(&rc) { - if sort_id == &sortition_id { - continue; - } - } - let Ok((reward_cycle_info, reward_cycle_sort_id)) = sortdb - 
.get_preprocessed_reward_set_for_reward_cycle(&tip_sn.sortition_id, rc) - .map_err(|e| { - warn!( - "Failed to load reward set for cycle {} ({}): {:?}", - rc, &sortition_id, &e - ); - e - }) + for rc in [cur_rc, prev_rc, prev_prev_rc] { + let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc) + 1; + let Some(ancestor_sort_id) = + get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { - // NOTE: this should never be reached - error!("Unreachable code (but not panicking): no reward cycle info for reward cycle {}", rc); + // reward cycle is too far back for there to be an ancestor continue; }; - if !reward_cycle_info.is_reward_info_known() { - // haven't yet processed the anchor block, so don't store - debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); - continue; + let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); + let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; + + if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { + if let Some(anchor_hash) = anchor_hash_opt.as_ref() { + if cached_rc_info.anchor_block_hash == *anchor_hash { + // cached reward set data is still valid + continue; + } + } } - test_debug!( - "Reward cycle info for cycle {} at sortition {} is {:?}", + let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, - &reward_cycle_sort_id, - &reward_cycle_info - ); - self.current_reward_sets.insert(rc, reward_cycle_info); - self.current_reward_set_ids.insert(rc, reward_cycle_sort_id); - } + &tip_sn.sortition_id, + &self.burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .map_err(|e| { + warn!( + "Failed to load reward cycle info for cycle {}: {:?}", + rc, &e + ); + e + }) + .unwrap_or(None) else { + continue; + }; + + let rc_info = CurrentRewardSet { + reward_cycle: rc, + reward_cycle_info: reward_set_info, + anchor_block_consensus_hash: 
anchor_block_header.consensus_hash, + anchor_block_hash: anchor_block_header.anchored_header.block_hash(), + }; - // free memory - self.free_old_reward_cycles(sortdb, &tip_sn.sortition_id, prev_rc); + self.current_reward_sets.insert(rc, rc_info); + } + self.free_old_reward_cycles(cur_rc); Ok(()) } @@ -5560,7 +5545,9 @@ impl PeerNetwork { SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height - || self.num_state_machine_passes == 0; + || self.num_state_machine_passes == 0 + || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; + let stacks_tip_changed = self.stacks_tip != stacks_tip; let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip.0, &stacks_tip.1); let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash @@ -5568,8 +5555,8 @@ impl PeerNetwork { || burnchain_tip_changed || stacks_tip_changed; - if stacks_tip_changed || burnchain_tip_changed { - self.refresh_reward_cycles(sortdb, &canonical_sn)?; + if burnchain_tip_changed || stacks_tip_changed { + self.refresh_reward_cycles(sortdb, chainstate, &canonical_sn)?; } let mut ret: HashMap> = HashMap::new(); @@ -6789,11 +6776,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -6807,11 +6796,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -6976,11 +6967,13 @@ 
mod test { while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -6994,11 +6987,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7180,11 +7175,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs / 2 { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7198,11 +7195,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7364,11 +7363,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2.network.mempool_sync_txs < (num_txs as u64) { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7382,11 +7383,13 @@ mod test { if let Ok(mut result) = 
peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, From 19c592e48cfd4839fa93917be23520964fe94468 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:58:36 -0400 Subject: [PATCH 109/148] feat: use new Nakamoto reward set loading code to fetch the reward set when validating a block to be relayed --- stackslib/src/net/relay.rs | 38 +++++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4db684ca35..a073398f42 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -38,7 +38,10 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::comm::CoordinatorChannels; -use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::coordinator::{ + BlockEventDispatcher, Error as CoordinatorError, OnChainRewardSetProvider, +}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; @@ -655,6 +658,7 @@ impl Relayer { /// downloaded by us, or pushed via p2p. 
/// Return Ok(true) if we stored it, Ok(false) if we didn't pub fn process_new_nakamoto_block( + burnchain: &Burnchain, sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, @@ -725,15 +729,35 @@ impl Relayer { let config = chainstate.config(); let tip = block_sn.sortition_id; - let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { - Ok(x) => x, - Err(db_error::NotFoundError) => { + let reward_info = match load_nakamoto_reward_set( + block_sn.block_height, + &tip, + burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, ..))) => reward_info, + Ok(None) => { error!("No RewardCycleInfo found for tip {}", tip); return Err(chainstate_error::PoxNoRewardCycle); } - Err(e) => { + Err(CoordinatorError::DBError(db_error::NotFoundError)) => { + error!("No RewardCycleInfo found for tip {}", tip); + return Err(chainstate_error::PoxNoRewardCycle); + } + Err(CoordinatorError::ChainstateError(e)) => { + error!("No RewardCycleInfo loaded for tip {}: {:?}", tip, &e); + return Err(e); + } + Err(CoordinatorError::DBError(e)) => { + error!("No RewardCycleInfo loaded for tip {}: {:?}", tip, &e); return Err(chainstate_error::DBError(e)); } + Err(e) => { + error!("Failed to load RewardCycleInfo for tip {}: {:?}", tip, &e); + return Err(chainstate_error::PoxNoRewardCycle); + } }; let reward_cycle = reward_info.reward_cycle; @@ -769,6 +793,7 @@ impl Relayer { /// Process nakamoto blocks. /// Log errors but do not return them. 
pub fn process_nakamoto_blocks( + burnchain: &Burnchain, sortdb: &SortitionDB, chainstate: &mut StacksChainState, blocks: impl Iterator, @@ -779,6 +804,7 @@ impl Relayer { for block in blocks { let block_id = block.block_id(); if let Err(e) = Self::process_new_nakamoto_block( + burnchain, sortdb, &mut sort_handle, chainstate, @@ -2028,6 +2054,7 @@ impl Relayer { &mut self, _local_peer: &LocalPeer, network_result: &mut NetworkResult, + burnchain: &Burnchain, sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, mempool: &mut MemPoolDB, @@ -2121,6 +2148,7 @@ impl Relayer { let nakamoto_blocks = std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); if let Err(e) = Relayer::process_nakamoto_blocks( + burnchain, sortdb, chainstate, nakamoto_blocks.into_values(), From 411c3c218cf2d50e4a460ca632bd4dac50490839 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:58:56 -0400 Subject: [PATCH 110/148] chore: API sync --- stackslib/src/net/tests/download/epoch2x.rs | 2 ++ stackslib/src/net/tests/download/nakamoto.rs | 10 ++++++++++ stackslib/src/net/tests/mod.rs | 1 + 3 files changed, 13 insertions(+) diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5e9ea0daf2..1f7a266596 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -329,10 +329,12 @@ where let mut result = peer.step_dns(&mut dns_clients[i]).unwrap(); let lp = peer.network.local_peer.clone(); + let burnchain = peer.network.burnchain.clone(); peer.with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 47dabd176e..62d2fdc0bb 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -448,6 +448,7 @@ fn 
test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -456,6 +457,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -523,6 +525,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -531,6 +534,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -624,6 +628,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -632,6 +637,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -724,6 +730,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -732,6 +739,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -803,6 +811,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -811,6 +820,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 5e2cb3e6cc..9e225d8f0d 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -230,6 +230,7 @@ impl NakamotoBootPlan { for block in blocks { 
let block_id = block.block_id(); let accepted = Relayer::process_new_nakamoto_block( + &peer.network.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, From 41339d1574bd85886d95fdb07bfa573542c733fe Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:59:09 -0400 Subject: [PATCH 111/148] fix: load the Nakamoto reward set from Nakamoto chainstate --- .../stacks-node/src/nakamoto_node/miner.rs | 86 +++++++++++++++++-- 1 file changed, 80 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ce0c417704..69b04c3a53 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -25,6 +25,8 @@ use libsigner::v1::messages::{MessageSlotID, SignerMessage}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::coordinator::OnChainRewardSetProvider; +use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; @@ -277,6 +279,13 @@ impl BlockMinerThread { }) })?; + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! 
{e:?}" + )) + })?; + let reward_cycle = self .burnchain .pox_constants @@ -290,8 +299,20 @@ impl BlockMinerThread { ) })?; - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(x) => x, + let reward_info = match load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &self.burnchain, + &mut chain_state, + &sort_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set stored yet. Cannot mine!".into(), + )); + } Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -373,8 +394,27 @@ impl BlockMinerThread { ) .expect("FATAL: building on a burn block that is before the first burn block"); - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(x) => x, + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! {e:?}" + )) + })?; + + let reward_info = match load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &self.burnchain, + &mut chain_state, + &sort_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set stored yet. Cannot mine!".into(), + )); + } Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. 
{e:?}" @@ -839,8 +879,42 @@ impl BlockMinerThread { let signer_transactions = self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let signer_bitvec_len = - &burn_db.get_preprocessed_reward_set_size(&self.burn_block.sortition_id); + let tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::DBError(e)))?; + + let reward_info = match load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &self.burnchain, + &mut chain_state, + &burn_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set stored yet. Cannot mine!".into(), + )); + } + Err(e) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" + ))); + } + }; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Current reward cycle did not select a reward set. 
Cannot mine!".into(), + )); + }; + let signer_bitvec_len = reward_set + .signers + .as_ref() + .map(|x| x.len()) + .unwrap_or(0) + .try_into() + .ok(); // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( From 8508840eeb963a2334f4b19a126c8687c98747ed Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:59:38 -0400 Subject: [PATCH 112/148] chore: API sync --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fc4ca1ae0d..b7cc1bc4f1 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -258,6 +258,7 @@ impl RelayerThread { .process_network_result( &self.local_peer, &mut net_result, + &self.burnchain, &mut self.sortdb, &mut self.chainstate, &mut self.mempool, @@ -416,11 +417,16 @@ impl RelayerThread { .unwrap_or_else(|| VRFProof::empty()); // let's figure out the recipient set! 
- let recipients = get_nakamoto_next_recipients(&sort_tip, &mut self.sortdb, &self.burnchain) - .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; + let recipients = get_nakamoto_next_recipients( + &sort_tip, + &mut self.sortdb, + &mut self.chainstate, + &self.burnchain, + ) + .map_err(|e| { + error!("Relayer: Failure fetching recipient set: {:?}", e); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; let block_header = NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) From 32a1cd733d86bfa9e97981c21c2bca8fd1141012 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:59:49 -0400 Subject: [PATCH 113/148] chore: API sync --- testnet/stacks-node/src/neon_node.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b6ac17e51e..48086c190b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2727,6 +2727,7 @@ impl RelayerThread { .process_network_result( &relayer_thread.local_peer, &mut net_result, + &relayer_thread.burnchain, sortdb, chainstate, mempool, From b1a68b780a029868be35bdba2e0508a5c251fb95 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 11:00:01 -0400 Subject: [PATCH 114/148] chore: update Nakamoto integration test infrastructure to fetch the Nakamoto reward set from Nakamoto chainstate --- .../src/tests/nakamoto_integrations.rs | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3c7e422e8d..2da7444c37 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -37,6 +37,8 @@ use stacks::chainstate::burn::operations::{ BlockstackOperationType, PreStxOp, 
StackStxOp, VoteForAggregateKeyOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::coordinator::OnChainRewardSetProvider; +use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; use stacks::chainstate::nakamoto::test_signers::TestSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; @@ -362,13 +364,29 @@ pub fn read_and_sign_block_proposal( ) -> Result { let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let reward_set = sortdb - .get_preprocessed_reward_set_of(&tip.sortition_id) - .expect("Failed to get reward cycle info") - .known_selected_anchor_block_owned() - .expect("Expected a reward set"); + let reward_set = load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &burnchain, + &mut chainstate, + &sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to query reward set") + .expect("No reward set calculated") + .0 + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); From cee39a162674b80ff6a5691b2447ff3161d73e9b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 11:33:15 -0400 Subject: [PATCH 115/148] fix: handle unconfirmed blocks correctly --- stackslib/src/chainstate/burn/db/sortdb.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index bba8257cbe..c698a1b7a0 100644 --- 
a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2658,7 +2658,16 @@ impl SortitionDB { chainstate: &StacksChainState, stacks_block_id: &StacksBlockId, ) -> Result, db_error> { - let header = match NakamotoChainState::get_block_header(chainstate.db(), stacks_block_id) { + let lookup_block_id = if let Some(ref unconfirmed_state) = chainstate.unconfirmed_state { + if &unconfirmed_state.unconfirmed_chain_tip == stacks_block_id { + &unconfirmed_state.confirmed_chain_tip + } else { + stacks_block_id + } + } else { + stacks_block_id + }; + let header = match NakamotoChainState::get_block_header(chainstate.db(), lookup_block_id) { Ok(Some(x)) => x, x => { debug!("Failed to get block header: {:?}", x); From 6e5e35706275e1b30633bba0b5a44791509f1b39 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 14:09:46 -0400 Subject: [PATCH 116/148] fix: update `supports_epoch` for version `5` This should have been included in #4812 but was overlooked. The fix solves the issue with the various Nakamoto integration tests. 
--- stackslib/src/chainstate/stacks/db/mod.rs | 40 +++++++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index f10a87dccc..865758ed01 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -294,16 +294,32 @@ impl DBConfig { || self.version == "2" || self.version == "3" || self.version == "4" + || self.version == "5" } StacksEpochId::Epoch2_05 => { - self.version == "2" || self.version == "3" || self.version == "4" + self.version == "2" + || self.version == "3" + || self.version == "4" + || self.version == "5" + } + StacksEpochId::Epoch21 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch22 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch23 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch24 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch25 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch30 => { + self.version == "3" || self.version == "4" || self.version == "5" } - StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch25 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch30 => self.version == "3" || self.version == "4", } } } @@ -2933,4 +2949,14 @@ pub mod test { MAINNET_2_0_GENESIS_ROOT_HASH ); } + + #[test] + fn latest_db_version_supports_latest_epoch() { + let db = DBConfig { + version: CHAINSTATE_VERSION.to_string(), + mainnet: true, + chain_id: CHAIN_ID_MAINNET, + }; + 
assert!(db.supports_epoch(StacksEpochId::latest())); + } } From e9685305e43e6b6d41d1e32e8d4f79e3613f3f97 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 30 May 2024 11:25:52 -0700 Subject: [PATCH 117/148] feat: additional checks for invalid signatures, duplicates, etc --- .../src/nakamoto_node/sign_coordinator.rs | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 0db0ee9e04..149eb84cbf 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -30,6 +30,8 @@ use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; @@ -732,9 +734,32 @@ impl SignCoordinator { "Signer entry not found".into(), )); }; - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); + let Ok(signer_pubkey) = + StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!("Got invalid signature from a signer. Ignoring."); + continue; + }; + if !valid_sig { + warn!( + "Processed signature but didn't validate over the expected block. 
Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } debug!("SignCoordinator: Total weight signed: {total_weight_signed}"); gathered_signatures.insert(slot_id, signature); } From 634c8663b56e5037feb957f60d361425a77d3921 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 14:30:39 -0400 Subject: [PATCH 118/148] test: add simple unit test for `SortitionDB` Ensures that the latest DB version supports the latest epoch. --- stackslib/src/chainstate/burn/db/sortdb.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..dc1e65f28d 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -10830,4 +10830,12 @@ pub mod tests { let db_epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); assert_eq!(db_epochs, STACKS_EPOCHS_MAINNET.to_vec()); } + + #[test] + fn latest_db_version_supports_latest_epoch() { + assert!(SortitionDB::is_db_version_supported_in_epoch( + StacksEpochId::latest(), + SORTITION_DB_VERSION + )); + } } From 803cf7ba86c9aea01828ea23a3607b46c482cb8e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 15:55:44 -0400 Subject: [PATCH 119/148] chore: improve tip used for index handles in `net` --- stackslib/src/net/mod.rs | 9 +++++-- stackslib/src/net/relay.rs | 52 ++++++++++++++++++++++---------------- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index d270e396f9..ed456e30f4 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3532,7 +3532,7 @@ pub mod test { 
StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), block_txs, ) .unwrap(); @@ -3743,7 +3743,12 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb.index_handle_at_tip(); + let sort_iconn = sortdb + .index_handle_at_block( + &miner_chainstate, + &builder.chain_tip.index_block_hash(), + ) + .unwrap(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index fa11b575d4..eefb2cd3dd 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -4047,21 +4047,20 @@ pub mod test { let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let iconn = sortdb + .index_handle_at_block(&stacks_node.chainstate, &chain_tip) + .unwrap(); let cur_nonce = stacks_node .chainstate - .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), - &chain_tip, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }, - ) + .with_read_only_clarity_tx(&iconn, &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }) .unwrap(); test_debug!( @@ -5425,7 +5424,7 @@ pub mod test { let block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx.clone()], ) .unwrap() @@ -5492,7 +5491,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + 
&sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx.clone(), bad_tx.clone()], ) { @@ -5514,7 +5513,7 @@ pub mod test { let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx.clone()], ) .unwrap(); @@ -5531,7 +5530,9 @@ pub mod test { let merkle_tree = MerkleTree::::new(&txid_vecs); bad_block.header.tx_merkle_root = merkle_tree.root(); - let sort_ic = sortdb.index_handle_at_tip(); + let sort_ic = sortdb + .index_handle_at_block(chainstate, &parent_index_hash) + .unwrap(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) .unwrap(); @@ -5816,7 +5817,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx], ) .unwrap(); @@ -5994,7 +5995,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx, versioned_contract], ) .unwrap(); @@ -6181,7 +6182,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx], ) .unwrap(); @@ -6220,8 +6221,12 @@ pub mod test { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + let snapshot = + SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &consensus_hash) + .unwrap() + .unwrap(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&snapshot.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, @@ -6270,8 +6275,11 @@ pub mod test { // tenure 28 let 
versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + let snapshot = SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &consensus_hash) + .unwrap() + .unwrap(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&snapshot.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, From 01b18ebbea2a9c0dc16b01640f75d6c72199c5fd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 16:39:43 -0400 Subject: [PATCH 120/148] fix: tip selection in net tests --- stackslib/src/net/mod.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index ed456e30f4..7d05bd3e12 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3743,12 +3743,7 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb - .index_handle_at_block( - &miner_chainstate, - &builder.chain_tip.index_block_hash(), - ) - .unwrap(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) From af000af2f52bbbac698778f8d319dfc9b0aa308d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 30 May 2024 15:45:55 -0500 Subject: [PATCH 121/148] use network.burnchain_tip for QuerySpecifier::Latest --- stackslib/src/net/api/getsortition.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index ed084a084d..1e2551eb83 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -194,10 +194,10 @@ impl RPCRequestHandler for GetSortitionHandler { node: &mut StacksNodeState, ) -> 
Result<(HttpResponsePreamble, HttpResponseContents), NetError> { let result = - node.with_node_state(|_network, sortdb, _chainstate, _mempool, _rpc_args| { + node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { let query_result = match self.query { QuerySpecifier::Latest => { - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map(Some) + Ok(Some(network.burnchain_tip.clone())) }, QuerySpecifier::ConsensusHash(ref consensus_hash) => { SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) From 49787aec53eadc465e5efcc3c905e2971117a5a8 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 19:27:13 -0400 Subject: [PATCH 122/148] fix: correct tip usage for sortition handle --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 +-- testnet/stacks-node/src/run_loop/helium.rs | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 40799bafa9..3ebb12fd9f 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -481,8 +481,7 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = - sort_db.index_handle_at_block(&chain_state, &block.block_id())?; + let mut sortition_handle = sort_db.index_handle_at_tip(); let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 2db7a3a090..2922ce584a 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -223,7 +223,9 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_handle_at_tip(), + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), 
); } }; From f963b354ed002e7c8ce8f34ef0dd6e74a333e6e1 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 30 May 2024 16:36:13 -0700 Subject: [PATCH 123/148] crc: helper method for RewardSet total weight --- stackslib/src/chainstate/nakamoto/mod.rs | 7 +++--- stackslib/src/chainstate/stacks/boot/mod.rs | 17 ++++++++++++++ .../src/nakamoto_node/sign_coordinator.rs | 22 ++++++++++--------- 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 33f6ff2109..d92b373bdd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -516,10 +516,9 @@ impl NakamotoBlockHeader { // `last_index` is used to prevent out-of-order signatures let mut last_index = None; - let total_weight = signers.iter().map(|s| s.weight).fold(0, |w, acc| { - acc.checked_add(w) - .expect("FATAL: Total signer weight > u32::MAX") - }); + let total_weight = reward_set + .total_signing_weight() + .map_err(|_| ChainstateError::NoRegisteredSigners(0))?; // HashMap of let signers_by_pk: HashMap<_, _> = signers diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 01ca39be4a..e42f1a0dfa 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -277,6 +277,23 @@ impl RewardSet { pub fn metadata_deserialize(from: &str) -> Result { serde_json::from_str(from).map_err(|e| e.to_string()) } + + /// Return the total `weight` of all signers in the reward set. + /// If there are no reward set signers, a ChainstateError is returned. 
+ pub fn total_signing_weight(&self) -> Result { + let Some(ref reward_set_signers) = self.signers else { + return Err(format!( + "Unable to calculate total weight - No signers in reward set" + )); + }; + Ok(reward_set_signers + .iter() + .map(|s| s.weight) + .fold(0, |s, acc| { + acc.checked_add(s) + .expect("FATAL: Total signer weight > u32::MAX") + })) + } } impl RewardSetData { diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 149eb84cbf..078a73590a 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -254,15 +254,10 @@ impl SignCoordinator { ..Default::default() }; - let total_weight = - reward_set_signers - .iter() - .cloned() - .map(|s| s.weight) - .fold(0, |w, acc| { - acc.checked_add(w) - .expect("FATAL: Total signer weight > u32::MAX") - }); + let total_weight = reward_set.total_signing_weight().map_err(|e| { + warn!("Failed to calculate total weight for the reward set: {e:?}"); + ChainstateError::NoRegisteredSigners(0) + })?; let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; @@ -760,7 +755,14 @@ impl SignCoordinator { .checked_add(signer_entry.weight) .expect("FATAL: total weight signed exceeds u32::MAX"); } - debug!("SignCoordinator: Total weight signed: {total_weight_signed}"); + debug!("Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + // "signer_weight" => signer_entry.weight // commented due to max size of `debug!` + "total_weight_signed" => total_weight_signed, + ); gathered_signatures.insert(slot_id, signature); } SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { From 4c35571c4cabac9889bf7c9b358d31e0d484380e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 30 May 2024 16:44:07 -0700 Subject: [PATCH 
124/148] fix: add missing comma to debug metadata --- testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 078a73590a..a0be82f06e 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -760,7 +760,7 @@ impl SignCoordinator { "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %signature, - // "signer_weight" => signer_entry.weight // commented due to max size of `debug!` + "signer_weight" => signer_entry.weight, "total_weight_signed" => total_weight_signed, ); gathered_signatures.insert(slot_id, signature); From e6893196861c615536a42607fd0cf6e81726135d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 21:59:41 -0400 Subject: [PATCH 125/148] chore: replace next_reward_cycle() with pox_reward_cycle() --- stackslib/src/burnchains/burnchain.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 0247a54512..52fef1a4f7 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -551,16 +551,18 @@ impl Burnchain { .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } - pub fn next_reward_cycle(&self, block_height: u64) -> Option { + /// Compute the reward cycle ID of the PoX reward set which is active as of this burn_height. + /// The reward set is calculated at reward cycle index 1, so if this block height is at or after + /// reward cycle index 1, then this behaves like `block_height_to_reward_cycle()`. However, + /// if it's reward cycle index is 0, then it belongs to the previous reward cycle. 
+ pub fn pox_reward_cycle(&self, block_height: u64) -> Option { let cycle = self.block_height_to_reward_cycle(block_height)?; let effective_height = block_height.checked_sub(self.first_block_height)?; - let next_bump = if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 - { - 0 + if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 { + Some(cycle.saturating_sub(1)) } else { - 1 - }; - Some(cycle + next_bump) + Some(cycle) + } } pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option { From 243dcd785d27b7636c68f3611313c46394ddc0c9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:00:10 -0400 Subject: [PATCH 126/148] fix: off-by-one error returned the wrong reward cycle --- stackslib/src/chainstate/burn/db/sortdb.rs | 23 ++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 4e8dd50f1e..3ee746971f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3212,11 +3212,18 @@ impl SortitionDB { ) -> Result<(), db_error> { let pox_constants = self.pox_constants.clone(); for rc in 0..=(canonical_tip.block_height / u64::from(pox_constants.reward_cycle_length)) { - if pox_constants.reward_cycle_to_block_height(self.first_block_height, rc) - > canonical_tip.block_height - { + let rc_start = pox_constants.reward_cycle_to_block_height(self.first_block_height, rc); + if rc_start > canonical_tip.block_height { break; } + let epoch_at_height = SortitionDB::get_stacks_epoch(self.conn(), rc_start)? 
+ .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", rc_start)) + .epoch_id; + + if epoch_at_height >= StacksEpochId::Epoch30 { + break; + } + info!("Regenerating reward set for cycle {}", &rc); migrator.regenerate_reward_cycle_info(self, rc)?; } @@ -3434,13 +3441,13 @@ impl SortitionDB { /// Store a pre-processed reward set. /// `sortition_id` is the first sortition ID of the prepare phase. - /// No-op if the reward set is empty. + /// No-op if the reward set has a selected-and-unknown anchor block. pub fn store_preprocessed_reward_set( sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { - if rc_info.known_selected_anchor_block().is_none() { + if !rc_info.is_reward_info_known() { return Ok(()); } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; @@ -3777,12 +3784,8 @@ impl<'a> SortitionDBConn<'a> { db_error::NotFoundError })?; - // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch - // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last - // block is checked against the signers who were active just before the new reward set is - // calculated. 
let reward_cycle_id = pox_constants - .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) .expect("FATAL: stored snapshot with block height < first_block_height"); self.get_preprocessed_reward_set_for_reward_cycle( From 84b6f2ecb532a0dc8666eb56737264c9630f94e2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:00:26 -0400 Subject: [PATCH 127/148] chore: fmt --- stackslib/src/chainstate/coordinator/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 973dd83b53..b3e170987e 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -752,6 +752,7 @@ pub fn get_reward_cycle_info( ) -> Result, Error> { let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); + if !burnchain.is_reward_cycle_start(burn_height) { return Ok(None); } @@ -3531,6 +3532,7 @@ impl SortitionDBMigrator { .pox_constants .reward_cycle_to_block_height(sort_db.first_block_height, reward_cycle) .saturating_sub(1); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?; let ancestor_sn = { From 480c59ac4f01b0e49c81c9b94fba75da71614289 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:00:45 -0400 Subject: [PATCH 128/148] chore: fix off-by-one errors by basing the reward set retrieval logic on the *given* block height's reward cycle --- .../chainstate/nakamoto/coordinator/mod.rs | 87 ++++++++----------- 1 file changed, 37 insertions(+), 50 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index df4966da49..f703a23486 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ 
b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -28,7 +28,7 @@ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Burnchain, BurnchainBlockHeader}; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, SortitionDB}; use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::comm::{ @@ -200,9 +200,13 @@ fn find_prepare_phase_sortitions( Ok(sns) } -/// Try to get the reward cycle information for a Nakamoto reward cycle. +/// Try to get the reward cycle information for a Nakamoto reward cycle, identified by the +/// burn_height. The reward cycle info returned will be from the reward cycle that is active as of +/// `burn_height`. `sortition_tip` can be any sortition ID that's at a higher height than +/// `burn_height`. +/// /// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the -/// _last_ tenure of _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// _last_ tenure of _R - 1_'s reward phase (i.e. which takes place toward the end of reward cycle). /// The reason it must be this way is because its hash will be in the block-commit for the first /// prepare-phase tenure of cycle _R_ (which is required for the PoX ancestry query in the /// block-commit validation logic). 
@@ -230,13 +234,9 @@ pub fn get_nakamoto_reward_cycle_info( "FATAL: called a nakamoto function outside of epoch 3" ); - if !burnchain.is_in_prepare_phase(burn_height) { - return Err(Error::NotInPreparePhase); - } - // calculating the reward set for the _next_ reward cycle let reward_cycle = burnchain - .next_reward_cycle(burn_height) + .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); debug!("Processing reward set for Nakamoto reward cycle"; @@ -275,7 +275,8 @@ pub fn get_nakamoto_reward_cycle_info( return Ok(Some(rc_info)); } -/// Helper to get the Nakamoto reward set for a given reward cycle. +/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `burn_height`. +/// /// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the /// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed /// from epoch2 state, the reward set will be loaded from the sortition DB (which is the only place @@ -295,12 +296,20 @@ pub fn load_nakamoto_reward_set( let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); - // calculating the reward set for the _next_ reward cycle let reward_cycle = burnchain - .next_reward_cycle(burn_height) + .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); - let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); + let prepare_end_height = burnchain + .reward_cycle_to_block_height(reward_cycle) + .saturating_sub(1); + let Some(prepare_end_sortition_id) = + get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? 
+ else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, prepare_end_height, sortition_tip); + return Ok(None); + }; // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. @@ -308,7 +317,7 @@ pub fn load_nakamoto_reward_set( // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). let first_epoch30_reward_cycle = burnchain - .next_reward_cycle(epoch_at_height.start_height) + .pox_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); if epoch_at_height.epoch_id < StacksEpochId::Epoch30 @@ -319,14 +328,15 @@ pub fn load_nakamoto_reward_set( // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. if let Ok(persisted_reward_cycle_info) = - sort_db.get_preprocessed_reward_set_of(sortition_tip) + sort_db.get_preprocessed_reward_set_of(&prepare_end_sortition_id) { if persisted_reward_cycle_info .known_selected_anchor_block() .is_none() { debug!("No reward set known yet for prepare phase"; - "sortition_tip" => %sortition_tip); + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); return Ok(None); } @@ -335,7 +345,8 @@ pub fn load_nakamoto_reward_set( else { // should be unreachable error!("No anchor block known for persisted reward set"; - "sortition_tip" => %sortition_tip); + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); return Ok(None); }; @@ -343,14 +354,15 @@ pub fn load_nakamoto_reward_set( let Some(anchor_block_snapshot) = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, - sortition_tip, + &prepare_end_sortition_id, anchor_block_hash, )? 
else { // should be unreachable error!("No ancestor block snapshot for anchor block"; "anchor_block_hash" => %anchor_block_hash, - "sortition_tip" => %sortition_tip); + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); return Ok(None); }; @@ -378,11 +390,8 @@ pub fn load_nakamoto_reward_set( } // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) - let sort_end = sort_db.get_prepare_phase_end_sortition_id_for_reward_cycle( - sortition_tip, - reward_cycle.saturating_sub(1), - )?; - let prepare_phase_sortitions = find_prepare_phase_sortitions(sort_db, burnchain, &sort_end)?; + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, &prepare_end_sortition_id)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -447,13 +456,11 @@ pub fn load_nakamoto_reward_set( "block_hash" => %stacks_block_hash, "consensus_hash" => %anchor_block_sn.consensus_hash, "txid" => %txid, - "reward_start_height" => %reward_start_height, + "prepare_end_height" => %prepare_end_height, "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - // NOTE: the .saturating_sub(2) is needed here because reward_start_height is at reward - // index 1, while we need the highest height in the last cycle. - reward_start_height.saturating_sub(2), + prepare_end_height.saturating_sub(1), chain_state, burnchain, sort_db, @@ -972,42 +979,22 @@ impl< } }; - if self.burnchain.is_in_prepare_phase(header.block_height) { - // try to eagerly load up the reward cycle information, so we can persist it and - // make it available to signers. If we're at the _end_ of the prepare phase, then - // we have no choice but to block. 
- let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; - if let Some(rc_info) = reward_cycle_info { - // in nakamoto, if we have any reward cycle info at all, it will be known. - if rc_info.known_selected_anchor_block().is_none() { - warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", header.block_height); - return Ok(false); - } - } - } - let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { // we're at the end of the prepare phase, so we'd better have obtained the reward // cycle info of we must block. - // N.B. it's `- 2` because `is_reward_cycle_start` implies that `block_height % reward_cycle_length == 1`, - // but this call needs `block_height % reward_cycle_length == reward_cycle_length - 1` -- i.e. `block_height` - // must be the last block height in the last reward cycle. - let end_cycle_block_height = header.block_height.saturating_sub(2); - let reward_cycle_info = - self.get_nakamoto_reward_cycle_info(end_cycle_block_height)?; + let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks if rc_info.known_selected_anchor_block().is_none() { - warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", end_cycle_block_height); + warn!("Unknown PoX anchor block in Nakamoto (at height {}). 
Refusing to process more burnchain blocks until that changes.", header.block_height); return Ok(false); } } else { // have to block -- we don't have the reward cycle information debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), - "reward_cycle_end" => end_cycle_block_height - ); + "block_height" => header.block_height); return Ok(false); } reward_cycle_info From 75d200eb0268d71160ff30a91241c6473588ac43 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:01:20 -0400 Subject: [PATCH 129/148] chore: log last tenure ID --- stackslib/src/chainstate/nakamoto/tests/node.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 1054f584b6..5b08a398fa 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -386,11 +386,12 @@ impl TestStacksNode { .unwrap(); test_debug!( - "Work in {} {} for Nakamoto parent: {},{}", + "Work in {} {} for Nakamoto parent: {},{}. Last tenure ID is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, parent_sortition.total_burn, last_parent.header.chain_length + 1, + &parent_tenure_id, ); (parent_tenure_id, parent_sortition) @@ -420,11 +421,12 @@ impl TestStacksNode { let parent_tenure_id = parent_chain_tip.index_block_hash(); test_debug!( - "Work in {} {} for Stacks 2.x parent: {},{}", + "Work in {} {} for Stacks 2.x parent: {},{}. 
Last tenure ID is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, parent_stacks_block_snapshot.total_burn, parent_chain_tip.anchored_header.height(), + &parent_tenure_id, ); (parent_tenure_id, parent_stacks_block_snapshot) From 04501144655ff0fc9691613899653f2eeef172de Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:02:10 -0400 Subject: [PATCH 130/148] fix: a reward set can be identified by either the block hash (epoch 2) or the block ID (nakamoto) --- stackslib/src/net/p2p.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index a34c212e69..1d195323f5 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5474,7 +5474,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { - let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc) + 1; + let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? 
else { @@ -5486,7 +5486,12 @@ impl PeerNetwork { if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { if let Some(anchor_hash) = anchor_hash_opt.as_ref() { - if cached_rc_info.anchor_block_hash == *anchor_hash { + // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start + // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to + // deal with in the pre-Nakamoto days + if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) + || cached_rc_info.anchor_block_hash == *anchor_hash + { // cached reward set data is still valid continue; } @@ -5494,7 +5499,7 @@ impl PeerNetwork { } let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( - rc, + rc_start_height, &tip_sn.sortition_id, &self.burnchain, chainstate, @@ -5519,6 +5524,11 @@ impl PeerNetwork { anchor_block_hash: anchor_block_header.anchored_header.block_hash(), }; + test_debug!( + "Store cached reward set for reward cycle {} anchor block {}", + rc, + &rc_info.anchor_block_hash + ); self.current_reward_sets.insert(rc, rc_info); } self.free_old_reward_cycles(cur_rc); From 3492365f606088d4e150e13aa5b00b39596684a7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 May 2024 12:08:00 -0500 Subject: [PATCH 131/148] feat: use burnview consensus hash to initialize the sortdb handles for nakamoto blocks --- stackslib/src/chainstate/burn/db/sortdb.rs | 34 ++++++++++++-- stackslib/src/chainstate/nakamoto/mod.rs | 37 +++++++++++++++ stackslib/src/chainstate/nakamoto/tenure.rs | 47 +++++++++++++++++++ .../stacks-node/src/nakamoto_node/miner.rs | 2 +- 4 files changed, 115 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3e5c27ba84..1c6fe26606 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -70,7 +70,7 @@ use crate::chainstate::coordinator::{ use 
crate::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; -use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::index::{ @@ -2674,9 +2674,24 @@ impl SortitionDB { return Err(db_error::NotFoundError); } }; - let snapshot = - SortitionDB::get_block_snapshot_consensus(&self.conn(), &header.consensus_hash)? - .ok_or(db_error::NotFoundError)?; + // if its a nakamoto block, we want to use the burnchain view of the block + let burn_view = match &header.anchored_header { + StacksBlockHeaderTypes::Epoch2(_) => header.consensus_hash, + StacksBlockHeaderTypes::Nakamoto(_) => { + NakamotoChainState::get_tenure_for_block(chainstate.db(), &header) + .map_err(|e| { + warn!( + "Failed to get tenure for block header: {:?}", e; + "block_id" => %stacks_block_id, + ); + db_error::NotFoundError + })? + .burn_view_consensus_hash + } + }; + + let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &burn_view)? + .ok_or(db_error::NotFoundError)?; Ok(self.index_handle(&snapshot.sortition_id)) } @@ -4604,6 +4619,17 @@ impl SortitionDB { self.index_handle(&sortition_id) } + /// Open an index handle at the given consensus hash + /// Returns a db_error::NotFoundError if `ch` cannot be found + pub fn index_handle_at_ch<'a>( + &'a self, + ch: &ConsensusHash, + ) -> Result, db_error> { + let sortition_id = Self::get_sortition_id_by_consensus(self.conn(), ch)? 
+ .ok_or_else(|| db_error::NotFoundError)?; + Ok(self.index_handle(&sortition_id)) + } + /// Open a tx handle at the burn chain tip pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d3541fae22..3c8946280c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1344,6 +1344,43 @@ impl NakamotoChainState { return Err(ChainstateError::InvalidStacksBlock(msg.into())); } + // set the sortition handle's pointer to the block's burnchain view. + // this is either: + // (1) set by the tenure change tx if one exists + // (2) the same as parent block id + + let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { + tenure_change.burn_view_consensus_hash + } else { + let Some(current_tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( + &chainstate_tx.tx, + &next_ready_block.header.consensus_hash, + )? + else { + warn!( + "Cannot process Nakamoto block: failed to find active tenure"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Ok(None); + }; + current_tenure.burn_view_consensus_hash + }; + let Some(burnchain_view_sortid) = + SortitionDB::get_sortition_id_by_consensus(sort_tx.tx(), &burnchain_view)? 
+ else { + warn!( + "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "burn_view_consensus_hash" => %burnchain_view, + ); + return Ok(None); + }; + + sort_tx.context.chain_tip = burnchain_view_sortid; + // find commit and sortition burns if this is a tenure-start block let Ok(new_tenure) = next_ready_block.is_wellformed_tenure_start_block() else { return Err(ChainstateError::InvalidStacksBlock( diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 2edf9f1e87..f68e0ee90a 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -595,6 +595,53 @@ impl NakamotoChainState { Ok(tenure_opt) } + /// Get the tenure change that was active for a given block header + /// If a tenure change occurred during this block, it will be returned + pub fn get_tenure_for_block( + headers_conn: &Connection, + block_header: &StacksHeaderInfo, + ) -> Result { + let sql = "SELECT * FROM nakamoto_tenures WHERE block_id = ? LIMIT 1"; + let tenure_opt: Option = + query_row(headers_conn, sql, &[block_header.index_block_hash()])?; + if let Some(tenure) = tenure_opt { + return Ok(tenure); + } + // there wasn't a tenure change at that block, so we need to figure out the active tenure + // use the "tenure height" to query for `num_blocks_confirmed` + let block_height = block_header.stacks_block_height; + let tenure_start_height = Self::get_nakamoto_tenure_start_block_header( + headers_conn, + &block_header.consensus_hash, + )? + .ok_or_else(|| ChainstateError::NoSuchBlockError)? 
+ .stacks_block_height; + let blocks_confirmed = u64_to_sql(block_height.saturating_sub(tenure_start_height))?; + // querying by blocks confirmed doesn't work if cause is blockfound, + so don't try and instead fall back to directly querying it + let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ? + AND num_blocks_confirmed <= ? + AND cause <> ? + ORDER BY num_blocks_confirmed DESC LIMIT 1"; + if let Some(tenure) = query_row( + headers_conn, + sql, + params![ + &block_header.consensus_hash, + blocks_confirmed, + TenureChangeCause::BlockFound.as_u8() + ], + )? { + return Ok(tenure); + } + // fall back to the BlockFound tenure change + Self::get_highest_nakamoto_tenure_change_by_tenure_id( + headers_conn, + &block_header.consensus_hash, + )? + .ok_or_else(|| ChainstateError::NoSuchBlockError) + } + /// Get the highest non-empty processed tenure on the canonical sortition history. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ebb12fd9f..c34d75966e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -481,7 +481,7 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = sort_db.index_handle_at_tip(); + let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, From 50bcac2c1cdbc221024fb3f9792b757135b23ab6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 May 2024 12:49:16 -0500 Subject: [PATCH 132/148] naka miner should use its consensus hash view as the sortition handle --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs 
b/testnet/stacks-node/src/nakamoto_node/miner.rs index c34d75966e..d256039b2e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -765,7 +765,7 @@ impl BlockMinerThread { let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, &burn_db - .index_handle_at_block(&chain_state, &parent_block_id) + .index_handle_at_ch(&self.burn_block.consensus_hash) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, From b74377cc105dff99b88c35cb94b563bedd344171 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 May 2024 15:05:30 -0500 Subject: [PATCH 133/148] when setting the sort_tx context in naka block processing, always unset it afterwards --- stackslib/src/chainstate/nakamoto/mod.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs index 3c8946280c..4fec156e6f 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1379,8 +1379,6 @@ impl NakamotoChainState { return Ok(None); }; - sort_tx.context.chain_tip = burnchain_view_sortid; - // find commit and sortition burns if this is a tenure-start block let Ok(new_tenure) = next_ready_block.is_wellformed_tenure_start_block() else { return Err(ChainstateError::InvalidStacksBlock( @@ -1418,7 +1416,12 @@ impl NakamotoChainState { // though it will always be None), which gets the borrow-checker to believe that it's safe // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient to // simply commit the block before beginning the second transaction to mark it processed. 
- let (ok_opt, err_opt) = match NakamotoChainState::append_block( + + // set the sortition tx's tip to the burnchain view -- we must unset this after appending the block, + // so we wrap this call in a closure to make sure that the unsetting is infallible + let prior_sort_tip = + std::mem::replace(&mut sort_tx.context.chain_tip, burnchain_view_sortid); + let (ok_opt, err_opt) = (|clarity_instance| match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, sort_tx, @@ -1437,7 +1440,9 @@ impl NakamotoChainState { ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), - }; + })(clarity_instance); + + sort_tx.context.chain_tip = prior_sort_tip; if let Some(e) = err_opt { // force rollback From 54aa3b91598c4e05df325a0d830794efb0009a81 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 31 May 2024 18:11:09 -0400 Subject: [PATCH 134/148] chore: address PR feedback --- stacks-common/src/types/mod.rs | 18 +++++++++++ .../burn/operations/leader_block_commit.rs | 2 +- .../chainstate/nakamoto/coordinator/mod.rs | 32 ++++++++----------- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++++ .../src/chainstate/nakamoto/signer_set.rs | 7 ++++ .../src/chainstate/nakamoto/tests/node.rs | 2 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 12 +++++-- .../src/nakamoto_node/sign_coordinator.rs | 6 +++- .../src/tests/nakamoto_integrations.rs | 4 ++- 11 files changed, 65 insertions(+), 28 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index e6e5cf5f79..901d72b40a 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -151,6 +151,24 @@ impl StacksEpochId { StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } + + /// Does this epoch use the nakamoto reward set, or the epoch2 reward set? + /// We use the epoch2 reward set in all pre-3.0 epochs. 
+ /// We also use the epoch2 reward set in the first 3.0 reward cycle. + /// After that, we use the nakamoto reward set. + pub fn uses_nakamoto_reward_set(&self, cur_reward_cycle: u64, first_epoch30_reward_cycle: u64) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle + } + } } impl std::fmt::Display for StacksEpochId { diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 087a3e3b42..539181f9af 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -662,7 +662,7 @@ impl LeaderBlockCommitOp { check_recipients.sort(); let mut commit_outs = self.commit_outs.clone(); commit_outs.sort(); - for (expected_commit, found_commit) in + for (found_commit, expected_commit) in commit_outs.iter().zip(check_recipients) { if expected_commit.to_burnchain_repr() diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f703a23486..64950f9059 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -234,7 +234,7 @@ pub fn get_nakamoto_reward_cycle_info( "FATAL: called a nakamoto function outside of epoch 3" ); - // calculating the reward set for the _next_ reward cycle + // calculating the reward set for the current reward cycle let reward_cycle = burnchain .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); @@ -246,7 +246,7 @@ pub fn get_nakamoto_reward_cycle_info( "prepare_phase_length" => burnchain.pox_constants.prepare_length); let Some((rc_info, 
anchor_block_header)) = load_nakamoto_reward_set( - burn_height, + reward_cycle, sortition_tip, burnchain, chain_state, @@ -275,7 +275,7 @@ pub fn get_nakamoto_reward_cycle_info( return Ok(Some(rc_info)); } -/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `burn_height`. +/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `reward_cycle`. /// /// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the /// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed @@ -286,23 +286,20 @@ pub fn get_nakamoto_reward_cycle_info( /// Returns Ok(None) if the reward set is not yet known, but could be known by the time a /// subsequent call is made. pub fn load_nakamoto_reward_set( - burn_height: u64, + reward_cycle: u64, sortition_tip: &SortitionId, burnchain: &Burnchain, chain_state: &mut StacksChainState, sort_db: &SortitionDB, provider: &U, ) -> Result, Error> { - let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? - .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); - - let reward_cycle = burnchain - .pox_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); - let prepare_end_height = burnchain .reward_cycle_to_block_height(reward_cycle) .saturating_sub(1); + + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", prepare_end_height)); + let Some(prepare_end_sortition_id) = get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? 
else { @@ -320,10 +317,7 @@ pub fn load_nakamoto_reward_set( .pox_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); - if epoch_at_height.epoch_id < StacksEpochId::Epoch30 - || (epoch_at_height.epoch_id == StacksEpochId::Epoch30 - && reward_cycle == first_epoch30_reward_cycle) - { + if !epoch_at_height.epoch_id.uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) { // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. @@ -490,9 +484,9 @@ pub fn get_nakamoto_next_recipients( chain_state: &mut StacksChainState, burnchain: &Burnchain, ) -> Result, Error> { - let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { + let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { let Some((reward_set, _)) = load_nakamoto_reward_set( - sortition_tip.block_height, + burnchain.pox_reward_cycle(sortition_tip.block_height.saturating_add(1)).expect("Sortition block height has no reward cycle"), &sortition_tip.sortition_id, burnchain, chain_state, @@ -564,7 +558,7 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle let Some((rc_info, _)) = load_nakamoto_reward_set( - canonical_sn.block_height, + self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, @@ -820,7 +814,7 @@ impl< )? 
.ok_or(DBError::NotFoundError)?; let Some((rc_info, _)) = load_nakamoto_reward_set( - canonical_sn.block_height, + self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 33f6ff2109..23a8895ee2 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2704,6 +2704,12 @@ impl NakamotoChainState { &mut clarity_tx, vote_for_agg_key_ops.clone(), )); + + if signer_set_calc.is_some() { + debug!("Setup block: computed reward set for the next reward cycle"; + "anchor_block_height" => coinbase_height, + "burn_header_height" => burn_header_height); + } } else { signer_set_calc = None; } diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index e776ca41db..f09f41529d 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -217,6 +217,8 @@ impl NakamotoSigners { Ok(slots) } + /// Compute the reward set for the next reward cycle, store it, and write it to the .signers + /// contract. `reward_cycle` is the _current_ reward cycle. pub fn handle_signer_stackerdb_update( clarity: &mut ClarityTransactionConnection, pox_constants: &PoxConstants, @@ -351,6 +353,11 @@ impl NakamotoSigners { Ok(SignerCalculation { events, reward_set }) } + /// Check whether this block is mined in the prepare phase, based on its tenure's `burn_tip_height`. If + so, and if we haven't done so yet, then compute the PoX reward set, store it, and update + the .signers contract. The stored PoX reward set is the reward set for the next reward + cycle, and will be used by the Nakamoto chains coordinator to validate its block-commits + and block signatures. 
pub fn check_and_handle_prepare_phase_start( clarity_tx: &mut ClarityTx, first_block_height: u64, diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 5b08a398fa..c29abcc0fb 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -585,7 +585,7 @@ impl TestStacksNode { // Get the reward set let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - sort_tip_sn.block_height, + miner.burnchain.pox_reward_cycle(sort_tip_sn.block_height).expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, chainstate, diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 1d195323f5..0b85f177cf 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5499,7 +5499,7 @@ impl PeerNetwork { } let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( - rc_start_height, + rc, &tip_sn.sortition_id, &self.burnchain, chainstate, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index a073398f42..d04ba3d88c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -730,7 +730,7 @@ impl Relayer { let tip = block_sn.sortition_id; let reward_info = match load_nakamoto_reward_set( - block_sn.block_height, + burnchain.pox_reward_cycle(block_sn.block_height).expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, chainstate, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 69b04c3a53..3308cefd57 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -300,7 +300,9 @@ impl BlockMinerThread { })?; let reward_info = match load_nakamoto_reward_set( - tip.block_height, + self.burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + 
.expect("FATAL: no reward cycle for sortition"), &tip.sortition_id, &self.burnchain, &mut chain_state, @@ -402,7 +404,9 @@ impl BlockMinerThread { })?; let reward_info = match load_nakamoto_reward_set( - tip.block_height, + self.burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .expect("FATAL: no reward cycle for sortition"), &tip.sortition_id, &self.burnchain, &mut chain_state, @@ -883,7 +887,9 @@ impl BlockMinerThread { .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::DBError(e)))?; let reward_info = match load_nakamoto_reward_set( - tip.block_height, + self.burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .expect("FATAL: no reward cycle defined for sortition tip"), &tip.sortition_id, &self.burnchain, &mut chain_state, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index c0f42e7820..d973114623 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -202,7 +202,11 @@ impl SignCoordinator { ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { - error!("Could not initialize WSTS coordinator for reward set without signer"); + error!("Could not initialize signing coordinator for reward set without signer"); + debug!( + "reward_cycle: {}, reward set: {:?}", + reward_cycle, &reward_set + ); return Err(ChainstateError::NoRegisteredSigners(0)); }; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2da7444c37..bafd66f6d4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -375,7 +375,9 @@ pub fn read_and_sign_block_proposal( let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - 
tip.block_height, + burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .unwrap(), &tip.sortition_id, &burnchain, &mut chainstate, From d28e44f5cdab906a99b3c6cc6540884c1f60775c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 31 May 2024 23:40:22 -0400 Subject: [PATCH 135/148] chore: cargo fmt --- stacks-common/src/types/mod.rs | 8 ++- .../chainstate/nakamoto/coordinator/mod.rs | 55 ++++++++++++------- .../src/chainstate/nakamoto/tests/node.rs | 5 +- stackslib/src/net/relay.rs | 4 +- 4 files changed, 48 insertions(+), 24 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 901d72b40a..1e0a056c21 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -156,7 +156,11 @@ impl StacksEpochId { /// We use the epoch2 reward set in all pre-3.0 epochs. /// We also use the epoch2 reward set in the first 3.0 reward cycle. /// After that, we use the nakamoto reward set. - pub fn uses_nakamoto_reward_set(&self, cur_reward_cycle: u64, first_epoch30_reward_cycle: u64) -> bool { + pub fn uses_nakamoto_reward_set( + &self, + cur_reward_cycle: u64, + first_epoch30_reward_cycle: u64, + ) -> bool { match self { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 @@ -166,7 +170,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle + StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle, } } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 64950f9059..c8d1adb826 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -296,9 +296,14 @@ pub fn load_nakamoto_reward_set( let prepare_end_height = burnchain .reward_cycle_to_block_height(reward_cycle) .saturating_sub(1); - + let epoch_at_height = 
SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? - .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", prepare_end_height)); + .unwrap_or_else(|| { + panic!( + "FATAL: no epoch defined for burn height {}", + prepare_end_height + ) + }); let Some(prepare_end_sortition_id) = get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? @@ -317,7 +322,10 @@ pub fn load_nakamoto_reward_set( .pox_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); - if !epoch_at_height.epoch_id.uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) { + if !epoch_at_height + .epoch_id + .uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) + { // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. @@ -484,22 +492,25 @@ pub fn get_nakamoto_next_recipients( chain_state: &mut StacksChainState, burnchain: &Burnchain, ) -> Result, Error> { - let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { - let Some((reward_set, _)) = load_nakamoto_reward_set( - burnchain.pox_reward_cycle(sortition_tip.block_height.saturating_add(1)).expect("Sortition block height has no reward cycle"), - &sortition_tip.sortition_id, - burnchain, - chain_state, - sort_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(None); + let reward_cycle_info = + if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { + let Some((reward_set, _)) = load_nakamoto_reward_set( + burnchain + .pox_reward_cycle(sortition_tip.block_height.saturating_add(1)) + .expect("Sortition block height has no reward cycle"), + &sortition_tip.sortition_id, + burnchain, + chain_state, + sort_db, + &OnChainRewardSetProvider::new(), + )? 
+ else { + return Ok(None); + }; + Some(reward_set) + } else { + None }; - Some(reward_set) - } else { - None - }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) .map_err(Error::from) @@ -558,7 +569,9 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle let Some((rc_info, _)) = load_nakamoto_reward_set( - self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), + self.burnchain + .pox_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, @@ -814,7 +827,9 @@ impl< )? .ok_or(DBError::NotFoundError)?; let Some((rc_info, _)) = load_nakamoto_reward_set( - self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), + self.burnchain + .pox_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index c29abcc0fb..201cadb9ac 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -585,7 +585,10 @@ impl TestStacksNode { // Get the reward set let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - miner.burnchain.pox_reward_cycle(sort_tip_sn.block_height).expect("FATAL: no reward cycle for sortition"), + miner + .burnchain + .pox_reward_cycle(sort_tip_sn.block_height) + .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, chainstate, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index d04ba3d88c..022d1edf14 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -730,7 +730,9 
@@ impl Relayer { let tip = block_sn.sortition_id; let reward_info = match load_nakamoto_reward_set( - burnchain.pox_reward_cycle(block_sn.block_height).expect("FATAL: block snapshot has no reward cycle"), + burnchain + .pox_reward_cycle(block_sn.block_height) + .expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, chainstate, From 2c1a229201d3daa3f634bc16cf79314371071ee3 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 3 Jun 2024 12:54:24 -0500 Subject: [PATCH 136/148] add burn_view to StacksHeaderInfo for nakamoto blocks, use in append_block --- stackslib/src/chainstate/burn/db/sortdb.rs | 15 +- .../chainstate/nakamoto/coordinator/mod.rs | 9 +- stackslib/src/chainstate/nakamoto/mod.rs | 164 +++++++++++++----- stackslib/src/chainstate/nakamoto/tenure.rs | 66 ++----- stackslib/src/chainstate/stacks/db/mod.rs | 19 +- stackslib/src/chainstate/stacks/miner.rs | 1 + 6 files changed, 152 insertions(+), 122 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 1c6fe26606..db7c15dfea 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2677,17 +2677,10 @@ impl SortitionDB { // if its a nakamoto block, we want to use the burnchain view of the block let burn_view = match &header.anchored_header { StacksBlockHeaderTypes::Epoch2(_) => header.consensus_hash, - StacksBlockHeaderTypes::Nakamoto(_) => { - NakamotoChainState::get_tenure_for_block(chainstate.db(), &header) - .map_err(|e| { - warn!( - "Failed to get tenure for block header: {:?}", e; - "block_id" => %stacks_block_id, - ); - db_error::NotFoundError - })? 
- .burn_view_consensus_hash - } + StacksBlockHeaderTypes::Nakamoto(_) => header.burn_view.ok_or_else(|| { + error!("Loaded nakamoto block header without a burn view"; "block_id" => %stacks_block_id); + db_error::Other("Nakamoto block header without burn view".into()) + })?, }; let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &burn_view)? diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..2bca7609a3 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -573,13 +573,10 @@ impl< loop { // process at most one block per loop pass - let mut sortdb_handle = self - .sortition_db - .tx_handle_begin(&canonical_sortition_tip)?; - let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( &mut self.chain_state_db, - &mut sortdb_handle, + &mut self.sortition_db, + &canonical_sortition_tip, self.dispatcher, ) { Ok(receipt_opt) => receipt_opt, @@ -606,8 +603,6 @@ impl< } }; - sortdb_handle.commit()?; - let Some(block_receipt) = processed_block_receipt.take() else { // out of blocks debug!("No more blocks to process (no receipts)"); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4fec156e6f..4929a38c32 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -39,7 +39,7 @@ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY, }; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -221,6 +221,13 @@ lazy_static! 
{ NAKAMOTO_TENURES_SCHEMA_2.into(), r#" UPDATE db_config SET version = "5"; + "#.into(), + // make burn_view NULLable. We could use a default value, but NULL should be safer (because it will error). + // there should be no entries in nakamoto_block_headers with a NULL entry when this column is added, because + // nakamoto blocks have not been produced yet. + r#" + ALTER TABLE nakamoto_block_headers + ADD COLUMN burn_view TEXT; "#.into(), ]; } @@ -1273,16 +1280,21 @@ impl NakamotoChainState { /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the /// receipt. Otherwise, it returns Ok(None). /// + /// Canonical sortition tip is a pointer to the current canonical sortition tip. + /// this is used to store block processed information in the sortition db. + /// /// It returns Err(..) on DB error, or if the child block does not connect to the parent. /// The caller should keep calling this until it gets Ok(None) pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>( stacks_chain_state: &mut StacksChainState, - sort_tx: &mut SortitionHandleTx, + sort_db: &mut SortitionDB, + canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); - let Some((next_ready_block, block_size)) = - nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db(), sort_tx)? + let sortition_handle = sort_db.index_handle(canonical_sortition_tip); + let Some((next_ready_block, block_size)) = nakamoto_blocks_db + .next_ready_nakamoto_block(stacks_chain_state.db(), &sortition_handle)? else { // no more blocks test_debug!("No more Nakamoto blocks to process"); @@ -1293,7 +1305,7 @@ impl NakamotoChainState { // find corresponding snapshot let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus( - sort_tx, + sort_db.conn(), &next_ready_block.header.consensus_hash, )? 
.unwrap_or_else(|| { @@ -1350,26 +1362,69 @@ impl NakamotoChainState { // (2) the same as parent block id let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { + if let Some(ref parent_burn_view) = parent_header_info.burn_view { + // check that the tenure_change's burn view descends from the parent + let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + parent_burn_view, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? 
+ .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } tenure_change.burn_view_consensus_hash } else { - let Some(current_tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( - &chainstate_tx.tx, - &next_ready_block.header.consensus_hash, - )? - else { + parent_header_info.burn_view.clone().ok_or_else(|| { warn!( - "Cannot process Nakamoto block: failed to find active tenure"; + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; "consensus_hash" => %next_ready_block.header.consensus_hash, "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - return Ok(None); - }; - current_tenure.burn_view_consensus_hash + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? }; - let Some(burnchain_view_sortid) = - SortitionDB::get_sortition_id_by_consensus(sort_tx.tx(), &burnchain_view)? 
+ let Some(burnchain_view_sn) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? else { + // This should be checked already during block acceptance and parent block processing + // - The check for expected burns returns `NoSuchBlockError` if the burnchain view + // could not be found for a block with a tenure tx. + // We error here anyways, but the check during block acceptance makes sure that the staging + // db doesn't get into a situation where it continuously tries to retry such a block (because + // such a block shouldn't land in the staging db). warn!( "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; "consensus_hash" => %next_ready_block.header.consensus_hash, @@ -1388,24 +1443,22 @@ impl NakamotoChainState { let (commit_burn, sortition_burn) = if new_tenure { // find block-commit to get commit-burn - let block_commit = sort_tx - .get_block_commit( - &next_ready_block_snapshot.winning_block_txid, - &next_ready_block_snapshot.sortition_id, - )? - .expect("FATAL: no block-commit for tenure-start block"); + let block_commit = SortitionDB::get_block_commit( + sort_db.conn(), + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? + .expect("FATAL: no block-commit for tenure-start block"); - let sort_burn = SortitionDB::get_block_burn_amount( - sort_tx.deref().deref(), - &next_ready_block_snapshot, - )?; + let sort_burn = + SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; (block_commit.burn_fee, sort_burn) } else { (0, 0) }; // attach the block to the chain state and calculate the next chain tip. 
- let pox_constants = sort_tx.context.pox_constants.clone(); + let pox_constants = sort_db.pox_constants.clone(); // NOTE: because block status is updated in a separate transaction, we need `chainstate_tx` // and `clarity_instance` to go out of scope before we can issue the it (since we need a @@ -1419,12 +1472,12 @@ impl NakamotoChainState { // set the sortition tx's tip to the burnchain view -- we must unset this after appending the block, // so we wrap this call in a closure to make sure that the unsetting is infallible - let prior_sort_tip = - std::mem::replace(&mut sort_tx.context.chain_tip, burnchain_view_sortid); + let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); let (ok_opt, err_opt) = (|clarity_instance| match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, - sort_tx, + &mut burn_view_handle, + &burnchain_view, &pox_constants, &parent_header_info, &next_ready_block_snapshot.burn_header_hash, @@ -1442,8 +1495,6 @@ impl NakamotoChainState { Err(e) => (None, Some(e)), })(clarity_instance); - sort_tx.context.chain_tip = prior_sort_tip; - if let Some(e) = err_opt { // force rollback drop(ok_opt); @@ -1478,6 +1529,7 @@ impl NakamotoChainState { ); // set stacks block accepted + let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; sort_tx.set_stacks_block_accepted( &next_ready_block.header.consensus_hash, &next_ready_block.header.block_hash(), @@ -1529,6 +1581,14 @@ impl NakamotoChainState { ); } + sort_tx + .commit() + .unwrap_or_else(|e| { + error!("Failed to commit sortition db transaction after committing chainstate and clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + Ok(Some(receipt)) } @@ -1545,7 +1605,7 @@ impl NakamotoChainState { /// however, will flag a block as invalid in this case, because the parent must be available in /// order to process a block. 
pub(crate) fn get_expected_burns( - sort_handle: &mut SH, + sort_handle: &SH, chainstate_conn: &Connection, block: &NakamotoBlock, ) -> Result, ChainstateError> { @@ -2310,6 +2370,15 @@ impl NakamotoChainState { if tenure_changed { &1i64 } else { &0i64 }, &vrf_proof_bytes.as_ref(), &header.signer_bitvec, + tip_info.burn_view.as_ref().ok_or_else(|| { + error!( + "Attempted to store nakamoto block header information without burnchain view"; + "block_id" => %index_block_hash, + ); + ChainstateError::DBError(DBError::Other( + "Nakamoto block StacksHeaderInfo did not set burnchain view".into(), + )) + })?, ]; chainstate_tx.execute( @@ -2330,9 +2399,10 @@ impl NakamotoChainState { parent_block_id, tenure_changed, vrf_proof, - signer_bitvec + signer_bitvec, + burn_view ) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25)", args )?; @@ -2362,6 +2432,7 @@ impl NakamotoChainState { burn_vote_for_aggregate_key_ops: Vec, new_tenure: bool, block_fees: u128, + burn_view: &ConsensusHash, ) -> Result { if new_tip.parent_block_id != StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) @@ -2409,6 +2480,7 @@ impl NakamotoChainState { burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: block_size, + burn_view: Some(burn_view.clone()), }; let tenure_fees = block_fees @@ -2792,7 +2864,8 @@ impl NakamotoChainState { fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &mut SortitionHandleConn, + burnchain_view: &ConsensusHash, pox_constants: &PoxConstants, parent_chain_tip: &StacksHeaderInfo, chain_tip_burn_header_hash: &BurnchainHeaderHash, @@ -2942,18 +3015,18 @@ impl NakamotoChainState { // (note that we can't 
check this earlier, since we need the parent tenure to have been // processed) if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let tenure_block_commit = burn_dbconn - .get_block_commit( - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; + let tenure_block_commit = SortitionDB::get_block_commit( + burn_dbconn.conn(), + &tenure_block_snapshot.winning_block_txid, + &tenure_block_snapshot.sortition_id, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: has no block-commit in its sortition"; "block_id" => %block.header.block_id(), "sortition_id" => %tenure_block_snapshot.sortition_id, "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); - ChainstateError::NoSuchBlockError - })?; + ChainstateError::NoSuchBlockError + })?; let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)? 
@@ -3198,6 +3271,7 @@ impl NakamotoChainState { burn_vote_for_aggregate_key_ops, new_tenure, block_fees, + burnchain_view, ) .expect("FATAL: failed to advance chain tip"); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index f68e0ee90a..5793994c80 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -89,7 +89,9 @@ use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionHandle, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::nakamoto::{ @@ -595,53 +597,6 @@ impl NakamotoChainState { Ok(tenure_opt) } - /// Get the tenure change that was active for a given block header - /// If a tenure change occurred during this block, it will be returned - pub fn get_tenure_for_block( - headers_conn: &Connection, - block_header: &StacksHeaderInfo, - ) -> Result { - let sql = "SELECT * FROM nakamoto_tenures WHERE block_id = ? LIMIT 1"; - let tenure_opt: Option = - query_row(headers_conn, sql, &[block_header.index_block_hash()])?; - if let Some(tenure) = tenure_opt { - return Ok(tenure); - } - // there wasn't a tenure change at that block, so we need to figure out the active tenure - // use the "tenure height" to query for `num_blocks_confirmed` - let block_height = block_header.stacks_block_height; - let tenure_start_height = Self::get_nakamoto_tenure_start_block_header( - headers_conn, - &block_header.consensus_hash, - )? - .ok_or_else(|| ChainstateError::NoSuchBlockError)? 
- .stacks_block_height; - let blocks_confirmed = u64_to_sql(block_height.saturating_sub(tenure_start_height))?; - // querying by blocks confirmed doesn't work if cause is blockfound, - // so don't try and instead failback to directly querying it - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ? - AND num_blocks_confirmed <= ? - AND cause <> ? - ORDER BY num_blocks_confirmed DESC LIMIT 1"; - if let Some(tenure) = query_row( - headers_conn, - sql, - params![ - &block_header.consensus_hash, - blocks_confirmed, - TenureChangeCause::BlockFound.as_u8() - ], - )? { - return Ok(tenure); - } - // failback to the BlockFound tenure change - Self::get_highest_nakamoto_tenure_change_by_tenure_id( - headers_conn, - &block_header.consensus_hash, - )? - .ok_or_else(|| ChainstateError::NoSuchBlockError) - } - /// Get the highest non-empty processed tenure on the canonical sortition history. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, @@ -893,9 +848,9 @@ impl NakamotoChainState { /// tenure-change tx, or just parent_coinbase_height if there was a tenure-extend tx or no tenure /// txs at all). /// TODO: unit test - pub(crate) fn advance_nakamoto_tenure( + pub(crate) fn advance_nakamoto_tenure( headers_tx: &mut StacksDBTx, - sort_tx: &mut SortitionHandleTx, + handle: &mut SH, block: &NakamotoBlock, parent_coinbase_height: u64, ) -> Result { @@ -918,7 +873,7 @@ impl NakamotoChainState { }; let Some(processed_tenure) = - Self::check_nakamoto_tenure(headers_tx, sort_tx, &block.header, tenure_payload)? + Self::check_nakamoto_tenure(headers_tx, handle, &block.header, tenure_payload)? 
else { return Err(ChainstateError::InvalidStacksTransaction( "Invalid tenure tx".into(), @@ -991,7 +946,7 @@ impl NakamotoChainState { /// TODO: unit test pub(crate) fn calculate_scheduled_tenure_reward( chainstate_tx: &mut ChainstateTx, - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &SortitionHandleConn, block: &NakamotoBlock, evaluated_epoch: StacksEpochId, parent_coinbase_height: u64, @@ -1004,7 +959,7 @@ impl NakamotoChainState { // figure out if there any accumulated rewards by // getting the snapshot that elected this block. let accumulated_rewards = SortitionDB::get_block_snapshot_consensus( - burn_dbconn.tx(), + burn_dbconn.conn(), &block.header.consensus_hash, )? .expect("CORRUPTION: failed to load snapshot that elected processed block") @@ -1076,7 +1031,7 @@ impl NakamotoChainState { /// particular burnchain fork. /// Return the block snapshot if so. pub(crate) fn check_sortition_exists( - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &SortitionHandleConn, block_consensus_hash: &ConsensusHash, ) -> Result { // check that the burnchain block that this block is associated with has been processed. @@ -1092,9 +1047,8 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; - let sortition_tip = burn_dbconn.context.chain_tip.clone(); let snapshot = burn_dbconn - .get_block_snapshot(&burn_header_hash, &sortition_tip)? + .get_block_snapshot(&burn_header_hash)? .ok_or_else(|| { warn!( "Tried to process Nakamoto block before its burn view was processed"; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 865758ed01..d9b6d47775 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -196,6 +196,9 @@ pub struct StacksHeaderInfo { pub burn_header_timestamp: u64, /// Size of the block corresponding to `anchored_header` in bytes pub anchored_block_size: u64, + /// The burnchain tip that is passed to Clarity while processing this block. 
+ /// This should always be `Some()` for Nakamoto blocks and `None` for 2.x blocks + pub burn_view: Option, } #[derive(Debug, Clone, PartialEq)] @@ -387,6 +390,7 @@ impl StacksHeaderInfo { consensus_hash: ConsensusHash::empty(), burn_header_timestamp: 0, anchored_block_size: 0, + burn_view: None, } } @@ -406,6 +410,7 @@ impl StacksHeaderInfo { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), burn_header_timestamp: first_burnchain_block_timestamp, anchored_block_size: 0, + burn_view: None, } } @@ -452,15 +457,21 @@ impl FromRow for StacksHeaderInfo { .parse::() .map_err(|_| db_error::ParseError)?; + let header_type: HeaderTypeNames = row + .get("header_type") + .unwrap_or_else(|_e| HeaderTypeNames::Epoch2); let stacks_header: StacksBlockHeaderTypes = { - let header_type: HeaderTypeNames = row - .get("header_type") - .unwrap_or_else(|_e| HeaderTypeNames::Epoch2); match header_type { HeaderTypeNames::Epoch2 => StacksBlockHeader::from_row(row)?.into(), HeaderTypeNames::Nakamoto => NakamotoBlockHeader::from_row(row)?.into(), } }; + let burn_view = { + match header_type { + HeaderTypeNames::Epoch2 => None, + HeaderTypeNames::Nakamoto => Some(ConsensusHash::from_column(row, "burn_view")?), + } + }; if block_height != stacks_header.height() { return Err(db_error::ParseError); @@ -476,6 +487,7 @@ impl FromRow for StacksHeaderInfo { burn_header_height: burn_header_height as u32, burn_header_timestamp, anchored_block_size, + burn_view, }) } } @@ -2617,6 +2629,7 @@ impl StacksChainState { burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: anchor_block_size, + burn_view: None, }; StacksChainState::insert_stacks_block_header( diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 22fdb782b2..0ec979c623 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1495,6 +1495,7 @@ impl StacksBlockBuilder { 
burn_header_timestamp: genesis_burn_header_timestamp, burn_header_height: genesis_burn_header_height, anchored_block_size: 0, + burn_view: None, }; let mut builder = StacksBlockBuilder::from_parent_pubkey_hash( From 55567d777bc6ea3edbfd7a89e79b770d32c75ff7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 3 Jun 2024 11:56:35 -0700 Subject: [PATCH 137/148] fix: return errors instead of panics in miner thread --- .../stacks-node/src/nakamoto_node/miner.rs | 27 +++++++++++++------ .../stacks-node/src/nakamoto_node/relayer.rs | 2 +- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7d4e54b1d5..b324ec561d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -132,15 +132,22 @@ impl BlockMinerThread { } /// Stop a miner tenure by blocking the miner and then joining the tenure thread - pub fn stop_miner(globals: &Globals, prior_miner: JoinHandle<()>) { + pub fn stop_miner( + globals: &Globals, + prior_miner: JoinHandle>, + ) -> Result<(), NakamotoNodeError> { globals.block_miner(); prior_miner .join() - .expect("FATAL: IO failure joining prior mining thread"); + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))??; globals.unblock_miner(); + Ok(()) } - pub fn run_miner(mut self, prior_miner: Option>) { + pub fn run_miner( + mut self, + prior_miner: Option>>, + ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. 
// the new mining thread will join it (so that the new mining thread stalls, not the relayer) debug!( @@ -150,10 +157,10 @@ impl BlockMinerThread { "thread_id" => ?thread::current().id(), ); if let Some(prior_miner) = prior_miner { - Self::stop_miner(&self.globals, prior_miner); + Self::stop_miner(&self.globals, prior_miner)?; } let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) - .expect("FATAL: failed to connect to stacker DB"); + .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; let mut attempts = 0; // now, actually run this tenure @@ -176,7 +183,9 @@ impl BlockMinerThread { } Err(e) => { warn!("Failed to mine block: {e:?}"); - return; + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); } } }; @@ -193,7 +202,9 @@ impl BlockMinerThread { error!( "Unrecoverable error while gathering signatures: {e:?}. Ending tenure." ); - return; + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); } }; @@ -234,7 +245,7 @@ impl BlockMinerThread { while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); if self.check_burn_tip_changed(&sort_db).is_err() { - return; + return Err(NakamotoNodeError::BurnchainTipChanged); } } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fc4ca1ae0d..d0ff26acdb 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -154,7 +154,7 @@ pub struct RelayerThread { relayer: Relayer, /// handle to the subordinate miner thread - miner_thread: Option>, + miner_thread: Option>>, /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up /// to check if it should issue a block commit or try to register a VRF key next_initiative: Instant, From 0e592954101807134ca4adaf447a8f5664ddb072 Mon Sep 
17 00:00:00 2001 From: Hank Stoever Date: Mon, 3 Jun 2024 12:34:04 -0700 Subject: [PATCH 138/148] fix: cargo check --tests errors after merge --- stackslib/src/net/tests/download/nakamoto.rs | 22 ++++++++++++++----- .../src/tests/nakamoto_integrations.rs | 4 ++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 6e1e4c1bcb..c084527336 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -915,10 +915,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // Does not consume blocks beyond the highest processed block ID { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -945,7 +957,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4ecddf6e78..7c57e8c14c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3938,7 +3938,7 @@ fn nakamoto_attempt_time() { return; } - let signers = TestSigners::default(); + let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = 
naka_neon_integration_conf(None); let password = "12345".to_string(); naka_conf.connection_options.block_proposal_token = Some(password.clone()); @@ -4019,7 +4019,7 @@ fn nakamoto_attempt_time() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); From 737cf14b336c7fcbf846b7da6387728abde95ea2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 3 Jun 2024 16:00:47 -0500 Subject: [PATCH 139/148] cleanup --- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4929a38c32..1f29cda7cf 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1470,10 +1470,8 @@ impl NakamotoChainState { // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient so // simply commit the block before beginning the second transaction to mark it processed. 
- // set the sortition tx's tip to the burnchain view -- we must unset this after appending the block, - // so we wrap this call in a closure to make sure that the unsetting is infallible let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); - let (ok_opt, err_opt) = (|clarity_instance| match NakamotoChainState::append_block( + let (ok_opt, err_opt) = match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, &mut burn_view_handle, @@ -1493,7 +1491,7 @@ ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), - })(clarity_instance); + }; if let Some(e) = err_opt { // force rollback From e22a62dd2707a1b9b00cbf8912baf2e58f74c2ef Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 3 Jun 2024 17:43:35 -0400 Subject: [PATCH 140/148] fix: method docs --- stackslib/src/chainstate/burn/db/sortdb.rs | 20 +++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3ee746971f..d4304a0e2f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3457,8 +3457,10 @@ impl SortitionDB { Ok(()) } - /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_cycle(). - /// See that method for details. + /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_cycle() pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( &self, tip: &SortitionId, @@ -3473,8 +3475,10 @@ impl SortitionDB { ) } + /// Get the prepare phase start sortition ID of a reward cycle. 
This is the first prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). - /// See that method for details. pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( &self, tip: &SortitionId, @@ -3489,8 +3493,11 @@ impl SortitionDB { ) } + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_for_reward_cycle(). - /// See that method for details. pub fn get_preprocessed_reward_set_for_reward_cycle( &self, tip: &SortitionId, @@ -3505,8 +3512,11 @@ impl SortitionDB { ) } + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_of(). - /// See that method for details. 
pub fn get_preprocessed_reward_set_of( &self, tip: &SortitionId, From ded87549fb5d68b14fb301938697e8c479ecd68b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 08:29:57 -0500 Subject: [PATCH 141/148] address PR reviews --- stacks-signer/src/client/stacks_client.rs | 18 +++++++++--------- stackslib/src/net/api/get_tenures_fork_info.rs | 3 ++- stackslib/src/net/api/getsortition.rs | 5 ++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 8a7ade028c..f24679ff69 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,3 +1,4 @@ +use std::collections::VecDeque; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -370,26 +371,25 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { - let mut tenures = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; + let mut tenures: VecDeque = + self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; if tenures.is_empty() { - return Ok(tenures); + return Ok(vec![]); } - while tenures.last().map(|x| &x.consensus_hash) != Some(chosen_parent) { - let new_start = tenures.last().ok_or_else(|| { + while tenures.back().map(|x| &x.consensus_hash) != Some(chosen_parent) { + let new_start = tenures.back().ok_or_else(|| { ClientError::InvalidResponse( "Should have tenure data in forking info response".into(), ) })?; let mut next_results = self.get_tenure_forking_info_step(chosen_parent, &new_start.consensus_hash)?; - if next_results.is_empty() { + if next_results.pop_front().is_none() { return Err(ClientError::InvalidResponse( "Could not fetch forking info all the way back to the requested chosen_parent" .into(), )); } - // SAFETY check: next_results isn't empty, because of the above check. 
otherwise, remove(0) could panic. - next_results.remove(0); if next_results.is_empty() { return Err(ClientError::InvalidResponse( "Could not fetch forking info all the way back to the requested chosen_parent" @@ -399,14 +399,14 @@ impl StacksClient { tenures.extend(next_results.into_iter()); } - Ok(tenures) + Ok(tenures.into_iter().collect()) } fn get_tenure_forking_info_step( &self, chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, - ) -> Result, ClientError> { + ) -> Result, ClientError> { let send_request = || { self.stacks_node_client .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 4abc8ab6e1..778e4cbf68 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -233,7 +233,8 @@ impl RPCRequestHandler for GetTenuresForkInfo { if height_bound >= cursor.block_height { return Err(ChainError::NotInSameFork); } - cursor = handle.get_last_snapshot_with_sortition(cursor.block_height - 1)?; + cursor = handle + .get_last_snapshot_with_sortition(cursor.block_height.saturating_sub(1))?; results.push(TenureForkingInfo::from_snapshot( &cursor, sortdb, chainstate, )?); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 1e2551eb83..73789c3f6a 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -53,8 +53,8 @@ pub enum QuerySpecifier { Latest, } -pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortition"; -static PATH_REGEX: &str = "^/v3/sortition(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; +pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions"; +static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -167,7 +167,6 @@ 
impl HttpRequest for GetSortitionHandler { let req_contents = HttpRequestContents::new().query_string(query); self.query = QuerySpecifier::Latest; - eprintln!("{captures:?}"); if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; } From 3b15824577a1fad29005c928c0e31262ab0dc091 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 08:37:36 -0500 Subject: [PATCH 142/148] chore: cargo fmt --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f596340c55..98cdca63d7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -50,8 +50,7 @@ use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, Transa use stacks::chainstate::stacks::{ SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, - MAX_BLOCK_LEN + TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, MAX_BLOCK_LEN, }; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ @@ -84,8 +83,8 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; -use stacks_signer::signerdb::{BlockInfo, SignerDb}; use stacks_signer::chainstate::SortitionsView; +use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; From 
7b1ad1e1a11225b72b6f1b88289b12907e719a42 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 11:17:25 -0500 Subject: [PATCH 143/148] fix issues from merge, fix an upstream bug in miner thread hand-offs --- stacks-signer/src/client/stacks_client.rs | 3 +-- stackslib/src/net/api/getsortition.rs | 2 +- stackslib/src/net/stackerdb/config.rs | 2 +- testnet/stacks-node/src/nakamoto_node/miner.rs | 12 ++++++++++-- .../stacks-node/src/tests/nakamoto_integrations.rs | 14 +++++++++++++- 5 files changed, 26 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f24679ff69..17a5916f6f 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -445,8 +445,7 @@ impl StacksClient { pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { let send_request = || { self.stacks_node_client - .get(self.sortition_info_path()) - .query(&[("consensus", ch.to_hex().as_str())]) + .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) .send() .map_err(|e| { warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 73789c3f6a..5df67e3636 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -243,7 +243,7 @@ impl RPCRequestHandler for GetSortitionHandler { stacks_parent_sn.consensus_hash.clone() } else { // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + let last_sortition = handle.get_last_snapshot_with_sortition(stacks_parent_sn.block_height)?; last_sortition.consensus_hash }; diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 5545aa46cd..8e88086f0a 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ 
b/stackslib/src/net/stackerdb/config.rs @@ -385,7 +385,7 @@ impl StackerDBConfig { } if max_neighbors > u128::from(local_max_neighbors) { - warn!( + debug!( "Contract {} stipulates a maximum number of neighbors ({}) beyond locally-configured maximum {}; defaulting to locally-configured maximum", contract_id, max_neighbors, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index b8adc92643..5be483a9af 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -137,9 +137,16 @@ impl BlockMinerThread { prior_miner: JoinHandle>, ) -> Result<(), NakamotoNodeError> { globals.block_miner(); - prior_miner + let prior_miner_result = prior_miner .join() - .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))??; + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + if let Err(e) = prior_miner_result { + // it's okay if the prior miner thread exited with an error. + // in many cases this is expected (e.g., a burnchain block occurred) + // if some error condition should be handled though, this is the place + // to do that handling. 
+ debug!("Prior mining thread exited with: {e:?}"); + } globals.unblock_miner(); Ok(()) } @@ -155,6 +162,7 @@ impl BlockMinerThread { "had_prior_miner" => prior_miner.is_some(), "parent_tenure_id" => %self.parent_tenure_id, "thread_id" => ?thread::current().id(), + "burn_block_consensus_hash" => %self.burn_block.consensus_hash, ); if let Some(prior_miner) = prior_miner { Self::stop_miner(&self.globals, prior_miner)?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 98cdca63d7..8080721933 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4402,7 +4402,19 @@ fn signer_chainstate() { } } - let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + // make sure we're getting a proposal from the current sortition (not 100% guaranteed by + // `next_block_and_mine_commit`) by looping + let time_start = Instant::now(); + let proposal = loop { + let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + break proposal; + } + if time_start.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for block proposal from the current bitcoin block"); + } + thread::sleep(Duration::from_secs(1)); + }; let valid = sortitions_view .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) From 7d0557805fe3b6733675eaed706aa474990de874 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 11:52:18 -0500 Subject: [PATCH 144/148] fix test build failures --- .../src/chainstate/nakamoto/coordinator/tests.rs | 15 ++++++++------- stackslib/src/chainstate/nakamoto/tests/mod.rs | 4 ++++ stackslib/src/core/tests/mod.rs | 1 + stackslib/src/cost_estimates/tests/common.rs | 1 + 4 files changed, 14 insertions(+), 7 deletions(-) diff --git 
a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0f3abe5c29..648584991d 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -635,18 +635,19 @@ fn test_nakamoto_chainstate_getters() { // scope this to drop the chainstate ref and db tx let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); + let sort_handle = sort_db.index_handle(&sort_tip.sortition_id); // no tenures yet - assert!( - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) - .unwrap() - .is_none() - ); + assert!(NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + sort_handle.sqlite() + ) + .unwrap() + .is_none()); // sortition-existence-check works assert_eq!( - NakamotoChainState::check_sortition_exists(&mut sort_tx, &sort_tip.consensus_hash) + NakamotoChainState::check_sortition_exists(&sort_handle, &sort_tip.consensus_hash) .unwrap(), sort_tip ); diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 4a1b0ad714..959377b7c3 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -678,6 +678,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 100, burn_header_timestamp: 1000, anchored_block_size: 12345, + burn_view: None, }; let epoch2_execution_cost = ExecutionCost { @@ -778,6 +779,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header.consensus_hash), }; let epoch2_block = StacksBlock { @@ -822,6 +824,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, 
anchored_block_size: 123, + burn_view: Some(nakamoto_header_2.consensus_hash), }; let nakamoto_block_2 = NakamotoBlock { @@ -861,6 +864,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header_3.consensus_hash), }; let nakamoto_block_3 = NakamotoBlock { diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 905f788dc2..6a3b700186 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -128,6 +128,7 @@ pub fn make_block( burn_header_height: burn_height as u32, burn_header_timestamp: 0, anchored_block_size: 1, + burn_view: None, }; c_tx.commit_block(); diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 6fd21b0676..fe6527ff53 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -39,6 +39,7 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE burn_header_height: 2, burn_header_timestamp: 2, anchored_block_size: 1, + burn_view: None, }, tx_receipts, matured_rewards: vec![], From 7c8df315cb1d73560f07429f7a93934da7aa37a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 13:09:05 -0500 Subject: [PATCH 145/148] fix: miner should allow prior miner thread to error --- testnet/stacks-node/src/nakamoto_node/miner.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index b324ec561d..0d04d12537 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -137,9 +137,16 @@ impl BlockMinerThread { prior_miner: JoinHandle>, ) -> Result<(), NakamotoNodeError> { globals.block_miner(); - prior_miner + let prior_miner_result = prior_miner .join() - .map_err(|_| 
NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))??; + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + if let Err(e) = prior_miner_result { + // it's okay if the prior miner thread exited with an error. + // in many cases this is expected (i.e., a burnchain block occurred) + // if some error condition should be handled though, this is the place + // to do that handling. + debug!("Prior mining thread exited with: {e:?}"); + } globals.unblock_miner(); Ok(()) } From ba0e654391f69ed3dc20de31a081e9a553053ff3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 4 Jun 2024 15:12:11 -0400 Subject: [PATCH 146/148] chore: fix build issue --- stackslib/src/net/tests/download/nakamoto.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index c0595301d4..719e901076 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -930,6 +930,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -938,6 +939,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); From fd0c97913cfb64516c62b54bdf720ee036bd59a5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 15:00:34 -0500 Subject: [PATCH 147/148] fix tests --- stackslib/src/net/api/tests/get_tenures_fork_info.rs | 2 +- stackslib/src/net/api/tests/getsortition.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs index 88e3d875ff..2b5abcfb36 100644 --- a/stackslib/src/net/api/tests/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -31,7 +31,7 @@ fn 
make_preamble(start: &T, stop: &R) -> HttpRequestPrea HttpRequestPreamble { version: HttpVersion::Http11, verb: "GET".into(), - path_and_query_str: format!("/v3/tenures_fork_info/{start}/{stop}"), + path_and_query_str: format!("/v3/tenures/fork_info/{start}/{stop}"), host: PeerHost::DNS("localhost".into(), 0), content_type: None, content_length: Some(0), diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index d48bc54a3a..8541b73eb6 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -29,7 +29,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { HttpRequestPreamble { version: HttpVersion::Http11, verb: "GET".into(), - path_and_query_str: format!("/v3/sortition{query}"), + path_and_query_str: format!("/v3/sortitions{query}"), host: PeerHost::DNS("localhost".into(), 0), content_type: None, content_length: Some(0), From ade4db7820b44c25fe4de5d2bfa4efe5611a9b8b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 5 Jun 2024 09:42:46 -0500 Subject: [PATCH 148/148] test: speedup the nakamoto_attempt_time integration test --- .../src/tests/nakamoto_integrations.rs | 112 +++++++++--------- 1 file changed, 59 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 376ef1409b..9c98d4ab33 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4095,8 +4095,8 @@ fn nakamoto_attempt_time() { // ----- Setup boilerplate finished, test block proposal API endpoint ----- let mut sender_nonce = 0; - let tenure_count = 3; - let inter_blocks_per_tenure = 10; + let tenure_count = 2; + let inter_blocks_per_tenure = 3; // Subtest 1 // Mine nakamoto tenures with a few transactions @@ -4127,16 +4127,24 @@ fn nakamoto_attempt_time() { submit_tx(&http_origin, &transfer_tx); } - // Sleep a bit 
longer than what our max block time should be - thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms + 100)); - // Miner should have made a new block by now - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - - assert!(blocks_processed > blocks_processed_before); + let wait_start = Instant::now(); + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + // wait a little longer than what the max block time should be + if wait_start.elapsed() > Duration::from_millis(nakamoto_attempt_time_ms + 100) { + panic!( + "A block should have been produced within {nakamoto_attempt_time_ms} ms" + ); + } + thread::sleep(Duration::from_secs(1)); + } let info = get_chain_info_result(&naka_conf).unwrap(); assert_ne!(info.stacks_tip, last_tip); @@ -4184,57 +4192,55 @@ fn nakamoto_attempt_time() { // Subtest 3 // Add more than `nakamoto_attempt_time_ms` worth of transactions into mempool // Multiple blocks should be mined - for _ in 0..tenure_count { - let info_before = get_chain_info_result(&naka_conf).unwrap(); + let info_before = get_chain_info_result(&naka_conf).unwrap(); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); - let tx_limit = 10000; - let tx_fee = 500; - let amount = 500; - let mut tx_total_size = 0; - let mut tx_count = 0; - let mut acct_idx = 0; - - // Submit max # of txs from each account to reach tx_limit - 'submit_txs: loop { - let acct = &mut account[acct_idx]; - for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { - let transfer_tx = - make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); - submit_tx(&http_origin, &transfer_tx); - tx_total_size += transfer_tx.len(); - tx_count 
+= 1; - acct.nonce += 1; - if tx_count >= tx_limit { - break 'submit_txs; - } + let tx_limit = 10000; + let tx_fee = 500; + let amount = 500; + let mut tx_total_size = 0; + let mut tx_count = 0; + let mut acct_idx = 0; + + // Submit max # of txs from each account to reach tx_limit + 'submit_txs: loop { + let acct = &mut account[acct_idx]; + for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { + let transfer_tx = + make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + submit_tx(&http_origin, &transfer_tx); + tx_total_size += transfer_tx.len(); + tx_count += 1; + acct.nonce += 1; + if tx_count >= tx_limit { + break 'submit_txs; } - acct_idx += 1; } + acct_idx += 1; + } - // Make sure that these transactions *could* fit into a single block - assert!(tx_total_size < MAX_BLOCK_LEN as usize); + // Make sure that these transactions *could* fit into a single block + assert!(tx_total_size < MAX_BLOCK_LEN as usize); - // Wait long enough for 2 blocks to be made - thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); + // Wait long enough for 2 blocks to be made + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); - // Check that 2 blocks were made - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + // Check that 2 blocks were made + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); - let blocks_mined = blocks_processed - blocks_processed_before; - assert!(blocks_mined > 2); + let blocks_mined = blocks_processed - blocks_processed_before; + assert!(blocks_mined > 2); - let info = get_chain_info_result(&naka_conf).unwrap(); - assert_ne!(info.stacks_tip, info_before.stacks_tip); - assert_ne!(info.stacks_tip_height, info_before.stacks_tip_height); - } + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, info_before.stacks_tip); + assert_ne!(info.stacks_tip_height, 
info_before.stacks_tip_height); // ----- Clean up ----- coord_channel