Feat: add heuristics to miner for nakamoto #5238

Merged (22 commits, Oct 5, 2024)

Changes from all commits (22 commits)
f394e64
feat: add 2 heuristics to miner for nakamoto
kantai Sep 24, 2024
cdb5394
use * in mempool SELECT
kantai Sep 25, 2024
b995e27
chore: fix signer_set_rollover test
kantai Sep 25, 2024
541d13b
fix: bug in the tenure extending logic -- only include tenure change …
kantai Sep 25, 2024
251dc3c
Merge remote-tracking branch 'origin/develop' into feat/naka-miner-he…
kantai Sep 25, 2024
0fe565c
Merge branch 'develop' into feat/naka-miner-heuristics
obycode Sep 25, 2024
6f60813
use better sqlite column affinity
kantai Sep 25, 2024
677accd
more test fixes
kantai Sep 26, 2024
419ce43
more test fixes
kantai Sep 26, 2024
6a3746c
fix test assertion
kantai Sep 26, 2024
91d6473
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Sep 26, 2024
f271794
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Sep 30, 2024
8b36b88
fix: need interim blocks in the nakamoto integration tests
kantai Sep 30, 2024
91f429a
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Oct 2, 2024
90c2596
test: add assertions for empty block heuristics
kantai Oct 2, 2024
2003a72
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Oct 2, 2024
994ef3b
test: add long tx test
kantai Oct 3, 2024
e5ecf72
Merge remote-tracking branch 'origin/develop' into feat/naka-miner-he…
kantai Oct 3, 2024
83c6924
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Oct 4, 2024
2c6aad8
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Oct 4, 2024
331dc94
feat: wait to build block if min gap won't be met
kantai Oct 4, 2024
86f0c1f
Merge branch 'develop' into feat/naka-miner-heuristics
kantai Oct 4, 2024
25 changes: 14 additions & 11 deletions .github/workflows/bitcoin-tests.yml
@@ -32,7 +32,6 @@ jobs:
- tests::bitcoin_regtest::bitcoind_integration_test
- tests::integrations::integration_test_get_info
- tests::neon_integrations::antientropy_integration_test
- tests::neon_integrations::bad_microblock_pubkey
- tests::neon_integrations::bitcoind_forking_test
- tests::neon_integrations::bitcoind_integration_test
- tests::neon_integrations::block_large_tx_integration_test
@@ -43,21 +42,26 @@ jobs:
- tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window10
- tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window5
- tests::neon_integrations::liquid_ustx_integration
- tests::neon_integrations::microblock_fork_poison_integration_test
- tests::neon_integrations::microblock_integration_test
# Microblock tests that are no longer needed on every CI run
# (microblocks are unsupported starting in Epoch 2.5)
# - tests::neon_integrations::bad_microblock_pubkey
# - tests::neon_integrations::microblock_fork_poison_integration_test
# - tests::neon_integrations::microblock_integration_test
# - tests::neon_integrations::microblock_limit_hit_integration_test
# - tests::neon_integrations::test_problematic_microblocks_are_not_mined
# - tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored
# - tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test
# - tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test
# - tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test
# - tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test
# Disable this flaky test. Microblocks are no longer supported anyways.
# - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY
- tests::neon_integrations::microblock_limit_hit_integration_test
- tests::neon_integrations::miner_submit_twice
- tests::neon_integrations::mining_events_integration_test
- tests::neon_integrations::pox_integration_test
- tests::neon_integrations::push_boot_receipts
- tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test
- tests::neon_integrations::should_fix_2771
- tests::neon_integrations::size_check_integration_test
- tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test
- tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test
- tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test
- tests::neon_integrations::stx_delegate_btc_integration_test
- tests::neon_integrations::stx_transfer_btc_integration_test
- tests::neon_integrations::stack_stx_burn_op_test
@@ -66,8 +70,6 @@ jobs:
- tests::neon_integrations::test_flash_block_skip_tenure
- tests::neon_integrations::test_problematic_blocks_are_not_mined
- tests::neon_integrations::test_problematic_blocks_are_not_relayed_or_stored
- tests::neon_integrations::test_problematic_microblocks_are_not_mined
- tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored
- tests::neon_integrations::test_problematic_txs_are_not_stored
- tests::neon_integrations::use_latest_tip_integration_test
- tests::neon_integrations::confirm_unparsed_ongoing_ops
@@ -81,7 +83,7 @@ jobs:
- tests::epoch_25::microblocks_disabled
- tests::should_succeed_handling_malformed_and_valid_txs
- tests::nakamoto_integrations::simple_neon_integration
- tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3
- tests::nakamoto_integrations::flash_blocks_on_epoch_3
- tests::nakamoto_integrations::mine_multiple_per_tenure_integration
- tests::nakamoto_integrations::block_proposal_api_endpoint
- tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb
@@ -90,6 +92,7 @@ jobs:
- tests::nakamoto_integrations::follower_bootup
- tests::nakamoto_integrations::forked_tenure_is_ignored
- tests::nakamoto_integrations::nakamoto_attempt_time
- tests::nakamoto_integrations::skip_mining_long_tx
- tests::signer::v0::block_proposal_rejection
- tests::signer::v0::miner_gather_signatures
- tests::signer::v0::end_of_tenure
52 changes: 51 additions & 1 deletion stackslib/src/chainstate/stacks/miner.rs
@@ -18,6 +18,7 @@ use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::ThreadId;
use std::time::Instant;
use std::{cmp, fs, mem};

use clarity::vm::analysis::{CheckError, CheckErrors};
@@ -2211,6 +2212,15 @@ impl StacksBlockBuilder {
);
}

// nakamoto miner tenure start heuristic:
// mine an empty block so you can start your tenure quickly!
if let Some(tx) = initial_txs.first() {
if matches!(&tx.payload, TransactionPayload::TenureChange(_)) {
info!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure");
return Ok((false, tx_events));
}
}

mempool.reset_nonce_cache()?;
mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?;

@@ -2221,6 +2231,7 @@

let mut invalidated_txs = vec![];
let mut to_drop_and_blacklist = vec![];
let mut update_timings = vec![];

let deadline = ts_start + u128::from(max_miner_time_ms);
let mut num_txs = 0;
@@ -2250,10 +2261,27 @@
if block_limit_hit == BlockLimitFunction::LIMIT_REACHED {
return Ok(None);
}
if get_epoch_time_ms() >= deadline {
let time_now = get_epoch_time_ms();
if time_now >= deadline {
debug!("Miner mining time exceeded ({} ms)", max_miner_time_ms);
return Ok(None);
}
if let Some(time_estimate) = txinfo.metadata.time_estimate_ms {
if time_now.saturating_add(time_estimate.into()) > deadline {
debug!("Mining tx would cause us to exceed our deadline, skipping";
"txid" => %txinfo.tx.txid(),
"deadline" => deadline,
"now" => time_now,
"estimate" => time_estimate);
return Ok(Some(
TransactionResult::skipped(
&txinfo.tx,
"Transaction would exceed deadline.".into(),
)
.convert_to_event(),
));
}
}

// skip transactions early if we can
if considered.contains(&txinfo.tx.txid()) {
@@ -2303,6 +2331,7 @@
considered.insert(txinfo.tx.txid());
num_considered += 1;

let tx_start = Instant::now();
let tx_result = builder.try_mine_tx_with_len(
epoch_tx,
&txinfo.tx,
@@ -2314,6 +2343,21 @@
let result_event = tx_result.convert_to_event();
match tx_result {
TransactionResult::Success(TransactionSuccess { receipt, .. }) => {
if txinfo.metadata.time_estimate_ms.is_none() {
// use i64 to avoid running into issues when storing in
// rusqlite.
let time_estimate_ms: i64 = tx_start
.elapsed()
.as_millis()
.try_into()
.unwrap_or_else(|_| i64::MAX);
let time_estimate_ms: u64 = time_estimate_ms
.try_into()
// should be unreachable
.unwrap_or_else(|_| 0);
update_timings.push((txinfo.tx.txid(), time_estimate_ms));
}

num_txs += 1;
if update_estimator {
if let Err(e) = estimator.notify_event(
@@ -2386,6 +2430,12 @@
},
);

if !update_timings.is_empty() {
if let Err(e) = mempool.update_tx_time_estimates(&update_timings) {
warn!("Error while updating time estimates for mempool"; "err" => ?e);
}
}

if to_drop_and_blacklist.len() > 0 {
let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist);
}
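The block-assembly changes above add two heuristics: return early with a short block containing just the initial tenure transactions when a `TenureChange` payload leads the initial set, and skip any mempool transaction whose stored `time_estimate_ms` would push mining past the deadline, while recording a wall-clock estimate the first time a transaction succeeds (persisted later via `update_tx_time_estimates`). Below is a minimal standalone sketch of the deadline/estimate logic only; `TxInfo`, `mine_tx`, and `fill_block` are hypothetical stand-ins, not the PR's actual types.

```rust
use std::time::Instant;

/// Hypothetical stand-in for a mempool entry carrying an optional timing estimate.
struct TxInfo {
    txid: u64,
    estimate_ms: Option<u64>,
}

/// Pretend to mine a transaction; returns true on success.
fn mine_tx(_tx: &TxInfo) -> bool {
    true
}

/// Walk candidate transactions until the mining deadline, skipping any tx whose
/// stored estimate would overrun it, and collecting fresh timings to persist later.
fn fill_block(candidates: &[TxInfo], deadline_ms: u128) -> Vec<(u64, u64)> {
    let start = Instant::now();
    let mut new_timings = Vec::new();

    for tx in candidates {
        let elapsed = start.elapsed().as_millis();
        if elapsed >= deadline_ms {
            break; // overall miner deadline reached
        }
        // Heuristic: if a prior estimate says this tx will not fit, skip it.
        if let Some(est) = tx.estimate_ms {
            if elapsed.saturating_add(u128::from(est)) > deadline_ms {
                continue;
            }
        }
        let tx_start = Instant::now();
        if mine_tx(tx) && tx.estimate_ms.is_none() {
            // First measurement for this tx: clamp into i64 range, as the PR does,
            // to keep SQLite integer affinity happy, then store as u64.
            let ms: i64 = tx_start.elapsed().as_millis().try_into().unwrap_or(i64::MAX);
            new_timings.push((tx.txid, ms as u64));
        }
    }
    new_timings
}

fn main() {
    let candidates = vec![
        TxInfo { txid: 1, estimate_ms: None },
        TxInfo { txid: 2, estimate_ms: Some(10_000) }, // too slow for a 5-second budget
    ];
    let timings = fill_block(&candidates, 5_000);
    println!("new timings to persist: {timings:?}");
}
```
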
61 changes: 42 additions & 19 deletions stackslib/src/core/mempool.rs
@@ -460,6 +460,7 @@ pub struct MemPoolTxMetadata {
pub last_known_origin_nonce: Option<u64>,
pub last_known_sponsor_nonce: Option<u64>,
pub accept_time: u64,
pub time_estimate_ms: Option<u64>,
}

impl MemPoolTxMetadata {
@@ -594,6 +595,7 @@ impl FromRow<MemPoolTxMetadata> for MemPoolTxMetadata {
let sponsor_nonce = u64::from_column(row, "sponsor_nonce")?;
let last_known_sponsor_nonce = u64::from_column(row, "last_known_sponsor_nonce")?;
let last_known_origin_nonce = u64::from_column(row, "last_known_origin_nonce")?;
let time_estimate_ms: Option<u64> = row.get("time_estimate_ms")?;

Ok(MemPoolTxMetadata {
txid,
Expand All @@ -609,6 +611,7 @@ impl FromRow<MemPoolTxMetadata> for MemPoolTxMetadata {
last_known_origin_nonce,
last_known_sponsor_nonce,
accept_time,
time_estimate_ms,
})
}
}
Expand All @@ -624,10 +627,7 @@ impl FromRow<MemPoolTxInfo> for MemPoolTxInfo {
return Err(db_error::ParseError);
}

Ok(MemPoolTxInfo {
tx: tx,
metadata: md,
})
Ok(MemPoolTxInfo { tx, metadata: md })
}
}

@@ -803,6 +803,16 @@ const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[
"#,
];

const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[
r#"
-- ALLOW NULL
ALTER TABLE mempool ADD COLUMN time_estimate_ms INTEGER;
"#,
r#"
INSERT INTO schema_version (version) VALUES (7)
"#,
];

const MEMPOOL_INDEXES: &'static [&'static str] = &[
"CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);",
"CREATE INDEX IF NOT EXISTS by_height ON mempool(height);",
@@ -1287,6 +1297,9 @@ impl MemPoolDB {
MemPoolDB::instantiate_nonces(tx)?;
}
6 => {
MemPoolDB::instantiate_schema_7(tx)?;
}
7 => {
break;
}
_ => {
@@ -1363,6 +1376,16 @@
Ok(())
}

    /// Add the `time_estimate_ms` column to the mempool table (schema version 7)
#[cfg_attr(test, mutants::skip)]
fn instantiate_schema_7(tx: &DBTx) -> Result<(), db_error> {
for sql_exec in MEMPOOL_SCHEMA_7_TIME_ESTIMATES {
tx.execute_batch(sql_exec)?;
}

Ok(())
}

#[cfg_attr(test, mutants::skip)]
pub fn db_path(chainstate_root_path: &str) -> Result<String, db_error> {
let mut path = PathBuf::from(chainstate_root_path);
@@ -1992,6 +2015,7 @@ impl MemPoolDB {
nonce: u64,
) -> Result<Option<MemPoolTxMetadata>, db_error> {
let sql = format!(
"SELECT
txid,
origin_address,
origin_nonce,
sponsor_address,
sponsor_nonce,
tx_fee,
length,
consensus_hash,
block_header_hash,
height,
accept_time,
last_known_sponsor_nonce,
last_known_origin_nonce
FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2",
"SELECT * FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2",
if is_origin { "origin" } else { "sponsor" }
);
let args = params![addr.to_string(), u64_to_sql(nonce)?];
@@ -2650,6 +2659,20 @@ impl MemPoolDB {
Ok(())
}

/// Update the time estimates for the supplied txs in the mempool db
pub fn update_tx_time_estimates(&mut self, txs: &[(Txid, u64)]) -> Result<(), db_error> {
let sql = "UPDATE mempool SET time_estimate_ms = ? WHERE txid = ?";
let mempool_tx = self.tx_begin()?;
for (txid, time_estimate_ms) in txs.iter() {
mempool_tx
.tx
.execute(sql, params![time_estimate_ms, txid])?;
}
mempool_tx.commit()?;

Ok(())
}

/// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them.
/// Do *NOT* remove them from the bloom filter. This will cause them to continue to be
/// reported as present, which is exactly what we want because we don't want these transactions
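On the mempool side, the diff adds a nullable `time_estimate_ms` column in a schema-7 migration and writes measured estimates back in a single transaction. A simplified sketch of that pattern follows, assuming the `rusqlite` crate (which stackslib already uses) and a toy `mempool` table; only the column name and UPDATE statement mirror the diff, the rest is illustrative.

```rust
use rusqlite::{params, Connection, Result};

/// Schema 7: add a nullable INTEGER column so untimed rows simply stay NULL.
fn migrate_to_schema_7(conn: &Connection) -> Result<()> {
    conn.execute_batch(
        "ALTER TABLE mempool ADD COLUMN time_estimate_ms INTEGER;
         INSERT INTO schema_version (version) VALUES (7);",
    )
}

/// Batch-update measured time estimates inside a single transaction.
fn update_tx_time_estimates(conn: &mut Connection, txs: &[(String, u64)]) -> Result<()> {
    let tx = conn.transaction()?;
    {
        let mut stmt = tx.prepare("UPDATE mempool SET time_estimate_ms = ?1 WHERE txid = ?2")?;
        for (txid, estimate_ms) in txs {
            // Clamp into i64 range to match SQLite's integer affinity.
            let est = i64::try_from(*estimate_ms).unwrap_or(i64::MAX);
            stmt.execute(params![est, txid])?;
        }
    }
    tx.commit()
}

fn main() -> Result<()> {
    let mut conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE mempool (txid TEXT PRIMARY KEY);
         CREATE TABLE schema_version (version INTEGER);
         INSERT INTO mempool (txid) VALUES ('abc123');",
    )?;
    migrate_to_schema_7(&conn)?;
    update_tx_time_estimates(&mut conn, &[("abc123".to_string(), 42)])?;
    Ok(())
}
```
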
2 changes: 1 addition & 1 deletion stackslib/src/core/tests/mod.rs
@@ -1381,7 +1381,7 @@ fn mempool_do_not_replace_tx() {
.unwrap_err();
assert!(match err_resp {
MemPoolRejection::ConflictingNonceInMempool => true,
_ => false,
e => panic!("Failed: {e:?}"),
});

assert!(MemPoolDB::db_has_tx(&mempool_tx, &prior_txid).unwrap());
Expand Down
52 changes: 36 additions & 16 deletions testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -40,6 +40,7 @@ use stacks::chainstate::stacks::{
use stacks::net::p2p::NetworkHandle;
use stacks::net::stackerdb::StackerDBs;
use stacks::net::{NakamotoBlocksData, StacksMessageType};
use stacks::util::get_epoch_time_secs;
use stacks::util::secp256k1::MessageSignature;
use stacks_common::types::chainstate::{StacksAddress, StacksBlockId};
use stacks_common::types::{PrivateKey, StacksEpochId};
@@ -949,6 +950,29 @@ impl BlockMinerThread {
Some(vrf_proof)
}

fn validate_timestamp_info(
&self,
current_timestamp_secs: u64,
stacks_parent_header: &StacksHeaderInfo,
) -> bool {
let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() {
Some(naka_header) => naka_header.timestamp,
None => stacks_parent_header.burn_header_timestamp,
};
let time_since_parent_ms = current_timestamp_secs.saturating_sub(parent_timestamp) * 1000;
if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms {
debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms;
"current_timestamp" => current_timestamp_secs,
"parent_block_id" => %stacks_parent_header.index_block_hash(),
"parent_block_height" => stacks_parent_header.stacks_block_height,
"parent_block_timestamp" => stacks_parent_header.burn_header_timestamp,
);
false
} else {
true
}
}

/// Check that the provided block is not mined too quickly after the parent block.
/// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block.
fn validate_timestamp(&self, x: &NakamotoBlock) -> Result<bool, NakamotoNodeError> {
@@ -970,22 +994,7 @@
);
NakamotoNodeError::ParentNotFound
})?;
let current_timestamp = x.header.timestamp;
let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() {
Some(naka_header) => naka_header.timestamp,
None => stacks_parent_header.burn_header_timestamp,
};
let time_since_parent_ms = current_timestamp.saturating_sub(parent_timestamp) * 1000;
if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms {
debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms;
"current_timestamp" => current_timestamp,
"parent_block_id" => %stacks_parent_header.index_block_hash(),
"parent_block_height" => stacks_parent_header.stacks_block_height,
"parent_block_timestamp" => stacks_parent_header.burn_header_timestamp,
);
return Ok(false);
}
Ok(true)
Ok(self.validate_timestamp_info(x.header.timestamp, &stacks_parent_header))
}

// TODO: add tests from mutation testing results #4869
@@ -1042,6 +1051,17 @@

let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok();

if !self.validate_timestamp_info(
get_epoch_time_secs(),
&parent_block_info.stacks_parent_header,
) {
// treat a too-soon-to-mine block as an interrupt: this will let the caller sleep and then re-evaluate
// all the pre-mining checks (burnchain tip changes, signal interrupts, etc.)
return Err(NakamotoNodeError::MiningFailure(
ChainstateError::MinerAborted,
));
}

// build the block itself
let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block(
&chain_state,
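The final heuristic, added in the last commit, waits to build a block when the configured minimum gap since the parent block has not yet elapsed, returning an interrupt-style error so the mining loop sleeps and re-runs its pre-mining checks. A hedged sketch of the gap check, with hypothetical `ParentInfo` and `min_gap_met` names standing in for the real header and config types:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Hypothetical, simplified view of the parent header: just its timestamp in seconds.
struct ParentInfo {
    timestamp_secs: u64,
}

/// True when enough wall-clock time has passed since the parent block that a new
/// block's timestamp will satisfy the configured minimum gap (mirrors the check above).
fn min_gap_met(parent: &ParentInfo, now_secs: u64, min_gap_ms: u64) -> bool {
    let since_parent_ms = now_secs.saturating_sub(parent.timestamp_secs) * 1000;
    since_parent_ms >= min_gap_ms
}

fn main() {
    let now_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("clock before epoch")
        .as_secs();
    // Pretend the parent block was mined this very second.
    let parent = ParentInfo { timestamp_secs: now_secs };
    let min_gap_ms = 1_000;

    if !min_gap_met(&parent, now_secs, min_gap_ms) {
        // The PR treats this case as an interrupt-style error, letting the miner
        // sleep and re-run its pre-mining checks before trying again.
        println!("too soon after parent; wait and re-check");
    } else {
        println!("ok to build a block");
    }
}
```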