1 change: 1 addition & 0 deletions dash-spv/Cargo.toml
@@ -12,6 +12,7 @@ rust-version = "1.80"
# Core Dash libraries
dashcore = { path = "../dash", features = ["std", "serde", "core-block-hash-use-x11", "message_verification", "bls", "quorum_validation"] }
dashcore_hashes = { path = "../hashes" }
key-wallet-manager = { path = "../key-wallet-manager" }

# BLS signatures
blsful = "2.5"
18 changes: 16 additions & 2 deletions dash-spv/examples/filter_sync.rs
@@ -1,8 +1,13 @@
//! BIP157 filter synchronization example.

use dash_spv::{init_logging, ClientConfig, DashSpvClient, WatchItem};
use dash_spv::network::MultiPeerNetworkManager;
use dash_spv::storage::MemoryStorageManager;
use dash_spv::{init_logging, ClientConfig, DashSpvClient};
use dashcore::{Address, Network};
use key_wallet_manager::spv_wallet_manager::SPVWalletManager;
use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::RwLock;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
@@ -19,8 +24,17 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.watch_address(watch_address.clone().require_network(Network::Dash).unwrap())
.without_masternodes(); // Skip masternode sync for this example

// Create network manager
let network_manager = MultiPeerNetworkManager::new(&config).await?;

// Create storage manager
let storage_manager = MemoryStorageManager::new().await?;

// Create wallet manager
let wallet = Arc::new(RwLock::new(SPVWalletManager::new()));

// Create the client
let mut client = DashSpvClient::new(config).await?;
let mut client = DashSpvClient::new(config, network_manager, storage_manager, wallet).await?;

// Start the client
client.start().await?;
212 changes: 116 additions & 96 deletions dash-spv/examples/reorg_demo.rs
@@ -1,103 +1,123 @@
// TODO: This example needs to be updated as the reorganize() method was removed
// The reorganization logic is now handled internally by the SPV client
// and wallet state is managed through the WalletInterface

#![allow(dead_code)]

//! Demo showing that chain reorganization now works without borrow conflicts

use dash_spv::chain::{ChainWork, Fork, ReorgManager};
use dash_spv::storage::{MemoryStorageManager, StorageManager};
use dash_spv::types::ChainState;
use dash_spv::wallet::WalletState;
use dashcore::{blockdata::constants::genesis_block, Header as BlockHeader, Network};
use dashcore_hashes::Hash;

fn create_test_header(prev: &BlockHeader, nonce: u32) -> BlockHeader {
let mut header = prev.clone();
header.prev_blockhash = prev.block_hash();
header.nonce = nonce;
header.time = prev.time + 600; // 10 minutes later
header
// Temporarily disable this example
fn main() {
println!("This example is temporarily disabled pending updates to use the new architecture");
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("🔧 Chain Reorganization Demo - Testing Borrow Conflict Fix\n");

// Create test components
let network = Network::Dash;
let genesis = genesis_block(network).header;
let mut chain_state = ChainState::new_for_network(network);
let mut wallet_state = WalletState::new(network);
let mut storage = MemoryStorageManager::new().await?;

println!("📦 Building main chain: genesis -> block1 -> block2");

// Build main chain: genesis -> block1 -> block2
let block1 = create_test_header(&genesis, 1);
let block2 = create_test_header(&block1, 2);

// Store main chain
storage.store_headers(&[genesis]).await?;
storage.store_headers(&[block1]).await?;
storage.store_headers(&[block2]).await?;

// Update chain state
chain_state.add_header(genesis);
chain_state.add_header(block1);
chain_state.add_header(block2);

println!("✅ Main chain height: {}", chain_state.get_height());

println!("\n📦 Building fork: genesis -> block1' -> block2' -> block3'");

// Build fork chain: genesis -> block1' -> block2' -> block3'
let block1_fork = create_test_header(&genesis, 100); // Different nonce
let block2_fork = create_test_header(&block1_fork, 101);
let block3_fork = create_test_header(&block2_fork, 102);

// Create fork with more work
let fork = Fork {
fork_point: genesis.block_hash(),
fork_height: 0, // Fork from genesis
tip_hash: block3_fork.block_hash(),
tip_height: 3,
headers: vec![block1_fork, block2_fork, block3_fork],
chain_work: ChainWork::from_bytes([255u8; 32]), // Maximum work
};

println!("✅ Fork chain height: {}", fork.tip_height);
println!("✅ Fork has more work than main chain");

println!("\n🔄 Attempting reorganization...");
println!(" This previously failed with borrow conflict!");

// Create reorg manager
let reorg_manager = ReorgManager::new(100, false);

// This should now work without borrow conflicts!
match reorg_manager.reorganize(&mut chain_state, &mut wallet_state, &fork, &mut storage).await {
Ok(event) => {
println!("\n✅ Reorganization SUCCEEDED!");
println!(
" - Common ancestor: {} at height {}",
event.common_ancestor, event.common_height
);
println!(" - Disconnected {} headers", event.disconnected_headers.len());
println!(" - Connected {} headers", event.connected_headers.len());
println!(" - New chain height: {}", chain_state.get_height());

// Verify new headers were stored
let header_at_3 = storage.get_header(3).await?;
if header_at_3.is_some() {
println!("\n✅ New chain tip verified in storage!");
}
#[cfg(skip_example)]
mod disabled_example {
use dash_spv::chain::{ChainWork, Fork, ReorgManager};
use dash_spv::storage::{MemoryStorageManager, StorageManager};
use dash_spv::types::ChainState;
use dashcore::{blockdata::constants::genesis_block, Header as BlockHeader, Network};
use dashcore_hashes::Hash;
use key_wallet_manager::spv_wallet_manager::SPVWalletManager;
use std::sync::Arc;
use tokio::sync::RwLock;

fn create_test_header(prev: &BlockHeader, nonce: u32) -> BlockHeader {
let mut header = prev.clone();
header.prev_blockhash = prev.block_hash();
header.nonce = nonce;
header.time = prev.time + 600; // 10 minutes later
header
}

println!("\n🎉 Borrow conflict has been resolved!");
println!(" The reorganization now uses a phased approach:");
println!(" 1. Read phase: Collect all necessary data");
println!(" 2. Write phase: Apply changes using only StorageManager");
}
Err(e) => {
println!("\n❌ Reorganization failed: {}", e);
println!(" This suggests the borrow conflict still exists.");
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("🔧 Chain Reorganization Demo - Testing Borrow Conflict Fix\n");

// Create test components
let network = Network::Dash;
let genesis = genesis_block(network).header;
let mut chain_state = ChainState::new_for_network(network);
let wallet_manager = Arc::new(RwLock::new(SPVWalletManager::new()));
let mut storage = MemoryStorageManager::new().await?;

println!("📦 Building main chain: genesis -> block1 -> block2");

// Build main chain: genesis -> block1 -> block2
let block1 = create_test_header(&genesis, 1);
let block2 = create_test_header(&block1, 2);

// Store main chain
storage.store_headers(&[genesis]).await?;
storage.store_headers(&[block1]).await?;
storage.store_headers(&[block2]).await?;

// Update chain state
chain_state.add_header(genesis);
chain_state.add_header(block1);
chain_state.add_header(block2);

println!("✅ Main chain height: {}", chain_state.get_height());

println!("\n📦 Building fork: genesis -> block1' -> block2' -> block3'");

// Build fork chain: genesis -> block1' -> block2' -> block3'
let block1_fork = create_test_header(&genesis, 100); // Different nonce
let block2_fork = create_test_header(&block1_fork, 101);
let block3_fork = create_test_header(&block2_fork, 102);

// Create fork with more work
let fork = Fork {
fork_point: genesis.block_hash(),
fork_height: 0, // Fork from genesis
tip_hash: block3_fork.block_hash(),
tip_height: 3,
headers: vec![block1_fork, block2_fork, block3_fork],
chain_work: ChainWork::from_bytes([255u8; 32]), // Maximum work
};

println!("✅ Fork chain height: {}", fork.tip_height);
println!("✅ Fork has more work than main chain");

println!("\n🔄 Attempting reorganization...");
println!(" This previously failed with borrow conflict!");

// Create reorg manager
let reorg_manager = ReorgManager::new(100, false);

// This should now work without borrow conflicts!
// Note: reorganize now takes wallet as an Arc<RwLock<W>> where W: WalletInterface
match reorg_manager
.reorganize(&mut chain_state, wallet_manager.clone(), &fork, &mut storage)
.await
{
Ok(event) => {
println!("\n✅ Reorganization SUCCEEDED!");
println!(
" - Common ancestor: {} at height {}",
event.common_ancestor, event.common_height
);
println!(" - Disconnected {} headers", event.disconnected_headers.len());
println!(" - Connected {} headers", event.connected_headers.len());
println!(" - New chain height: {}", chain_state.get_height());

// Verify new headers were stored
let header_at_3 = storage.get_header(3).await?;
if header_at_3.is_some() {
println!("\n✅ New chain tip verified in storage!");
}

println!("\n🎉 Borrow conflict has been resolved!");
println!(" The reorganization now uses a phased approach:");
println!(" 1. Read phase: Collect all necessary data");
println!(" 2. Write phase: Apply changes using only StorageManager");
}
Err(e) => {
println!("\n❌ Reorganization failed: {}", e);
println!(" This suggests the borrow conflict still exists.");
}
}
}

Ok(())
}
Ok(())
}
} // end of disabled_example module
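
The disabled demo above documents the design it was written to exercise: reorganize() now receives the wallet as Arc<RwLock<W>> where W: WalletInterface and applies changes in two phases, a read phase that collects data and a write phase that mutates storage and wallet. Below is a minimal, self-contained sketch of that locking pattern only; DemoWallet and DemoStorage are hypothetical stand-ins, not dash-spv's real WalletInterface or StorageManager types.

use std::sync::Arc;
use tokio::sync::RwLock;

// Hypothetical stand-ins for the real storage and wallet types.
struct DemoStorage {
    headers: Vec<&'static str>, // block labels on the active chain
}

struct DemoWallet {
    // (txid, confirming block) pairs the wallet is tracking
    confirmations: Vec<(&'static str, &'static str)>,
}

async fn reorganize_phased(
    wallet: Arc<RwLock<DemoWallet>>,
    storage: &mut DemoStorage,
    disconnected: &[&'static str],
    connected: &[&'static str],
) {
    // Read phase: hold a read lock only long enough to collect what the
    // write phase needs, then drop the guard before mutating anything.
    let to_unconfirm: Vec<&'static str> = {
        let w = wallet.read().await;
        w.confirmations
            .iter()
            .filter(|(_, block)| disconnected.contains(block))
            .map(|(txid, _)| *txid)
            .collect()
    }; // read guard dropped here

    // Write phase: mutate storage first, then take the write lock on the
    // wallet; no borrows from the read phase are still alive.
    storage.headers.retain(|b| !disconnected.contains(b));
    storage.headers.extend_from_slice(connected);

    let mut w = wallet.write().await;
    w.confirmations.retain(|(txid, _)| !to_unconfirm.contains(txid));
}

#[tokio::main]
async fn main() {
    let wallet = Arc::new(RwLock::new(DemoWallet {
        confirmations: vec![("tx1", "block2")],
    }));
    let mut storage = DemoStorage { headers: vec!["block1", "block2"] };

    // Disconnect block2, connect the heavier fork block2' -> block3'.
    reorganize_phased(wallet.clone(), &mut storage, &["block2"], &["block2'", "block3'"]).await;

    println!("chain: {:?}", storage.headers);
    println!("wallet confirmations: {:?}", wallet.read().await.confirmations);
}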
16 changes: 15 additions & 1 deletion dash-spv/examples/simple_sync.rs
@@ -1,6 +1,11 @@
//! Simple header synchronization example.

use dash_spv::network::MultiPeerNetworkManager;
use dash_spv::storage::MemoryStorageManager;
use dash_spv::{init_logging, ClientConfig, DashSpvClient};
use key_wallet_manager::spv_wallet_manager::SPVWalletManager;
use std::sync::Arc;
use tokio::sync::RwLock;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
@@ -12,8 +17,17 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.without_filters() // Skip filter sync for this example
.without_masternodes(); // Skip masternode sync for this example

// Create network manager
let network_manager = MultiPeerNetworkManager::new(&config).await?;

// Create storage manager
let storage_manager = MemoryStorageManager::new().await?;

// Create wallet manager
let wallet = Arc::new(RwLock::new(SPVWalletManager::new()));

// Create the client
let mut client = DashSpvClient::new(config).await?;
let mut client = DashSpvClient::new(config, network_manager, storage_manager, wallet).await?;

// Start the client
client.start().await?;
22 changes: 2 additions & 20 deletions dash-spv/src/bloom/builder.rs
@@ -2,7 +2,6 @@

use super::utils::{extract_pubkey_hash, outpoint_to_bytes};
use crate::error::SpvError;
use crate::wallet::Wallet;
use dashcore::address::Address;
use dashcore::bloom::{BloomFilter, BloomFlags};
use dashcore::OutPoint;
@@ -93,25 +92,8 @@ impl BloomFilterBuilder {
self
}

/// Build a bloom filter from wallet state
pub async fn from_wallet(wallet: &Wallet) -> Result<Self, SpvError> {
let mut builder = Self::new();

// Add all wallet addresses
let addresses = wallet.get_all_addresses().await?;
builder = builder.add_addresses(addresses);

// Add unspent outputs
let utxos = wallet.get_unspent_outputs().await?;
let outpoints = utxos.into_iter().map(|utxo| utxo.outpoint);
builder = builder.add_outpoints(outpoints);

// Set reasonable parameters based on wallet size
let total_elements = builder.addresses.len() + builder.outpoints.len();
builder = builder.elements(std::cmp::max(100, total_elements as u32 * 2));

Ok(builder)
}
// Removed: from_wallet - wallet functionality is now handled externally
// The wallet interface doesn't expose addresses and UTXOs directly

/// Build the bloom filter
pub fn build(self) -> Result<BloomFilter, SpvError> {
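
Callers that previously relied on from_wallet now gather the watch data themselves and hand it to the builder. A minimal sketch of that caller-side wiring, mirroring the sizing heuristic of the removed method; the dash_spv::bloom::BloomFilterBuilder path and the exact add_addresses / add_outpoints / elements / build signatures are assumptions based on the code shown above, not confirmed by this diff.

use dash_spv::bloom::BloomFilterBuilder; // assumed export path
use dashcore::{bloom::BloomFilter, Address, OutPoint};

// addresses and outpoints come from whatever wallet layer the application uses
// (e.g. SPVWalletManager); the builder no longer reaches into the wallet itself.
fn filter_for_watch_set(
    addresses: Vec<Address>,
    outpoints: Vec<OutPoint>,
) -> Result<BloomFilter, Box<dyn std::error::Error>> {
    let total = addresses.len() + outpoints.len();

    let filter = BloomFilterBuilder::new()
        .add_addresses(addresses)
        .add_outpoints(outpoints.into_iter())
        // Same sizing rule the removed from_wallet used: at least 100 elements,
        // otherwise twice the number of watched items.
        .elements(std::cmp::max(100, total as u32 * 2))
        .build()?;

    Ok(filter)
}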