Merged
2 changes: 0 additions & 2 deletions dash-spv-ffi/src/types.rs
@@ -182,7 +182,6 @@ impl From<DetailedSyncProgress> for FFIDetailedSyncProgress {
#[repr(C)]
pub struct FFIChainState {
pub header_height: u32,
pub filter_header_height: u32,
pub masternode_height: u32,
pub last_chainlock_height: u32,
pub last_chainlock_hash: FFIString,
@@ -193,7 +192,6 @@ impl From<ChainState> for FFIChainState {
fn from(state: ChainState) -> Self {
FFIChainState {
header_height: state.headers.len() as u32,
filter_header_height: state.filter_headers.len() as u32,
masternode_height: state.last_masternode_diff_height.unwrap_or(0),
last_chainlock_height: state.last_chainlock_height.unwrap_or(0),
last_chainlock_hash: FFIString::new(
2 changes: 0 additions & 2 deletions dash-spv-ffi/tests/unit/test_type_conversions.rs
@@ -164,7 +164,6 @@ mod tests {
fn test_chain_state_none_values() {
let state = dash_spv::ChainState {
headers: vec![],
filter_headers: vec![],
last_chainlock_height: None,
last_chainlock_hash: None,
current_filter_tip: None,
@@ -175,7 +174,6 @@

let ffi_state = FFIChainState::from(state);
assert_eq!(ffi_state.header_height, 0);
assert_eq!(ffi_state.filter_header_height, 0);
assert_eq!(ffi_state.masternode_height, 0);
assert_eq!(ffi_state.last_chainlock_height, 0);
assert_eq!(ffi_state.current_filter_tip, 0);
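Aside: the conversion and the unit test above both rely on the same Option-to-zero flattening convention (`unwrap_or(0)`) when chain state crosses the FFI boundary. A minimal, self-contained sketch of that convention, using stand-in `State`/`FfiState` types rather than the real ChainState/FFIChainState:

```rust
// Stand-in types for illustration only; the real structs live in dash-spv
// and dash-spv-ffi and carry more fields than shown here.
struct State {
    last_chainlock_height: Option<u32>,
    last_masternode_diff_height: Option<u32>,
}

#[repr(C)]
struct FfiState {
    last_chainlock_height: u32,
    masternode_height: u32,
}

impl From<State> for FfiState {
    fn from(s: State) -> Self {
        FfiState {
            // Absent values are flattened to 0 for the C side, matching the
            // unwrap_or(0) calls visible in the diff.
            last_chainlock_height: s.last_chainlock_height.unwrap_or(0),
            masternode_height: s.last_masternode_diff_height.unwrap_or(0),
        }
    }
}

fn main() {
    let ffi = FfiState::from(State {
        last_chainlock_height: None,
        last_masternode_diff_height: Some(42),
    });
    assert_eq!(ffi.last_chainlock_height, 0); // None maps to 0, as the test asserts
    assert_eq!(ffi.masternode_height, 42);
}
```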
36 changes: 0 additions & 36 deletions dash-spv/src/client/core.rs
@@ -271,42 +271,6 @@ impl<
Ok(())
}

/// Clear all stored filter headers and compact filters while keeping other data intact.
pub async fn clear_filters(&mut self) -> Result<()> {
{
let mut storage = self.storage.lock().await;
storage.clear_filters().await.map_err(SpvError::Storage)?;
}

// Reset in-memory chain state for filters
{
let mut state = self.state.write().await;
state.filter_headers.clear();
state.current_filter_tip = None;
}

// Reset filter sync manager tracking
self.sync_manager.filter_sync_mut().clear_filter_state().await;

// Reset filter-related statistics
let received_heights = {
let stats = self.stats.read().await;
stats.received_filter_heights.clone()
};

{
let mut stats = self.stats.write().await;
stats.filter_headers_downloaded = 0;
stats.filter_height = 0;
stats.filters_downloaded = 0;
stats.filters_received = 0;
}

received_heights.lock().await.clear();

Ok(())
}

// ============ Configuration ============

/// Update the client configuration.
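Aside: the removed clear_filters shows a scoped-locking pattern: each lock is taken inside its own block so it is released before the next await point, rather than holding the storage, state, and stats locks at once. A minimal sketch of that pattern, assuming tokio's async Mutex/RwLock (consistent with the `.lock().await`/`.write().await` calls in the removed code, though the diff does not name the runtime):

```rust
use std::sync::Arc;
use tokio::sync::{Mutex, RwLock};

#[derive(Default)]
struct Stats {
    filters_downloaded: u64,
}

#[tokio::main]
async fn main() {
    let storage = Arc::new(Mutex::new(Vec::<u8>::new())); // stand-in for the storage manager
    let stats = Arc::new(RwLock::new(Stats::default()));  // stand-in for the stats struct

    {
        // Scope 1: mutate storage, then drop the lock at the end of the block.
        let mut s = storage.lock().await;
        s.clear();
    }
    {
        // Scope 2: reset counters; the storage lock is no longer held here.
        let mut st = stats.write().await;
        st.filters_downloaded = 0;
    }

    assert_eq!(stats.read().await.filters_downloaded, 0);
}
```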
11 changes: 0 additions & 11 deletions dash-spv/src/storage/state.rs
@@ -22,13 +22,6 @@ impl DiskStorageManager {
// For checkpoint sync, we need to store headers starting from the checkpoint height
self.store_headers_at_height(&state.headers, state.sync_base_height).await?;

// Store filter headers
self.filter_headers
.write()
.await
.store_items(&state.filter_headers, state.sync_base_height, self)
.await?;

// Store other state as JSON
let state_data = serde_json::json!({
"last_chainlock_height": state.last_chainlock_height,
@@ -87,10 +80,6 @@ impl DiskStorageManager {
if let Some(tip_height) = self.get_tip_height().await? {
state.headers = self.load_headers(range_start..tip_height + 1).await?;
}
if let Some(filter_tip_height) = self.get_filter_tip_height().await? {
state.filter_headers =
self.load_filter_headers(range_start..filter_tip_height + 1).await?;
}

Ok(Some(state))
}
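Aside: with filter headers no longer persisted, the save path above reduces to the headers themselves plus a small JSON blob of scalar state built with serde_json's json! macro. A minimal sketch of that approach (the file name and exact field set are illustrative assumptions, not the storage manager's real layout):

```rust
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Stand-in values; the real code takes these from ChainState.
    let last_chainlock_height: Option<u32> = Some(1_950_000);
    let sync_base_height: u32 = 1_900_000;

    let state_data = json!({
        "last_chainlock_height": last_chainlock_height,
        "sync_base_height": sync_base_height,
    });

    // Hypothetical path; DiskStorageManager writes into its own data directory.
    std::fs::write("chain_state.json", serde_json::to_vec_pretty(&state_data)?)?;
    Ok(())
}
```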
25 changes: 1 addition & 24 deletions dash-spv/src/types.rs
@@ -250,16 +250,12 @@ impl DetailedSyncProgress {
///
/// ## Memory Considerations
/// - headers: ~80 bytes per header
/// - filter_headers: 32 bytes per filter header
/// - At 2M blocks: ~160MB for headers, ~64MB for filter headers
/// - At 2M blocks: ~160MB for headers
#[derive(Clone, Default)]
pub struct ChainState {
/// Block headers indexed by height.
pub headers: Vec<BlockHeader>,

/// Filter headers indexed by height.
pub filter_headers: Vec<FilterHeader>,

/// Last ChainLock height.
pub last_chainlock_height: Option<u32>,

@@ -355,28 +351,11 @@ impl ChainState {
self.headers.get(index)
}

/// Get filter header at the given height.
pub fn filter_header_at_height(&self, height: u32) -> Option<&FilterHeader> {
if height < self.sync_base_height {
return None; // Height is before our sync base
}
let index = (height - self.sync_base_height) as usize;
self.filter_headers.get(index)
}

/// Add headers to the chain.
pub fn add_headers(&mut self, headers: Vec<BlockHeader>) {
self.headers.extend(headers);
}

/// Add filter headers to the chain.
pub fn add_filter_headers(&mut self, filter_headers: Vec<FilterHeader>) {
if let Some(last) = filter_headers.last() {
self.current_filter_tip = Some(*last);
}
self.filter_headers.extend(filter_headers);
}

/// Get the tip header
pub fn get_tip_header(&self) -> Option<BlockHeader> {
self.headers.last().copied()
@@ -458,7 +437,6 @@ impl ChainState {
) {
// Clear any existing headers
self.headers.clear();
self.filter_headers.clear();

// Set sync base height to checkpoint
self.sync_base_height = checkpoint_height;
@@ -498,7 +476,6 @@ impl std::fmt::Debug for ChainState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ChainState")
.field("headers", &format!("{} headers", self.headers.len()))
.field("filter_headers", &format!("{} filter headers", self.filter_headers.len()))
.field("last_chainlock_height", &self.last_chainlock_height)
.field("last_chainlock_hash", &self.last_chainlock_hash)
.field("current_filter_tip", &self.current_filter_tip)
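Aside: after this change ChainState keeps a single height-indexed vector (headers) whose index is offset by sync_base_height; the removed filter_header_at_height used the same lookup. A self-contained sketch of that indexing pattern, with a stand-in Header type in place of dashcore's BlockHeader:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Header(u32); // stand-in: just carries a height for the demo

struct State {
    headers: Vec<Header>,  // indexed by (height - sync_base_height)
    sync_base_height: u32, // checkpoint height the vector starts at
}

impl State {
    fn header_at_height(&self, height: u32) -> Option<&Header> {
        if height < self.sync_base_height {
            return None; // before the sync base (checkpoint)
        }
        let index = (height - self.sync_base_height) as usize;
        self.headers.get(index)
    }
}

fn main() {
    let state = State {
        headers: vec![Header(1_900_000), Header(1_900_001)],
        sync_base_height: 1_900_000,
    };
    assert_eq!(state.header_at_height(1_900_001), Some(&Header(1_900_001)));
    assert_eq!(state.header_at_height(1_899_999), None);
}
```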