Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,10 +62,12 @@
- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)).
- Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)).
- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)).
- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)).
- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)).
- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)).
- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/miden-node/pull/1569)).
- Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)).
- Pinned tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)).
- Added `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)).
- Added check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)).
- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)).

### Changes

Expand Down
42 changes: 29 additions & 13 deletions crates/store/src/db/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,11 @@ use crate::db::manager::{ConnectionManager, configure_connection_on_creation};
use crate::db::migrations::apply_migrations;
use crate::db::models::conv::SqlTypeConvert;
use crate::db::models::queries::StorageMapValuesPage;
pub use crate::db::models::queries::{
AccountCommitmentsPage,
NullifiersPage,
PublicAccountIdsPage,
};
use crate::db::models::{Page, queries};
use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError};
use crate::genesis::GenesisBlock;
Expand Down Expand Up @@ -325,12 +330,15 @@ impl Db {
Ok(me)
}

/// Loads all the nullifiers from the DB.
/// Returns a page of nullifiers for tree rebuilding.
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
pub(crate) async fn select_all_nullifiers(&self) -> Result<Vec<NullifierInfo>> {
self.transact("all nullifiers", move |conn| {
let nullifiers = queries::select_all_nullifiers(conn)?;
Ok(nullifiers)
pub async fn select_nullifiers_paged(
&self,
page_size: std::num::NonZeroUsize,
after_nullifier: Option<Nullifier>,
) -> Result<NullifiersPage> {
self.transact("read nullifiers paged", move |conn| {
queries::select_nullifiers_paged(conn, page_size, after_nullifier)
})
.await
}
Expand Down Expand Up @@ -396,20 +404,28 @@ impl Db {
.await
}

/// TODO marked for removal, replace with paged version
/// Returns a page of account commitments for tree rebuilding.
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
pub async fn select_all_account_commitments(&self) -> Result<Vec<(AccountId, Word)>> {
self.transact("read all account commitments", move |conn| {
queries::select_all_account_commitments(conn)
pub async fn select_account_commitments_paged(
&self,
page_size: std::num::NonZeroUsize,
after_account_id: Option<AccountId>,
) -> Result<AccountCommitmentsPage> {
self.transact("read account commitments paged", move |conn| {
queries::select_account_commitments_paged(conn, page_size, after_account_id)
})
.await
}

/// Returns all account IDs that have public state.
/// Returns a page of public account IDs for forest rebuilding.
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)]
pub async fn select_all_public_account_ids(&self) -> Result<Vec<AccountId>> {
self.transact("read all public account IDs", move |conn| {
queries::select_all_public_account_ids(conn)
pub async fn select_public_account_ids_paged(
&self,
page_size: std::num::NonZeroUsize,
after_account_id: Option<AccountId>,
) -> Result<PublicAccountIdsPage> {
self.transact("read public account IDs paged", move |conn| {
queries::select_public_account_ids_paged(conn, page_size, after_account_id)
})
.await
}
Expand Down
138 changes: 102 additions & 36 deletions crates/store/src/db/models/queries/accounts.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use std::collections::BTreeMap;
use std::num::NonZeroUsize;
use std::ops::RangeInclusive;

use diesel::prelude::{Queryable, QueryableByName};
Expand Down Expand Up @@ -254,11 +255,19 @@ pub(crate) fn select_network_account_by_id(
}
}

/// Select all account commitments from the DB using the given [`SqliteConnection`].
///
/// # Returns
/// Page of account commitments returned by [`select_account_commitments_paged`].
#[derive(Debug)]
pub struct AccountCommitmentsPage {
    /// The account commitments in this page, ordered by ascending `account_id`.
    pub commitments: Vec<(AccountId, Word)>,
    /// If `Some`, there are more results. Use this as the `after_account_id` for the next page.
    /// `None` means this was the final page.
    pub next_cursor: Option<AccountId>,
}

/// Selects account commitments with pagination.
///
/// The vector with the account id and corresponding commitment, or an error.
/// Returns up to `page_size` account commitments, starting after `after_account_id` if provided.
/// Results are ordered by `account_id` for stable pagination.
///
/// # Raw SQL
///
Expand All @@ -270,31 +279,71 @@ pub(crate) fn select_network_account_by_id(
/// accounts
/// WHERE
/// is_latest = 1
/// AND (account_id > :after_account_id OR :after_account_id IS NULL)
/// ORDER BY
/// block_num ASC
/// account_id ASC
/// LIMIT :page_size + 1
/// ```
pub(crate) fn select_all_account_commitments(
pub(crate) fn select_account_commitments_paged(
conn: &mut SqliteConnection,
) -> Result<Vec<(AccountId, Word)>, DatabaseError> {
let raw = SelectDsl::select(
page_size: NonZeroUsize,
after_account_id: Option<AccountId>,
) -> Result<AccountCommitmentsPage, DatabaseError> {
use miden_protocol::utils::Serializable;

// Fetch one extra to determine if there are more results
#[allow(clippy::cast_possible_wrap)]
let limit = (page_size.get() + 1) as i64;

let mut query = SelectDsl::select(
schema::accounts::table,
(schema::accounts::account_id, schema::accounts::account_commitment),
)
.filter(schema::accounts::is_latest.eq(true))
.order_by(schema::accounts::block_num.asc())
.load::<(Vec<u8>, Vec<u8>)>(conn)?;
.order_by(schema::accounts::account_id.asc())
.limit(limit)
.into_boxed();

if let Some(cursor) = after_account_id {
query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes()));
}

Result::<Vec<_>, DatabaseError>::from_iter(raw.into_iter().map(
let raw = query.load::<(Vec<u8>, Vec<u8>)>(conn)?;

let mut commitments = Result::<Vec<_>, DatabaseError>::from_iter(raw.into_iter().map(
|(ref account, ref commitment)| {
Ok((AccountId::read_from_bytes(account)?, Word::read_from_bytes(commitment)?))
},
))
))?;

// If we got more than page_size, there are more results
let next_cursor = if commitments.len() > page_size.get() {
commitments.pop(); // Remove the extra element
commitments.last().map(|(id, _)| *id)
} else {
None
};

Ok(AccountCommitmentsPage { commitments, next_cursor })
}

/// Page of public account IDs returned by [`select_public_account_ids_paged`].
#[derive(Debug)]
pub struct PublicAccountIdsPage {
    /// The public account IDs in this page, ordered by ascending `account_id`.
    pub account_ids: Vec<AccountId>,
    /// If `Some`, there are more results. Use this as the `after_account_id` for the next page.
    /// `None` means this was the final page.
    pub next_cursor: Option<AccountId>,
}

/// Select all account IDs that have public state.
/// Selects public account IDs with pagination.
///
/// This filters accounts in-memory after loading only the account IDs (not commitments),
/// which is more efficient than loading full commitments when only IDs are needed.
/// Returns up to `page_size` public account IDs, starting after `after_account_id` if provided.
/// Results are ordered by `account_id` for stable pagination.
///
/// Public accounts are those with `AccountStorageMode::Public` or `AccountStorageMode::Network`.
/// We identify them by checking `code_commitment IS NOT NULL` - public accounts store their full
/// state (including `code_commitment`), while private accounts only store the `account_commitment`.
///
/// # Raw SQL
///
Expand All @@ -305,31 +354,48 @@ pub(crate) fn select_all_account_commitments(
/// accounts
/// WHERE
/// is_latest = 1
/// AND code_commitment IS NOT NULL
/// AND (account_id > :after_account_id OR :after_account_id IS NULL)
/// ORDER BY
/// block_num ASC
/// account_id ASC
/// LIMIT :page_size + 1
/// ```
pub(crate) fn select_all_public_account_ids(
pub(crate) fn select_public_account_ids_paged(
conn: &mut SqliteConnection,
) -> Result<Vec<AccountId>, DatabaseError> {
// We could technically use a `LIKE` constraint for both postgres and sqlite backends,
// but diesel doesn't expose that.
let raw: Vec<Vec<u8>> =
SelectDsl::select(schema::accounts::table, schema::accounts::account_id)
.filter(schema::accounts::is_latest.eq(true))
.order_by(schema::accounts::block_num.asc())
.load::<Vec<u8>>(conn)?;
page_size: NonZeroUsize,
after_account_id: Option<AccountId>,
) -> Result<PublicAccountIdsPage, DatabaseError> {
use miden_protocol::utils::Serializable;

Result::from_iter(
raw.into_iter()
.map(|bytes| {
AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError)
})
.filter_map(|result| match result {
Ok(id) if id.has_public_state() => Some(Ok(id)),
Ok(_) => None,
Err(e) => Some(Err(e)),
}),
)
#[allow(clippy::cast_possible_wrap)]
let limit = (page_size.get() + 1) as i64;

let mut query = SelectDsl::select(schema::accounts::table, schema::accounts::account_id)
.filter(schema::accounts::is_latest.eq(true))
.filter(schema::accounts::code_commitment.is_not_null())
.order_by(schema::accounts::account_id.asc())
.limit(limit)
.into_boxed();

if let Some(cursor) = after_account_id {
query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes()));
}

let raw = query.load::<Vec<u8>>(conn)?;

let mut account_ids: Vec<AccountId> = Result::from_iter(raw.into_iter().map(|bytes| {
AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError)
}))?;

// If we got more than page_size, there are more results
let next_cursor = if account_ids.len() > page_size.get() {
account_ids.pop(); // Remove the extra element
account_ids.last().copied()
} else {
None
};

Ok(PublicAccountIdsPage { account_ids, next_cursor })
}

/// Select account vault assets within a block range (inclusive).
Expand Down
1 change: 1 addition & 0 deletions crates/store/src/db/models/queries/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ pub use block_headers::*;
mod accounts;
pub use accounts::*;
mod nullifiers;
pub use nullifiers::NullifiersPage;
pub(crate) use nullifiers::*;
mod notes;
pub(crate) use notes::*;
Expand Down
63 changes: 63 additions & 0 deletions crates/store/src/db/models/queries/nullifiers.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use std::num::NonZeroUsize;
use std::ops::RangeInclusive;

use diesel::query_dsl::methods::SelectDsl;
Expand Down Expand Up @@ -128,6 +129,7 @@ pub(crate) fn select_nullifiers_by_prefix(
/// ORDER BY
/// block_num ASC
/// ```
#[cfg(test)]
pub(crate) fn select_all_nullifiers(
conn: &mut SqliteConnection,
) -> Result<Vec<NullifierInfo>, DatabaseError> {
Expand All @@ -137,6 +139,67 @@ pub(crate) fn select_all_nullifiers(
vec_raw_try_into(nullifiers_raw)
}

/// Page of nullifiers returned by [`select_nullifiers_paged`].
#[derive(Debug)]
pub struct NullifiersPage {
    /// The nullifiers in this page, ordered by ascending nullifier bytes.
    pub nullifiers: Vec<NullifierInfo>,
    /// If `Some`, there are more results. Use this as the `after_nullifier` for the next page.
    /// `None` means this was the final page.
    pub next_cursor: Option<Nullifier>,
}

/// Selects nullifiers with pagination.
///
/// Returns up to `page_size` nullifiers, starting after `after_nullifier` if provided.
/// Results are ordered by nullifier bytes for stable pagination.
///
/// # Raw SQL
///
/// ```sql
/// SELECT
/// nullifier,
/// block_num
/// FROM
/// nullifiers
/// WHERE
/// (nullifier > :after_nullifier OR :after_nullifier IS NULL)
/// ORDER BY
/// nullifier ASC
/// LIMIT :page_size + 1
/// ```
pub(crate) fn select_nullifiers_paged(
conn: &mut SqliteConnection,
page_size: NonZeroUsize,
after_nullifier: Option<Nullifier>,
) -> Result<NullifiersPage, DatabaseError> {
// Fetch one extra to determine if there are more results
#[allow(clippy::cast_possible_wrap)]
let limit = (page_size.get() + 1) as i64;

let mut query =
SelectDsl::select(schema::nullifiers::table, NullifierWithoutPrefixRawRow::as_select())
.order_by(schema::nullifiers::nullifier.asc())
.limit(limit)
.into_boxed();

if let Some(cursor) = after_nullifier {
query = query.filter(schema::nullifiers::nullifier.gt(cursor.to_bytes()));
}
Comment on lines +185 to +187
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: probably quicker to just after_nullifier.unwrap_or_default() since it will cache the statement on the next call 😁

Copy link
Collaborator

@Mirko-von-Leipzig Mirko-von-Leipzig Jan 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah nvm I see you're using after and not next as the token.

Copy link
Contributor Author

@drahnr drahnr Jan 23, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah nvm I see you're using after and not next as the token.

I don't think there is a difference?

after_nullifier.unwrap_or_default()

This only works for an asc sorting and the implicit all-zeros nullifier. I don't like the implicitness.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Revisiting, there is a small delta in what the cursor means, and I think we should unify eventually.


let nullifiers_raw = query.load::<NullifierWithoutPrefixRawRow>(conn)?;
let mut nullifiers: Vec<NullifierInfo> = vec_raw_try_into(nullifiers_raw)?;

// If we got more than page_size, there are more results
let next_cursor = if nullifiers.len() > page_size.get() {
nullifiers.pop(); // Remove the extra element
nullifiers.last().map(|info| info.nullifier)
} else {
None
};

Ok(NullifiersPage { nullifiers, next_cursor })
}

/// Insert nullifiers for a block into the database.
///
/// # Parameters
Expand Down
11 changes: 11 additions & 0 deletions crates/store/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use deadpool_sync::InteractError;
use miden_node_proto::domain::account::NetworkAccountError;
use miden_node_proto::domain::block::InvalidBlockRange;
use miden_node_proto::errors::{ConversionError, GrpcError};
use miden_node_utils::ErrorReport;
use miden_node_utils::limiter::QueryLimitError;
use miden_protocol::Word;
use miden_protocol::account::AccountId;
Expand Down Expand Up @@ -313,6 +314,16 @@ pub enum ApplyBlockError {
DbUpdateTaskFailed(String),
}

impl From<ApplyBlockError> for Status {
    /// Maps block-application failures onto gRPC statuses: an invalid block is the
    /// caller's fault (`invalid_argument`); everything else is a server-side failure
    /// (`internal`). The full error report is used as the status message in both cases.
    fn from(err: ApplyBlockError) -> Self {
        let report = err.as_report();
        if matches!(err, ApplyBlockError::InvalidBlockError(_)) {
            Status::invalid_argument(report)
        } else {
            Status::internal(report)
        }
    }
}

#[derive(Error, Debug, GrpcError)]
pub enum GetBlockHeaderError {
#[error("database error")]
Expand Down
Loading