
Commit

Merge branch 'master' into dom/remove-getters-fast-unstake
liamaharon authored May 20, 2024
2 parents 79675bc + e7b6d7d commit 31ba666
Showing 5 changed files with 41 additions and 20 deletions.
@@ -559,7 +559,7 @@ impl BlockEntry {
self.distributed_assignments.resize(new_len, false);
self.distributed_assignments |= bitfield;

-		// If the an operation did not change our current bitfied, we return true.
+		// If the an operation did not change our current bitfield, we return true.
let distributed = total_one_bits == self.distributed_assignments.count_ones();

distributed
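
The check above works because OR-ing in a bitfield can only set bits, never clear them, so the popcount is unchanged exactly when every incoming bit was already set. A minimal sketch of the same idea with a plain integer bitmask rather than the commit's bit-vector type (the function name `or_unchanged` is hypothetical):

fn or_unchanged(current: &mut u32, incoming: u32) -> bool {
    // Popcount before the OR; OR can only add bits, so if the count is the
    // same afterwards, `incoming` contributed nothing new.
    let before = current.count_ones();
    *current |= incoming;
    before == current.count_ones()
}

fn main() {
    let mut bits = 0b1010_u32;
    assert!(or_unchanged(&mut bits, 0b0010)); // bit already set: no change
    assert!(!or_unchanged(&mut bits, 0b0100)); // new bit: bitfield changed
}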
2 changes: 1 addition & 1 deletion polkadot/node/gum/src/lib.rs
@@ -40,7 +40,7 @@
//!
//! ### Log levels
//!
-//! All of the the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available.
+//! All of the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available.
//! In decreasing order of priority they are:
//!
//! - `error!`
2 changes: 1 addition & 1 deletion polkadot/node/network/bridge/src/network.rs
@@ -177,7 +177,7 @@ fn send_message<M>(
// network used `Bytes` this would not be necessary.
//
// peer may have gotten disconnect by the time `send_message()` is called
-	// at which point the the sink is not available.
+	// at which point the sink is not available.
let last_peer = peers.pop();
peers.into_iter().for_each(|peer| {
if let Some(sink) = notification_sinks.get(&(peer_set, peer)) {
@@ -132,7 +132,7 @@ enum MuxedMessage {
/// A new request has arrived and should be handled.
NewRequest(IncomingRequest<DisputeRequest>),

-	/// Rate limit timer hit - is is time to process one row of messages.
+	/// Rate limit timer hit - is time to process one row of messages.
///
/// This is the result of calling `self.peer_queues.pop_reqs()`.
WakePeerQueuesPopReqs(Vec<IncomingRequest<DisputeRequest>>),
53 changes: 37 additions & 16 deletions substrate/utils/frame/remote-externalities/src/lib.rs
@@ -834,30 +834,51 @@ where
) -> Result<Vec<StorageKey>, &'static str> {
let retry_strategy =
FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES);
-		let get_child_keys_closure = || {
-			#[allow(deprecated)]
-			substrate_rpc_client::ChildStateApi::storage_keys(
-				client,
-				PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()),
-				child_prefix.clone(),
-				Some(at),
-			)
-		};
-		let child_keys =
-			Retry::spawn(retry_strategy, get_child_keys_closure).await.map_err(|e| {
-				error!(target: LOG_TARGET, "Error = {:?}", e);
-				"rpc child_get_keys failed."
-			})?;
+		let mut all_child_keys = Vec::new();
+		let mut start_key = None;
+
+		loop {
+			let get_child_keys_closure = || {
+				let top_key = PrefixedStorageKey::new(prefixed_top_key.0.clone());
+				substrate_rpc_client::ChildStateApi::storage_keys_paged(
+					client,
+					top_key,
+					Some(child_prefix.clone()),
+					Self::DEFAULT_KEY_DOWNLOAD_PAGE,
+					start_key.clone(),
+					Some(at),
+				)
+			};
+
+			let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure)
+				.await
+				.map_err(|e| {
+					error!(target: LOG_TARGET, "Error = {:?}", e);
+					"rpc child_get_keys failed."
+				})?;
+
+			let keys_count = child_keys.len();
+			if keys_count == 0 {
+				break;
+			}
+
+			start_key = child_keys.last().cloned();
+			all_child_keys.extend(child_keys);
+
+			if keys_count < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize {
+				break;
+			}
+		}

debug!(
target: LOG_TARGET,
"[thread = {:?}] scraped {} child-keys of the child-bearing top key: {}",
std::thread::current().id(),
-			child_keys.len(),
+			all_child_keys.len(),
HexDisplay::from(prefixed_top_key)
);

-		Ok(child_keys)
+		Ok(all_child_keys)
}
}
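
The replacement logic pages through child keys instead of fetching them in a single call: each iteration asks for at most `DEFAULT_KEY_DOWNLOAD_PAGE` keys starting after the last key seen, and the loop stops when a page comes back empty or shorter than the page size. A minimal standalone sketch of that pagination pattern, with a hypothetical in-memory `fetch_page` standing in for the `storage_keys_paged` RPC call:

const PAGE: usize = 3;

// Hypothetical stand-in for the RPC: returns at most PAGE keys strictly
// greater than `start_key`, in ascending order.
fn fetch_page(remote: &[u32], start_key: Option<u32>) -> Vec<u32> {
    remote
        .iter()
        .copied()
        .filter(|k| start_key.map_or(true, |s| *k > s))
        .take(PAGE)
        .collect()
}

fn main() {
    let remote_keys: Vec<u32> = (0..10).collect();
    let mut all_keys = Vec::new();
    let mut start_key = None;

    loop {
        let page = fetch_page(&remote_keys, start_key);
        let count = page.len();
        if count == 0 {
            break;
        }
        start_key = page.last().copied();
        all_keys.extend(page);
        // A short page means the source has no more keys to hand out.
        if count < PAGE {
            break;
        }
    }

    assert_eq!(all_keys, remote_keys);
    println!("fetched {} keys in pages of up to {}", all_keys.len(), PAGE);
}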

