prospective-parachains: respond with multiple backable candidates (#3160)

Fixes #3129
alindima authored Feb 6, 2024
1 parent 53f615d commit 7df1ae3
Showing 9 changed files with 844 additions and 92 deletions.
163 changes: 141 additions & 22 deletions polkadot/node/core/prospective-parachains/src/fragment_tree.rs
@@ -756,53 +756,127 @@ impl FragmentTree {
depths.iter_ones().collect()
}

/// Select a candidate after the given `required_path` which passes
/// the predicate.
/// Select `count` candidates after the given `required_path` which pass
/// the predicate and have not already been backed on chain.
///
/// If there are multiple possibilities, this will select the first one.
///
/// This returns `None` if there is no candidate meeting those criteria.
/// Does an exhaustive search into the tree starting after `required_path`.
/// If there are multiple possibilities of size `count`, this will select the first one.
/// If there is no chain of size `count` that matches the criteria, this will return the largest
/// chain it could find that meets the criteria.
/// If there are no candidates meeting those criteria, returns an empty `Vec`.
/// Cycles are accepted; see the `Cycles` section in the module docs.
///
/// The intention of the `required_path` is to allow queries on the basis of
/// one or more candidates which were previously pending availability becoming
/// available and opening up more room on the core.
pub(crate) fn select_child(
pub(crate) fn select_children(
&self,
required_path: &[CandidateHash],
count: u32,
pred: impl Fn(&CandidateHash) -> bool,
) -> Option<CandidateHash> {
) -> Vec<CandidateHash> {
let base_node = {
// traverse the required path.
let mut node = NodePointer::Root;
for required_step in required_path {
node = self.node_candidate_child(node, &required_step)?;
if let Some(next_node) = self.node_candidate_child(node, &required_step) {
node = next_node;
} else {
return vec![]
};
}

node
};

// TODO [now]: taking the first selection might introduce bias
// TODO: taking the first best selection might introduce bias
// or become gameable.
//
// For plausibly unique parachains, this shouldn't matter much.
// figure out alternative selection criteria?
match base_node {
self.select_children_inner(base_node, count, count, &pred, &mut vec![])
}

// Try finding a candidate chain starting from `base_node` of length `expected_count`.
// If not possible, return the longest one we could find.
// Does a depth-first search, since we're optimistic that there won't be more than one such
// chain (parachains shouldn't usually have forks). So in the usual case, this will conclude
// in `O(expected_count)`.
// Cycles are accepted, but they cannot lead to unbounded execution time, because the maximum
// depth we'll reach is `expected_count`.
//
// Worst-case performance is `O(num_forks ^ expected_count)`.
// Although this is an exponential function, both parameters are effectively constants that can
// only be altered via sudo/governance, because:
// 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core`
// (because each validator in the assigned group can second `max_candidate_depth`
// candidates). The prospective-parachains subsystem assumes that the number of para forks is
// limited by the collator-protocol and backing subsystems. In practice, this is a constant which
// can only be altered by sudo or governance.
// 2. `expected_count` is equal to the number of cores a para is scheduled on (in an elastic
// scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a
// small number (1-3), capped by the total number of available cores (a constant alterable
// only via governance/sudo).
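// For example (purely illustrative numbers): with `max_candidate_depth = 4` and
// `max_validators_per_core = 5`, `num_forks` is at most 20, and for a para scheduled on 3
// cores (`expected_count = 3`) the search visits at most `20^3 = 8000` chains.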
fn select_children_inner(
&self,
base_node: NodePointer,
expected_count: u32,
remaining_count: u32,
pred: &dyn Fn(&CandidateHash) -> bool,
accumulator: &mut Vec<CandidateHash>,
) -> Vec<CandidateHash> {
if remaining_count == 0 {
// The best option is the chain we've accumulated so far.
return accumulator.to_vec();
}

let children: Vec<_> = match base_node {
NodePointer::Root => self
.nodes
.iter()
.take_while(|n| n.parent == NodePointer::Root)
.filter(|n| self.scope.get_pending_availability(&n.candidate_hash).is_none())
.filter(|n| pred(&n.candidate_hash))
.map(|n| n.candidate_hash)
.next(),
NodePointer::Storage(ptr) => self.nodes[ptr]
.children
.iter()
.filter(|n| self.scope.get_pending_availability(&n.1).is_none())
.filter(|n| pred(&n.1))
.map(|n| n.1)
.next(),
.enumerate()
.take_while(|(_, n)| n.parent == NodePointer::Root)
.filter(|(_, n)| self.scope.get_pending_availability(&n.candidate_hash).is_none())
.filter(|(_, n)| pred(&n.candidate_hash))
.map(|(ptr, n)| (NodePointer::Storage(ptr), n.candidate_hash))
.collect(),
NodePointer::Storage(base_node_ptr) => {
let base_node = &self.nodes[base_node_ptr];

base_node
.children
.iter()
.filter(|(_, hash)| self.scope.get_pending_availability(&hash).is_none())
.filter(|(_, hash)| pred(&hash))
.map(|(ptr, hash)| (*ptr, *hash))
.collect()
},
};

let mut best_result = accumulator.clone();
for (child_ptr, child_hash) in children {
accumulator.push(child_hash);

let result = self.select_children_inner(
child_ptr,
expected_count,
remaining_count - 1,
&pred,
accumulator,
);

accumulator.pop();

// Short-circuit the search if we've found a chain of the requested length. Otherwise,
// keep searching and track the longest chain found.
if result.len() == expected_count as usize {
return result
} else if best_result.len() < result.len() {
best_result = result;
}
}

best_result
}
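
The search above is easiest to follow on a toy example. Below is a minimal, self-contained sketch (editor's illustration with a hypothetical `ToyNode` type, not the real `FragmentTree`, and ignoring the pending-availability filter) of the same idea: depth-first, bounded by the requested length, short-circuiting as soon as a full-length chain is found and otherwise falling back to the longest chain seen.

```rust
// Editor's sketch, not part of the commit: a simplified model of `select_children_inner`.
struct ToyNode {
    hash: u64,
    children: Vec<ToyNode>,
}

// Depth-first search for a chain of `expected` hashes passing `pred`, starting from the
// given set of children. Short-circuits on a full-length chain; otherwise returns the
// longest chain encountered.
fn select_chain(
    children: &[ToyNode],
    expected: usize,
    remaining: usize,
    pred: &dyn Fn(u64) -> bool,
    acc: &mut Vec<u64>,
) -> Vec<u64> {
    if remaining == 0 {
        // The requested length has been reached; the accumulated chain is the answer.
        return acc.clone();
    }
    let mut best = acc.clone();
    for child in children.iter().filter(|c| pred(c.hash)) {
        acc.push(child.hash);
        let result = select_chain(&child.children, expected, remaining - 1, pred, acc);
        acc.pop();
        if result.len() == expected {
            // Found a chain of the requested length: stop searching.
            return result;
        } else if result.len() > best.len() {
            best = result;
        }
    }
    best
}

fn main() {
    // A -> B -> C, plus a dead-end fork A -> D.
    let roots = vec![ToyNode {
        hash: 0xA,
        children: vec![
            ToyNode { hash: 0xB, children: vec![ToyNode { hash: 0xC, children: vec![] }] },
            ToyNode { hash: 0xD, children: vec![] },
        ],
    }];
    assert_eq!(select_chain(&roots, 3, 3, &|_| true, &mut vec![]), vec![0xA, 0xB, 0xC]);
    // Asking for more than the tree contains yields the longest chain available.
    assert_eq!(select_chain(&roots, 5, 5, &|_| true, &mut vec![]), vec![0xA, 0xB, 0xC]);
}
```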

fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) {
@@ -987,6 +1061,7 @@ mod tests {
use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations;
use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData};
use polkadot_primitives_test_helpers as test_helpers;
use std::iter;

fn make_constraints(
min_relay_parent_number: BlockNumber,
@@ -1524,6 +1599,21 @@
assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash);
assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);

for count in 1..10 {
assert_eq!(
tree.select_children(&[], count, |_| true),
iter::repeat(candidate_a_hash)
.take(std::cmp::min(count as usize, max_depth + 1))
.collect::<Vec<_>>()
);
assert_eq!(
tree.select_children(&[candidate_a_hash], count - 1, |_| true),
iter::repeat(candidate_a_hash)
.take(std::cmp::min(count as usize - 1, max_depth))
.collect::<Vec<_>>()
);
}
}

#[test]
@@ -1591,6 +1681,35 @@
assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash);
assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);

assert_eq!(tree.select_children(&[], 1, |_| true), vec![candidate_a_hash],);
assert_eq!(
tree.select_children(&[], 2, |_| true),
vec![candidate_a_hash, candidate_b_hash],
);
assert_eq!(
tree.select_children(&[], 3, |_| true),
vec![candidate_a_hash, candidate_b_hash, candidate_a_hash],
);
assert_eq!(
tree.select_children(&[candidate_a_hash], 2, |_| true),
vec![candidate_b_hash, candidate_a_hash],
);

assert_eq!(
tree.select_children(&[], 6, |_| true),
vec![
candidate_a_hash,
candidate_b_hash,
candidate_a_hash,
candidate_b_hash,
candidate_a_hash
],
);
assert_eq!(
tree.select_children(&[candidate_a_hash, candidate_b_hash], 6, |_| true),
vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,],
);
}

#[test]
74 changes: 43 additions & 31 deletions polkadot/node/core/prospective-parachains/src/lib.rs
@@ -146,12 +146,20 @@ async fn run_iteration<Context>(
handle_candidate_seconded(view, para, candidate_hash),
ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) =>
handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?,
ProspectiveParachainsMessage::GetBackableCandidate(
ProspectiveParachainsMessage::GetBackableCandidates(
relay_parent,
para,
count,
required_path,
tx,
) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx),
) => answer_get_backable_candidates(
&view,
relay_parent,
para,
count,
required_path,
tx,
),
ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) =>
answer_hypothetical_frontier_request(&view, request, tx),
ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) =>
@@ -552,12 +560,13 @@ async fn handle_candidate_backed<Context>(
Ok(())
}

fn answer_get_backable_candidate(
fn answer_get_backable_candidates(
view: &View,
relay_parent: Hash,
para: ParaId,
count: u32,
required_path: Vec<CandidateHash>,
tx: oneshot::Sender<Option<(CandidateHash, Hash)>>,
tx: oneshot::Sender<Vec<(CandidateHash, Hash)>>,
) {
let data = match view.active_leaves.get(&relay_parent) {
None => {
@@ -568,7 +577,7 @@ fn answer_get_backable_candidate(
"Requested backable candidate for inactive relay-parent."
);

let _ = tx.send(None);
let _ = tx.send(vec![]);
return
},
Some(d) => d,
@@ -583,7 +592,7 @@ fn answer_get_backable_candidate(
"Requested backable candidate for inactive para."
);

let _ = tx.send(None);
let _ = tx.send(vec![]);
return
},
Some(tree) => tree,
@@ -598,46 +607,49 @@ fn answer_get_backable_candidate(
"No candidate storage for active para",
);

let _ = tx.send(None);
let _ = tx.send(vec![]);
return
},
Some(s) => s,
};

let Some(child_hash) =
tree.select_child(&required_path, |candidate| storage.is_backed(candidate))
else {
let backable_candidates: Vec<_> = tree
.select_children(&required_path, count, |candidate| storage.is_backed(candidate))
.into_iter()
.filter_map(|child_hash| {
storage.relay_parent_by_candidate_hash(&child_hash).map_or_else(
|| {
gum::error!(
target: LOG_TARGET,
?child_hash,
para_id = ?para,
"Candidate is present in fragment tree but not in candidate's storage!",
);
None
},
|parent_hash| Some((child_hash, parent_hash)),
)
})
.collect();

if backable_candidates.is_empty() {
gum::trace!(
target: LOG_TARGET,
?required_path,
para_id = ?para,
%relay_parent,
"Could not find any backable candidate",
);

let _ = tx.send(None);
return
};
let Some(candidate_relay_parent) = storage.relay_parent_by_candidate_hash(&child_hash) else {
gum::error!(
} else {
gum::trace!(
target: LOG_TARGET,
?child_hash,
para_id = ?para,
"Candidate is present in fragment tree but not in candidate's storage!",
?relay_parent,
?backable_candidates,
"Found backable candidates",
);
let _ = tx.send(None);
return
};

gum::trace!(
target: LOG_TARGET,
?relay_parent,
candidate_hash = ?child_hash,
?candidate_relay_parent,
"Found backable candidate",
);
}

let _ = tx.send(Some((child_hash, candidate_relay_parent)));
let _ = tx.send(backable_candidates);
}
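
From a caller's perspective, the change is the request/response shape: `GetBackableCandidates` now carries a `count` and is answered with a `Vec<(CandidateHash, Hash)>`, where an empty vector takes the place of the old `None`. The sketch below mirrors that round-trip with stand-in types (the real code uses `CandidateHash`, `Hash` and `ParaId` from `polkadot-primitives` and the overseer's channels; only the `futures` oneshot channel is assumed here).

```rust
// Editor's sketch, not part of the commit: stand-in types only.
use futures::channel::oneshot;
use futures::executor::block_on;

type CandidateHash = [u8; 32]; // stand-in for `polkadot_primitives::CandidateHash`
type Hash = [u8; 32]; // stand-in for the relay-chain block hash
type ParaId = u32; // stand-in for `polkadot_primitives::Id`

// Mirrors the variant introduced by this commit:
// GetBackableCandidates(relay_parent, para, count, required_path, response_sender)
enum ProspectiveParachainsMessage {
    GetBackableCandidates(Hash, ParaId, u32, Vec<CandidateHash>, oneshot::Sender<Vec<(CandidateHash, Hash)>>),
}

fn main() {
    let (tx, rx) = oneshot::channel();
    let relay_parent: Hash = [1; 32];

    // Ask for up to 3 backable candidates for para 2000, on top of an empty required path.
    let msg =
        ProspectiveParachainsMessage::GetBackableCandidates(relay_parent, 2000, 3, vec![], tx);

    // In the node this message goes to the prospective-parachains subsystem; here it is
    // answered inline. An empty `Vec` now plays the role of the old `None` response.
    match msg {
        ProspectiveParachainsMessage::GetBackableCandidates(_, _, count, _, sender) => {
            let response: Vec<(CandidateHash, Hash)> = vec![([7; 32], relay_parent)];
            assert!(response.len() <= count as usize);
            let _ = sender.send(response);
        },
    }

    // The caller receives zero or more (candidate hash, candidate relay parent) pairs.
    let backable = block_on(rx).unwrap_or_default();
    for (candidate_hash, candidate_relay_parent) in backable {
        println!("backable candidate {candidate_hash:?} (relay parent {candidate_relay_parent:?})");
    }
}
```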

fn answer_hypothetical_frontier_request(