This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Merge branch 'master' into child-trie-soft-min
cheme authored Jun 25, 2019
2 parents 453927b + 1c2eaa3 commit 349f9a5
Showing 31 changed files with 1,396 additions and 486 deletions.
3 changes: 2 additions & 1 deletion Cargo.lock


13 changes: 5 additions & 8 deletions core/cli/src/informant.rs
@@ -40,39 +40,36 @@ pub fn start<C>(service: &Service<C>, exit: ::exit_future::Exit, handle: TaskExe
/// Creates an informant in the form of a `Future` that must be polled regularly.
pub fn build<C>(service: &Service<C>) -> impl Future<Item = (), Error = ()>
where C: Components {
- let network = service.network();
let client = service.client();
let mut last_number = None;
let mut last_update = time::Instant::now();

- let display_notifications = network.status().for_each(move |sync_status| {
+ let display_notifications = service.network_status().for_each(move |net_status| {

let info = client.info();
let best_number = info.chain.best_number.saturated_into::<u64>();
let best_hash = info.chain.best_hash;
let speed = move || speed(best_number, last_number, last_update);
last_update = time::Instant::now();
- let (status, target) = match (sync_status.sync.state, sync_status.sync.best_seen_block) {
+ let (status, target) = match (net_status.sync_state, net_status.best_seen_block) {
(SyncState::Idle, _) => ("Idle".into(), "".into()),
(SyncState::Downloading, None) => (format!("Syncing{}", speed()), "".into()),
(SyncState::Downloading, Some(n)) => (format!("Syncing{}", speed()), format!(", target=#{}", n)),
};
last_number = Some(best_number);
let finalized_number: u64 = info.chain.finalized_number.saturated_into::<u64>();
- let bandwidth_download = network.average_download_per_sec();
- let bandwidth_upload = network.average_upload_per_sec();
info!(
target: "substrate",
"{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}",
Colour::White.bold().paint(&status),
target,
Colour::White.bold().paint(format!("{}", sync_status.num_peers)),
Colour::White.bold().paint(format!("{}", net_status.num_connected_peers)),
Colour::White.paint(format!("{}", best_number)),
best_hash,
Colour::White.paint(format!("{}", finalized_number)),
info.chain.finalized_hash,
- TransferRateFormat(bandwidth_download),
- TransferRateFormat(bandwidth_upload),
+ TransferRateFormat(net_status.average_download_per_sec),
+ TransferRateFormat(net_status.average_upload_per_sec),
);

Ok(())
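The informant is consumed elsewhere in the CLI; as a rough usage sketch (assumed, not part of this diff), the future returned by `build` would be spawned onto the runtime executor and kept alive until the exit signal resolves:

// Hypothetical wiring with futures 0.1 combinators; `handle` is assumed to be a
// tokio TaskExecutor and `exit` a future that resolves on shutdown.
let informant = build(&service);
handle.spawn(informant.select(exit).then(|_| Ok(())));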
1 change: 0 additions & 1 deletion core/client/db/src/utils.rs
@@ -189,7 +189,6 @@ pub fn block_id_to_lookup_key<Block>(

/// Maps database error to client error
pub fn db_err(err: io::Error) -> client::error::Error {
- use std::error::Error;
client::error::Error::Backend(format!("{}", err))
}
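A one-line usage sketch of this helper (the error message is invented):

// Wrap a low-level I/O failure into the client's error type.
let err = db_err(io::Error::new(io::ErrorKind::Other, "metadata column missing"));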

7 changes: 4 additions & 3 deletions core/consensus/aura/src/lib.rs
@@ -63,7 +63,8 @@ use srml_aura::{
};
use substrate_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO};

- use slots::{CheckedHeader, SlotData, SlotWorker, SlotInfo, SlotCompatible, slot_now, check_equivocation};
+ use slots::{CheckedHeader, SlotData, SlotWorker, SlotInfo, SlotCompatible};
+ use slots::{SignedDuration, check_equivocation};

pub use aura_primitives::*;
pub use consensus_common::SyncOracle;
@@ -283,8 +284,8 @@ impl<H, B, C, E, I, P, Error, SO> SlotWorker<B> for AuraWorker<C, E, I, P, SO> w
Box::new(proposal_work.map(move |b| {
// minor hack since we don't have access to the timestamp
// that is actually set by the proposer.
- let slot_after_building = slot_now(slot_duration);
- if slot_after_building != Some(slot_num) {
+ let slot_after_building = SignedDuration::default().slot_now(slot_duration);
+ if slot_after_building != slot_num {
info!(
"Discarding proposal for slot {}; block production took too long",
slot_num
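The change above replaces the free function `slot_now`, which returned an `Option<u64>`, with `SignedDuration::default().slot_now(slot_duration)`, which returns the slot number directly. As a rough sketch of what such a wall-clock slot computation amounts to (assumptions: `slot_duration` is in milliseconds, and the real `SignedDuration` helper in the slots crate may differ in detail):

use std::time::{SystemTime, UNIX_EPOCH};

// Approximate "current slot" from wall-clock time: milliseconds since the Unix
// epoch divided by the slot duration in milliseconds.
fn approx_slot_now(slot_duration_ms: u64) -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time is after the Unix epoch; qed");
    (now.as_secs() * 1_000 + u64::from(now.subsec_millis())) / slot_duration_ms
}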
9 changes: 9 additions & 0 deletions core/consensus/babe/primitives/src/lib.rs
@@ -51,6 +51,15 @@ pub struct BabeConfiguration {
///
/// Dynamic thresholds may be supported in the future.
pub threshold: u64,

/// The minimum number of blocks that must be received before running the
/// median algorithm to compute the offset between the on-chain time and the
/// local time. Currently, only the value provided by this type at genesis
/// will be used, but this is subject to change.
///
/// Blocks less than `self.median_required_blocks` must be generated by an
/// *initial validator* ― that is, a node that was a validator at genesis.
pub median_required_blocks: u64,
}

#[cfg(feature = "std")]
130 changes: 120 additions & 10 deletions core/consensus/babe/src/lib.rs
@@ -30,15 +30,20 @@ use digest::CompatibleDigestItem;
pub use digest::{BabePreDigest, BABE_VRF_PREFIX};
pub use babe_primitives::*;
pub use consensus_common::SyncOracle;
use consensus_common::import_queue::{
SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport,
SharedFinalityProofRequestBuilder,
};
use consensus_common::well_known_cache_keys::Id as CacheKeyId;
use runtime_primitives::{generic, generic::{BlockId, OpaqueDigestItemId}, Justification};
use runtime_primitives::traits::{
Block, Header, DigestItemFor, ProvideRuntimeApi,
- SimpleBitOps,
+ SimpleBitOps, Zero,
};
- use std::{sync::Arc, u64, fmt::{Debug, Display}};
+ use std::{sync::Arc, u64, fmt::{Debug, Display}, time::{Instant, Duration}};
use runtime_support::serde::{Serialize, Deserialize};
use parity_codec::{Decode, Encode};
use parking_lot::Mutex;
use primitives::{crypto::Pair, sr25519};
use merlin::Transcript;
use inherents::{InherentDataProviders, InherentData};
@@ -77,7 +82,7 @@ use futures::{Future, IntoFuture, future};
use tokio_timer::Timeout;
use log::{error, warn, debug, info, trace};

- use slots::{SlotWorker, SlotData, SlotInfo, SlotCompatible, slot_now};
+ use slots::{SlotWorker, SlotData, SlotInfo, SlotCompatible, SignedDuration};

pub use babe_primitives::AuthorityId;

@@ -332,8 +337,8 @@ impl<Hash, H, B, C, E, I, Error, SO> SlotWorker<B> for BabeWorker<C, E, I, SO> w
Box::new(proposal_work.map(move |b| {
// minor hack since we don't have access to the timestamp
// that is actually set by the proposer.
- let slot_after_building = slot_now(slot_duration);
- if slot_after_building != Some(slot_num) {
+ let slot_after_building = SignedDuration::default().slot_now(slot_duration);
+ if slot_after_building != slot_num {
info!(
target: "babe",
"Discarding proposal for slot {}; block production took too long",
@@ -512,7 +517,8 @@ fn check_header<B: Block + Sized, C: AuxStore>(
pub struct BabeVerifier<C> {
client: Arc<C>,
inherent_data_providers: inherents::InherentDataProviders,
- threshold: u64,
+ config: Config,
+ timestamps: Mutex<(Option<Duration>, Vec<(Instant, u64)>)>,
}

impl<C> BabeVerifier<C> {
@@ -540,6 +546,38 @@ impl<C> BabeVerifier<C> {
}
}

fn median_algorithm(
median_required_blocks: u64,
slot_duration: u64,
slot_num: u64,
slot_now: u64,
timestamps: &mut (Option<Duration>, Vec<(Instant, u64)>),
) {
let num_timestamps = timestamps.1.len();
if num_timestamps as u64 >= median_required_blocks && median_required_blocks > 0 {
let mut new_list: Vec<_> = timestamps.1.iter().map(|&(t, sl)| {
let offset: u128 = u128::from(slot_duration)
.checked_mul(1_000_000u128) // self.config.get() returns *milliseconds*
.and_then(|x| x.checked_mul(u128::from(slot_num).saturating_sub(u128::from(sl))))
.expect("we cannot have timespans long enough for this to overflow; qed");
const NANOS_PER_SEC: u32 = 1_000_000_000;
let nanos = (offset % u128::from(NANOS_PER_SEC)) as u32;
let secs = (offset / u128::from(NANOS_PER_SEC)) as u64;
t + Duration::new(secs, nanos)
}).collect();
// FIXME #2926: use a selection algorithm instead of a full sorting algorithm.
new_list.sort_unstable();
let &median = new_list
.get(num_timestamps / 2)
.expect("we have at least one timestamp, so this is a valid index; qed");
timestamps.1.clear();
// FIXME #2927: pass this to the block authoring logic somehow
timestamps.0.replace(Instant::now() - median);
} else {
timestamps.1.push((Instant::now(), slot_now))
}
}
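A hypothetical walk-through of the buffering behaviour above, with invented values: `median_required_blocks = 2`, so the first two calls only record `(arrival-time, slot)` pairs, and the next call finds a full buffer, computes the median-based offset, and clears it.

// Values are invented purely to show the control flow; slot_duration = 3_000 ms.
let mut timestamps: (Option<Duration>, Vec<(Instant, u64)>) = (None, Vec::new());
median_algorithm(2, 3_000, 100, 100, &mut timestamps); // buffers (now, 100)
median_algorithm(2, 3_000, 100, 101, &mut timestamps); // buffers (now, 101)
median_algorithm(2, 3_000, 100, 102, &mut timestamps); // buffer full: compute and clear
assert!(timestamps.0.is_some() && timestamps.1.is_empty());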

impl<B: Block, C> Verifier<B> for BabeVerifier<C> where
C: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache<B>,
C::Api: BlockBuilderApi<B> + BabeApi<B>,
@@ -582,7 +620,7 @@ impl<B: Block, C> Verifier<B> for BabeVerifier<C> where
header,
hash,
&authorities[..],
- self.threshold,
+ self.config.threshold(),
)?;
match checked_header {
CheckedHeader::Checked(pre_header, (pre_digest, seal)) => {
@@ -629,7 +667,13 @@ impl<B: Block, C> Verifier<B> for BabeVerifier<C> where
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
};

+ median_algorithm(
+ self.config.0.median_required_blocks,
+ self.config.get(),
+ slot_num,
+ slot_now,
+ &mut *self.timestamps.lock(),
+ );
// FIXME #1019 extract authorities
Ok((import_block, maybe_keys))
}
@@ -739,6 +783,72 @@ fn claim_slot(
get_keypair(key).vrf_sign_n_check(transcript, |inout| check(inout, threshold))
}

fn initialize_authorities_cache<B, C>(client: &C) -> Result<(), ConsensusError> where
B: Block,
C: ProvideRuntimeApi + ProvideCache<B>,
C::Api: BabeApi<B>,
{
// no cache => no initialization
let cache = match client.cache() {
Some(cache) => cache,
None => return Ok(()),
};

// check if we already have initialized the cache
let genesis_id = BlockId::Number(Zero::zero());
let genesis_authorities: Option<Vec<AuthorityId>> = cache
.get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id)
.and_then(|v| Decode::decode(&mut &v[..]));
if genesis_authorities.is_some() {
return Ok(());
}

let map_err = |error| consensus_common::Error::from(consensus_common::Error::ClientImport(
format!(
"Error initializing authorities cache: {}",
error,
)));
let genesis_authorities = authorities(client, &genesis_id)?;
cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode())
.map_err(map_err)
}

/// Start an import queue for the Babe consensus algorithm.
pub fn import_queue<B, C, E>(
config: Config,
block_import: SharedBlockImport<B>,
justification_import: Option<SharedJustificationImport<B>>,
finality_proof_import: Option<SharedFinalityProofImport<B>>,
finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<B>>,
client: Arc<C>,
inherent_data_providers: InherentDataProviders,
) -> Result<BabeImportQueue<B>, consensus_common::Error> where
B: Block,
C: 'static + ProvideRuntimeApi + ProvideCache<B> + Send + Sync + AuxStore,
C::Api: BlockBuilderApi<B> + BabeApi<B>,
DigestItemFor<B>: CompatibleDigestItem,
E: 'static,
{
register_babe_inherent_data_provider(&inherent_data_providers, config.get())?;
initialize_authorities_cache(&*client)?;

let verifier = Arc::new(
BabeVerifier {
client: client,
inherent_data_providers,
timestamps: Default::default(),
config,
}
);
Ok(BasicQueue::new(
verifier,
block_import,
justification_import,
finality_proof_import,
finality_proof_request_builder,
))
}
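For orientation, a hedged wiring sketch for this entry point; `client` and `block_import` are assumed to come from the surrounding service setup, and `Config::get_or_compute` is an assumed constructor rather than something shown in this diff:

// Hypothetical call site for the new import queue constructor.
let config = Config::get_or_compute(&*client)?;
let queue = import_queue::<_, _, ()>(
    config,
    block_import,
    None, // justification import
    None, // finality proof import
    None, // finality proof request builder
    client.clone(),
    InherentDataProviders::new(),
)?;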

#[cfg(test)]
#[allow(dead_code, unused_imports, deprecated)]
// FIXME #2532: need to allow deprecated until refactor is done
@@ -753,7 +863,6 @@ mod tests {
use network::test::{Block as TestBlock, PeersClient};
use runtime_primitives::traits::{Block as BlockT, DigestFor};
use network::config::ProtocolConfig;
- use parking_lot::Mutex;
use tokio::runtime::current_thread;
use keyring::sr25519::Keyring;
use super::generic::DigestItem;
@@ -837,7 +946,8 @@ mod tests {
Arc::new(BabeVerifier {
client,
inherent_data_providers,
- threshold: config.threshold(),
+ config,
+ timestamps: Default::default(),
})
}

2 changes: 1 addition & 1 deletion core/consensus/common/Cargo.toml
@@ -7,7 +7,6 @@ edition = "2018"

[dependencies]
derive_more = "0.14.0"
- crossbeam-channel = "0.3.4"
libp2p = { version = "0.9.0", default-features = false }
log = "0.4"
primitives = { package = "substrate-primitives", path= "../../primitives" }
@@ -16,6 +15,7 @@ futures = "0.1"
rstd = { package = "sr-std", path = "../../sr-std" }
runtime_version = { package = "sr-version", path = "../../sr-version" }
runtime_primitives = { package = "sr-primitives", path = "../../sr-primitives" }
+ tokio-executor = "0.1.6"
tokio-timer = "0.2"
parity-codec = { version = "3.3", features = ["derive"] }
parking_lot = "0.8.0"