Skip to content

Commit

Permalink
Use header cache for blockchain cache (paritytech#5929)
Browse files Browse the repository at this point in the history
  • Loading branch information
arkpar authored May 7, 2020
1 parent 12184df commit 5775597
Show file tree
Hide file tree
Showing 5 changed files with 34 additions and 23 deletions.
23 changes: 16 additions & 7 deletions client/db/src/cache/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use std::{sync::Arc, collections::{HashMap, hash_map::Entry}};
use parking_lot::RwLock;

use sc_client_api::blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, Cache as BlockchainCache};
use sp_blockchain::Result as ClientResult;
use sp_blockchain::{Result as ClientResult, HeaderMetadataCache};
use sp_database::{Database, Transaction};
use codec::{Encode, Decode};
use sp_runtime::generic::BlockId;
Expand Down Expand Up @@ -78,6 +78,7 @@ impl<T> CacheItemT for T where T: Clone + Decode + Encode + PartialEq {}
/// Database-backed blockchain data cache.
pub struct DbCache<Block: BlockT> {
cache_at: HashMap<CacheKeyId, ListCache<Block, Vec<u8>, self::list_storage::DbStorage>>,
header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
db: Arc<dyn Database<DbHash>>,
key_lookup_column: u32,
header_column: u32,
Expand All @@ -90,6 +91,7 @@ impl<Block: BlockT> DbCache<Block> {
/// Create new cache.
pub fn new(
db: Arc<dyn Database<DbHash>>,
header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
key_lookup_column: u32,
header_column: u32,
cache_column: u32,
Expand All @@ -99,6 +101,7 @@ impl<Block: BlockT> DbCache<Block> {
Self {
cache_at: HashMap::new(),
db,
header_metadata_cache,
key_lookup_column,
header_column,
cache_column,
Expand Down Expand Up @@ -348,18 +351,24 @@ impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
at: &BlockId<Block>,
) -> ClientResult<Option<((NumberFor<Block>, Block::Hash), Option<(NumberFor<Block>, Block::Hash)>, Vec<u8>)>> {
let mut cache = self.0.write();
let header_metadata_cache = cache.header_metadata_cache.clone();
let cache = cache.get_cache(*key)?;
let storage = cache.storage();
let db = storage.db();
let columns = storage.columns();
let at = match *at {
BlockId::Hash(hash) => {
let header = utils::require_header::<Block>(
&**db,
columns.key_lookup,
columns.header,
BlockId::Hash(hash.clone()))?;
ComplexBlockId::new(hash, *header.number())
match header_metadata_cache.header_metadata(hash) {
Some(metadata) => ComplexBlockId::new(hash, metadata.number),
None => {
let header = utils::require_header::<Block>(
&**db,
columns.key_lookup,
columns.header,
BlockId::Hash(hash.clone()))?;
ComplexBlockId::new(hash, *header.number())
}
}
},
BlockId::Number(number) => {
let hash = utils::require_header::<Block>(
Expand Down
4 changes: 3 additions & 1 deletion client/db/src/changes_tries_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ use parking_lot::RwLock;
use sp_blockchain::{Error as ClientError, Result as ClientResult};
use sp_trie::MemoryDB;
use sc_client_api::backend::PrunableStateChangesTrieStorage;
use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache};
use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache, HeaderMetadataCache};
use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash};
use sp_core::storage::PrefixedStorageKey;
use sp_database::Transaction;
Expand Down Expand Up @@ -114,6 +114,7 @@ impl<Block: BlockT> DbChangesTrieStorage<Block> {
/// Create new changes trie storage.
pub fn new(
db: Arc<dyn Database<DbHash>>,
header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
meta_column: u32,
changes_tries_column: u32,
key_lookup_column: u32,
Expand All @@ -137,6 +138,7 @@ impl<Block: BlockT> DbChangesTrieStorage<Block> {
min_blocks_to_keep,
cache: DbCacheSync(RwLock::new(DbCache::new(
db.clone(),
header_metadata_cache,
key_lookup_column,
header_column,
cache_column,
Expand Down
9 changes: 5 additions & 4 deletions client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -368,7 +368,7 @@ pub struct BlockchainDb<Block: BlockT> {
db: Arc<dyn Database<DbHash>>,
meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
header_metadata_cache: HeaderMetadataCache<Block>,
header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
}

impl<Block: BlockT> BlockchainDb<Block> {
Expand All @@ -379,7 +379,7 @@ impl<Block: BlockT> BlockchainDb<Block> {
db,
leaves: RwLock::new(leaves),
meta: Arc::new(RwLock::new(meta)),
header_metadata_cache: HeaderMetadataCache::default(),
header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
})
}

Expand Down Expand Up @@ -505,7 +505,7 @@ impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
type Error = sp_blockchain::Error;

fn header_metadata(&self, hash: Block::Hash) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
self.header_metadata_cache.header_metadata(hash).or_else(|_| {
self.header_metadata_cache.header_metadata(hash).map_or_else(|| {
self.header(BlockId::hash(hash))?.map(|header| {
let header_metadata = CachedHeaderMetadata::from(&header);
self.header_metadata_cache.insert_header_metadata(
Expand All @@ -514,7 +514,7 @@ impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
);
header_metadata
}).ok_or(ClientError::UnknownBlock(format!("header not found in db: {}", hash)))
})
}, Ok)
}

fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
Expand Down Expand Up @@ -831,6 +831,7 @@ impl<Block: BlockT> Backend<Block> {
let offchain_storage = offchain::LocalStorage::new(db.clone());
let changes_tries_storage = DbChangesTrieStorage::new(
db,
blockchain.header_metadata_cache.clone(),
columns::META,
columns::CHANGES_TRIE,
columns::KEY_LOOKUP,
Expand Down
10 changes: 6 additions & 4 deletions client/db/src/light.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ pub struct LightStorage<Block: BlockT> {
db: Arc<dyn Database<DbHash>>,
meta: RwLock<Meta<NumberFor<Block>, Block::Hash>>,
cache: Arc<DbCacheSync<Block>>,
header_metadata_cache: HeaderMetadataCache<Block>,
header_metadata_cache: Arc<HeaderMetadataCache<Block>>,

#[cfg(not(target_os = "unknown"))]
io_stats: FrozenForDuration<kvdb::IoStats>,
Expand All @@ -84,8 +84,10 @@ impl<Block: BlockT> LightStorage<Block> {

fn from_kvdb(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
let meta = read_meta::<Block>(&*db, columns::HEADER)?;
let header_metadata_cache = Arc::new(HeaderMetadataCache::default());
let cache = DbCache::new(
db.clone(),
header_metadata_cache.clone(),
columns::KEY_LOOKUP,
columns::HEADER,
columns::CACHE,
Expand All @@ -97,7 +99,7 @@ impl<Block: BlockT> LightStorage<Block> {
db,
meta: RwLock::new(meta),
cache: Arc::new(DbCacheSync(RwLock::new(cache))),
header_metadata_cache: HeaderMetadataCache::default(),
header_metadata_cache,
#[cfg(not(target_os = "unknown"))]
io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
})
Expand Down Expand Up @@ -188,7 +190,7 @@ impl<Block: BlockT> HeaderMetadata<Block> for LightStorage<Block> {
type Error = ClientError;

fn header_metadata(&self, hash: Block::Hash) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
self.header_metadata_cache.header_metadata(hash).or_else(|_| {
self.header_metadata_cache.header_metadata(hash).map_or_else(|| {
self.header(BlockId::hash(hash))?.map(|header| {
let header_metadata = CachedHeaderMetadata::from(&header);
self.header_metadata_cache.insert_header_metadata(
Expand All @@ -197,7 +199,7 @@ impl<Block: BlockT> HeaderMetadata<Block> for LightStorage<Block> {
);
header_metadata
}).ok_or(ClientError::UnknownBlock(format!("header not found in db: {}", hash)))
})
}, Ok)
}

fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
Expand Down
11 changes: 4 additions & 7 deletions primitives/blockchain/src/header_metadata.rs
Original file line number Diff line number Diff line change
Expand Up @@ -239,19 +239,16 @@ impl<Block: BlockT> Default for HeaderMetadataCache<Block> {
}
}

impl<Block: BlockT> HeaderMetadata<Block> for HeaderMetadataCache<Block> {
type Error = String;

fn header_metadata(&self, hash: Block::Hash) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
impl<Block: BlockT> HeaderMetadataCache<Block> {
pub fn header_metadata(&self, hash: Block::Hash) -> Option<CachedHeaderMetadata<Block>> {
self.cache.write().get(&hash).cloned()
.ok_or("header metadata not found in cache".to_owned())
}

fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
pub fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
self.cache.write().put(hash, metadata);
}

fn remove_header_metadata(&self, hash: Block::Hash) {
pub fn remove_header_metadata(&self, hash: Block::Hash) {
self.cache.write().pop(&hash);
}
}
Expand Down

0 comments on commit 5775597

Please sign in to comment.