
Resolve nightly-2021-10-05 clippy complaints
mvines committed Oct 6, 2021
1 parent eb4ce3d commit 7027d56
Showing 53 changed files with 229 additions and 293 deletions.
36 changes: 9 additions & 27 deletions bucket_map/src/bucket_storage.rs
@@ -121,9 +121,7 @@ impl BucketStorage {
}

pub fn uid(&self, ix: u64) -> Uid {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
@@ -133,12 +131,8 @@ impl BucketStorage {
}

pub fn allocate(&self, ix: u64, uid: Uid) -> Result<(), BucketStorageError> {
if ix >= self.capacity() {
panic!("allocate: bad index size");
}
if UID_UNLOCKED == uid {
panic!("allocate: bad uid");
}
assert!(ix < self.capacity(), "allocate: bad index size");
assert!(UID_UNLOCKED != uid, "allocate: bad uid");
let mut e = Err(BucketStorageError::AlreadyAllocated);
let ix = (ix * self.cell_size) as usize;
//debug!("ALLOC {} {}", ix, uid);
@@ -154,12 +148,8 @@
}

pub fn free(&self, ix: u64, uid: Uid) {
if ix >= self.capacity() {
panic!("free: bad index size");
}
if UID_UNLOCKED == uid {
panic!("free: bad uid");
}
assert!(ix < self.capacity(), "bad index size");
assert!(UID_UNLOCKED != uid, "free: bad uid");
let ix = (ix * self.cell_size) as usize;
//debug!("FREE {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
@@ -177,9 +167,7 @@ impl BucketStorage {
}

pub fn get<T: Sized>(&self, ix: u64) -> &T {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>();
let item_slice: &[u8] = &self.mmap[start..end];
@@ -199,9 +187,7 @@ impl BucketStorage {
}

pub fn get_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &[T] {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let ix = self.cell_size * ix;
let start = ix as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>() * len as usize;
@@ -215,9 +201,7 @@

#[allow(clippy::mut_from_ref)]
pub fn get_mut<T: Sized>(&self, ix: u64) -> &mut T {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>();
let item_slice: &[u8] = &self.mmap[start..end];
Expand All @@ -229,9 +213,7 @@ impl BucketStorage {

#[allow(clippy::mut_from_ref)]
pub fn get_mut_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &mut [T] {
if ix >= self.capacity() {
panic!("bad index size");
}
assert!(ix < self.capacity(), "bad index size");
let ix = self.cell_size * ix;
let start = ix as usize + std::mem::size_of::<Header>();
let end = start + std::mem::size_of::<T>() * len as usize;
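The repeated change in this file collapses an explicit bounds check followed by `panic!` into a single `assert!` with a message, which is the rewrite the newer clippy suggests (most likely its `manual_assert` lint; the lint name is my reading of the diff, not something stated in the commit). A minimal, self-contained sketch of the before/after shape, using a stand-in struct rather than the real `BucketStorage`:

```rust
// Stand-in type; the real code does this on BucketStorage with an mmap behind it.
struct Storage {
    cells: u64,
}

impl Storage {
    fn capacity(&self) -> u64 {
        self.cells
    }

    // Before: manual branch + panic!, which newer clippy flags.
    fn check_index_old(&self, ix: u64) {
        if ix >= self.capacity() {
            panic!("bad index size");
        }
    }

    // After: the equivalent assert!, as adopted throughout this commit.
    fn check_index_new(&self, ix: u64) {
        assert!(ix < self.capacity(), "bad index size");
    }
}

fn main() {
    let storage = Storage { cells: 8 };
    storage.check_index_old(3); // fine
    storage.check_index_new(3); // fine; both panic identically for ix >= 8
}
```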
10 changes: 1 addition & 9 deletions clap-utils/src/keypair.rs
@@ -324,19 +324,11 @@ pub fn presigner_from_pubkey_sigs(
})
}

#[derive(Debug)]
#[derive(Debug, Default)]
pub struct SignerFromPathConfig {
pub allow_null_signer: bool,
}

impl Default for SignerFromPathConfig {
fn default() -> Self {
Self {
allow_null_signer: false,
}
}
}

pub fn signer_from_path(
matches: &ArgMatches,
path: &str,
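Here a hand-written `Default` impl that only spells out the fields' own default values is replaced by `#[derive(Default)]`; this is the shape clippy's `derivable_impls` lint reports (again, the lint name is an inference from the diff). A small sketch of the equivalence:

```rust
// Before: a manual impl that just restates the field default (false for bool).
#[derive(Debug)]
pub struct SignerFromPathConfigOld {
    pub allow_null_signer: bool,
}

impl Default for SignerFromPathConfigOld {
    fn default() -> Self {
        Self {
            allow_null_signer: false,
        }
    }
}

// After: the derive expands to exactly the same impl.
#[derive(Debug, Default)]
pub struct SignerFromPathConfig {
    pub allow_null_signer: bool,
}

fn main() {
    assert!(!SignerFromPathConfigOld::default().allow_null_signer);
    assert!(!SignerFromPathConfig::default().allow_null_signer);
}
```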
6 changes: 3 additions & 3 deletions cli/src/main.rs
@@ -149,7 +149,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error

pub fn parse_args<'a>(
matches: &ArgMatches<'_>,
mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
let config = if let Some(config_file) = matches.value_of("config_file") {
Config::load(config_file).unwrap_or_default()
@@ -186,11 +186,11 @@ pub fn parse_args<'a>(
let CliCommandInfo {
command,
mut signers,
} = parse_command(matches, &default_signer, &mut wallet_manager)?;
} = parse_command(matches, &default_signer, wallet_manager)?;

if signers.is_empty() {
if let Ok(signer_info) =
default_signer.generate_unique_signers(vec![None], matches, &mut wallet_manager)
default_signer.generate_unique_signers(vec![None], matches, wallet_manager)
{
signers.extend(signer_info.signers);
}
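`wallet_manager` already arrives as a `&mut Option<Arc<RemoteWalletManager>>`, so binding it `mut` and then passing `&mut wallet_manager` only builds a `&mut &mut _` that the compiler has to reborrow; the newer clippy asks for the reference to be forwarded directly (this looks like the expanded `needless_borrow` lint, though the commit doesn't name it). A reduced sketch with stand-in types:

```rust
use std::sync::Arc;

struct RemoteWalletManager; // stand-in for the real remote-wallet type

fn parse_command(wallet_manager: &mut Option<Arc<RemoteWalletManager>>) -> bool {
    wallet_manager.is_some()
}

// Before: extra `mut` binding plus an extra `&mut` at the call site.
fn parse_args_old(mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>) -> bool {
    parse_command(&mut wallet_manager) // &mut &mut Option<_>, implicitly reborrowed
}

// After: the mutable reference is simply forwarded.
fn parse_args_new(wallet_manager: &mut Option<Arc<RemoteWalletManager>>) -> bool {
    parse_command(wallet_manager)
}

fn main() {
    let mut wm = Some(Arc::new(RemoteWalletManager));
    assert!(parse_args_old(&mut wm));
    assert!(parse_args_new(&mut wm));
}
```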
1 change: 1 addition & 0 deletions cli/src/nonce.rs
@@ -510,6 +510,7 @@ pub fn process_get_nonce(
config: &CliConfig,
nonce_account_pubkey: &Pubkey,
) -> ProcessResult {
#[allow(clippy::redundant_closure)]
match get_account_with_commitment(rpc_client, nonce_account_pubkey, config.commitment)
.and_then(|ref a| state_from_account(a))?
{
1 change: 1 addition & 0 deletions cli/tests/nonce.rs
@@ -215,6 +215,7 @@ fn full_battery_tests(
}

#[test]
#[allow(clippy::redundant_closure)]
fn test_create_account_with_seed() {
solana_logger::setup();
let mint_keypair = Keypair::new();
1 change: 1 addition & 0 deletions cli/tests/stake.rs
@@ -1,3 +1,4 @@
#![allow(clippy::redundant_closure)]
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,
1 change: 1 addition & 0 deletions cli/tests/transfer.rs
@@ -1,3 +1,4 @@
#![allow(clippy::redundant_closure)]
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,
3 changes: 3 additions & 0 deletions client/src/blockhash_query.rs
@@ -34,6 +34,7 @@ impl Source {
Ok((res.0, res.1))
}
Self::NonceAccount(ref pubkey) => {
#[allow(clippy::redundant_closure)]
let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
.and_then(|ref a| nonce_utils::data_from_account(a))?;
Ok((data.blockhash, data.fee_calculator))
@@ -80,6 +81,7 @@ impl Source {
Ok(blockhash)
}
Self::NonceAccount(ref pubkey) => {
#[allow(clippy::redundant_closure)]
let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
.and_then(|ref a| nonce_utils::data_from_account(a))?;
Ok(data.blockhash)
@@ -96,6 +98,7 @@ impl Source {
Ok(match self {
Self::Cluster => rpc_client.is_blockhash_valid(blockhash, commitment)?,
Self::NonceAccount(ref pubkey) => {
#[allow(clippy::redundant_closure)]
let _ = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment)
.and_then(|ref a| nonce_utils::data_from_account(a))?;
true
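The `#[allow(clippy::redundant_closure)]` additions above all guard the same shape: `.and_then(|ref a| f(a))`, where the closure receives the account by value and the `ref` pattern lets `f` borrow it. Clippy's suggestion to replace the closure with the bare function path would not type-check here, so the warning is silenced rather than acted on. A reduced, self-contained sketch (the account type and helpers are stand-ins, not the real nonce utilities):

```rust
#[derive(Debug)]
struct Account {
    lamports: u64,
}

fn get_account() -> Result<Account, String> {
    Ok(Account { lamports: 42 })
}

// Takes a *reference*, like the real data_from_account(&Account).
fn data_from_account(account: &Account) -> Result<u64, String> {
    Ok(account.lamports)
}

fn main() -> Result<(), String> {
    // clippy::redundant_closure would suggest `.and_then(data_from_account)`,
    // but and_then hands the closure an owned Account while data_from_account
    // wants &Account. The `ref` pattern bridges the two, so the closure stays
    // and the lint is allowed instead.
    #[allow(clippy::redundant_closure)]
    let lamports = get_account().and_then(|ref a| data_from_account(a))?;
    assert_eq!(lamports, 42);
    Ok(())
}
```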
9 changes: 0 additions & 9 deletions core/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -4,7 +4,6 @@ use {
itertools::Itertools,
solana_entry::entry::Entry,
solana_ledger::shred::Shredder,
solana_runtime::blockhash_queue::BlockhashQueue,
solana_sdk::{
hash::Hash,
signature::{Keypair, Signature, Signer},
@@ -26,11 +25,6 @@ pub struct BroadcastDuplicatesConfig {
#[derive(Clone)]
pub(super) struct BroadcastDuplicatesRun {
config: BroadcastDuplicatesConfig,
// Local queue for broadcast to track which duplicate blockhashes we've sent
duplicate_queue: BlockhashQueue,
// Buffer for duplicate entries
duplicate_entries_buffer: Vec<Entry>,
last_duplicate_entry_hash: Hash,
current_slot: Slot,
next_shred_index: u32,
shred_version: u16,
@@ -50,10 +44,7 @@ impl BroadcastDuplicatesRun {
));
Self {
config,
duplicate_queue: BlockhashQueue::default(),
duplicate_entries_buffer: vec![],
next_shred_index: u32::MAX,
last_duplicate_entry_hash: Hash::default(),
shred_version,
current_slot: 0,
recent_blockhash: None,
2 changes: 0 additions & 2 deletions core/src/broadcast_stage/standard_broadcast_run.rs
@@ -32,7 +32,6 @@ pub struct StandardBroadcastRun {
last_datapoint_submit: Arc<AtomicInterval>,
num_batches: usize,
cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
last_peer_update: Arc<AtomicInterval>,
}

impl StandardBroadcastRun {
@@ -52,7 +51,6 @@
last_datapoint_submit: Arc::default(),
num_batches: 0,
cluster_nodes_cache,
last_peer_update: Arc::new(AtomicInterval::default()),
}
}

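Both broadcast-run structs lose members that were only ever initialized, never read; toolchains of this period report such fields under the `dead_code` lint ("field is never read", to the best of my knowledge), and the commit resolves it by deleting the fields and their initializers. A tiny illustration of the two available options, with simplified field types:

```rust
// Illustrative only: a field written at construction but never read afterwards
// trips `dead_code` ("field is never read") on newer toolchains.
struct StandardBroadcastRunOld {
    num_batches: usize,
    #[allow(dead_code)] // silencing the lint is the alternative to deleting the field
    last_peer_update: u64,
}

// The commit takes the other route and removes the unused field outright.
struct StandardBroadcastRun {
    num_batches: usize,
}

fn main() {
    let old = StandardBroadcastRunOld { num_batches: 0, last_peer_update: 0 };
    let new = StandardBroadcastRun { num_batches: 0 };
    println!("{} {}", old.num_batches, new.num_batches);
}
```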
2 changes: 1 addition & 1 deletion core/src/consensus.rs
@@ -1313,7 +1313,7 @@ pub mod test {
}
VoteState::serialize(
&VoteStateVersions::new_current(vote_state),
&mut account.data_as_mut_slice(),
account.data_as_mut_slice(),
)
.expect("serialize state");
(
8 changes: 4 additions & 4 deletions core/src/replay_stage.rs
@@ -3535,7 +3535,7 @@ pub mod tests {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank1 = bank_forks.read().unwrap().get(1).cloned().unwrap();
let mut bank1_progress = progress
let bank1_progress = progress
.entry(bank1.slot())
.or_insert_with(|| ForkProgress::new(bank1.last_blockhash(), None, None, 0, 0));
let shreds = shred_to_insert(
@@ -3548,7 +3548,7 @@
let res = ReplayStage::replay_blockstore_into_bank(
&bank1,
&blockstore,
&mut bank1_progress,
bank1_progress,
None,
&replay_vote_sender,
&VerifyRecyclers::default(),
@@ -3923,7 +3923,7 @@ pub mod tests {
.values()
.cloned()
.collect();
let mut heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
let heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
let mut latest_validator_votes_for_frozen_banks =
LatestValidatorVotesForFrozenBanks::default();
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
@@ -3938,7 +3938,7 @@
&VoteTracker::default(),
&ClusterSlots::default(),
&vote_simulator.bank_forks,
&mut heaviest_subtree_fork_choice,
heaviest_subtree_fork_choice,
&mut latest_validator_votes_for_frozen_banks,
);

4 changes: 2 additions & 2 deletions core/src/shred_fetch_stage.rs
@@ -95,9 +95,9 @@ impl ShredFetchStage {
}
}
stats.shred_count += p.packets.len();
p.packets.iter_mut().for_each(|mut packet| {
p.packets.iter_mut().for_each(|packet| {
Self::process_packet(
&mut packet,
packet,
&mut shreds_received,
&mut stats,
last_root,
31 changes: 14 additions & 17 deletions core/tests/ledger_cleanup.rs
@@ -29,25 +29,24 @@ mod tests {

#[derive(Debug)]
struct BenchmarkConfig {
pub benchmark_slots: u64,
pub batch_size: u64,
pub max_ledger_shreds: u64,
pub entries_per_slot: u64,
pub stop_size_bytes: u64,
pub stop_size_iterations: u64,
pub pre_generate_data: bool,
pub cleanup_blockstore: bool,
pub emit_cpu_info: bool,
pub assert_compaction: bool,
pub compaction_interval: Option<u64>,
pub no_compaction: bool,
benchmark_slots: u64,
batch_size: u64,
max_ledger_shreds: u64,
entries_per_slot: u64,
stop_size_bytes: u64,
stop_size_iterations: u64,
pre_generate_data: bool,
cleanup_blockstore: bool,
assert_compaction: bool,
compaction_interval: Option<u64>,
no_compaction: bool,
}

#[derive(Clone, Copy, Debug)]
struct CpuStatsInner {
pub cpu_user: f32,
pub cpu_system: f32,
pub cpu_idle: f32,
cpu_user: f32,
cpu_system: f32,
cpu_idle: f32,
}

impl From<CPULoad> for CpuStatsInner {
@@ -153,7 +152,6 @@ mod tests {
let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS);
let pre_generate_data = read_env("PRE_GENERATE_DATA", false);
let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true);
let emit_cpu_info = read_env("EMIT_CPU_INFO", true);
// set default to `true` once compaction is merged
let assert_compaction = read_env("ASSERT_COMPACTION", false);
let compaction_interval = match read_env("COMPACTION_INTERVAL", 0) {
Expand All @@ -171,7 +169,6 @@ mod tests {
stop_size_iterations,
pre_generate_data,
cleanup_blockstore,
emit_cpu_info,
assert_compaction,
compaction_interval,
no_compaction,
4 changes: 1 addition & 3 deletions entry/src/entry.rs
@@ -573,9 +573,7 @@ impl EntrySlice for [Entry] {
1,
);
}
if res != 0 {
panic!("GPU PoH verify many failed");
}
assert!(res == 0, "GPU PoH verify many failed");
inc_new_counter_info!(
"entry_verify-gpu_thread",
timing::duration_as_us(&gpu_wait.elapsed()) as usize
20 changes: 9 additions & 11 deletions faucet/src/faucet.rs
@@ -84,16 +84,6 @@ pub enum FaucetRequest {
},
}

impl Default for FaucetRequest {
fn default() -> Self {
Self::GetAirdrop {
lamports: u64::default(),
to: Pubkey::default(),
blockhash: Hash::default(),
}
}
}

pub enum FaucetTransaction {
Airdrop(Transaction),
Memo((Transaction, String)),
@@ -416,7 +406,15 @@ async fn process(
mut stream: TokioTcpStream,
faucet: Arc<Mutex<Faucet>>,
) -> Result<(), Box<dyn std::error::Error>> {
let mut request = vec![0u8; serialized_size(&FaucetRequest::default()).unwrap() as usize];
let mut request = vec![
0u8;
serialized_size(&FaucetRequest::GetAirdrop {
lamports: u64::default(),
to: Pubkey::default(),
blockhash: Hash::default(),
})
.unwrap() as usize
];
while stream.read_exact(&mut request).await.is_ok() {
trace!("{:?}", request);

4 changes: 1 addition & 3 deletions frozen-abi/src/abi_digester.rs
@@ -194,9 +194,7 @@ impl AbiDigester {
label: &'static str,
variant: &'static str,
) -> Result<(), DigestError> {
if !self.for_enum {
panic!("derive AbiEnumVisitor or implement it for the enum, which contains a variant ({}) named {}", label, variant);
}
assert!(self.for_enum, "derive AbiEnumVisitor or implement it for the enum, which contains a variant ({}) named {}", label, variant);
Ok(())
}

(The remaining changed files of the 53 are not shown here.)
