From 459c9699b8fd9614145a1ebfdde2d462dc93d3c6 Mon Sep 17 00:00:00 2001 From: Michael <68944931+michaelh-laine@users.noreply.github.com> Date: Wed, 5 Oct 2022 20:55:45 +0200 Subject: [PATCH 01/65] Resolve PagerDuty incident on All Clear instead of triggering new incident (#28232) --- Cargo.lock | 1 + notifier/Cargo.toml | 1 + notifier/src/lib.rs | 48 ++++++++++++++++++++++++++++-------------- watchtower/src/main.rs | 14 ++++++------ 4 files changed, 42 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f366de2938a43..fa31b33977a2dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5668,6 +5668,7 @@ dependencies = [ "log", "reqwest", "serde_json", + "solana-sdk 1.15.0", ] [[package]] diff --git a/notifier/Cargo.toml b/notifier/Cargo.toml index 02c2ae28a4226e..a4245badcf09f8 100644 --- a/notifier/Cargo.toml +++ b/notifier/Cargo.toml @@ -13,6 +13,7 @@ edition = "2021" log = "0.4.17" reqwest = { version = "0.11.12", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } serde_json = "1.0" +solana-sdk = { path = "../sdk", version = "=1.15.0" } [lib] name = "solana_notifier" diff --git a/notifier/src/lib.rs b/notifier/src/lib.rs index 594f5938d87e2d..87c01bfebf29eb 100644 --- a/notifier/src/lib.rs +++ b/notifier/src/lib.rs @@ -27,6 +27,7 @@ use log::*; use { reqwest::{blocking::Client, StatusCode}, serde_json::json, + solana_sdk::hash::Hash, std::{env, str::FromStr, thread::sleep, time::Duration}, }; @@ -83,7 +84,7 @@ fn get_twilio_config() -> Result, String> { Ok(Some(config)) } -enum NotificationType { +enum NotificationChannel { Discord(String), Slack(String), PagerDuty(String), @@ -92,9 +93,15 @@ enum NotificationType { Log(Level), } +#[derive(Clone)] +pub enum NotificationType { + Trigger { incident: Hash }, + Resolve { incident: Hash }, +} + pub struct Notifier { client: Client, - notifiers: Vec, + notifiers: Vec, } impl Notifier { @@ -108,32 +115,32 @@ impl Notifier { let mut notifiers = vec![]; if let Ok(webhook) = env::var(format!("{}DISCORD_WEBHOOK", env_prefix)) { - notifiers.push(NotificationType::Discord(webhook)); + notifiers.push(NotificationChannel::Discord(webhook)); } if let Ok(webhook) = env::var(format!("{}SLACK_WEBHOOK", env_prefix)) { - notifiers.push(NotificationType::Slack(webhook)); + notifiers.push(NotificationChannel::Slack(webhook)); } if let Ok(routing_key) = env::var(format!("{}PAGERDUTY_INTEGRATION_KEY", env_prefix)) { - notifiers.push(NotificationType::PagerDuty(routing_key)); + notifiers.push(NotificationChannel::PagerDuty(routing_key)); } if let (Ok(bot_token), Ok(chat_id)) = ( env::var(format!("{}TELEGRAM_BOT_TOKEN", env_prefix)), env::var(format!("{}TELEGRAM_CHAT_ID", env_prefix)), ) { - notifiers.push(NotificationType::Telegram(TelegramWebHook { + notifiers.push(NotificationChannel::Telegram(TelegramWebHook { bot_token, chat_id, })); } if let Ok(Some(webhook)) = get_twilio_config() { - notifiers.push(NotificationType::Twilio(webhook)); + notifiers.push(NotificationChannel::Twilio(webhook)); } if let Ok(log_level) = env::var(format!("{}LOG_NOTIFIER_LEVEL", env_prefix)) { match Level::from_str(&log_level) { - Ok(level) => notifiers.push(NotificationType::Log(level)), + Ok(level) => notifiers.push(NotificationChannel::Log(level)), Err(e) => warn!( "could not parse specified log notifier level string ({}): {}", log_level, e @@ -153,10 +160,10 @@ impl Notifier { self.notifiers.is_empty() } - pub fn send(&self, msg: &str) { + pub fn send(&self, msg: &str, notification_type: &NotificationType) { for 
notifier in &self.notifiers { match notifier { - NotificationType::Discord(webhook) => { + NotificationChannel::Discord(webhook) => { for line in msg.split('\n') { // Discord rate limiting is aggressive, limit to 1 message a second sleep(Duration::from_millis(1000)); @@ -183,14 +190,23 @@ impl Notifier { } } } - NotificationType::Slack(webhook) => { + NotificationChannel::Slack(webhook) => { let data = json!({ "text": msg }); if let Err(err) = self.client.post(webhook).json(&data).send() { warn!("Failed to send Slack message: {:?}", err); } } - NotificationType::PagerDuty(routing_key) => { - let data = json!({"payload":{"summary":msg,"source":"solana-watchtower","severity":"critical"},"routing_key":routing_key,"event_action":"trigger"}); + NotificationChannel::PagerDuty(routing_key) => { + let event_action = match notification_type { + NotificationType::Trigger { incident: _ } => String::from("trigger"), + NotificationType::Resolve { incident: _ } => String::from("resolve"), + }; + let dedup_key = match notification_type { + NotificationType::Trigger { ref incident } => incident.clone().to_string(), + NotificationType::Resolve { ref incident } => incident.clone().to_string(), + }; + + let data = json!({"payload":{"summary":msg,"source":"solana-watchtower","severity":"critical"},"routing_key":routing_key,"event_action":event_action,"dedup_key":dedup_key}); let url = "https://events.pagerduty.com/v2/enqueue"; if let Err(err) = self.client.post(url).json(&data).send() { @@ -198,7 +214,7 @@ impl Notifier { } } - NotificationType::Telegram(TelegramWebHook { chat_id, bot_token }) => { + NotificationChannel::Telegram(TelegramWebHook { chat_id, bot_token }) => { let data = json!({ "chat_id": chat_id, "text": msg }); let url = format!("https://api.telegram.org/bot{}/sendMessage", bot_token); @@ -207,7 +223,7 @@ impl Notifier { } } - NotificationType::Twilio(TwilioWebHook { + NotificationChannel::Twilio(TwilioWebHook { account, token, to, @@ -222,7 +238,7 @@ impl Notifier { warn!("Failed to send Twilio message: {:?}", err); } } - NotificationType::Log(level) => { + NotificationChannel::Log(level) => { log!(*level, "{}", msg) } } diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs index 59f6c02615ffc2..0ee297925b224c 100644 --- a/watchtower/src/main.rs +++ b/watchtower/src/main.rs @@ -10,7 +10,7 @@ use { }, solana_cli_output::display::format_labeled_address, solana_metrics::{datapoint_error, datapoint_info}, - solana_notifier::Notifier, + solana_notifier::{NotificationType, Notifier}, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{client_error, response::RpcVoteAccountStatus}, solana_sdk::{ @@ -244,6 +244,7 @@ fn main() -> Result<(), Box> { let mut last_notification_msg = "".into(); let mut num_consecutive_failures = 0; let mut last_success = Instant::now(); + let mut incident = Hash::new_unique(); loop { let failure = match get_cluster_info(&config, &rpc_client) { @@ -373,7 +374,7 @@ fn main() -> Result<(), Box> { if num_consecutive_failures > config.unhealthy_threshold { datapoint_info!("watchtower-sanity", ("ok", false, bool)); if last_notification_msg != notification_msg { - notifier.send(¬ification_msg); + notifier.send(¬ification_msg, &NotificationType::Trigger { incident }); } datapoint_error!( "watchtower-sanity-failure", @@ -399,14 +400,15 @@ fn main() -> Result<(), Box> { humantime::format_duration(alarm_duration) ); info!("{}", all_clear_msg); - notifier.send(&format!( - "solana-watchtower{}: {}", - config.name_suffix, all_clear_msg - )); + notifier.send( + 
+                &format!("solana-watchtower{}: {}", config.name_suffix, all_clear_msg),
+                &NotificationType::Resolve { incident },
+            );
         }
         last_notification_msg = "".into();
         last_success = Instant::now();
         num_consecutive_failures = 0;
+        incident = Hash::new_unique();
     }
     sleep(config.interval);
 }

From 3f5eec32cd9a7440be590ef4e7059d709753ac41 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Wed, 5 Oct 2022 12:56:29 -0700
Subject: [PATCH 02/65] refactor SplitAncientStorages::new for testing (#28235)

---
 runtime/src/accounts_db.rs | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index b407687690bb69..5b2fc47e6a9dde 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -1357,15 +1357,31 @@ impl SplitAncientStorages {
         // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty)
         // 3. evenly divided full chunks in the middle
         // 4. unevenly divided chunk of most recent slots (may be empty)
-        let max_slot_inclusive = snapshot_storages.max_slot_inclusive();
         let range = snapshot_storages.range();
         let ancient_slots = snapshot_storages
             .iter_range(&(range.start..one_epoch_old_slot))
             .filter_map(|(slot, storages)| storages.map(|_| slot))
             .collect::<Vec<_>>();
-        let ancient_slot_count = ancient_slots.len();
         let first_non_ancient_slot = std::cmp::max(range.start, one_epoch_old_slot);
+        Self::new_with_ancient_info(range, ancient_slots, first_non_ancient_slot)
+    }
+
+    /// create once ancient slots have been identified
+    /// This is easier to test, removing SortedStorages as a type to deal with here.
+    fn new_with_ancient_info(
+        range: &Range<Slot>,
+        ancient_slots: Vec<Slot>,
+        first_non_ancient_slot: Slot,
+    ) -> Self {
+        if range.is_empty() {
+            // Corner case mainly for tests, but gives us a consistent base case. Makes more sense to return default here than anything else.
+ // caller is asking to split for empty set of slots + return SplitAncientStorages::default(); + } + + let max_slot_inclusive = range.end.saturating_sub(1); + let ancient_slot_count = ancient_slots.len(); let first_chunk_start = ((first_non_ancient_slot + MAX_ITEMS_PER_CHUNK) / MAX_ITEMS_PER_CHUNK) * MAX_ITEMS_PER_CHUNK; From 63c00e7f5e20f8393044c864362f0f88105adb67 Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Wed, 5 Oct 2022 22:20:26 +0200 Subject: [PATCH 03/65] move readable inside the loop of packets (#27916) * Add recv_mmsg_exact function * update tests * address PR comments --- streamer/src/nonblocking/recvmmsg.rs | 30 ++++++++++++++++++++++------ streamer/src/nonblocking/sendmmsg.rs | 12 +++++------ 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/streamer/src/nonblocking/recvmmsg.rs b/streamer/src/nonblocking/recvmmsg.rs index df2b08ff2c761c..794b9d72e0127e 100644 --- a/streamer/src/nonblocking/recvmmsg.rs +++ b/streamer/src/nonblocking/recvmmsg.rs @@ -9,6 +9,8 @@ use { tokio::net::UdpSocket, }; +/// Pulls some packets from the socket into the specified container +/// returning how many packets were read pub async fn recv_mmsg( socket: &UdpSocket, packets: &mut [Packet], @@ -36,6 +38,21 @@ pub async fn recv_mmsg( Ok(i) } +/// Reads the exact number of packets required to fill `packets` +pub async fn recv_mmsg_exact( + socket: &UdpSocket, + packets: &mut [Packet], +) -> io::Result { + let total = packets.len(); + let mut remaining = total; + while remaining != 0 { + let first = total - remaining; + let res = recv_mmsg(socket, &mut packets[first..]).await?; + remaining -= res; + } + Ok(packets.len()) +} + #[cfg(test)] mod tests { use { @@ -63,8 +80,8 @@ mod tests { sender.send_to(&data[..], &addr).await.unwrap(); } - let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); + let mut packets = vec![Packet::default(); sent]; + let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); assert_eq!(sent, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); @@ -90,17 +107,18 @@ mod tests { } let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); + let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); assert_eq!(TEST_NUM_MSGS, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta.socket_addr(), saddr); } + let mut packets = vec![Packet::default(); sent - TEST_NUM_MSGS]; packets .iter_mut() .for_each(|pkt| pkt.meta = Meta::default()); - let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); + let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); assert_eq!(sent - TEST_NUM_MSGS, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); @@ -119,7 +137,7 @@ mod tests { } #[tokio::test] - async fn test_recv_mmsg_multi_iter_timeout() { + async fn test_recv_mmsg_exact_multi_iter_timeout() { let reader = UdpSocket::bind("127.0.0.1:0").await.expect("bind"); let addr = reader.local_addr().unwrap(); let sender = UdpSocket::bind("127.0.0.1:0").await.expect("bind"); @@ -132,7 +150,7 @@ mod tests { let start = Instant::now(); let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); + let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); assert_eq!(TEST_NUM_MSGS, recv); for 
packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); diff --git a/streamer/src/nonblocking/sendmmsg.rs b/streamer/src/nonblocking/sendmmsg.rs index 8721937e25ff3b..3c8d6083002ce3 100644 --- a/streamer/src/nonblocking/sendmmsg.rs +++ b/streamer/src/nonblocking/sendmmsg.rs @@ -54,7 +54,7 @@ mod tests { use { crate::{ nonblocking::{ - recvmmsg::recv_mmsg, + recvmmsg::{recv_mmsg, recv_mmsg_exact}, sendmmsg::{batch_send, multi_target_send}, }, packet::Packet, @@ -81,7 +81,7 @@ mod tests { assert_eq!(sent, Some(())); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); + let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); assert_eq!(32, recv); } @@ -111,12 +111,12 @@ mod tests { let sent = batch_send(&sender, &packet_refs[..]).await.ok(); assert_eq!(sent, Some(())); - let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader, &mut packets[..]).await.unwrap(); + let mut packets = vec![Packet::default(); 16]; + let recv = recv_mmsg_exact(&reader, &mut packets[..]).await.unwrap(); assert_eq!(16, recv); - let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader2, &mut packets[..]).await.unwrap(); + let mut packets = vec![Packet::default(); 16]; + let recv = recv_mmsg_exact(&reader2, &mut packets[..]).await.unwrap(); assert_eq!(16, recv); } From a8c6a9e5fcccd47487a9fa0d6a1d3a8bf019f7ab Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Wed, 5 Oct 2022 17:44:35 -0400 Subject: [PATCH 04/65] Bank::freeze() waits for EAH calculation to complete (#28170) --- Cargo.lock | 1 + core/src/accounts_hash_verifier.rs | 14 +- core/tests/snapshots.rs | 65 +++++-- ledger/src/blockstore_processor.rs | 43 ++++- local-cluster/src/local_cluster.rs | 6 - local-cluster/tests/local_cluster.rs | 6 +- program-test/Cargo.toml | 1 + program-test/src/lib.rs | 29 ++- programs/bpf/Cargo.lock | 1 + runtime/src/accounts_db.rs | 6 +- runtime/src/bank.rs | 62 ++++--- runtime/src/bank_forks.rs | 54 ++++-- runtime/src/epoch_accounts_hash.rs | 5 +- runtime/src/epoch_accounts_hash/manager.rs | 199 +++++++++++++++++++++ runtime/src/serde_snapshot.rs | 8 +- runtime/src/serde_snapshot/tests.rs | 14 +- 16 files changed, 424 insertions(+), 90 deletions(-) create mode 100644 runtime/src/epoch_accounts_hash/manager.rs diff --git a/Cargo.lock b/Cargo.lock index fa31b33977a2dd..fde4a19427c5ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5870,6 +5870,7 @@ dependencies = [ "base64 0.13.0", "bincode", "chrono-humanize", + "crossbeam-channel", "log", "serde", "solana-banks-client", diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index bbe8ba06c4b3b3..c594fa90386edb 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -234,20 +234,16 @@ impl AccountsHashVerifier { fn save_epoch_accounts_hash(accounts_package: &AccountsPackage, accounts_hash: Hash) { if accounts_package.package_type == AccountsPackageType::EpochAccountsHash { - debug!( + info!( "saving epoch accounts hash, slot: {}, hash: {}", accounts_package.slot, accounts_hash ); - let new_epoch_accounts_hash = EpochAccountsHash::new(accounts_hash); - let old_epoch_accounts_hash = accounts_package + let epoch_accounts_hash = EpochAccountsHash::new(accounts_hash); + accounts_package .accounts .accounts_db - .epoch_accounts_hash - .lock() - .unwrap() - .replace(new_epoch_accounts_hash); - // Old epoch accounts hash must be NONE, because a previous bank must have taken it to hash 
into itself - assert!(old_epoch_accounts_hash.is_none()); + .epoch_accounts_hash_manager + .set_valid(epoch_accounts_hash, accounts_package.slot); } } diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index f351a077ea03f5..21b169a84bd213 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -211,13 +211,31 @@ fn run_bank_forks_snapshot_n( let bank_forks = &mut snapshot_test_config.bank_forks; let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; + let pending_accounts_package = PendingAccountsPackage::default(); + let exit = Arc::new(AtomicBool::new(false)); + let node_id = Arc::new(Keypair::new()); + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new_localhost(&node_id.pubkey(), timestamp()), + Arc::clone(&node_id), + SocketAddrSpace::Unspecified, + )); + let accounts_hash_verifier = AccountsHashVerifier::new( + Arc::clone(&pending_accounts_package), + None, + &exit, + &cluster_info, + None, + false, + 0, + Some(snapshot_test_config.snapshot_config.clone()), + ); + let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); let request_sender = AbsRequestSender::new(snapshot_request_sender); - let pending_accounts_package = PendingAccountsPackage::default(); let snapshot_request_handler = SnapshotRequestHandler { snapshot_config: snapshot_test_config.snapshot_config.clone(), snapshot_request_receiver, - pending_accounts_package: pending_accounts_package.clone(), + pending_accounts_package, }; for slot in 1..=last_slot { let mut bank = Bank::new_from_parent(&bank_forks[slot - 1], &Pubkey::default(), slot); @@ -231,13 +249,6 @@ fn run_bank_forks_snapshot_n( bank_forks.set_root(bank.slot(), &request_sender, None); bank.update_accounts_hash(); snapshot_request_handler.handle_snapshot_requests(false, false, 0, &mut None); - - // Clear out any pending accounts package. Since `set_root()` can trigger an Epoch - // Accounts Hash request, we must ensure that there is not already a pending EAH - // accounts package, otherwise ABS will panic when trying to submit a second EAH - // accounts package. The most straight forward way is to clear the pending accounts - // package every time. - pending_accounts_package.lock().unwrap().take(); } } @@ -283,6 +294,9 @@ fn run_bank_forks_snapshot_n( let account_paths = &[snapshot_test_config.accounts_dir.path().to_path_buf()]; let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; restore_from_snapshot(bank_forks, last_slot, genesis_config, account_paths); + + exit.store(true, Ordering::Relaxed); + accounts_hash_verifier.join().unwrap(); } #[test_case(V1_2_0, Development)] @@ -591,6 +605,16 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust snapshot_test_config .bank_forks .set_root(current_bank.slot(), &request_sender, None); + + // Since the accounts background services are not runnning, EpochAccountsHash + // calculation requests will not be handled. To prevent banks from hanging during + // Bank::freeze() due to waiting for EAH to complete, just set the EAH to Invalid. 
+ current_bank + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .set_invalid_for_tests(); } let num_old_slots = num_set_roots * *add_root_interval - MAX_CACHE_ENTRIES + 1; @@ -685,12 +709,31 @@ fn test_bank_forks_incremental_snapshot( let bank_forks = &mut snapshot_test_config.bank_forks; let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; + let pending_accounts_package = PendingAccountsPackage::default(); + let exit = Arc::new(AtomicBool::new(false)); + let node_id = Arc::new(Keypair::new()); + let cluster_info = Arc::new(ClusterInfo::new( + ContactInfo::new_localhost(&node_id.pubkey(), timestamp()), + Arc::clone(&node_id), + SocketAddrSpace::Unspecified, + )); + let accounts_hash_verifier = AccountsHashVerifier::new( + Arc::clone(&pending_accounts_package), + None, + &exit, + &cluster_info, + None, + false, + 0, + Some(snapshot_test_config.snapshot_config.clone()), + ); + let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); let request_sender = AbsRequestSender::new(snapshot_request_sender); let snapshot_request_handler = SnapshotRequestHandler { snapshot_config: snapshot_test_config.snapshot_config.clone(), snapshot_request_receiver, - pending_accounts_package: PendingAccountsPackage::default(), + pending_accounts_package, }; let mut last_full_snapshot_slot = None; @@ -761,6 +804,8 @@ fn test_bank_forks_incremental_snapshot( .unwrap(); } } + exit.store(true, Ordering::Relaxed); + accounts_hash_verifier.join().unwrap(); } fn make_full_snapshot_archive( diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 3a40a3df7a1d9a..7670dc8130f921 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -18,7 +18,7 @@ use { solana_program_runtime::timings::{ExecuteTimingType, ExecuteTimings, ThreadExecuteTimings}, solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, solana_runtime::{ - accounts_background_service::AbsRequestSender, + accounts_background_service::{AbsRequestSender, SnapshotRequestType}, accounts_db::{AccountShrinkThreshold, AccountsDbConfig}, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -56,7 +56,10 @@ use { collections::{HashMap, HashSet}, path::PathBuf, result, - sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, + sync::{ + atomic::{AtomicBool, Ordering::Relaxed}, + Arc, Mutex, RwLock, + }, time::{Duration, Instant}, }, thiserror::Error, @@ -727,6 +730,35 @@ pub fn test_process_blockstore( opts: &ProcessOptions, exit: &Arc, ) -> (Arc>, LeaderScheduleCache) { + // Spin up a thread to be a fake Accounts Background Service. Need to intercept and handle + // (i.e. skip/make invalid) all EpochAccountsHash requests so future rooted banks do not hang + // in Bank::freeze() waiting for an in-flight EAH calculation to complete. 
+ let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded(); + let abs_request_sender = AbsRequestSender::new(snapshot_request_sender); + let bg_exit = Arc::new(AtomicBool::new(false)); + let bg_thread = { + let exit = Arc::clone(&bg_exit); + std::thread::spawn(move || { + while !exit.load(Relaxed) { + snapshot_request_receiver + .try_iter() + .filter(|snapshot_request| { + snapshot_request.request_type == SnapshotRequestType::EpochAccountsHash + }) + .for_each(|snapshot_request| { + snapshot_request + .snapshot_root_bank + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .set_invalid_for_tests(); + }); + std::thread::sleep(Duration::from_millis(100)); + } + }) + }; + let (bank_forks, leader_schedule_cache, ..) = crate::bank_forks_utils::load_bank_forks( genesis_config, blockstore, @@ -738,6 +770,7 @@ pub fn test_process_blockstore( None, exit, ); + process_blockstore_from_root( blockstore, &bank_forks, @@ -745,9 +778,13 @@ pub fn test_process_blockstore( opts, None, None, - &AbsRequestSender::default(), + &abs_request_sender, ) .unwrap(); + + bg_exit.store(true, Relaxed); + bg_thread.join().unwrap(); + (bank_forks, leader_schedule_cache) } diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 14fd48f29bbca8..6d0deaded73f73 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -27,7 +27,6 @@ use { clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT}, commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, - feature_set, genesis_config::{ClusterType, GenesisConfig}, message::Message, poh_config::PohConfig, @@ -258,11 +257,6 @@ impl LocalCluster { ), ); - // Do not enable Epoch Accounts Hash in local-cluster tests yet - genesis_config - .accounts - .remove(&feature_set::epoch_accounts_hash::id()); - let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let leader_contact_info = leader_node.info.clone(); let mut leader_config = safe_clone_config(&config.validator_configs[0]); diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 3d1df45ffd1bf2..0670a231a01672 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -723,7 +723,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st // If these intervals change, also make sure to change the loop timers accordingly. 
let accounts_hash_interval = 3; let incremental_snapshot_interval = accounts_hash_interval * 3; - let full_snapshot_interval = incremental_snapshot_interval * 3; + let full_snapshot_interval = incremental_snapshot_interval * 5; let num_account_paths = 3; let leader_snapshot_test_config = SnapshotValidatorConfig::new( @@ -1302,7 +1302,7 @@ fn test_snapshot_restart_tower() { fn test_snapshots_blockstore_floor() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 1 snapshotting leader - let snapshot_interval_slots = 10; + let snapshot_interval_slots = 100; let num_account_paths = 4; let leader_snapshot_test_config = @@ -1408,7 +1408,7 @@ fn test_snapshots_blockstore_floor() { #[serial] fn test_snapshots_restart_validity() { solana_logger::setup_with_default(RUST_LOG_FILTER); - let snapshot_interval_slots = 10; + let snapshot_interval_slots = 100; let num_account_paths = 1; let mut snapshot_test_config = setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths); diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 27b8592df2564f..51e9c8c16e50c2 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -13,6 +13,7 @@ async-trait = "0.1.57" base64 = "0.13.0" bincode = "1.3.3" chrono-humanize = "0.2.1" +crossbeam-channel = "0.5" log = "0.4.17" serde = "1.0.144" solana-banks-client = { path = "../banks-client", version = "=1.15.0" } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 55f37a988ba043..6ecf346421d220 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -15,6 +15,7 @@ use { stable_log, timings::ExecuteTimings, }, solana_runtime::{ + accounts_background_service::{AbsRequestSender, SnapshotRequestType}, bank::Bank, bank_forks::BankForks, builtins::Builtin, @@ -1129,11 +1130,29 @@ impl ProgramTestContext { pre_warp_slot, )) }; - bank_forks.set_root( - pre_warp_slot, - &solana_runtime::accounts_background_service::AbsRequestSender::default(), - Some(pre_warp_slot), - ); + + let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded(); + let abs_request_sender = AbsRequestSender::new(snapshot_request_sender); + + bank_forks.set_root(pre_warp_slot, &abs_request_sender, Some(pre_warp_slot)); + + // The call to `set_root()` above will send an EAH request. Need to intercept and handle + // (i.e. skip/make invalid) all EpochAccountsHash requests so future rooted banks do not + // hang in Bank::freeze() waiting for an in-flight EAH calculation to complete. 
+ snapshot_request_receiver + .try_iter() + .filter(|snapshot_request| { + snapshot_request.request_type == SnapshotRequestType::EpochAccountsHash + }) + .for_each(|snapshot_request| { + snapshot_request + .snapshot_root_bank + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .set_invalid_for_tests(); + }); // warp_bank is frozen so go forward to get unfrozen bank at warp_slot bank_forks.insert(Bank::new_from_parent( diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 5f8c6f383e94e2..bb945eb9256d78 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -5264,6 +5264,7 @@ dependencies = [ "base64 0.13.0", "bincode", "chrono-humanize", + "crossbeam-channel", "log", "serde", "solana-banks-client", diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 5b2fc47e6a9dde..2e4dbb2f6f5ab1 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -45,7 +45,7 @@ use { bank::Rewrites, cache_hash_data::CacheHashData, contains::Contains, - epoch_accounts_hash::EpochAccountsHash, + epoch_accounts_hash::EpochAccountsHashManager, expected_rent_collection::{ExpectedRentCollection, SlotInfoInEpoch}, pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, @@ -1198,7 +1198,7 @@ pub struct AccountsDb { /// The cadence is once per epoch, all nodes calculate a full accounts hash as of a known slot calculated using 'N' /// Some time later (to allow for slow calculation time), the bank hash at a slot calculated using 'M' includes the full accounts hash. /// Thus, the state of all accounts on a validator is known to be correct at least once per epoch. - pub epoch_accounts_hash: Mutex>, + pub epoch_accounts_hash_manager: EpochAccountsHashManager, } #[derive(Debug, Default)] @@ -2087,7 +2087,7 @@ impl AccountsDb { num_hash_scan_passes, log_dead_slots: AtomicBool::new(true), exhaustively_verify_refcounts: false, - epoch_accounts_hash: Mutex::new(None), + epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ece485d0973819..4b894ccb225b71 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6869,28 +6869,11 @@ impl Bank { self.last_blockhash().as_ref(), ]); - let epoch_accounts_hash = self.epoch_accounts_hash(); - let should_include_epoch_accounts_hash = self.should_include_epoch_accounts_hash(); - if should_include_epoch_accounts_hash { - // Nothing is writing a value into the epoch accounts hash yet—this is not a problem - // for normal clusters, as the feature gating this `if` block is always false. - // However, some tests enable all features, so this `if` block can be true. - // - // For now, check to see if the epoch accounts hash is `Some` before hashing. Once the - // writer-side is implemented, change this to be an `.expect()` or `.unwrap()`, as it - // will be required for the epoch accounts hash calculation to have completed and - // for this value to be `Some`. 
- if let Some(epoch_accounts_hash) = epoch_accounts_hash { - debug!( - "including epoch accounts hash, slot: {}, hash: {:?}", - self.slot(), - epoch_accounts_hash - ); - hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]); - } else { - warn!("bank {}: epoch_accounts_hash was None but should have been included in this bank's hash!", self.slot()); - } - } + let epoch_accounts_hash = self.should_include_epoch_accounts_hash().then(|| { + let epoch_accounts_hash = self.wait_get_epoch_accounts_hash(); + hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]); + epoch_accounts_hash + }); let buf = self .hard_forks @@ -6917,8 +6900,8 @@ impl Bank { self.signature_count(), self.last_blockhash(), self.capitalization(), - if should_include_epoch_accounts_hash { - format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash) + if let Some(epoch_accounts_hash) = epoch_accounts_hash { + format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash.as_ref()) } else { "".to_string() } @@ -6946,6 +6929,29 @@ impl Bank { self.parent_slot() < stop_slot && self.slot() >= stop_slot } + /// If the epoch accounts hash should be included in this Bank, then fetch it. If the EAH + /// calculation has not completed yet, this fn will block until it does complete. + fn wait_get_epoch_accounts_hash(&self) -> EpochAccountsHash { + let (epoch_accounts_hash, measure) = measure!(self + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .wait_get_epoch_accounts_hash()); + + datapoint_info!( + "bank-wait_get_epoch_accounts_hash", + ("slot", self.slot() as i64, i64), + ( + "epoch_accounts_hash", + epoch_accounts_hash.as_ref().to_string(), + String + ), + ("waiting-time-us", measure.as_us() as i64, i64), + ); + epoch_accounts_hash + } + /// Recalculate the hash_internal_state from the account stores. Would be used to verify a /// snapshot. /// return true if all is good @@ -7927,13 +7933,11 @@ impl Bank { /// Convenience fn to get the Epoch Accounts Hash pub fn epoch_accounts_hash(&self) -> Option { - *self - .rc + self.rc .accounts .accounts_db - .epoch_accounts_hash - .lock() - .unwrap() + .epoch_accounts_hash_manager + .try_get_epoch_accounts_hash() } } diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index a9533fa0b5e6b0..15cdfe4918537d 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -296,6 +296,13 @@ impl BankForks { "sending epoch accounts hash request, slot: {}", eah_bank.slot() ); + eah_bank + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .set_in_flight(eah_bank.slot()); + self.last_accounts_hash_slot = eah_bank.slot(); let squash_timing = eah_bank.squash(); total_squash_accounts_ms += squash_timing.squash_accounts_ms as i64; @@ -305,16 +312,6 @@ impl BankForks { total_squash_cache_ms += squash_timing.squash_cache_ms as i64; is_root_bank_squashed = eah_bank.slot() == root; - // Clear any existing EAH before requesting a new one - _ = eah_bank - .rc - .accounts - .accounts_db - .epoch_accounts_hash - .lock() - .unwrap() - .take(); - accounts_background_request_sender .send_snapshot_request(SnapshotRequest { snapshot_root_bank: Arc::clone(eah_bank), @@ -637,6 +634,7 @@ mod tests { signature::{Keypair, Signer}, }, solana_vote_program::vote_state::BlockTimestamp, + std::{sync::atomic::Ordering::Relaxed, time::Duration}, }; #[test] @@ -734,9 +732,38 @@ mod tests { let slots_in_epoch = 32; genesis_config.epoch_schedule = EpochSchedule::new(slots_in_epoch); + // Spin up a thread to be a fake Accounts Background Service. 
Need to intercept and handle + // (i.e. skip/make invalid) all EpochAccountsHash requests so future rooted banks do not hang + // in Bank::freeze() waiting for an in-flight EAH calculation to complete. + let (snapshot_request_sender, snapshot_request_receiver) = crossbeam_channel::unbounded(); + let abs_request_sender = AbsRequestSender::new(snapshot_request_sender); + let bg_exit = Arc::new(AtomicBool::new(false)); + let bg_thread = { + let exit = Arc::clone(&bg_exit); + std::thread::spawn(move || { + while !exit.load(Relaxed) { + snapshot_request_receiver + .try_iter() + .filter(|snapshot_request| { + snapshot_request.request_type == SnapshotRequestType::EpochAccountsHash + }) + .for_each(|snapshot_request| { + snapshot_request + .snapshot_root_bank + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .set_invalid_for_tests(); + }); + std::thread::sleep(Duration::from_millis(100)); + } + }) + }; + let bank0 = Bank::new_for_tests(&genesis_config); let mut bank_forks0 = BankForks::new(bank0); - bank_forks0.set_root(0, &AbsRequestSender::default(), None); + bank_forks0.set_root(0, &abs_request_sender, None); let bank1 = Bank::new_for_tests(&genesis_config); let mut bank_forks1 = BankForks::new(bank1); @@ -768,7 +795,7 @@ mod tests { // Set root in bank_forks0 to truncate the ancestor history bank_forks0.insert(child1); - bank_forks0.set_root(slot, &AbsRequestSender::default(), None); + bank_forks0.set_root(slot, &abs_request_sender, None); // Don't set root in bank_forks1 to keep the ancestor history bank_forks1.insert(child2); @@ -782,6 +809,9 @@ mod tests { info!("child0.ancestors: {:?}", child1.ancestors); info!("child1.ancestors: {:?}", child2.ancestors); assert_eq!(child1.hash(), child2.hash()); + + bg_exit.store(true, Relaxed); + bg_thread.join().unwrap(); } fn make_hash_map(data: Vec<(Slot, Vec)>) -> HashMap> { diff --git a/runtime/src/epoch_accounts_hash.rs b/runtime/src/epoch_accounts_hash.rs index 9b8689e153467f..010d8a1640fdb0 100644 --- a/runtime/src/epoch_accounts_hash.rs +++ b/runtime/src/epoch_accounts_hash.rs @@ -16,6 +16,9 @@ use { }, }; +mod manager; +pub use manager::Manager as EpochAccountsHashManager; + /// The EpochAccountsHash holds the result after calculating the accounts hash once per epoch #[derive(Debug, Serialize, Deserialize, Hash, PartialEq, Eq, Clone, Copy)] pub struct EpochAccountsHash(Hash); @@ -29,7 +32,7 @@ impl AsRef for EpochAccountsHash { impl EpochAccountsHash { /// Make an EpochAccountsHash from a regular accounts hash #[must_use] - pub fn new(accounts_hash: Hash) -> Self { + pub const fn new(accounts_hash: Hash) -> Self { Self(accounts_hash) } } diff --git a/runtime/src/epoch_accounts_hash/manager.rs b/runtime/src/epoch_accounts_hash/manager.rs new file mode 100644 index 00000000000000..0cfa34f92a95e5 --- /dev/null +++ b/runtime/src/epoch_accounts_hash/manager.rs @@ -0,0 +1,199 @@ +use { + super::EpochAccountsHash, + solana_sdk::{clock::Slot, hash::Hash}, + std::sync::{Condvar, Mutex}, +}; + +/// Manage the epoch accounts hash +/// +/// Handles setting when an EAH calculation is requested and when it completes. Also handles +/// waiting for in-flight calculations to complete when the "stop" Bank must include it. 
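+///
+/// A minimal usage sketch (illustrative only; the slot value 100 and the
+/// `epoch_accounts_hash` binding are hypothetical, but the methods are the ones
+/// defined below):
+/// ```ignore
+/// let manager = Manager::new_invalid();
+/// manager.set_in_flight(100);                        // an EAH calculation was requested
+/// // ... the calculation runs in the background ...
+/// manager.set_valid(epoch_accounts_hash, 100);       // the calculation completed
+/// let eah = manager.wait_get_epoch_accounts_hash();  // blocks only while InFlight
+/// ```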
+#[derive(Debug)] +pub struct Manager { + /// Current state of the epoch accounts hash + state: Mutex, + /// This condition variable is used to wait for an in-flight EAH calculation to complete + cvar: Condvar, +} + +impl Manager { + #[must_use] + fn _new(state: State) -> Self { + Self { + state: Mutex::new(state), + cvar: Condvar::new(), + } + } + + /// Create a new epoch accounts hash manager, with the initial state set to Invalid + #[must_use] + pub fn new_invalid() -> Self { + Self::_new(State::Invalid) + } + + /// Create a new epoch accounts hash manager, with the initial state set to Valid + #[must_use] + pub fn new_valid(epoch_accounts_hash: EpochAccountsHash, slot: Slot) -> Self { + Self::_new(State::Valid(epoch_accounts_hash, slot)) + } + + /// An epoch accounts hash calculation has been requested; update our state + pub fn set_in_flight(&self, slot: Slot) { + let mut state = self.state.lock().unwrap(); + if let State::InFlight(old_slot) = &*state { + panic!("An epoch accounts hash calculation is already in-flight from slot {old_slot}!"); + } + *state = State::InFlight(slot); + } + + /// An epoch accounts hash calculation has completed; update our state + pub fn set_valid(&self, epoch_accounts_hash: EpochAccountsHash, slot: Slot) { + let mut state = self.state.lock().unwrap(); + if let State::Valid(old_epoch_accounts_hash, old_slot) = &*state { + panic!( + "The epoch accounts hash is already valid! \ + \nold slot: {old_slot}, epoch accounts hash: {old_epoch_accounts_hash:?} \ + \nnew slot: {slot}, epoch accounts hash: {epoch_accounts_hash:?}" + ); + } + *state = State::Valid(epoch_accounts_hash, slot); + self.cvar.notify_all(); + } + + /// Get the epoch accounts hash + /// + /// If an EAH calculation is in-flight, then this call will block until it completes. + pub fn wait_get_epoch_accounts_hash(&self) -> EpochAccountsHash { + let mut state = self.state.lock().unwrap(); + loop { + match &*state { + State::Valid(epoch_accounts_hash, _slot) => break *epoch_accounts_hash, + State::Invalid => break SENTINEL_EPOCH_ACCOUNTS_HASH, + State::InFlight(_slot) => state = self.cvar.wait(state).unwrap(), + } + } + } + + /// Get the epoch accounts hash + /// + /// This fn does not block, and will only yield an EAH if the state is `Valid` + pub fn try_get_epoch_accounts_hash(&self) -> Option { + let state = self.state.lock().unwrap(); + match &*state { + State::Valid(epoch_accounts_hash, _slot) => Some(*epoch_accounts_hash), + _ => None, + } + } + + /// **FOR TESTS ONLY** + /// Set the state to Invalid + /// This is needed by tests that do not fully startup all the accounts background services. + /// **FOR TESTS ONLY** + pub fn set_invalid_for_tests(&self) { + *self.state.lock().unwrap() = State::Invalid; + } +} + +/// The EpochAccountsHash is calculated in the background via AccountsBackgroundService. This enum +/// is used to track the state of that calculation, and queried when saving the EAH into a Bank. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum State { + /// On startup from genesis/slot0, the initial state of the EAH is invalid since one has not + /// yet been requested. This state should only really occur for tests and new clusters; not + /// for established running clusters. + Invalid, + /// An EAH calculation has been requested (for `Slot`) and is in flight. The Bank that should + /// save the EAH must wait until the calculation has completed. + InFlight(Slot), + /// The EAH calculation is complete (for `Slot`) and the EAH value is valid to read/use. 
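+    /// (This is the state set by `new_valid()` and `set_valid()`.)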
+ Valid(EpochAccountsHash, Slot), +} + +/// Sentinel epoch accounts hash value; used when getting an Invalid EAH +/// +/// Displays as "Sentine1EpochAccountsHash111111111111111111" +const SENTINEL_EPOCH_ACCOUNTS_HASH: EpochAccountsHash = + EpochAccountsHash::new(Hash::new_from_array([ + 0x06, 0x92, 0x40, 0x3b, 0xee, 0xea, 0x7e, 0xe2, 0x7d, 0xf4, 0x90, 0x7f, 0xbd, 0x9e, 0xd0, + 0xd2, 0x1c, 0x2b, 0x66, 0x9a, 0xc4, 0xda, 0xce, 0xd7, 0x23, 0x41, 0x69, 0xab, 0xb7, 0x80, + 0x00, 0x00, + ])); + +#[cfg(test)] +mod tests { + use {super::*, std::time::Duration}; + + #[test] + fn test_new_valid() { + let epoch_accounts_hash = EpochAccountsHash::new(Hash::new_unique()); + let manager = Manager::new_valid(epoch_accounts_hash, 5678); + assert_eq!( + manager.try_get_epoch_accounts_hash(), + Some(epoch_accounts_hash), + ); + assert_eq!(manager.wait_get_epoch_accounts_hash(), epoch_accounts_hash); + } + + #[test] + fn test_new_invalid() { + let manager = Manager::new_invalid(); + assert!(manager.try_get_epoch_accounts_hash().is_none()); + assert_eq!( + manager.wait_get_epoch_accounts_hash(), + SENTINEL_EPOCH_ACCOUNTS_HASH, + ); + } + + #[test] + fn test_try_get_epoch_accounts_hash() { + let epoch_accounts_hash = EpochAccountsHash::new(Hash::new_unique()); + for (state, expected) in [ + (State::Invalid, None), + (State::InFlight(123), None), + ( + State::Valid(epoch_accounts_hash, 5678), + Some(epoch_accounts_hash), + ), + ] { + let manager = Manager::_new(state); + let actual = manager.try_get_epoch_accounts_hash(); + assert_eq!(actual, expected); + } + } + + #[test] + fn test_wait_epoch_accounts_hash() { + // Test: State is Invalid, no need to wait + { + let manager = Manager::new_invalid(); + assert_eq!( + manager.wait_get_epoch_accounts_hash(), + SENTINEL_EPOCH_ACCOUNTS_HASH, + ); + } + + // Test: State is Valid, no need to wait + { + let epoch_accounts_hash = EpochAccountsHash::new(Hash::new_unique()); + let manager = Manager::new_valid(epoch_accounts_hash, 5678); + assert_eq!(manager.wait_get_epoch_accounts_hash(), epoch_accounts_hash); + } + + // Test: State is InFlight, must wait + { + let epoch_accounts_hash = EpochAccountsHash::new(Hash::new_unique()); + let manager = Manager::new_invalid(); + manager.set_in_flight(123); + + std::thread::scope(|s| { + s.spawn(|| { + std::thread::sleep(Duration::from_secs(1)); + manager.set_valid(epoch_accounts_hash, 5678) + }); + assert!(manager.try_get_epoch_accounts_hash().is_none()); + assert_eq!(manager.wait_get_epoch_accounts_hash(), epoch_accounts_hash); + assert!(manager.try_get_epoch_accounts_hash().is_some()); + }); + } + } +} diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index c8459df9b9c7e7..fa3fee4a087115 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -688,8 +688,12 @@ where accounts_update_notifier, exit, ); - *accounts_db.epoch_accounts_hash.lock().unwrap() = - epoch_accounts_hash.map(EpochAccountsHash::new); + + if let Some(epoch_accounts_hash) = epoch_accounts_hash { + accounts_db + .epoch_accounts_hash_manager + .set_valid(EpochAccountsHash::new(epoch_accounts_hash), 0); + } let AccountsDbFields( _snapshot_storages, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 4eee544aab1441..d9a875c94a7ed0 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -254,15 +254,15 @@ fn test_bank_serialize_style( if initial_epoch_accounts_hash { expected_epoch_accounts_hash = Some(Hash::new(&[7; 32])); - *bank2 + bank2 
+        bank2
             .rc
             .accounts
             .accounts_db
-            .epoch_accounts_hash
-            .lock()
-            .unwrap() = Some(EpochAccountsHash::new(
-            expected_epoch_accounts_hash.unwrap(),
-        ));
+            .epoch_accounts_hash_manager
+            .set_valid(
+                EpochAccountsHash::new(expected_epoch_accounts_hash.unwrap()),
+                0,
+            );
     }
 
     crate::serde_snapshot::bank_to_stream(
@@ -416,7 +416,7 @@ fn test_bank_serialize_style(
     assert_eq!(dbank.get_accounts_hash(), accounts_hash);
     assert!(bank2 == dbank);
     assert_eq!(dbank.incremental_snapshot_persistence, incremental);
-    assert_eq!(dbank.rc.accounts.accounts_db.epoch_accounts_hash.lock().unwrap().map(|hash| *hash.as_ref()), expected_epoch_accounts_hash,
+    assert_eq!(dbank.rc.accounts.accounts_db.epoch_accounts_hash_manager.try_get_epoch_accounts_hash().map(|hash| *hash.as_ref()), expected_epoch_accounts_hash,
         "(reserialize_accounts_hash, incremental_snapshot_persistence, epoch_accounts_hash, update_accounts_hash, initial_epoch_accounts_hash): {:?}",
         (
             reserialize_accounts_hash,

From ad0742c7cf05bb171a2d94f56b2c48188cc198cc Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Wed, 5 Oct 2022 14:49:33 -0700
Subject: [PATCH 05/65] exit early deep in bank freeze, avoiding some unnecessary work (#28238)

exit early on bank freeze
---
 runtime/src/accounts_db.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 2e4dbb2f6f5ab1..0ccae87e2a081b 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -7662,6 +7662,10 @@ impl AccountsDb {
         skipped_rewrites: &Rewrites,
     ) {
         let mut skipped_rewrites = skipped_rewrites.read().unwrap().clone();
+        if skipped_rewrites.is_empty() {
+            // if there are no skipped rewrites, then there is nothing further to do
+            return;
+        }
         hashes.iter().for_each(|(key, _)| {
             skipped_rewrites.remove(key);
         });

From 456d4ae93aff13fcea444562147133bccce3d9c8 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Wed, 5 Oct 2022 15:32:39 -0700
Subject: [PATCH 06/65] accounts hash calc, add get_ancient_slots (#28240)

---
 runtime/src/accounts_db.rs | 129 +++++++++++++++++++++++++++++++++++--
 1 file changed, 123 insertions(+), 6 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 0ccae87e2a081b..cef35a15369f1f 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -1357,16 +1357,36 @@ impl SplitAncientStorages {
         // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty)
         // 3. evenly divided full chunks in the middle
         // 4.
unevenly divided chunk of most recent slots (may be empty) - let range = snapshot_storages.range(); - let ancient_slots = snapshot_storages - .iter_range(&(range.start..one_epoch_old_slot)) - .filter_map(|(slot, storages)| storages.map(|_| slot)) - .collect::>(); - let first_non_ancient_slot = std::cmp::max(range.start, one_epoch_old_slot); + let ancient_slots = Self::get_ancient_slots(one_epoch_old_slot, snapshot_storages); + + let first_non_ancient_slot = ancient_slots + .last() + .map(|last_ancient_slot| last_ancient_slot.saturating_add(1)) + .unwrap_or(range.start); Self::new_with_ancient_info(range, ancient_slots, first_non_ancient_slot) } + /// return all ancient append vec slots from the early slots referenced by 'snapshot_storages' + fn get_ancient_slots( + one_epoch_old_slot: Slot, + snapshot_storages: &SortedStorages, + ) -> Vec { + let range = snapshot_storages.range(); + let mut ancient_slots = Vec::default(); + for (slot, storages) in snapshot_storages.iter_range(&(range.start..one_epoch_old_slot)) { + if let Some(storages) = storages { + if storages.len() == 1 && is_ancient(&storages.first().unwrap().accounts) { + ancient_slots.push(slot); + continue; // was ancient, keep looking + } + // we found a slot with a non-ancient append vec + break; + } + } + ancient_slots + } + /// create once ancient slots have been identified /// This is easier to test, removing SortedStorges as a type to deal with here. fn new_with_ancient_info( @@ -16247,4 +16267,101 @@ pub mod tests { db.add_uncleaned_pubkeys_after_shrink(slot, vec![pubkey].into_iter()); assert_eq!(&*db.uncleaned_pubkeys.get(&slot).unwrap(), &vec![pubkey]); } + + #[test] + fn test_get_ancient_slots() { + // test permutations of ancient, non-ancient, ancient with sparse slot #s and not + for sparse in [false, true] { + let (slot1_ancient, slot2, slot3_ancient, slot1_plus_ancient) = if sparse { + (1, 10, 20, 5) + } else { + // we only test with 2 ancient append vecs when sparse + (1, 2, 3, 4 /* irrelevant */) + }; + + let db = AccountsDb::new_single_for_tests(); + let ancient = db.create_ancient_append_vec(slot1_ancient).0.unwrap().1; + let ancient_1_plus = db + .create_ancient_append_vec(slot1_plus_ancient) + .0 + .unwrap() + .1; + let ancient3 = db.create_ancient_append_vec(slot3_ancient).0.unwrap().1; + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path(); + let id = 1; + let size = 1; + let non_ancient_storage = Arc::new(AccountStorageEntry::new(path, slot2, id, size)); + let raw_storages = vec![vec![non_ancient_storage.clone()]]; + let snapshot_storages = SortedStorages::new(&raw_storages); + // test without an ancient append vec + let one_epoch_old_slot = 0; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(Vec::::default(), ancient_slots); + let one_epoch_old_slot = 3; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(Vec::::default(), ancient_slots); + + // now test with an ancient append vec + let raw_storages = vec![vec![ancient.clone()]]; + let snapshot_storages = SortedStorages::new(&raw_storages); + let one_epoch_old_slot = 0; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(Vec::::default(), ancient_slots); + let one_epoch_old_slot = slot2 + 1; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(vec![slot1_ancient], ancient_slots); + 
+ // now test with an ancient append vec and then a non-ancient append vec + let raw_storages = vec![vec![ancient.clone()], vec![non_ancient_storage.clone()]]; + let snapshot_storages = SortedStorages::new(&raw_storages); + let one_epoch_old_slot = 0; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(Vec::::default(), ancient_slots); + let one_epoch_old_slot = slot2 + 1; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(vec![slot1_ancient], ancient_slots); + + // ancient, non-ancient, ancient + let raw_storages = vec![ + vec![ancient.clone()], + vec![non_ancient_storage.clone()], + vec![ancient3.clone()], + ]; + let snapshot_storages = SortedStorages::new(&raw_storages); + let one_epoch_old_slot = 0; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(Vec::::default(), ancient_slots); + let one_epoch_old_slot = slot3_ancient + 1; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(vec![slot1_ancient], ancient_slots); + + if sparse { + // ancient, ancient, non-ancient, ancient + let raw_storages = vec![ + vec![ancient], + vec![ancient_1_plus], + vec![non_ancient_storage], + vec![ancient3], + ]; + let snapshot_storages = SortedStorages::new(&raw_storages); + let one_epoch_old_slot = 0; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(Vec::::default(), ancient_slots); + let one_epoch_old_slot = slot3_ancient + 1; + let ancient_slots = + SplitAncientStorages::get_ancient_slots(one_epoch_old_slot, &snapshot_storages); + assert_eq!(vec![slot1_ancient, slot1_plus_ancient], ancient_slots); + } + } + } } From eec2a2a301bfe24c7d4b45ac8fa3c25c980bc465 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Wed, 5 Oct 2022 19:29:22 -0400 Subject: [PATCH 07/65] Move epoch accounts hash utility functions (#28219) --- runtime/src/epoch_accounts_hash.rs | 146 +---------------------- runtime/src/epoch_accounts_hash/utils.rs | 140 ++++++++++++++++++++++ 2 files changed, 144 insertions(+), 142 deletions(-) create mode 100644 runtime/src/epoch_accounts_hash/utils.rs diff --git a/runtime/src/epoch_accounts_hash.rs b/runtime/src/epoch_accounts_hash.rs index 010d8a1640fdb0..db6df74209bd2e 100644 --- a/runtime/src/epoch_accounts_hash.rs +++ b/runtime/src/epoch_accounts_hash.rs @@ -7,14 +7,10 @@ //! //! This results in all nodes effectively voting on the accounts state (at least) once per epoch. -use { - crate::bank::Bank, - serde::{Deserialize, Serialize}, - solana_sdk::{ - clock::{Epoch, Slot}, - hash::Hash, - }, -}; +use solana_sdk::hash::Hash; + +mod utils; +pub use utils::*; mod manager; pub use manager::Manager as EpochAccountsHashManager; @@ -36,137 +32,3 @@ impl EpochAccountsHash { Self(accounts_hash) } } - -/// Calculation of the EAH occurs once per epoch. All nodes in the cluster must agree on which -/// slot the EAH is based on. This slot will be at an offset into the epoch, and referred to as -/// the "start" slot for the EAH calculation. -#[must_use] -#[inline] -pub fn calculation_offset_start(bank: &Bank) -> Slot { - calculation_info(bank).calculation_offset_start -} - -/// Calculation of the EAH occurs once per epoch. All nodes in the cluster must agree on which -/// bank will hash the EAH into its `Bank::hash`. 
This slot will be at an offset into the epoch, -/// and referred to as the "stop" slot for the EAH calculation. All nodes must complete the EAH -/// calculation before this slot! -#[must_use] -#[inline] -pub fn calculation_offset_stop(bank: &Bank) -> Slot { - calculation_info(bank).calculation_offset_stop -} - -/// For the epoch that `bank` is in, get the slot that the EAH calculation starts -#[must_use] -#[inline] -pub fn calculation_start(bank: &Bank) -> Slot { - calculation_info(bank).calculation_start -} - -/// For the epoch that `bank` is in, get the slot that the EAH calculation stops -#[must_use] -#[inline] -pub fn calculation_stop(bank: &Bank) -> Slot { - calculation_info(bank).calculation_stop -} - -/// For the epoch that `bank` is in, get all the EAH calculation information -pub fn calculation_info(bank: &Bank) -> CalculationInfo { - let epoch = bank.epoch(); - let epoch_schedule = bank.epoch_schedule(); - - let slots_per_epoch = epoch_schedule.get_slots_in_epoch(epoch); - let calculation_offset_start = slots_per_epoch / 4; - let calculation_offset_stop = slots_per_epoch / 4 * 3; - - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); - let last_slot_in_epoch = epoch_schedule.get_last_slot_in_epoch(epoch); - let calculation_start = first_slot_in_epoch.saturating_add(calculation_offset_start); - let calculation_stop = first_slot_in_epoch.saturating_add(calculation_offset_stop); - - CalculationInfo { - epoch, - slots_per_epoch, - first_slot_in_epoch, - last_slot_in_epoch, - calculation_offset_start, - calculation_offset_stop, - calculation_start, - calculation_stop, - } -} - -/// All the EAH calculation information for a specific epoch -/// -/// Computing the EAH calculation information looks up a bunch of values. Instead of throwing -/// those values away, they are kept in here as well. This may aid in future debugging, and the -/// additional fields are trivial in size. 
-#[derive(Debug, Default, Copy, Clone)] -pub struct CalculationInfo { - /* - * The values that were looked up, which were needed to get the calculation info - */ - /// The epoch this information applies to - pub epoch: Epoch, - /// Number of slots in this epoch - pub slots_per_epoch: u64, - /// First slot in this epoch - pub first_slot_in_epoch: Slot, - /// Last slot in this epoch - pub last_slot_in_epoch: Slot, - - /* - * The computed values for the calculation info - */ - /// Offset into the epoch when the EAH calculation starts - pub calculation_offset_start: Slot, - /// Offset into the epoch when the EAH calculation stops - pub calculation_offset_stop: Slot, - /// Absolute slot where the EAH calculation starts - pub calculation_start: Slot, - /// Absolute slot where the EAH calculation stops - pub calculation_stop: Slot, -} - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{epoch_schedule::EpochSchedule, genesis_config::GenesisConfig}, - }; - - #[test] - fn test_calculation_offset_bounds() { - let bank = Bank::default_for_tests(); - let offset_start = calculation_offset_start(&bank); - let offset_stop = calculation_offset_stop(&bank); - assert!(offset_start < offset_stop); - } - - #[test] - fn test_calculation_bounds() { - let bank = Bank::default_for_tests(); - let start = calculation_start(&bank); - let stop = calculation_stop(&bank); - assert!(start < stop); - } - - #[test] - fn test_calculation_info() { - for slots_per_epoch in [32, 100, 65_536, 432_000, 123_456_789] { - for warmup in [false, true] { - let genesis_config = GenesisConfig { - epoch_schedule: EpochSchedule::custom(slots_per_epoch, slots_per_epoch, warmup), - ..GenesisConfig::default() - }; - let info = calculation_info(&Bank::new_for_tests(&genesis_config)); - assert!(info.calculation_offset_start < info.calculation_offset_stop); - assert!(info.calculation_offset_start < info.slots_per_epoch); - assert!(info.calculation_offset_stop < info.slots_per_epoch); - assert!(info.calculation_start < info.calculation_stop,); - assert!(info.calculation_start > info.first_slot_in_epoch,); - assert!(info.calculation_stop < info.last_slot_in_epoch,); - } - } - } -} diff --git a/runtime/src/epoch_accounts_hash/utils.rs b/runtime/src/epoch_accounts_hash/utils.rs new file mode 100644 index 00000000000000..5de819e984badd --- /dev/null +++ b/runtime/src/epoch_accounts_hash/utils.rs @@ -0,0 +1,140 @@ +//! Utility functions and types for Epoch Accounts Hash + +use { + crate::bank::Bank, + solana_sdk::clock::{Epoch, Slot}, +}; + +/// Calculation of the EAH occurs once per epoch. All nodes in the cluster must agree on which +/// slot the EAH is based on. This slot will be at an offset into the epoch, and referred to as +/// the "start" slot for the EAH calculation. +#[must_use] +#[inline] +pub fn calculation_offset_start(bank: &Bank) -> Slot { + calculation_info(bank).calculation_offset_start +} + +/// Calculation of the EAH occurs once per epoch. All nodes in the cluster must agree on which +/// bank will hash the EAH into its `Bank::hash`. This slot will be at an offset into the epoch, +/// and referred to as the "stop" slot for the EAH calculation. All nodes must complete the EAH +/// calculation before this slot! 
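+///
+/// As a worked example (using the formulas in `calculation_info()` below): with
+/// 432,000 slots per epoch, the start offset is 432,000 / 4 = 108,000 and the
+/// stop offset is 432,000 / 4 * 3 = 324,000.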
+#[must_use] +#[inline] +pub fn calculation_offset_stop(bank: &Bank) -> Slot { + calculation_info(bank).calculation_offset_stop +} + +/// For the epoch that `bank` is in, get the slot that the EAH calculation starts +#[must_use] +#[inline] +pub fn calculation_start(bank: &Bank) -> Slot { + calculation_info(bank).calculation_start +} + +/// For the epoch that `bank` is in, get the slot that the EAH calculation stops +#[must_use] +#[inline] +pub fn calculation_stop(bank: &Bank) -> Slot { + calculation_info(bank).calculation_stop +} + +/// For the epoch that `bank` is in, get all the EAH calculation information +pub fn calculation_info(bank: &Bank) -> CalculationInfo { + let epoch = bank.epoch(); + let epoch_schedule = bank.epoch_schedule(); + + let slots_per_epoch = epoch_schedule.get_slots_in_epoch(epoch); + let calculation_offset_start = slots_per_epoch / 4; + let calculation_offset_stop = slots_per_epoch / 4 * 3; + + let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); + let last_slot_in_epoch = epoch_schedule.get_last_slot_in_epoch(epoch); + let calculation_start = first_slot_in_epoch.saturating_add(calculation_offset_start); + let calculation_stop = first_slot_in_epoch.saturating_add(calculation_offset_stop); + + CalculationInfo { + epoch, + slots_per_epoch, + first_slot_in_epoch, + last_slot_in_epoch, + calculation_offset_start, + calculation_offset_stop, + calculation_start, + calculation_stop, + } +} + +/// All the EAH calculation information for a specific epoch +/// +/// Computing the EAH calculation information looks up a bunch of values. Instead of throwing +/// those values away, they are kept in here as well. This may aid in future debugging, and the +/// additional fields are trivial in size. +#[derive(Debug, Default, Copy, Clone)] +pub struct CalculationInfo { + /* + * The values that were looked up, which were needed to get the calculation info + */ + /// The epoch this information applies to + pub epoch: Epoch, + /// Number of slots in this epoch + pub slots_per_epoch: u64, + /// First slot in this epoch + pub first_slot_in_epoch: Slot, + /// Last slot in this epoch + pub last_slot_in_epoch: Slot, + + /* + * The computed values for the calculation info + */ + /// Offset into the epoch when the EAH calculation starts + pub calculation_offset_start: Slot, + /// Offset into the epoch when the EAH calculation stops + pub calculation_offset_stop: Slot, + /// Absolute slot where the EAH calculation starts + pub calculation_start: Slot, + /// Absolute slot where the EAH calculation stops + pub calculation_stop: Slot, +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{epoch_schedule::EpochSchedule, genesis_config::GenesisConfig}, + }; + + #[test] + fn test_calculation_offset_bounds() { + let bank = Bank::default_for_tests(); + let offset_start = calculation_offset_start(&bank); + let offset_stop = calculation_offset_stop(&bank); + assert!(offset_start < offset_stop); + } + + #[test] + fn test_calculation_bounds() { + let bank = Bank::default_for_tests(); + let start = calculation_start(&bank); + let stop = calculation_stop(&bank); + assert!(start < stop); + } + + #[test] + fn test_calculation_info() { + for slots_per_epoch in [32, 100, 65_536, 432_000, 123_456_789] { + for warmup in [false, true] { + let genesis_config = GenesisConfig { + epoch_schedule: EpochSchedule::custom(slots_per_epoch, slots_per_epoch, warmup), + ..GenesisConfig::default() + }; + let info = calculation_info(&Bank::new_for_tests(&genesis_config)); + 
assert!(info.calculation_offset_start < info.calculation_offset_stop); + assert!(info.calculation_offset_start < info.slots_per_epoch); + assert!(info.calculation_offset_stop < info.slots_per_epoch); + assert!(info.calculation_start < info.calculation_stop,); + assert!(info.calculation_start > info.first_slot_in_epoch,); + assert!(info.calculation_stop < info.last_slot_in_epoch,); + } + } + } +} From 435d4aded9437ecd24f46b1de80ea66d21c47684 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 5 Oct 2022 19:42:06 -0700 Subject: [PATCH 08/65] run initial hash calc in background, using background threads (#28239) --- runtime/benches/accounts.rs | 1 + runtime/src/accounts.rs | 2 ++ runtime/src/accounts_db.rs | 19 +++++++++++++++---- runtime/src/bank.rs | 4 ++++ 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 5fcdd00bafa9d9..360d3f2cffb2d3 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -113,6 +113,7 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { false, false, true, + false, )) }); } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index c46df23aa02dd4..376631d85e6b56 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -834,6 +834,7 @@ impl Accounts { ignore_mismatch: bool, store_detailed_debug_info: bool, enable_rehashing: bool, + use_bg_thread_pool: bool, ) -> bool { if let Err(err) = self.accounts_db.verify_bank_hash_and_lamports_new( slot, @@ -845,6 +846,7 @@ impl Accounts { ignore_mismatch, store_detailed_debug_info, enable_rehashing, + use_bg_thread_pool, ) { warn!("verify_bank_hash failed: {:?}, slot: {}", err, slot); false diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index cef35a15369f1f..a4b5640dcd29fb 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -7513,6 +7513,7 @@ impl AccountsDb { epoch_schedule: &EpochSchedule, rent_collector: &RentCollector, enable_rehashing: bool, + use_bg_thread_pool: bool, ) -> Result<(), BankHashVerificationError> { self.verify_bank_hash_and_lamports_new( slot, @@ -7524,6 +7525,7 @@ impl AccountsDb { false, false, enable_rehashing, + use_bg_thread_pool, ) } @@ -7540,20 +7542,19 @@ impl AccountsDb { ignore_mismatch: bool, store_hash_raw_data_for_debug: bool, enable_rehashing: bool, + use_bg_thread_pool: bool, ) -> Result<(), BankHashVerificationError> { use BankHashVerificationError::*; let use_index = false; let check_hash = false; // this will not be supported anymore - // interesting to consider this - let is_startup = true; let (calculated_hash, calculated_lamports) = self .calculate_accounts_hash_helper_with_verify( use_index, test_hash_calculation, slot, CalcAccountsHashConfig { - use_bg_thread_pool: !is_startup, + use_bg_thread_pool, check_hash, ancestors: Some(ancestors), epoch_schedule, @@ -11843,6 +11844,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ) .unwrap(); } @@ -12247,6 +12249,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Ok(_) ); @@ -12261,6 +12264,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Err(MissingBankHash) ); @@ -12284,6 +12288,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Err(MismatchedBankHash) ); @@ -12313,6 +12318,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Ok(_) ); @@ -12335,12 
+12341,13 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Ok(_) ); assert_matches!( - db.verify_bank_hash_and_lamports(some_slot, &ancestors, 10, true, &EpochSchedule::default(), &RentCollector::default(), true,), + db.verify_bank_hash_and_lamports(some_slot, &ancestors, 10, true, &EpochSchedule::default(), &RentCollector::default(), true, false,), Err(MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10 ); } @@ -12368,6 +12375,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Ok(_) ); @@ -12412,6 +12420,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ), Err(MismatchedBankHash) ); @@ -13032,6 +13041,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ) .unwrap(); @@ -13045,6 +13055,7 @@ pub mod tests { &EpochSchedule::default(), &RentCollector::default(), true, + false, ) .unwrap(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4b894ccb225b71..b9cb13f9e87b5d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7010,6 +7010,8 @@ impl Bank { config.ignore_mismatch, config.store_hash_raw_data_for_debug, enable_rehashing, + // true to run using bg thread pool + true, ); accounts_ .accounts_db @@ -7031,6 +7033,8 @@ impl Bank { config.ignore_mismatch, config.store_hash_raw_data_for_debug, enable_rehashing, + // fg is waiting for this to run, so we can use the fg thread pool + false, ); self.set_initial_accounts_hash_verification_completed(); result From 40ebebe1402ccd826785d4e82d5acf837e7d64bb Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 6 Oct 2022 14:04:04 +0800 Subject: [PATCH 09/65] Explorer: Display block utc timestamp in account history (#28248) --- .../history/TransactionHistoryCard.tsx | 25 ++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/explorer/src/components/account/history/TransactionHistoryCard.tsx b/explorer/src/components/account/history/TransactionHistoryCard.tsx index 068b0ee6201e84..2d49e3e29d92e4 100644 --- a/explorer/src/components/account/history/TransactionHistoryCard.tsx +++ b/explorer/src/components/account/history/TransactionHistoryCard.tsx @@ -15,6 +15,7 @@ import { import { FetchStatus } from "providers/cache"; import { LoadingCard } from "components/common/LoadingCard"; import { ErrorCard } from "components/common/ErrorCard"; +import { displayTimestampUtc } from "utils/date"; export function TransactionHistoryCard({ pubkey }: { pubkey: PublicKey }) { const address = pubkey.toBase58(); @@ -56,7 +57,7 @@ export function TransactionHistoryCard({ pubkey }: { pubkey: PublicKey }) { return ( - + @@ -64,9 +65,16 @@ export function TransactionHistoryCard({ pubkey }: { pubkey: PublicKey }) { {hasTimestamps && ( - - {blockTime ? : "---"} - + <> + + {blockTime ? : "---"} + + + {blockTime + ? 
displayTimestampUtc(blockTime * 1000, true) + : "---"} + </td> + </> )} @@ -90,8 +98,13 @@ export function TransactionHistoryCard({ pubkey }: { pubkey: PublicKey }) { <th className="text-muted">Transaction Signature</th> - <th className="text-muted">Slot</th> - {hasTimestamps && <th className="text-muted">Age</th>} + <th className="text-muted">Block</th> + {hasTimestamps && ( + <> + <th className="text-muted">Age</th> + <th className="text-muted">Timestamp</th> + </> + )} <th className="text-muted">Result</th> From db37d6b2ec57e917e09977e7155c32bb3e697ebb Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 6 Oct 2022 17:12:56 +0800 Subject: [PATCH 10/65] RPC: Improve unsupported transaction error message (#28249) --- rpc-client-api/src/custom_error.rs | 7 ++++++- rpc/src/rpc.rs | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index 33bcf9b810595e..483402dd40a59e 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -195,7 +195,12 @@ impl From<RpcCustomError> for Error { }, RpcCustomError::UnsupportedTransactionVersion(version) => Self { code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION), - message: format!("Transaction version ({}) is not supported", version), + message: format!( + "Transaction version ({0}) is not supported by the requesting client. \ + Please try the request again with the following configuration parameter: \ + \"maxSupportedTransactionVersion\": {0}", + version + ), data: None, }, RpcCustomError::MinContextSlotNotReached { context_slot } => Self { diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index f7c1a4bda006bb..ad79a697b58d2d 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -6753,7 +6753,11 @@ pub mod tests { let response = parse_failure_response(rpc.handle_request_sync(request)); let expected = ( JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION, - String::from("Transaction version (0) is not supported"), + String::from( + "Transaction version (0) is not supported by the requesting client. 
\ + Please try the request again with the following configuration parameter: \ + \"maxSupportedTransactionVersion\": 0", + ), ); assert_eq!(response, expected); } From c7c67f010b9984eabab5ad0b0285c1e5ecb30106 Mon Sep 17 00:00:00 2001 From: Kartik Soneji Date: Thu, 6 Oct 2022 16:08:48 +0530 Subject: [PATCH 11/65] docs: Better install command for Windows (#28138) Pass install command to `cmd` on Windows --- docs/src/cli/install-solana-cli-tools.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/cli/install-solana-cli-tools.md b/docs/src/cli/install-solana-cli-tools.md index 467ed94b6b6506..10b5023d3cd1d4 100644 --- a/docs/src/cli/install-solana-cli-tools.md +++ b/docs/src/cli/install-solana-cli-tools.md @@ -72,7 +72,7 @@ solana --version installer into a temporary directory: ```bash -curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs +cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs" ``` - Copy and paste the following command, then press Enter to install the latest From a6512016a77080380694155ec128f049fc0b8c01 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 6 Oct 2022 15:41:55 +0000 Subject: [PATCH 12/65] uses references for MerkleBranch root and proof fields (#28243) --- ledger/src/shred/merkle.rs | 180 +++++++++++++++++++------------------ 1 file changed, 94 insertions(+), 86 deletions(-) diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index c8cd2537704378..4b6cd792f79471 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -29,8 +29,7 @@ use { }, static_assertions::const_assert_eq, std::{ - io::{Cursor, Seek, SeekFrom, Write}, - iter::repeat_with, + io::{Cursor, Write}, ops::Range, time::Instant, }, @@ -58,7 +57,6 @@ type MerkleProofEntry = [u8; 20]; pub struct ShredData { common_header: ShredCommonHeader, data_header: DataShredHeader, - merkle_branch: MerkleBranch, payload: Vec<u8>, } @@ -69,7 +67,6 @@ pub struct ShredData { pub struct ShredCode { common_header: ShredCommonHeader, coding_header: CodingShredHeader, - merkle_branch: MerkleBranch, payload: Vec<u8>, } @@ -79,30 +76,23 @@ pub(super) enum Shred { ShredData(ShredData), } -#[derive(Clone, Debug, Eq, PartialEq)] -struct MerkleBranch { - root: MerkleRoot, - proof: Vec<MerkleProofEntry>, +struct MerkleBranch<'a> { + root: &'a MerkleRoot, + proof: Vec<&'a MerkleProofEntry>, } impl Shred { dispatch!(fn common_header(&self) -> &ShredCommonHeader); dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>); dispatch!(fn erasure_shard_index(&self) -> Result<usize, Error>); + dispatch!(fn merkle_root(&self) -> Result<&MerkleRoot, Error>); dispatch!(fn merkle_tree_node(&self) -> Result<Hash, Error>); dispatch!(fn payload(&self) -> &Vec<u8>); dispatch!(fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error>); dispatch!(fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error>); dispatch!(fn set_signature(&mut self, signature: Signature)); dispatch!(fn signed_message(&self) -> &[u8]); - fn merkle_root(&self) -> &MerkleRoot { - match self { - Self::ShredCode(shred) => &shred.merkle_branch.root, - Self::ShredData(shred) => &shred.merkle_branch.root, - } - } - #[must_use] fn verify(&self, pubkey: &Pubkey) -> bool { let 
message = self.signed_message(); @@ -163,6 +153,27 @@ impl ShredData { Some(offset..offset + SIZE_OF_MERKLE_ROOT) } + fn merkle_root(&self) -> Result<&MerkleRoot, Error> { + let proof_size = self.proof_size()?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let root = self + .payload + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .ok_or(Error::InvalidPayloadSize(self.payload.len()))?; + Ok(<&MerkleRoot>::try_from(root).unwrap()) + } + + fn merkle_branch(&self) -> Result<MerkleBranch, Error> { + let proof_size = self.proof_size()?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let size = SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY; + MerkleBranch::try_from( + self.payload + .get(offset..offset + size) + .ok_or(Error::InvalidPayloadSize(self.payload.len()))?, + ) + } + fn merkle_tree_node(&self) -> Result<Hash, Error> { let chunk = self.erasure_shard_as_slice()?; Ok(hashv(&[MERKLE_HASH_PREFIX_LEAF, chunk])) @@ -171,7 +182,7 @@ impl ShredData { fn verify_merkle_proof(&self) -> Result<bool, Error> { let node = self.merkle_tree_node()?; let index = self.erasure_shard_index()?; - Ok(verify_merkle_proof(index, node, &self.merkle_branch)) + Ok(verify_merkle_proof(index, node, &self.merkle_branch()?)) } fn from_recovered_shard(signature: &Signature, mut shard: Vec<u8>) -> Result<Self, Error> { @@ -196,12 +207,11 @@ impl ShredData { Ok(Self { common_header, data_header, - merkle_branch: MerkleBranch::new_zeroed(proof_size), payload: shard, }) } - fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error> { let proof_size = self.proof_size()?; if merkle_branch.proof.len() != usize::from(proof_size) { return Err(Error::InvalidMerkleProof); } @@ -216,14 +226,13 @@ impl ShredData { for entry in &merkle_branch.proof { bincode::serialize_into(&mut cursor, entry)?; } - self.merkle_branch = merkle_branch; Ok(()) } fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error> { match self.common_header.shred_variant { ShredVariant::MerkleData(proof_size) => { - if self.merkle_branch.proof.len() != usize::from(proof_size) { + if self.merkle_branch()?.proof.len() != usize::from(proof_size) { return Err(Error::InvalidProofSize(proof_size)); } } @@ -260,6 +269,27 @@ impl ShredCode { .ok_or(Error::InvalidProofSize(proof_size)) } + fn merkle_root(&self) -> Result<&MerkleRoot, Error> { + let proof_size = self.proof_size()?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let root = self + .payload + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .ok_or(Error::InvalidPayloadSize(self.payload.len()))?; + Ok(<&MerkleRoot>::try_from(root).unwrap()) + } + + fn merkle_branch(&self) -> Result<MerkleBranch, Error> { + let proof_size = self.proof_size()?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let size = SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY; + MerkleBranch::try_from( + self.payload + .get(offset..offset + size) + .ok_or(Error::InvalidPayloadSize(self.payload.len()))?, + ) + } + fn merkle_tree_node(&self) -> Result<Hash, Error> { let proof_size = self.proof_size()?; let shard_size = Self::capacity(proof_size)?; @@ -273,7 +303,7 @@ impl ShredCode { fn verify_merkle_proof(&self) -> Result<bool, Error> { let node = self.merkle_tree_node()?; let index = self.erasure_shard_index()?; - Ok(verify_merkle_proof(index, node, &self.merkle_branch)) + Ok(verify_merkle_proof(index, node, &self.merkle_branch()?)) } pub(super) fn get_signed_message_range(proof_size: u8) -> 
Option<Range<usize>> { @@ -283,7 +313,7 @@ impl ShredCode { pub(super) fn erasure_mismatch(&self, other: &ShredCode) -> bool { shred_code::erasure_mismatch(self, other) - || self.merkle_branch.root != other.merkle_branch.root + || self.merkle_root().ok() != other.merkle_root().ok() || self.common_header.signature != other.common_header.signature } @@ -311,12 +341,11 @@ impl ShredCode { Ok(Self { common_header, coding_header, - merkle_branch: MerkleBranch::new_zeroed(proof_size), payload: shard, }) } - fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + fn set_merkle_branch(&mut self, merkle_branch: &MerkleBranch) -> Result<(), Error> { let proof_size = self.proof_size()?; if merkle_branch.proof.len() != usize::from(proof_size) { return Err(Error::InvalidMerkleProof); } @@ -331,14 +360,13 @@ impl ShredCode { for entry in &merkle_branch.proof { bincode::serialize_into(&mut cursor, entry)?; } - self.merkle_branch = merkle_branch; Ok(()) } fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error> { match self.common_header.shred_variant { ShredVariant::MerkleCode(proof_size) => { - if self.merkle_branch.proof.len() != usize::from(proof_size) { + if self.merkle_branch()?.proof.len() != usize::from(proof_size) { return Err(Error::InvalidProofSize(proof_size)); } } @@ -353,15 +381,6 @@ } } -impl MerkleBranch { - fn new_zeroed(proof_size: u8) -> Self { - Self { - root: MerkleRoot::default(), - proof: vec![MerkleProofEntry::default(); usize::from(proof_size)], - } - } -} - impl ShredTrait for ShredData { impl_shred_common!(); @@ -375,31 +394,20 @@ impl ShredTrait for ShredData { const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; fn from_payload(mut payload: Vec<u8>) -> Result<Self, Error> { + // see: https://github.com/solana-labs/solana/pull/10109 if payload.len() < Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(payload.len())); } payload.truncate(Self::SIZE_OF_PAYLOAD); let mut cursor = Cursor::new(&payload[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - let proof_size = match common_header.shred_variant { - ShredVariant::MerkleData(proof_size) => proof_size, - _ => return Err(Error::InvalidShredVariant), - }; + if !matches!(common_header.shred_variant, ShredVariant::MerkleData(_)) { + return Err(Error::InvalidShredVariant); + } let data_header = deserialize_from_with_limit(&mut cursor)?; - // Skip data buffer. - let data_buffer_size = Self::capacity(proof_size)?; - let data_buffer_size = i64::try_from(data_buffer_size).unwrap(); - cursor.seek(SeekFrom::Current(data_buffer_size))?; - // Deserialize merkle branch. 
- let root = deserialize_from_with_limit(&mut cursor)?; - let proof = repeat_with(|| deserialize_from_with_limit(&mut cursor)) - .take(usize::from(proof_size)) - .collect::<Result<_, _>>()?; - let merkle_branch = MerkleBranch { root, proof }; let shred = Self { common_header, data_header, - merkle_branch, payload, }; shred.sanitize(/*verify_merkle_proof:*/ true)?; @@ -441,7 +449,7 @@ impl ShredTrait for ShredData { } fn signed_message(&self) -> &[u8] { - self.merkle_branch.root.as_ref() + self.merkle_root().map(AsRef::as_ref).unwrap_or_default() } } @@ -453,27 +461,18 @@ impl ShredTrait for ShredCode { fn from_payload(mut payload: Vec<u8>) -> Result<Self, Error> { let mut cursor = Cursor::new(&payload[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - let proof_size = match common_header.shred_variant { - ShredVariant::MerkleCode(proof_size) => proof_size, - _ => return Err(Error::InvalidShredVariant), - }; + if !matches!(common_header.shred_variant, ShredVariant::MerkleCode(_)) { + return Err(Error::InvalidShredVariant); + } let coding_header = deserialize_from_with_limit(&mut cursor)?; - // Skip erasure code shard. - let shard_size = Self::capacity(proof_size)?; - let shard_size = i64::try_from(shard_size).unwrap(); - cursor.seek(SeekFrom::Current(shard_size))?; - // Deserialize merkle branch. - let root = deserialize_from_with_limit(&mut cursor)?; - let proof = repeat_with(|| deserialize_from_with_limit(&mut cursor)) - .take(usize::from(proof_size)) - .collect::<Result<_, _>>()?; - let merkle_branch = MerkleBranch { root, proof }; // see: https://github.com/solana-labs/solana/pull/10109 + if payload.len() < Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(payload.len())); + } payload.truncate(Self::SIZE_OF_PAYLOAD); let shred = Self { common_header, coding_header, - merkle_branch, payload, }; shred.sanitize(/*verify_merkle_proof:*/ true)?; @@ -515,7 +514,7 @@ impl ShredTrait for ShredCode { } fn signed_message(&self) -> &[u8] { - self.merkle_branch.root.as_ref() + self.merkle_root().map(AsRef::as_ref).unwrap_or_default() } } @@ -549,6 +548,23 @@ impl ShredCodeTrait for ShredCode { } } +impl<'a> TryFrom<&'a [u8]> for MerkleBranch<'a> { + type Error = Error; + fn try_from(merkle_branch: &'a [u8]) -> Result<Self, Error> { + if merkle_branch.len() < SIZE_OF_MERKLE_ROOT { + return Err(Error::InvalidMerkleProof); + } + let (root, proof) = merkle_branch.split_at(SIZE_OF_MERKLE_ROOT); + let root = <&MerkleRoot>::try_from(root).unwrap(); + let proof = proof + .chunks(SIZE_OF_MERKLE_PROOF_ENTRY) + .map(<&MerkleProofEntry>::try_from) + .collect::<Result<_, _>>() + .map_err(|_| Error::InvalidMerkleProof)?; + Ok(Self { root, proof }) + } +} + // Obtains parent's hash by joining two sibling nodes in merkle tree. 
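// One plausible way to realize the proof check sketched here, assuming the
// `Hash`, `MerkleProofEntry`, and `join_nodes` definitions from this file:
// walk the proof bottom-up, joining the running node with each sibling; the
// index parity at each level decides which side the sibling joins on. This is
// only an illustration of the technique, not the crate's implementation.
fn fold_merkle_proof_sketch(mut index: usize, leaf: Hash, proof: &[&MerkleProofEntry]) -> Hash {
    let mut node = leaf;
    for sibling in proof {
        node = if index % 2 == 0 {
            join_nodes(node, sibling) // running node is a left child
        } else {
            join_nodes(sibling, node) // running node is a right child
        };
        index >>= 1;
    }
    // callers would compare the first SIZE_OF_MERKLE_ROOT bytes of `node`
    // against the branch root
    node
}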
fn join_nodes<S: AsRef<[u8]>, T: AsRef<[u8]>>(node: S, other: T) -> Hash { let node = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; @@ -594,11 +610,11 @@ fn make_merkle_branch( return None; } let mut offset = 0; - let mut proof = Vec::<MerkleProofEntry>::new(); + let mut proof = Vec::<&MerkleProofEntry>::new(); while size > 1 { let node = tree.get(offset + (index ^ 1).min(size - 1))?; let entry = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; - proof.push(MerkleProofEntry::try_from(entry).unwrap()); + proof.push(<&MerkleProofEntry>::try_from(entry).unwrap()); offset += size; size = (size + 1) >> 1; index >>= 1; @@ -607,7 +623,7 @@ fn make_merkle_branch( return None; } let root = &tree.last()?.as_ref()[..SIZE_OF_MERKLE_ROOT]; - let root = MerkleRoot::try_from(root).unwrap(); + let root = <&MerkleRoot>::try_from(root).unwrap(); Some(MerkleBranch { root, proof }) } @@ -745,7 +761,7 @@ pub(super) fn recover( let merkle_root = MerkleRoot::try_from(merkle_root).unwrap(); for (index, (shred, mask)) in shreds.iter_mut().zip(&mask).enumerate() { if *mask { - if shred.merkle_root() != &merkle_root { + if shred.merkle_root()? != &merkle_root { return Err(Error::InvalidMerkleProof); } } else { @@ -754,7 +770,7 @@ pub(super) fn recover( if merkle_branch.proof.len() != usize::from(proof_size) { return Err(Error::InvalidMerkleProof); } - shred.set_merkle_branch(merkle_branch)?; + shred.set_merkle_branch(&merkle_branch)?; // Assert that shred payload is fully populated. debug_assert_eq!(shred, { let shred = shred.payload().clone(); @@ -803,7 +819,6 @@ pub(super) fn make_shreds_from_data( common_header: ShredCommonHeader, mut data_header: DataShredHeader, data: &[u8], - merkle_branch: MerkleBranch, ) -> ShredData { let size = ShredData::SIZE_OF_HEADERS + data.len(); let mut payload = vec![0u8; ShredData::SIZE_OF_PAYLOAD]; @@ -812,7 +827,6 @@ pub(super) fn make_shreds_from_data( ShredData { common_header, data_header, - merkle_branch, payload, } } @@ -847,7 +861,6 @@ pub(super) fn make_shreds_from_data( size: 0u16, } }; - let merkle_branch = MerkleBranch::new_zeroed(proof_size); // Split the data into erasure batches and initialize // data shreds from chunks of each batch. 
let mut shreds = Vec::<ShredData>::new(); @@ -855,7 +868,7 @@ pub(super) fn make_shreds_from_data( let (chunk, rest) = data.split_at(chunk_size); common_header.fec_set_index = common_header.index; for shred in chunk.chunks(data_buffer_size) { - let shred = new_shred_data(common_header, data_header, shred, merkle_branch.clone()); + let shred = new_shred_data(common_header, data_header, shred); shreds.push(shred); common_header.index += 1; } @@ -873,11 +886,10 @@ pub(super) fn make_shreds_from_data( .then_some((proof_size, data_buffer_size)) }) .ok_or(Error::UnknownProofSize)?; - let merkle_branch = MerkleBranch::new_zeroed(proof_size); common_header.shred_variant = ShredVariant::MerkleData(proof_size); common_header.fec_set_index = common_header.index; for shred in data.chunks(data_buffer_size) { - let shred = new_shred_data(common_header, data_header, shred, merkle_branch.clone()); + let shred = new_shred_data(common_header, data_header, shred); shreds.push(shred); common_header.index += 1; } @@ -993,7 +1005,6 @@ fn make_erasure_batch( num_coding_shreds: num_coding_shreds as u16, position: 0, }; - let merkle_branch = MerkleBranch::new_zeroed(proof_size); for code in parity { let mut payload = vec![0u8; ShredCode::SIZE_OF_PAYLOAD]; let mut cursor = Cursor::new(&mut payload[..]); @@ -1003,7 +1014,6 @@ fn make_erasure_batch( let shred = ShredCode { common_header, coding_header, - merkle_branch: merkle_branch.clone(), payload, }; shreds.push(Shred::ShredCode(shred)); @@ -1028,7 +1038,7 @@ fn make_erasure_batch( let merkle_branch = make_merkle_branch(index, erasure_batch_size, &tree) .ok_or(Error::InvalidMerkleProof)?; debug_assert_eq!(merkle_branch.proof.len(), usize::from(proof_size)); - shred.set_merkle_branch(merkle_branch)?; + shred.set_merkle_branch(&merkle_branch)?; shred.set_signature(signature); debug_assert!(shred.verify(&keypair.pubkey())); debug_assert_matches!(shred.sanitize(/*verify_merkle_proof:*/ true), Ok(())); @@ -1128,7 +1138,7 @@ mod test { for index in 0..size { let branch = make_merkle_branch(index, size, &tree).unwrap(); let root = &tree.last().unwrap().as_ref()[..SIZE_OF_MERKLE_ROOT]; - assert_eq!(&branch.root, root); + assert_eq!(branch.root, root); assert!(verify_merkle_proof(index, nodes[index], &branch)); for i in (0..size).filter(|&i| i != index) { assert!(!verify_merkle_proof(i, nodes[i], &branch)); } @@ -1207,7 +1217,6 @@ let shred = ShredData { common_header, data_header, - merkle_branch: MerkleBranch::new_zeroed(proof_size), payload, }; shreds.push(Shred::ShredData(shred)); @@ -1242,7 +1251,6 @@ let shred = ShredCode { common_header, coding_header, - merkle_branch: MerkleBranch::new_zeroed(proof_size), payload, }; shreds.push(Shred::ShredCode(shred)); @@ -1256,7 +1264,7 @@ for (index, shred) in shreds.iter_mut().enumerate() { let merkle_branch = make_merkle_branch(index, num_shreds, &tree).unwrap(); assert_eq!(merkle_branch.proof.len(), usize::from(proof_size)); - shred.set_merkle_branch(merkle_branch).unwrap(); + shred.set_merkle_branch(&merkle_branch).unwrap(); let signature = keypair.sign_message(shred.signed_message()); shred.set_signature(signature); assert!(shred.verify(&keypair.pubkey())); From a26fe6d1be69cb396d84588f1783490a70bfc72e Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 6 Oct 2022 11:39:39 -0500 Subject: [PATCH 13/65] fix node count query (#28259) * fix node count query * fix column name --- .../grafana-provisioning/dashboards/cluster-monitor.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json b/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json index 3e0b843cfba13b..f2b1e229bf7afc 100644 --- a/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json +++ b/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json @@ -251,7 +251,7 @@ "hide": false, "orderByTime": "ASC", "policy": "default", - "query": "SELECT LAST(median) FROM ( SELECT median(live_count) FROM \"$testnet\".\"autogen\".\"cluster_info-num_nodes\" WHERE $timeFilter AND live_count > 0 GROUP BY time(5s) )\n", + "query": "SELECT LAST(median) FROM ( SELECT median(num_nodes) - median(num_nodes_dead) as median FROM \"$testnet\".\"autogen\".\"cluster_nodes_broadcast\" WHERE $timeFilter AND live_count > 0 GROUP BY time(5s) )\n", "rawQuery": true, "refId": "A", "resultFormat": "time_series", From 9e8f21ec264006493154d39b39fef2f30207fb89 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 6 Oct 2022 12:17:49 -0500 Subject: [PATCH 14/65] Add metric to track number of slots processed in load_frozen_forks() (#28247) --- ledger/src/blockstore_processor.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 7670dc8130f921..b57dc3b9d4d0b3 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -867,11 +867,12 @@ pub fn process_blockstore_from_root( let mut timing = ExecuteTimings::default(); // Iterate and replay slots from blockstore starting from `start_slot` + let mut num_slots_processed = 0; if let Some(start_slot_meta) = blockstore .meta(start_slot) .unwrap_or_else(|_| panic!("Failed to get meta for slot {}", start_slot)) { - load_frozen_forks( + num_slots_processed = load_frozen_forks( bank_forks, start_slot, &start_slot_meta, @@ -906,6 +907,7 @@ pub fn process_blockstore_from_root( i64 ), ("slot", bank_forks.read().unwrap().root(), i64), + ("num_slots_processed", num_slots_processed, i64), ("forks", bank_forks.read().unwrap().banks().len(), i64), ); @@ -1411,11 +1413,14 @@ fn load_frozen_forks( cache_block_meta_sender: Option<&CacheBlockMetaSender>, timing: &mut ExecuteTimings, accounts_background_request_sender: &AbsRequestSender, -) -> result::Result<(), BlockstoreProcessorError> { +) -> result::Result { let recyclers = VerifyRecyclers::default(); let mut all_banks = HashMap::new(); let mut last_status_report = Instant::now(); let mut pending_slots = vec![]; + // The total number of slots processed + let mut total_slots_elapsed = 0; + // The number of slots processed between status report updates let mut slots_elapsed = 0; let mut txs = 0; let blockstore_max_root = blockstore.max_root(); @@ -1575,6 +1580,7 @@ fn load_frozen_forks( } slots_elapsed += 1; + total_slots_elapsed += 1; trace!( "Bank for {}slot {} is complete", @@ -1604,7 +1610,7 @@ fn load_frozen_forks( ); } - Ok(()) + Ok(total_slots_elapsed) } fn run_final_hash_calc(bank: &Bank, on_halt_store_hash_raw_data_for_debug: bool) { From 0aea2da091e986caae866ebf958db588e4aaf147 Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Thu, 6 Oct 2022 19:25:18 +0200 Subject: [PATCH 15/65] docs: Update node build version to v16 (#28264) --- .github/workflows/docs.yml | 2 +- docs/yarn.lock | 30 ------------------------------ 2 files changed, 1 insertion(+), 31 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 72d5e9c20b6922..1d9cc2fb401fb7 100644 --- a/.github/workflows/docs.yml +++ 
b/.github/workflows/docs.yml @@ -58,7 +58,7 @@ jobs: if: ${{ steps.prebuild.outputs.need_to_build == 'true' }} uses: actions/setup-node@v3 with: - node-version: 14 + node-version: 16 - name: Build if: ${{ steps.prebuild.outputs.need_to_build == 'true' }} diff --git a/docs/yarn.lock b/docs/yarn.lock index 7969630912d805..03bed1adfbfa5d 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -2455,13 +2455,6 @@ "resolved" "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz" "version" "2.2.0" -"bindings@^1.5.0": - "integrity" "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==" - "resolved" "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz" - "version" "1.5.0" - dependencies: - "file-uri-to-path" "1.0.0" - "bluebird@^3.7.1": "integrity" "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" "resolved" "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz" @@ -4250,11 +4243,6 @@ "loader-utils" "^2.0.0" "schema-utils" "^3.0.0" -"file-uri-to-path@1.0.0": - "integrity" "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==" - "resolved" "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz" - "version" "1.0.0" - "filesize@6.1.0": "integrity" "sha512-LpCHtPQ3sFx67z+uh2HnSyWSLLu5Jxo21795uRDuar/EOuYWXib5EmPaGIBuSnRqH2IODiKA2k5re/K9OnN/Yg==" "resolved" "https://registry.npmjs.org/filesize/-/filesize-6.1.0.tgz" @@ -4403,19 +4391,6 @@ "resolved" "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz" "version" "1.0.0" -"fsevents@^1.2.7": - "integrity" "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==" - "resolved" "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz" - "version" "1.2.13" - dependencies: - "bindings" "^1.5.0" - "nan" "^2.12.1" - -"fsevents@~2.3.1": - "integrity" "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==" - "resolved" "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz" - "version" "2.3.2" - "function-bind@^1.1.1": "integrity" "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" "resolved" "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz" @@ -6202,11 +6177,6 @@ "dns-packet" "^1.3.1" "thunky" "^1.0.2" -"nan@^2.12.1": - "integrity" "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==" - "resolved" "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz" - "version" "2.14.2" - "nanoid@^3.1.23": "integrity" "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==" "resolved" "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz" From bb9a19ac865e32f85ca155fd81c1f5e110597231 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 6 Oct 2022 10:50:16 -0700 Subject: [PATCH 16/65] minor refactoring of accounts hash cache (#28260) --- runtime/src/accounts_db.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index a4b5640dcd29fb..0e7fade6b052a8 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -7033,24 +7033,26 @@ impl AccountsDb { { let mut load_from_cache = true; let mut hasher = std::collections::hash_map::DefaultHasher::new(); + bin_range.start.hash(&mut hasher); + bin_range.end.hash(&mut hasher); + let is_first_scan_pass = 
bin_range.start == 0; + // calculate hash representing all storages in this chunk for (slot, sub_storages) in snapshot_storages.iter_range(&range_this_chunk) { - if bin_range.start == 0 && slot < one_epoch_old { + if is_first_scan_pass && slot < one_epoch_old { self.update_old_slot_stats(stats, sub_storages); } - bin_range.start.hash(&mut hasher); - bin_range.end.hash(&mut hasher); if let Some(sub_storages) = sub_storages { if sub_storages.len() > 1 && !config.store_detailed_debug_info_on_failure { - // Having > 1 appendvecs per slot is not expected. If we have that, we just fail to cache this slot. - // However, if we're just dumping detailed debug info, we don't care, so store anyway. + // Having > 1 appendvecs per slot is not expected. If we have that, we just fail to load from the cache for this slot. + // However, if we're just dumping detailed debug info, store anyway. load_from_cache = false; break; } + // hash info about this storage let append_vec = sub_storages.first().unwrap(); - // check written_bytes here. This is necessary for tests and removes a potential for false positives. append_vec.written_bytes().hash(&mut hasher); let storage_file = append_vec.accounts.get_path(); slot.hash(&mut hasher); From eea1d3a08892d4065d824edd7ef27d4ab1bad44a Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Thu, 6 Oct 2022 10:54:39 -0700 Subject: [PATCH 17/65] Update restart guide to include the use of `solana-ledger-tool latest-optimistic-slots` (#28267) --- docs/src/running-validator/restart-cluster.md | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/src/running-validator/restart-cluster.md b/docs/src/running-validator/restart-cluster.md index 6d1fec88b792d9..2353709e85beee 100644 --- a/docs/src/running-validator/restart-cluster.md +++ b/docs/src/running-validator/restart-cluster.md @@ -1,14 +1,26 @@ ## Restarting a cluster -### Step 1. Identify the slot that the cluster will be restarted at +### Step 1. Identify the latest optimistically confirmed slot for the cluster -The highest optimistically confirmed slot is the best slot to start from, which -can be found by looking for +In Solana 1.14 or greater, run the following command to output the latest +optimistically confirmed slot your validator observed: +```bash +solana-ledger-tool -l ledger latest-optimistic-slots +``` + +In Solana 1.13 or earlier, the latest optimistically confirmed slot can be found by looking for the most recent occurrence of [this](https://github.com/solana-labs/solana/blob/0264147d42d506fb888f5c4c021a998e231a3e74/core/src/optimistic_confirmation_verifier.rs#L71) -metrics datapoint. Otherwise use the last root. +metrics datapoint. Call this slot `SLOT_X`. Note that it's possible that some validators observed an optimistically confirmed slot that's greater than others before the outage. Survey the other validators on the cluster to ensure that a greater optimistically confirmed slot does not exist before proceeding. If a greater slot value is found, use it instead. ### Step 2. Stop the validator(s) ### Step 3. 
Optionally install the new solana version From 66cd290d049912ee21685821e28aace176950303 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 6 Oct 2022 11:19:53 -0700 Subject: [PATCH 18/65] add chunk tracking in SplitAncientStorages (#28202) --- runtime/src/accounts_db.rs | 406 +++++++++++++++++++++++++++++++++---- 1 file changed, 364 insertions(+), 42 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 0e7fade6b052a8..edfafe3f3dd0fc 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1339,7 +1339,10 @@ struct SplitAncientStorages { /// # non-ancient slots to scan non_ancient_slot_count: usize, /// # chunks to use to iterate the storages + /// all ancient chunks, the special 0 and last chunks for non-full chunks, and all the 'full' chunks of normal slots chunk_count: usize, + /// start and end (exclusive) of normal (non-ancient) slots to be scanned + normal_slot_range: Range<Slot>, } impl SplitAncientStorages { @@ -1351,13 +1354,14 @@ impl SplitAncientStorages { /// When the slot gets deleted or gets consumed in an ancient append vec, it will no longer be in its chunk. /// The results of scanning a chunk of appendvecs can be cached to avoid scanning large amounts of data over and over. fn new(one_epoch_old_slot: Slot, snapshot_storages: &SortedStorages) -> Self { + let range = snapshot_storages.range(); + // any ancient append vecs should definitely be cached // We need to break the ranges into: // 1. individual ancient append vecs (may be empty) // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty) // 3. evenly divided full chunks in the middle // 4. unevenly divided chunk of most recent slots (may be empty) - let range = snapshot_storages.range(); let ancient_slots = Self::get_ancient_slots(one_epoch_old_slot, snapshot_storages); let first_non_ancient_slot = ancient_slots @@ -1408,19 +1412,83 @@ impl SplitAncientStorages { let non_ancient_slot_count = (max_slot_inclusive - first_non_ancient_slot + 1) as usize; + let normal_slot_range = Range { + start: first_non_ancient_slot, + end: range.end, + }; + // 2 is for 2 special chunks - unaligned slots at the beginning and end let chunk_count = ancient_slot_count + 2 + non_ancient_slot_count / (MAX_ITEMS_PER_CHUNK as usize); - Self { + SplitAncientStorages { ancient_slot_count, ancient_slots, first_non_ancient_slot, first_chunk_start, non_ancient_slot_count, chunk_count, + normal_slot_range, } } + + /// given 'normal_chunk', return the starting slot of that chunk in the normal/non-ancient range + /// a normal_chunk is 0<=normal_chunk<=non_ancient_chunk_count + /// non_ancient_chunk_count is chunk_count-ancient_slot_count + fn get_starting_slot_from_normal_chunk(&self, normal_chunk: usize) -> Slot { + if normal_chunk == 0 { + self.normal_slot_range.start + } else { + assert!( + normal_chunk.saturating_add(self.ancient_slot_count) < self.chunk_count, + "out of bounds: {}, {}", + normal_chunk, + self.chunk_count + ); + + let normal_chunk = normal_chunk.saturating_sub(1); + (self.first_chunk_start + MAX_ITEMS_PER_CHUNK * (normal_chunk as Slot)) + .max(self.normal_slot_range.start) + } + } + + /// ancient slots are the first chunks + fn is_chunk_ancient(&self, chunk: usize) -> bool { + chunk < self.ancient_slot_count + } + + /// given chunk in 0<=chunk<self.chunk_count + /// return the range of slots in that chunk + fn get_slot_range(&self, chunk: usize) -> Option<Range<Slot>> { + let range = if chunk < self.ancient_slot_count { + // ancient append vecs are handled individually + let slot = self.ancient_slots[chunk]; + Range { + start: slot, + end: slot + 1, + } + } else { 
+ // normal chunks are after ancient chunks + let normal_chunk = chunk - self.ancient_slot_count; + if normal_chunk == 0 { + // first slot + Range { + start: self.normal_slot_range.start, + end: self.first_chunk_start.min(self.normal_slot_range.end), + } + } else { + // normal full chunk or the last chunk + let first_slot = self.get_starting_slot_from_normal_chunk(normal_chunk); + Range { + start: first_slot, + end: (first_slot + MAX_ITEMS_PER_CHUNK).min(self.normal_slot_range.end), + } + } + }; + // return empty range as None + (!range.is_empty()).then_some(range) + } } #[derive(Debug, Default)] @@ -6956,14 +7024,7 @@ impl AccountsDb { where S: AppendVecScan, { - let SplitAncientStorages { - ancient_slot_count, - ancient_slots, - first_non_ancient_slot, - first_chunk_start, - non_ancient_slot_count, - chunk_count, - } = SplitAncientStorages::new( + let splitter = SplitAncientStorages::new( self.get_one_epoch_old_slot_for_hash_calc_scan( snapshot_storages.max_slot_inclusive(), config, @@ -6973,46 +7034,27 @@ impl AccountsDb { let range = snapshot_storages.range(); let start_bin_index = bin_range.start; - (0..chunk_count) + + (0..splitter.chunk_count) .into_par_iter() - .map(|mut chunk| { + .map(|chunk| { let mut scanner = scanner.clone(); - // calculate start, end_exclusive - let (single_cached_slot, (start, mut end_exclusive)) = if chunk < ancient_slot_count - { - let ancient_slot = ancient_slots[chunk as usize]; - (true, (ancient_slot, ancient_slot + 1)) - } else { - (false, { - chunk -= ancient_slot_count; - if chunk == 0 { - if first_non_ancient_slot == first_chunk_start { - return scanner.scanning_complete(); // if we evenly divide, nothing for special chunk 0 to do - } - // otherwise first chunk is not 'full' - (first_non_ancient_slot, first_chunk_start) - } else { - // normal chunk in the middle or at the end - let start = - first_chunk_start + MAX_ITEMS_PER_CHUNK * ((chunk as Slot) - 1); - let end_exclusive = start + MAX_ITEMS_PER_CHUNK; - (start, end_exclusive) - } - }) - }; - end_exclusive = std::cmp::min(end_exclusive, range.end); - if start == end_exclusive { + + let range_this_chunk = splitter.get_slot_range(chunk); + + if range_this_chunk.is_none() { return scanner.scanning_complete(); } - let range_this_chunk = start..end_exclusive; + let range_this_chunk = range_this_chunk.unwrap(); let should_cache_hash_data = CalcAccountsHashConfig::get_should_cache_hash_data() || config.store_detailed_debug_info_on_failure; // Single cached slots get cached and full chunks get cached. // chunks that don't divide evenly would include some cached append vecs that are no longer part of this range and some that are, so we have to ignore caching on non-evenly dividing chunks. 
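// A sketch of the chunk layout the splitter yields for a hypothetical storage
// set (MAX_ITEMS_PER_CHUNK is 2_500 here): ancient slots 100 and 3_000 plus
// normal slots 5_001..=12_000 give chunk_count = 2 + 2 + 7_000 / 2_500 = 6,
// and get_slot_range returns:
//   chunk 0: 100..101        (ancient, one slot each, always cacheable)
//   chunk 1: 3_000..3_001
//   chunk 2: 5_001..7_500    (unaligned head, not cacheable)
//   chunk 3: 7_500..10_000   (full aligned chunk, cacheable)
//   chunk 4: 10_000..12_001  (unaligned tail, not cacheable)
//   chunk 5: None            (empty trailing special chunk)
// The eligibility test below then reduces to this predicate:
fn eligible_for_caching_sketch(range: &std::ops::Range<u64>, is_ancient: bool) -> bool {
    const MAX_ITEMS_PER_CHUNK: u64 = 2_500;
    is_ancient || range.end.saturating_sub(range.start) == MAX_ITEMS_PER_CHUNK
}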
- let eligible_for_caching = single_cached_slot - || end_exclusive.saturating_sub(start) == MAX_ITEMS_PER_CHUNK; + let eligible_for_caching = splitter.is_chunk_ancient(chunk) + || range_this_chunk.end.saturating_sub(range_this_chunk.start) == MAX_ITEMS_PER_CHUNK; if eligible_for_caching || config.store_detailed_debug_info_on_failure { let range = bin_range.end - bin_range.start; @@ -7131,8 +7173,8 @@ impl AccountsDb { "FAILED_TO_SAVE: {}-{}, {}, first_chunk_start: {}, {:?}, error: {:?}", range.start, range.end, - non_ancient_slot_count, - first_chunk_start, + splitter.non_ancient_slot_count, + splitter.first_chunk_start, file_name, result, ); @@ -16272,6 +16314,286 @@ pub mod tests { assert!(db.dirty_stores.is_empty()); } + #[test] + fn test_split_storages_ancient_chunks() { + let storages = SortedStorages::empty(); + assert_eq!(storages.max_slot_inclusive(), 0); + let result = SplitAncientStorages::new(0, &storages); + assert_eq!(result, SplitAncientStorages::default()); + } + + /// get all the ranges the splitter produces + fn get_all_slot_ranges(splitter: &SplitAncientStorages) -> Vec<Option<Range<Slot>>> { + (0..splitter.chunk_count) + .map(|chunk| { + assert_eq!( + splitter.get_starting_slot_from_normal_chunk(chunk), + if chunk == 0 { + splitter.normal_slot_range.start + } else { + (splitter.first_chunk_start + ((chunk as Slot) - 1) * MAX_ITEMS_PER_CHUNK) + .max(splitter.normal_slot_range.start) + }, + "chunk: {chunk}, num_chunks: {}, splitter: {:?}", + splitter.chunk_count, + splitter, + ); + splitter.get_slot_range(chunk) + }) + .collect::<Vec<_>>() + } + + /// test function to make sure the split range covers exactly every slot in the original range + fn verify_all_slots_covered_exactly_once(splitter: &SplitAncientStorages, range: &Range<Slot>) { + // verify all slots covered exactly once + let result = get_all_slot_ranges(splitter); + let mut expected = range.start; + result.iter().for_each(|range| { + if let Some(range) = range { + for slot in range.clone() { + assert_eq!(slot, expected); + expected += 1; + } + } + }); + assert_eq!(expected, range.end); + } + + /// new splitter for test + /// without any ancient append vecs + fn new_splitter(range: &Range<Slot>) -> SplitAncientStorages { + let splitter = + SplitAncientStorages::new_with_ancient_info(range, Vec::default(), range.start); + + verify_all_slots_covered_exactly_once(&splitter, range); + + splitter + } + + /// new splitter for test + /// without any ancient append vecs + fn new_splitter2(start: Slot, count: Slot) -> SplitAncientStorages { + new_splitter(&Range { + start, + end: start + count, + }) + } + + #[test] + fn test_split_storages_splitter_simple() { + let plus_1 = MAX_ITEMS_PER_CHUNK + 1; + let plus_2 = plus_1 + 1; + + // starting at 0 is aligned with beginning, so 1st chunk is unnecessary since beginning slot starts at boundary + // second chunk is the final chunk, which is not full (does not have 2500 entries) + let splitter = new_splitter2(0, 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!(result, [Some(0..1), None]); + + // starting at 1 is not aligned with beginning, but since we don't have enough for a full chunk, it gets returned in the last chunk + let splitter = new_splitter2(1, 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!(result, [Some(1..2), None]); + + // 1 full chunk, aligned + let splitter = new_splitter2(0, MAX_ITEMS_PER_CHUNK); + let result = get_all_slot_ranges(&splitter); + assert_eq!(result, [Some(0..MAX_ITEMS_PER_CHUNK), None, None]); + + // 1 full chunk + 1, aligned + let splitter = 
new_splitter2(0, plus_1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(0..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..plus_1), + None + ] + ); + + // 1 full chunk + 2, aligned + let splitter = new_splitter2(0, plus_2); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(0..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..plus_2), + None + ] + ); + + // 1 full chunk, mis-aligned by 1 + let offset = 1; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(1..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK + 1), + None + ] + ); + + // starting at 1 is not aligned with beginning + let offset = 1; + let splitter = new_splitter2(offset, plus_1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..plus_1 + offset), + None + ], + "{:?}", + splitter + ); + + // 2 full chunks, aligned + let offset = 0; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK * 2); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK * 2), + None, + None + ], + "{:?}", + splitter + ); + + // 2 full chunks + 1, mis-aligned + let offset = 1; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK * 2); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK * 2), + Some(MAX_ITEMS_PER_CHUNK * 2..MAX_ITEMS_PER_CHUNK * 2 + offset), + None, + ], + "{:?}", + splitter + ); + + // 3 full chunks - 1, mis-aligned by 2 + // we need ALL the chunks here + let offset = 2; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK * 3 - 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK * 2), + Some(MAX_ITEMS_PER_CHUNK * 2..MAX_ITEMS_PER_CHUNK * 3), + Some(MAX_ITEMS_PER_CHUNK * 3..MAX_ITEMS_PER_CHUNK * 3 + 1), + ], + "{:?}", + splitter + ); + + // 1 full chunk - 1, mis-aligned by 2 + // we need ALL the chunks here + let offset = 2; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK - 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK + 1), + ], + "{:?}", + splitter + ); + + // 1 full chunk - 1, aligned at big offset + // huge offset + // we need ALL the chunks here + let offset = MAX_ITEMS_PER_CHUNK * 100; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK - 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [Some(offset..MAX_ITEMS_PER_CHUNK * 101 - 1), None,], + "{:?}", + splitter + ); + + // 1 full chunk - 1, mis-aligned by 2 at big offset + // huge offset + // we need ALL the chunks here + let offset = MAX_ITEMS_PER_CHUNK * 100 + 2; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK - 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK * 101), + Some(MAX_ITEMS_PER_CHUNK * 101..MAX_ITEMS_PER_CHUNK * 101 + 1), + ], + "{:?}", + splitter + ); + } + + #[test] + fn test_split_storages_splitter_broken() { + solana_logger::setup(); + // 1 full chunk - 1, mis-aligned by 2 at big offset + // huge offset + // we need ALL the 
chunks here + let offset = MAX_ITEMS_PER_CHUNK * 100 + 2; + let splitter = new_splitter2(offset, MAX_ITEMS_PER_CHUNK - 1); + let result = get_all_slot_ranges(&splitter); + assert_eq!( + result, + [ + Some(offset..MAX_ITEMS_PER_CHUNK * 101), + Some(MAX_ITEMS_PER_CHUNK * 101..MAX_ITEMS_PER_CHUNK * 101 + 1), + ], + "{:?}", + splitter + ); + } + + #[test] + fn test_split_storages_parametric_splitter() { + for offset_multiplier in [1, 1000] { + for offset in [ + 0, + 1, + 2, + MAX_ITEMS_PER_CHUNK - 2, + MAX_ITEMS_PER_CHUNK - 1, + MAX_ITEMS_PER_CHUNK, + MAX_ITEMS_PER_CHUNK + 1, + ] { + for full_chunks in [0, 1, 2, 3] { + for reduced_items in [0, 1, 2] { + for added_items in [0, 1, 2] { + // this will verify the entire range correctly + let _ = new_splitter2( + offset * offset_multiplier, + (full_chunks * MAX_ITEMS_PER_CHUNK + added_items) + .saturating_sub(reduced_items), + ); + } + } + } + } + } + } + #[test] fn test_add_uncleaned_pubkeys_after_shrink() { let db = AccountsDb::new_single_for_tests(); From 30b0a13ba963538830a81314a06422b5669e0da8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 6 Oct 2022 20:31:58 +0200 Subject: [PATCH 19/65] Bump solana_rbpf to 0.2.34 (#28198) * Bumps solana_rbpf to 0.2.34 * Removes generic UserError from EbpfError. * Uses ProgramResult for syscalls. Removes use sites of the question_mark! macro by wrapping the call method of SyscallObjects. * Uses InvokeContext as syscall context object directly. * Replaces bind_syscall_context_object() by a parameter in the constructor. * Inlines bind_syscall_context_objects() at its only call site. --- Cargo.lock | 4 +- cli/Cargo.toml | 2 +- cli/src/program.rs | 10 +- programs/bpf/Cargo.lock | 4 +- programs/bpf/Cargo.toml | 2 +- programs/bpf/benches/bpf_loader.rs | 43 +- programs/bpf/tests/programs.rs | 25 +- programs/bpf_loader/Cargo.toml | 2 +- programs/bpf_loader/src/lib.rs | 101 +- programs/bpf_loader/src/syscalls/cpi.rs | 124 +- programs/bpf_loader/src/syscalls/logging.rs | 196 +- programs/bpf_loader/src/syscalls/mem_ops.rs | 215 +- programs/bpf_loader/src/syscalls/mod.rs | 2306 ++++++++----------- programs/bpf_loader/src/syscalls/sysvar.rs | 78 +- rbpf-cli/Cargo.toml | 2 +- rbpf-cli/src/main.rs | 27 +- 16 files changed, 1337 insertions(+), 1804 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fde4a19427c5ae..36f13ff6fc00a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6806,9 +6806,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13ec17b58709d07634895dbca4cae6bebeaddf3b83bf4fa8dfeaf3621222347" +checksum = "334bb11faee82f0306e39944cb4ac85824b7d261f3d99d1f2f8bd09851644f0b" dependencies = [ "byteorder", "combine", diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 6fd332e29a41cb..e0ac25c919f0a1 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -54,7 +54,7 @@ solana-tpu-client = { path = "../tpu-client", version = "=1.15.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.15.0" } solana-version = { path = "../version", version = "=1.15.0" } solana-vote-program = { path = "../programs/vote", version = "=1.15.0" } -solana_rbpf = "=0.2.33" +solana_rbpf = "=0.2.34" spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0.31" tiny-bip39 = "0.8.2" diff --git a/cli/src/program.rs b/cli/src/program.rs index ce504d01220081..d4be4c8aa886c4 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -10,7 +10,7 @@ use { 
        clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
        log::*,
        solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig},
-        solana_bpf_loader_program::{syscalls::register_syscalls, BpfError, ThisInstructionMeter},
+        solana_bpf_loader_program::{syscalls::register_syscalls, ThisInstructionMeter},
        solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*},
        solana_cli_output::{
            CliProgram, CliProgramAccountType, CliProgramAuthority, CliProgramBuffer, CliProgramId,
@@ -2086,7 +2086,7 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
-    let executable = Executable::<BpfError, ThisInstructionMeter>::from_elf(
+    let executable = Executable::<InvokeContext, ThisInstructionMeter>::from_elf(
         &program_data,
         Config {
             reject_broken_elfs: true,
@@ -2097,10 +2097,8 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
-    VerifiedExecutable::<RequisiteVerifier, BpfError, ThisInstructionMeter>::from_executable(
-        executable,
-    )
-    .map_err(|err| format!("ELF error: {}", err))?;
+    VerifiedExecutable::<RequisiteVerifier, InvokeContext, ThisInstructionMeter>::from_executable(executable)
+        .map_err(|err| format!("ELF error: {}", err))?;

     Ok(program_data)
 }
diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock
index bb945eb9256d78..e818163950bb13 100644
--- a/programs/bpf/Cargo.lock
+++ b/programs/bpf/Cargo.lock
@@ -6000,9 +6000,9 @@ dependencies = [

 [[package]]
 name = "solana_rbpf"
-version = "0.2.33"
+version = "0.2.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b13ec17b58709d07634895dbca4cae6bebeaddf3b83bf4fa8dfeaf3621222347"
+checksum = "334bb11faee82f0306e39944cb4ac85824b7d261f3d99d1f2f8bd09851644f0b"
 dependencies = [
  "byteorder 1.4.3",
  "combine",
diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml
index 6724e3c47f2575..8c53889599b3d3 100644
--- a/programs/bpf/Cargo.toml
+++ b/programs/bpf/Cargo.toml
@@ -38,7 +38,7 @@ solana-program-runtime = { path = "../../program-runtime", version = "=1.15.0" }
 solana-runtime = { path = "../../runtime", version = "=1.15.0" }
 solana-sdk = { path = "../../sdk", version = "=1.15.0" }
 solana-transaction-status = { path = "../../transaction-status", version = "=1.15.0" }
-solana_rbpf = "=0.2.33"
+solana_rbpf = "=0.2.34"

 [dev-dependencies]
 solana-ledger = { path = "../../ledger", version = "=1.15.0" }
diff --git a/programs/bpf/benches/bpf_loader.rs b/programs/bpf/benches/bpf_loader.rs
index 37d28687e99814..c0137b0b0b38da 100644
--- a/programs/bpf/benches/bpf_loader.rs
+++ b/programs/bpf/benches/bpf_loader.rs
@@ -8,7 +8,7 @@ extern crate solana_bpf_loader_program;
 use {
     byteorder::{ByteOrder, LittleEndian, WriteBytesExt},
     solana_bpf_loader_program::{
-        create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BpfError,
+        create_vm, serialization::serialize_parameters, syscalls::register_syscalls,
         ThisInstructionMeter,
     },
     solana_measure::measure::Measure,
@@ -81,7 +81,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) {
     let elf = load_elf("bench_alu").unwrap();

     bencher.iter(|| {
-        let _ = Executable::<BpfError, ThisInstructionMeter>::from_elf(
+        let _ = Executable::<InvokeContext, ThisInstructionMeter>::from_elf(
             &elf,
             Config::default(),
             SyscallRegistry::default(),
@@ -106,19 +106,18 @@ fn bench_program_alu(bencher: &mut Bencher) {
         .get_compute_meter()
         .borrow_mut()
         .mock_set_remaining(std::i64::MAX as u64);
-    let executable = Executable::<BpfError, ThisInstructionMeter>::from_elf(
+    let executable = Executable::<InvokeContext, ThisInstructionMeter>::from_elf(
         &elf,
         Config::default(),
         register_syscalls(invoke_context, true).unwrap(),
     )
     .unwrap();

-    let mut verified_executable = VerifiedExecutable::<
-        RequisiteVerifier,
-        BpfError,
-        ThisInstructionMeter,
-    >::from_executable(executable)
-    .unwrap();
+    let mut verified_executable =
+        VerifiedExecutable::<RequisiteVerifier, InvokeContext, ThisInstructionMeter>::from_executable(
+            executable,
+        )
+        .unwrap();
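    // The Executable/VerifiedExecutable hunks above all make the same two
    // substitutions. A minimal before/after sketch (the `InvokeContext` type
    // argument follows the pattern used in this patch; the exact rbpf generic
    // parameter list is an assumption here):
    /*
    // 0.2.33: generic over the user-defined error type
    Executable::<BpfError, ThisInstructionMeter>::from_elf(/* elf, config, registry */);
    // 0.2.34: generic over the syscall context object instead; user errors
    // now travel inside EbpfError as a boxed trait object
    Executable::<InvokeContext, ThisInstructionMeter>::from_elf(/* elf, config, registry */);
    */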
verified_executable.jit_compile().unwrap(); let compute_meter = invoke_context.get_compute_meter(); @@ -235,19 +234,18 @@ fn bench_create_vm(bencher: &mut Bencher) { ) .unwrap(); - let executable = Executable::::from_elf( + let executable = Executable::::from_elf( &elf, Config::default(), register_syscalls(invoke_context, true).unwrap(), ) .unwrap(); - let verified_executable = VerifiedExecutable::< - RequisiteVerifier, - BpfError, - ThisInstructionMeter, - >::from_executable(executable) - .unwrap(); + let verified_executable = + VerifiedExecutable::::from_executable( + executable, + ) + .unwrap(); bencher.iter(|| { let _ = create_vm( @@ -283,19 +281,18 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { ) .unwrap(); - let executable = Executable::::from_elf( + let executable = Executable::::from_elf( &elf, Config::default(), register_syscalls(invoke_context, true).unwrap(), ) .unwrap(); - let verified_executable = VerifiedExecutable::< - RequisiteVerifier, - BpfError, - ThisInstructionMeter, - >::from_executable(executable) - .unwrap(); + let verified_executable = + VerifiedExecutable::::from_executable( + executable, + ) + .unwrap(); let compute_meter = invoke_context.get_compute_meter(); let mut instruction_meter = ThisInstructionMeter { compute_meter }; diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 82cb247448956c..aa7511ca705d02 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -55,7 +55,7 @@ use { create_vm, serialization::{deserialize_parameters, serialize_parameters}, syscalls::register_syscalls, - BpfError, ThisInstructionMeter, + ThisInstructionMeter, }, solana_program_runtime::invoke_context::with_mock_invoke_context, solana_rbpf::{ @@ -241,7 +241,7 @@ fn run_program(name: &str) -> u64 { reject_broken_elfs: true, ..Config::default() }; - let executable = Executable::::from_elf( + let executable = Executable::::from_elf( &data, config, register_syscalls(invoke_context, true /* no sol_alloc_free */).unwrap(), @@ -249,12 +249,11 @@ fn run_program(name: &str) -> u64 { .unwrap(); #[allow(unused_mut)] - let mut verified_executable = VerifiedExecutable::< - RequisiteVerifier, - BpfError, - ThisInstructionMeter, - >::from_executable(executable) - .unwrap(); + let mut verified_executable = + VerifiedExecutable::::from_executable( + executable, + ) + .unwrap(); let run_program_iterations = { #[cfg(target_arch = "x86_64")] @@ -300,7 +299,10 @@ fn run_program(name: &str) -> u64 { instruction_count = vm.get_total_instruction_count(); if config.enable_instruction_tracing { if i == 1 { - if !Tracer::compare(tracer.as_ref().unwrap(), vm.get_tracer()) { + if !Tracer::compare( + tracer.as_ref().unwrap(), + &vm.get_program_environment().tracer, + ) { let analysis = Analysis::from_executable(verified_executable.get_executable()) .unwrap(); @@ -312,7 +314,8 @@ fn run_program(name: &str) -> u64 { .write(&mut stdout.lock(), &analysis) .unwrap(); println!("TRACE (jit):"); - vm.get_tracer() + vm.get_program_environment() + .tracer .write(&mut stdout.lock(), &analysis) .unwrap(); assert!(false); @@ -330,7 +333,7 @@ fn run_program(name: &str) -> u64 { trace!("BPF Program Instruction Trace:\n{}", trace_string); } } - tracer = Some(vm.get_tracer().clone()); + tracer = Some(vm.get_program_environment().tracer.clone()); } } assert!(match deserialize_parameters( diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index b4a2cd312bf055..acfd11badcadfa 100644 --- a/programs/bpf_loader/Cargo.toml +++ 
b/programs/bpf_loader/Cargo.toml @@ -19,7 +19,7 @@ solana-metrics = { path = "../../metrics", version = "=1.15.0" } solana-program-runtime = { path = "../../program-runtime", version = "=1.15.0" } solana-sdk = { path = "../../sdk", version = "=1.15.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.15.0" } -solana_rbpf = "=0.2.33" +solana_rbpf = "=0.2.34" thiserror = "1.0" [dev-dependencies] diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 951bd37c961ef3..e99b62adbf3ffd 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -14,6 +14,7 @@ extern crate solana_metrics; use { crate::{ + allocator_bump::BpfAllocator, serialization::{deserialize_parameters, serialize_parameters}, syscalls::SyscallError, }, @@ -28,13 +29,13 @@ use { }, solana_rbpf::{ aligned_memory::AlignedMemory, - ebpf::{HOST_ALIGN, MM_INPUT_START}, + ebpf::{HOST_ALIGN, MM_HEAP_START, MM_INPUT_START}, elf::Executable, error::{EbpfError, UserDefinedError}, memory_region::MemoryRegion, static_analysis::Analysis, verifier::{RequisiteVerifier, VerifierError}, - vm::{Config, EbpfVm, InstructionMeter, VerifiedExecutable}, + vm::{Config, EbpfVm, InstructionMeter, ProgramResult, VerifiedExecutable}, }, solana_sdk::{ bpf_loader, bpf_loader_deprecated, @@ -42,9 +43,10 @@ use { entrypoint::{HEAP_LENGTH, SUCCESS}, feature_set::{ cap_accounts_data_allocations_per_transaction, cap_bpf_program_instruction_accounts, - disable_deploy_of_alloc_free_syscall, disable_deprecated_loader, - enable_bpf_loader_extend_program_ix, error_on_syscall_bpf_function_hash_collisions, - limit_max_instruction_trace_length, reject_callx_r10, + check_slice_translation_size, disable_deploy_of_alloc_free_syscall, + disable_deprecated_loader, enable_bpf_loader_extend_program_ix, + error_on_syscall_bpf_function_hash_collisions, limit_max_instruction_trace_length, + reject_callx_r10, }, instruction::{AccountMeta, InstructionError}, loader_instruction::LoaderInstruction, @@ -80,7 +82,7 @@ pub enum BpfError { } impl UserDefinedError for BpfError {} -fn map_ebpf_error(invoke_context: &InvokeContext, e: EbpfError) -> InstructionError { +fn map_ebpf_error(invoke_context: &InvokeContext, e: EbpfError) -> InstructionError { ic_msg!(invoke_context, "{}", e); InstructionError::InvalidAccountData } @@ -201,7 +203,7 @@ pub fn create_executor( )?; create_executor_metrics.program_id = programdata.get_key().to_string(); let mut load_elf_time = Measure::start("load_elf_time"); - let executable = Executable::::from_elf( + let executable = Executable::::from_elf( programdata .get_data() .get(programdata_offset..) 
@@ -220,10 +222,8 @@ pub fn create_executor( .map_err(|e| map_ebpf_error(invoke_context, e))?; let mut verify_code_time = Measure::start("verify_code_time"); let mut verified_executable = - VerifiedExecutable::::from_executable( - executable, - ) - .map_err(|e| map_ebpf_error(invoke_context, e))?; + VerifiedExecutable::::from_executable(executable) + .map_err(|e| map_ebpf_error(invoke_context, e))?; verify_code_time.stop(); create_executor_metrics.verify_code_us = verify_code_time.as_us(); invoke_context.timings.create_executor_verify_code_us = invoke_context @@ -289,11 +289,11 @@ fn check_loader_id(id: &Pubkey) -> bool { /// Create the BPF virtual machine pub fn create_vm<'a, 'b>( - program: &'a VerifiedExecutable, + program: &'a VerifiedExecutable, parameter_bytes: &mut [u8], orig_account_lengths: Vec, invoke_context: &'a mut InvokeContext<'b>, -) -> Result, EbpfError> { +) -> Result, EbpfError> { let compute_budget = invoke_context.get_compute_budget(); let heap_size = compute_budget.heap_size.unwrap_or(HEAP_LENGTH); let _ = invoke_context.get_compute_meter().borrow_mut().consume( @@ -304,8 +304,33 @@ pub fn create_vm<'a, 'b>( let mut heap = AlignedMemory::::zero_filled(compute_budget.heap_size.unwrap_or(HEAP_LENGTH)); let parameter_region = MemoryRegion::new_writable(parameter_bytes, MM_INPUT_START); - let mut vm = EbpfVm::new(program, heap.as_slice_mut(), vec![parameter_region])?; - syscalls::bind_syscall_context_objects(&mut vm, invoke_context, heap, orig_account_lengths)?; + let vm = EbpfVm::new( + program, + invoke_context, + heap.as_slice_mut(), + vec![parameter_region], + )?; + let check_aligned = bpf_loader_deprecated::id() + != invoke_context + .transaction_context + .get_current_instruction_context() + .and_then(|instruction_context| { + instruction_context + .try_borrow_last_program_account(invoke_context.transaction_context) + }) + .map(|program_account| *program_account.get_owner()) + .map_err(SyscallError::InstructionError)?; + let check_size = invoke_context + .feature_set + .is_active(&check_slice_translation_size::id()); + invoke_context + .set_syscall_context( + check_aligned, + check_size, + orig_account_lengths, + Rc::new(RefCell::new(BpfAllocator::new(heap, MM_HEAP_START))), + ) + .map_err(SyscallError::InstructionError)?; Ok(vm) } @@ -1281,7 +1306,7 @@ impl InstructionMeter for ThisInstructionMeter { /// BPF Loader's Executor implementation pub struct BpfExecutor { - verified_executable: VerifiedExecutable, + verified_executable: VerifiedExecutable, use_jit: bool, } @@ -1353,7 +1378,10 @@ impl Executor for BpfExecutor { let mut trace_buffer = Vec::::new(); let analysis = Analysis::from_executable(self.verified_executable.get_executable()).unwrap(); - vm.get_tracer().write(&mut trace_buffer, &analysis).unwrap(); + vm.get_program_environment() + .tracer + .write(&mut trace_buffer, &analysis) + .unwrap(); let trace_string = String::from_utf8(trace_buffer).unwrap(); trace!("BPF Program Instruction Trace:\n{}", trace_string); } @@ -1364,7 +1392,7 @@ impl Executor for BpfExecutor { stable_log::program_return(&log_collector, &program_id, return_data); } match result { - Ok(status) if status != SUCCESS => { + ProgramResult::Ok(status) if status != SUCCESS => { let error: InstructionError = if (status == MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED && !invoke_context @@ -1386,11 +1414,24 @@ impl Executor for BpfExecutor { stable_log::program_failure(&log_collector, &program_id, &error); Err(error) } - Err(error) => { + ProgramResult::Err(error) => { let error = match error { - 
EbpfError::UserError(BpfError::SyscallError(
-                        SyscallError::InstructionError(error),
-                    )) => error,
+                    /*EbpfError::UserError(user_error) if let BpfError::SyscallError(
+                        SyscallError::InstructionError(instruction_error),
+                    ) = user_error.downcast_ref::<BpfError>().unwrap() => instruction_error.clone(),*/
+                    EbpfError::UserError(user_error)
+                        if matches!(
+                            user_error.downcast_ref::<BpfError>().unwrap(),
+                            BpfError::SyscallError(SyscallError::InstructionError(_)),
+                        ) =>
+                    {
+                        match user_error.downcast_ref::<BpfError>().unwrap() {
+                            BpfError::SyscallError(SyscallError::InstructionError(
+                                instruction_error,
+                            )) => instruction_error.clone(),
+                            _ => unreachable!(),
+                        }
+                    }
                     err => {
                         ic_logger_msg!(log_collector, "Program failed to complete: {}", err);
                         InstructionError::ProgramFailedToComplete
@@ -1536,21 +1577,21 @@ mod tests {
             "entrypoint",
         )
         .unwrap();
-        let executable = Executable::<BpfError, TestInstructionMeter>::from_text_bytes(
+        let executable = Executable::<(), TestInstructionMeter>::from_text_bytes(
             program,
             config,
             syscall_registry,
             bpf_functions,
         )
         .unwrap();
-        let verified_executable = VerifiedExecutable::<
-            TautologyVerifier,
-            BpfError,
-            TestInstructionMeter,
-        >::from_executable(executable)
-        .unwrap();
+        let verified_executable =
+            VerifiedExecutable::<TautologyVerifier, (), TestInstructionMeter>::from_executable(
+                executable,
+            )
+            .unwrap();
         let input_region = MemoryRegion::new_writable(&mut input_mem, MM_INPUT_START);
-        let mut vm = EbpfVm::new(&verified_executable, &mut [], vec![input_region]).unwrap();
+        let mut vm =
+            EbpfVm::new(&verified_executable, &mut (), &mut [], vec![input_region]).unwrap();
         let mut instruction_meter = TestInstructionMeter { remaining: 10 };
         vm.execute_program_interpreted(&mut instruction_meter)
             .unwrap();
diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs
index d4f477d5e0adc8..7eb2290266cb5b 100644
--- a/programs/bpf_loader/src/syscalls/cpi.rs
+++ b/programs/bpf_loader/src/syscalls/cpi.rs
@@ -23,71 +23,59 @@ struct CallerAccount<'a> {
 type TranslatedAccounts<'a> = Vec<(IndexOfAccount, Option<CallerAccount<'a>>)>;

 /// Implemented by language specific data structure translators
-trait SyscallInvokeSigned<'a, 'b> {
-    fn get_context_mut(&self) -> Result<RefMut<&'a mut InvokeContext<'b>>, EbpfError<BpfError>>;
+trait SyscallInvokeSigned {
     fn translate_instruction(
-        &self,
         addr: u64,
         memory_mapping: &mut MemoryMapping,
         invoke_context: &mut InvokeContext,
-    ) -> Result<Instruction, EbpfError<BpfError>>;
-    fn translate_accounts<'c>(
-        &'c self,
+    ) -> Result<Instruction, EbpfError>;
+    fn translate_accounts<'a>(
         instruction_accounts: &[InstructionAccount],
         program_indices: &[IndexOfAccount],
         account_infos_addr: u64,
         account_infos_len: u64,
         memory_mapping: &mut MemoryMapping,
         invoke_context: &mut InvokeContext,
-    ) -> Result<TranslatedAccounts<'c>, EbpfError<BpfError>>;
+    ) -> Result<TranslatedAccounts<'a>, EbpfError>;
     fn translate_signers(
-        &self,
         program_id: &Pubkey,
         signers_seeds_addr: u64,
         signers_seeds_len: u64,
         memory_mapping: &mut MemoryMapping,
         invoke_context: &InvokeContext,
-    ) -> Result<Vec<Pubkey>, EbpfError<BpfError>>;
+    ) -> Result<Vec<Pubkey>, EbpfError>;
 }

 declare_syscall!(
     /// Cross-program invocation called from Rust
     SyscallInvokeSignedRust,
-    fn call(
-        &mut self,
+    fn inner_call(
+        invoke_context: &mut InvokeContext,
         instruction_addr: u64,
         account_infos_addr: u64,
         account_infos_len: u64,
         signers_seeds_addr: u64,
         signers_seeds_len: u64,
         memory_mapping: &mut MemoryMapping,
-        result: &mut Result<u64, EbpfError<BpfError>>,
-    ) {
-        *result = call(
-            self,
+    ) -> Result<u64, EbpfError> {
+        cpi_common::<Self>(
+            invoke_context,
             instruction_addr,
             account_infos_addr,
             account_infos_len,
             signers_seeds_addr,
             signers_seeds_len,
             memory_mapping,
-        );
+        )
     }
 );

-impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedRust<'a, 'b> {
-    fn get_context_mut(&self) -> Result<RefMut<&'a mut InvokeContext<'b>>,
EbpfError> { - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) - } - +impl SyscallInvokeSigned for SyscallInvokeSignedRust { fn translate_instruction( - &self, addr: u64, memory_mapping: &mut MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result> { + ) -> Result { let ix = translate_type::( memory_mapping, addr, @@ -131,15 +119,14 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedRust<'a, 'b> { }) } - fn translate_accounts<'c>( - &'c self, + fn translate_accounts<'a>( instruction_accounts: &[InstructionAccount], program_indices: &[IndexOfAccount], account_infos_addr: u64, account_infos_len: u64, memory_mapping: &mut MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let account_infos = translate_slice::( memory_mapping, account_infos_addr, @@ -157,7 +144,7 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedRust<'a, 'b> { invoke_context.get_check_aligned(), ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let translate = |account_info: &AccountInfo, invoke_context: &InvokeContext| { // Translate the account from user space @@ -235,13 +222,12 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedRust<'a, 'b> { } fn translate_signers( - &self, program_id: &Pubkey, signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &mut MemoryMapping, invoke_context: &InvokeContext, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let mut signers = Vec::new(); if signers_seeds_len > 0 { let signers_seeds = translate_slice::<&[&[u8]]>( @@ -279,7 +265,7 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedRust<'a, 'b> { invoke_context.get_check_size(), ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let signer = Pubkey::create_program_address(&seeds, program_id) .map_err(SyscallError::BadSeeds)?; signers.push(signer); @@ -347,41 +333,33 @@ struct SolSignerSeedsC { declare_syscall!( /// Cross-program invocation called from C SyscallInvokeSignedC, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, instruction_addr: u64, account_infos_addr: u64, account_infos_len: u64, signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - *result = call( - self, + ) -> Result { + cpi_common::( + invoke_context, instruction_addr, account_infos_addr, account_infos_len, signers_seeds_addr, signers_seeds_len, memory_mapping, - ); + ) } ); -impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> { - fn get_context_mut(&self) -> Result>, EbpfError> { - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) - } - +impl SyscallInvokeSigned for SyscallInvokeSignedC { fn translate_instruction( - &self, addr: u64, memory_mapping: &mut MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result> { + ) -> Result { let ix_c = translate_type::( memory_mapping, addr, @@ -439,7 +417,7 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> { is_writable: meta_c.is_writable, }) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; Ok(Instruction { program_id: *program_id, @@ -448,15 +426,14 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> { }) } - fn translate_accounts<'c>( - &'c self, + fn translate_accounts<'a>( instruction_accounts: &[InstructionAccount], program_indices: &[IndexOfAccount], 
account_infos_addr: u64, account_infos_len: u64, memory_mapping: &mut MemoryMapping, invoke_context: &mut InvokeContext, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let account_infos = translate_slice::( memory_mapping, account_infos_addr, @@ -474,7 +451,7 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> { invoke_context.get_check_aligned(), ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let translate = |account_info: &SolAccountInfo, invoke_context: &InvokeContext| { // Translate the account from user space @@ -551,13 +528,12 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> { } fn translate_signers( - &self, program_id: &Pubkey, signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &mut MemoryMapping, invoke_context: &InvokeContext, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { if signers_seeds_len > 0 { let signers_seeds = translate_slice::( memory_mapping, @@ -596,11 +572,11 @@ impl<'a, 'b> SyscallInvokeSigned<'a, 'b> for SyscallInvokeSignedC<'a, 'b> { invoke_context.get_check_size(), ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; Pubkey::create_program_address(&seeds_bytes, program_id) .map_err(|err| SyscallError::BadSeeds(err).into()) }) - .collect::, EbpfError>>()?) + .collect::, EbpfError>>()?) } else { Ok(vec![]) } @@ -614,9 +590,9 @@ fn get_translated_accounts<'a, T, F>( account_infos: &[T], invoke_context: &mut InvokeContext, do_translate: F, -) -> Result, EbpfError> +) -> Result, EbpfError> where - F: Fn(&T, &InvokeContext) -> Result, EbpfError>, + F: Fn(&T, &InvokeContext) -> Result, EbpfError>, { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context @@ -686,9 +662,9 @@ where .set_data_from_slice(caller_account.data) .map_err(SyscallError::InstructionError)?, Err(err) if callee_account.get_data() != caller_account.data => { - return Err(EbpfError::UserError(BpfError::SyscallError( + return Err(EbpfError::UserError(Box::new(BpfError::SyscallError( SyscallError::InstructionError(err), - ))); + )))); } _ => {} } @@ -764,7 +740,7 @@ fn check_instruction_size( num_accounts: usize, data_len: usize, invoke_context: &mut InvokeContext, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if invoke_context .feature_set .is_active(&feature_set::loosen_cpi_size_restriction::id()) @@ -803,7 +779,7 @@ fn check_instruction_size( fn check_account_infos( num_account_infos: usize, invoke_context: &mut InvokeContext, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if invoke_context .feature_set .is_active(&feature_set::loosen_cpi_size_restriction::id()) @@ -850,7 +826,7 @@ fn check_authorized_program( program_id: &Pubkey, instruction_data: &[u8], invoke_context: &InvokeContext, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if native_loader::check_id(program_id) || bpf_loader::check_id(program_id) || bpf_loader_deprecated::check_id(program_id) @@ -868,23 +844,21 @@ fn check_authorized_program( } /// Call process instruction, common to both Rust and C -fn call<'a, 'b: 'a>( - syscall: &mut dyn SyscallInvokeSigned<'a, 'b>, +fn cpi_common( + invoke_context: &mut InvokeContext, instruction_addr: u64, account_infos_addr: u64, account_infos_len: u64, signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &mut MemoryMapping, -) -> Result> { - let mut invoke_context = syscall.get_context_mut()?; +) -> Result { invoke_context .get_compute_meter() 
.consume(invoke_context.get_compute_budget().invoke_units)?; // Translate and verify caller's data - let instruction = - syscall.translate_instruction(instruction_addr, memory_mapping, *invoke_context)?; + let instruction = S::translate_instruction(instruction_addr, memory_mapping, invoke_context)?; let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context .get_current_instruction_context() @@ -892,24 +866,24 @@ fn call<'a, 'b: 'a>( let caller_program_id = instruction_context .get_last_program_key(transaction_context) .map_err(SyscallError::InstructionError)?; - let signers = syscall.translate_signers( + let signers = S::translate_signers( caller_program_id, signers_seeds_addr, signers_seeds_len, memory_mapping, - *invoke_context, + invoke_context, )?; let (instruction_accounts, program_indices) = invoke_context .prepare_instruction(&instruction, &signers) .map_err(SyscallError::InstructionError)?; - check_authorized_program(&instruction.program_id, &instruction.data, *invoke_context)?; - let mut accounts = syscall.translate_accounts( + check_authorized_program(&instruction.program_id, &instruction.data, invoke_context)?; + let mut accounts = S::translate_accounts( &instruction_accounts, &program_indices, account_infos_addr, account_infos_len, memory_mapping, - *invoke_context, + invoke_context, )?; // Process instruction diff --git a/programs/bpf_loader/src/syscalls/logging.rs b/programs/bpf_loader/src/syscalls/logging.rs index ebb40967ee9853..08444a89079cf4 100644 --- a/programs/bpf_loader/src/syscalls/logging.rs +++ b/programs/bpf_loader/src/syscalls/logging.rs @@ -3,67 +3,50 @@ use {super::*, crate::declare_syscall}; declare_syscall!( /// Log a user's info message SyscallLog, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, addr: u64, len: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context .get_compute_budget() .syscall_base_cost .max(len); - question_mark!(invoke_context.get_compute_meter().consume(cost), result); - - question_mark!( - translate_string_and_do( - memory_mapping, - addr, - len, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - &mut |string: &str| { - stable_log::program_log(&invoke_context.get_log_collector(), string); - Ok(0) - } - ), - result - ); - *result = Ok(0); + invoke_context.get_compute_meter().consume(cost)?; + + translate_string_and_do( + memory_mapping, + addr, + len, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + &mut |string: &str| { + stable_log::program_log(&invoke_context.get_log_collector(), string); + Ok(0) + }, + )?; + Ok(0) } ); declare_syscall!( /// Log 5 64-bit values SyscallLogU64, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64, _memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context.get_compute_budget().log_64_units; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; stable_log::program_log( 
&invoke_context.get_log_collector(), @@ -72,151 +55,112 @@ declare_syscall!( arg1, arg2, arg3, arg4, arg5 ), ); - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// Log current compute consumption SyscallLogBpfComputeUnits, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, _memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context.get_compute_budget().syscall_base_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; ic_logger_msg!( invoke_context.get_log_collector(), "Program consumption: {} units remaining", invoke_context.get_compute_meter().borrow().get_remaining() ); - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// Log 5 64-bit values SyscallLogPubkey, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, pubkey_addr: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context.get_compute_budget().log_pubkey_units; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let pubkey = question_mark!( - translate_type::( - memory_mapping, - pubkey_addr, - invoke_context.get_check_aligned() - ), - result - ); + let pubkey = translate_type::( + memory_mapping, + pubkey_addr, + invoke_context.get_check_aligned(), + )?; stable_log::program_log(&invoke_context.get_log_collector(), &pubkey.to_string()); - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// Log data handling SyscallLogData, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, addr: u64, len: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let budget = invoke_context.get_compute_budget(); - question_mark!( - invoke_context - .get_compute_meter() - .consume(budget.syscall_base_cost), - result - ); - - let untranslated_fields = question_mark!( - translate_slice::<&[u8]>( - memory_mapping, - addr, - len, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - - question_mark!( - invoke_context.get_compute_meter().consume( - budget - .syscall_base_cost - .saturating_mul(untranslated_fields.len() as u64) - ), - result - ); - question_mark!( - invoke_context.get_compute_meter().consume( - untranslated_fields - .iter() - .fold(0, |total, e| total.saturating_add(e.len() as u64)) - ), - result - ); + invoke_context + .get_compute_meter() + .consume(budget.syscall_base_cost)?; + + let untranslated_fields = translate_slice::<&[u8]>( + memory_mapping, + addr, + len, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + + invoke_context.get_compute_meter().consume( + budget + .syscall_base_cost + .saturating_mul(untranslated_fields.len() as u64), + )?; + invoke_context.get_compute_meter().consume( + untranslated_fields + 
.iter() + .fold(0, |total, e| total.saturating_add(e.len() as u64)), + )?; let mut fields = Vec::with_capacity(untranslated_fields.len()); for untranslated_field in untranslated_fields { - fields.push(question_mark!( - translate_slice::( - memory_mapping, - untranslated_field.as_ptr() as *const _ as u64, - untranslated_field.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - )); + fields.push(translate_slice::( + memory_mapping, + untranslated_field.as_ptr() as *const _ as u64, + untranslated_field.len() as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?); } let log_collector = invoke_context.get_log_collector(); stable_log::program_data(&log_collector, &fields); - *result = Ok(0); + Ok(0) } ); diff --git a/programs/bpf_loader/src/syscalls/mem_ops.rs b/programs/bpf_loader/src/syscalls/mem_ops.rs index 2fb5cd064aad2d..112b8a890c5157 100644 --- a/programs/bpf_loader/src/syscalls/mem_ops.rs +++ b/programs/bpf_loader/src/syscalls/mem_ops.rs @@ -1,9 +1,6 @@ use {super::*, crate::declare_syscall}; -fn mem_op_consume<'a, 'b>( - invoke_context: &Ref<&'a mut InvokeContext<'b>>, - n: u64, -) -> Result<(), EbpfError> { +fn mem_op_consume(invoke_context: &mut InvokeContext, n: u64) -> Result<(), EbpfError> { let compute_budget = invoke_context.get_compute_budget(); let cost = compute_budget .mem_op_base_cost @@ -14,54 +11,40 @@ fn mem_op_consume<'a, 'b>( declare_syscall!( /// memcpy SyscallMemcpy, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, dst_addr: u64, src_addr: u64, n: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - question_mark!(mem_op_consume(&invoke_context, n), result); + ) -> Result { + mem_op_consume(invoke_context, n)?; let do_check_physical_overlapping = invoke_context .feature_set .is_active(&check_physical_overlapping::id()); if !is_nonoverlapping(src_addr, dst_addr, n) { - *result = Err(SyscallError::CopyOverlapping.into()); - return; + return Err(SyscallError::CopyOverlapping.into()); } - let dst_ptr = question_mark!( - translate_slice_mut::( - memory_mapping, - dst_addr, - n, - invoke_context.get_check_aligned(), - invoke_context.get_check_size() - ), - result - ) + let dst_ptr = translate_slice_mut::( + memory_mapping, + dst_addr, + n, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )? .as_mut_ptr(); - let src_ptr = question_mark!( - translate_slice::( - memory_mapping, - src_addr, - n, - invoke_context.get_check_aligned(), - invoke_context.get_check_size() - ), - result - ) + let src_ptr = translate_slice::( + memory_mapping, + src_addr, + n, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )? 
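        // Overlap is rejected twice in this syscall: first on the VM-side
        // addresses (the is_nonoverlapping guard above), then, behind the
        // check_physical_overlapping feature, on the translated host pointers
        // below. A sketch of the property both checks enforce (illustrative
        // helper only, not the crate's actual signature):
        /*
        fn nonoverlapping(a: u64, b: u64, n: u64) -> bool {
            // Two ranges of length n starting at a and b are disjoint exactly
            // when their start addresses are at least n apart.
            if a > b { a - b >= n } else { b - a >= n }
        }
        */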
.as_ptr(); if do_check_physical_overlapping && !is_nonoverlapping(src_ptr as usize, dst_ptr as usize, n as usize) @@ -74,111 +57,82 @@ declare_syscall!( std::ptr::copy_nonoverlapping(src_ptr, dst_ptr, n as usize); } } - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// memmove SyscallMemmove, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, dst_addr: u64, src_addr: u64, n: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - question_mark!(mem_op_consume(&invoke_context, n), result); + ) -> Result { + mem_op_consume(invoke_context, n)?; - let dst = question_mark!( - translate_slice_mut::( - memory_mapping, - dst_addr, - n, - invoke_context.get_check_aligned(), - invoke_context.get_check_size() - ), - result - ); - let src = question_mark!( - translate_slice::( - memory_mapping, - src_addr, - n, - invoke_context.get_check_aligned(), - invoke_context.get_check_size() - ), - result - ); + let dst = translate_slice_mut::( + memory_mapping, + dst_addr, + n, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let src = translate_slice::( + memory_mapping, + src_addr, + n, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; unsafe { std::ptr::copy(src.as_ptr(), dst.as_mut_ptr(), n as usize); } - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// memcmp SyscallMemcmp, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, s1_addr: u64, s2_addr: u64, n: u64, cmp_result_addr: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - question_mark!(mem_op_consume(&invoke_context, n), result); + ) -> Result { + mem_op_consume(invoke_context, n)?; - let s1 = question_mark!( - translate_slice::( - memory_mapping, - s1_addr, - n, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - let s2 = question_mark!( - translate_slice::( - memory_mapping, - s2_addr, - n, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - let cmp_result = question_mark!( - translate_type_mut::( - memory_mapping, - cmp_result_addr, - invoke_context.get_check_aligned() - ), - result - ); + let s1 = translate_slice::( + memory_mapping, + s1_addr, + n, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let s2 = translate_slice::( + memory_mapping, + s2_addr, + n, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let cmp_result = translate_type_mut::( + memory_mapping, + cmp_result_addr, + invoke_context.get_check_aligned(), + )?; let mut i = 0; while i < n as usize { - let a = *question_mark!(s1.get(i).ok_or(SyscallError::InvalidLength,), result); - let b = *question_mark!(s2.get(i).ok_or(SyscallError::InvalidLength,), result); + let a = *s1.get(i).ok_or(SyscallError::InvalidLength)?; + let b = *s2.get(i).ok_or(SyscallError::InvalidLength)?; if a != b { *cmp_result = if invoke_context .feature_set @@ -191,50 +145,39 @@ declare_syscall!( a as i32 - b as i32 } }; - *result = Ok(0); - return; + return Ok(0); }; i = i.saturating_add(1); } *cmp_result = 0; - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// memset 
    SyscallMemset,
-    fn call(
-        &mut self,
+    fn inner_call(
+        invoke_context: &mut InvokeContext,
         s_addr: u64,
         c: u64,
         n: u64,
         _arg4: u64,
         _arg5: u64,
         memory_mapping: &mut MemoryMapping,
-        result: &mut Result<u64, EbpfError<BpfError>>,
-    ) {
-        let invoke_context = question_mark!(
-            self.invoke_context
-                .try_borrow()
-                .map_err(|_| SyscallError::InvokeContextBorrowFailed),
-            result
-        );
-        question_mark!(mem_op_consume(&invoke_context, n), result);
+    ) -> Result<u64, EbpfError> {
+        mem_op_consume(invoke_context, n)?;

-        let s = question_mark!(
-            translate_slice_mut::<u8>(
-                memory_mapping,
-                s_addr,
-                n,
-                invoke_context.get_check_aligned(),
-                invoke_context.get_check_size(),
-            ),
-            result
-        );
+        let s = translate_slice_mut::<u8>(
+            memory_mapping,
+            s_addr,
+            n,
+            invoke_context.get_check_aligned(),
+            invoke_context.get_check_size(),
+        )?;
         for val in s.iter_mut().take(n as usize) {
             *val = c as u8;
         }
-        *result = Ok(0);
+        Ok(0)
     }
 );
diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index 0134f1b94e1af6..41b5dbcedade55 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -11,7 +11,7 @@ pub use self::{
 };
 #[allow(deprecated)]
 use {
-    crate::{allocator_bump::BpfAllocator, BpfError},
+    crate::BpfError,
     solana_program_runtime::{
         ic_logger_msg, ic_msg,
         invoke_context::{ComputeMeter, InvokeContext},
         stable_log,
         timings::ExecuteTimings,
     },
     solana_rbpf::{
-        aligned_memory::AlignedMemory,
-        ebpf::{self, HOST_ALIGN},
         error::EbpfError,
         memory_region::{AccessType, MemoryMapping},
-        question_mark,
-        verifier::RequisiteVerifier,
-        vm::{EbpfVm, SyscallObject, SyscallRegistry},
+        vm::{ProgramResult, SyscallRegistry},
     },
     solana_sdk::{
         account::{ReadableAccount, WritableAccount},
         blake3, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable,
         entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS},
         feature_set::{
-            self, blake3_syscall_enabled, check_physical_overlapping, check_slice_translation_size,
-            curve25519_syscall_enabled, disable_cpi_setting_executable_and_rent_epoch,
-            disable_fees_sysvar, enable_early_verification_of_account_modifications,
-            libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id,
-            stop_sibling_instruction_search_at_parent, syscall_saturated_math,
+            self, blake3_syscall_enabled, check_physical_overlapping, curve25519_syscall_enabled,
+            disable_cpi_setting_executable_and_rent_epoch, disable_fees_sysvar,
+            enable_early_verification_of_account_modifications, libsecp256k1_0_5_upgrade_enabled,
+            limit_secp256k1_recovery_id, stop_sibling_instruction_search_at_parent,
+            syscall_saturated_math,
         },
         hash::{Hasher, HASH_BYTES},
         instruction::{
@@ -57,7 +53,7 @@ use {
     },
     std::{
         alloc::Layout,
-        cell::{Ref, RefCell, RefMut},
+        cell::RefCell,
         mem::{align_of, size_of},
         rc::Rc,
         slice::from_raw_parts_mut,
@@ -125,17 +121,17 @@ pub enum SyscallError {
     #[error("InvalidAttribute")]
     InvalidAttribute,
 }
-impl From<SyscallError> for EbpfError<BpfError> {
+impl From<SyscallError> for EbpfError {
     fn from(error: SyscallError) -> Self {
-        EbpfError::UserError(error.into())
+        EbpfError::UserError(Box::<BpfError>::new(error.into()))
     }
 }

 trait SyscallConsume {
-    fn consume(&mut self, amount: u64) -> Result<(), EbpfError<BpfError>>;
+    fn consume(&mut self, amount: u64) -> Result<(), EbpfError>;
 }
 impl SyscallConsume for Rc<RefCell<ComputeMeter>> {
-    fn consume(&mut self, amount: u64) -> Result<(), EbpfError<BpfError>> {
+    fn consume(&mut self, amount: u64) -> Result<(), EbpfError> {
         self.try_borrow_mut()
             .map_err(|_| SyscallError::InvokeContextBorrowFailed)?
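            // With EbpfError no longer generic, user errors cross the rbpf
            // boundary as boxed trait objects. A minimal sketch of the round
            // trip this relies on (compare the From impl above with the
            // downcast_ref in lib.rs; illustrative, not code from this patch):
            /*
            // in: box the concrete error
            let err: EbpfError =
                EbpfError::UserError(Box::<BpfError>::new(BpfError::SyscallError(
                    SyscallError::InvalidLength,
                )));
            // out: recover the concrete type again
            if let EbpfError::UserError(user_error) = &err {
                assert!(user_error.downcast_ref::<BpfError>().is_some());
            }
            */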
.consume(amount) @@ -145,9 +141,9 @@ impl SyscallConsume for Rc> { } macro_rules! register_feature_gated_syscall { - ($syscall_registry:expr, $is_feature_active:expr, $name:expr, $init:expr, $call:expr $(,)?) => { + ($syscall_registry:expr, $is_feature_active:expr, $name:expr, $call:expr $(,)?) => { if $is_feature_active { - $syscall_registry.register_syscall_by_name($name, $init, $call) + $syscall_registry.register_syscall_by_name($name, $call) } else { Ok(()) } @@ -157,7 +153,7 @@ macro_rules! register_feature_gated_syscall { pub fn register_syscalls( invoke_context: &mut InvokeContext, disable_deploy_of_alloc_free_syscall: bool, -) -> Result> { +) -> Result { let blake3_syscall_enabled = invoke_context .feature_set .is_active(&blake3_syscall_enabled::id()); @@ -172,72 +168,43 @@ pub fn register_syscalls( let mut syscall_registry = SyscallRegistry::default(); // Abort - syscall_registry.register_syscall_by_name(b"abort", SyscallAbort::init, SyscallAbort::call)?; + syscall_registry.register_syscall_by_name(b"abort", SyscallAbort::call)?; // Panic - syscall_registry.register_syscall_by_name( - b"sol_panic_", - SyscallPanic::init, - SyscallPanic::call, - )?; + syscall_registry.register_syscall_by_name(b"sol_panic_", SyscallPanic::call)?; // Logging - syscall_registry.register_syscall_by_name(b"sol_log_", SyscallLog::init, SyscallLog::call)?; - syscall_registry.register_syscall_by_name( - b"sol_log_64_", - SyscallLogU64::init, - SyscallLogU64::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_log_compute_units_", - SyscallLogBpfComputeUnits::init, - SyscallLogBpfComputeUnits::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_log_pubkey", - SyscallLogPubkey::init, - SyscallLogPubkey::call, - )?; + syscall_registry.register_syscall_by_name(b"sol_log_", SyscallLog::call)?; + syscall_registry.register_syscall_by_name(b"sol_log_64_", SyscallLogU64::call)?; + syscall_registry + .register_syscall_by_name(b"sol_log_compute_units_", SyscallLogBpfComputeUnits::call)?; + syscall_registry.register_syscall_by_name(b"sol_log_pubkey", SyscallLogPubkey::call)?; // Program defined addresses (PDA) syscall_registry.register_syscall_by_name( b"sol_create_program_address", - SyscallCreateProgramAddress::init, SyscallCreateProgramAddress::call, )?; syscall_registry.register_syscall_by_name( b"sol_try_find_program_address", - SyscallTryFindProgramAddress::init, SyscallTryFindProgramAddress::call, )?; // Sha256 - syscall_registry.register_syscall_by_name( - b"sol_sha256", - SyscallSha256::init, - SyscallSha256::call, - )?; + syscall_registry.register_syscall_by_name(b"sol_sha256", SyscallSha256::call)?; // Keccak256 - syscall_registry.register_syscall_by_name( - b"sol_keccak256", - SyscallKeccak256::init, - SyscallKeccak256::call, - )?; + syscall_registry.register_syscall_by_name(b"sol_keccak256", SyscallKeccak256::call)?; // Secp256k1 Recover - syscall_registry.register_syscall_by_name( - b"sol_secp256k1_recover", - SyscallSecp256k1Recover::init, - SyscallSecp256k1Recover::call, - )?; + syscall_registry + .register_syscall_by_name(b"sol_secp256k1_recover", SyscallSecp256k1Recover::call)?; // Blake3 register_feature_gated_syscall!( syscall_registry, blake3_syscall_enabled, b"sol_blake3", - SyscallBlake3::init, SyscallBlake3::call, )?; @@ -248,171 +215,88 @@ pub fn register_syscalls( syscall_registry, curve25519_syscall_enabled, b"sol_curve_validate_point", - SyscallCurvePointValidation::init, SyscallCurvePointValidation::call, )?; register_feature_gated_syscall!( syscall_registry, 
curve25519_syscall_enabled, b"sol_curve_group_op", - SyscallCurveGroupOps::init, SyscallCurveGroupOps::call, )?; // Sysvars - syscall_registry.register_syscall_by_name( - b"sol_get_clock_sysvar", - SyscallGetClockSysvar::init, - SyscallGetClockSysvar::call, - )?; + syscall_registry + .register_syscall_by_name(b"sol_get_clock_sysvar", SyscallGetClockSysvar::call)?; syscall_registry.register_syscall_by_name( b"sol_get_epoch_schedule_sysvar", - SyscallGetEpochScheduleSysvar::init, SyscallGetEpochScheduleSysvar::call, )?; register_feature_gated_syscall!( syscall_registry, !disable_fees_sysvar, b"sol_get_fees_sysvar", - SyscallGetFeesSysvar::init, SyscallGetFeesSysvar::call, )?; - syscall_registry.register_syscall_by_name( - b"sol_get_rent_sysvar", - SyscallGetRentSysvar::init, - SyscallGetRentSysvar::call, - )?; + syscall_registry + .register_syscall_by_name(b"sol_get_rent_sysvar", SyscallGetRentSysvar::call)?; // Memory ops - syscall_registry.register_syscall_by_name( - b"sol_memcpy_", - SyscallMemcpy::init, - SyscallMemcpy::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_memmove_", - SyscallMemmove::init, - SyscallMemmove::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_memcmp_", - SyscallMemcmp::init, - SyscallMemcmp::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_memset_", - SyscallMemset::init, - SyscallMemset::call, - )?; + syscall_registry.register_syscall_by_name(b"sol_memcpy_", SyscallMemcpy::call)?; + syscall_registry.register_syscall_by_name(b"sol_memmove_", SyscallMemmove::call)?; + syscall_registry.register_syscall_by_name(b"sol_memcmp_", SyscallMemcmp::call)?; + syscall_registry.register_syscall_by_name(b"sol_memset_", SyscallMemset::call)?; if is_abi_v2 { // Set account attributes syscall_registry.register_syscall_by_name( b"sol_set_account_attributes", - SyscallSetAccountProperties::init, SyscallSetAccountProperties::call, )?; } else { // Processed sibling instructions syscall_registry.register_syscall_by_name( b"sol_get_processed_sibling_instruction", - SyscallGetProcessedSiblingInstruction::init, SyscallGetProcessedSiblingInstruction::call, )?; // Stack height - syscall_registry.register_syscall_by_name( - b"sol_get_stack_height", - SyscallGetStackHeight::init, - SyscallGetStackHeight::call, - )?; + syscall_registry + .register_syscall_by_name(b"sol_get_stack_height", SyscallGetStackHeight::call)?; // Return data - syscall_registry.register_syscall_by_name( - b"sol_set_return_data", - SyscallSetReturnData::init, - SyscallSetReturnData::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_get_return_data", - SyscallGetReturnData::init, - SyscallGetReturnData::call, - )?; + syscall_registry + .register_syscall_by_name(b"sol_set_return_data", SyscallSetReturnData::call)?; + syscall_registry + .register_syscall_by_name(b"sol_get_return_data", SyscallGetReturnData::call)?; // Cross-program invocation - syscall_registry.register_syscall_by_name( - b"sol_invoke_signed_c", - SyscallInvokeSignedC::init, - SyscallInvokeSignedC::call, - )?; - syscall_registry.register_syscall_by_name( - b"sol_invoke_signed_rust", - SyscallInvokeSignedRust::init, - SyscallInvokeSignedRust::call, - )?; + syscall_registry + .register_syscall_by_name(b"sol_invoke_signed_c", SyscallInvokeSignedC::call)?; + syscall_registry + .register_syscall_by_name(b"sol_invoke_signed_rust", SyscallInvokeSignedRust::call)?; // Memory allocator register_feature_gated_syscall!( syscall_registry, !disable_deploy_of_alloc_free_syscall, b"sol_alloc_free_", - 
SyscallAllocFree::init, SyscallAllocFree::call, )?; } // Log data - syscall_registry.register_syscall_by_name( - b"sol_log_data", - SyscallLogData::init, - SyscallLogData::call, - )?; + syscall_registry.register_syscall_by_name(b"sol_log_data", SyscallLogData::call)?; Ok(syscall_registry) } -pub fn bind_syscall_context_objects<'a, 'b>( - vm: &mut EbpfVm<'a, RequisiteVerifier, BpfError, crate::ThisInstructionMeter>, - invoke_context: &'a mut InvokeContext<'b>, - heap: AlignedMemory, - orig_account_lengths: Vec, -) -> Result<(), EbpfError> { - let check_aligned = bpf_loader_deprecated::id() - != invoke_context - .transaction_context - .get_current_instruction_context() - .and_then(|instruction_context| { - instruction_context - .try_borrow_last_program_account(invoke_context.transaction_context) - }) - .map(|program_account| *program_account.get_owner()) - .map_err(SyscallError::InstructionError)?; - let check_size = invoke_context - .feature_set - .is_active(&check_slice_translation_size::id()); - - invoke_context - .set_syscall_context( - check_aligned, - check_size, - orig_account_lengths, - Rc::new(RefCell::new(BpfAllocator::new(heap, ebpf::MM_HEAP_START))), - ) - .map_err(SyscallError::InstructionError)?; - - let invoke_context = Rc::new(RefCell::new(invoke_context)); - vm.bind_syscall_context_objects(invoke_context)?; - - Ok(()) -} - fn translate( memory_mapping: &MemoryMapping, access_type: AccessType, vm_addr: u64, len: u64, -) -> Result> { - memory_mapping.map::(access_type, vm_addr, len) +) -> Result { + memory_mapping.map(access_type, vm_addr, len).into() } fn translate_type_inner<'a, T>( @@ -420,7 +304,7 @@ fn translate_type_inner<'a, T>( access_type: AccessType, vm_addr: u64, check_aligned: bool, -) -> Result<&'a mut T, EbpfError> { +) -> Result<&'a mut T, EbpfError> { let host_addr = translate(memory_mapping, access_type, vm_addr, size_of::() as u64)?; if check_aligned && (host_addr as *mut T as usize).wrapping_rem(align_of::()) != 0 { @@ -432,14 +316,14 @@ fn translate_type_mut<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, check_aligned: bool, -) -> Result<&'a mut T, EbpfError> { +) -> Result<&'a mut T, EbpfError> { translate_type_inner::(memory_mapping, AccessType::Store, vm_addr, check_aligned) } fn translate_type<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, check_aligned: bool, -) -> Result<&'a T, EbpfError> { +) -> Result<&'a T, EbpfError> { translate_type_inner::(memory_mapping, AccessType::Load, vm_addr, check_aligned) .map(|value| &*value) } @@ -451,7 +335,7 @@ fn translate_slice_inner<'a, T>( len: u64, check_aligned: bool, check_size: bool, -) -> Result<&'a mut [T], EbpfError> { +) -> Result<&'a mut [T], EbpfError> { if len == 0 { return Ok(&mut []); } @@ -474,7 +358,7 @@ fn translate_slice_mut<'a, T>( len: u64, check_aligned: bool, check_size: bool, -) -> Result<&'a mut [T], EbpfError> { +) -> Result<&'a mut [T], EbpfError> { translate_slice_inner::( memory_mapping, AccessType::Store, @@ -490,7 +374,7 @@ fn translate_slice<'a, T>( len: u64, check_aligned: bool, check_size: bool, -) -> Result<&'a [T], EbpfError> { +) -> Result<&'a [T], EbpfError> { translate_slice_inner::( memory_mapping, AccessType::Load, @@ -510,8 +394,8 @@ fn translate_string_and_do( len: u64, check_aligned: bool, check_size: bool, - work: &mut dyn FnMut(&str) -> Result>, -) -> Result> { + work: &mut dyn FnMut(&str) -> Result, +) -> Result { let buf = translate_slice::(memory_mapping, addr, len, check_aligned, check_size)?; let i = match buf.iter().position(|byte| *byte == 0) { 
Some(i) => i, @@ -524,25 +408,29 @@ fn translate_string_and_do( } } -type SyscallContext<'a, 'b> = Rc>>; - #[macro_export] macro_rules! declare_syscall { - ($(#[$attr:meta])* $name:ident, $call:item) => { + ($(#[$attr:meta])* $name:ident, $inner_call:item) => { $(#[$attr])* - pub struct $name<'a, 'b> { - pub(crate) invoke_context: SyscallContext<'a, 'b>, - } - impl<'a, 'b> $name<'a, 'b> { - pub fn init( - invoke_context: SyscallContext<'a, 'b>, - ) -> Box<(dyn SyscallObject + 'a)> { - Box::new(Self { invoke_context }) + pub struct $name {} + impl $name { + $inner_call + pub fn call( + invoke_context: &mut InvokeContext, + arg_a: u64, + arg_b: u64, + arg_c: u64, + arg_d: u64, + arg_e: u64, + memory_mapping: &mut MemoryMapping, + result: &mut ProgramResult, + ) { + let converted_result: ProgramResult = Self::inner_call( + invoke_context, arg_a, arg_b, arg_c, arg_d, arg_e, memory_mapping, + ).into(); + *result = converted_result; } } - impl<'a, 'b> SyscallObject for $name<'a, 'b> { - $call - } }; } @@ -552,23 +440,16 @@ declare_syscall!( /// `abort()` is not intended to be called explicitly by the program. /// Causes the BPF program to be halted immediately SyscallAbort, - fn call( - &mut self, + fn inner_call( + _invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, _memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let _ = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - *result = Err(SyscallError::Abort.into()); + ) -> Result { + Err(SyscallError::Abort.into()) } ); @@ -576,32 +457,25 @@ declare_syscall!( /// Panic syscall function, called when the BPF program calls 'sol_panic_()` /// Causes the BPF program to be halted immediately SyscallPanic, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, file: u64, len: u64, line: u64, column: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - question_mark!(invoke_context.get_compute_meter().consume(len), result); + ) -> Result { + invoke_context.get_compute_meter().consume(len)?; - *result = translate_string_and_do( + translate_string_and_do( memory_mapping, file, len, invoke_context.get_check_aligned(), invoke_context.get_check_size(), &mut |string: &str| Err(SyscallError::Panic(string.to_string(), line, column).into()), - ); + ) } ); @@ -613,34 +487,21 @@ declare_syscall!( /// information about that memory (start address and size) is passed /// to the VM to use for enforcement. 
SyscallAllocFree, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, size: u64, free_addr: u64, _arg3: u64, _arg4: u64, _arg5: u64, _memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - let allocator = question_mark!( - invoke_context - .get_allocator() - .map_err(SyscallError::InstructionError), - result - ); - let mut allocator = question_mark!( - allocator - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { + let allocator = invoke_context + .get_allocator() + .map_err(SyscallError::InstructionError)?; + let mut allocator = allocator + .try_borrow_mut() + .map_err(|_| SyscallError::InvokeContextBorrowFailed)?; let align = if invoke_context.get_check_aligned() { BPF_ALIGN_OF_U128 @@ -650,11 +511,10 @@ declare_syscall!( let layout = match Layout::from_size_align(size as usize, align) { Ok(layout) => layout, Err(_) => { - *result = Ok(0); - return; + return Ok(0); } }; - *result = if free_addr == 0 { + if free_addr == 0 { match allocator.alloc(layout) { Ok(addr) => Ok(addr as u64), Err(_) => Ok(0), @@ -662,7 +522,7 @@ declare_syscall!( } else { allocator.dealloc(free_addr, layout); Ok(0) - }; + } } ); @@ -673,7 +533,7 @@ fn translate_and_check_program_address_inputs<'a>( memory_mapping: &mut MemoryMapping, check_aligned: bool, check_size: bool, -) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { +) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { let untranslated_seeds = translate_slice::<&[&u8]>( memory_mapping, seeds_addr, @@ -698,7 +558,7 @@ fn translate_and_check_program_address_inputs<'a>( check_size, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let program_id = translate_type::(memory_mapping, program_id_addr, check_aligned)?; Ok((seeds, program_id)) } @@ -706,96 +566,72 @@ fn translate_and_check_program_address_inputs<'a>( declare_syscall!( /// Create a program address SyscallCreateProgramAddress, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, seeds_addr: u64, seeds_len: u64, program_id_addr: u64, address_addr: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context .get_compute_budget() .create_program_address_units; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let (seeds, program_id) = question_mark!( - translate_and_check_program_address_inputs( - seeds_addr, - seeds_len, - program_id_addr, - memory_mapping, - invoke_context.get_check_aligned(), - invoke_context.get_check_size() - ), - result - ); + let (seeds, program_id) = translate_and_check_program_address_inputs( + seeds_addr, + seeds_len, + program_id_addr, + memory_mapping, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; let new_address = match Pubkey::create_program_address(&seeds, program_id) { Ok(address) => address, Err(_) => { - *result = Ok(1); - return; + return Ok(1); } }; - let address = question_mark!( - translate_slice_mut::( - memory_mapping, - address_addr, - 32, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + let address = 
translate_slice_mut::( + memory_mapping, + address_addr, + 32, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; address.copy_from_slice(new_address.as_ref()); - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// Create a program address SyscallTryFindProgramAddress, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, seeds_addr: u64, seeds_len: u64, program_id_addr: u64, address_addr: u64, bump_seed_addr: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context .get_compute_budget() .create_program_address_units; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let (seeds, program_id) = question_mark!( - translate_and_check_program_address_inputs( - seeds_addr, - seeds_len, - program_id_addr, - memory_mapping, - invoke_context.get_check_aligned(), - invoke_context.get_check_size() - ), - result - ); + let (seeds, program_id) = translate_and_check_program_address_inputs( + seeds_addr, + seeds_len, + program_id_addr, + memory_mapping, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; let mut bump_seed = [std::u8::MAX]; for _ in 0..std::u8::MAX { @@ -806,56 +642,42 @@ declare_syscall!( if let Ok(new_address) = Pubkey::create_program_address(&seeds_with_bump, program_id) { - let bump_seed_ref = question_mark!( - translate_type_mut::( - memory_mapping, - bump_seed_addr, - invoke_context.get_check_aligned() - ), - result - ); - let address = question_mark!( - translate_slice_mut::( - memory_mapping, - address_addr, - 32, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + let bump_seed_ref = translate_type_mut::( + memory_mapping, + bump_seed_addr, + invoke_context.get_check_aligned(), + )?; + let address = translate_slice_mut::( + memory_mapping, + address_addr, + 32, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; *bump_seed_ref = bump_seed[0]; address.copy_from_slice(new_address.as_ref()); - *result = Ok(0); - return; + return Ok(0); } } bump_seed[0] = bump_seed[0].saturating_sub(1); - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; } - *result = Ok(1); + Ok(1) } ); declare_syscall!( /// SHA256 SyscallSha256, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, vals_addr: u64, vals_len: u64, result_addr: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let compute_budget = invoke_context.get_compute_budget(); if compute_budget.sha256_max_slices < vals_len { ic_msg!( @@ -864,82 +686,63 @@ declare_syscall!( vals_len, compute_budget.sha256_max_slices, ); - *result = Err(SyscallError::TooManySlices.into()); - return; + return Err(SyscallError::TooManySlices.into()); } - question_mark!( - invoke_context - .get_compute_meter() - .consume(compute_budget.sha256_base_cost), - result - ); - let hash_result = question_mark!( - translate_slice_mut::( + invoke_context + .get_compute_meter() + .consume(compute_budget.sha256_base_cost)?; + + let hash_result = 
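The bump-seed loop in `SyscallTryFindProgramAddress` above is the syscall-side twin of `Pubkey::find_program_address`: it walks the bump down from 255 until `create_program_address` yields an off-curve key. A host-side check of that relationship, assuming only the `solana-sdk` crate (the `b"vault"` seed is made up):

    use solana_sdk::pubkey::Pubkey;

    fn main() {
        let program_id = Pubkey::new_unique();
        let (pda, bump) = Pubkey::find_program_address(&[b"vault"], &program_id);
        // Re-deriving with the returned bump reproduces the address, which is
        // exactly what the syscall writes back through `bump_seed_addr`.
        let rederived =
            Pubkey::create_program_address(&[b"vault", &[bump]], &program_id).unwrap();
        assert_eq!(pda, rederived);
    }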
translate_slice_mut::( + memory_mapping, + result_addr, + HASH_BYTES as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let mut hasher = Hasher::default(); + if vals_len > 0 { + let vals = translate_slice::<&[u8]>( memory_mapping, - result_addr, - HASH_BYTES as u64, + vals_addr, + vals_len, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - ), - result - ); - let mut hasher = Hasher::default(); - if vals_len > 0 { - let vals = question_mark!( - translate_slice::<&[u8]>( + )?; + for val in vals.iter() { + let bytes = translate_slice::( memory_mapping, - vals_addr, - vals_len, + val.as_ptr() as u64, + val.len() as u64, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - ), - result - ); - for val in vals.iter() { - let bytes = question_mark!( - translate_slice::( - memory_mapping, - val.as_ptr() as u64, - val.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + )?; let cost = compute_budget.mem_op_base_cost.max( compute_budget .sha256_byte_cost .saturating_mul((val.len() as u64).saturating_div(2)), ); - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; hasher.hash(bytes); } } hash_result.copy_from_slice(&hasher.result().to_bytes()); - *result = Ok(0); + Ok(0) } ); declare_syscall!( // Keccak256 SyscallKeccak256, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, vals_addr: u64, vals_len: u64, result_addr: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let compute_budget = invoke_context.get_compute_budget(); if compute_budget.sha256_max_slices < vals_len { ic_msg!( @@ -948,121 +751,92 @@ declare_syscall!( vals_len, compute_budget.sha256_max_slices, ); - *result = Err(SyscallError::TooManySlices.into()); - return; + return Err(SyscallError::TooManySlices.into()); } - question_mark!( - invoke_context - .get_compute_meter() - .consume(compute_budget.sha256_base_cost), - result - ); - let hash_result = question_mark!( - translate_slice_mut::( + invoke_context + .get_compute_meter() + .consume(compute_budget.sha256_base_cost)?; + + let hash_result = translate_slice_mut::( + memory_mapping, + result_addr, + keccak::HASH_BYTES as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let mut hasher = keccak::Hasher::default(); + if vals_len > 0 { + let vals = translate_slice::<&[u8]>( memory_mapping, - result_addr, - keccak::HASH_BYTES as u64, + vals_addr, + vals_len, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - ), - result - ); - let mut hasher = keccak::Hasher::default(); - if vals_len > 0 { - let vals = question_mark!( - translate_slice::<&[u8]>( + )?; + for val in vals.iter() { + let bytes = translate_slice::( memory_mapping, - vals_addr, - vals_len, + val.as_ptr() as u64, + val.len() as u64, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - ), - result - ); - for val in vals.iter() { - let bytes = question_mark!( - translate_slice::( - memory_mapping, - val.as_ptr() as u64, - val.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + )?; let cost = compute_budget.mem_op_base_cost.max( compute_budget .sha256_byte_cost 
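Since the syscall feeds each translated slice to the hasher in order, its output agrees with `solana_sdk::hash::hashv` over the same slices, which is what the reworked tests further below rely on. A standalone check:

    use solana_sdk::hash::{hashv, Hasher};

    fn main() {
        let a: &[u8] = b"Gaggablaghblagh!";
        let b: &[u8] = b"flurbos";
        let mut hasher = Hasher::default();
        for chunk in [a, b] {
            hasher.hash(chunk); // one hash() call per translated slice
        }
        assert_eq!(hasher.result(), hashv(&[a, b]));
    }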
.saturating_mul((val.len() as u64).saturating_div(2)), ); - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; hasher.hash(bytes); } } hash_result.copy_from_slice(&hasher.result().to_bytes()); - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// secp256k1_recover SyscallSecp256k1Recover, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, hash_addr: u64, recovery_id_val: u64, signature_addr: u64, result_addr: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let cost = invoke_context.get_compute_budget().secp256k1_recover_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let hash = question_mark!( - translate_slice::( - memory_mapping, - hash_addr, - keccak::HASH_BYTES as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - let signature = question_mark!( - translate_slice::( - memory_mapping, - signature_addr, - SECP256K1_SIGNATURE_LENGTH as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - let secp256k1_recover_result = question_mark!( - translate_slice_mut::( - memory_mapping, - result_addr, - SECP256K1_PUBLIC_KEY_LENGTH as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + let hash = translate_slice::( + memory_mapping, + hash_addr, + keccak::HASH_BYTES as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let signature = translate_slice::( + memory_mapping, + signature_addr, + SECP256K1_SIGNATURE_LENGTH as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let secp256k1_recover_result = translate_slice_mut::( + memory_mapping, + result_addr, + SECP256K1_PUBLIC_KEY_LENGTH as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; let message = match libsecp256k1::Message::parse_slice(hash) { Ok(msg) => msg, Err(_) => { - *result = Ok(Secp256k1RecoverError::InvalidHash.into()); - return; + return Ok(Secp256k1RecoverError::InvalidHash.into()); } }; let adjusted_recover_id_val = if invoke_context @@ -1072,8 +846,7 @@ declare_syscall!( match recovery_id_val.try_into() { Ok(adjusted_recover_id_val) => adjusted_recover_id_val, Err(_) => { - *result = Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); - return; + return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); } } } else { @@ -1082,9 +855,7 @@ declare_syscall!( let recovery_id = match libsecp256k1::RecoveryId::parse(adjusted_recover_id_val) { Ok(id) => id, Err(_) => { - *result = Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); - - return; + return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); } }; let sig_parse_result = if invoke_context @@ -1099,21 +870,19 @@ declare_syscall!( let signature = match sig_parse_result { Ok(sig) => sig, Err(_) => { - *result = Ok(Secp256k1RecoverError::InvalidSignature.into()); - return; + return Ok(Secp256k1RecoverError::InvalidSignature.into()); } }; let public_key = match libsecp256k1::recover(&message, &signature, &recovery_id) { Ok(key) => key.serialize(), Err(_) => { - *result = Ok(Secp256k1RecoverError::InvalidSignature.into()); - return; + return 
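The per-slice charge shared by the hashing syscalls is `max(mem_op_base_cost, sha256_byte_cost * len / 2)`, computed in saturating arithmetic. A sketch of that formula with made-up budget numbers:

    fn slice_cost(mem_op_base_cost: u64, sha256_byte_cost: u64, len: u64) -> u64 {
        mem_op_base_cost.max(sha256_byte_cost.saturating_mul(len.saturating_div(2)))
    }

    fn main() {
        // Short slices pay the flat floor; longer slices pay per byte pair.
        assert_eq!(slice_cost(10, 1, 4), 10);
        assert_eq!(slice_cost(10, 1, 64), 32);
    }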
Ok(Secp256k1RecoverError::InvalidSignature.into()); } }; secp256k1_recover_result.copy_from_slice(&public_key[1..65]); - *result = Ok(SUCCESS); + Ok(SUCCESS) } ); @@ -1122,72 +891,55 @@ declare_syscall!( // // Currently, only curve25519 Edwards and Ristretto representations are supported SyscallCurvePointValidation, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, curve_id: u64, point_addr: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { + ) -> Result { use solana_zk_token_sdk::curve25519::{curve_syscall_traits::*, edwards, ristretto}; - - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - match curve_id { CURVE25519_EDWARDS => { let cost = invoke_context .get_compute_budget() .curve25519_edwards_validate_point_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let point = question_mark!( - translate_type::( - memory_mapping, - point_addr, - invoke_context.get_check_aligned() - ), - result - ); + let point = translate_type::( + memory_mapping, + point_addr, + invoke_context.get_check_aligned(), + )?; if edwards::validate_edwards(point) { - *result = Ok(0); + Ok(0) } else { - *result = Ok(1); + Ok(1) } } CURVE25519_RISTRETTO => { let cost = invoke_context .get_compute_budget() .curve25519_ristretto_validate_point_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let point = question_mark!( - translate_type::( - memory_mapping, - point_addr, - invoke_context.get_check_aligned() - ), - result - ); + let point = translate_type::( + memory_mapping, + point_addr, + invoke_context.get_check_aligned(), + )?; if ristretto::validate_ristretto(point) { - *result = Ok(0); + Ok(0) } else { - *result = Ok(1); + Ok(1) } } - _ => { - *result = Ok(1); - } - }; + _ => Ok(1), + } } ); @@ -1196,140 +948,105 @@ declare_syscall!( // // Currently, only curve25519 Edwards and Ristretto representations are supported SyscallCurveGroupOps, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, curve_id: u64, group_op: u64, left_input_addr: u64, right_input_addr: u64, result_point_addr: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { + ) -> Result { use solana_zk_token_sdk::curve25519::{ curve_syscall_traits::*, edwards, ristretto, scalar, }; - - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - match curve_id { CURVE25519_EDWARDS => match group_op { ADD => { let cost = invoke_context .get_compute_budget() .curve25519_edwards_add_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let left_point = question_mark!( - translate_type::( - memory_mapping, - left_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); - let right_point = question_mark!( - translate_type::( - memory_mapping, - right_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); + let left_point = translate_type::( + memory_mapping, + left_input_addr, + invoke_context.get_check_aligned(), + )?; + let right_point = translate_type::( + memory_mapping, + right_input_addr, + invoke_context.get_check_aligned(), + )?; if let Some(result_point) = 
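The `[1..65]` copy above drops libsecp256k1's leading 0x04 tag byte, because the syscall ABI returns only the raw 64-byte public key. A standalone round-trip, assuming the `libsecp256k1` crate this file already depends on (the key and message bytes are arbitrary):

    use libsecp256k1::{recover, sign, Message, PublicKey, SecretKey};

    fn main() {
        let secret = SecretKey::parse(&[1u8; 32]).unwrap();
        let message = Message::parse(&[2u8; 32]);
        let (signature, recovery_id) = sign(&message, &secret);
        let public_key = recover(&message, &signature, &recovery_id).unwrap();
        let serialized = public_key.serialize(); // [u8; 65]: tag byte + 64 key bytes
        assert_eq!(serialized[0], 0x04);
        assert_eq!(PublicKey::from_secret_key(&secret), public_key);
    }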
edwards::add_edwards(left_point, right_point) { - *question_mark!( - translate_type_mut::( - memory_mapping, - result_point_addr, - invoke_context.get_check_aligned(), - ), - result - ) = result_point; + *translate_type_mut::( + memory_mapping, + result_point_addr, + invoke_context.get_check_aligned(), + )? = result_point; + Ok(0) } else { - *result = Ok(1); + Ok(1) } } SUB => { let cost = invoke_context .get_compute_budget() .curve25519_edwards_subtract_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let left_point = question_mark!( - translate_type::( - memory_mapping, - left_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); - let right_point = question_mark!( - translate_type::( - memory_mapping, - right_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); + let left_point = translate_type::( + memory_mapping, + left_input_addr, + invoke_context.get_check_aligned(), + )?; + let right_point = translate_type::( + memory_mapping, + right_input_addr, + invoke_context.get_check_aligned(), + )?; if let Some(result_point) = edwards::subtract_edwards(left_point, right_point) { - *question_mark!( - translate_type_mut::( - memory_mapping, - result_point_addr, - invoke_context.get_check_aligned(), - ), - result - ) = result_point; + *translate_type_mut::( + memory_mapping, + result_point_addr, + invoke_context.get_check_aligned(), + )? = result_point; + Ok(0) } else { - *result = Ok(1); + Ok(1) } } MUL => { let cost = invoke_context .get_compute_budget() .curve25519_edwards_multiply_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let scalar = question_mark!( - translate_type::( - memory_mapping, - left_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); - let input_point = question_mark!( - translate_type::( - memory_mapping, - right_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); + let scalar = translate_type::( + memory_mapping, + left_input_addr, + invoke_context.get_check_aligned(), + )?; + let input_point = translate_type::( + memory_mapping, + right_input_addr, + invoke_context.get_check_aligned(), + )?; if let Some(result_point) = edwards::multiply_edwards(scalar, input_point) { - *question_mark!( - translate_type_mut::( - memory_mapping, - result_point_addr, - invoke_context.get_check_aligned(), - ), - result - ) = result_point; + *translate_type_mut::( + memory_mapping, + result_point_addr, + invoke_context.get_check_aligned(), + )? 
= result_point; + Ok(0) } else { - *result = Ok(1); + Ok(1) } } - _ => { - *result = Ok(1); - } + _ => Ok(1), }, CURVE25519_RISTRETTO => match group_op { @@ -1337,120 +1054,92 @@ declare_syscall!( let cost = invoke_context .get_compute_budget() .curve25519_ristretto_add_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let left_point = question_mark!( - translate_type::( - memory_mapping, - left_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); - let right_point = question_mark!( - translate_type::( - memory_mapping, - right_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); + let left_point = translate_type::( + memory_mapping, + left_input_addr, + invoke_context.get_check_aligned(), + )?; + let right_point = translate_type::( + memory_mapping, + right_input_addr, + invoke_context.get_check_aligned(), + )?; if let Some(result_point) = ristretto::add_ristretto(left_point, right_point) { - *question_mark!( - translate_type_mut::( - memory_mapping, - result_point_addr, - invoke_context.get_check_aligned(), - ), - result - ) = result_point; + *translate_type_mut::( + memory_mapping, + result_point_addr, + invoke_context.get_check_aligned(), + )? = result_point; + Ok(0) } else { - *result = Ok(1); + Ok(1) } } SUB => { let cost = invoke_context .get_compute_budget() .curve25519_ristretto_subtract_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let left_point = question_mark!( - translate_type::( - memory_mapping, - left_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); - let right_point = question_mark!( - translate_type::( - memory_mapping, - right_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); + let left_point = translate_type::( + memory_mapping, + left_input_addr, + invoke_context.get_check_aligned(), + )?; + let right_point = translate_type::( + memory_mapping, + right_input_addr, + invoke_context.get_check_aligned(), + )?; if let Some(result_point) = ristretto::subtract_ristretto(left_point, right_point) { - *question_mark!( - translate_type_mut::( - memory_mapping, - result_point_addr, - invoke_context.get_check_aligned(), - ), - result - ) = result_point; + *translate_type_mut::( + memory_mapping, + result_point_addr, + invoke_context.get_check_aligned(), + )? 
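Both the point-validation and group-operation syscalls bottom out in `solana_zk_token_sdk::curve25519`, and an operation returning `None` is what surfaces as the syscall returning 1. A small check using the Edwards identity point (compressed encoding: y = 1, all other bytes 0), assuming that same crate:

    use solana_zk_token_sdk::curve25519::edwards::{
        add_edwards, validate_edwards, PodEdwardsPoint,
    };

    fn main() {
        let mut bytes = [0u8; 32];
        bytes[0] = 1; // compressed Edwards identity
        let identity = PodEdwardsPoint(bytes);
        assert!(validate_edwards(&identity));
        // identity + identity = identity; a malformed input would yield None,
        // which the syscall reports as 1.
        assert_eq!(add_edwards(&identity, &identity).unwrap().0, identity.0);
    }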
= result_point; + Ok(0) } else { - *result = Ok(1); + Ok(1) } } MUL => { let cost = invoke_context .get_compute_budget() .curve25519_ristretto_multiply_cost; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let scalar = question_mark!( - translate_type::( - memory_mapping, - left_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); - let input_point = question_mark!( - translate_type::( - memory_mapping, - right_input_addr, - invoke_context.get_check_aligned(), - ), - result - ); + let scalar = translate_type::( + memory_mapping, + left_input_addr, + invoke_context.get_check_aligned(), + )?; + let input_point = translate_type::( + memory_mapping, + right_input_addr, + invoke_context.get_check_aligned(), + )?; if let Some(result_point) = ristretto::multiply_ristretto(scalar, input_point) { - *question_mark!( - translate_type_mut::( - memory_mapping, - result_point_addr, - invoke_context.get_check_aligned(), - ), - result - ) = result_point; + *translate_type_mut::( + memory_mapping, + result_point_addr, + invoke_context.get_check_aligned(), + )? = result_point; + Ok(0) } else { - *result = Ok(1); + Ok(1) } } - _ => { - *result = Ok(1); - } + _ => Ok(1), }, - _ => { - *result = Ok(1); - } + _ => Ok(1), } } ); @@ -1458,22 +1147,15 @@ declare_syscall!( declare_syscall!( // Blake3 SyscallBlake3, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, vals_addr: u64, vals_len: u64, result_addr: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let compute_budget = invoke_context.get_compute_budget(); if compute_budget.sha256_max_slices < vals_len { ic_msg!( @@ -1482,82 +1164,63 @@ declare_syscall!( vals_len, compute_budget.sha256_max_slices, ); - *result = Err(SyscallError::TooManySlices.into()); - return; + return Err(SyscallError::TooManySlices.into()); } - question_mark!( - invoke_context - .get_compute_meter() - .consume(compute_budget.sha256_base_cost), - result - ); - let hash_result = question_mark!( - translate_slice_mut::( + invoke_context + .get_compute_meter() + .consume(compute_budget.sha256_base_cost)?; + + let hash_result = translate_slice_mut::( + memory_mapping, + result_addr, + blake3::HASH_BYTES as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let mut hasher = blake3::Hasher::default(); + if vals_len > 0 { + let vals = translate_slice::<&[u8]>( memory_mapping, - result_addr, - blake3::HASH_BYTES as u64, + vals_addr, + vals_len, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - ), - result - ); - let mut hasher = blake3::Hasher::default(); - if vals_len > 0 { - let vals = question_mark!( - translate_slice::<&[u8]>( + )?; + for val in vals.iter() { + let bytes = translate_slice::( memory_mapping, - vals_addr, - vals_len, + val.as_ptr() as u64, + val.len() as u64, invoke_context.get_check_aligned(), invoke_context.get_check_size(), - ), - result - ); - for val in vals.iter() { - let bytes = question_mark!( - translate_slice::( - memory_mapping, - val.as_ptr() as u64, - val.len() as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + )?; let cost = compute_budget.mem_op_base_cost.max( compute_budget .sha256_byte_cost .saturating_mul((val.len() as 
u64).saturating_div(2)), ); - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; hasher.hash(bytes); } } hash_result.copy_from_slice(&hasher.result().to_bytes()); - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// Set return data SyscallSetReturnData, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, addr: u64, len: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let mut invoke_context = question_mark!( - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let budget = invoke_context.get_compute_budget(); let cost = if invoke_context @@ -1572,75 +1235,57 @@ declare_syscall!( len / budget.cpi_bytes_per_unit + budget.syscall_base_cost } }; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; if len > MAX_RETURN_DATA as u64 { - *result = Err(SyscallError::ReturnDataTooLarge(len, MAX_RETURN_DATA as u64).into()); - return; + return Err(SyscallError::ReturnDataTooLarge(len, MAX_RETURN_DATA as u64).into()); } let return_data = if len == 0 { Vec::new() } else { - question_mark!( - translate_slice::( - memory_mapping, - addr, - len, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ) + translate_slice::( + memory_mapping, + addr, + len, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )? .to_vec() }; let transaction_context = &mut invoke_context.transaction_context; - let program_id = *question_mark!( - transaction_context - .get_current_instruction_context() - .and_then(|instruction_context| instruction_context - .get_last_program_key(transaction_context)) - .map_err(SyscallError::InstructionError), - result - ); - question_mark!( - transaction_context - .set_return_data(program_id, return_data) - .map_err(SyscallError::InstructionError), - result - ); + let program_id = *transaction_context + .get_current_instruction_context() + .and_then(|instruction_context| { + instruction_context.get_last_program_key(transaction_context) + }) + .map_err(SyscallError::InstructionError)?; + + transaction_context + .set_return_data(program_id, return_data) + .map_err(SyscallError::InstructionError)?; - *result = Ok(0); + Ok(0) } ); declare_syscall!( /// Get return data SyscallGetReturnData, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, return_data_addr: u64, mut length: u64, program_id_addr: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let budget = invoke_context.get_compute_budget(); - question_mark!( - invoke_context - .get_compute_meter() - .consume(budget.syscall_base_cost), - result - ); + invoke_context + .get_compute_meter() + .consume(budget.syscall_base_cost)?; let (program_id, return_data) = invoke_context.transaction_context.get_return_data(); length = length.min(return_data.len() as u64); @@ -1658,75 +1303,56 @@ declare_syscall!( (length + size_of::() as u64) / budget.cpi_bytes_per_unit } }; - question_mark!(invoke_context.get_compute_meter().consume(cost), result); + invoke_context.get_compute_meter().consume(cost)?; - let return_data_result = question_mark!( - 
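`SyscallGetReturnData` clamps the caller's requested length to what is actually stored, copies that much, and then returns the full stored length, so a program can detect truncation by comparing the two. The copy-out logic in miniature:

    fn copy_return_data(stored: &[u8], out: &mut [u8]) -> u64 {
        let n = out.len().min(stored.len()); // clamp, as above
        out[..n].copy_from_slice(&stored[..n]);
        stored.len() as u64 // report the real length, not the copied length
    }

    fn main() {
        let stored = [7u8; 10];
        let mut out = [0u8; 4];
        assert_eq!(copy_return_data(&stored, &mut out), 10); // actual length
        assert_eq!(out, [7u8; 4]); // only 4 bytes were copied
    }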
translate_slice_mut::( - memory_mapping, - return_data_addr, - length, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + let return_data_result = translate_slice_mut::( + memory_mapping, + return_data_addr, + length, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; let to_slice = return_data_result; - let from_slice = question_mark!( - return_data - .get(..length as usize) - .ok_or(SyscallError::InvokeContextBorrowFailed), - result - ); + let from_slice = return_data + .get(..length as usize) + .ok_or(SyscallError::InvokeContextBorrowFailed)?; if to_slice.len() != from_slice.len() { - *result = Err(SyscallError::InvalidLength.into()); - return; + return Err(SyscallError::InvalidLength.into()); } to_slice.copy_from_slice(from_slice); - let program_id_result = question_mark!( - translate_type_mut::( - memory_mapping, - program_id_addr, - invoke_context.get_check_aligned() - ), - result - ); + let program_id_result = translate_type_mut::( + memory_mapping, + program_id_addr, + invoke_context.get_check_aligned(), + )?; *program_id_result = *program_id; } // Return the actual length, rather the length returned - *result = Ok(return_data.len() as u64); + Ok(return_data.len() as u64) } ); declare_syscall!( /// Get a processed sigling instruction SyscallGetProcessedSiblingInstruction, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, index: u64, meta_addr: u64, program_id_addr: u64, data_addr: u64, accounts_addr: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let budget = invoke_context.get_compute_budget(); - question_mark!( - invoke_context - .get_compute_meter() - .consume(budget.syscall_base_cost), - result - ); + + invoke_context + .get_compute_meter() + .consume(budget.syscall_base_cost)?; let stop_sibling_instruction_search_at_parent = invoke_context .feature_set .is_active(&stop_sibling_instruction_search_at_parent::id()); @@ -1740,13 +1366,10 @@ declare_syscall!( let mut reverse_index_at_stack_height = 0; let mut found_instruction_context = None; for index_in_trace in (0..instruction_trace_length).rev() { - let instruction_context = question_mark!( - invoke_context - .transaction_context - .get_instruction_context_at_index_in_trace(index_in_trace) - .map_err(SyscallError::InstructionError), - result - ); + let instruction_context = invoke_context + .transaction_context + .get_instruction_context_at_index_in_trace(index_in_trace) + .map_err(SyscallError::InstructionError)?; if (stop_sibling_instruction_search_at_parent || instruction_context.get_stack_height() == TRANSACTION_LEVEL_STACK_HEIGHT) && instruction_context.get_stack_height() < stack_height @@ -1766,219 +1389,159 @@ declare_syscall!( let ProcessedSiblingInstruction { data_len, accounts_len, - } = question_mark!( - translate_type_mut::( - memory_mapping, - meta_addr, - invoke_context.get_check_aligned(), - ), - result - ); + } = translate_type_mut::( + memory_mapping, + meta_addr, + invoke_context.get_check_aligned(), + )?; if *data_len == (instruction_context.get_instruction_data().len() as u64) && *accounts_len == (instruction_context.get_number_of_instruction_accounts() as u64) { - let program_id = question_mark!( - translate_type_mut::( - memory_mapping, - program_id_addr, - invoke_context.get_check_aligned() - ), - result - ); - let data = 
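`SyscallGetProcessedSiblingInstruction` is built for a two-phase calling pattern: a first call reports the sizes through the meta struct, and a second call with matching buffer lengths gets the program id, data, and account metas copied out. A simplified model of that contract (the names are stand-ins, and only the data buffer is modeled):

    #[derive(Default)]
    struct Meta {
        data_len: u64,
    }

    fn get_sibling(meta: &mut Meta, data: &mut [u8], stored: &[u8]) -> bool {
        if meta.data_len == stored.len() as u64 {
            data[..stored.len()].copy_from_slice(stored); // sizes match: copy out
        }
        meta.data_len = stored.len() as u64; // always write back the real size
        true
    }

    fn main() {
        let stored = [1u8, 2, 3];
        let mut meta = Meta::default();
        assert!(get_sibling(&mut meta, &mut [], &stored)); // phase 1: learn sizes
        assert_eq!(meta.data_len, 3);
        let mut buf = [0u8; 3];
        assert!(get_sibling(&mut meta, &mut buf, &stored)); // phase 2: copy out
        assert_eq!(buf, stored);
    }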
question_mark!( - translate_slice_mut::( - memory_mapping, - data_addr, - *data_len as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - let accounts = question_mark!( - translate_slice_mut::( - memory_mapping, - accounts_addr, - *accounts_len as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); + let program_id = translate_type_mut::( + memory_mapping, + program_id_addr, + invoke_context.get_check_aligned(), + )?; + let data = translate_slice_mut::( + memory_mapping, + data_addr, + *data_len as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; + let accounts = translate_slice_mut::( + memory_mapping, + accounts_addr, + *accounts_len as u64, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; - *program_id = *question_mark!( - instruction_context - .get_last_program_key(invoke_context.transaction_context) - .map_err(SyscallError::InstructionError), - result - ); + *program_id = *instruction_context + .get_last_program_key(invoke_context.transaction_context) + .map_err(SyscallError::InstructionError)?; data.clone_from_slice(instruction_context.get_instruction_data()); - let account_metas = question_mark!( - (0..instruction_context.get_number_of_instruction_accounts()) - .map(|instruction_account_index| Ok(AccountMeta { + let account_metas = (0..instruction_context.get_number_of_instruction_accounts()) + .map(|instruction_account_index| { + Ok(AccountMeta { pubkey: *invoke_context .transaction_context .get_key_of_account_at_index( instruction_context .get_index_of_instruction_account_in_transaction( - instruction_account_index - )? + instruction_account_index, + )?, )?, is_signer: instruction_context .is_instruction_account_signer(instruction_account_index)?, is_writable: instruction_context .is_instruction_account_writable(instruction_account_index)?, - })) - .collect::, InstructionError>>() - .map_err(SyscallError::InstructionError), - result - ); + }) + }) + .collect::, InstructionError>>() + .map_err(SyscallError::InstructionError)?; accounts.clone_from_slice(account_metas.as_slice()); } *data_len = instruction_context.get_instruction_data().len() as u64; *accounts_len = instruction_context.get_number_of_instruction_accounts() as u64; - *result = Ok(true as u64); - return; + return Ok(true as u64); } - *result = Ok(false as u64); + Ok(false as u64) } ); declare_syscall!( /// Get current call stack height SyscallGetStackHeight, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, _arg1: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, _memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - + ) -> Result { let budget = invoke_context.get_compute_budget(); - question_mark!( - invoke_context - .get_compute_meter() - .consume(budget.syscall_base_cost), - result - ); - *result = Ok(invoke_context.get_stack_height() as u64); + invoke_context + .get_compute_meter() + .consume(budget.syscall_base_cost)?; + + Ok(invoke_context.get_stack_height() as u64) } ); declare_syscall!( /// Update the properties of accounts SyscallSetAccountProperties, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, updates_addr: u64, updates_count: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) 
{ - let invoke_context = question_mark!( - self.invoke_context - .try_borrow() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { let budget = invoke_context.get_compute_budget(); - question_mark!( - invoke_context.get_compute_meter().consume( - budget.syscall_base_cost.saturating_add( - budget - .account_property_update_cost - .saturating_mul(updates_count) - ) + + invoke_context.get_compute_meter().consume( + budget.syscall_base_cost.saturating_add( + budget + .account_property_update_cost + .saturating_mul(updates_count), ), - result - ); + )?; let transaction_context = &invoke_context.transaction_context; - let instruction_context = question_mark!( - transaction_context - .get_current_instruction_context() - .map_err(SyscallError::InstructionError), - result - ); - let updates = question_mark!( - translate_slice_mut::( - memory_mapping, - updates_addr, - updates_count, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - ), - result - ); - *result = Ok(0); + let instruction_context = transaction_context + .get_current_instruction_context() + .map_err(SyscallError::InstructionError)?; + let updates = translate_slice_mut::( + memory_mapping, + updates_addr, + updates_count, + invoke_context.get_check_aligned(), + invoke_context.get_check_size(), + )?; for update in updates.iter() { - let mut borrowed_account = question_mark!( - instruction_context - .try_borrow_instruction_account( - transaction_context, - update.instruction_account_index, - ) - .map_err(SyscallError::InstructionError), - result - ); + let mut borrowed_account = instruction_context + .try_borrow_instruction_account( + transaction_context, + update.instruction_account_index, + ) + .map_err(SyscallError::InstructionError)?; let attribute = unsafe { std::mem::transmute::<_, TransactionContextAttribute>(update.attribute) }; match attribute { TransactionContextAttribute::TransactionAccountOwner => { - let owner_pubkey = question_mark!( - translate_type_mut::( - memory_mapping, - update.value, - invoke_context.get_check_aligned() - ), - result - ); - question_mark!( - borrowed_account - .set_owner(&owner_pubkey.to_bytes()) - .map_err(SyscallError::InstructionError), - result - ); - } - TransactionContextAttribute::TransactionAccountLamports => question_mark!( - borrowed_account - .set_lamports(update.value) - .map_err(SyscallError::InstructionError), - result - ), - TransactionContextAttribute::TransactionAccountData => question_mark!( - borrowed_account - .set_data_length(update.value as usize) - .map_err(SyscallError::InstructionError), - result - ), - TransactionContextAttribute::TransactionAccountIsExecutable => question_mark!( + let owner_pubkey = translate_type_mut::( + memory_mapping, + update.value, + invoke_context.get_check_aligned(), + )?; + borrowed_account - .set_executable(update.value != 0) - .map_err(SyscallError::InstructionError), - result - ), + .set_owner(&owner_pubkey.to_bytes()) + .map_err(SyscallError::InstructionError)?; + } + TransactionContextAttribute::TransactionAccountLamports => borrowed_account + .set_lamports(update.value) + .map_err(SyscallError::InstructionError)?, + TransactionContextAttribute::TransactionAccountData => borrowed_account + .set_data_length(update.value as usize) + .map_err(SyscallError::InstructionError)?, + TransactionContextAttribute::TransactionAccountIsExecutable => borrowed_account + .set_executable(update.value != 0) + .map_err(SyscallError::InstructionError)?, _ => { - *result = 
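The `transmute` above trusts a raw discriminant read from guest memory. A safer stand-in decodes it explicitly and rejects unknown values, which is also what the `_ => InvalidAttribute` arm does for unhandled variants; the enum and numbering below are simplified stand-ins, not this crate's real definitions:

    #[derive(Debug, PartialEq)]
    enum Attribute {
        Owner,
        Lamports,
        Data,
        IsExecutable,
    }

    fn decode(raw: u16) -> Option<Attribute> {
        match raw {
            0 => Some(Attribute::Owner),
            1 => Some(Attribute::Lamports),
            2 => Some(Attribute::Data),
            3 => Some(Attribute::IsExecutable),
            _ => None, // would surface as SyscallError::InvalidAttribute
        }
    }

    fn main() {
        assert_eq!(decode(1), Some(Attribute::Lamports));
        assert_eq!(decode(9), None);
    }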
Err(SyscallError::InvalidAttribute.into()); - return; + return Err(SyscallError::InvalidAttribute.into()); } } } + Ok(0) } ); @@ -1988,9 +1551,13 @@ mod tests { use solana_sdk::sysvar::fees::Fees; use { super::*, + crate::BpfAllocator, solana_program_runtime::{invoke_context::InvokeContext, sysvar_cache::SysvarCache}, solana_rbpf::{ - ebpf::HOST_ALIGN, memory_region::MemoryRegion, user_error::UserError, vm::Config, + aligned_memory::AlignedMemory, + ebpf::{self, HOST_ALIGN}, + memory_region::MemoryRegion, + vm::{Config, SyscallFunction}, }, solana_sdk::{ account::AccountSharedData, @@ -2007,8 +1574,9 @@ mod tests { macro_rules! assert_access_violation { ($result:expr, $va:expr, $len:expr) => { match $result { - Err(EbpfError::AccessViolation(_, _, va, len, _)) if $va == va && $len == len => (), - Err(EbpfError::StackAccessViolation(_, _, va, len, _)) + ProgramResult::Err(EbpfError::AccessViolation(_, _, va, len, _)) + if $va == va && $len == len => {} + ProgramResult::Err(EbpfError::StackAccessViolation(_, _, va, len, _)) if $va == va && $len == len => {} _ => panic!(), } @@ -2053,11 +1621,8 @@ mod tests { let data = vec![0u8; LENGTH as usize]; let addr = data.as_ptr() as u64; let config = Config::default(); - let memory_mapping = MemoryMapping::new::( - vec![MemoryRegion::new_readonly(&data, START)], - &config, - ) - .unwrap(); + let memory_mapping = + MemoryMapping::new(vec![MemoryRegion::new_readonly(&data, START)], &config).unwrap(); let cases = vec![ (true, START, 0, addr), @@ -2093,7 +1658,7 @@ mod tests { let pubkey = solana_sdk::pubkey::new_rand(); let addr = &pubkey as *const _ as u64; let config = Config::default(); - let memory_mapping = MemoryMapping::new::( + let memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2122,8 +1687,7 @@ mod tests { vm_gap_shift: 63, is_writable: false, }; - let mut memory_mapping = - MemoryMapping::new::(vec![memory_region.clone()], &config).unwrap(); + let mut memory_mapping = MemoryMapping::new(vec![memory_region.clone()], &config).unwrap(); let translated_instruction = translate_type::(&memory_mapping, 0x100000000, true).unwrap(); assert_eq!(instruction, *translated_instruction); @@ -2134,7 +1698,7 @@ mod tests { .position(|memory_region| memory_region.vm_addr == 0x100000000) .unwrap(); memory_mapping - .replace_region::(memory_region_index, memory_region) + .replace_region(memory_region_index, memory_region) .unwrap(); assert!(translate_type::(&memory_mapping, 0x100000000, true).is_err()); } @@ -2147,7 +1711,7 @@ mod tests { assert_eq!(0x1 as *const u8, data.as_ptr()); let addr = good_data.as_ptr() as *const _ as u64; let config = Config::default(); - let memory_mapping = MemoryMapping::new::( + let memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2166,7 +1730,7 @@ mod tests { // u8 let mut data = vec![1u8, 2, 3, 4, 5]; let addr = data.as_ptr() as *const _ as u64; - let memory_mapping = MemoryMapping::new::( + let memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2200,7 +1764,7 @@ mod tests { // u64 let mut data = vec![1u64, 2, 3, 4, 5]; let addr = data.as_ptr() as *const _ as u64; - let memory_mapping = MemoryMapping::new::( + let memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2224,7 +1788,7 @@ mod tests { // Pubkeys let mut data = vec![solana_sdk::pubkey::new_rand(); 5]; let addr = data.as_ptr() as *const _ as u64; - let memory_mapping = 
MemoryMapping::new::( + let memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2248,7 +1812,7 @@ mod tests { let string = "Gaggablaghblagh!"; let addr = string.as_ptr() as *const _ as u64; let config = Config::default(); - let memory_mapping = MemoryMapping::new::( + let memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2286,12 +1850,10 @@ mod tests { bpf_loader::id(), ); let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); - let mut result: Result> = Ok(0); + let mut memory_mapping = MemoryMapping::new(vec![], &config).unwrap(); + let mut result = ProgramResult::Ok(0); SyscallAbort::call( - &mut SyscallAbort { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }, + &mut invoke_context, 0, 0, 0, @@ -2312,14 +1874,11 @@ mod tests { program_id, bpf_loader::id(), ); - let mut syscall_panic = SyscallPanic { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; let string = "Gaggablaghblagh!"; let addr = string.as_ptr() as *const _ as u64; let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2331,14 +1890,13 @@ mod tests { ) .unwrap(); - syscall_panic - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(string.len() as u64 - 1); - let mut result: Result> = Ok(0); - syscall_panic.call( + let mut result = ProgramResult::Ok(0); + SyscallPanic::call( + &mut invoke_context, 0x100000000, string.len() as u64, 42, @@ -2347,21 +1905,20 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result - ); + ), + )); - syscall_panic - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(string.len() as u64); - let mut result: Result> = Ok(0); - syscall_panic.call( + let mut result = ProgramResult::Ok(0); + SyscallPanic::call( + &mut invoke_context, 0x100000000, string.len() as u64, 42, @@ -2381,14 +1938,11 @@ mod tests { program_id, bpf_loader::id(), ); - let mut syscall_sol_log = SyscallLog { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; let string = "Gaggablaghblagh!"; let addr = string.as_ptr() as *const _ as u64; let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2400,14 +1954,13 @@ mod tests { ) .unwrap(); - syscall_sol_log - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(400 - 1); - let mut result: Result> = Ok(0); - syscall_sol_log.call( + let mut result = ProgramResult::Ok(0); + SyscallLog::call( + &mut invoke_context, 0x100000001, // AccessViolation string.len() as u64, 0, @@ -2417,8 +1970,9 @@ mod tests { &mut result, ); assert_access_violation!(result, 0x100000001, string.len() as u64); - let mut result: Result> = Ok(0); - syscall_sol_log.call( + let mut result = ProgramResult::Ok(0); + SyscallLog::call( + &mut invoke_context, 0x100000000, string.len() as u64 * 2, // AccessViolation 
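The `MemoryRegion { host_addr, vm_addr, len, .. }` fixtures these tests build describe a constant-offset window: a guest address translates only if the whole access fits inside `[vm_addr, vm_addr + len)`, and everything else is the access violation that `assert_access_violation!` probes for. A toy model of that check:

    struct Region {
        host_addr: u64,
        vm_addr: u64,
        len: u64,
    }

    fn translate(region: &Region, vm_addr: u64, len: u64) -> Option<u64> {
        let end = vm_addr.checked_add(len)?;
        if vm_addr >= region.vm_addr && end <= region.vm_addr.checked_add(region.len)? {
            Some(region.host_addr + (vm_addr - region.vm_addr)) // constant offset
        } else {
            None // AccessViolation
        }
    }

    fn main() {
        let r = Region { host_addr: 0x7f00_0000, vm_addr: 0x1_0000_0000, len: 16 };
        assert_eq!(translate(&r, 0x1_0000_0000, 16), Some(0x7f00_0000));
        assert_eq!(translate(&r, 0x1_0000_0001, 16), None); // runs past the end
    }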
0, @@ -2429,8 +1983,9 @@ mod tests { ); assert_access_violation!(result, 0x100000000, string.len() as u64 * 2); - let mut result: Result> = Ok(0); - syscall_sol_log.call( + let mut result = ProgramResult::Ok(0); + SyscallLog::call( + &mut invoke_context, 0x100000000, string.len() as u64, 0, @@ -2440,8 +1995,9 @@ mod tests { &mut result, ); result.unwrap(); - let mut result: Result> = Ok(0); - syscall_sol_log.call( + let mut result = ProgramResult::Ok(0); + SyscallLog::call( + &mut invoke_context, 0x100000000, string.len() as u64, 0, @@ -2450,17 +2006,15 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result - ); + ), + )); assert_eq!( - syscall_sol_log - .invoke_context - .borrow() + invoke_context .get_log_collector() .unwrap() .borrow() @@ -2478,26 +2032,28 @@ mod tests { bpf_loader::id(), ); let cost = invoke_context.get_compute_budget().log_64_units; - let mut syscall_sol_log_u64 = SyscallLogU64 { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - syscall_sol_log_u64 - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(cost); let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); - let mut result: Result> = Ok(0); - syscall_sol_log_u64.call(1, 2, 3, 4, 5, &mut memory_mapping, &mut result); + let mut memory_mapping = MemoryMapping::new(vec![], &config).unwrap(); + let mut result = ProgramResult::Ok(0); + SyscallLogU64::call( + &mut invoke_context, + 1, + 2, + 3, + 4, + 5, + &mut memory_mapping, + &mut result, + ); result.unwrap(); assert_eq!( - syscall_sol_log_u64 - .invoke_context - .borrow() + invoke_context .get_log_collector() .unwrap() .borrow() @@ -2515,14 +2071,11 @@ mod tests { bpf_loader::id(), ); let cost = invoke_context.get_compute_budget().log_pubkey_units; - let mut syscall_sol_pubkey = SyscallLogPubkey { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; let pubkey = Pubkey::from_str("MoqiU1vryuCGQSxFKA1SZ316JdLEFFhoAu6cKUNk7dN").unwrap(); let addr = pubkey.as_ref().first().unwrap() as *const _ as u64; let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: addr, vm_addr: 0x100000000, @@ -2534,8 +2087,9 @@ mod tests { ) .unwrap(); - let mut result: Result> = Ok(0); - syscall_sol_pubkey.call( + let mut result = ProgramResult::Ok(0); + SyscallLogPubkey::call( + &mut invoke_context, 0x100000001, // AccessViolation 32, 0, @@ -2546,35 +2100,47 @@ mod tests { ); assert_access_violation!(result, 0x100000001, 32); - syscall_sol_pubkey - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(1); - let mut result: Result> = Ok(0); - syscall_sol_pubkey.call(100, 32, 0, 0, 0, &mut memory_mapping, &mut result); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( - SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result + let mut result = ProgramResult::Ok(0); + SyscallLogPubkey::call( + &mut invoke_context, + 100, + 32, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, ); + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if 
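With the syscall error now carried as a boxed trait object inside `EbpfError::UserError`, the rewritten tests compare errors via `matches!` plus `downcast_ref` instead of direct equality. The pattern in miniature, with a toy error type:

    use std::error::Error;
    use std::fmt;

    #[derive(Debug, PartialEq)]
    enum ToyError {
        BudgetExceeded,
    }

    impl fmt::Display for ToyError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{:?}", self)
        }
    }

    impl Error for ToyError {}

    fn main() {
        let result: Result<u64, Box<dyn Error>> = Err(Box::new(ToyError::BudgetExceeded));
        assert!(matches!(
            &result,
            Err(error) if error.downcast_ref::<ToyError>().unwrap() == &ToyError::BudgetExceeded
        ));
    }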
error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) + ), + )); - syscall_sol_pubkey - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(cost); - let mut result: Result> = Ok(0); - syscall_sol_pubkey.call(0x100000000, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallLogPubkey::call( + &mut invoke_context, + 0x100000000, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); result.unwrap(); assert_eq!( - syscall_sol_pubkey - .invoke_context - .borrow() + invoke_context .get_log_collector() .unwrap() .borrow() @@ -2596,7 +2162,7 @@ mod tests { bpf_loader::id(), ); let mut heap = AlignedMemory::::zero_filled(100); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START), MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096), @@ -2614,17 +2180,41 @@ mod tests { Rc::new(RefCell::new(BpfAllocator::new(heap, ebpf::MM_HEAP_START))), ) .unwrap(); - let mut syscall = SyscallAllocFree { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call(100, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + 100, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_ne!(result.unwrap(), 0); - let mut result: Result> = Ok(0); - syscall.call(100, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + 100, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_eq!(result.unwrap(), 0); - let mut result: Result> = Ok(0); - syscall.call(u64::MAX, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + u64::MAX, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_eq!(result.unwrap(), 0); } @@ -2637,7 +2227,7 @@ mod tests { bpf_loader::id(), ); let mut heap = AlignedMemory::::zero_filled(100); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START), MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096), @@ -2655,16 +2245,31 @@ mod tests { Rc::new(RefCell::new(BpfAllocator::new(heap, ebpf::MM_HEAP_START))), ) .unwrap(); - let mut syscall = SyscallAllocFree { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; for _ in 0..100 { - let mut result: Result> = Ok(0); - syscall.call(1, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + 1, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_ne!(result.unwrap(), 0); } - let mut result: Result> = Ok(0); - syscall.call(100, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + 100, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_eq!(result.unwrap(), 0); } @@ -2677,7 +2282,7 @@ mod tests { bpf_loader::id(), ); let mut heap = AlignedMemory::::zero_filled(100); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ 
MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START), MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096), @@ -2695,16 +2300,31 @@ mod tests { Rc::new(RefCell::new(BpfAllocator::new(heap, ebpf::MM_HEAP_START))), ) .unwrap(); - let mut syscall = SyscallAllocFree { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; for _ in 0..12 { - let mut result: Result> = Ok(0); - syscall.call(1, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + 1, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_ne!(result.unwrap(), 0); } - let mut result: Result> = Ok(0); - syscall.call(100, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, + 100, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); assert_eq!(result.unwrap(), 0); } @@ -2719,7 +2339,7 @@ mod tests { ); let mut heap = AlignedMemory::::zero_filled(100); let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion::new_readonly(&[], ebpf::MM_PROGRAM_START), MemoryRegion::new_writable_gapped(&mut [], ebpf::MM_STACK_START, 4096), @@ -2737,11 +2357,9 @@ mod tests { Rc::new(RefCell::new(BpfAllocator::new(heap, ebpf::MM_HEAP_START))), ) .unwrap(); - let mut syscall = SyscallAllocFree { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallAllocFree::call( + &mut invoke_context, size_of::() as u64, 0, 0, @@ -2790,7 +2408,7 @@ mod tests { let ro_len = bytes_to_hash.len() as u64; let ro_va = 0x100000000; let rw_va = 0x200000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion { host_addr: bytes_to_hash.as_ptr() as *const _ as u64, @@ -2838,18 +2456,25 @@ mod tests { )) * 4, ); - let mut syscall = SyscallSha256 { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call(ro_va, ro_len, rw_va, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallSha256::call( + &mut invoke_context, + ro_va, + ro_len, + rw_va, + 0, + 0, + &mut memory_mapping, + &mut result, + ); result.unwrap(); let hash_local = hashv(&[bytes1.as_ref(), bytes2.as_ref()]).to_bytes(); assert_eq!(hash_result, hash_local); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallSha256::call( + &mut invoke_context, ro_va - 1, // AccessViolation ro_len, rw_va, @@ -2859,8 +2484,9 @@ mod tests { &mut result, ); assert_access_violation!(result, ro_va - 1, 32); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallSha256::call( + &mut invoke_context, ro_va, ro_len + 1, // AccessViolation rw_va, @@ -2870,8 +2496,9 @@ mod tests { &mut result, ); assert_access_violation!(result, ro_va, 48); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallSha256::call( + &mut invoke_context, ro_va, ro_len, rw_va - 1, // AccessViolation @@ -2882,13 +2509,22 @@ mod tests { ); assert_access_violation!(result, rw_va - 1, HASH_BYTES as u64); - syscall.call(ro_va, ro_len, rw_va, 0, 0, &mut memory_mapping, &mut result); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( - 
SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result + SyscallSha256::call( + &mut invoke_context, + ro_va, + ro_len, + rw_va, + 0, + 0, + &mut memory_mapping, + &mut result, ); + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) + ), + )); } #[test] @@ -2915,7 +2551,7 @@ mod tests { ]; let invalid_bytes_va = 0x200000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion { host_addr: valid_bytes.as_ptr() as *const _ as u64, @@ -2945,12 +2581,10 @@ mod tests { .curve25519_edwards_validate_point_cost) * 2, ); - let mut syscall = SyscallCurvePointValidation { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurvePointValidation::call( + &mut invoke_context, CURVE25519_EDWARDS, valid_bytes_va, 0, @@ -2961,8 +2595,9 @@ mod tests { ); assert_eq!(0, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurvePointValidation::call( + &mut invoke_context, CURVE25519_EDWARDS, invalid_bytes_va, 0, @@ -2973,8 +2608,9 @@ mod tests { ); assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurvePointValidation::call( + &mut invoke_context, CURVE25519_EDWARDS, valid_bytes_va, 0, @@ -2983,12 +2619,12 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result - ); + ), + )); } #[test] @@ -3015,7 +2651,7 @@ mod tests { ]; let invalid_bytes_va = 0x200000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion { host_addr: valid_bytes.as_ptr() as *const _ as u64, @@ -3045,12 +2681,10 @@ mod tests { .curve25519_ristretto_validate_point_cost) * 2, ); - let mut syscall = SyscallCurvePointValidation { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurvePointValidation::call( + &mut invoke_context, CURVE25519_RISTRETTO, valid_bytes_va, 0, @@ -3061,8 +2695,9 @@ mod tests { ); assert_eq!(0, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurvePointValidation::call( + &mut invoke_context, CURVE25519_RISTRETTO, invalid_bytes_va, 0, @@ -3073,8 +2708,9 @@ mod tests { ); assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurvePointValidation::call( + &mut invoke_context, CURVE25519_RISTRETTO, valid_bytes_va, 0, @@ -3083,12 +2719,12 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - 
result - ); + ), + )); } #[test] @@ -3128,7 +2764,7 @@ mod tests { let result_point: [u8; 32] = [0; 32]; let result_point_va = 0x500000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion { host_addr: left_point.as_ptr() as *const _ as u64, @@ -3185,12 +2821,10 @@ mod tests { .curve25519_edwards_multiply_cost) * 2, ); - let mut syscall = SyscallCurveGroupOps { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, ADD, left_point_va, @@ -3207,8 +2841,9 @@ mod tests { ]; assert_eq!(expected_sum, result_point); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, ADD, invalid_point_va, @@ -3219,8 +2854,9 @@ mod tests { ); assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, SUB, left_point_va, @@ -3237,8 +2873,9 @@ mod tests { ]; assert_eq!(expected_difference, result_point); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, SUB, invalid_point_va, @@ -3249,8 +2886,9 @@ mod tests { ); assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, MUL, scalar_va, @@ -3267,8 +2905,9 @@ mod tests { ]; assert_eq!(expected_product, result_point); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, MUL, scalar_va, @@ -3279,8 +2918,9 @@ mod tests { ); assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_EDWARDS, MUL, scalar_va, @@ -3289,12 +2929,12 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result - ); + ), + )); } #[test] @@ -3334,7 +2974,7 @@ mod tests { let result_point: [u8; 32] = [0; 32]; let result_point_va = 0x500000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion { host_addr: left_point.as_ptr() as *const _ as u64, @@ -3391,12 +3031,10 @@ mod tests { .curve25519_ristretto_multiply_cost) * 2, ); - let mut syscall = SyscallCurveGroupOps { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, ADD, left_point_va, @@ -3413,8 +3051,9 @@ mod tests { ]; assert_eq!(expected_sum, result_point); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, ADD, 
invalid_point_va, @@ -3425,8 +3064,9 @@ mod tests { ); assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, SUB, left_point_va, @@ -3443,8 +3083,9 @@ mod tests { ]; assert_eq!(expected_difference, result_point); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, SUB, invalid_point_va, @@ -3456,8 +3097,9 @@ mod tests { assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, MUL, scalar_va, @@ -3474,8 +3116,9 @@ mod tests { ]; assert_eq!(expected_product, result_point); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, MUL, scalar_va, @@ -3487,8 +3130,9 @@ mod tests { assert_eq!(1, result.unwrap()); - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallCurveGroupOps::call( + &mut invoke_context, CURVE25519_RISTRETTO, MUL, scalar_va, @@ -3497,12 +3141,12 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( + assert!(matches!( + result, + ProgramResult::Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result - ); + ), + )); } fn create_filled_type(zero_init: bool) -> T { @@ -3574,7 +3218,7 @@ mod tests { let got_clock = Clock::default(); let got_clock_va = 0x100000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: &got_clock as *const _ as u64, vm_addr: got_clock_va, @@ -3585,12 +3229,18 @@ mod tests { &config, ) .unwrap(); - let mut syscall = SyscallGetClockSysvar { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call(got_clock_va, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallGetClockSysvar::call( + &mut invoke_context, + got_clock_va, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); result.unwrap(); assert_eq!(got_clock, src_clock); @@ -3608,7 +3258,7 @@ mod tests { let got_epochschedule = EpochSchedule::default(); let got_epochschedule_va = 0x100000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: &got_epochschedule as *const _ as u64, vm_addr: got_epochschedule_va, @@ -3619,12 +3269,10 @@ mod tests { &config, ) .unwrap(); - let mut syscall = SyscallGetEpochScheduleSysvar { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + SyscallGetEpochScheduleSysvar::call( + &mut invoke_context, got_epochschedule_va, 0, 0, @@ -3651,7 +3299,7 @@ mod tests { let got_fees = Fees::default(); let got_fees_va = 0x100000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: &got_fees as *const _ as u64, vm_addr: got_fees_va, @@ -3662,12 +3310,18 @@ mod tests { &config, ) .unwrap(); - let 
mut syscall = SyscallGetFeesSysvar { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call(got_fees_va, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallGetFeesSysvar::call( + &mut invoke_context, + got_fees_va, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); result.unwrap(); assert_eq!(got_fees, src_fees); @@ -3681,7 +3335,7 @@ mod tests { let got_rent = create_filled_type::(true); let got_rent_va = 0x100000000; - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: &got_rent as *const _ as u64, vm_addr: got_rent_va, @@ -3692,12 +3346,18 @@ mod tests { &config, ) .unwrap(); - let mut syscall = SyscallGetRentSysvar { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; - let mut result: Result> = Ok(0); - syscall.call(got_rent_va, 0, 0, 0, 0, &mut memory_mapping, &mut result); + let mut result = ProgramResult::Ok(0); + SyscallGetRentSysvar::call( + &mut invoke_context, + got_rent_va, + 0, + 0, + 0, + 0, + &mut memory_mapping, + &mut result, + ); result.unwrap(); assert_eq!(got_rent, src_rent); @@ -3709,11 +3369,12 @@ mod tests { } } - fn call_program_address_common( + fn call_program_address_common<'a, 'b: 'a>( + invoke_context: &'a mut InvokeContext<'b>, seeds: &[&[u8]], program_id: &Pubkey, - syscall: &mut dyn SyscallObject, - ) -> Result<(Pubkey, u8), EbpfError> { + syscall: SyscallFunction<&'a mut InvokeContext<'b>>, + ) -> Result<(Pubkey, u8), EbpfError> { const SEEDS_VA: u64 = 0x100000000; const PROGRAM_ID_VA: u64 = 0x200000000; const ADDRESS_VA: u64 = 0x300000000; @@ -3770,10 +3431,11 @@ mod tests { is_writable: false, }); } - let mut memory_mapping = MemoryMapping::new::(regions, &config).unwrap(); + let mut memory_mapping = MemoryMapping::new(regions, &config).unwrap(); - let mut result = Ok(0); - syscall.call( + let mut result = ProgramResult::Ok(0); + syscall( + invoke_context, SEEDS_VA, seeds.len() as u64, PROGRAM_ID_VA, @@ -3782,19 +3444,20 @@ mod tests { &mut memory_mapping, &mut result, ); - let _ = result?; - Ok((address, bump_seed)) + Result::::from(result).map(|_| (address, bump_seed)) } fn create_program_address( invoke_context: &mut InvokeContext, seeds: &[&[u8]], address: &Pubkey, - ) -> Result> { - let mut syscall = SyscallCreateProgramAddress { - invoke_context: Rc::new(RefCell::new(invoke_context)), - }; - let (address, _) = call_program_address_common(seeds, address, &mut syscall)?; + ) -> Result { + let (address, _) = call_program_address_common( + invoke_context, + seeds, + address, + SyscallCreateProgramAddress::call, + )?; Ok(address) } @@ -3802,11 +3465,13 @@ mod tests { invoke_context: &mut InvokeContext, seeds: &[&[u8]], address: &Pubkey, - ) -> Result<(Pubkey, u8), EbpfError> { - let mut syscall = SyscallTryFindProgramAddress { - invoke_context: Rc::new(RefCell::new(invoke_context)), - }; - call_program_address_common(seeds, address, &mut syscall) + ) -> Result<(Pubkey, u8), EbpfError> { + call_program_address_common( + invoke_context, + seeds, + address, + SyscallTryFindProgramAddress::call, + ) } #[test] @@ -3844,9 +3509,7 @@ mod tests { let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let syscall_base_cost = invoke_context.get_compute_budget().syscall_base_cost; - let mut syscall_get_processed_sibling_instruction = SyscallGetProcessedSiblingInstruction { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), 
- }; + const VM_BASE_ADDRESS: u64 = 0x100000000; const META_OFFSET: usize = 0; const PROGRAM_ID_OFFSET: usize = @@ -3856,7 +3519,7 @@ mod tests { const END_OFFSET: usize = ACCOUNTS_OFFSET + std::mem::size_of::() * 4; let mut memory = [0u8; END_OFFSET]; let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion { host_addr: memory.as_mut_ptr() as u64, vm_addr: VM_BASE_ADDRESS, @@ -3898,14 +3561,13 @@ mod tests { ) .unwrap(); - syscall_get_processed_sibling_instruction - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(syscall_base_cost); - let mut result: Result> = Ok(0); - syscall_get_processed_sibling_instruction.call( + let mut result = ProgramResult::Ok(0); + SyscallGetProcessedSiblingInstruction::call( + &mut invoke_context, 0, VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), VM_BASE_ADDRESS.saturating_add(PROGRAM_ID_OFFSET as u64), @@ -3914,12 +3576,9 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!(result, Ok(1)); + assert_eq!(result.unwrap(), 1); { - let transaction_context = &syscall_get_processed_sibling_instruction - .invoke_context - .borrow() - .transaction_context; + let transaction_context = &invoke_context.transaction_context; assert_eq!(processed_sibling_instruction.data_len, 1); assert_eq!(processed_sibling_instruction.accounts_len, 1); assert_eq!( @@ -3937,13 +3596,13 @@ mod tests { ); } - syscall_get_processed_sibling_instruction - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(syscall_base_cost); - syscall_get_processed_sibling_instruction.call( + let mut result = ProgramResult::Ok(0); + SyscallGetProcessedSiblingInstruction::call( + &mut invoke_context, 1, VM_BASE_ADDRESS.saturating_add(META_OFFSET as u64), VM_BASE_ADDRESS.saturating_add(PROGRAM_ID_OFFSET as u64), @@ -3952,7 +3611,7 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!(result, Ok(0)); + assert_eq!(result.unwrap(), 0); } #[test] @@ -4039,13 +3698,11 @@ mod tests { .account_property_update_cost .saturating_mul(updates_list.len() as u64), ); - let mut syscall_set_account_properties = SyscallSetAccountProperties { - invoke_context: Rc::new(RefCell::new(&mut invoke_context)), - }; + const VM_ADDRESS_KEYS: u64 = 0x100000000; const VM_ADDRESS_UPDATES_LIST: u64 = 0x200000000; let config = Config::default(); - let mut memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new( vec![ MemoryRegion { host_addr: keys.as_ptr() as u64, @@ -4066,14 +3723,13 @@ mod tests { ) .unwrap(); - syscall_set_account_properties - .invoke_context - .borrow_mut() + invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(cost); - let mut result: Result> = Ok(0); - syscall_set_account_properties.call( + let mut result = ProgramResult::Ok(0); + SyscallSetAccountProperties::call( + &mut invoke_context, VM_ADDRESS_UPDATES_LIST, updates_list.len() as u64, 0, @@ -4082,12 +3738,9 @@ mod tests { &mut memory_mapping, &mut result, ); - assert_eq!(result, Ok(0)); + assert_eq!(result.unwrap(), 0); { - let transaction_context = &syscall_set_account_properties - .invoke_context - .borrow() - .transaction_context; + let transaction_context = &invoke_context.transaction_context; let account = transaction_context .get_account_at_index(2) .unwrap() @@ -4116,19 +3769,22 @@ mod tests { let address = bpf_loader_upgradeable::id(); let exceeded_seed = &[127; MAX_SEED_LEN + 1]; - let result 
= create_program_address(&mut invoke_context, &[exceeded_seed], &address); - assert_eq!( - result, - Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()) - ); - assert_eq!( + assert!(matches!( + create_program_address(&mut invoke_context, &[exceeded_seed], &address), + Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded) + ), + )); + assert!(matches!( create_program_address( &mut invoke_context, &[b"short_seed", exceeded_seed], &address, ), - Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()) - ); + Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded) + ), + )); let max_seed = &[0; MAX_SEED_LEN]; assert!(create_program_address(&mut invoke_context, &[max_seed], &address).is_ok()); let exceeded_seeds: &[&[u8]] = &[ @@ -4169,34 +3825,38 @@ mod tests { &[16], &[17], ]; - assert_eq!( + assert!(matches!( create_program_address(&mut invoke_context, max_seeds, &address), - Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()) - ); + Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded) + ), + )); assert_eq!( - create_program_address(&mut invoke_context, &[b"", &[1]], &address), - Ok("BwqrghZA2htAcqq8dzP1WDAhTXYTYWj7CHxF5j7TDBAe" + create_program_address(&mut invoke_context, &[b"", &[1]], &address).unwrap(), + "BwqrghZA2htAcqq8dzP1WDAhTXYTYWj7CHxF5j7TDBAe" .parse() - .unwrap()) + .unwrap(), ); assert_eq!( - create_program_address(&mut invoke_context, &["☉".as_ref(), &[0]], &address), - Ok("13yWmRpaTR4r5nAktwLqMpRNr28tnVUZw26rTvPSSB19" + create_program_address(&mut invoke_context, &["☉".as_ref(), &[0]], &address).unwrap(), + "13yWmRpaTR4r5nAktwLqMpRNr28tnVUZw26rTvPSSB19" .parse() - .unwrap()) + .unwrap(), ); assert_eq!( - create_program_address(&mut invoke_context, &[b"Talking", b"Squirrels"], &address), - Ok("2fnQrngrQT4SeLcdToJAD96phoEjNL2man2kfRLCASVk" + create_program_address(&mut invoke_context, &[b"Talking", b"Squirrels"], &address) + .unwrap(), + "2fnQrngrQT4SeLcdToJAD96phoEjNL2man2kfRLCASVk" .parse() - .unwrap()) + .unwrap(), ); let public_key = Pubkey::from_str("SeedPubey1111111111111111111111111111111111").unwrap(); assert_eq!( - create_program_address(&mut invoke_context, &[public_key.as_ref(), &[1]], &address), - Ok("976ymqVnfE32QFe6NfGDctSvVa36LWnvYxhU6G2232YL" + create_program_address(&mut invoke_context, &[public_key.as_ref(), &[1]], &address) + .unwrap(), + "976ymqVnfE32QFe6NfGDctSvVa36LWnvYxhU6G2232YL" .parse() - .unwrap()) + .unwrap(), ); assert_ne!( create_program_address(&mut invoke_context, &[b"Talking", b"Squirrels"], &address) @@ -4207,13 +3867,12 @@ mod tests { .get_compute_meter() .borrow_mut() .mock_set_remaining(0); - assert_eq!( + assert!(matches!( create_program_address(&mut invoke_context, &[b"", &[1]], &address), - Err( + Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - .into() - ) - ); + ), + )); } #[test] @@ -4266,23 +3925,24 @@ mod tests { .get_compute_meter() .borrow_mut() .mock_set_remaining(cost * (max_tries - bump_seed as u64 - 1)); - assert_eq!( + assert!(matches!( try_find_program_address(&mut invoke_context, seeds, &address), - Err( + Err(EbpfError::UserError(error)) if 
error.downcast_ref::().unwrap() == &BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - .into() - ) - ); + ), + )); let exceeded_seed = &[127; MAX_SEED_LEN + 1]; invoke_context .get_compute_meter() .borrow_mut() .mock_set_remaining(cost * (max_tries - 1)); - assert_eq!( + assert!(matches!( try_find_program_address(&mut invoke_context, &[exceeded_seed], &address), - Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()) - ); + Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded) + ), + )); let exceeded_seeds: &[&[u8]] = &[ &[1], &[2], @@ -4306,10 +3966,12 @@ mod tests { .get_compute_meter() .borrow_mut() .mock_set_remaining(cost * (max_tries - 1)); - assert_eq!( + assert!(matches!( try_find_program_address(&mut invoke_context, exceeded_seeds, &address), - Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()) - ); + Err(EbpfError::UserError(error)) if error.downcast_ref::().unwrap() == &BpfError::SyscallError( + SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded) + ), + )); } #[test] diff --git a/programs/bpf_loader/src/syscalls/sysvar.rs b/programs/bpf_loader/src/syscalls/sysvar.rs index 5cd339c927a049..a1cf50a94061b7 100644 --- a/programs/bpf_loader/src/syscalls/sysvar.rs +++ b/programs/bpf_loader/src/syscalls/sysvar.rs @@ -6,7 +6,7 @@ fn get_sysvar( check_aligned: bool, memory_mapping: &mut MemoryMapping, invoke_context: &mut InvokeContext, -) -> Result> { +) -> Result { invoke_context.get_compute_meter().consume( invoke_context .get_compute_budget() @@ -24,89 +24,68 @@ fn get_sysvar( declare_syscall!( /// Get a Clock sysvar SyscallGetClockSysvar, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let mut invoke_context = question_mark!( - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - *result = get_sysvar( + ) -> Result { + get_sysvar( invoke_context.get_sysvar_cache().get_clock(), var_addr, invoke_context.get_check_aligned(), memory_mapping, - &mut invoke_context, - ); + invoke_context, + ) } ); declare_syscall!( /// Get a EpochSchedule sysvar SyscallGetEpochScheduleSysvar, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let mut invoke_context = question_mark!( - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - *result = get_sysvar( + ) -> Result { + get_sysvar( invoke_context.get_sysvar_cache().get_epoch_schedule(), var_addr, invoke_context.get_check_aligned(), memory_mapping, - &mut invoke_context, - ); + invoke_context, + ) } ); declare_syscall!( /// Get a Fees sysvar SyscallGetFeesSysvar, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let mut invoke_context = question_mark!( - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); + ) -> Result { #[allow(deprecated)] { - *result = get_sysvar( + get_sysvar( 
invoke_context.get_sysvar_cache().get_fees(), var_addr, invoke_context.get_check_aligned(), memory_mapping, - &mut invoke_context, - ); + invoke_context, + ) } } ); @@ -114,28 +93,21 @@ declare_syscall!( declare_syscall!( /// Get a Rent sysvar SyscallGetRentSysvar, - fn call( - &mut self, + fn inner_call( + invoke_context: &mut InvokeContext, var_addr: u64, _arg2: u64, _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &mut MemoryMapping, - result: &mut Result>, - ) { - let mut invoke_context = question_mark!( - self.invoke_context - .try_borrow_mut() - .map_err(|_| SyscallError::InvokeContextBorrowFailed), - result - ); - *result = get_sysvar( + ) -> Result { + get_sysvar( invoke_context.get_sysvar_cache().get_rent(), var_addr, invoke_context.get_check_aligned(), memory_mapping, - &mut invoke_context, - ); + invoke_context, + ) } ); diff --git a/rbpf-cli/Cargo.toml b/rbpf-cli/Cargo.toml index 47403d57bd3d8d..63bda203692f7b 100644 --- a/rbpf-cli/Cargo.toml +++ b/rbpf-cli/Cargo.toml @@ -17,4 +17,4 @@ solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.15. solana-logger = { path = "../logger", version = "=1.15.0" } solana-program-runtime = { path = "../program-runtime", version = "=1.15.0" } solana-sdk = { path = "../sdk", version = "=1.15.0" } -solana_rbpf = "=0.2.33" +solana_rbpf = "=0.2.34" diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 10f5f4409e348b..3486339d7d8928 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -3,7 +3,7 @@ use { serde::{Deserialize, Serialize}, serde_json::Result, solana_bpf_loader_program::{ - create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BpfError, + create_vm, serialization::serialize_parameters, syscalls::register_syscalls, ThisInstructionMeter, }, solana_program_runtime::invoke_context::{prepare_mock_invoke_context, InvokeContext}, @@ -254,10 +254,10 @@ native machine code before execting it in the virtual machine.", file.read_to_end(&mut contents).unwrap(); let syscall_registry = register_syscalls(&mut invoke_context, true).unwrap(); let executable = if magic == [0x7f, 0x45, 0x4c, 0x46] { - Executable::::from_elf(&contents, config, syscall_registry) + Executable::::from_elf(&contents, config, syscall_registry) .map_err(|err| format!("Executable constructor failed: {:?}", err)) } else { - assemble::( + assemble::( std::str::from_utf8(contents.as_slice()).unwrap(), config, syscall_registry, @@ -266,11 +266,9 @@ native machine code before execting it in the virtual machine.", .unwrap(); let mut verified_executable = - VerifiedExecutable::::from_executable( - executable, - ) - .map_err(|err| format!("Executable verifier failed: {:?}", err)) - .unwrap(); + VerifiedExecutable::::from_executable(executable) + .map_err(|err| format!("Executable verifier failed: {:?}", err)) + .unwrap(); verified_executable.jit_compile().unwrap(); let mut analysis = LazyAnalysis::new(verified_executable.get_executable()); @@ -310,13 +308,14 @@ native machine code before execting it in the virtual machine.", if matches.is_present("trace") { eprintln!("Trace is saved in trace.out"); let mut file = File::create("trace.out").unwrap(); - vm.get_tracer() + vm.get_program_environment() + .tracer .write(&mut file, analysis.analyze()) .unwrap(); } if matches.is_present("profile") { eprintln!("Profile is saved in profile.dot"); - let tracer = &vm.get_tracer(); + let tracer = &vm.get_program_environment().tracer; let analysis = analysis.analyze(); let dynamic_analysis = DynamicAnalysis::new(tracer, analysis); 
let mut file = File::create("profile.dot").unwrap(); @@ -376,19 +375,19 @@ impl Debug for Output { // Replace with std::lazy::Lazy when stabilized. // https://github.com/rust-lang/rust/issues/74465 struct LazyAnalysis<'a> { - analysis: Option>, - executable: &'a Executable, + analysis: Option>, + executable: &'a Executable, } impl<'a> LazyAnalysis<'a> { - fn new(executable: &'a Executable) -> Self { + fn new(executable: &'a Executable) -> Self { Self { analysis: None, executable, } } - fn analyze(&mut self) -> &Analysis { + fn analyze(&mut self) -> &Analysis { if let Some(ref analysis) = self.analysis { return analysis; } From 2d936784dd83f277c2704ef542e74abd0e5900a3 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 6 Oct 2022 14:43:56 -0400 Subject: [PATCH 20/65] Ignore errors when joining background threads for EAH tests (#28263) --- core/tests/epoch_accounts_hash.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index cec92c71f6fb6e..2177ae701dd024 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -211,17 +211,11 @@ impl Drop for BackgroundServices { info!("Stopping background services..."); self.exit.store(true, Ordering::Relaxed); - unsafe { ManuallyDrop::take(&mut self.accounts_background_service) } - .join() - .expect("stop ABS"); - - unsafe { ManuallyDrop::take(&mut self.accounts_hash_verifier) } - .join() - .expect("stop AHV"); - - unsafe { ManuallyDrop::take(&mut self.snapshot_packager_service) } - .join() - .expect("stop SPS"); + // Join the background threads, and ignore any errors. + // SAFETY: We do not use any of the `ManuallyDrop` fields again, so `.take()` is OK here. + _ = unsafe { ManuallyDrop::take(&mut self.accounts_background_service) }.join(); + _ = unsafe { ManuallyDrop::take(&mut self.accounts_hash_verifier) }.join(); + _ = unsafe { ManuallyDrop::take(&mut self.snapshot_packager_service) }.join(); info!("Stopping background services... 
DONE"); } From d9ef04772d21261b54a470fa817738c5c8ad09c3 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 6 Oct 2022 18:54:24 +0000 Subject: [PATCH 21/65] moves merkle proof size sanity check to Shred{Code,Data}::merkle_branch (#28266) --- ledger/src/shred/merkle.rs | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 4b6cd792f79471..90b686f5c8f74b 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -167,11 +167,15 @@ impl ShredData { let proof_size = self.proof_size()?; let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; let size = SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY; - MerkleBranch::try_from( + let merkle_branch = MerkleBranch::try_from( self.payload .get(offset..offset + size) .ok_or(Error::InvalidPayloadSize(self.payload.len()))?, - ) + )?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + Ok(merkle_branch) } fn merkle_tree_node(&self) -> Result { @@ -230,13 +234,9 @@ impl ShredData { } fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error> { - match self.common_header.shred_variant { - ShredVariant::MerkleData(proof_size) => { - if self.merkle_branch()?.proof.len() != usize::from(proof_size) { - return Err(Error::InvalidProofSize(proof_size)); - } - } - _ => return Err(Error::InvalidShredVariant), + let shred_variant = self.common_header.shred_variant; + if !matches!(shred_variant, ShredVariant::MerkleData(_)) { + return Err(Error::InvalidShredVariant); } if !verify_merkle_proof { debug_assert_matches!(self.verify_merkle_proof(), Ok(true)); @@ -283,11 +283,15 @@ impl ShredCode { let proof_size = self.proof_size()?; let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; let size = SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY; - MerkleBranch::try_from( + let merkle_branch = MerkleBranch::try_from( self.payload .get(offset..offset + size) .ok_or(Error::InvalidPayloadSize(self.payload.len()))?, - ) + )?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + Ok(merkle_branch) } fn merkle_tree_node(&self) -> Result { @@ -364,13 +368,9 @@ impl ShredCode { } fn sanitize(&self, verify_merkle_proof: bool) -> Result<(), Error> { - match self.common_header.shred_variant { - ShredVariant::MerkleCode(proof_size) => { - if self.merkle_branch()?.proof.len() != usize::from(proof_size) { - return Err(Error::InvalidProofSize(proof_size)); - } - } - _ => return Err(Error::InvalidShredVariant), + let shred_variant = self.common_header.shred_variant; + if !matches!(shred_variant, ShredVariant::MerkleCode(_)) { + return Err(Error::InvalidShredVariant); } if !verify_merkle_proof { debug_assert_matches!(self.verify_merkle_proof(), Ok(true)); From 6eeedaec4f23e4b20109a3056d3469d9d54c98df Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 6 Oct 2022 14:26:47 -0500 Subject: [PATCH 22/65] [Proposal ]Partitioned Inflationary Rewards Distribution (#27455) * add epoch-boundary-stake-reward proposal * 80 col * clarify rewarding interval selection for skipping slots * update proposal with reward credit based on jeff's comments * Update docs/src/proposals/epoch-boundary-stake-reward.md Co-authored-by: Trent Nelson * Update docs/src/proposals/epoch-boundary-stake-reward.md Co-authored-by: Trent Nelson * rename * update proposal with more feedbacks * revise * update 
with carl's feedback
* use mathmatic notation to clarify interval boundaries
* more feedbacks
* remove parenthesis
* update snapshot paragraph
* update with reward calc service
* more feedbacks
* update with more feedbacks
* more feedbacks from carllin

Co-authored-by: Trent Nelson
---
 ...ioned-inflationary-rewards-distribution.md | 140 ++++++++++++++++++
 1 file changed, 140 insertions(+)
 create mode 100644 docs/src/proposals/partitioned-inflationary-rewards-distribution.md

diff --git a/docs/src/proposals/partitioned-inflationary-rewards-distribution.md b/docs/src/proposals/partitioned-inflationary-rewards-distribution.md
new file mode 100644
index 00000000000000..e1586d484251dd
--- /dev/null
+++ b/docs/src/proposals/partitioned-inflationary-rewards-distribution.md
@@ -0,0 +1,140 @@
+---
+title: Partitioned Inflationary Rewards Distribution
+---
+
+## Problem
+
+With the increase in the number of stake accounts, computing and redeeming the
+stake rewards at the start block of the epoch boundary becomes very expensive.
+Currently, with 550K stake accounts, the stake reward time has already taken
+more than 10 seconds. This prolonged computation slows down the network, and
+can cause a large number of forks at the epoch boundary, which makes the
+matter even worse.
+
+## Proposed Solutions
+
+Instead of computing and rewarding stake accounts at the epoch boundary, we
+will decouple reward computation and reward credit into two phases.
+
+A separate service, "EpochRewardCalculationService", will be created. The
+service will listen to a channel for any incoming reward calculation requests,
+and perform the calculation for the rewards. For each block that crosses the
+epoch boundary, the bank will send a request to the
+`EpochRewardCalculationService`. This marks the start of the reward
+computation phase.
+
+```
+N-1 -- N -- N+1
+  \
+   \
+    N+2
+```
+
+In the above example, N is the start of the new epoch. Two reward calculation
+requests will be sent out at slot N and slot N+2 because they both cross the
+epoch boundary and are on different forks. To avoid repeated computation with
+the same input, the signature of each computation request, `hash(epoch_number,
+hash(stake_accounts_data), hash(vote_accounts), hash(delegation_map))`, is
+calculated. Duplicated computation requests will be discarded. For the above
+example, if there are no stake/vote account changes between slot N and slot
+N+2, the 2nd computation request will be discarded.
+
+When reaching block height `N` after the start of the `reward computation
+phase`, the bank starts the second phase - reward credit, in which the bank
+first queries the `epoch calc service` with the request signature to get the
+rewards result, which will be represented as a map from
+accounts_pubkey->rewards, and then credits the rewards to the stake accounts
+over the next `M` blocks. If the rewards result is not available, the bank
+will wait until the results are available.
+
+We call them:
+(a) calculating interval: `[epoch_start, epoch_start+N]`
+(b) credit interval: `[epoch_start+N+1, epoch_start+N+M]`, respectively.
+And the combined interval `[epoch_start, epoch_start+N+M]` is called the
+`rewarding interval`.
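+
+To make the boundaries concrete, here is a minimal sketch (illustrative only;
+it assumes `N` and `M` are already known for the epoch, and the names are
+hypothetical, not taken from the codebase):
+
+```rust
+/// Which rewarding phase a block is in, given its height past the epoch
+/// boundary (0 == the first block of the epoch).
+enum RewardPhase {
+    Calculating, // [epoch_start, epoch_start+N]
+    Credit,      // [epoch_start+N+1, epoch_start+N+M]
+    Normal,      // outside the rewarding interval
+}
+
+fn reward_phase(height_past_epoch_start: u64, n: u64, m: u64) -> RewardPhase {
+    if height_past_epoch_start <= n {
+        RewardPhase::Calculating
+    } else if height_past_epoch_start <= n + m {
+        RewardPhase::Credit
+    } else {
+        RewardPhase::Normal
+    }
+}
+```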
+
+For the `calculating interval`, `N` is chosen to be sufficiently large so that
+the background computation should have completed and the result of the reward
+computation is available at the end of the `calculating interval`. `N` can be
+fixed, such as 100 (roughly equivalent to 50 seconds), or chosen as a function
+of the number of stake accounts, `f(num_stake_accounts)`.
+
+In the `credit interval`, the bank will fetch the reward computation results
+from the background thread and start crediting the rewards during the next
+`M` blocks. The idea is to partition the accounts into `M` partitions, and in
+each block the bank credits `1/M` of the accounts. The partitioning is
+required to be deterministic for the current epoch, but must also be random
+across different epochs. One way to achieve these properties is to hash the
+account's pubkey with some epoch-dependent value, sort the results, and divide
+them into `M` bins, as sketched below. The epoch-dependent value can be the
+epoch number, total rewards for the epoch, the leader pubkey for the epoch
+block, etc. `M` can be chosen based on 50K accounts per block, which equals
+`ceil(num_stake_accounts/50,000)`.
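+
+The partitioning could look like the following sketch (not the actual
+implementation; `epoch_seed` is a placeholder for whichever epoch-dependent
+value ends up being chosen):
+
+```rust
+use solana_sdk::{hash::hashv, pubkey::Pubkey};
+
+// Deterministic within an epoch (the same seed yields the same bins) and
+// random across epochs (the seed changes every epoch). Assumes m > 0.
+fn partition_stake_accounts(
+    stake_pubkeys: &[Pubkey],
+    epoch_seed: &[u8], // e.g. epoch number, total rewards, leader pubkey
+    m: usize,          // number of blocks in the credit interval
+) -> Vec<Vec<Pubkey>> {
+    let mut keyed: Vec<_> = stake_pubkeys
+        .iter()
+        .map(|pubkey| (hashv(&[epoch_seed, pubkey.as_ref()]), *pubkey))
+        .collect();
+    keyed.sort(); // stable, deterministic order for this epoch
+    let bin_size = (keyed.len() + m - 1) / m; // ~1/M of the accounts per bin
+    keyed
+        .chunks(bin_size.max(1))
+        .map(|bin| bin.iter().map(|(_, pubkey)| *pubkey).collect())
+        .collect()
+}
+```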
+
+`num_stake_accounts` is extracted from the `leader_schedule_epoch` block, so
+we don't run into a discrepancy where new transactions right before an epoch
+boundary create one fork with `X` stake accounts and another fork with `Y`
+stake accounts.
+
+In order to avoid putting the extra burden of computing and crediting the
+stake rewards onto blocks produced during the `rewarding interval`, we can
+reduce the compute budget limits on those blocks in the `rewarding interval`,
+and reserve some computing and read/write capacity to perform stake rewarding.
+
+### Challenges
+
+1. stake account reads/writes during the `rewarding interval`
+
+Because of the delayed credit of the rewards during
+`epoch_start..epoch_start+N+M`, reads to those stake accounts will not return
+the value that the user is expecting (viz. they will not include the recent
+epoch stake rewards). Writes to those stake accounts will be lost once the
+rewards are credited on block `epoch_start+N+M`. We will need to modify the
+runtime to restrict reads/writes to stake accounts during the `rewarding
+interval`. Any transaction that involves stake accounts will result in a new
+execution error, i.e. "stake rewards pending, account access is restricted".
+However, normal rpc queries, such as 'getBalance', will return the current
+lamports of the account. The user can expect the rewards to be credited at
+some time point during the 'rewarding interval'.
+
+2. snapshots taken during the `rewarding interval`
+
+If a snapshot is taken during the `rewarding interval`, it would miss the
+rewards for the stake accounts. Any plain restart from those snapshots will be
+wrong, unless we reconstruct the rewards from the recent epoch boundary. This
+will add some complexity to validator restart. In the first implementation, we
+will force *not* taking any snapshot and *not* performing accounts hash
+calculation during the `rewarding interval`. Incremental snapshot requests
+will be skipped. Full snapshot requests will be re-queued to be picked up
+later at the end of the `rewarding interval`.
+
+In the future, if needed, we can revisit enabling snapshots and hash
+calculation during the rewarding interval.
+
+3. account-db related actions during the `rewarding interval`
+
+Account-db related actions such as flush, clean, squash, shrink, etc. may
+touch and evict the stake accounts from account db's cache during the
+`rewarding interval`. This will slow down the crediting later at bank
+`epoch_start+N`. We may need to exclude such accounts_db actions for
+stake_accounts during the `rewarding interval`. This is going to be a
+performance tuning problem. In the first implementation, for simplicity, we
+will keep the account-db actions as they are, and make the `credit interval`
+larger to accommodate the performance hit when writing back those accounts.
+In the future, we can continue tuning account db actions during the
+'rewarding interval'.
+
+4. view of total epoch capitalization change
+
+The view of total epoch capitalization, instead of being available at every
+epoch boundary, is only available after the `rewarding interval`. Any
+third-party application logic which depends on total epoch capitalization
+needs to wait until after the `rewarding interval`.
+
+5. `getInflationReward` JSONRPC API method call
+
+Today, the `getInflationReward` JSONRPC API method call can simply grab the
+first block in the target epoch and look up the target stake account's
+rewards entry. With these changes, the call will need to be updated to derive
+the target stake account's credit block, grab _that_ block, then look up the
+rewards. Additionally, we'll need to return more informative errors for
+queries made during the lockout period, so users can know that their rewards
+are pending for the target epoch. A new rpc API, i.e. `getRewardInterval`,
+will be added for querying the `rewarding interval` for the current epoch.

From c802b12e10bdbee6ddc116efb60135bdc0f5a6b4 Mon Sep 17 00:00:00 2001
From: steviez
Date: Thu, 6 Oct 2022 14:27:32 -0500
Subject: [PATCH 23/65] Separate deprecated arguments out to reduce clutter
 (#28250)

---
 Cargo.lock              |   1 +
 programs/bpf/Cargo.lock |   1 +
 validator/Cargo.toml    |   1 +
 validator/src/main.rs   | 241 +++++++++++++++++++++-------------
 4 files changed, 128 insertions(+), 116 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 36f13ff6fc00a7..8dafb24e200033 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6639,6 +6639,7 @@ dependencies = [
  "jsonrpc-derive",
  "jsonrpc-ipc-server",
  "jsonrpc-server-utils",
+ "lazy_static",
  "libc",
  "log",
  "num_cpus",
diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock
index e818163950bb13..576bfb8689665e 100644
--- a/programs/bpf/Cargo.lock
+++ b/programs/bpf/Cargo.lock
@@ -5853,6 +5853,7 @@ dependencies = [
  "jsonrpc-derive",
  "jsonrpc-ipc-server",
  "jsonrpc-server-utils",
+ "lazy_static",
  "libc",
  "log",
  "num_cpus",
diff --git a/validator/Cargo.toml b/validator/Cargo.toml
index 07ce035db44884..ad5bdaf383070f 100644
--- a/validator/Cargo.toml
+++ b/validator/Cargo.toml
@@ -23,6 +23,7 @@ jsonrpc-core-client = { version = "18.0.0", features = ["ipc"] }
 jsonrpc-derive = "18.0.0"
 jsonrpc-ipc-server = "18.0.0"
 jsonrpc-server-utils = "18.0.0"
+lazy_static = "1.4.0"
 log = "0.4.17"
 num_cpus = "1.13.1"
 rand = "0.7.0"
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 878046f85c96ba..60febdfb92608c 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -7,6 +7,7 @@ use {
         AppSettings, Arg, ArgMatches, SubCommand,
     },
     console::style,
+    lazy_static::lazy_static,
     log::*,
     rand::{seq::SliceRandom, thread_rng},
     solana_clap_utils::{
@@ -594,15 +595,6 @@ pub fn main() {
             .takes_value(false)
             .help("Launch validator without voting"),
         )
-        .arg(
-            Arg::with_name("no_check_vote_account")
-                .long("no-check-vote-account")
-                .takes_value(false)
-                .conflicts_with("no_voting")
-                .requires("entrypoint")
.hidden(true) - .help("Skip the RPC vote account sanity check") - ) .arg( Arg::with_name("check_vote_account") .long("check-vote-account") @@ -639,13 +631,6 @@ pub fn main() { .validator(solana_validator::port_validator) .help("Enable JSON RPC on this port, and the next port for the RPC websocket"), ) - .arg( - Arg::with_name("minimal_rpc_api") - .long("--minimal-rpc-api") - .takes_value(false) - .hidden(true) - .help("Only expose the RPC methods required to serve snapshots to other nodes"), - ) .arg( Arg::with_name("full_rpc_api") .long("--full-rpc-api") @@ -694,16 +679,6 @@ pub fn main() { .takes_value(false) .help("Upload new confirmed blocks into a BigTable instance"), ) - .arg( - Arg::with_name("enable_cpi_and_log_storage") - .long("enable-cpi-and-log-storage") - .requires("enable_rpc_transaction_history") - .takes_value(false) - .hidden(true) - .help("Deprecated, please use \"enable-extended-tx-metadata-storage\". \ - Include CPI inner instructions, logs and return data in \ - the historical transaction info stored"), - ) .arg( Arg::with_name("enable_extended_tx_metadata_storage") .long("enable-extended-tx-metadata-storage") @@ -886,18 +861,6 @@ pub fn main() { slots behind the highest snapshot available for \ download from other validators"), ) - .arg( - Arg::with_name("incremental_snapshots") - .long("incremental-snapshots") - .takes_value(false) - .hidden(true) - .conflicts_with("no_incremental_snapshots") - .help("Enable incremental snapshots") - .long_help("Enable incremental snapshots by setting this flag. \ - When enabled, --snapshot-interval-slots will set the \ - incremental snapshot interval. To set the full snapshot \ - interval, use --full-snapshot-interval-slots.") - ) .arg( Arg::with_name("no_incremental_snapshots") .long("no-incremental-snapshots") @@ -1227,12 +1190,6 @@ pub fn main() { will not push/pull from from validators outside this set. \ [default: all validators]") ) - .arg( - Arg::with_name("no_rocksdb_compaction") - .long("no-rocksdb-compaction") - .takes_value(false) - .help("Disable manual compaction of the ledger database (this is ignored).") - ) .arg( Arg::with_name("rocksdb_compaction_interval") .long("rocksdb-compaction-interval-slots") @@ -1268,17 +1225,6 @@ pub fn main() { .takes_value(false) .help("Enable UDP for receiving/sending transactions."), ) - .arg( - Arg::with_name("disable_quic_servers") - .long("disable-quic-servers") - .takes_value(false) - .hidden(true) - ) - .arg( - Arg::with_name("enable_quic_servers") - .hidden(true) - .long("enable-quic-servers") - ) .arg( Arg::with_name("tpu_connection_pool_size") .long("tpu-connection-pool-size") @@ -1632,14 +1578,6 @@ pub fn main() { .takes_value(false) .help("Disable the just-in-time compiler and instead use the interpreter for BPF"), ) - .arg( - // legacy nop argument - Arg::with_name("bpf_jit") - .long("bpf-jit") - .hidden(true) - .takes_value(false) - .conflicts_with("no_bpf_jit") - ) .arg( Arg::with_name("poh_pinned_cpu_core") .hidden(true) @@ -1805,28 +1743,6 @@ pub fn main() { .help("Enables testing of hash calculation using stores in \ AccountsHashVerifier. This has a computational cost."), ) - .arg( - Arg::with_name("accounts_db_index_hashing") - .long("accounts-db-index-hashing") - .help("Enables the use of the index in hash calculation in \ - AccountsHashVerifier/Accounts Background Service.") - .hidden(true), - ) - .arg( - Arg::with_name("no_accounts_db_index_hashing") - .long("no-accounts-db-index-hashing") - .help("This is obsolete. See --accounts-db-index-hashing. 
\ - Disables the use of the index in hash calculation in \ - AccountsHashVerifier/Accounts Background Service.") - .hidden(true), - ) - .arg( - // legacy nop argument - Arg::with_name("accounts_db_caching_enabled") - .long("accounts-db-caching-enabled") - .conflicts_with("no_accounts_db_caching") - .hidden(true) - ) .arg( Arg::with_name("accounts_shrink_optimize_total_space") .long("accounts-shrink-optimize-total-space") @@ -1877,6 +1793,7 @@ pub fn main() { .long("replay-slots-concurrently") .help("Allow concurrent replay of slots on different forks") ) + .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") .subcommand( SubCommand::with_name("exit") @@ -2055,6 +1972,7 @@ pub fn main() { then this not a good time for a restart") ) .get_matches(); + warn_for_deprecated_arguments(&matches); let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr")); let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap()); @@ -2375,9 +2293,6 @@ pub fn main() { let init_complete_file = matches.value_of("init_complete_file"); - if matches.is_present("no_check_vote_account") { - info!("vote account sanity checks are no longer performed by default. --no-check-vote-account is deprecated and can be removed from the command line"); - } let rpc_bootstrap_config = bootstrap::RpcBootstrapConfig { no_genesis_fetch: matches.is_present("no_genesis_fetch"), no_snapshot_fetch: matches.is_present("no_snapshot_fetch"), @@ -2621,25 +2536,6 @@ pub fn main() { None }; - if matches.is_present("minimal_rpc_api") { - warn!("--minimal-rpc-api is now the default behavior. This flag is deprecated and can be removed from the launch args"); - } - - if matches.is_present("enable_cpi_and_log_storage") { - warn!( - "--enable-cpi-and-log-storage is deprecated. Please update the \ - launch args to use --enable-extended-tx-metadata-storage and remove \ - --enable-cpi-and-log-storage" - ); - } - - if matches.is_present("enable_quic_servers") { - warn!("--enable-quic-servers is now the default behavior. This flag is deprecated and can be removed from the launch args"); - } - if matches.is_present("disable_quic_servers") { - warn!("--disable-quic-servers is deprecated. The quic server cannot be disabled."); - } - let rpc_bigtable_config = if matches.is_present("enable_rpc_bigtable_ledger_storage") || matches.is_present("enable_bigtable_ledger_upload") { @@ -2659,12 +2555,6 @@ pub fn main() { None }; - if matches.is_present("accounts_db_index_hashing") { - info!("The accounts hash is only calculated without using the index. --accounts-db-index-hashing is deprecated and can be removed from the command line"); - } - if matches.is_present("no_accounts_db_index_hashing") { - info!("The accounts hash is only calculated without using the index. --no-accounts-db-index-hashing is deprecated and can be removed from the command line"); - } let rpc_send_retry_rate_ms = value_t_or_exit!(matches, "rpc_send_transaction_retry_ms", u64); let rpc_send_batch_size = value_t_or_exit!(matches, "rpc_send_transaction_batch_size", usize); let rpc_send_batch_send_rate_ms = @@ -3012,9 +2902,6 @@ pub fn main() { exit(1); } - if matches.is_present("incremental_snapshots") { - warn!("--incremental-snapshots is now the default behavior. 
This flag is deprecated and can be removed from the launch args") - } if matches.is_present("limit_ledger_size") { let limit_ledger_size = match matches.value_of("limit_ledger_size") { @@ -3334,3 +3221,125 @@ fn process_account_indexes(matches: &ArgMatches) -> AccountSecondaryIndexes { indexes: account_indexes, } } + +// Helper to add arguments that are no longer used but are being kept around to +// avoid breaking validator startup commands +fn get_deprecated_arguments() -> Vec> { + vec![ + Arg::with_name("accounts_db_caching_enabled") + .long("accounts-db-caching-enabled") + .conflicts_with("no_accounts_db_caching") + .hidden(true), + Arg::with_name("accounts_db_index_hashing") + .long("accounts-db-index-hashing") + .help( + "Enables the use of the index in hash calculation in \ + AccountsHashVerifier/Accounts Background Service.", + ) + .hidden(true), + Arg::with_name("no_accounts_db_index_hashing") + .long("no-accounts-db-index-hashing") + .help( + "This is obsolete. See --accounts-db-index-hashing. \ + Disables the use of the index in hash calculation in \ + AccountsHashVerifier/Accounts Background Service.", + ) + .hidden(true), + Arg::with_name("bpf_jit") + .long("bpf-jit") + .hidden(true) + .takes_value(false) + .conflicts_with("no_bpf_jit"), + Arg::with_name("disable_quic_servers") + .long("disable-quic-servers") + .takes_value(false) + .hidden(true), + Arg::with_name("enable_quic_servers") + .hidden(true) + .long("enable-quic-servers"), + Arg::with_name("enable_cpi_and_log_storage") + .long("enable-cpi-and-log-storage") + .requires("enable_rpc_transaction_history") + .takes_value(false) + .hidden(true) + .help( + "Deprecated, please use \"enable-extended-tx-metadata-storage\". \ + Include CPI inner instructions, logs and return data in \ + the historical transaction info stored", + ), + Arg::with_name("incremental_snapshots") + .long("incremental-snapshots") + .takes_value(false) + .hidden(true) + .conflicts_with("no_incremental_snapshots") + .help("Enable incremental snapshots") + .long_help( + "Enable incremental snapshots by setting this flag. \ + When enabled, --snapshot-interval-slots will set the \ + incremental snapshot interval. To set the full snapshot \ + interval, use --full-snapshot-interval-slots.", + ), + Arg::with_name("minimal_rpc_api") + .long("--minimal-rpc-api") + .takes_value(false) + .hidden(true) + .help("Only expose the RPC methods required to serve snapshots to other nodes"), + Arg::with_name("no_check_vote_account") + .long("no-check-vote-account") + .takes_value(false) + .conflicts_with("no_voting") + .requires("entrypoint") + .hidden(true) + .help("Skip the RPC vote account sanity check"), + Arg::with_name("no_rocksdb_compaction") + .long("no-rocksdb-compaction") + .hidden(true) + .takes_value(false) + .help("Disable manual compaction of the ledger database (this is ignored)."), + ] +} + +lazy_static! 
{ + static ref DEPRECATED_ARGS_AND_HELP: Vec<(&'static str, &'static str)> = vec![ + ("accounts_db_caching_enabled", ""), + ( + "accounts_db_index_hashing", + "The accounts hash is only calculated without using the index.", + ), + ( + "no_accounts_db_index_hashing", + "The accounts hash is only calculated without using the index.", + ), + ("bpf_jit", ""), + ( + "disable_quic_servers", + "The quic server cannot be disabled.", + ), + ( + "enable_quic_servers", + "The quic server is now enabled by default.", + ), + ( + "enable_cpi_and_log_storage", + "Please use --enable-extended-tx-metadata-storage instead.", + ), + ("incremental_snapshots", ""), + ("minimal_rpc_api", ""), + ( + "no_check_vote_account", + "Vote account sanity checks are no longer performed by default.", + ), + ("no_rocksdb_compaction", ""), + ]; +} + +fn warn_for_deprecated_arguments(matches: &ArgMatches) { + for (arg, help) in DEPRECATED_ARGS_AND_HELP.iter() { + if matches.is_present(arg) { + warn!( + "{}", + format!("--{} is deprecated. {}", arg, help).replace('_', "-") + ); + } + } +} From 981c9d07a40166460bd500878005099c024c76b8 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 6 Oct 2022 16:17:32 -0400 Subject: [PATCH 24/65] Rearranges eah TestEnvironment fields to ensure drop order (#28270) --- core/tests/epoch_accounts_hash.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 2177ae701dd024..a170b891f9d9d2 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -42,16 +42,17 @@ use { }; struct TestEnvironment { + /// NOTE: The fields are arranged to ensure they are dropped in the correct order. + /// - BankForks must be dropped before BackgroundServices + /// - BackgroundServices must be dropped before the TempDirs + /// - SnapshotConfig should be dropped before the TempDirs bank_forks: Arc>, - + background_services: BackgroundServices, genesis_config_info: GenesisConfigInfo, + _snapshot_config: SnapshotConfig, _bank_snapshots_dir: TempDir, _full_snapshot_archives_dir: TempDir, _incremental_snapshot_archives_dir: TempDir, - _snapshot_config: SnapshotConfig, - - // NOTE: This field must come after bank_forks because it must be dropped after - background_services: BackgroundServices, } impl TestEnvironment { From a8d5731e5b25f55beefb86ec48d9e0f5f77df4e9 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Thu, 6 Oct 2022 12:21:51 -0700 Subject: [PATCH 25/65] Give the quic client and server thread the names --- streamer/src/quic.rs | 1 + tpu-client/src/quic_client.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 39295fa82a5551..1089537c74fa7b 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -99,6 +99,7 @@ pub(crate) fn configure_server( fn rt() -> Runtime { Builder::new_multi_thread() .worker_threads(NUM_QUIC_STREAMER_WORKER_THREADS) + .thread_name("quic-server") .enable_all() .build() .unwrap() diff --git a/tpu-client/src/quic_client.rs b/tpu-client/src/quic_client.rs index 6bee76189ccf9d..47a597e1718787 100644 --- a/tpu-client/src/quic_client.rs +++ b/tpu-client/src/quic_client.rs @@ -21,6 +21,7 @@ use { lazy_static! 
{ static ref RUNTIME: Runtime = tokio::runtime::Builder::new_multi_thread() + .thread_name("quic-client") .enable_all() .build() .unwrap(); From 76c30e7bd0d1c524321e5ee540bf147ec449a0d8 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 6 Oct 2022 17:11:08 -0700 Subject: [PATCH 26/65] move accounts_db::load_without_fixed_root to test mod (#28271) --- runtime/src/accounts_db.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index edfafe3f3dd0fc..6af7a1d3baa497 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -4843,14 +4843,6 @@ impl AccountsDb { .filter(|(account, _)| !account.is_zero_lamport()) } - pub fn load_without_fixed_root( - &self, - ancestors: &Ancestors, - pubkey: &Pubkey, - ) -> Option<(AccountSharedData, Slot)> { - self.load(ancestors, pubkey, LoadHint::Unspecified) - } - fn read_index_for_accessor_or_load_slow<'a>( &'a self, ancestors: &Ancestors, @@ -9650,6 +9642,14 @@ pub mod tests { None, ) } + + fn load_without_fixed_root( + &self, + ancestors: &Ancestors, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { + self.load(ancestors, pubkey, LoadHint::Unspecified) + } } #[test] From a400178744d2a526b83eee7234bd26239e0d1595 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 6 Oct 2022 23:23:26 -0700 Subject: [PATCH 27/65] hash calc chunking cleanup (#28274) --- runtime/src/accounts_db.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 6af7a1d3baa497..ed41c529b4f176 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1334,7 +1334,7 @@ struct SplitAncientStorages { ancient_slots: Vec, /// lowest slot that is not an ancient append vec first_non_ancient_slot: Slot, - /// slot # of beginning of first full chunk starting at the first non ancient slot + /// slot # of beginning of first aligned chunk starting from the first non ancient slot first_chunk_start: Slot, /// # non-ancient slots to scan non_ancient_slot_count: usize, @@ -1461,7 +1461,7 @@ impl SplitAncientStorages { /// return the range of slots in that chunk /// None indicates the range is empty for that chunk. 
fn get_slot_range(&self, chunk: usize) -> Option> { - let range = if chunk < self.ancient_slot_count { + let range = if self.is_chunk_ancient(chunk) { // ancient append vecs are handled individually let slot = self.ancient_slots[chunk]; Range { @@ -16344,19 +16344,25 @@ pub mod tests { } /// test function to make sure the split range covers exactly every slot in the original range - fn verify_all_slots_covered_exactly_once(splitter: &SplitAncientStorages, range: &Range) { + fn verify_all_slots_covered_exactly_once( + splitter: &SplitAncientStorages, + overall_range: &Range, + ) { // verify all slots covered exactly once let result = get_all_slot_ranges(splitter); - let mut expected = range.start; + let mut expected = overall_range.start; result.iter().for_each(|range| { if let Some(range) = range { + assert!( + overall_range.start == range.start || range.start % MAX_ITEMS_PER_CHUNK == 0 + ); for slot in range.clone() { assert_eq!(slot, expected); expected += 1; } } }); - assert_eq!(expected, range.end); + assert_eq!(expected, overall_range.end); } /// new splitter for test @@ -16431,8 +16437,8 @@ pub mod tests { assert_eq!( result, [ - Some(1..MAX_ITEMS_PER_CHUNK), - Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK + 1), + Some(offset..MAX_ITEMS_PER_CHUNK), + Some(MAX_ITEMS_PER_CHUNK..MAX_ITEMS_PER_CHUNK + offset), None ] ); @@ -16547,7 +16553,7 @@ pub mod tests { } #[test] - fn test_split_storages_splitter_broken() { + fn test_split_storages_splitter_large_offset() { solana_logger::setup(); // 1 full chunk - 1, mis-aligned by 2 at big offset // huge offset @@ -16582,7 +16588,7 @@ pub mod tests { for reduced_items in [0, 1, 2] { for added_items in [0, 1, 2] { // this will verify the entire range correctly - let _ = new_splitter2( + _ = new_splitter2( offset * offset_multiplier, (full_chunks * MAX_ITEMS_PER_CHUNK + added_items) .saturating_sub(reduced_items), From f6fee4ac3aa3fae51e43d19542a47b766d6ff773 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Fri, 7 Oct 2022 07:45:05 +0100 Subject: [PATCH 28/65] Serialization refactor (#28251) * Use infallible, unchecked methods to write into the serialization buffer We serialize in two steps: first we compute the size of the buffer, then we write into it. Therefore there's no need to check if each individual write fits the buffer - we know it does we just computed the required size. * serialize_parameters: remove extra loop/borrows Remove one extra loop over accounts to gather account lengths. Also gather all accounts at once and avoid temporary borrows. * Move creating MemoryRegions for serialized parameters from create_vm to serialize_parameters This is in preparation of using multiple MemoryRegions once we land direct account mapping. * bpf_loader: introduce internal API to build serialization buffer/regions This is prep work for landing the direct_mapping feature, which maps account data in their own memory regions. 
* serialization: fix after API changes --- program-test/src/lib.rs | 2 +- programs/bpf/benches/bpf_loader.rs | 13 +- programs/bpf/tests/programs.rs | 24 +- programs/bpf_loader/src/lib.rs | 19 +- programs/bpf_loader/src/serialization.rs | 350 ++++++++++++++--------- rbpf-cli/src/main.rs | 4 +- sdk/src/transaction_context.rs | 7 + 7 files changed, 247 insertions(+), 172 deletions(-) diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 6ecf346421d220..03ed0ef3976523 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -118,7 +118,7 @@ pub fn builtin_process_instruction( let deduplicated_indices: HashSet = instruction_account_indices.collect(); // Serialize entrypoint parameters with BPF ABI - let (mut parameter_bytes, _account_lengths) = serialize_parameters( + let (mut parameter_bytes, _regions, _account_lengths) = serialize_parameters( invoke_context.transaction_context, invoke_context .transaction_context diff --git a/programs/bpf/benches/bpf_loader.rs b/programs/bpf/benches/bpf_loader.rs index c0137b0b0b38da..a848cdc1c6d91d 100644 --- a/programs/bpf/benches/bpf_loader.rs +++ b/programs/bpf/benches/bpf_loader.rs @@ -14,7 +14,9 @@ use { solana_measure::measure::Measure, solana_program_runtime::invoke_context::with_mock_invoke_context, solana_rbpf::{ + ebpf::MM_INPUT_START, elf::Executable, + memory_region::MemoryRegion, verifier::RequisiteVerifier, vm::{Config, InstructionMeter, SyscallRegistry, VerifiedExecutable}, }, @@ -31,7 +33,6 @@ use { instruction::{AccountMeta, Instruction}, message::Message, pubkey::Pubkey, - rent::Rent, signature::{Keypair, Signer}, }, std::{env, fs::File, io::Read, mem, path::PathBuf, sync::Arc}, @@ -124,7 +125,7 @@ fn bench_program_alu(bencher: &mut Bencher) { let mut instruction_meter = ThisInstructionMeter { compute_meter }; let mut vm = create_vm( &verified_executable, - &mut inner_iter, + vec![MemoryRegion::new_writable(&mut inner_iter, MM_INPUT_START)], vec![], invoke_context, ) @@ -224,7 +225,7 @@ fn bench_create_vm(bencher: &mut Bencher) { .mock_set_remaining(BUDGET); // Serialize account data - let (mut serialized, account_lengths) = serialize_parameters( + let (_serialized, regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, invoke_context .transaction_context @@ -250,7 +251,7 @@ fn bench_create_vm(bencher: &mut Bencher) { bencher.iter(|| { let _ = create_vm( &verified_executable, - serialized.as_slice_mut(), + regions.clone(), account_lengths.clone(), invoke_context, ) @@ -271,7 +272,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { .mock_set_remaining(BUDGET); // Serialize account data - let (mut serialized, account_lengths) = serialize_parameters( + let (_serialized, regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, invoke_context .transaction_context @@ -298,7 +299,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let mut instruction_meter = ThisInstructionMeter { compute_meter }; let mut vm = create_vm( &verified_executable, - serialized.as_slice_mut(), + regions, account_lengths, invoke_context, ) diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index aa7511ca705d02..0a50fb812259e1 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -224,16 +224,6 @@ fn run_program(name: &str) -> u64 { file.read_to_end(&mut data).unwrap(); let loader_id = bpf_loader::id(); with_mock_invoke_context(loader_id, 0, false, |invoke_context| { - let (parameter_bytes, account_lengths) 
= serialize_parameters( - invoke_context.transaction_context, - invoke_context - .transaction_context - .get_current_instruction_context() - .unwrap(), - true, // should_cap_ix_accounts - ) - .unwrap(); - let compute_meter = invoke_context.get_compute_meter(); let mut instruction_meter = ThisInstructionMeter { compute_meter }; let config = Config { @@ -278,11 +268,21 @@ fn run_program(name: &str) -> u64 { transaction_context .set_return_data(caller, Vec::new()) .unwrap(); - let mut parameter_bytes = parameter_bytes.clone(); + + let (parameter_bytes, regions, account_lengths) = serialize_parameters( + invoke_context.transaction_context, + invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(), + true, // should_cap_ix_accounts + ) + .unwrap(); + { let mut vm = create_vm( &verified_executable, - parameter_bytes.as_slice_mut(), + regions, account_lengths.clone(), invoke_context, ) diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index e99b62adbf3ffd..b97263823a9836 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -29,7 +29,7 @@ use { }, solana_rbpf::{ aligned_memory::AlignedMemory, - ebpf::{HOST_ALIGN, MM_HEAP_START, MM_INPUT_START}, + ebpf::{HOST_ALIGN, MM_HEAP_START}, elf::Executable, error::{EbpfError, UserDefinedError}, memory_region::MemoryRegion, @@ -290,7 +290,7 @@ fn check_loader_id(id: &Pubkey) -> bool { /// Create the BPF virtual machine pub fn create_vm<'a, 'b>( program: &'a VerifiedExecutable, - parameter_bytes: &mut [u8], + regions: Vec, orig_account_lengths: Vec, invoke_context: &'a mut InvokeContext<'b>, ) -> Result, EbpfError> { @@ -303,13 +303,8 @@ pub fn create_vm<'a, 'b>( ); let mut heap = AlignedMemory::::zero_filled(compute_budget.heap_size.unwrap_or(HEAP_LENGTH)); - let parameter_region = MemoryRegion::new_writable(parameter_bytes, MM_INPUT_START); - let vm = EbpfVm::new( - program, - invoke_context, - heap.as_slice_mut(), - vec![parameter_region], - )?; + + let vm = EbpfVm::new(program, invoke_context, heap.as_slice_mut(), regions)?; let check_aligned = bpf_loader_deprecated::id() != invoke_context .transaction_context @@ -1331,7 +1326,7 @@ impl Executor for BpfExecutor { let program_id = *instruction_context.get_last_program_key(transaction_context)?; let mut serialize_time = Measure::start("serialize"); - let (mut parameter_bytes, account_lengths) = serialize_parameters( + let (parameter_bytes, regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, instruction_context, invoke_context @@ -1345,7 +1340,7 @@ impl Executor for BpfExecutor { let execution_result = { let mut vm = match create_vm( &self.verified_executable, - parameter_bytes.as_slice_mut(), + regions, account_lengths, invoke_context, ) { @@ -1480,7 +1475,7 @@ mod tests { super::*, rand::Rng, solana_program_runtime::invoke_context::mock_process_instruction, - solana_rbpf::{verifier::Verifier, vm::SyscallRegistry}, + solana_rbpf::{ebpf::MM_INPUT_START, verifier::Verifier, vm::SyscallRegistry}, solana_runtime::{bank::Bank, bank_client::BankClient}, solana_sdk::{ account::{ diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 7b60c8a10f577b..94283520f99b61 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -1,53 +1,166 @@ #![allow(clippy::integer_arithmetic)] use { - byteorder::{ByteOrder, LittleEndian, WriteBytesExt}, - solana_rbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}, + 
byteorder::{ByteOrder, LittleEndian}, + solana_rbpf::{ + aligned_memory::{AlignedMemory, Pod}, + ebpf::{HOST_ALIGN, MM_INPUT_START}, + memory_region::MemoryRegion, + }, solana_sdk::{ bpf_loader_deprecated, entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, NON_DUP_MARKER}, instruction::InstructionError, pubkey::Pubkey, system_instruction::MAX_PERMITTED_DATA_LENGTH, - transaction_context::{IndexOfAccount, InstructionContext, TransactionContext}, + transaction_context::{ + BorrowedAccount, IndexOfAccount, InstructionContext, TransactionContext, + }, }, - std::{io::prelude::*, mem::size_of}, + std::{mem, mem::size_of}, }; /// Maximum number of instruction accounts that can be serialized into the /// BPF VM. const MAX_INSTRUCTION_ACCOUNTS: u8 = NON_DUP_MARKER; +enum SerializeAccount<'a> { + Account(IndexOfAccount, BorrowedAccount<'a>), + Duplicate(IndexOfAccount), +} + +struct Serializer { + pub buffer: AlignedMemory, + regions: Vec, + vaddr: u64, + region_start: usize, + aligned: bool, +} + +impl Serializer { + fn new(size: usize, start_addr: u64, aligned: bool) -> Serializer { + Serializer { + buffer: AlignedMemory::with_capacity(size), + regions: Vec::new(), + region_start: 0, + vaddr: start_addr, + aligned, + } + } + + fn fill_write(&mut self, num: usize, value: u8) -> std::io::Result<()> { + self.buffer.fill_write(num, value) + } + + pub fn write(&mut self, value: T) { + self.debug_assert_alignment::(); + // Safety: + // in serialize_parameters_(aligned|unaligned) first we compute the + // required size then we write into the newly allocated buffer. There's + // no need to check bounds at every write. + // + // AlignedMemory::write_unchecked _does_ debug_assert!() that the capacity + // is enough, so in the unlikely case we introduce a bug in the size + // computation, tests will abort. 
+ unsafe { + self.buffer.write_unchecked(value); + } + } + + fn write_all(&mut self, value: &[u8]) { + // Safety: + // see write() - the buffer is guaranteed to be large enough + unsafe { + self.buffer.write_all_unchecked(value); + } + } + + fn write_account(&mut self, account: &BorrowedAccount<'_>) -> Result<(), InstructionError> { + self.write_all(account.get_data()); + + if self.aligned { + let align_offset = + (account.get_data().len() as *const u8).align_offset(BPF_ALIGN_OF_U128); + self.fill_write(MAX_PERMITTED_DATA_INCREASE + align_offset, 0) + .map_err(|_| InstructionError::InvalidArgument)?; + } + + Ok(()) + } + + fn push_region(&mut self) { + let range = self.region_start..self.buffer.len(); + let region = MemoryRegion::new_writable( + self.buffer.as_slice_mut().get_mut(range.clone()).unwrap(), + self.vaddr, + ); + self.regions.push(region); + self.region_start = range.end; + self.vaddr += range.len() as u64; + } + + fn finish(mut self) -> (AlignedMemory, Vec) { + self.push_region(); + debug_assert_eq!(self.region_start, self.buffer.len()); + (self.buffer, self.regions) + } + + fn debug_assert_alignment(&self) { + debug_assert!( + !self.aligned + || self + .buffer + .as_slice() + .as_ptr_range() + .end + .align_offset(mem::align_of::()) + == 0 + ); + } +} + pub fn serialize_parameters( transaction_context: &TransactionContext, instruction_context: &InstructionContext, should_cap_ix_accounts: bool, -) -> Result<(AlignedMemory, Vec), InstructionError> { +) -> Result<(AlignedMemory, Vec, Vec), InstructionError> { let num_ix_accounts = instruction_context.get_number_of_instruction_accounts(); if should_cap_ix_accounts && num_ix_accounts > MAX_INSTRUCTION_ACCOUNTS as IndexOfAccount { return Err(InstructionError::MaxAccountsExceeded); } - let is_loader_deprecated = *instruction_context .try_borrow_last_program_account(transaction_context)? .get_owner() == bpf_loader_deprecated::id(); + + let num_accounts = instruction_context.get_number_of_instruction_accounts() as usize; + let mut accounts = Vec::with_capacity(num_accounts); + let mut account_lengths: Vec = Vec::with_capacity(num_accounts); + for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() { + if let Some(index) = + instruction_context.is_instruction_account_duplicate(instruction_account_index)? + { + accounts.push(SerializeAccount::Duplicate(index)); + // unwrap here is safe: if an account is a duplicate, we must have + // seen the original already + account_lengths.push(*account_lengths.get(index as usize).unwrap()); + } else { + let account = instruction_context + .try_borrow_instruction_account(transaction_context, instruction_account_index)?; + account_lengths.push(account.get_data().len()); + accounts.push(SerializeAccount::Account( + instruction_account_index, + account, + )); + }; + } + if is_loader_deprecated { - serialize_parameters_unaligned(transaction_context, instruction_context) + serialize_parameters_unaligned(transaction_context, instruction_context, accounts) } else { - serialize_parameters_aligned(transaction_context, instruction_context) + serialize_parameters_aligned(transaction_context, instruction_context, accounts) } - .and_then(|buffer| { - let account_lengths = (0..instruction_context.get_number_of_instruction_accounts()) - .map(|instruction_account_index| { - Ok(instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)? 
- .get_data() - .len()) - }) - .collect::, InstructionError>>()?; - Ok((buffer, account_lengths)) - }) + .map(|(buffer, regions)| (buffer, regions, account_lengths)) } pub fn deserialize_parameters( @@ -77,82 +190,65 @@ pub fn deserialize_parameters( } } -pub fn serialize_parameters_unaligned( +fn serialize_parameters_unaligned( transaction_context: &TransactionContext, instruction_context: &InstructionContext, -) -> Result, InstructionError> { + accounts: Vec, +) -> Result<(AlignedMemory, Vec), InstructionError> { // Calculate size in order to alloc once let mut size = size_of::(); - for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() { - let duplicate = - instruction_context.is_instruction_account_duplicate(instruction_account_index)?; + for account in &accounts { size += 1; // dup - if duplicate.is_none() { - let data_len = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)? - .get_data() - .len(); - size += size_of::() // is_signer + match account { + SerializeAccount::Duplicate(_) => {} + SerializeAccount::Account(_, account) => { + size += size_of::() // is_signer + size_of::() // is_writable + size_of::() // key + size_of::() // lamports + size_of::() // data len - + data_len // data + + account.get_data().len() // data + size_of::() // owner + size_of::() // executable + size_of::(); // rent_epoch + } } } size += size_of::() // instruction data len + instruction_context.get_instruction_data().len() // instruction data + size_of::(); // program id - let mut v = AlignedMemory::::with_capacity(size); - v.write_u64::(instruction_context.get_number_of_instruction_accounts() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() { - let duplicate = - instruction_context.is_instruction_account_duplicate(instruction_account_index)?; - if let Some(position) = duplicate { - v.write_u8(position as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - } else { - let borrowed_account = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)?; - v.write_u8(NON_DUP_MARKER) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u8(borrowed_account.is_signer() as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u8(borrowed_account.is_writable() as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(borrowed_account.get_key().as_ref()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u64::(borrowed_account.get_lamports()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u64::(borrowed_account.get_data().len() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(borrowed_account.get_data()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(borrowed_account.get_owner().as_ref()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u8(borrowed_account.is_executable() as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u64::(borrowed_account.get_rent_epoch() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - } + let mut s = Serializer::new(size, MM_INPUT_START, false); + + s.write::((accounts.len() as u64).to_le()); + for account in accounts { + match account { + SerializeAccount::Duplicate(position) => s.write(position as u8), + SerializeAccount::Account(_, account) => { + s.write::(NON_DUP_MARKER); + 
s.write::(account.is_signer() as u8); + s.write::(account.is_writable() as u8); + s.write_all(account.get_key().as_ref()); + s.write::(account.get_lamports().to_le()); + s.write::((account.get_data().len() as u64).to_le()); + s.write_account(&account) + .map_err(|_| InstructionError::InvalidArgument)?; + s.write_all(account.get_owner().as_ref()); + s.write::(account.is_executable() as u8); + s.write::((account.get_rent_epoch() as u64).to_le()); + } + }; } - v.write_u64::(instruction_context.get_instruction_data().len() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(instruction_context.get_instruction_data()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all( + s.write::((instruction_context.get_instruction_data().len() as u64).to_le()); + s.write_all(instruction_context.get_instruction_data()); + s.write_all( instruction_context .try_borrow_last_program_account(transaction_context)? .get_key() .as_ref(), - ) - .map_err(|_| InstructionError::InvalidArgument)?; - Ok(v) + ); + + Ok(s.finish()) } pub fn deserialize_parameters_unaligned( @@ -205,24 +301,20 @@ pub fn deserialize_parameters_unaligned( Ok(()) } -pub fn serialize_parameters_aligned( +fn serialize_parameters_aligned( transaction_context: &TransactionContext, instruction_context: &InstructionContext, -) -> Result, InstructionError> { + accounts: Vec, +) -> Result<(AlignedMemory, Vec), InstructionError> { // Calculate size in order to alloc once let mut size = size_of::(); - for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() { - let duplicate = - instruction_context.is_instruction_account_duplicate(instruction_account_index)?; + for account in &accounts { size += 1; // dup - if duplicate.is_some() { - size += 7; // padding to 64-bit aligned - } else { - let data_len = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)? 
- .get_data() - .len(); - size += size_of::() // is_signer + match account { + SerializeAccount::Duplicate(_) => size += 7, // padding to 64-bit aligned + SerializeAccount::Account(_, account) => { + let data_len = account.get_data().len(); + size += size_of::() // is_signer + size_of::() // is_writable + size_of::() // executable + size_of::() // original_data_len @@ -234,69 +326,49 @@ pub fn serialize_parameters_aligned( + MAX_PERMITTED_DATA_INCREASE + (data_len as *const u8).align_offset(BPF_ALIGN_OF_U128) + size_of::(); // rent epoch + } } } size += size_of::() // data len + instruction_context.get_instruction_data().len() + size_of::(); // program id; - let mut v = AlignedMemory::::with_capacity(size); + + let mut s = Serializer::new(size, MM_INPUT_START, true); // Serialize into the buffer - v.write_u64::(instruction_context.get_number_of_instruction_accounts() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() { - let duplicate = - instruction_context.is_instruction_account_duplicate(instruction_account_index)?; - if let Some(position) = duplicate { - v.write_u8(position as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&[0u8, 0, 0, 0, 0, 0, 0]) - .map_err(|_| InstructionError::InvalidArgument)?; // 7 bytes of padding to make 64-bit aligned - } else { - let borrowed_account = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)?; - v.write_u8(NON_DUP_MARKER) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u8(borrowed_account.is_signer() as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u8(borrowed_account.is_writable() as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u8(borrowed_account.is_executable() as u8) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&[0u8, 0, 0, 0]) - .map_err(|_| InstructionError::InvalidArgument)?; // 4 bytes of padding to make 128-bit aligned - v.write_all(borrowed_account.get_key().as_ref()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(borrowed_account.get_owner().as_ref()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u64::(borrowed_account.get_lamports()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u64::(borrowed_account.get_data().len() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(borrowed_account.get_data()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.fill_write( - MAX_PERMITTED_DATA_INCREASE - + (v.write_index() as *const u8).align_offset(BPF_ALIGN_OF_U128), - 0, - ) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_u64::(borrowed_account.get_rent_epoch() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - } + s.write::((accounts.len() as u64).to_le()); + for account in accounts { + match account { + SerializeAccount::Account(_, borrowed_account) => { + s.write::(NON_DUP_MARKER); + s.write::(borrowed_account.is_signer() as u8); + s.write::(borrowed_account.is_writable() as u8); + s.write::(borrowed_account.is_executable() as u8); + s.write_all(&[0u8, 0, 0, 0]); + s.write_all(borrowed_account.get_key().as_ref()); + s.write_all(borrowed_account.get_owner().as_ref()); + s.write::(borrowed_account.get_lamports().to_le()); + s.write::((borrowed_account.get_data().len() as u64).to_le()); + s.write_account(&borrowed_account) + .map_err(|_| InstructionError::InvalidArgument)?; + 
s.write::((borrowed_account.get_rent_epoch() as u64).to_le()); + } + SerializeAccount::Duplicate(position) => { + s.write::(position as u8); + s.write_all(&[0u8, 0, 0, 0, 0, 0, 0]); + } + }; } - v.write_u64::(instruction_context.get_instruction_data().len() as u64) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(instruction_context.get_instruction_data()) - .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all( + s.write::((instruction_context.get_instruction_data().len() as u64).to_le()); + s.write_all(instruction_context.get_instruction_data()); + s.write_all( instruction_context .try_borrow_last_program_account(transaction_context)? .get_key() .as_ref(), - ) - .map_err(|_| InstructionError::InvalidArgument)?; - Ok(v) + ); + + Ok(s.finish()) } pub fn deserialize_parameters_aligned( @@ -525,7 +597,7 @@ mod tests { continue; } - let (mut serialized, _account_lengths) = serialization_result.unwrap(); + let (mut serialized, _regions, _account_lengths) = serialization_result.unwrap(); let (de_program_id, de_accounts, de_instruction_data) = unsafe { deserialize(serialized.as_slice_mut().first_mut().unwrap() as *mut u8) }; assert_eq!(de_program_id, &program_id); @@ -665,7 +737,7 @@ mod tests { // check serialize_parameters_aligned - let (mut serialized, account_lengths) = serialize_parameters( + let (mut serialized, _regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, instruction_context, true, @@ -741,7 +813,7 @@ mod tests { .borrow_mut() .set_owner(bpf_loader_deprecated::id()); - let (mut serialized, account_lengths) = serialize_parameters( + let (mut serialized, _regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, instruction_context, true, diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 3486339d7d8928..fcc97ae257dc2f 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -233,7 +233,7 @@ native machine code before execting it in the virtual machine.", &instruction_data, ); invoke_context.push().unwrap(); - let (mut parameter_bytes, account_lengths) = serialize_parameters( + let (_parameter_bytes, regions, account_lengths) = serialize_parameters( invoke_context.transaction_context, invoke_context .transaction_context @@ -292,7 +292,7 @@ native machine code before execting it in the virtual machine.", let mut vm = create_vm( &verified_executable, - parameter_bytes.as_slice_mut(), + regions, account_lengths, &mut invoke_context, ) diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index e27bf27ffa89ae..02db1c2de0953f 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -700,11 +700,13 @@ pub struct BorrowedAccount<'a> { impl<'a> BorrowedAccount<'a> { /// Returns the index of this account (transaction wide) + #[inline] pub fn get_index_in_transaction(&self) -> IndexOfAccount { self.index_in_transaction } /// Returns the public key of this account (transaction wide) + #[inline] pub fn get_key(&self) -> &Pubkey { self.transaction_context .get_key_of_account_at_index(self.index_in_transaction) @@ -712,6 +714,7 @@ impl<'a> BorrowedAccount<'a> { } /// Returns the owner of this account (transaction wide) + #[inline] pub fn get_owner(&self) -> &Pubkey { self.account.owner() } @@ -760,6 +763,7 @@ impl<'a> BorrowedAccount<'a> { } /// Returns the number of lamports of this account (transaction wide) + #[inline] pub fn get_lamports(&self) -> u64 { self.account.lamports() } @@ -824,6 +828,7 @@ impl<'a> BorrowedAccount<'a> { } /// 
Returns a read-only slice of the account data (transaction wide) + #[inline] pub fn get_data(&self) -> &[u8] { self.account.data() } @@ -936,6 +941,7 @@ impl<'a> BorrowedAccount<'a> { } /// Returns whether this account is executable (transaction wide) + #[inline] pub fn is_executable(&self) -> bool { self.account.executable() } @@ -982,6 +988,7 @@ impl<'a> BorrowedAccount<'a> { /// Returns the rent epoch of this account (transaction wide) #[cfg(not(target_os = "solana"))] + #[inline] pub fn get_rent_epoch(&self) -> u64 { self.account.rent_epoch() } From c10c2f80d0624a8db4194bc097048472457ab1b1 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 7 Oct 2022 16:53:04 +0800 Subject: [PATCH 29/65] docs: use devnet blocks and transactions as v0 transaction examples (#28288) --- docs/src/integrations/exchange.md | 76 ++++++++++++++++--------------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/docs/src/integrations/exchange.md b/docs/src/integrations/exchange.md index 5dd71508387c56..faa5902315e243 100644 --- a/docs/src/integrations/exchange.md +++ b/docs/src/integrations/exchange.md @@ -252,12 +252,12 @@ that isn't necessary for tracking account balances. Set the "transactionDetails" parameter to speed up block fetching. ```bash -curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ +curl https://api.devnet.solana.com -X POST -H 'Content-Type: application/json' -d '{ "jsonrpc": "2.0", "id": 1, "method": "getBlock", "params": [ - 148696677, + 166974442, { "encoding": "jsonParsed", "maxSupportedTransactionVersion": 0, @@ -271,26 +271,27 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ { "jsonrpc": "2.0", "result": { - "blockHeight": 134239354, - "blockTime": 1662064341, - "blockhash": "AuPLyvFX2yA1aVFUqvFfyiB2Sxwu2McL8ALhwbU6w7er", - "parentSlot": 148696675, - "previousBlockhash": "AKu155zCvrgrPvcVBFyboAfY2GF33S3ZDkj2Pa8x19XM", + "blockHeight": 157201607, + "blockTime": 1665070281, + "blockhash": "HKhao674uvFc4wMK1Cm3UyuuGbKExdgPFjXQ5xtvsG3o", + "parentSlot": 166974441, + "previousBlockhash": "98CNLU4rsYa2HDUyp7PubU4DhwYJJhSX9v6pvE7SWsAo", "transactions": [ + ... (omit) { "meta": { "err": null, "fee": 5000, "postBalances": [ - 7161091286, - 2769675090, - 1 + 1110663066, + 1, + 1040000000 ], "postTokenBalances": [], "preBalances": [ - 8130576328, - 1800195048, - 1 + 1120668066, + 1, + 1030000000 ], "preTokenBalances": [], "status": { @@ -300,30 +301,31 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ "transaction": { "accountKeys": [ { - "pubkey": "ogDsdvMKRRRMmsrT2hTPdkQBu1qY2z1jBDzgpi8HZri", + "pubkey": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde", "signer": true, "source": "transaction", "writable": true }, { - "pubkey": "3M2b3tLji7rvscqrLAHMukYxDK2nB96Q9hwfV6QkdzBN", + "pubkey": "11111111111111111111111111111111", "signer": false, "source": "transaction", - "writable": true + "writable": false }, { - "pubkey": "11111111111111111111111111111111", + "pubkey": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", "signer": false, - "source": "transaction", - "writable": false + "source": "lookupTable", + "writable": true } ], "signatures": [ - "36Q383JMiqiobuPV9qBqy41xjMsVnQBm9rdZSdpbrLTGhSQDTGZJnocM4TQTVfUGfV2vEX9ZB3sex6wUBUWzjEvs" + "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM" ] }, - "version": "legacy" - } + "version": 0 + }, + ... 
(omit) ] }, "id": 1 @@ -403,12 +405,12 @@ curl localhost:8899 -X POST -H "Content-Type: application/json" -d '{ [`getTransaction`](developing/clients/jsonrpc-api.md#gettransaction) request: ```bash -curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ +curl https://api.devnet.solana.com -X POST -H 'Content-Type: application/json' -d '{ "jsonrpc":"2.0", "id":1, "method":"getTransaction", "params":[ - "4Cswku8E9sm8TVZ4kP4iHbwCQygMDx78SXSURBkJuJAaXCbL9eYM8RPS2BDooLd5ftML4JjQrohe4deJrFkVzPBa", + "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM", { "encoding":"jsonParsed", "maxSupportedTransactionVersion":0 @@ -420,7 +422,7 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ { "jsonrpc": "2.0", "result": { - "blockTime": 1660763773, + "blockTime": 1665070281, "meta": { "err": null, "fee": 5000, @@ -430,15 +432,15 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ "Program 11111111111111111111111111111111 success" ], "postBalances": [ - 2078778739, + 1110663066, 1, - 26396753106 + 1040000000 ], "postTokenBalances": [], "preBalances": [ - 2078783740, + 1120668066, 1, - 26396753105 + 1030000000 ], "preTokenBalances": [], "rewards": [], @@ -446,7 +448,7 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ "Ok": null } }, - "slot": 155713260, + "slot": 166974442, "transaction": { "message": { "accountKeys": [ @@ -463,7 +465,7 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ "writable": false }, { - "pubkey": "2xNweLHLqrbx4zo1waDvgWJHgsUpPj8Y8icbAFeR4a8i", + "pubkey": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", "signer": false, "source": "lookupTable", "writable": true @@ -471,10 +473,10 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ ], "addressTableLookups": [ { - "accountKey": "3LZbwptsCkv5R5uu1GNZKiX9SoC6egNG8NXg9zH5ZVM9", + "accountKey": "4syr5pBaboZy4cZyF6sys82uGD7jEvoAP2ZMaoich4fZ", "readonlyIndexes": [], "writableIndexes": [ - 1 + 3 ] } ], @@ -482,8 +484,8 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ { "parsed": { "info": { - "destination": "2xNweLHLqrbx4zo1waDvgWJHgsUpPj8Y8icbAFeR4a8i", - "lamports": 1, + "destination": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", + "lamports": 10000000, "source": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde" }, "type": "transfer" @@ -492,10 +494,10 @@ curl localhost:8899 -X POST -H 'Content-Type: application/json' -d '{ "programId": "11111111111111111111111111111111" } ], - "recentBlockhash": "9nLh3gmVhyjrh68UeV1rafyo8BFNyZtHSRUUjZYikveh" + "recentBlockhash": "BhhivDNgoy4L5tLtHb1s3TP19uUXqKiy4FfUR34d93eT" }, "signatures": [ - "4Cswku8E9sm8TVZ4kP4iHbwCQygMDx78SXSURBkJuJAaXCbL9eYM8RPS2BDooLd5ftML4JjQrohe4deJrFkVzPBa" + "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM" ] }, "version": 0 From f4dd24491fcb1d996dfe7221c936a64886c50b1d Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 28 Sep 2022 17:24:42 -0400 Subject: [PATCH 30/65] Migrate SDK from BPF to SBF --- Cargo.lock | 2 + ci/buildkite-pipeline-in-disk.sh | 12 +- ci/buildkite-pipeline.sh | 12 +- ci/buildkite-solana-private.sh | 12 +- ci/nits.sh | 4 +- ci/publish-tarball.sh | 6 +- ci/run-local.sh | 2 +- ci/test-bench.sh | 6 +- ci/{test-stable-bpf.sh => test-stable-sbf.sh} | 0 ci/test-stable.sh | 48 +-- .../on-chain-programs/developing-c.md | 34 +- programs/bpf/c/makefile | 4 +- programs/bpf_loader/test_elfs/makefile | 4 +- 
scripts/build-downstream-anchor-projects.sh | 12 +- scripts/build-downstream-projects.sh | 10 +- scripts/cargo-install-all.sh | 4 +- sdk/cargo-build-bpf/Cargo.toml | 2 + sdk/cargo-build-bpf/src/main.rs | 23 +- sdk/cargo-build-sbf/src/main.rs | 4 +- sdk/sbf/.gitignore | 11 + sdk/sbf/c/README.md | 44 +++ sdk/sbf/c/inc/deserialize_deprecated.h | 1 + sdk/sbf/c/inc/sol/assert.h | 56 ++++ sdk/sbf/c/inc/sol/blake3.h | 39 +++ sdk/sbf/c/inc/sol/cpi.h | 138 ++++++++ sdk/sbf/c/inc/sol/deserialize.h | 137 ++++++++ sdk/sbf/c/inc/sol/deserialize_deprecated.h | 119 +++++++ sdk/sbf/c/inc/sol/entrypoint.h | 66 ++++ sdk/sbf/c/inc/sol/inc/assert.inc | 47 +++ sdk/sbf/c/inc/sol/inc/blake3.inc | 30 ++ sdk/sbf/c/inc/sol/inc/cpi.inc | 117 +++++++ sdk/sbf/c/inc/sol/inc/keccak.inc | 30 ++ sdk/sbf/c/inc/sol/inc/log.inc | 103 ++++++ sdk/sbf/c/inc/sol/inc/pubkey.inc | 107 ++++++ sdk/sbf/c/inc/sol/inc/return_data.inc | 41 +++ sdk/sbf/c/inc/sol/inc/secp256k1.inc | 41 +++ sdk/sbf/c/inc/sol/inc/sha.inc | 30 ++ sdk/sbf/c/inc/sol/keccak.h | 39 +++ sdk/sbf/c/inc/sol/log.h | 139 ++++++++ sdk/sbf/c/inc/sol/pubkey.h | 134 ++++++++ sdk/sbf/c/inc/sol/return_data.h | 59 ++++ sdk/sbf/c/inc/sol/secp256k1.h | 50 +++ sdk/sbf/c/inc/sol/sha.h | 39 +++ sdk/sbf/c/inc/sol/string.h | 123 +++++++ sdk/sbf/c/inc/sol/types.h | 141 ++++++++ sdk/sbf/c/inc/solana_sdk.h | 21 ++ sdk/sbf/c/inc/stdio.h | 4 + sdk/sbf/c/inc/stdlib.h | 2 + sdk/sbf/c/inc/string.h | 6 + sdk/sbf/c/inc/sys/param.h | 1 + sdk/sbf/c/inc/wchar.h | 1 + sdk/sbf/c/sbf.ld | 24 ++ sdk/sbf/c/sbf.mk | 311 ++++++++++++++++++ sdk/sbf/env.sh | 16 + sdk/sbf/scripts/dump.sh | 46 +++ sdk/sbf/scripts/install.sh | 131 ++++++++ sdk/sbf/scripts/objcopy.sh | 6 + sdk/sbf/scripts/package.sh | 19 ++ sdk/sbf/scripts/strip.sh | 23 ++ 59 files changed, 2596 insertions(+), 97 deletions(-) rename ci/{test-stable-bpf.sh => test-stable-sbf.sh} (100%) create mode 100644 sdk/sbf/.gitignore create mode 100644 sdk/sbf/c/README.md create mode 100644 sdk/sbf/c/inc/deserialize_deprecated.h create mode 100644 sdk/sbf/c/inc/sol/assert.h create mode 100644 sdk/sbf/c/inc/sol/blake3.h create mode 100644 sdk/sbf/c/inc/sol/cpi.h create mode 100644 sdk/sbf/c/inc/sol/deserialize.h create mode 100644 sdk/sbf/c/inc/sol/deserialize_deprecated.h create mode 100644 sdk/sbf/c/inc/sol/entrypoint.h create mode 100644 sdk/sbf/c/inc/sol/inc/assert.inc create mode 100644 sdk/sbf/c/inc/sol/inc/blake3.inc create mode 100644 sdk/sbf/c/inc/sol/inc/cpi.inc create mode 100644 sdk/sbf/c/inc/sol/inc/keccak.inc create mode 100644 sdk/sbf/c/inc/sol/inc/log.inc create mode 100644 sdk/sbf/c/inc/sol/inc/pubkey.inc create mode 100644 sdk/sbf/c/inc/sol/inc/return_data.inc create mode 100644 sdk/sbf/c/inc/sol/inc/secp256k1.inc create mode 100644 sdk/sbf/c/inc/sol/inc/sha.inc create mode 100644 sdk/sbf/c/inc/sol/keccak.h create mode 100644 sdk/sbf/c/inc/sol/log.h create mode 100644 sdk/sbf/c/inc/sol/pubkey.h create mode 100644 sdk/sbf/c/inc/sol/return_data.h create mode 100644 sdk/sbf/c/inc/sol/secp256k1.h create mode 100644 sdk/sbf/c/inc/sol/sha.h create mode 100644 sdk/sbf/c/inc/sol/string.h create mode 100644 sdk/sbf/c/inc/sol/types.h create mode 100644 sdk/sbf/c/inc/solana_sdk.h create mode 100644 sdk/sbf/c/inc/stdio.h create mode 100644 sdk/sbf/c/inc/stdlib.h create mode 100644 sdk/sbf/c/inc/string.h create mode 100644 sdk/sbf/c/inc/sys/param.h create mode 100644 sdk/sbf/c/inc/wchar.h create mode 100644 sdk/sbf/c/sbf.ld create mode 100644 sdk/sbf/c/sbf.mk create mode 100644 sdk/sbf/env.sh create mode 100755 sdk/sbf/scripts/dump.sh create mode 
100755 sdk/sbf/scripts/install.sh create mode 100755 sdk/sbf/scripts/objcopy.sh create mode 100755 sdk/sbf/scripts/package.sh create mode 100755 sdk/sbf/scripts/strip.sh diff --git a/Cargo.lock b/Cargo.lock index 8dafb24e200033..5b17a1cabef298 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4775,6 +4775,8 @@ version = "1.15.0" dependencies = [ "cargo_metadata", "clap 3.1.8", + "log", + "solana-logger 1.15.0", "solana-sdk 1.15.0", ] diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index e9b3dbb55a0f5d..361bef9f369a57 100644 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -163,13 +163,13 @@ all_test_steps() { command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70 wait_step - # BPF test suite + # SBF test suite if affects \ .rs$ \ Cargo.lock$ \ Cargo.toml$ \ ^ci/rust-version.sh \ - ^ci/test-stable-bpf.sh \ + ^ci/test-stable-sbf.sh \ ^ci/test-stable.sh \ ^ci/test-local-cluster.sh \ ^core/build.rs \ @@ -178,16 +178,16 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-bpf.sh" - name: "stable-bpf" + - command: "ci/test-stable-sbf.sh" + name: "stable-sbf" timeout_in_minutes: 35 - artifact_paths: "bpf-dumps.tar.bz2" + artifact_paths: "sbf-dumps.tar.bz2" agents: queue: "gcp" EOF else annotate --style info \ - "Stable-BPF skipped as no relevant files were modified" + "Stable-SBF skipped as no relevant files were modified" fi # Perf test suite diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 348d94b09b68ad..53c3f23756beb9 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -163,13 +163,13 @@ all_test_steps() { fi wait_step - # BPF test suite + # SBF test suite if affects \ .rs$ \ Cargo.lock$ \ Cargo.toml$ \ ^ci/rust-version.sh \ - ^ci/test-stable-bpf.sh \ + ^ci/test-stable-sbf.sh \ ^ci/test-stable.sh \ ^ci/test-local-cluster.sh \ ^core/build.rs \ @@ -178,16 +178,16 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-bpf.sh" - name: "stable-bpf" + - command: "ci/test-stable-sbf.sh" + name: "stable-sbf" timeout_in_minutes: 35 - artifact_paths: "bpf-dumps.tar.bz2" + artifact_paths: "sbf-dumps.tar.bz2" agents: queue: "gcp" EOF else annotate --style info \ - "Stable-BPF skipped as no relevant files were modified" + "Stable-SBF skipped as no relevant files were modified" fi # Perf test suite diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 9f465b7a8e4e47..d05972808ae976 100644 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -163,13 +163,13 @@ all_test_steps() { fi wait_step - # BPF test suite + # SBF test suite if affects \ .rs$ \ Cargo.lock$ \ Cargo.toml$ \ ^ci/rust-version.sh \ - ^ci/test-stable-bpf.sh \ + ^ci/test-stable-sbf.sh \ ^ci/test-stable.sh \ ^ci/test-local-cluster.sh \ ^core/build.rs \ @@ -178,16 +178,16 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-bpf.sh" - name: "stable-bpf" + - command: "ci/test-stable-sbf.sh" + name: "stable-sbf" timeout_in_minutes: 35 - artifact_paths: "bpf-dumps.tar.bz2" + artifact_paths: "sbf-dumps.tar.bz2" agents: queue: "sol-private" EOF else annotate --style info \ - "Stable-BPF skipped as no relevant files were modified" + "Stable-SBF skipped as no relevant files were modified" fi # Perf test suite diff --git a/ci/nits.sh b/ci/nits.sh index 93a71614fe652c..1ebfc1b13b1cf1 100755 --- a/ci/nits.sh +++ b/ci/nits.sh 
@@ -25,9 +25,9 @@ declare print_free_tree=( ':metrics/src/**.rs' ':net-utils/src/**.rs' ':runtime/src/**.rs' - ':sdk/bpf/rust/rust-utils/**.rs' + ':sdk/sbf/rust/rust-utils/**.rs' ':sdk/**.rs' - ':^sdk/cargo-build-bpf/**.rs' + ':^sdk/cargo-build-sbf/**.rs' ':^sdk/program/src/program_option.rs' ':^sdk/program/src/program_stubs.rs' ':programs/**.rs' diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index 5703b371661ee5..04b24aa007a9a2 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -102,10 +102,10 @@ MAYBE_TARBALLS= if [[ "$CI_OS_NAME" = linux ]]; then ( set -x - sdk/bpf/scripts/package.sh - [[ -f bpf-sdk.tar.bz2 ]] + sdk/sbf/scripts/package.sh + [[ -f sbf-sdk.tar.bz2 ]] ) - MAYBE_TARBALLS="bpf-sdk.tar.bz2" + MAYBE_TARBALLS="sbf-sdk.tar.bz2" fi source ci/upload-ci-artifact.sh diff --git a/ci/run-local.sh b/ci/run-local.sh index d73ac81684b22d..aa1bf1344fa245 100755 --- a/ci/run-local.sh +++ b/ci/run-local.sh @@ -12,7 +12,7 @@ steps+=(shellcheck) steps+=(test-checks) steps+=(test-coverage) steps+=(test-stable) -steps+=(test-stable-bpf) +steps+=(test-stable-sbf) steps+=(test-stable-perf) steps+=(test-downstream-builds) steps+=(test-bench) diff --git a/ci/test-bench.sh b/ci/test-bench.sh index ef56bcb1eff2e0..58c6ee3f064e44 100755 --- a/ci/test-bench.sh +++ b/ci/test-bench.sh @@ -28,8 +28,8 @@ _ "$cargo" build --manifest-path=keygen/Cargo.toml export PATH="$PWD/target/debug":$PATH # Clear the C dependency files, if dependency moves these files are not regenerated -test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete -test -d target/release/bpf && find target/release/bpf -name '*.d' -delete +test -d target/debug/sbf && find target/debug/sbf -name '*.d' -delete +test -d target/release/sbf && find target/release/sbf -name '*.d' -delete # Ensure all dependencies are built _ "$cargo" nightly build --release @@ -57,7 +57,7 @@ _ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \ _ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \ -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" -# Run bpf benches +# Run sbf benches _ "$cargo" nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \ -- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE" diff --git a/ci/test-stable-bpf.sh b/ci/test-stable-sbf.sh similarity index 100% rename from ci/test-stable-bpf.sh rename to ci/test-stable-sbf.sh diff --git a/ci/test-stable.sh b/ci/test-stable.sh index 0f0548cae4301e..ccd75adca26742 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -68,25 +68,25 @@ test-stable) _ "$cargo" stable test --jobs "$JOBS" --all --tests --exclude solana-local-cluster ${V:+--verbose} -- --nocapture fi ;; -test-stable-bpf) +test-stable-sbf) # Clear the C dependency files, if dependency moves these files are not regenerated - test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete - test -d target/release/bpf && find target/release/bpf -name '*.d' -delete + test -d target/debug/sbf && find target/debug/sbf -name '*.d' -delete + test -d target/release/sbf && find target/release/sbf -name '*.d' -delete - # rustfilt required for dumping BPF assembly listings + # rustfilt required for dumping SBF assembly listings "$cargo" install rustfilt # solana-keygen required when building C programs _ "$cargo" build --manifest-path=keygen/Cargo.toml export PATH="$PWD/target/debug":$PATH - cargo_build_bpf="$(realpath ./cargo-build-bpf)" - cargo_test_bpf="$(realpath ./cargo-test-bpf)" + 
cargo_build_sbf="$(realpath ./cargo-build-sbf)" + cargo_test_sbf="$(realpath ./cargo-test-sbf)" - # BPF solana-sdk legacy compile test - "$cargo_build_bpf" --manifest-path sdk/Cargo.toml + # SBF solana-sdk legacy compile test + "$cargo_build_sbf" --manifest-path sdk/Cargo.toml - # BPF C program system tests + # SBF C program system tests _ make -C programs/bpf/c tests if need_to_generate_test_result; then _ "$cargo" stable test \ @@ -99,16 +99,16 @@ test-stable-bpf) --no-default-features --features=bpf_c,bpf_rust -- --nocapture fi - # BPF Rust program unit tests - for bpf_test in programs/bpf/rust/*; do - if pushd "$bpf_test"; then + # SBF Rust program unit tests + for sbf_test in programs/bpf/rust/*; do + if pushd "$sbf_test"; then "$cargo" test - "$cargo_build_bpf" --bpf-sdk ../../../../sdk/bpf --dump - "$cargo_test_bpf" --bpf-sdk ../../../../sdk/bpf + "$cargo_build_sbf" --sbf-sdk ../../../../sdk/sbf --dump + "$cargo_test_sbf" --sbf-sdk ../../../../sdk/sbf popd fi done |& tee cargo.log - # Save the output of cargo building the bpf tests so we can analyze + # Save the output of cargo building the sbf tests so we can analyze # the number of redundant rebuilds of dependency crates. The # expected number of solana-program crate compilations is 4. There # should be 3 builds of solana-program while 128bit crate is @@ -125,27 +125,27 @@ test-stable-bpf) exit 1 fi - # bpf-tools version - "$cargo_build_bpf" -V + # sbf-tools version + "$cargo_build_sbf" -V - # BPF program instruction count assertion - bpf_target_path=programs/bpf/target + # SBF program instruction count assertion + sbf_target_path=programs/bpf/target if need_to_generate_test_result; then _ "$cargo" stable test \ --manifest-path programs/bpf/Cargo.toml \ --no-default-features --features=bpf_c,bpf_rust assert_instruction_count \ -- -Z unstable-options --format json --report-time |& tee results.json - awk '!/{ "type": .* }/' results.json >"${bpf_target_path}"/deploy/instuction_counts.txt + awk '!/{ "type": .* }/' results.json >"${sbf_target_path}"/deploy/instuction_counts.txt else _ "$cargo" stable test \ --manifest-path programs/bpf/Cargo.toml \ --no-default-features --features=bpf_c,bpf_rust assert_instruction_count \ - -- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt + -- --nocapture &> "${sbf_target_path}"/deploy/instuction_counts.txt fi - bpf_dump_archive="bpf-dumps.tar.bz2" - rm -f "$bpf_dump_archive" - tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so} + sbf_dump_archive="sbf-dumps.tar.bz2" + rm -f "$sbf_dump_archive" + tar cjvf "$sbf_dump_archive" "${sbf_target_path}"/{deploy/*.txt,sbf-solana-solana/release/*.so} exit 0 ;; test-stable-perf) diff --git a/docs/src/developing/on-chain-programs/developing-c.md b/docs/src/developing/on-chain-programs/developing-c.md index 52469efcd7e09b..db3ef04bd71189 100644 --- a/docs/src/developing/on-chain-programs/developing-c.md +++ b/docs/src/developing/on-chain-programs/developing-c.md @@ -18,10 +18,10 @@ The `makefile` should contain the following: ```bash OUT_DIR := -include ~/.local/share/solana/install/active_release/bin/sdk/bpf/c/bpf.mk +include ~/.local/share/solana/install/active_release/bin/sdk/sbf/c/sbf.mk ``` -The bpf-sdk may not be in the exact place specified above but if you setup your +The sbf-sdk may not be in the exact place specified above but if you setup your environment per [How to Build](#how-to-build) then it should be. Take a look at @@ -56,14 +56,14 @@ information on how to write a test case. 
## Program Entrypoint Programs export a known entrypoint symbol which the Solana runtime looks up and -calls when invoking a program. Solana supports multiple [versions of the BPF +calls when invoking a program. Solana supports multiple [versions of the SBF loader](overview.md#versions) and the entrypoints may vary between them. Programs must be written for and deployed to the same loader. For more details see the [overview](overview#loaders). -Currently there are two supported loaders [BPF +Currently there are two supported loaders [SBF Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17) -and [BPF loader +and [SBF loader deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14). They both have the same raw entrypoint definition, the following is the raw @@ -89,10 +89,10 @@ function](https://github.com/solana-labs/example-helloworld/blob/bc0b25c0ccebeff Each loader provides a helper function that deserializes the program's input parameters into C types: -- [BPF Loader - deserialization](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/bpf/c/inc/solana_sdk.h#L304) -- [BPF Loader deprecated - deserialization](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/bpf/c/inc/deserialize_deprecated.h#L25) +- [SBF Loader + deserialization](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L304) +- [SBF Loader deprecated + deserialization](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/deserialize_deprecated.h#L25) Some programs may want to perform deserialization themselves, and they can by providing their own implementation of the [raw entrypoint](#program-entrypoint). @@ -109,7 +109,7 @@ Details on how the loader serializes the program inputs can be found in the ## Data Types The loader's deserialization helper function populates the -[SolParameters](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/bpf/c/inc/solana_sdk.h#L276) +[SolParameters](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/solana_sdk.h#L276) structure: ```c @@ -128,7 +128,7 @@ typedef struct { 'ka' is an ordered array of the accounts referenced by the instruction and represented as a -[SolAccountInfo](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/bpf/c/inc/solana_sdk.h#L173) +[SolAccountInfo](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/solana_sdk.h#L173) structures. An account's place in the array signifies its meaning, for example, when transferring lamports an instruction may define the first account as the source and the second as the destination. @@ -153,7 +153,7 @@ processed. ## Heap C programs can allocate memory via the system call -[`calloc`](https://github.com/solana-labs/solana/blob/c3d2d2134c93001566e1e56f691582f379b5ae55/sdk/bpf/c/inc/solana_sdk.h#L245) +[`calloc`](https://github.com/solana-labs/solana/blob/c3d2d2134c93001566e1e56f691582f379b5ae55/sdk/sbf/c/inc/solana_sdk.h#L245) or implement their own heap on top of the 32KB heap region starting at virtual address x300000000. The heap region is also used by `calloc` so if a program implements their own heap it should not also call `calloc`. 
@@ -163,8 +163,8 @@ implements their own heap it should not also call `calloc`. The runtime provides two system calls that take data and log it to the program logs. -- [`sol_log(const char*)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/bpf/c/inc/solana_sdk.h#L128) -- [`sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/bpf/c/inc/solana_sdk.h#L134) +- [`sol_log(const char*)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L128) +- [`sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L134) The [debugging](debugging.md#logging) section has more information about working with program logs. @@ -172,7 +172,7 @@ with program logs. ## Compute Budget Use the system call -[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/bpf/c/inc/solana_sdk.h#L140) +[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/sbf/c/inc/solana_sdk.h#L140) to log a message containing the remaining number of compute units the program may consume before execution is halted @@ -181,10 +181,10 @@ for more information. ## ELF Dump -The BPF shared object internals can be dumped to a text file to gain more +The SBF shared object internals can be dumped to a text file to gain more insight into a program's composition and what it may be doing at runtime. The dump will contain both the ELF information as well as a list of all the symbols -and the instructions that implement them. Some of the BPF loader's error log +and the instructions that implement them. Some of the SBF loader's error log messages will reference specific instruction numbers where the error occurred. These references can be looked up in the ELF dump to identify the offending instruction and its context. 
diff --git a/programs/bpf/c/makefile b/programs/bpf/c/makefile index 1cc080ccc76a9d..77b774c2a6bf82 100644 --- a/programs/bpf/c/makefile +++ b/programs/bpf/c/makefile @@ -1,2 +1,2 @@ -BPF_SDK := ../../../sdk/bpf/c -include $(BPF_SDK)/bpf.mk +SBF_SDK := ../../../sdk/sbf/c +include $(SBF_SDK)/sbf.mk diff --git a/programs/bpf_loader/test_elfs/makefile b/programs/bpf_loader/test_elfs/makefile index 1cc080ccc76a9d..77b774c2a6bf82 100644 --- a/programs/bpf_loader/test_elfs/makefile +++ b/programs/bpf_loader/test_elfs/makefile @@ -1,2 +1,2 @@ -BPF_SDK := ../../../sdk/bpf/c -include $(BPF_SDK)/bpf.mk +SBF_SDK := ../../../sdk/sbf/c +include $(SBF_SDK)/sbf.mk diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index 8ee5d4be54613b..ef423fdb0361d6 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -12,8 +12,8 @@ source scripts/read-cargo-variable.sh solana_ver=$(readCargoVariable version sdk/Cargo.toml) solana_dir=$PWD cargo="$solana_dir"/cargo -cargo_build_bpf="$solana_dir"/cargo-build-bpf -cargo_test_bpf="$solana_dir"/cargo-test-bpf +cargo_build_sbf="$solana_dir"/cargo-build-sbf +cargo_test_sbf="$solana_dir"/cargo-test-sbf mkdir -p target/downstream-projects-anchor cd target/downstream-projects-anchor @@ -72,8 +72,8 @@ mango() { $cargo build $cargo test - $cargo_build_bpf - $cargo_test_bpf + $cargo_build_sbf + $cargo_test_sbf ) } @@ -91,8 +91,8 @@ metaplex() { $cargo build $cargo test - $cargo_build_bpf - $cargo_test_bpf + $cargo_build_sbf + $cargo_test_sbf ) } diff --git a/scripts/build-downstream-projects.sh b/scripts/build-downstream-projects.sh index 7c1fa5557c467e..dbf04b79705878 100755 --- a/scripts/build-downstream-projects.sh +++ b/scripts/build-downstream-projects.sh @@ -13,8 +13,8 @@ source scripts/read-cargo-variable.sh solana_ver=$(readCargoVariable version sdk/Cargo.toml) solana_dir=$PWD cargo="$solana_dir"/cargo -cargo_build_bpf="$solana_dir"/cargo-build-bpf -cargo_test_bpf="$solana_dir"/cargo-test-bpf +cargo_build_sbf="$solana_dir"/cargo-build-sbf +cargo_test_sbf="$solana_dir"/cargo-test-sbf mkdir -p target/downstream-projects cd target/downstream-projects @@ -30,7 +30,7 @@ example_helloworld() { patch_crates_io_solana src/program-rust/Cargo.toml "$solana_dir" echo "[workspace]" >> src/program-rust/Cargo.toml - $cargo_build_bpf \ + $cargo_build_sbf \ --manifest-path src/program-rust/Cargo.toml # TODO: Build src/program-c/... 
@@ -68,7 +68,7 @@ spl() { ./patch.crates-io.sh "$solana_dir" for program in "${PROGRAMS[@]}"; do - $cargo_test_bpf --manifest-path "$program"/Cargo.toml + $cargo_test_sbf --manifest-path "$program"/Cargo.toml done # TODO better: `build.rs` for spl-token-cli doesn't seem to properly build @@ -98,7 +98,7 @@ exclude = [ EOF $cargo build - $cargo_build_bpf \ + $cargo_build_sbf \ --manifest-path dex/Cargo.toml --no-default-features --features program $cargo test \ diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index eae5006c6f95c1..da359fe3a5cc3b 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -161,8 +161,8 @@ if [[ -z "$validatorOnly" ]]; then "$cargo" $maybeRustVersion build --manifest-path programs/bpf_loader/gen-syscall-list/Cargo.toml # shellcheck disable=SC2086 # Don't want to double quote $rust_version "$cargo" $maybeRustVersion run --bin gen-headers - mkdir -p "$installDir"/bin/sdk/bpf - cp -a sdk/bpf/* "$installDir"/bin/sdk/bpf + mkdir -p "$installDir"/bin/sdk/sbf + cp -a sdk/sbf/* "$installDir"/bin/sdk/sbf fi ( diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml index 6b30bb1c82a3b0..c561eb765e46b9 100644 --- a/sdk/cargo-build-bpf/Cargo.toml +++ b/sdk/cargo-build-bpf/Cargo.toml @@ -12,6 +12,8 @@ publish = false [dependencies] cargo_metadata = "0.15.0" clap = { version = "3.1.5", features = ["cargo", "env"] } +log = { version = "0.4.14", features = ["std"] } +solana-logger = { path = "../../logger", version = "=1.15.0" } solana-sdk = { path = "..", version = "=1.15.0" } [features] diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index 0ea34f68c6cb74..b1b9a6dbd3dc80 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -1,11 +1,15 @@ -use std::{ - env, - path::PathBuf, - process::{exit, Command, Stdio}, +use { + log::*, + std::{ + env, + path::PathBuf, + process::{exit, Command, Stdio}, + }, }; fn main() { - println!("Warning: cargo-build-bpf is deprecated. Please, use cargo-build-sbf"); + solana_logger::setup(); + warn!("cargo-build-bpf is deprecated. 
Please, use cargo-build-sbf"); let mut args = env::args() .map(|x| { let s = x; @@ -29,22 +33,21 @@ fn main() { let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); args.insert(index, "bpf".to_string()); args.insert(index, "--arch".to_string()); - print!("cargo-build-bpf child: {}", program.display()); + info!("cargo-build-bpf child: {}", program.display()); for a in &args { - print!(" {}", a); + info!(" {}", a); } - println!(); let child = Command::new(&program) .args(&args) .stdout(Stdio::piped()) .spawn() .unwrap_or_else(|err| { - eprintln!("Failed to execute {}: {}", program.display(), err); + error!("Failed to execute {}: {}", program.display(), err); exit(1); }); let output = child.wait_with_output().expect("failed to wait on child"); - println!( + info!( "{}", output .stdout diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 59c44253e97123..3dcf5df2b31462 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -48,7 +48,7 @@ impl Default for Config<'_> { .expect("Unable to get parent directory") .to_path_buf() .join("sdk") - .join("bpf"), + .join("sbf"), sbf_out_dir: None, sbf_tools_version: "(unknown)", dump: false, @@ -526,7 +526,7 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m install_if_missing( config, package, - "https://github.com/solana-labs/bpf-tools/releases/download", + "https://github.com/solana-labs/sbf-tools/releases/download", sbf_tools_download_file_name, &target_path, ) diff --git a/sdk/sbf/.gitignore b/sdk/sbf/.gitignore new file mode 100644 index 00000000000000..e9ab3169be5448 --- /dev/null +++ b/sdk/sbf/.gitignore @@ -0,0 +1,11 @@ +/dependencies/criterion* +/dependencies/hashbrown* +/dependencies/llvm-native* +/dependencies/rust-bpf-sysroot* +/dependencies/bpf-tools* +/dependencies/sbf-tools* +/dependencies/xargo* +/dependencies/bin* +/dependencies/.crates.toml +/dependencies/.crates2.json +/syscalls.txt diff --git a/sdk/sbf/c/README.md b/sdk/sbf/c/README.md new file mode 100644 index 00000000000000..ede109f36da9cc --- /dev/null +++ b/sdk/sbf/c/README.md @@ -0,0 +1,44 @@ +## Development + +### Quick start +To get started create a `makefile` containing: +```make +include path/to/sbf.mk +``` +and `src/program.c` containing: +```c +#include + +bool entrypoint(const uint8_t *input) { + SolKeyedAccount ka[1]; + uint8_t *data; + uint64_t data_len; + + if (!sol_deserialize(buf, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) { + return false; + } + print_params(1, ka, data, data_len); + return true; +} +``` + +Then run `make` to build `out/program.o`. +Run `make help` for more details. + +### Unit tests +Built-in support for unit testing is provided by the +[Criterion](https://criterion.readthedocs.io/en/master/index.html) test framework. +To get started create the file `test/example.c` containing: +```c +#include +#include "../src/program.c" + +Test(test_suite_name, test_case_name) { + cr_assert(true); +} +``` +Then run `make test`. 
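Because the test file includes `src/program.c` directly, Criterion's assertion macros can exercise any helper the program defines. A sketch, assuming the program contains a pure function `add` (a hypothetical name for illustration):
```c
#include <criterion/criterion.h>
#include "../src/program.c"

/* `add` is assumed to be defined in src/program.c for this example. */
Test(example, add_matches_expected) {
  cr_assert_eq(add(2, 3), 5);
  cr_assert_eq(add(2, 3), add(3, 2));
}
```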
+ +### Limitations +* Programs must be fully contained within a single .c file +* No libc is available but `solana_sdk.h` provides a minimal set of primitives diff --git a/sdk/sbf/c/inc/deserialize_deprecated.h b/sdk/sbf/c/inc/deserialize_deprecated.h new file mode 100644 index 00000000000000..db9c6de8aa402c --- /dev/null +++ b/sdk/sbf/c/inc/deserialize_deprecated.h @@ -0,0 +1 @@ +#include \ No newline at end of file diff --git a/sdk/sbf/c/inc/sol/assert.h b/sdk/sbf/c/inc/sol/assert.h new file mode 100644 index 00000000000000..77217d1025989b --- /dev/null +++ b/sdk/sbf/c/inc/sol/assert.h @@ -0,0 +1,56 @@ +#pragma once +/** + * @brief Solana assert and panic utilities + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * Panics + * + * Prints the line number where the panic occurred and then causes + * the BPF VM to immediately halt execution. No accounts' data are updated + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/assert.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +void sol_panic_(const char *, uint64_t, uint64_t, uint64_t); +#else +typedef void(*sol_panic__pointer_type)(const char *, uint64_t, uint64_t, uint64_t); +static void sol_panic_(const char * arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) { + sol_panic__pointer_type sol_panic__pointer = (sol_panic__pointer_type) 1751159739; + sol_panic__pointer(arg1, arg2, arg3, arg4); +} +#endif +#define sol_panic() sol_panic_(__FILE__, sizeof(__FILE__), __LINE__, 0) + +/** + * Asserts + */ +#define sol_assert(expr) \ +if (!(expr)) { \ + sol_panic(); \ +} + +#ifdef SOL_TEST +/** + * Stub functions when building tests + */ +#include +#include + +void sol_panic_(const char *file, uint64_t len, uint64_t line, uint64_t column) { + printf("Panic in %s at %d:%d\n", file, line, column); + abort(); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/blake3.h b/sdk/sbf/c/inc/sol/blake3.h new file mode 100644 index 00000000000000..04c12989e98400 --- /dev/null +++ b/sdk/sbf/c/inc/sol/blake3.h @@ -0,0 +1,39 @@ +#pragma once +/** + * @brief Solana Blake3 system call + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Length of a Blake3 hash result + */ +#define BLAKE3_RESULT_LENGTH 32 + +/** + * Blake3 + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/blake3.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_blake3(const SolBytes *, int, const uint8_t *); +#else +typedef uint64_t(*sol_blake3_pointer_type)(const SolBytes *, int, const uint8_t *); +static uint64_t sol_blake3(const SolBytes * arg1, int arg2, const uint8_t * arg3) { + sol_blake3_pointer_type sol_blake3_pointer = (sol_blake3_pointer_type) 390877474; + return sol_blake3_pointer(arg1, arg2, arg3); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/cpi.h b/sdk/sbf/c/inc/sol/cpi.h new file mode 100644 index 00000000000000..b3748cff2240f9 --- /dev/null +++ b/sdk/sbf/c/inc/sol/cpi.h @@ -0,0 +1,138 @@ +#pragma once +/** + * @brief Solana Cross-Program Invocation + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Maximum CPI instruction data size. 10 KiB was chosen to ensure that CPI + * instructions are not more limited than transaction instructions if the size + * of transactions is doubled in the future. 
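+ *
+ * Instruction data passed through CPI is expected to stay within this limit.
+ * Illustrative sketch of a minimal invocation (assumes `program_id`, a
+ * populated `metas` array of SolAccountMeta, and a matching `account_infos`
+ * array; see sol_invoke() below):
+ *
+ *   uint8_t data[] = {0};
+ *   const SolInstruction instruction = {
+ *     program_id, metas, SOL_ARRAY_SIZE(metas), data, SOL_ARRAY_SIZE(data)
+ *   };
+ *   sol_invoke(&instruction, account_infos, SOL_ARRAY_SIZE(account_infos));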
+ */ +static const uint64_t MAX_CPI_INSTRUCTION_DATA_LEN = 10240; + +/** + * Maximum CPI instruction accounts. 255 was chosen to ensure that instruction + * accounts are always within the maximum instruction account limit for BPF + * program instructions. + */ +static const uint8_t MAX_CPI_INSTRUCTION_ACCOUNTS = 255; + +/** + * Maximum number of account info structs that can be used in a single CPI + * invocation. A limit on account info structs is effectively the same as + * limiting the number of unique accounts. 128 was chosen to match the max + * number of locked accounts per transaction (MAX_TX_ACCOUNT_LOCKS). + */ +static const uint16_t MAX_CPI_ACCOUNT_INFOS = 128; + +/** + * Account Meta + */ +typedef struct { + SolPubkey *pubkey; /** An account's public key */ + bool is_writable; /** True if the `pubkey` can be loaded as a read-write account */ + bool is_signer; /** True if an Instruction requires a Transaction signature matching `pubkey` */ +} SolAccountMeta; + +/** + * Instruction + */ +typedef struct { + SolPubkey *program_id; /** Pubkey of the instruction processor that executes this instruction */ + SolAccountMeta *accounts; /** Metadata for what accounts should be passed to the instruction processor */ + uint64_t account_len; /** Number of SolAccountMetas */ + uint8_t *data; /** Opaque data passed to the instruction processor */ + uint64_t data_len; /** Length of the data in bytes */ +} SolInstruction; + +/** + * Internal cross-program invocation function + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/cpi.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_invoke_signed_c( + const SolInstruction *, + const SolAccountInfo *, + int, + const SolSignerSeeds *, + int +); +#else +typedef uint64_t(*sol_invoke_signed_c_pointer_type)( + const SolInstruction *, + const SolAccountInfo *, + int, + const SolSignerSeeds *, + int +); +static uint64_t sol_invoke_signed_c( + const SolInstruction * arg1, + const SolAccountInfo * arg2, + int arg3, + const SolSignerSeeds * arg4, + int + arg5) { + sol_invoke_signed_c_pointer_type sol_invoke_signed_c_pointer = (sol_invoke_signed_c_pointer_type) 2720767109; + return sol_invoke_signed_c_pointer(arg1, arg2, arg3, arg4, arg5); +} +#endif + +/** + * Invoke another program and sign for some of the keys + * + * @param instruction Instruction to process + * @param account_infos Accounts used by instruction + * @param account_infos_len Length of account_infos array + * @param seeds Seed bytes used to sign program accounts + * @param seeds_len Length of the seeds array + */ +static uint64_t sol_invoke_signed( + const SolInstruction *instruction, + const SolAccountInfo *account_infos, + int account_infos_len, + const SolSignerSeeds *signers_seeds, + int signers_seeds_len +) { + return sol_invoke_signed_c( + instruction, + account_infos, + account_infos_len, + signers_seeds, + signers_seeds_len + ); +} +/** + * Invoke another program + * + * @param instruction Instruction to process + * @param account_infos Accounts used by instruction + * @param account_infos_len Length of account_infos array +*/ +static uint64_t sol_invoke( + const SolInstruction *instruction, + const SolAccountInfo *account_infos, + int account_infos_len +) { + const SolSignerSeeds signers_seeds[] = {{}}; + return sol_invoke_signed( + instruction, + account_infos, + account_infos_len, + signers_seeds, + 0 + ); +} + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/deserialize.h 
b/sdk/sbf/c/inc/sol/deserialize.h new file mode 100644 index 00000000000000..e5060549581dbe --- /dev/null +++ b/sdk/sbf/c/inc/sol/deserialize.h @@ -0,0 +1,137 @@ +#pragma once +/** + * @brief Solana BPF loader deserializer to be used when deploying + * a program with `BPFLoader2111111111111111111111111111111111` or + * `BPFLoaderUpgradeab1e11111111111111111111111` + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Maximum number of bytes a program may add to an account during a single realloc + */ +#define MAX_PERMITTED_DATA_INCREASE (1024 * 10) + +/** + * De-serializes the input parameters into usable types + * + * Use this function to deserialize the buffer passed to the program entrypoint + * into usable types. This function does not perform copy deserialization, + * instead it populates the pointers and lengths in SolAccountInfo and data so + * that any modification to lamports or account data take place on the original + * buffer. Doing so also eliminates the need to serialize back into the buffer + * at the end of the program. + * + * @param input Source buffer containing serialized input parameters + * @param params Pointer to a SolParameters structure + * @return Boolean true if successful. + */ +static bool sol_deserialize( + const uint8_t *input, + SolParameters *params, + uint64_t ka_num +) { + if (NULL == input || NULL == params) { + return false; + } + params->ka_num = *(uint64_t *) input; + input += sizeof(uint64_t); + + for (int i = 0; i < params->ka_num; i++) { + uint8_t dup_info = input[0]; + input += sizeof(uint8_t); + + if (i >= ka_num) { + if (dup_info == UINT8_MAX) { + input += sizeof(uint8_t); + input += sizeof(uint8_t); + input += sizeof(uint8_t); + input += 4; // padding + input += sizeof(SolPubkey); + input += sizeof(SolPubkey); + input += sizeof(uint64_t); + uint64_t data_len = *(uint64_t *) input; + input += sizeof(uint64_t); + input += data_len; + input += MAX_PERMITTED_DATA_INCREASE; + input = (uint8_t*)(((uint64_t)input + 8 - 1) & ~(8 - 1)); // padding + input += sizeof(uint64_t); + } else { + input += 7; // padding + } + continue; + } + if (dup_info == UINT8_MAX) { + // is signer? + params->ka[i].is_signer = *(uint8_t *) input != 0; + input += sizeof(uint8_t); + + // is writable? + params->ka[i].is_writable = *(uint8_t *) input != 0; + input += sizeof(uint8_t); + + // executable? 
+ params->ka[i].executable = *(uint8_t *) input; + input += sizeof(uint8_t); + + input += 4; // padding + + // key + params->ka[i].key = (SolPubkey *) input; + input += sizeof(SolPubkey); + + // owner + params->ka[i].owner = (SolPubkey *) input; + input += sizeof(SolPubkey); + + // lamports + params->ka[i].lamports = (uint64_t *) input; + input += sizeof(uint64_t); + + // account data + params->ka[i].data_len = *(uint64_t *) input; + input += sizeof(uint64_t); + params->ka[i].data = (uint8_t *) input; + input += params->ka[i].data_len; + input += MAX_PERMITTED_DATA_INCREASE; + input = (uint8_t*)(((uint64_t)input + 8 - 1) & ~(8 - 1)); // padding + + // rent epoch + params->ka[i].rent_epoch = *(uint64_t *) input; + input += sizeof(uint64_t); + } else { + params->ka[i].is_signer = params->ka[dup_info].is_signer; + params->ka[i].is_writable = params->ka[dup_info].is_writable; + params->ka[i].executable = params->ka[dup_info].executable; + params->ka[i].key = params->ka[dup_info].key; + params->ka[i].owner = params->ka[dup_info].owner; + params->ka[i].lamports = params->ka[dup_info].lamports; + params->ka[i].data_len = params->ka[dup_info].data_len; + params->ka[i].data = params->ka[dup_info].data; + params->ka[i].rent_epoch = params->ka[dup_info].rent_epoch; + input += 7; // padding + } + } + + params->data_len = *(uint64_t *) input; + input += sizeof(uint64_t); + params->data = input; + input += params->data_len; + + params->program_id = (SolPubkey *) input; + input += sizeof(SolPubkey); + + return true; +} + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/deserialize_deprecated.h b/sdk/sbf/c/inc/sol/deserialize_deprecated.h new file mode 100644 index 00000000000000..4a38eaed678530 --- /dev/null +++ b/sdk/sbf/c/inc/sol/deserialize_deprecated.h @@ -0,0 +1,119 @@ +#pragma once +/** + * @brief Solana deprecated BPF loader deserializer to be used when deploying + * a program with `BPFLoader1111111111111111111111111111111111` + */ + + #include + #include + #include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * De-serializes the input parameters into usable types + * + * Use this function to deserialize the buffer passed to the program entrypoint + * into usable types. This function does not perform copy deserialization, + * instead it populates the pointers and lengths in SolAccountInfo and data so + * that any modification to lamports or account data take place on the original + * buffer. Doing so also eliminates the need to serialize back into the buffer + * at the end of the program. + * + * @param input Source buffer containing serialized input parameters + * @param params Pointer to a SolParameters structure + * @return Boolean true if successful. + */ +static bool sol_deserialize_deprecated( + const uint8_t *input, + SolParameters *params, + uint64_t ka_num +) { + if (NULL == input || NULL == params) { + return false; + } + params->ka_num = *(uint64_t *) input; + input += sizeof(uint64_t); + + for (int i = 0; i < params->ka_num; i++) { + uint8_t dup_info = input[0]; + input += sizeof(uint8_t); + + if (i >= ka_num) { + if (dup_info == UINT8_MAX) { + input += sizeof(uint8_t); + input += sizeof(uint8_t); + input += sizeof(SolPubkey); + input += sizeof(uint64_t); + input += *(uint64_t *) input; + input += sizeof(uint64_t); + input += sizeof(SolPubkey); + input += sizeof(uint8_t); + input += sizeof(uint64_t); + } + continue; + } + if (dup_info == UINT8_MAX) { + // is signer? 
+ params->ka[i].is_signer = *(uint8_t *) input != 0; + input += sizeof(uint8_t); + + // is writable? + params->ka[i].is_writable = *(uint8_t *) input != 0; + input += sizeof(uint8_t); + + // key + params->ka[i].key = (SolPubkey *) input; + input += sizeof(SolPubkey); + + // lamports + params->ka[i].lamports = (uint64_t *) input; + input += sizeof(uint64_t); + + // account data + params->ka[i].data_len = *(uint64_t *) input; + input += sizeof(uint64_t); + params->ka[i].data = (uint8_t *) input; + input += params->ka[i].data_len; + + // owner + params->ka[i].owner = (SolPubkey *) input; + input += sizeof(SolPubkey); + + // executable? + params->ka[i].executable = *(uint8_t *) input; + input += sizeof(uint8_t); + + // rent epoch + params->ka[i].rent_epoch = *(uint64_t *) input; + input += sizeof(uint64_t); + } else { + params->ka[i].is_signer = params->ka[dup_info].is_signer; + params->ka[i].key = params->ka[dup_info].key; + params->ka[i].lamports = params->ka[dup_info].lamports; + params->ka[i].data_len = params->ka[dup_info].data_len; + params->ka[i].data = params->ka[dup_info].data; + params->ka[i].owner = params->ka[dup_info].owner; + params->ka[i].executable = params->ka[dup_info].executable; + params->ka[i].rent_epoch = params->ka[dup_info].rent_epoch; + } + } + + params->data_len = *(uint64_t *) input; + input += sizeof(uint64_t); + params->data = input; + input += params->data_len; + + params->program_id = (SolPubkey *) input; + input += sizeof(SolPubkey); + + return true; +} + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/entrypoint.h b/sdk/sbf/c/inc/sol/entrypoint.h new file mode 100644 index 00000000000000..2557480c141918 --- /dev/null +++ b/sdk/sbf/c/inc/sol/entrypoint.h @@ -0,0 +1,66 @@ +#pragma once +/** + * @brief Solana program entrypoint + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Keyed Account + */ +typedef struct { + SolPubkey *key; /** Public key of the account */ + uint64_t *lamports; /** Number of lamports owned by this account */ + uint64_t data_len; /** Length of data in bytes */ + uint8_t *data; /** On-chain data within this account */ + SolPubkey *owner; /** Program that owns this account */ + uint64_t rent_epoch; /** The epoch at which this account will next owe rent */ + bool is_signer; /** Transaction was signed by this account's key? */ + bool is_writable; /** Is the account writable? */ + bool executable; /** This account's data contains a loaded program (and is now read-only) */ +} SolAccountInfo; + +/** + * The Solana runtime provides a memory region that is available to programs at + * a fixed virtual address and length. The builtin functions `sol_calloc` and + * `sol_free` call into the Solana runtime to allocate from this memory region + * for heap operations. Because the memory region is directly available to + * programs another option is a program can implement their own heap directly on + * top of that region. If a program chooses to implement their own heap they + * should not call the builtin heap functions because they will conflict. + * `HEAP_START_ADDRESS` and `HEAP_LENGTH` specify the memory region's start + * virtual address and length. + */ +#define HEAP_START_ADDRESS (uint64_t)0x300000000 +#define HEAP_LENGTH (uint64_t)(32 * 1024) + +/** + * Structure that the program's entrypoint input data is deserialized into. 
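+ *
+ * Illustrative sketch: the entrypoint typically supplies the backing
+ * SolAccountInfo array itself and lets sol_deserialize() (declared in
+ * sol/deserialize.h) populate the remaining fields:
+ *
+ *   SolAccountInfo accounts[4];
+ *   SolParameters params = (SolParameters){.ka = accounts};
+ *   if (!sol_deserialize(input, &params, SOL_ARRAY_SIZE(accounts))) {
+ *     return ERROR_INVALID_ARGUMENT;
+ *   }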
+ */ +typedef struct { + SolAccountInfo* ka; /** Pointer to an array of SolAccountInfo, must already + point to an array of SolAccountInfos */ + uint64_t ka_num; /** Number of SolAccountInfo entries in `ka` */ + const uint8_t *data; /** pointer to the instruction data */ + uint64_t data_len; /** Length in bytes of the instruction data */ + const SolPubkey *program_id; /** program_id of the currently executing program */ +} SolParameters; + +/** + * Program instruction entrypoint + * + * @param input Buffer of serialized input parameters. Use sol_deserialize() to decode + * @return 0 if the instruction executed successfully + */ +uint64_t entrypoint(const uint8_t *input); + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/assert.inc b/sdk/sbf/c/inc/sol/inc/assert.inc new file mode 100644 index 00000000000000..fa523115e2f73a --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/assert.inc @@ -0,0 +1,47 @@ +#pragma once +/** + * @brief Solana assert and panic utilities + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * Panics + * + * Prints the line number where the panic occurred and then causes + * the BPF VM to immediately halt execution. No accounts' data are updated + */ +@SYSCALL void sol_panic_(const char *, uint64_t, uint64_t, uint64_t); +#define sol_panic() sol_panic_(__FILE__, sizeof(__FILE__), __LINE__, 0) + +/** + * Asserts + */ +#define sol_assert(expr) \ +if (!(expr)) { \ + sol_panic(); \ +} + +#ifdef SOL_TEST +/** + * Stub functions when building tests + */ +#include +#include + +void sol_panic_(const char *file, uint64_t len, uint64_t line, uint64_t column) { + printf("Panic in %s at %d:%d\n", file, line, column); + abort(); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/blake3.inc b/sdk/sbf/c/inc/sol/inc/blake3.inc new file mode 100644 index 00000000000000..b2cfd7b3362a46 --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/blake3.inc @@ -0,0 +1,30 @@ +#pragma once +/** + * @brief Solana Blake3 system call + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Length of a Blake3 hash result + */ +#define BLAKE3_RESULT_LENGTH 32 + +/** + * Blake3 + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +@SYSCALL uint64_t sol_blake3(const SolBytes *, int, const uint8_t *); + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/cpi.inc b/sdk/sbf/c/inc/sol/inc/cpi.inc new file mode 100644 index 00000000000000..41ce4fb01a691b --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/cpi.inc @@ -0,0 +1,117 @@ +#pragma once +/** + * @brief Solana Cross-Program Invocation + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Maximum CPI instruction data size. 10 KiB was chosen to ensure that CPI + * instructions are not more limited than transaction instructions if the size + * of transactions is doubled in the future. + */ +static const uint64_t MAX_CPI_INSTRUCTION_DATA_LEN = 10240; + +/** + * Maximum CPI instruction accounts. 255 was chosen to ensure that instruction + * accounts are always within the maximum instruction account limit for BPF + * program instructions. + */ +static const uint8_t MAX_CPI_INSTRUCTION_ACCOUNTS = 255; + +/** + * Maximum number of account info structs that can be used in a single CPI + * invocation. A limit on account info structs is effectively the same as + * limiting the number of unique accounts. 
128 was chosen to match the max + * number of locked accounts per transaction (MAX_TX_ACCOUNT_LOCKS). + */ +static const uint16_t MAX_CPI_ACCOUNT_INFOS = 128; + +/** + * Account Meta + */ +typedef struct { + SolPubkey *pubkey; /** An account's public key */ + bool is_writable; /** True if the `pubkey` can be loaded as a read-write account */ + bool is_signer; /** True if an Instruction requires a Transaction signature matching `pubkey` */ +} SolAccountMeta; + +/** + * Instruction + */ +typedef struct { + SolPubkey *program_id; /** Pubkey of the instruction processor that executes this instruction */ + SolAccountMeta *accounts; /** Metadata for what accounts should be passed to the instruction processor */ + uint64_t account_len; /** Number of SolAccountMetas */ + uint8_t *data; /** Opaque data passed to the instruction processor */ + uint64_t data_len; /** Length of the data in bytes */ +} SolInstruction; + +/** + * Internal cross-program invocation function + */ +@SYSCALL uint64_t sol_invoke_signed_c( + const SolInstruction *, + const SolAccountInfo *, + int, + const SolSignerSeeds *, + int +); + +/** + * Invoke another program and sign for some of the keys + * + * @param instruction Instruction to process + * @param account_infos Accounts used by instruction + * @param account_infos_len Length of account_infos array + * @param seeds Seed bytes used to sign program accounts + * @param seeds_len Length of the seeds array + */ +static uint64_t sol_invoke_signed( + const SolInstruction *instruction, + const SolAccountInfo *account_infos, + int account_infos_len, + const SolSignerSeeds *signers_seeds, + int signers_seeds_len +) { + return sol_invoke_signed_c( + instruction, + account_infos, + account_infos_len, + signers_seeds, + signers_seeds_len + ); +} +/** + * Invoke another program + * + * @param instruction Instruction to process + * @param account_infos Accounts used by instruction + * @param account_infos_len Length of account_infos array +*/ +static uint64_t sol_invoke( + const SolInstruction *instruction, + const SolAccountInfo *account_infos, + int account_infos_len +) { + const SolSignerSeeds signers_seeds[] = {{}}; + return sol_invoke_signed( + instruction, + account_infos, + account_infos_len, + signers_seeds, + 0 + ); +} + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/keccak.inc b/sdk/sbf/c/inc/sol/inc/keccak.inc new file mode 100644 index 00000000000000..fc882372c1112d --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/keccak.inc @@ -0,0 +1,30 @@ +#pragma once +/** + * @brief Solana keccak system call +**/ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Length of a Keccak hash result + */ +#define KECCAK_RESULT_LENGTH 32 + +/** + * Keccak + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +@SYSCALL uint64_t sol_keccak256(const SolBytes *, int, uint8_t *); + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/log.inc b/sdk/sbf/c/inc/sol/inc/log.inc new file mode 100644 index 00000000000000..c6f7e05da19d61 --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/log.inc @@ -0,0 +1,103 @@ +#pragma once +/** + * @brief Solana logging utilities + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Prints a string to stdout + */ +@SYSCALL void sol_log_(const char *, uint64_t); +#define sol_log(message) sol_log_(message, sol_strlen(message)) + +/** + * Prints a 64 bit values represented in hexadecimal to 
stdout + */ +@SYSCALL void sol_log_64_(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); +#define sol_log_64 sol_log_64_ + +/** + * Prints the current compute unit consumption to stdout + */ +@SYSCALL void sol_log_compute_units_(); +#define sol_log_compute_units() sol_log_compute_units_() + +/** + * Prints the hexadecimal representation of an array + * + * @param array The array to print + */ +static void sol_log_array(const uint8_t *array, int len) { + for (int j = 0; j < len; j++) { + sol_log_64(0, 0, 0, j, array[j]); + } +} + +/** + * Print the base64 representation of some arrays. + */ +@SYSCALL void sol_log_data(SolBytes *, uint64_t); + +/** + * Prints the program's input parameters + * + * @param params Pointer to a SolParameters structure + */ +static void sol_log_params(const SolParameters *params) { + sol_log("- Program identifier:"); + sol_log_pubkey(params->program_id); + + sol_log("- Number of KeyedAccounts"); + sol_log_64(0, 0, 0, 0, params->ka_num); + for (int i = 0; i < params->ka_num; i++) { + sol_log(" - Is signer"); + sol_log_64(0, 0, 0, 0, params->ka[i].is_signer); + sol_log(" - Is writable"); + sol_log_64(0, 0, 0, 0, params->ka[i].is_writable); + sol_log(" - Key"); + sol_log_pubkey(params->ka[i].key); + sol_log(" - Lamports"); + sol_log_64(0, 0, 0, 0, *params->ka[i].lamports); + sol_log(" - data"); + sol_log_array(params->ka[i].data, params->ka[i].data_len); + sol_log(" - Owner"); + sol_log_pubkey(params->ka[i].owner); + sol_log(" - Executable"); + sol_log_64(0, 0, 0, 0, params->ka[i].executable); + sol_log(" - Rent Epoch"); + sol_log_64(0, 0, 0, 0, params->ka[i].rent_epoch); + } + sol_log("- Instruction data\0"); + sol_log_array(params->data, params->data_len); +} + +#ifdef SOL_TEST +/** + * Stub functions when building tests + */ +#include + +void sol_log_(const char *s, uint64_t len) { + printf("Program log: %s\n", s); +} +void sol_log_64(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) { + printf("Program log: %llu, %llu, %llu, %llu, %llu\n", arg1, arg2, arg3, arg4, arg5); +} + +void sol_log_compute_units_() { + printf("Program consumption: __ units remaining\n"); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/pubkey.inc b/sdk/sbf/c/inc/sol/inc/pubkey.inc new file mode 100644 index 00000000000000..5e86e8d39675f3 --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/pubkey.inc @@ -0,0 +1,107 @@ +#pragma once +/** + * @brief Solana Public key + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Size of Public key in bytes + */ +#define SIZE_PUBKEY 32 + +/** + * Public key + */ +typedef struct { + uint8_t x[SIZE_PUBKEY]; +} SolPubkey; + +/** + * Prints the hexadecimal representation of a public key + * + * @param key The public key to print + */ +@SYSCALL void sol_log_pubkey(const SolPubkey *); + +/** + * Compares two public keys + * + * @param one First public key + * @param two Second public key + * @return true if the same + */ +static bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) { + for (int i = 0; i < sizeof(*one); i++) { + if (one->x[i] != two->x[i]) { + return false; + } + } + return true; +} + +/** + * Seed used to create a program address or passed to sol_invoke_signed + */ +typedef struct { + const uint8_t *addr; /** Seed bytes */ + uint64_t len; /** Length of the seed bytes */ +} SolSignerSeed; + +/** + * Seeds used by a signer to create a program address or passed to + * sol_invoke_signed + */ +typedef struct { + const SolSignerSeed *addr; /** An array of a 
signer's seeds */
+  uint64_t len; /** Number of seeds */
+} SolSignerSeeds;
+
+/**
+ * Create a program address
+ *
+ * @param seeds Seed bytes used to sign program accounts
+ * @param seeds_len Length of the seeds array
+ * @param program_id Program id of the signer
+ * @param program_address Program address created, filled on return
+ */
+@SYSCALL uint64_t sol_create_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *);
+
+/**
+ * Try to find a program address and return corresponding bump seed
+ *
+ * @param seeds Seed bytes used to sign program accounts
+ * @param seeds_len Length of the seeds array
+ * @param program_id Program id of the signer
+ * @param program_address Program address created, filled on return
+ * @param bump_seed Bump seed required to create a valid program address
+ */
+@SYSCALL uint64_t sol_try_find_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *, uint8_t *);
+
+#ifdef SOL_TEST
+/**
+ * Stub functions when building tests
+ */
+#include <stdio.h>
+
+void sol_log_pubkey(
+  const SolPubkey *pubkey
+) {
+  printf("Program log: ");
+  for (int i = 0; i < SIZE_PUBKEY; i++) {
+    printf("%02x ", pubkey->x[i]);
+  }
+  printf("\n");
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
diff --git a/sdk/sbf/c/inc/sol/inc/return_data.inc b/sdk/sbf/c/inc/sol/inc/return_data.inc
new file mode 100644
index 00000000000000..9c571f489aeceb
--- /dev/null
+++ b/sdk/sbf/c/inc/sol/inc/return_data.inc
@@ -0,0 +1,41 @@
+#pragma once
+/**
+ * @brief Solana return data system calls
+**/
+
+#include <sol/types.h>
+#include <sol/pubkey.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Maximum size of return data
+ */
+#define MAX_RETURN_DATA 1024
+
+/**
+ * Set the return data
+ *
+ * @param bytes byte array to set
+ * @param bytes_len length of byte array. This may not exceed MAX_RETURN_DATA.
+ */
+@SYSCALL void sol_set_return_data(const uint8_t *, uint64_t);
+
+/**
+ * Get the return data
+ *
+ * @param bytes byte buffer
+ * @param bytes_len maximum length of buffer
+ * @param program_id the program_id which set the return data. Only set if there was some return data (the function returns non-zero).
+ * @param result length of return data (may exceed bytes_len if the return data is longer)
+ */
+@SYSCALL uint64_t sol_get_return_data(uint8_t *, uint64_t, SolPubkey *);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
diff --git a/sdk/sbf/c/inc/sol/inc/secp256k1.inc b/sdk/sbf/c/inc/sol/inc/secp256k1.inc
new file mode 100644
index 00000000000000..e42ac5fb94371b
--- /dev/null
+++ b/sdk/sbf/c/inc/sol/inc/secp256k1.inc
@@ -0,0 +1,41 @@
+#pragma once
+/**
+ * @brief Solana secp256k1 system call
+ */
+
+#include <sol/types.h>
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+/** Length of a secp256k1 recover input hash */
+#define SECP256K1_RECOVER_HASH_LENGTH 32
+/** Length of a secp256k1 input signature */
+#define SECP256K1_RECOVER_SIGNATURE_LENGTH 64
+/** Length of a secp256k1 recover result */
+#define SECP256K1_RECOVER_RESULT_LENGTH 64
+
+/** The hash provided to a sol_secp256k1_recover is invalid */
+#define SECP256K1_RECOVER_ERROR_INVALID_HASH 1
+/** The recovery_id provided to a sol_secp256k1_recover is invalid */
+#define SECP256K1_RECOVER_ERROR_INVALID_RECOVERY_ID 2
+/** The signature provided to a sol_secp256k1_recover is invalid */
+#define SECP256K1_RECOVER_ERROR_INVALID_SIGNATURE 3
+
+/**
+ * Recover public key from a signed message.
+ *
+ * @param hash Hashed message
+ * @param recovery_id Tag used for public key recovery from signatures.
Can be 0 or 1 + * @param signature An ECDSA signature + * @param result 64 byte array to hold the result. A recovered public key + * @return 0 if executed successfully + */ +@SYSCALL uint64_t sol_secp256k1_recover(const uint8_t *, uint64_t, const uint8_t *, uint8_t *); + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/inc/sha.inc b/sdk/sbf/c/inc/sol/inc/sha.inc new file mode 100644 index 00000000000000..8acc35cc687868 --- /dev/null +++ b/sdk/sbf/c/inc/sol/inc/sha.inc @@ -0,0 +1,30 @@ +#pragma once +/** + * @brief Solana sha system call + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Length of a sha256 hash result + */ +#define SHA256_RESULT_LENGTH 32 + +/** + * Sha256 + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +@SYSCALL uint64_t sol_sha256(const SolBytes *, int, uint8_t *); + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/keccak.h b/sdk/sbf/c/inc/sol/keccak.h new file mode 100644 index 00000000000000..213eb4cae18ba6 --- /dev/null +++ b/sdk/sbf/c/inc/sol/keccak.h @@ -0,0 +1,39 @@ +#pragma once +/** + * @brief Solana keccak system call +**/ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Length of a Keccak hash result + */ +#define KECCAK_RESULT_LENGTH 32 + +/** + * Keccak + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/keccak.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_keccak256(const SolBytes *, int, uint8_t *); +#else +typedef uint64_t(*sol_keccak256_pointer_type)(const SolBytes *, int, uint8_t *); +static uint64_t sol_keccak256(const SolBytes * arg1, int arg2, uint8_t * arg3) { + sol_keccak256_pointer_type sol_keccak256_pointer = (sol_keccak256_pointer_type) 3615046331; + return sol_keccak256_pointer(arg1, arg2, arg3); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/log.h b/sdk/sbf/c/inc/sol/log.h new file mode 100644 index 00000000000000..6dd594e14b4521 --- /dev/null +++ b/sdk/sbf/c/inc/sol/log.h @@ -0,0 +1,139 @@ +#pragma once +/** + * @brief Solana logging utilities + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Prints a string to stdout + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +void sol_log_(const char *, uint64_t); +#else +typedef void(*sol_log__pointer_type)(const char *, uint64_t); +static void sol_log_(const char * arg1, uint64_t arg2) { + sol_log__pointer_type sol_log__pointer = (sol_log__pointer_type) 544561597; + sol_log__pointer(arg1, arg2); +} +#endif +#define sol_log(message) sol_log_(message, sol_strlen(message)) + +/** + * Prints a 64 bit values represented in hexadecimal to stdout + */ +/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +void sol_log_64_(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); +#else +typedef void(*sol_log_64__pointer_type)(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); +static void sol_log_64_(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) { + sol_log_64__pointer_type sol_log_64__pointer = (sol_log_64__pointer_type) 1546269048; + sol_log_64__pointer(arg1, arg2, arg3, arg4, arg5); +} +#endif +#define sol_log_64 sol_log_64_ + +/** + * Prints the current compute unit consumption to stdout + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +void sol_log_compute_units_(); +#else +typedef void(*sol_log_compute_units__pointer_type)(); +static void sol_log_compute_units_() { + sol_log_compute_units__pointer_type sol_log_compute_units__pointer = (sol_log_compute_units__pointer_type) 1387942038; + sol_log_compute_units__pointer(); +} +#endif +#define sol_log_compute_units() sol_log_compute_units_() + +/** + * Prints the hexadecimal representation of an array + * + * @param array The array to print + */ +static void sol_log_array(const uint8_t *array, int len) { + for (int j = 0; j < len; j++) { + sol_log_64(0, 0, 0, j, array[j]); + } +} + +/** + * Print the base64 representation of some arrays. + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +void sol_log_data(SolBytes *, uint64_t); +#else +typedef void(*sol_log_data_pointer_type)(SolBytes *, uint64_t); +static void sol_log_data(SolBytes * arg1, uint64_t arg2) { + sol_log_data_pointer_type sol_log_data_pointer = (sol_log_data_pointer_type) 1930933300; + sol_log_data_pointer(arg1, arg2); +} +#endif + +/** + * Prints the program's input parameters + * + * @param params Pointer to a SolParameters structure + */ +static void sol_log_params(const SolParameters *params) { + sol_log("- Program identifier:"); + sol_log_pubkey(params->program_id); + + sol_log("- Number of KeyedAccounts"); + sol_log_64(0, 0, 0, 0, params->ka_num); + for (int i = 0; i < params->ka_num; i++) { + sol_log(" - Is signer"); + sol_log_64(0, 0, 0, 0, params->ka[i].is_signer); + sol_log(" - Is writable"); + sol_log_64(0, 0, 0, 0, params->ka[i].is_writable); + sol_log(" - Key"); + sol_log_pubkey(params->ka[i].key); + sol_log(" - Lamports"); + sol_log_64(0, 0, 0, 0, *params->ka[i].lamports); + sol_log(" - data"); + sol_log_array(params->ka[i].data, params->ka[i].data_len); + sol_log(" - Owner"); + sol_log_pubkey(params->ka[i].owner); + sol_log(" - Executable"); + sol_log_64(0, 0, 0, 0, params->ka[i].executable); + sol_log(" - Rent Epoch"); + sol_log_64(0, 0, 0, 0, params->ka[i].rent_epoch); + } + sol_log("- Instruction data\0"); + sol_log_array(params->data, params->data_len); +} + +#ifdef SOL_TEST +/** + * Stub functions when building tests + */ +#include + +void sol_log_(const char *s, uint64_t len) { + printf("Program log: %s\n", s); +} +void sol_log_64(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) { + printf("Program log: %llu, %llu, %llu, %llu, %llu\n", arg1, arg2, arg3, arg4, arg5); +} + +void sol_log_compute_units_() { + printf("Program consumption: __ units remaining\n"); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/pubkey.h b/sdk/sbf/c/inc/sol/pubkey.h new file mode 100644 index 
00000000000000..a05ae210d58785 --- /dev/null +++ b/sdk/sbf/c/inc/sol/pubkey.h @@ -0,0 +1,134 @@ +#pragma once +/** + * @brief Solana Public key + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Size of Public key in bytes + */ +#define SIZE_PUBKEY 32 + +/** + * Public key + */ +typedef struct { + uint8_t x[SIZE_PUBKEY]; +} SolPubkey; + +/** + * Prints the hexadecimal representation of a public key + * + * @param key The public key to print + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/pubkey.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +void sol_log_pubkey(const SolPubkey *); +#else +typedef void(*sol_log_pubkey_pointer_type)(const SolPubkey *); +static void sol_log_pubkey(const SolPubkey * arg1) { + sol_log_pubkey_pointer_type sol_log_pubkey_pointer = (sol_log_pubkey_pointer_type) 2129692874; + sol_log_pubkey_pointer(arg1); +} +#endif + +/** + * Compares two public keys + * + * @param one First public key + * @param two Second public key + * @return true if the same + */ +static bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) { + for (int i = 0; i < sizeof(*one); i++) { + if (one->x[i] != two->x[i]) { + return false; + } + } + return true; +} + +/** + * Seed used to create a program address or passed to sol_invoke_signed + */ +typedef struct { + const uint8_t *addr; /** Seed bytes */ + uint64_t len; /** Length of the seed bytes */ +} SolSignerSeed; + +/** + * Seeds used by a signer to create a program address or passed to + * sol_invoke_signed + */ +typedef struct { + const SolSignerSeed *addr; /** An array of a signer's seeds */ + uint64_t len; /** Number of seeds */ +} SolSignerSeeds; + +/** + * Create a program address + * + * @param seeds Seed bytes used to sign program accounts + * @param seeds_len Length of the seeds array + * @param program_id Program id of the signer + * @param program_address Program address created, filled on return + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/pubkey.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_create_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *); +#else +typedef uint64_t(*sol_create_program_address_pointer_type)(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *); +static uint64_t sol_create_program_address(const SolSignerSeed * arg1, int arg2, const SolPubkey * arg3, SolPubkey * arg4) { + sol_create_program_address_pointer_type sol_create_program_address_pointer = (sol_create_program_address_pointer_type) 2474062396; + return sol_create_program_address_pointer(arg1, arg2, arg3, arg4); +} +#endif + +/** + * Try to find a program address and return corresponding bump seed + * + * @param seeds Seed bytes used to sign program accounts + * @param seeds_len Length of the seeds array + * @param program_id Program id of the signer + * @param program_address Program address created, filled on return + * @param bump_seed Bump seed required to create a valid program address + */ +/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/pubkey.inc AND RUN `cargo run --bin gen-headers` */
+#ifndef SOL_SBFV2
+uint64_t sol_try_find_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *, uint8_t *);
+#else
+typedef uint64_t(*sol_try_find_program_address_pointer_type)(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *, uint8_t *);
+static uint64_t sol_try_find_program_address(const SolSignerSeed * arg1, int arg2, const SolPubkey * arg3, SolPubkey * arg4, uint8_t * arg5) {
+  sol_try_find_program_address_pointer_type sol_try_find_program_address_pointer = (sol_try_find_program_address_pointer_type) 1213221432;
+  return sol_try_find_program_address_pointer(arg1, arg2, arg3, arg4, arg5);
+}
+#endif
+
+#ifdef SOL_TEST
+/**
+ * Stub functions when building tests
+ */
+#include <stdio.h>
+
+void sol_log_pubkey(
+  const SolPubkey *pubkey
+) {
+  printf("Program log: ");
+  for (int i = 0; i < SIZE_PUBKEY; i++) {
+    printf("%02x ", pubkey->x[i]);
+  }
+  printf("\n");
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
diff --git a/sdk/sbf/c/inc/sol/return_data.h b/sdk/sbf/c/inc/sol/return_data.h
new file mode 100644
index 00000000000000..6afb13513a4b3d
--- /dev/null
+++ b/sdk/sbf/c/inc/sol/return_data.h
@@ -0,0 +1,59 @@
+#pragma once
+/**
+ * @brief Solana return data system calls
+**/
+
+#include <sol/types.h>
+#include <sol/pubkey.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Maximum size of return data
+ */
+#define MAX_RETURN_DATA 1024
+
+/**
+ * Set the return data
+ *
+ * @param bytes byte array to set
+ * @param bytes_len length of byte array. This may not exceed MAX_RETURN_DATA.
+ */
+/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/return_data.inc AND RUN `cargo run --bin gen-headers` */
+#ifndef SOL_SBFV2
+void sol_set_return_data(const uint8_t *, uint64_t);
+#else
+typedef void(*sol_set_return_data_pointer_type)(const uint8_t *, uint64_t);
+static void sol_set_return_data(const uint8_t * arg1, uint64_t arg2) {
+  sol_set_return_data_pointer_type sol_set_return_data_pointer = (sol_set_return_data_pointer_type) 2720453611;
+  sol_set_return_data_pointer(arg1, arg2);
+}
+#endif
+
+/**
+ * Get the return data
+ *
+ * @param bytes byte buffer
+ * @param bytes_len maximum length of buffer
+ * @param program_id the program_id which set the return data. Only set if there was some return data (the function returns non-zero).
+ * @param result length of return data (may exceed bytes_len if the return data is longer)
+ */
+/* DO NOT MODIFY THIS GENERATED FILE.
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/return_data.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_get_return_data(uint8_t *, uint64_t, SolPubkey *); +#else +typedef uint64_t(*sol_get_return_data_pointer_type)(uint8_t *, uint64_t, SolPubkey *); +static uint64_t sol_get_return_data(uint8_t * arg1, uint64_t arg2, SolPubkey * arg3) { + sol_get_return_data_pointer_type sol_get_return_data_pointer = (sol_get_return_data_pointer_type) 1562527204; + return sol_get_return_data_pointer(arg1, arg2, arg3); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/secp256k1.h b/sdk/sbf/c/inc/sol/secp256k1.h new file mode 100644 index 00000000000000..f973defe0e3f78 --- /dev/null +++ b/sdk/sbf/c/inc/sol/secp256k1.h @@ -0,0 +1,50 @@ +#pragma once +/** + * @brief Solana secp256k1 system call + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** Length of a secp256k1 recover input hash */ +#define SECP256K1_RECOVER_HASH_LENGTH 32 +/** Length of a secp256k1 input signature */ +#define SECP256K1_RECOVER_SIGNATURE_LENGTH 64 +/** Length of a secp256k1 recover result */ +#define SECP256K1_RECOVER_RESULT_LENGTH 64 + +/** The hash provided to a sol_secp256k1_recover is invalid */ +#define SECP256K1_RECOVER_ERROR_INVALID_HASH 1 +/** The recovery_id provided to a sol_secp256k1_recover is invalid */ +#define SECP256K1_RECOVER_ERROR_INVALID_RECOVERY_ID 2 +/** The signature provided to a sol_secp256k1_recover is invalid */ +#define SECP256K1_RECOVER_ERROR_INVALID_SIGNATURE 3 + +/** + * Recover public key from a signed message. + * + * @param hash Hashed message + * @param recovery_id Tag used for public key recovery from signatures. Can be 0 or 1 + * @param signature An ECDSA signature + * @param result 64 byte array to hold the result. A recovered public key + * @return 0 if executed successfully + */ +/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/secp256k1.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_secp256k1_recover(const uint8_t *, uint64_t, const uint8_t *, uint8_t *); +#else +typedef uint64_t(*sol_secp256k1_recover_pointer_type)(const uint8_t *, uint64_t, const uint8_t *, uint8_t *); +static uint64_t sol_secp256k1_recover(const uint8_t * arg1, uint64_t arg2, const uint8_t * arg3, uint8_t * arg4) { + sol_secp256k1_recover_pointer_type sol_secp256k1_recover_pointer = (sol_secp256k1_recover_pointer_type) 400819024; + return sol_secp256k1_recover_pointer(arg1, arg2, arg3, arg4); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/sha.h b/sdk/sbf/c/inc/sol/sha.h new file mode 100644 index 00000000000000..ad776e8e914713 --- /dev/null +++ b/sdk/sbf/c/inc/sol/sha.h @@ -0,0 +1,39 @@ +#pragma once +/** + * @brief Solana sha system call + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Length of a sha256 hash result + */ +#define SHA256_RESULT_LENGTH 32 + +/** + * Sha256 + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/sha.inc AND RUN `cargo run --bin gen-headers` */ +#ifndef SOL_SBFV2 +uint64_t sol_sha256(const SolBytes *, int, uint8_t *); +#else +typedef uint64_t(*sol_sha256_pointer_type)(const SolBytes *, int, uint8_t *); +static uint64_t sol_sha256(const SolBytes * arg1, int arg2, uint8_t * arg3) { + sol_sha256_pointer_type sol_sha256_pointer = (sol_sha256_pointer_type) 301243782; + return sol_sha256_pointer(arg1, arg2, arg3); +} +#endif + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/string.h b/sdk/sbf/c/inc/sol/string.h new file mode 100644 index 00000000000000..4813d05a7b80de --- /dev/null +++ b/sdk/sbf/c/inc/sol/string.h @@ -0,0 +1,123 @@ +#pragma once +/** + * @brief Solana string and memory system calls and utilities + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Copies memory + */ +static void sol_memcpy(void *dst, const void *src, int len) { + for (int i = 0; i < len; i++) { + *((uint8_t *)dst + i) = *((const uint8_t *)src + i); + } +} + +/** + * Compares memory + */ +static int sol_memcmp(const void *s1, const void *s2, int n) { + for (int i = 0; i < n; i++) { + uint8_t diff = *((uint8_t *)s1 + i) - *((const uint8_t *)s2 + i); + if (diff) { + return diff; + } + } + return 0; +} + +/** + * Fill a byte string with a byte value + */ +static void *sol_memset(void *b, int c, size_t len) { + uint8_t *a = (uint8_t *) b; + while (len > 0) { + *a = c; + a++; + len--; + } + return b; +} + +/** + * Find length of string + */ +static size_t sol_strlen(const char *s) { + size_t len = 0; + while (*s) { + len++; + s++; + } + return len; +} + +/** + * Start address of the memory region used for program heap. + */ +#define HEAP_START_ADDRESS (0x300000000) +/** + * Length of the heap memory region used for program heap. 
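+ *
+ * Illustrative usage of the bump allocator below, which carves allocations
+ * out of this region from the top down:
+ *
+ *   uint64_t *counters = (uint64_t *)sol_calloc(8, sizeof(uint64_t));
+ *   if (counters == NULL) {
+ *     // heap region exhausted
+ *   }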
+ */ +#define HEAP_LENGTH (32 * 1024) + +/** + * Alloc zero-initialized memory + */ +static void *sol_calloc(size_t nitems, size_t size) { + // Bump allocator + uint64_t* pos_ptr = (uint64_t*)HEAP_START_ADDRESS; + + uint64_t pos = *pos_ptr; + if (pos == 0) { + /** First time, set starting position */ + pos = HEAP_START_ADDRESS + HEAP_LENGTH; + } + + uint64_t bytes = (uint64_t)(nitems * size); + if (size == 0 || + !(nitems == 0 || size == 0) && + !(nitems == bytes / size)) { + /** Overflow */ + return NULL; + } + if (pos < bytes) { + /** Saturated */ + pos = 0; + } else { + pos -= bytes; + } + + uint64_t align = size; + align--; + align |= align >> 1; + align |= align >> 2; + align |= align >> 4; + align |= align >> 8; + align |= align >> 16; + align |= align >> 32; + align++; + pos &= ~(align - 1); + if (pos < HEAP_START_ADDRESS + sizeof(uint8_t*)) { + return NULL; + } + *pos_ptr = pos; + return (void*)pos; +} + +/** + * Deallocates the memory previously allocated by sol_calloc + */ +static void sol_free(void *ptr) { + // I'm a bump allocator, I don't free +} + +#ifdef __cplusplus +} +#endif + +/**@}*/ diff --git a/sdk/sbf/c/inc/sol/types.h b/sdk/sbf/c/inc/sol/types.h new file mode 100644 index 00000000000000..ea565f1f940c70 --- /dev/null +++ b/sdk/sbf/c/inc/sol/types.h @@ -0,0 +1,141 @@ +#pragma once +/** + * @brief Solana types for BPF programs + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Pick up static_assert if C11 or greater + * + * Inlined here until is available + */ +#if (defined _ISOC11_SOURCE || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L)) && !defined (__cplusplus) +#undef static_assert +#define static_assert _Static_assert +#endif + +/** + * Numeric types + */ +#ifndef __LP64__ +#error LP64 data model required +#endif + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef signed short int16_t; +typedef unsigned short uint16_t; +typedef signed int int32_t; +typedef unsigned int uint32_t; +typedef signed long int int64_t; +typedef unsigned long int uint64_t; +typedef int64_t ssize_t; +typedef uint64_t size_t; + +#if defined (__cplusplus) || defined(static_assert) +static_assert(sizeof(int8_t) == 1); +static_assert(sizeof(uint8_t) == 1); +static_assert(sizeof(int16_t) == 2); +static_assert(sizeof(uint16_t) == 2); +static_assert(sizeof(int32_t) == 4); +static_assert(sizeof(uint32_t) == 4); +static_assert(sizeof(int64_t) == 8); +static_assert(sizeof(uint64_t) == 8); +#endif + +/** + * Minimum of signed integral types + */ +#define INT8_MIN (-128) +#define INT16_MIN (-32767-1) +#define INT32_MIN (-2147483647-1) +#define INT64_MIN (-9223372036854775807L-1) + +/** + * Maximum of signed integral types + */ +#define INT8_MAX (127) +#define INT16_MAX (32767) +#define INT32_MAX (2147483647) +#define INT64_MAX (9223372036854775807L) + +/** + * Maximum of unsigned integral types + */ +#define UINT8_MAX (255) +#define UINT16_MAX (65535) +#define UINT32_MAX (4294967295U) +#define UINT64_MAX (18446744073709551615UL) + +/** + * NULL + */ +#define NULL 0 + +/** Indicates the instruction was processed successfully */ +#define SUCCESS 0 + +/** + * Builtin program status values occupy the upper 32 bits of the program return + * value. Programs may define their own error values but they must be confined + * to the lower 32 bits. 
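+ *
+ * Illustrative sketch (hypothetical names): an instruction handler can
+ * surface either kind of error as its return value:
+ *
+ *   if (bad_custom_condition) return 42;              // custom, low 32 bits
+ *   if (bad_argument) return ERROR_INVALID_ARGUMENT;  // builtin, upper 32 bits
+ *   return SUCCESS;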
+ */
+#define TO_BUILTIN(error) ((uint64_t)(error) << 32)
+
+/** Note: Not applicable to programs written in C */
+#define ERROR_CUSTOM_ZERO TO_BUILTIN(1)
+/** The arguments provided to a program instruction were invalid */
+#define ERROR_INVALID_ARGUMENT TO_BUILTIN(2)
+/** An instruction's data contents were invalid */
+#define ERROR_INVALID_INSTRUCTION_DATA TO_BUILTIN(3)
+/** An account's data contents were invalid */
+#define ERROR_INVALID_ACCOUNT_DATA TO_BUILTIN(4)
+/** An account's data was too small */
+#define ERROR_ACCOUNT_DATA_TOO_SMALL TO_BUILTIN(5)
+/** An account's balance was too small to complete the instruction */
+#define ERROR_INSUFFICIENT_FUNDS TO_BUILTIN(6)
+/** The account did not have the expected program id */
+#define ERROR_INCORRECT_PROGRAM_ID TO_BUILTIN(7)
+/** A signature was required but not found */
+#define ERROR_MISSING_REQUIRED_SIGNATURES TO_BUILTIN(8)
+/** An initialize instruction was sent to an account that has already been initialized */
+#define ERROR_ACCOUNT_ALREADY_INITIALIZED TO_BUILTIN(9)
+/** An attempt to operate on an account that hasn't been initialized */
+#define ERROR_UNINITIALIZED_ACCOUNT TO_BUILTIN(10)
+/** The instruction expected additional account keys */
+#define ERROR_NOT_ENOUGH_ACCOUNT_KEYS TO_BUILTIN(11)
+/** Note: Not applicable to programs written in C */
+#define ERROR_ACCOUNT_BORROW_FAILED TO_BUILTIN(12)
+/** The length of the seed is too long for address generation */
+#define MAX_SEED_LENGTH_EXCEEDED TO_BUILTIN(13)
+/** Provided seeds do not result in a valid address */
+#define INVALID_SEEDS TO_BUILTIN(14)
+
+/**
+ * Boolean type
+ */
+#ifndef __cplusplus
+#include <stdbool.h>
+#endif
+
+/**
+ * Computes the number of elements in an array
+ */
+#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+
+/**
+ * Byte array pointer and string
+ */
+typedef struct {
+  const uint8_t *addr; /** bytes */
+  uint64_t len; /** number of bytes*/
+} SolBytes;
+
+#ifdef __cplusplus
+}
+#endif
+
+/**@}*/
diff --git a/sdk/sbf/c/inc/solana_sdk.h b/sdk/sbf/c/inc/solana_sdk.h
new file mode 100644
index 00000000000000..829b66486c541d
--- /dev/null
+++ b/sdk/sbf/c/inc/solana_sdk.h
@@ -0,0 +1,21 @@
+#pragma once
+/**
+ * @brief Solana C-based BPF program types and utility functions
+ */
+
+#include <sol/assert.h>
+#include <sol/blake3.h>
+#include <sol/cpi.h>
+#include <sol/deserialize.h>
+#include <sol/deserialize_deprecated.h>
+#include <sol/entrypoint.h>
+#include <sol/keccak.h>
+#include <sol/log.h>
+#include <sol/pubkey.h>
+#include <sol/return_data.h>
+#include <sol/secp256k1.h>
+#include <sol/sha.h>
+#include <sol/string.h>
+#include <sol/types.h>
+
+/**@}*/
diff --git a/sdk/sbf/c/inc/stdio.h b/sdk/sbf/c/inc/stdio.h
new file mode 100644
index 00000000000000..cd3fda27071dd0
--- /dev/null
+++ b/sdk/sbf/c/inc/stdio.h
@@ -0,0 +1,4 @@
+#pragma once
+typedef void *FILE;
+
+int printf(const char * restrict format, ...
); diff --git a/sdk/sbf/c/inc/stdlib.h b/sdk/sbf/c/inc/stdlib.h new file mode 100644 index 00000000000000..5d2570c5fed5d7 --- /dev/null +++ b/sdk/sbf/c/inc/stdlib.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/sdk/sbf/c/inc/string.h b/sdk/sbf/c/inc/string.h new file mode 100644 index 00000000000000..387f32be7781b0 --- /dev/null +++ b/sdk/sbf/c/inc/string.h @@ -0,0 +1,6 @@ +#pragma once +#include + +#define memcpy sol_memcpy +#define memset sol_memset +#define strlen sol_strlen diff --git a/sdk/sbf/c/inc/sys/param.h b/sdk/sbf/c/inc/sys/param.h new file mode 100644 index 00000000000000..6f70f09beec221 --- /dev/null +++ b/sdk/sbf/c/inc/sys/param.h @@ -0,0 +1 @@ +#pragma once diff --git a/sdk/sbf/c/inc/wchar.h b/sdk/sbf/c/inc/wchar.h new file mode 100644 index 00000000000000..6f70f09beec221 --- /dev/null +++ b/sdk/sbf/c/inc/wchar.h @@ -0,0 +1 @@ +#pragma once diff --git a/sdk/sbf/c/sbf.ld b/sdk/sbf/c/sbf.ld new file mode 100644 index 00000000000000..262fd549324e8e --- /dev/null +++ b/sdk/sbf/c/sbf.ld @@ -0,0 +1,24 @@ +PHDRS +{ + text PT_LOAD ; + rodata PT_LOAD ; + data PT_LOAD ; + dynamic PT_DYNAMIC ; +} + +SECTIONS +{ + . = SIZEOF_HEADERS; + .text : { *(.text*) } :text + .rodata : { *(.rodata*) } :rodata + .data.rel.ro : { *(.data.rel.ro*) } :rodata + .dynamic : { *(.dynamic) } :dynamic + .dynsym : { *(.dynsym) } :data + .dynstr : { *(.dynstr) } :data + .rel.dyn : { *(.rel.dyn) } :data + /DISCARD/ : { + *(.eh_frame*) + *(.gnu.hash*) + *(.hash*) + } +} diff --git a/sdk/sbf/c/sbf.mk b/sdk/sbf/c/sbf.mk new file mode 100644 index 00000000000000..da4748800ae6ac --- /dev/null +++ b/sdk/sbf/c/sbf.mk @@ -0,0 +1,311 @@ +LOCAL_PATH := $(dir $(lastword $(MAKEFILE_LIST))) +INSTALL_SH := $(abspath $(LOCAL_PATH)/../scripts/install.sh) + +all: +.PHONY: help all clean + +ifneq ($(V),1) +_@ :=@ +endif + +INC_DIRS ?= +SRC_DIR ?= ./src +TEST_PREFIX ?= test_ +OUT_DIR ?= ./out +OS := $(shell uname) + +LLVM_DIR = $(LOCAL_PATH)../dependencies/sbf-tools/llvm +LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/14.0.0/include +COMPILER_RT_DIR = $(LOCAL_PATH)../dependencies/sbf-tools/rust/lib/rustlib/sbf-solana-solana/lib +STD_INC_DIRS := $(LLVM_DIR)/include +STD_LIB_DIRS := $(LLVM_DIR)/lib + +ifdef LLVM_DIR +CC := $(LLVM_DIR)/bin/clang +CXX := $(LLVM_DIR)/bin/clang++ +LLD := $(LLVM_DIR)/bin/ld.lld +OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump +READ_ELF := $(LLVM_DIR)/bin/llvm-readelf +endif + +SYSTEM_INC_DIRS := \ + $(LOCAL_PATH)inc \ + $(LLVM_SYSTEM_INC_DIRS) \ + +C_FLAGS := \ + -Werror \ + -O2 \ + -fno-builtin \ + -std=c17 \ + $(addprefix -isystem,$(SYSTEM_INC_DIRS)) \ + $(addprefix -I,$(STD_INC_DIRS)) \ + $(addprefix -I,$(INC_DIRS)) \ + +ifeq ($(SOL_SBFV2),1) +C_FLAGS := \ + $(C_FLAGS) \ + -DSOL_SBFV2=1 +endif + +CXX_FLAGS := \ + $(C_FLAGS) \ + -std=c++17 \ + +SBF_C_FLAGS := \ + $(C_FLAGS) \ + -target sbf \ + -fPIC + +SBF_CXX_FLAGS := \ + $(CXX_FLAGS) \ + -target sbf \ + -fPIC \ + -fomit-frame-pointer \ + -fno-exceptions \ + -fno-asynchronous-unwind-tables \ + -fno-unwind-tables + +SBF_LLD_FLAGS := \ + -z notext \ + -shared \ + --Bdynamic \ + $(LOCAL_PATH)sbf.ld \ + --entry entrypoint \ + -L $(STD_LIB_DIRS) \ + -lc \ + +ifeq ($(SOL_SBFV2),1) +SBF_LLD_FLAGS := \ + $(SBF_LLD_FLAGS) \ + --pack-dyn-relocs=relr +endif + +OBJ_DUMP_FLAGS := \ + --source \ + --disassemble \ + +READ_ELF_FLAGS := \ + --all \ + +TESTFRAMEWORK_RPATH := $(abspath $(LOCAL_PATH)../dependencies/criterion/lib) +TESTFRAMEWORK_FLAGS := \ + -DSOL_TEST \ + -isystem $(LOCAL_PATH)../dependencies/criterion/include \ + -L 
diff --git a/sdk/sbf/c/sbf.mk b/sdk/sbf/c/sbf.mk new file mode 100644 index 00000000000000..da4748800ae6ac --- /dev/null +++ b/sdk/sbf/c/sbf.mk @@ -0,0 +1,311 @@ +LOCAL_PATH := $(dir $(lastword $(MAKEFILE_LIST))) +INSTALL_SH := $(abspath $(LOCAL_PATH)/../scripts/install.sh) + +all: +.PHONY: help all clean + +ifneq ($(V),1) +_@ :=@ +endif + +INC_DIRS ?= +SRC_DIR ?= ./src +TEST_PREFIX ?= test_ +OUT_DIR ?= ./out +OS := $(shell uname) + +LLVM_DIR = $(LOCAL_PATH)../dependencies/sbf-tools/llvm +LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/14.0.0/include +COMPILER_RT_DIR = $(LOCAL_PATH)../dependencies/sbf-tools/rust/lib/rustlib/sbf-solana-solana/lib +STD_INC_DIRS := $(LLVM_DIR)/include +STD_LIB_DIRS := $(LLVM_DIR)/lib + +ifdef LLVM_DIR +CC := $(LLVM_DIR)/bin/clang +CXX := $(LLVM_DIR)/bin/clang++ +LLD := $(LLVM_DIR)/bin/ld.lld +OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump +READ_ELF := $(LLVM_DIR)/bin/llvm-readelf +endif + +SYSTEM_INC_DIRS := \ + $(LOCAL_PATH)inc \ + $(LLVM_SYSTEM_INC_DIRS) \ + +C_FLAGS := \ + -Werror \ + -O2 \ + -fno-builtin \ + -std=c17 \ + $(addprefix -isystem,$(SYSTEM_INC_DIRS)) \ + $(addprefix -I,$(STD_INC_DIRS)) \ + $(addprefix -I,$(INC_DIRS)) \ + +ifeq ($(SOL_SBFV2),1) +C_FLAGS := \ + $(C_FLAGS) \ + -DSOL_SBFV2=1 +endif + +CXX_FLAGS := \ + $(C_FLAGS) \ + -std=c++17 \ + +SBF_C_FLAGS := \ + $(C_FLAGS) \ + -target sbf \ + -fPIC + +SBF_CXX_FLAGS := \ + $(CXX_FLAGS) \ + -target sbf \ + -fPIC \ + -fomit-frame-pointer \ + -fno-exceptions \ + -fno-asynchronous-unwind-tables \ + -fno-unwind-tables + +SBF_LLD_FLAGS := \ + -z notext \ + -shared \ + --Bdynamic \ + $(LOCAL_PATH)sbf.ld \ + --entry entrypoint \ + -L $(STD_LIB_DIRS) \ + -lc \ + +ifeq ($(SOL_SBFV2),1) +SBF_LLD_FLAGS := \ + $(SBF_LLD_FLAGS) \ + --pack-dyn-relocs=relr +endif + +OBJ_DUMP_FLAGS := \ + --source \ + --disassemble \ + +READ_ELF_FLAGS := \ + --all \ + +TESTFRAMEWORK_RPATH := $(abspath $(LOCAL_PATH)../dependencies/criterion/lib) +TESTFRAMEWORK_FLAGS := \ + -DSOL_TEST \ + -isystem $(LOCAL_PATH)../dependencies/criterion/include \ + -L $(LOCAL_PATH)../dependencies/criterion/lib \ + -rpath $(TESTFRAMEWORK_RPATH) \ + -lcriterion \ + +MACOS_ADJUST_TEST_DYLIB := \ +$(if $(filter $(OS),Darwin),\ + $(_@)install_name_tool -change libcriterion.3.dylib $(TESTFRAMEWORK_RPATH)/libcriterion.3.dylib, \ + : \ +) + +TEST_C_FLAGS := \ + $(C_FLAGS) \ + $(TESTFRAMEWORK_FLAGS) \ + +TEST_CXX_FLAGS := \ + $(CXX_FLAGS) \ + $(TESTFRAMEWORK_FLAGS) \ + +help: + @echo '' + @echo 'SBF Program makefile' + @echo '' + @echo 'This makefile will build SBF Programs from C or C++ source files into ELFs' + @echo '' + @echo 'Assumptions:' + @echo ' - Programs are located in the source directory: $(SRC_DIR)/' + @echo ' - Programs are named by their directory name (eg. directory name:src/foo/ -> program name:foo)' + @echo ' - Tests are located in their corresponding program directory and must begin with "test_"' + @echo ' - Output files will be placed in the directory: $(OUT_DIR)' + @echo '' + @echo 'User settings' + @echo ' - The following settings are overridable on the command line, default values shown:' + @echo ' - Show commands while building: V=1' + @echo ' V=$(V)' + @echo ' - List of include directories:' + @echo ' INC_DIRS=$(INC_DIRS)' + @echo ' - List of system include directories:' + @echo ' SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)' + @echo ' - List of standard library include directories:' + @echo ' STD_INC_DIRS=$(STD_INC_DIRS)' + @echo ' - List of standard library archive directories:' + @echo ' STD_LIB_DIRS=$(STD_LIB_DIRS)' + @echo ' - Location of source directories:' + @echo ' SRC_DIR=$(SRC_DIR)' + @echo ' - Location to place output files:' + @echo ' OUT_DIR=$(OUT_DIR)' + @echo ' - Location of LLVM:' + @echo ' LLVM_DIR=$(LLVM_DIR)' + @echo '' + @echo 'Usage:' + @echo ' - make help - This help message' + @echo ' - make all - Build all the programs and tests, run the tests' + @echo ' - make programs - Build all the programs' + @echo ' - make tests - Build and run all tests' + @echo ' - make dump_<program name> - Dump the contents of the program to stdout' + @echo ' - make readelf_<program name> - Display information about the ELF binary' + @echo ' - make <program name> - Build a single program by name' + @echo ' - make <test name> - Build and run a single test by name' + @echo '' + @echo 'Available programs:' + $(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n)) + @echo '' + @echo 'Available tests:' + $(foreach name, $(TEST_NAMES), @echo ' - $(name)'$(\n)) + @echo '' + @echo 'Example:' + @echo ' - Assuming a program named foo (src/foo/foo.c)' + @echo ' - make foo' + @echo ' - make dump_foo' + @echo '' + +define C_RULE +$1: $2 + @echo "[cc] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CC) $(SBF_C_FLAGS) -o $1 -c $2 +endef + +define CC_RULE +$1: $2 + @echo "[cxx] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CXX) $(SBF_CXX_FLAGS) -o $1 -c $2 +endef + +define D_RULE +$1: $2 $(LOCAL_PATH)/sbf.mk + @echo "[GEN] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CC) -M -MT '$(basename $1).o' $(SBF_C_FLAGS) $2 | sed 's,\($(basename $1)\)\.o[ :]*,\1.o $1 : ,g' > $1 +endef + +define DXX_RULE +$1: $2 $(LOCAL_PATH)/sbf.mk + @echo "[GEN] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CXX) -M -MT '$(basename $1).o' $(SBF_CXX_FLAGS) $2 | sed 's,\($(basename $1)\)\.o[ :]*,\1.o $1 : ,g' > $1 +endef + +define O_RULE +$1: $2 + @echo "[llc] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(LLC) $(SBF_LLC_FLAGS) -o $1 $2 +endef + +define SO_RULE +$1: $2 + @echo "[lld] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(LLD) $(SBF_LLD_FLAGS) -o $1 $2 $(COMPILER_RT_DIR)/libcompiler_builtins-*.rlib +ifeq (,$(wildcard $(subst .so,-keypair.json,$1)))
+ $(_@)solana-keygen new --no-passphrase --silent -o $(subst .so,-keypair.json,$1) +endif + @echo To deploy this program: + @echo $$$$ solana program deploy $(abspath $1) +endef + +define TEST_C_RULE +$1: $2 + @echo "[test cc] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CC) $(TEST_C_FLAGS) -o $1 $2 + $(_@)$(MACOS_ADJUST_TEST_DYLIB) $1 +endef + +define TEST_CC_RULE +$1: $2 + @echo "[test cxx] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CXX) $(TEST_CXX_FLAGS) -o $1 $2 + $(_@)$(MACOS_ADJUST_TEST_DYLIB) $1 +endef + +define TEST_D_RULE +$1: $2 $(LOCAL_PATH)/sbf.mk + @echo "[GEN] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CC) -M -MT '$(basename $1)' $(TEST_C_FLAGS) $2 | sed 's,\($(basename $1)\)[ :]*,\1 $1 : ,g' > $1 +endef + +define TEST_DXX_RULE +$1: $2 $(LOCAL_PATH)/sbf.mk + @echo "[GEN] $1 ($2)" + $(_@)mkdir -p $(dir $1) + $(_@)$(CXX) -M -MT '$(basename $1)' $(TEST_CXX_FLAGS) $2 | sed 's,\($(basename $1)\)[ :]*,\1 $1 : ,g' > $1 +endef + +define TEST_EXEC_RULE +$1: $2 + LD_LIBRARY_PATH=$(TESTFRAMEWORK_RPATH) \ + $2$(\n) +endef + +.PHONY: $(INSTALL_SH) +$(INSTALL_SH): + $(_@)$(INSTALL_SH) + +PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*))) + +define \n + + +endef + +all: programs tests + +$(foreach PROGRAM, $(PROGRAM_NAMES), \ + $(eval -include $(wildcard $(OUT_DIR)/$(PROGRAM)/*.d)) \ + \ + $(eval $(PROGRAM): %: $(addprefix $(OUT_DIR)/, %.so)) \ + $(eval $(PROGRAM)_SRCS := \ + $(addprefix $(SRC_DIR)/$(PROGRAM)/, \ + $(filter-out $(TEST_PREFIX)%,$(notdir $(wildcard $(SRC_DIR)/$(PROGRAM)/*.c $(SRC_DIR)/$(PROGRAM)/*.cc))))) \ + $(eval $(PROGRAM)_OBJS := $(subst $(SRC_DIR), $(OUT_DIR), \ + $(patsubst %.c,%.o, \ + $(patsubst %.cc,%.o,$($(PROGRAM)_SRCS))))) \ + $(eval $($(PROGRAM)_SRCS): $(INSTALL_SH)) \ + $(eval $(call SO_RULE,$(OUT_DIR)/$(PROGRAM).so,$($(PROGRAM)_OBJS))) \ + $(foreach _,$(filter %.c,$($(PROGRAM)_SRCS)), \ + $(eval $(call D_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%.d)),$_)) \ + $(eval $(call C_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%.o)),$_))) \ + $(foreach _,$(filter %.cc,$($(PROGRAM)_SRCS)), \ + $(eval $(call DXX_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%.d)),$_)) \ + $(eval $(call CC_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%.o)),$_))) \ + \ + $(eval TESTS := $(notdir $(basename $(wildcard $(SRC_DIR)/$(PROGRAM)/$(TEST_PREFIX)*.c)))) \ + $(eval $(TESTS) : %: $(addprefix $(OUT_DIR)/$(PROGRAM)/, %)) \ + $(eval TEST_NAMES := $(TEST_NAMES) $(TESTS)) \ + $(foreach TEST, $(TESTS), \ + $(eval $(TEST)_SRCS := \ + $(addprefix $(SRC_DIR)/$(PROGRAM)/, \ + $(notdir $(wildcard $(SRC_DIR)/$(PROGRAM)/$(TEST).c $(SRC_DIR)/$(PROGRAM)/$(TEST).cc)))) \ + $(eval $($(TEST)_SRCS): $(INSTALL_SH)) \ + $(foreach _,$(filter %.c,$($(TEST)_SRCS)), \ + $(eval $(call TEST_D_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%.d)),$_)) \ + $(eval $(call TEST_C_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%)),$_))) \ + $(foreach _,$(filter %.cc, $($(TEST)_SRCS)), \ + $(eval $(call TEST_DXX_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%.d)),$_)) \ + $(eval $(call TEST_CC_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%)),$_))) \ + $(eval $(call TEST_EXEC_RULE,$(TEST),$(addprefix $(OUT_DIR)/$(PROGRAM)/, $(TEST)))) \ + ) \ +) + +.PHONY: $(PROGRAM_NAMES) +programs: $(PROGRAM_NAMES) + +.PHONY: $(TEST_NAMES) +tests: $(TEST_NAMES) + +dump_%: % + $(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .so, $<)) + +readelf_%: % + $(_@)$(READ_ELF) $(READ_ELF_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .so, $<)) + +clean: + rm -rf $(OUT_DIR) diff --git a/sdk/sbf/env.sh b/sdk/sbf/env.sh new 
file mode 100644 index 00000000000000..63e57e6d51f980 --- /dev/null +++ b/sdk/sbf/env.sh @@ -0,0 +1,16 @@ +# +# Configures the SBF SDK environment +# + +if [ -z "$sbf_sdk" ]; then + sbf_sdk=. +fi + +# Ensure the sdk is installed +"$sbf_sdk"/scripts/install.sh + +# Use the SDK's version of llvm to build the compiler-builtins for SBF +export CC="$sbf_sdk/dependencies/sbf-tools/llvm/bin/clang" +export AR="$sbf_sdk/dependencies/sbf-tools/llvm/bin/llvm-ar" +export OBJDUMP="$sbf_sdk/dependencies/sbf-tools/llvm/bin/llvm-objdump" +export OBJCOPY="$sbf_sdk/dependencies/sbf-tools/llvm/bin/llvm-objcopy" diff --git a/sdk/sbf/scripts/dump.sh b/sdk/sbf/scripts/dump.sh new file mode 100755 index 00000000000000..ddd776f2b5ba3a --- /dev/null +++ b/sdk/sbf/scripts/dump.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +sbf_sdk=$(cd "$(dirname "$0")/.." && pwd) +# shellcheck source=sdk/sbf/env.sh +source "$sbf_sdk"/env.sh + +so=$1 +dump=$2 +if [[ -z $so ]] || [[ -z $dump ]]; then + echo "Usage: $0 sbf-program.so dump.txt" >&2 + exit 1 +fi + +if [[ ! -r $so ]]; then + echo "Error: File not found or not readable: $so" >&2 + exit 1 +fi + +if ! command -v rustfilt > /dev/null; then + echo "Error: rustfilt not found. It can be installed by running: cargo install rustfilt" >&2 + exit 1 +fi + +set -e +out_dir=$(dirname "$dump") +if [[ ! -d $out_dir ]]; then + mkdir -p "$out_dir" +fi +dump_mangled=$dump.mangled + +( + set -ex + ls -la "$so" > "$dump_mangled" + "$sbf_sdk"/dependencies/sbf-tools/llvm/bin/llvm-readelf -aW "$so" >>"$dump_mangled" + "$OBJDUMP" --print-imm-hex --source --disassemble "$so" >> "$dump_mangled" + sed s/://g < "$dump_mangled" | rustfilt > "$dump" +) +rm -f "$dump_mangled" + +if [[ ! -f "$dump" ]]; then + echo "Error: Failed to create $dump" >&2 + exit 1 +fi + +echo >&2 +echo "Wrote $dump" >&2 diff --git a/sdk/sbf/scripts/install.sh b/sdk/sbf/scripts/install.sh new file mode 100755 index 00000000000000..d91164c92ec465 --- /dev/null +++ b/sdk/sbf/scripts/install.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +mkdir -p "$(dirname "$0")"/../dependencies +cd "$(dirname "$0")"/../dependencies + +unameOut="$(uname -s)" +case "${unameOut}" in + Linux*) + criterion_suffix= + machine=linux;; + Darwin*) + criterion_suffix= + machine=osx;; + MINGW*) + criterion_suffix=-mingw + machine=windows;; + *) + criterion_suffix= + machine=linux +esac + +download() { + declare url="$1/$2/$3" + declare filename=$3 + declare wget_args=( + "$url" -O "$filename" + "--progress=dot:giga" + "--retry-connrefused" + "--read-timeout=30" + ) + declare curl_args=( + -L "$url" -o "$filename" + ) + if hash wget 2>/dev/null; then + wget_or_curl="wget ${wget_args[*]}" + elif hash curl 2>/dev/null; then + wget_or_curl="curl ${curl_args[*]}" + else + echo "Error: Neither curl nor wget was found" >&2 + return 1 + fi + + set -x + if $wget_or_curl; then + tar --strip-components 1 -jxf "$filename" || return 1 + { set +x; } 2>/dev/null + rm -rf "$filename" + return 0 + fi + return 1 +} + +get() { + declare version=$1 + declare dirname=$2 + declare job=$3 + declare cache_root=~/.cache/solana + declare cache_dirname="$cache_root/$version/$dirname" + declare cache_partial_dirname="$cache_dirname"_partial + + if [[ -r $cache_dirname ]]; then + ln -sf "$cache_dirname" "$dirname" || return 1 + return 0 + fi + + rm -rf "$cache_partial_dirname" || return 1 + mkdir -p "$cache_partial_dirname" || return 1 + pushd "$cache_partial_dirname" + + if $job; then + popd + mv "$cache_partial_dirname" "$cache_dirname" || return 1 + ln -sf "$cache_dirname" "$dirname" ||
return 1 + return 0 + fi + popd + return 1 +} + +# Install Criterion +if [[ $machine == "linux" ]]; then + version=v2.3.3 +else + version=v2.3.2 +fi +if [[ ! -e criterion-$version.md || ! -e criterion ]]; then + ( + set -e + rm -rf criterion* + job="download \ + https://github.com/Snaipe/Criterion/releases/download \ + $version \ + criterion-$version-$machine$criterion_suffix-x86_64.tar.bz2 \ + criterion" + get $version criterion "$job" + ) + exitcode=$? + if [[ $exitcode -ne 0 ]]; then + exit 1 + fi + touch criterion-$version.md +fi + +# Install Rust-SBF +version=v1.29 +if [[ ! -e sbf-tools-$version.md || ! -e sbf-tools ]]; then + ( + set -e + rm -rf sbf-tools* + job="download \ + https://github.com/solana-labs/sbf-tools/releases/download \ + $version \ + solana-sbf-tools-$machine.tar.bz2 \ + sbf-tools" + get $version sbf-tools "$job" + ) + exitcode=$? + if [[ $exitcode -ne 0 ]]; then + exit 1 + fi + touch sbf-tools-$version.md + set -ex + ./sbf-tools/rust/bin/rustc --version + ./sbf-tools/rust/bin/rustc --print sysroot + set +e + rustup toolchain uninstall sbf + set -e + rustup toolchain link sbf sbf-tools/rust +fi + +exit 0 diff --git a/sdk/sbf/scripts/objcopy.sh b/sdk/sbf/scripts/objcopy.sh new file mode 100755 index 00000000000000..d27b606a4af7c5 --- /dev/null +++ b/sdk/sbf/scripts/objcopy.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +sbf_sdk=$(cd "$(dirname "$0")/.." && pwd) +# shellcheck source=sdk/sbf/env.sh +source "$sbf_sdk"/env.sh +exec "$sbf_sdk"/dependencies/sbf-tools/llvm/bin/llvm-objcopy "$@" diff --git a/sdk/sbf/scripts/package.sh b/sdk/sbf/scripts/package.sh new file mode 100755 index 00000000000000..08dc9ca496af03 --- /dev/null +++ b/sdk/sbf/scripts/package.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -ex + +cd "$(dirname "$0")"/../../.. + +echo --- Creating sbf-sdk tarball + +rm -rf sbf-sdk.tar.bz2 sbf-sdk/ +mkdir sbf-sdk/ +cp LICENSE sbf-sdk/ + +( + ci/crate-version.sh sdk/Cargo.toml + git rev-parse HEAD +) > sbf-sdk/version.txt + +cp -a sdk/sbf/* sbf-sdk/ + +tar jvcf sbf-sdk.tar.bz2 sbf-sdk/ diff --git a/sdk/sbf/scripts/strip.sh b/sdk/sbf/scripts/strip.sh new file mode 100755 index 00000000000000..9c20ef0f9974b0 --- /dev/null +++ b/sdk/sbf/scripts/strip.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +so=$1 +if [[ ! -r $so ]]; then + echo "Error: file not found: $so" + exit 1 +fi +so_stripped=$2 +if [[ -z $so_stripped ]]; then + echo "Usage: $0 unstripped.so stripped.so" + exit 1 +fi + +sbf_sdk=$(cd "$(dirname "$0")/.." && pwd) +# shellcheck source=sdk/sbf/env.sh +source "$sbf_sdk"/env.sh + +set -e +out_dir=$(dirname "$so_stripped") +if [[ ! 
-d $out_dir ]]; then + mkdir -p "$out_dir" +fi +"$sbf_sdk"/dependencies/sbf-tools/llvm/bin/llvm-objcopy --strip-all "$so" "$so_stripped" From 03245736673d4736425e90c2302e24b626307289 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Fri, 7 Oct 2022 10:36:22 -0500 Subject: [PATCH 31/65] report additional transaction errors to metrics (#28285) --- core/src/leader_slot_banking_stage_metrics.rs | 17 +++++++ runtime/src/bank.rs | 26 ++++++++-- runtime/src/transaction_error_metrics.rs | 47 +++++++++++++++++++ 3 files changed, 86 insertions(+), 4 deletions(-) diff --git a/core/src/leader_slot_banking_stage_metrics.rs b/core/src/leader_slot_banking_stage_metrics.rs index a189efde155b31..ed556991e1d560 100644 --- a/core/src/leader_slot_banking_stage_metrics.rs +++ b/core/src/leader_slot_banking_stage_metrics.rs @@ -109,6 +109,11 @@ struct LeaderSlotPacketCountMetrics { // `self.retrayble_errored_transaction_count`. account_lock_throttled_transactions_count: u64, + // total number of transactions that were excluded from the block because their write + // account locks exceed the limit. + // These transactions are not retried. + account_locks_limit_throttled_transactions_count: u64, + // total number of transactions that were excluded from the block because they were too expensive // according to the cost model. These transactions are added back to the buffered queue and are // already counted in `self.retrayble_errored_transaction_count`. @@ -207,6 +212,11 @@ impl LeaderSlotPacketCountMetrics { self.account_lock_throttled_transactions_count as i64, i64 ), + ( + "account_locks_limit_throttled_transactions_count", + self.account_locks_limit_throttled_transactions_count as i64, + i64 + ), ( "cost_model_throttled_transactions_count", self.cost_model_throttled_transactions_count as i64, @@ -459,6 +469,13 @@ impl LeaderSlotMetricsTracker { error_counters.account_in_use as u64 ); + saturating_add_assign!( + leader_slot_metrics + .packet_count_metrics + .account_locks_limit_throttled_transactions_count, + error_counters.too_many_account_locks as u64 + ); + saturating_add_assign!( leader_slot_metrics .packet_count_metrics diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b9cb13f9e87b5d..e729f7dadb4ecc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4542,14 +4542,32 @@ impl Bank { .iter() .enumerate() .filter_map(|(index, res)| match res { + // following are retryable errors Err(TransactionError::AccountInUse) => { error_counters.account_in_use += 1; Some(index) } - Err(TransactionError::WouldExceedMaxBlockCostLimit) - | Err(TransactionError::WouldExceedMaxVoteCostLimit) - | Err(TransactionError::WouldExceedMaxAccountCostLimit) - | Err(TransactionError::WouldExceedAccountDataBlockLimit) => Some(index), + Err(TransactionError::WouldExceedMaxBlockCostLimit) => { + error_counters.would_exceed_max_block_cost_limit += 1; + Some(index) + } + Err(TransactionError::WouldExceedMaxVoteCostLimit) => { + error_counters.would_exceed_max_vote_cost_limit += 1; + Some(index) + } + Err(TransactionError::WouldExceedMaxAccountCostLimit) => { + error_counters.would_exceed_max_account_cost_limit += 1; + Some(index) + } + Err(TransactionError::WouldExceedAccountDataBlockLimit) => { + error_counters.would_exceed_account_data_block_limit += 1; + Some(index) + } + // following are non-retryable errors + Err(TransactionError::TooManyAccountLocks) => { + error_counters.too_many_account_locks += 1; + None + } Err(_) => None, Ok(_) => None, }) diff --git 
a/runtime/src/transaction_error_metrics.rs b/runtime/src/transaction_error_metrics.rs index deabd2cdc54f25..baa25a07364839 100644 --- a/runtime/src/transaction_error_metrics.rs +++ b/runtime/src/transaction_error_metrics.rs @@ -4,6 +4,7 @@ use solana_sdk::{clock::Slot, saturating_add_assign}; pub struct TransactionErrorMetrics { pub total: usize, pub account_in_use: usize, + pub too_many_account_locks: usize, pub account_loaded_twice: usize, pub account_not_found: usize, pub blockhash_not_found: usize, @@ -18,6 +19,10 @@ pub struct TransactionErrorMetrics { pub not_allowed_during_cluster_maintenance: usize, pub invalid_writable_account: usize, pub invalid_rent_paying_account: usize, + pub would_exceed_max_block_cost_limit: usize, + pub would_exceed_max_account_cost_limit: usize, + pub would_exceed_max_vote_cost_limit: usize, + pub would_exceed_account_data_block_limit: usize, } impl TransactionErrorMetrics { @@ -28,6 +33,7 @@ impl TransactionErrorMetrics { pub fn accumulate(&mut self, other: &TransactionErrorMetrics) { saturating_add_assign!(self.total, other.total); saturating_add_assign!(self.account_in_use, other.account_in_use); + saturating_add_assign!(self.too_many_account_locks, other.too_many_account_locks); saturating_add_assign!(self.account_loaded_twice, other.account_loaded_twice); saturating_add_assign!(self.account_not_found, other.account_not_found); saturating_add_assign!(self.blockhash_not_found, other.blockhash_not_found); @@ -54,6 +60,22 @@ impl TransactionErrorMetrics { self.invalid_rent_paying_account, other.invalid_rent_paying_account ); + saturating_add_assign!( + self.would_exceed_max_block_cost_limit, + other.would_exceed_max_block_cost_limit + ); + saturating_add_assign!( + self.would_exceed_max_account_cost_limit, + other.would_exceed_max_account_cost_limit + ); + saturating_add_assign!( + self.would_exceed_max_vote_cost_limit, + other.would_exceed_max_vote_cost_limit + ); + saturating_add_assign!( + self.would_exceed_account_data_block_limit, + other.would_exceed_account_data_block_limit + ); } pub fn report(&self, id: u32, slot: Slot) { @@ -63,6 +85,11 @@ impl TransactionErrorMetrics { ("slot", slot as i64, i64), ("total", self.total as i64, i64), ("account_in_use", self.account_in_use as i64, i64), + ( + "too_many_account_locks", + self.too_many_account_locks as i64, + i64 + ), ( "account_loaded_twice", self.account_loaded_twice as i64, @@ -105,6 +132,26 @@ impl TransactionErrorMetrics { self.invalid_rent_paying_account as i64, i64 ), + ( + "would_exceed_max_block_cost_limit", + self.would_exceed_max_block_cost_limit as i64, + i64 + ), + ( + "would_exceed_max_account_cost_limit", + self.would_exceed_max_account_cost_limit as i64, + i64 + ), + ( + "would_exceed_max_vote_cost_limit", + self.would_exceed_max_vote_cost_limit as i64, + i64 + ), + ( + "would_exceed_account_data_block_limit", + self.would_exceed_account_data_block_limit as i64, + i64 + ), ); } } From a1fe8dd444c15e8e5b38a2fed4851a72aa4de43e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 7 Oct 2022 08:55:01 -0700 Subject: [PATCH 32/65] add info for load race condition asserts (#28277) --- runtime/src/accounts_db.rs | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index ed41c529b4f176..4331acd44128d5 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -5110,22 +5110,35 @@ impl AccountsDb { // Notice the subtle `?` at previous line, we bail out pretty early if 
missing. if new_slot == slot && new_storage_location.is_store_id_equal(&storage_location) { - // Considering that we're failed to get accessor above and further that + inc_new_counter_info!("retry_to_get_account_accessor-panic", 1); + let message = format!( + "Bad index entry detected ({}, {}, {:?}, {:?}, {:?}, {:?})", + pubkey, + slot, + storage_location, + load_hint, + new_storage_location, + self.accounts_index.get_account_read_entry(pubkey) + ); + // Considering that we've failed to get accessor above and further that // the index still returned the same (slot, store_id) tuple, offset must be same // too. - assert!(new_storage_location.is_offset_equal(&storage_location)); + assert!( + new_storage_location.is_offset_equal(&storage_location), + "{message}" + ); // If the entry was missing from the cache, that means it must have been flushed, // and the accounts index is always updated before cache flush, so store_id must // not indicate being cached at this point. - assert!(!new_storage_location.is_cached()); + assert!(!new_storage_location.is_cached(), "{message}"); // If this is not a cache entry, then this was a minor fork slot // that had its storage entries cleaned up by purge_slots() but hasn't been // cleaned yet. That means this must be rpc access and not replay/banking at the // very least. Note that purge shouldn't occur even for RPC as caller must hold all // of ancestor slots.. - assert_eq!(load_hint, LoadHint::Unspecified); + assert_eq!(load_hint, LoadHint::Unspecified, "{message}"); // Everything being assert!()-ed, let's panic!() here as it's an error condition // after all.... @@ -5135,10 +5148,7 @@ impl AccountsDb { // first of all. // For details, see the comment in AccountIndex::do_checked_scan_accounts(), // which is referring back here. 
- panic!( - "Bad index entry detected ({}, {}, {:?}, {:?})", - pubkey, slot, storage_location, load_hint - ); + panic!("{message}"); } else if fallback_to_slow_path { // the above bad-index-entry check must have been performed first to retain the same // behavior From 16853acf354ea855fbf1aa632304ff58689fba58 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 7 Oct 2022 09:19:34 -0700 Subject: [PATCH 33/65] log adding root every 10s (#28280) --- runtime/src/accounts_db.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 4331acd44128d5..9f84829bf921a6 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1193,6 +1193,8 @@ pub struct AccountsDb { /// debug feature to scan every append vec and verify refcounts are equal exhaustively_verify_refcounts: bool, + last_add_root_log: AtomicInterval, + /// the full accounts hash calculation as of a predetermined block height 'N' /// to be included in the bank hash at a predetermined block height 'M' /// The cadence is once per epoch, all nodes calculate a full accounts hash as of a known slot calculated using 'N' @@ -2175,6 +2177,7 @@ impl AccountsDb { num_hash_scan_passes, log_dead_slots: AtomicBool::new(true), exhaustively_verify_refcounts: false, + last_add_root_log: AtomicInterval::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), } } @@ -8509,6 +8512,10 @@ impl AccountsDb { } store_time.stop(); + if self.last_add_root_log.should_update(10_000) { + datapoint_info!("add_root", ("root", slot, i64)); + } + AccountsAddRootTiming { index_us: index_time.as_us(), cache_us: cache_time.as_us(),
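The new `last_add_root_log` field rate-limits the hot `add_root` path so the datapoint fires at most once per ten seconds (`should_update(10_000)` takes milliseconds). A minimal sketch of that throttling pattern, using a simplified stand-in rather than solana_sdk's real `AtomicInterval`:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};

/// Simplified stand-in for AtomicInterval: returns true at most once per
/// `interval_ms`, so a hot path can emit a log/datapoint periodically.
#[derive(Default)]
struct Interval(AtomicU64);

impl Interval {
    fn should_update(&self, interval_ms: u64) -> bool {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64;
        let last = self.0.load(Ordering::Relaxed);
        now.saturating_sub(last) > interval_ms
            && self
                .0
                .compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
    }
}

fn main() {
    let interval = Interval::default();
    for slot in 0..3u64 {
        if interval.should_update(10_000) {
            println!("add_root root={slot}"); // stands in for datapoint_info!
        }
    }
}
```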
From 50985f79a1be9734f7ff3d549369c1077be817e8 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Fri, 7 Oct 2022 11:50:57 -0500 Subject: [PATCH 34/65] Correctly mark packets as forwarded (#28161) Only mark packets accepted for forwarding as `forwarded` --- core/src/banking_stage.rs | 45 ++++++++++++++------------ core/src/unprocessed_packet_batches.rs | 18 +++++++++++ 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 508818d1209b02..70105973de206e 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1014,11 +1014,7 @@ impl BankingStage { } }); - if hold { - for deserialized_packet in buffered_packet_batches.iter_mut() { - deserialized_packet.forwarded = true; - } - } else { + if !hold { slot_metrics_tracker.increment_cleared_from_buffer_after_forward_count( filter_forwarding_result.total_forwardable_packets as u64, ); @@ -1110,7 +1106,7 @@ impl BankingStage { } } - accepting_packets = Self::add_filtered_packets_to_forward_buffer( + let accepted_packet_indexes = Self::add_filtered_packets_to_forward_buffer( forward_buffer, &packets_to_process, &sanitized_transactions, @@ -1118,6 +1114,14 @@ impl BankingStage { &forwardable_transaction_indexes, &mut dropped_tx_before_forwarding_count, ); + accepting_packets = + accepted_packet_indexes.len() == forwardable_transaction_indexes.len(); + + UnprocessedPacketBatches::mark_accepted_packets_as_forwarded( + buffered_packet_batches, + &packets_to_process, + &accepted_packet_indexes, + ); Self::collect_retained_packets( buffered_packet_batches, @@ -1275,37 +1279,38 @@ } /// try to add filtered forwardable and valid packets to forward buffer; -/// returns if forward_buffer is still accepting packets, and how many packets added. + /// returns vector of packet indexes that were accepted for forwarding. fn add_filtered_packets_to_forward_buffer( forward_buffer: &mut ForwardPacketBatchesByAccounts, packets_to_process: &[Arc<ImmutableDeserializedPacket>], transactions: &[SanitizedTransaction], transaction_to_packet_indexes: &[usize], - retained_transaction_indexes: &[usize], + forwardable_transaction_indexes: &[usize], dropped_tx_before_forwarding_count: &mut usize, - ) -> bool { + ) -> Vec<usize> { let mut added_packets_count: usize = 0; - let mut accepting_packets = true; - for retained_transaction_index in retained_transaction_indexes { - let sanitized_transaction = &transactions[*retained_transaction_index]; - let immutable_deserialized_packet = packets_to_process - [transaction_to_packet_indexes[*retained_transaction_index]] - .clone(); - accepting_packets = - forward_buffer.try_add_packet(sanitized_transaction, immutable_deserialized_packet); - if !accepting_packets { + let mut accepted_packet_indexes = Vec::with_capacity(transaction_to_packet_indexes.len()); + for forwardable_transaction_index in forwardable_transaction_indexes { + let sanitized_transaction = &transactions[*forwardable_transaction_index]; + let forwardable_packet_index = + transaction_to_packet_indexes[*forwardable_transaction_index]; + let immutable_deserialized_packet = + packets_to_process[forwardable_packet_index].clone(); + if !forward_buffer.try_add_packet(sanitized_transaction, immutable_deserialized_packet) + { break; } + accepted_packet_indexes.push(forwardable_packet_index); saturating_add_assign!(added_packets_count, 1); } // count the packets not being forwarded in this batch saturating_add_assign!( *dropped_tx_before_forwarding_count, - retained_transaction_indexes.len() - added_packets_count + forwardable_transaction_indexes.len() - added_packets_count ); - accepting_packets + accepted_packet_indexes } #[allow(clippy::too_many_arguments)] diff --git a/core/src/unprocessed_packet_batches.rs b/core/src/unprocessed_packet_batches.rs index 7b7ccb79632df5..a72ba3dffbf8d5 100644 --- a/core/src/unprocessed_packet_batches.rs +++ b/core/src/unprocessed_packet_batches.rs @@ -281,6 +281,24 @@ impl UnprocessedPacketBatches { .get(immutable_packet.message_hash()) .map_or(true, |p| p.forwarded) } + + pub fn mark_accepted_packets_as_forwarded( + buffered_packet_batches: &mut UnprocessedPacketBatches, + packets_to_process: &[Arc<ImmutableDeserializedPacket>], + accepted_packet_indexes: &[usize], + ) { + accepted_packet_indexes + .iter() + .for_each(|accepted_packet_index| { + let accepted_packet = packets_to_process[*accepted_packet_index].clone(); + if let Some(deserialized_packet) = buffered_packet_batches + .message_hash_to_transaction + .get_mut(accepted_packet.message_hash()) + { + deserialized_packet.forwarded = true; + } + }); + } } pub fn deserialize_packets<'a>( From 3781c0668f5059278a2afb9d6c60059631704930 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 7 Oct 2022 12:23:18 -0500 Subject: [PATCH 35/65] separate account locks validation from creating the locks (#28292) --- sdk/src/transaction/sanitized.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index c416c00b0de6ec..ee69eea7955f98 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -199,13 +199,8 @@ impl SanitizedTransaction { &self, tx_account_lock_limit: usize, ) -> Result<TransactionAccountLocks> { - if self.message.has_duplicates() { - Err(TransactionError::AccountLoadedTwice) - } else if
self.message.account_keys().len() > tx_account_lock_limit { - Err(TransactionError::TooManyAccountLocks) - } else { - Ok(self.get_account_locks_unchecked()) - } + Self::validate_account_locks(self.message(), tx_account_lock_limit)?; + Ok(self.get_account_locks_unchecked()) } /// Return the list of accounts that must be locked during processing this transaction. @@ -281,4 +276,18 @@ impl SanitizedTransaction { } Ok(()) } + + /// Validate a transaction message against locked accounts + fn validate_account_locks( + message: &SanitizedMessage, + tx_account_lock_limit: usize, + ) -> Result<()> { + if message.has_duplicates() { + Err(TransactionError::AccountLoadedTwice) + } else if message.account_keys().len() > tx_account_lock_limit { + Err(TransactionError::TooManyAccountLocks) + } else { + Ok(()) + } + } } From 00a18a962e4da7d8e604a7dd85d528080b7ef29a Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 7 Oct 2022 14:42:57 -0400 Subject: [PATCH 36/65] formats logs in bootstrap (#28291) --- validator/src/bootstrap.rs | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index b5fe8b136ab582..77965b8a44a487 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -846,10 +846,10 @@ where if is_any_same_slot_and_different_hash(full_snapshot_hash, known_snapshot_hashes.keys()) { warn!( - "Ignoring all snapshot hashes from node {} since we've seen a different full snapshot hash with this slot.\nfull snapshot hash: {:?}", - node, - full_snapshot_hash, - ); + "Ignoring all snapshot hashes from node {} since we've seen a different full snapshot hash with this slot.\nfull snapshot hash: {:?}", + node, + full_snapshot_hash, + ); debug!( "known full snapshot hashes: {:#?}", known_snapshot_hashes.keys(), @@ -1204,19 +1204,28 @@ fn download_snapshot( && known_validators.len() == 1 && bootstrap_config.only_known_rpc { - warn!("The snapshot download is too slow, throughput: {} < min speed {} bytes/sec, but will NOT abort \ - and try a different node as it is the only known validator and the --only-known-rpc flag \ - is set. \ - Abort count: {}, Progress detail: {:?}", - download_progress.last_throughput, minimal_snapshot_download_speed, - download_abort_count, download_progress); + warn!( + "The snapshot download is too slow, throughput: {} < min speed {} \ + bytes/sec, but will NOT abort and try a different node as it is the \ + only known validator and the --only-known-rpc flag is set. \ + Abort count: {}, Progress detail: {:?}", + download_progress.last_throughput, + minimal_snapshot_download_speed, + download_abort_count, + download_progress, + ); return true; // Do not abort download from the one-and-only known validator } } - warn!("The snapshot download is too slow, throughput: {} < min speed {} bytes/sec, will abort \ - and try a different node. Abort count: {}, Progress detail: {:?}", - download_progress.last_throughput, minimal_snapshot_download_speed, - download_abort_count, download_progress); + warn!( + "The snapshot download is too slow, throughput: {} < min speed {} \ + bytes/sec, will abort and try a different node. 
\ + Abort count: {}, Progress detail: {:?}", + download_progress.last_throughput, + minimal_snapshot_download_speed, + download_abort_count, + download_progress, + ); *download_abort_count += 1; false } else { From 624f5cfcd598a067042be251e45ac74286a5f035 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 7 Oct 2022 18:05:13 -0400 Subject: [PATCH 37/65] Add rocksdb multi_get_bytes() method (#28244) --- ledger/src/blockstore_db.rs | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 9877b90ef87b48..6bc426f169d744 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -1223,6 +1223,40 @@ where result } + pub fn multi_get_bytes(&self, keys: Vec<C::Index>) -> Vec<Result<Option<Vec<u8>>>> { + let rocks_keys: Vec<_> = keys.into_iter().map(|key| C::key(key)).collect(); + { + let ref_rocks_keys: Vec<_> = rocks_keys.iter().map(|k| &k[..]).collect(); + let is_perf_enabled = maybe_enable_rocksdb_perf( + self.column_options.rocks_perf_sample_interval, + &self.read_perf_status, + ); + let result = self + .backend + .multi_get_cf(self.handle(), ref_rocks_keys) + .into_iter() + .map(|r| match r { + Ok(opt) => match opt { + Some(pinnable_slice) => Ok(Some(pinnable_slice.as_ref().to_vec())), + None => Ok(None), + }, + Err(e) => Err(e), + }) + .collect::<Vec<Result<Option<Vec<u8>>>>>(); + if let Some(op_start_instant) = is_perf_enabled { + // use multi-get instead + report_rocksdb_read_perf( + C::NAME, + PERF_METRIC_OP_NAME_MULTI_GET, + &op_start_instant.elapsed(), + &self.column_options, + ); + } + + result + } + } + pub fn iter( &self, iterator_mode: IteratorMode,
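The signature above is the whole contract: one `Result<Option<Vec<u8>>>` per requested key, in order, with a missing key reported as `Ok(None)` rather than an error. A small self-contained sketch of that shape, using a plain `HashMap` as a hypothetical stand-in for the RocksDB-backed column:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for a LedgerColumn keyed by (slot, shred index):
// models the multi_get_bytes() contract of one Result<Option<bytes>> per key.
fn multi_get_bytes(
    store: &HashMap<(u64, u64), Vec<u8>>,
    keys: Vec<(u64, u64)>,
) -> Vec<Result<Option<Vec<u8>>, String>> {
    keys.into_iter()
        .map(|key| Ok(store.get(&key).cloned()))
        .collect()
}

fn main() {
    let mut store = HashMap::new();
    store.insert((3, 0), b"shred-a".to_vec());
    store.insert((3, 2), b"shred-b".to_vec());
    // One batched call; key (3, 1) is simply absent, not an error.
    for (i, res) in multi_get_bytes(&store, (0..3).map(|i| (3, i)).collect())
        .into_iter()
        .enumerate()
    {
        println!("index {i}: {:?}", res.map(|opt| opt.map(|b| b.len())));
    }
}
```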
From 60f6e24b76d53dc56f6defb1889effe21d7822d7 Mon Sep 17 00:00:00 2001 From: steviez Date: Sun, 9 Oct 2022 15:34:03 -0400 Subject: [PATCH 38/65] Make Blockstore::get_entries_in_data_block() use multi_get() (#28245) --- ledger/src/blockstore.rs | 84 ++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 43 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 2884074844fc1d..f3f3f520f0ac6a 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2889,53 +2889,51 @@ impl Blockstore { end_index: u32, slot_meta: Option<&SlotMeta>, ) -> Result<Vec<Entry>> { - let data_shred_cf = self.db.column::<cf::ShredData>(); + let keys: Vec<(Slot, u64)> = (start_index..=end_index) + .map(|index| (slot, u64::from(index))) + .collect(); - // Short circuit on first error - let data_shreds: Result<Vec<Shred>> = (start_index..=end_index) - .map(|i| { - data_shred_cf - .get_bytes((slot, u64::from(i))) - .and_then(|serialized_shred| { - if serialized_shred.is_none() { - if let Some(slot_meta) = slot_meta { - if slot > self.lowest_cleanup_slot() { - panic!( - "Shred with - slot: {}, - index: {}, - consumed: {}, - completed_indexes: {:?} - must exist if shred index was included in a range: {} {}", - slot, - i, - slot_meta.consumed, - slot_meta.completed_data_indexes, - start_index, - end_index - ); - } + let data_shreds: Result<Vec<Option<Vec<u8>>>> = self + .data_shred_cf + .multi_get_bytes(keys) + .into_iter() + .collect(); + let data_shreds = data_shreds?; + + let data_shreds: Result<Vec<Shred>> = data_shreds + .into_iter() + .enumerate() + .map(|(idx, shred_bytes)| { + if shred_bytes.is_none() { + if let Some(slot_meta) = slot_meta { + if slot > self.lowest_cleanup_slot() { + panic!( + "Shred with slot: {}, index: {}, consumed: {}, completed_indexes: {:?} \ + must exist if shred index was included in a range: {} {}", + slot, + idx, + slot_meta.consumed, + slot_meta.completed_data_indexes, + start_index, + end_index + ); } - return Err(BlockstoreError::InvalidShredData(Box::new( - bincode::ErrorKind::Custom(format!( - "Missing shred for slot {}, index {}", - slot, i - )), - ))); - } - - Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| { - BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( - format!( - "Could not reconstruct shred from shred payload: {:?}", - err - ), - ))) - }) + } + return Err(BlockstoreError::InvalidShredData(Box::new( + bincode::ErrorKind::Custom(format!( + "Missing shred for slot {}, index {}", + slot, idx + )), + ))); + } + Shred::new_from_serialized_shred(shred_bytes.unwrap()).map_err(|err| { + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + format!("Could not reconstruct shred from shred payload: {:?}", err), + ))) }) + }) + .collect(); let data_shreds = data_shreds?; let last_shred = data_shreds.last().unwrap(); assert!(last_shred.data_complete() || last_shred.last_in_slot()); From 1224c545d576492b915a8b256547675899d3b7ec Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Sun, 9 Oct 2022 16:42:59 -0700 Subject: [PATCH 39/65] Make FIFO related validator arguments public (#28282) #### Summary of Changes This PR makes two FIFO-related validator arguments public: --rocksdb-shred-compaction and --rocksdb-fifo-shred-storage-size. #### Test Plan * There are already ~26 validators that have been running FIFO on mainnet-beta for more than 30 days, and no issues have been reported so far. * Ran a validator with FIFO for days and observed it's able to consistently catch up and create new roots. --- validator/src/main.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index 60febdfb92608c..6ebaf43a8998d3 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1011,7 +1011,6 @@ pub fn main() { ) .arg( Arg::with_name("rocksdb_shred_compaction") - .hidden(true) .long("rocksdb-shred-compaction") .value_name("ROCKSDB_COMPACTION_STYLE") .takes_value(true) @@ -1026,7 +1025,6 @@ pub fn main() { ) .arg( Arg::with_name("rocksdb_fifo_shred_storage_size") - .hidden(true) .long("rocksdb-fifo-shred-storage-size") .value_name("SHRED_STORAGE_SIZE_BYTES") .takes_value(true)
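With the arguments un-hidden, operators can opt in directly from the command line. A hypothetical invocation (the storage-size value is an arbitrary example rather than a recommendation, and the remaining validator flags are elided):

```bash
# Hypothetical example: enable FIFO compaction for shreds with a ~500 GB cap.
solana-validator \
  --ledger /path/to/ledger \
  --rocksdb-shred-compaction fifo \
  --rocksdb-fifo-shred-storage-size 500000000000 \
  # ...remaining validator flags...
```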
From 4cbf59a5ddd31e4cbcd545e128b9e459cf56b036 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 10 Oct 2022 13:03:44 +0800 Subject: [PATCH 40/65] docs: fix exchanges integration (#28315) * fix description * get blocks use devnet data --- docs/src/integrations/exchange.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/src/integrations/exchange.md b/docs/src/integrations/exchange.md index faa5902315e243..7d4e741022da3f 100644 --- a/docs/src/integrations/exchange.md +++ b/docs/src/integrations/exchange.md @@ -222,15 +222,15 @@ Solana API node. passing the last block you have already processed as the start-slot parameter: ```bash -curl localhost:8899 -X POST -H "Content-Type: application/json" -d '{ +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ "jsonrpc": "2.0", "id": 1, "method": "getBlocks", - "params": [5] + "params": [160017005, 160017015] }' # Result -{"jsonrpc":"2.0","result":[5,6,8,9,11],"id":1} +{"jsonrpc":"2.0","result":[160017005,160017006,160017007,160017012,160017013,160017014,160017015],"id":1} ``` Not every slot produces a block, so there may be gaps in the sequence of integers. @@ -337,8 +337,8 @@ changes in every account without having to parse the entire transaction. They list the starting and ending balances of each account in [lamports](../terminology.md#lamport), indexed to the `accountKeys` list. For example, if the deposit address of interest is -`3M2b3tLji7rvscqrLAHMukYxDK2nB96Q9hwfV6QkdzBN`, this transaction represents a -transfer of 2769675090 - 1800195048 = 969,480,042 lamports = 0.969485042 SOL +`G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o`, this transaction represents a +transfer of 1040000000 - 1030000000 = 10,000,000 lamports = 0.01 SOL If you need more information about the transaction type or other specifics, you can request the block from RPC in binary format, and parse it using either our From 2fc8e533a2585ce96cb6b3a1a85917ac09a81865 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 10 Oct 2022 15:01:41 +0200 Subject: [PATCH 41/65] Refactor - Move `executor_cache` to program-runtime crate (#28322) * Moves CachedExecutors, related structs, consts and tests into the program-runtime crate. * Moves TransactionExecutor, related enum and type defs into executor_cache mod. --- Cargo.lock | 1 + program-runtime/Cargo.toml | 1 + program-runtime/src/executor_cache.rs | 637 ++++++++++++++++++++++++++ program-runtime/src/invoke_context.rs | 74 +-- program-runtime/src/lib.rs | 1 + programs/bpf/Cargo.lock | 1 + programs/bpf_loader/src/lib.rs | 3 +- runtime/src/accounts.rs | 2 +- runtime/src/bank.rs | 534 +-------------------- runtime/src/message_processor.rs | 3 +- 10 files changed, 652 insertions(+), 605 deletions(-) create mode 100644 program-runtime/src/executor_cache.rs diff --git a/Cargo.lock b/Cargo.lock index 5b17a1cabef298..43d921659fc3ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5852,6 +5852,7 @@ dependencies = [ "log", "num-derive", "num-traits", + "rand 0.7.3", "rustc_version 0.4.0", "serde", "solana-frozen-abi 1.15.0", diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index ae060e5fa24448..aae3ff0cde9431 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -20,6 +20,7 @@ libloading = "0.7.0" log = "0.4.14" num-derive = { version = "0.3" } num-traits = { version = "0.2" } +rand = "0.7.0" serde = { version = "1.0.129", features = ["derive", "rc"] } solana-frozen-abi = { path = "../frozen-abi", version = "=1.15.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.15.0" } diff --git a/program-runtime/src/executor_cache.rs b/program-runtime/src/executor_cache.rs new file mode 100644 index 00000000000000..ef3f23a8f9fb4b --- /dev/null +++ b/program-runtime/src/executor_cache.rs @@ -0,0 +1,637 @@ +use { + crate::invoke_context::InvokeContext, + log::*, + rand::Rng, + solana_sdk::{ + instruction::InstructionError, pubkey::Pubkey, saturating_add_assign, slot_history::Slot, + stake_history::Epoch, transaction_context::IndexOfAccount, + }, + std::{ + collections::HashMap, + fmt::Debug, + ops::Div, + sync::{ + atomic::{AtomicU64, Ordering::Relaxed}, + Arc, + }, + }, +}; + +/** Program executor */ hmm
and needs to be updated in the +/// executors cache +#[derive(Debug)] +pub struct TransactionExecutor { + pub(crate) executor: Arc<dyn Executor>, + status: TransactionExecutorStatus, +} + +impl TransactionExecutor { + /// Wraps an executor and tracks that it doesn't need to be updated in the + /// executors cache. + pub fn new_cached(executor: Arc<dyn Executor>) -> Self { + Self { + executor, + status: TransactionExecutorStatus::Cached, + } + } + + /// Wraps an executor and tracks that it needs to be updated in the + /// executors cache. + pub fn new_miss(executor: Arc<dyn Executor>) -> Self { + Self { + executor, + status: TransactionExecutorStatus::Missing, + } + } + + /// Wraps an executor and tracks that it needs to be updated in the + /// executors cache only if the transaction succeeded. + pub fn new_updated(executor: Arc<dyn Executor>) -> Self { + Self { + executor, + status: TransactionExecutorStatus::Updated, + } + } + + pub fn is_missing(&self) -> bool { + self.status == TransactionExecutorStatus::Missing + } + + pub fn is_updated(&self) -> bool { + self.status == TransactionExecutorStatus::Updated + } + + pub fn get(&self) -> Arc<dyn Executor> { + self.executor.clone() + } +} + +/// Capacity of `CachedExecutors` +pub const MAX_CACHED_EXECUTORS: usize = 256; + +/// An `Executor` and its statistics tracked in `CachedExecutors` +#[derive(Debug)] +pub struct CachedExecutorsEntry { + prev_epoch_count: u64, + epoch_count: AtomicU64, + executor: Arc<dyn Executor>, + pub hit_count: AtomicU64, +} + +impl Clone for CachedExecutorsEntry { + fn clone(&self) -> Self { + Self { + prev_epoch_count: self.prev_epoch_count, + epoch_count: AtomicU64::new(self.epoch_count.load(Relaxed)), + executor: self.executor.clone(), + hit_count: AtomicU64::new(self.hit_count.load(Relaxed)), + } + } +} + +/// LFU Cache of executors with single-epoch memory of usage counts +#[derive(Debug)] +pub struct CachedExecutors { + capacity: usize, + current_epoch: Epoch, + pub executors: HashMap<Pubkey, CachedExecutorsEntry>, + pub stats: Stats, +} + +impl Default for CachedExecutors { + fn default() -> Self { + Self { + capacity: MAX_CACHED_EXECUTORS, + current_epoch: Epoch::default(), + executors: HashMap::default(), + stats: Stats::default(), + } + } +} + +#[cfg(RUSTC_WITH_SPECIALIZATION)] +impl solana_frozen_abi::abi_example::AbiExample for CachedExecutors { + fn example() -> Self { + // Delegate AbiExample impl to Default before going deep and stuck with + // not easily impl-able Arc<dyn Executor> due to rust's coherence issue + // This is safe because CachedExecutors isn't serializable by definition.
+ Self::default() + } +} + +impl CachedExecutors { + pub fn new(max_capacity: usize, current_epoch: Epoch) -> Self { + Self { + capacity: max_capacity, + current_epoch, + executors: HashMap::new(), + stats: Stats::default(), + } + } + + pub fn new_from_parent_bank_executors( + parent_bank_executors: &CachedExecutors, + current_epoch: Epoch, + ) -> Self { + let executors = if parent_bank_executors.current_epoch == current_epoch { + parent_bank_executors.executors.clone() + } else { + parent_bank_executors + .executors + .iter() + .map(|(&key, entry)| { + let entry = CachedExecutorsEntry { + prev_epoch_count: entry.epoch_count.load(Relaxed), + epoch_count: AtomicU64::default(), + executor: entry.executor.clone(), + hit_count: AtomicU64::new(entry.hit_count.load(Relaxed)), + }; + (key, entry) + }) + .collect() + }; + + Self { + capacity: parent_bank_executors.capacity, + current_epoch, + executors, + stats: Stats::default(), + } + } + + pub fn get(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>> { + if let Some(entry) = self.executors.get(pubkey) { + self.stats.hits.fetch_add(1, Relaxed); + entry.epoch_count.fetch_add(1, Relaxed); + entry.hit_count.fetch_add(1, Relaxed); + Some(entry.executor.clone()) + } else { + self.stats.misses.fetch_add(1, Relaxed); + None + } + } + + pub fn put(&mut self, executors: &[(&Pubkey, Arc<dyn Executor>)]) { + let mut new_executors: Vec<_> = executors + .iter() + .filter_map(|(key, executor)| { + if let Some(mut entry) = self.remove(key) { + self.stats.replacements.fetch_add(1, Relaxed); + entry.executor = executor.clone(); + let _ = self.executors.insert(**key, entry); + None + } else { + self.stats.insertions.fetch_add(1, Relaxed); + Some((*key, executor)) + } + }) + .collect(); + + if !new_executors.is_empty() { + let mut counts = self + .executors + .iter() + .map(|(key, entry)| { + let count = entry + .prev_epoch_count + .saturating_add(entry.epoch_count.load(Relaxed)); + (key, count) + }) + .collect::<Vec<_>>(); + counts.sort_unstable_by_key(|(_, count)| *count); + + let primer_counts = Self::get_primer_counts(counts.as_slice(), new_executors.len()); + + if self.executors.len() >= self.capacity { + let mut least_keys = counts + .iter() + .take(new_executors.len()) + .map(|least| *least.0) + .collect::<Vec<_>>(); + for least_key in least_keys.drain(..) { + let _ = self.remove(&least_key); + self.stats + .evictions + .entry(least_key) + .and_modify(|c| saturating_add_assign!(*c, 1)) + .or_insert(1); + } + } + + for ((key, executor), primer_count) in new_executors.drain(..).zip(primer_counts) { + let entry = CachedExecutorsEntry { + prev_epoch_count: 0, + epoch_count: AtomicU64::new(primer_count), + executor: executor.clone(), + hit_count: AtomicU64::new(1), + }; + let _ = self.executors.insert(*key, entry); + } + } + } + + pub fn remove(&mut self, pubkey: &Pubkey) -> Option<CachedExecutorsEntry> { + let maybe_entry = self.executors.remove(pubkey); + if let Some(entry) = maybe_entry.as_ref() { + if entry.hit_count.load(Relaxed) == 1 { + self.stats.one_hit_wonders.fetch_add(1, Relaxed); + } + } + maybe_entry + } + + pub fn clear(&mut self) { + *self = CachedExecutors::default(); + } + + pub fn get_primer_count_upper_bound_inclusive(counts: &[(&Pubkey, u64)]) -> u64 { + const PRIMER_COUNT_TARGET_PERCENTILE: u64 = 85; + #[allow(clippy::assertions_on_constants)] + { + assert!(PRIMER_COUNT_TARGET_PERCENTILE <= 100); + } + // Executor use-frequencies are assumed to fit a Pareto distribution.
Choose an + // upper-bound for our primer count as the actual count at the target rank to avoid + // an upward bias + + let target_index = u64::try_from(counts.len().saturating_sub(1)) + .ok() + .and_then(|counts| { + let index = counts + .saturating_mul(PRIMER_COUNT_TARGET_PERCENTILE) + .div(100); // switch to u64::saturating_div once stable + usize::try_from(index).ok() + }) + .unwrap_or(0); + + counts + .get(target_index) + .map(|(_, count)| *count) + .unwrap_or(0) + } + + pub fn get_primer_counts(counts: &[(&Pubkey, u64)], num_counts: usize) -> Vec<u64> { + let max_primer_count = Self::get_primer_count_upper_bound_inclusive(counts); + let mut rng = rand::thread_rng(); + + (0..num_counts) + .map(|_| rng.gen_range(0, max_primer_count.saturating_add(1))) + .collect::<Vec<_>>() + } +} + +/// Statistics of the entire `CachedExecutors` +#[derive(Debug, Default)] +pub struct Stats { + pub hits: AtomicU64, + pub misses: AtomicU64, + pub evictions: HashMap<Pubkey, u64>, + pub insertions: AtomicU64, + pub replacements: AtomicU64, + pub one_hit_wonders: AtomicU64, +} + +impl Stats { + /// Logs the measurement values + pub fn submit(&self, slot: Slot) { + let hits = self.hits.load(Relaxed); + let misses = self.misses.load(Relaxed); + let insertions = self.insertions.load(Relaxed); + let replacements = self.replacements.load(Relaxed); + let one_hit_wonders = self.one_hit_wonders.load(Relaxed); + let evictions: u64 = self.evictions.values().sum(); + datapoint_info!( + "bank-executor-cache-stats", + ("slot", slot, i64), + ("hits", hits, i64), + ("misses", misses, i64), + ("evictions", evictions, i64), + ("insertions", insertions, i64), + ("replacements", replacements, i64), + ("one_hit_wonders", one_hit_wonders, i64), + ); + debug!( + "Executor Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, One-Hit-Wonders: {}", + hits, misses, evictions, insertions, replacements, one_hit_wonders, + ); + if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() { + let mut evictions = self.evictions.iter().collect::<Vec<_>>(); + evictions.sort_by_key(|e| e.1); + let evictions = evictions + .into_iter() + .rev() + .map(|(program_id, evictions)| { + format!(" {:<44} {}", program_id.to_string(), evictions) + }) + .collect::<Vec<_>>(); + let evictions = evictions.join("\n"); + trace!( + "Eviction Details:\n {:<44} {}\n{}", + "Program", + "Count", + evictions + ); + } + } +} + +#[allow(clippy::indexing_slicing)] +#[cfg(test)] +mod tests { + use { + super::*, crate::invoke_context::InvokeContext, solana_sdk::instruction::InstructionError, + }; + + #[derive(Debug)] + struct TestExecutor {} + impl Executor for TestExecutor { + fn execute( + &self, + _first_instruction_account: IndexOfAccount, + _invoke_context: &mut InvokeContext, + ) -> std::result::Result<(), InstructionError> { + Ok(()) + } + } + + #[test] + fn test_cached_executors() { + let key1 = solana_sdk::pubkey::new_rand(); + let key2 = solana_sdk::pubkey::new_rand(); + let key3 = solana_sdk::pubkey::new_rand(); + let key4 = solana_sdk::pubkey::new_rand(); + let executor: Arc<dyn Executor> = Arc::new(TestExecutor {}); + let mut cache = CachedExecutors::new(3, 0); + + cache.put(&[(&key1, executor.clone())]); + cache.put(&[(&key2, executor.clone())]); + cache.put(&[(&key3, executor.clone())]); + assert!(cache.get(&key1).is_some()); + assert!(cache.get(&key2).is_some()); + assert!(cache.get(&key3).is_some()); + + assert!(cache.get(&key1).is_some()); + assert!(cache.get(&key1).is_some()); + assert!(cache.get(&key2).is_some()); + cache.put(&[(&key4, executor.clone())]);
assert!(cache.get(&key4).is_some()); + let num_retained = [&key1, &key2, &key3] + .iter() + .filter_map(|key| cache.get(key)) + .count(); + assert_eq!(num_retained, 2); + + assert!(cache.get(&key4).is_some()); + assert!(cache.get(&key4).is_some()); + assert!(cache.get(&key4).is_some()); + cache.put(&[(&key3, executor.clone())]); + assert!(cache.get(&key3).is_some()); + let num_retained = [&key1, &key2, &key4] + .iter() + .filter_map(|key| cache.get(key)) + .count(); + assert_eq!(num_retained, 2); + } + + #[test] + fn test_cached_executor_eviction() { + let key1 = solana_sdk::pubkey::new_rand(); + let key2 = solana_sdk::pubkey::new_rand(); + let key3 = solana_sdk::pubkey::new_rand(); + let key4 = solana_sdk::pubkey::new_rand(); + let executor: Arc<dyn Executor> = Arc::new(TestExecutor {}); + let mut cache = CachedExecutors::new(3, 0); + assert!(cache.current_epoch == 0); + + cache.put(&[(&key1, executor.clone())]); + cache.put(&[(&key2, executor.clone())]); + cache.put(&[(&key3, executor.clone())]); + assert!(cache.get(&key1).is_some()); + assert!(cache.get(&key1).is_some()); + assert!(cache.get(&key1).is_some()); + + let mut cache = CachedExecutors::new_from_parent_bank_executors(&cache, 1); + assert!(cache.current_epoch == 1); + + assert!(cache.get(&key2).is_some()); + assert!(cache.get(&key2).is_some()); + assert!(cache.get(&key3).is_some()); + cache.put(&[(&key4, executor.clone())]); + + assert!(cache.get(&key4).is_some()); + let num_retained = [&key1, &key2, &key3] + .iter() + .filter_map(|key| cache.get(key)) + .count(); + assert_eq!(num_retained, 2); + + cache.put(&[(&key1, executor.clone())]); + cache.put(&[(&key3, executor.clone())]); + assert!(cache.get(&key1).is_some()); + assert!(cache.get(&key3).is_some()); + let num_retained = [&key2, &key4] + .iter() + .filter_map(|key| cache.get(key)) + .count(); + assert_eq!(num_retained, 1); + + cache = CachedExecutors::new_from_parent_bank_executors(&cache, 2); + assert!(cache.current_epoch == 2); + + cache.put(&[(&key3, executor.clone())]); + assert!(cache.get(&key3).is_some()); + } + + #[test] + fn test_cached_executors_evicts_smallest() { + let key1 = solana_sdk::pubkey::new_rand(); + let key2 = solana_sdk::pubkey::new_rand(); + let key3 = solana_sdk::pubkey::new_rand(); + let executor: Arc<dyn Executor> = Arc::new(TestExecutor {}); + let mut cache = CachedExecutors::new(2, 0); + + cache.put(&[(&key1, executor.clone())]); + for _ in 0..5 { + let _ = cache.get(&key1); + } + cache.put(&[(&key2, executor.clone())]); + // make key1's use-count for sure greater than key2's + let _ = cache.get(&key1); + + let mut entries = cache + .executors + .iter() + .map(|(k, v)| (*k, v.epoch_count.load(Relaxed))) + .collect::<Vec<_>>(); + entries.sort_by_key(|(_, v)| *v); + assert!(entries[0].1 < entries[1].1); + + cache.put(&[(&key3, executor.clone())]); + assert!(cache.get(&entries[0].0).is_none()); + assert!(cache.get(&entries[1].0).is_some()); + } + + #[test] + fn test_cached_executors_one_hit_wonder_counter() { + let mut cache = CachedExecutors::new(1, 0); + + let one_hit_wonder = Pubkey::new_unique(); + let popular = Pubkey::new_unique(); + let executor: Arc<dyn Executor> = Arc::new(TestExecutor {}); + + // make sure we're starting from where we think we are + assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 0); + + // add our one-hit-wonder + cache.put(&[(&one_hit_wonder, executor.clone())]); + assert_eq!(cache.executors[&one_hit_wonder].hit_count.load(Relaxed), 1); + // displace the one-hit-wonder with "popular program" + cache.put(&[(&popular, executor.clone())]);
assert_eq!(cache.executors[&popular].hit_count.load(Relaxed), 1); + + // one-hit-wonder counter incremented + assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 1); + + // make "popular program" popular + cache.get(&popular).unwrap(); + assert_eq!(cache.executors[&popular].hit_count.load(Relaxed), 2); + + // evict "popular program" + cache.put(&[(&one_hit_wonder, executor.clone())]); + assert_eq!(cache.executors[&one_hit_wonder].hit_count.load(Relaxed), 1); + + // one-hit-wonder counter not incremented + assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 1); + } + + #[test] + fn test_executor_cache_get_primer_count_upper_bound_inclusive() { + let pubkey = Pubkey::default(); + let v = []; + assert_eq!( + CachedExecutors::get_primer_count_upper_bound_inclusive(&v), + 0 + ); + let v = [(&pubkey, 1)]; + assert_eq!( + CachedExecutors::get_primer_count_upper_bound_inclusive(&v), + 1 + ); + let v = (0u64..10).map(|i| (&pubkey, i)).collect::<Vec<_>>(); + assert_eq!( + CachedExecutors::get_primer_count_upper_bound_inclusive(v.as_slice()), + 7 + ); + } + + #[test] + fn test_cached_executors_stats() { + #[derive(Debug, Default, PartialEq)] + struct ComparableStats { + hits: u64, + misses: u64, + evictions: HashMap<Pubkey, u64>, + insertions: u64, + replacements: u64, + one_hit_wonders: u64, + } + impl From<&Stats> for ComparableStats { + fn from(stats: &Stats) -> Self { + let Stats { + hits, + misses, + evictions, + insertions, + replacements, + one_hit_wonders, + } = stats; + ComparableStats { + hits: hits.load(Relaxed), + misses: misses.load(Relaxed), + evictions: evictions.clone(), + insertions: insertions.load(Relaxed), + replacements: replacements.load(Relaxed), + one_hit_wonders: one_hit_wonders.load(Relaxed), + } + } + } + + const CURRENT_EPOCH: Epoch = 0; + let mut cache = CachedExecutors::new(2, CURRENT_EPOCH); + let mut expected_stats = ComparableStats::default(); + + let program_id1 = Pubkey::new_unique(); + let program_id2 = Pubkey::new_unique(); + let executor: Arc<dyn Executor> = Arc::new(TestExecutor {}); + + // make sure we're starting from where we think we are + assert_eq!(ComparableStats::from(&cache.stats), expected_stats,); + + // insert some executors + cache.put(&[(&program_id1, executor.clone())]); + cache.put(&[(&program_id2, executor.clone())]); + expected_stats.insertions += 2; + assert_eq!(ComparableStats::from(&cache.stats), expected_stats); + + // replace a one-hit-wonder executor + cache.put(&[(&program_id1, executor.clone())]); + expected_stats.replacements += 1; + expected_stats.one_hit_wonders += 1; + assert_eq!(ComparableStats::from(&cache.stats), expected_stats); + + // hit some executors + cache.get(&program_id1); + cache.get(&program_id1); + cache.get(&program_id2); + expected_stats.hits += 3; + assert_eq!(ComparableStats::from(&cache.stats), expected_stats); + + // miss an executor + cache.get(&Pubkey::new_unique()); + expected_stats.misses += 1; + assert_eq!(ComparableStats::from(&cache.stats), expected_stats); + + // evict an executor + cache.put(&[(&Pubkey::new_unique(), executor.clone())]); + expected_stats.insertions += 1; + expected_stats.evictions.insert(program_id2, 1); + assert_eq!(ComparableStats::from(&cache.stats), expected_stats); + + // make sure stats are cleared in new_from_parent + assert_eq!( + ComparableStats::from( + &CachedExecutors::new_from_parent_bank_executors(&cache, CURRENT_EPOCH).stats + ), + ComparableStats::default() + ); + assert_eq!( + ComparableStats::from( + &CachedExecutors::new_from_parent_bank_executors(&cache, CURRENT_EPOCH + 1).stats + ), + ComparableStats::default() + ); + } +}
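The tests above exercise the cache in isolation; the `TransactionExecutor` wrapper is what ties it to transaction processing. A hedged sketch of the intended caller-side flow, with hypothetical names (`make_executor`, `tx_succeeded`), rather than the bank's actual wiring:

```rust
// Hypothetical sketch of the caller-side protocol: look up an executor,
// record a miss, and write it back to the shared cache only on success.
fn run_with_cache(
    cache: &mut CachedExecutors,
    program_id: &Pubkey,
    make_executor: impl Fn() -> Arc<dyn Executor>,
    tx_succeeded: bool,
) {
    let tx_executor = match cache.get(program_id) {
        Some(executor) => TransactionExecutor::new_cached(executor),
        None => TransactionExecutor::new_miss(make_executor()),
    };
    // ... the transaction would execute via tx_executor.get() here ...
    if tx_succeeded && (tx_executor.is_missing() || tx_executor.is_updated()) {
        cache.put(&[(program_id, tx_executor.get())]);
    }
}
```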
+            ComparableStats::default()
+        );
+    }
+}
diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs
index 5b080f30a1afae..5fabd643bf3ec1 100644
--- a/program-runtime/src/invoke_context.rs
+++ b/program-runtime/src/invoke_context.rs
@@ -2,6 +2,7 @@ use {
     crate::{
         accounts_data_meter::AccountsDataMeter,
         compute_budget::ComputeBudget,
+        executor_cache::{Executor, Executors, TransactionExecutor},
         ic_logger_msg, ic_msg,
         log_collector::LogCollector,
         pre_account::PreAccount,
@@ -28,7 +29,6 @@ use {
         alloc::Layout,
         borrow::Cow,
         cell::RefCell,
-        collections::HashMap,
         fmt::{self, Debug},
         rc::Rc,
         sync::Arc,
@@ -58,78 +58,6 @@ impl std::fmt::Debug for BuiltinProgram {
     }
 }
 
-/// Program executor
-pub trait Executor: Debug + Send + Sync {
-    /// Execute the program
-    fn execute(
-        &self,
-        first_instruction_account: IndexOfAccount,
-        invoke_context: &mut InvokeContext,
-    ) -> Result<(), InstructionError>;
-}
-
-pub type Executors = HashMap<Pubkey, TransactionExecutor>;
-
-#[repr(u8)]
-#[derive(PartialEq, Debug)]
-enum TransactionExecutorStatus {
-    /// Executor was already in the cache, no update needed
-    Cached,
-    /// Executor was missing from the cache, but not updated
-    Missing,
-    /// Executor is for an updated program
-    Updated,
-}
-
-/// Tracks whether a given executor is "dirty" and needs to be updated in the
-/// executors cache
-#[derive(Debug)]
-pub struct TransactionExecutor {
-    executor: Arc<dyn Executor>,
-    status: TransactionExecutorStatus,
-}
-
-impl TransactionExecutor {
-    /// Wraps an executor and tracks that it doesn't need to be updated in the
-    /// executors cache.
-    pub fn new_cached(executor: Arc<dyn Executor>) -> Self {
-        Self {
-            executor,
-            status: TransactionExecutorStatus::Cached,
-        }
-    }
-
-    /// Wraps an executor and tracks that it needs to be updated in the
-    /// executors cache.
-    pub fn new_miss(executor: Arc<dyn Executor>) -> Self {
-        Self {
-            executor,
-            status: TransactionExecutorStatus::Missing,
-        }
-    }
-
-    /// Wraps an executor and tracks that it needs to be updated in the
-    /// executors cache only if the transaction succeeded.
-    pub fn new_updated(executor: Arc<dyn Executor>) -> Self {
-        Self {
-            executor,
-            status: TransactionExecutorStatus::Updated,
-        }
-    }
-
-    pub fn is_missing(&self) -> bool {
-        self.status == TransactionExecutorStatus::Missing
-    }
-
-    pub fn is_updated(&self) -> bool {
-        self.status == TransactionExecutorStatus::Updated
-    }
-
-    pub fn get(&self) -> Arc<dyn Executor> {
-        self.executor.clone()
-    }
-}
-
 /// Compute meter
 pub struct ComputeMeter {
     remaining: u64,
diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs
index 2a9be8d8c4e3bd..88bfe95834a30e 100644
--- a/program-runtime/src/lib.rs
+++ b/program-runtime/src/lib.rs
@@ -11,6 +11,7 @@ extern crate solana_metrics;
 
 pub mod accounts_data_meter;
 pub mod compute_budget;
+pub mod executor_cache;
 pub mod invoke_context;
 pub mod log_collector;
 pub mod pre_account;
diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock
index 576bfb8689665e..0bad7e57f405c7 100644
--- a/programs/bpf/Cargo.lock
+++ b/programs/bpf/Cargo.lock
@@ -5245,6 +5245,7 @@ dependencies = [
 "log",
 "num-derive",
 "num-traits",
+ "rand 0.7.3",
 "rustc_version",
 "serde",
 "solana-frozen-abi 1.15.0",
diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs
index b97263823a9836..b828b581436beb 100644
--- a/programs/bpf_loader/src/lib.rs
+++ b/programs/bpf_loader/src/lib.rs
@@ -21,8 +21,9 @@ use {
     log::{log_enabled, trace, Level::Trace},
    solana_measure::measure::Measure,
     solana_program_runtime::{
+        executor_cache::Executor,
         ic_logger_msg, ic_msg,
-        invoke_context::{ComputeMeter, Executor, InvokeContext},
+        invoke_context::{ComputeMeter, InvokeContext},
         log_collector::LogCollector,
         stable_log,
         sysvar_cache::get_sysvar_with_account_check,
diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs
index 376631d85e6b56..91aebe47ff0711 100644
--- a/runtime/src/accounts.rs
+++ b/runtime/src/accounts.rs
@@ -1427,7 +1427,7 @@ mod tests {
         },
         assert_matches::assert_matches,
         solana_address_lookup_table_program::state::LookupTableMeta,
-        solana_program_runtime::invoke_context::Executors,
+        solana_program_runtime::executor_cache::Executors,
         solana_sdk::{
             account::{AccountSharedData, WritableAccount},
             epoch_schedule::EpochSchedule,
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index e729f7dadb4ecc..760639aa59a1ad 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -78,7 +78,6 @@ use {
     dashmap::{DashMap, DashSet},
     itertools::Itertools,
     log::*,
-    rand::Rng,
     rayon::{
         iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
         ThreadPool, ThreadPoolBuilder,
@@ -88,9 +87,8 @@ use {
     solana_program_runtime::{
         accounts_data_meter::MAX_ACCOUNTS_DATA_LEN,
         compute_budget::{self, ComputeBudget},
-        invoke_context::{
-            BuiltinProgram, Executor, Executors, ProcessInstructionWithContext, TransactionExecutor,
-        },
+        executor_cache::{CachedExecutors, Executors, TransactionExecutor, MAX_CACHED_EXECUTORS},
+        invoke_context::{BuiltinProgram, ProcessInstructionWithContext},
         log_collector::LogCollector,
         sysvar_cache::SysvarCache,
         timings::{ExecuteTimingType, ExecuteTimings},
@@ -157,7 +155,7 @@ use {
         collections::{HashMap, HashSet},
         convert::{TryFrom, TryInto},
         fmt, mem,
-        ops::{Deref, Div, RangeInclusive},
+        ops::{Deref, RangeInclusive},
         path::PathBuf,
         rc::Rc,
         sync::{
@@ -313,272 +311,6 @@ pub struct SquashTiming {
 
 type EpochCount = u64;
 
-mod executor_cache {
-    use {super::*, log};
-
-    #[derive(Debug, Default)]
-    pub struct Stats {
-        pub hits: AtomicU64,
-        pub misses: AtomicU64,
-        pub evictions: HashMap<Pubkey, u64>,
-        pub insertions: AtomicU64,
-        pub replacements: AtomicU64,
-        pub one_hit_wonders: AtomicU64,
-    }
-
-    impl Stats {
-        pub fn submit(&self, slot: Slot) {
-            let hits = self.hits.load(Relaxed);
-            let misses = self.misses.load(Relaxed);
-            let insertions = self.insertions.load(Relaxed);
-            let replacements = self.replacements.load(Relaxed);
-            let one_hit_wonders = self.one_hit_wonders.load(Relaxed);
-            let evictions: u64 = self.evictions.values().sum();
-            datapoint_info!(
-                "bank-executor-cache-stats",
-                ("slot", slot, i64),
-                ("hits", hits, i64),
-                ("misses", misses, i64),
-                ("evictions", evictions, i64),
-                ("insertions", insertions, i64),
-                ("replacements", replacements, i64),
-                ("one_hit_wonders", one_hit_wonders, i64),
-            );
-            debug!(
-                "Executor Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, One-Hit-Wonders: {}",
-                hits, misses, evictions, insertions, replacements, one_hit_wonders,
-            );
-            if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() {
-                let mut evictions = self.evictions.iter().collect::<Vec<_>>();
-                evictions.sort_by_key(|e| e.1);
-                let evictions = evictions
-                    .into_iter()
-                    .rev()
-                    .map(|(program_id, evictions)| {
-                        format!("  {:<44}  {}", program_id.to_string(), evictions)
-                    })
-                    .collect::<Vec<_>>();
-                let evictions = evictions.join("\n");
-                trace!(
-                    "Eviction Details:\n  {:<44}  {}\n{}",
-                    "Program",
-                    "Count",
-                    evictions
-                );
-            }
-        }
-    }
-}
-
-const MAX_CACHED_EXECUTORS: usize = 256;
-#[derive(Debug)]
-struct CachedExecutorsEntry {
-    prev_epoch_count: u64,
-    epoch_count: AtomicU64,
-    executor: Arc<dyn Executor>,
-    hit_count: AtomicU64,
-}
-
-impl Clone for CachedExecutorsEntry {
-    fn clone(&self) -> Self {
-        Self {
-            prev_epoch_count: self.prev_epoch_count,
-            epoch_count: AtomicU64::new(self.epoch_count.load(Relaxed)),
-            executor: self.executor.clone(),
-            hit_count: AtomicU64::new(self.hit_count.load(Relaxed)),
-        }
-    }
-}
-
-/// LFU Cache of executors with single-epoch memory of usage counts
-#[derive(Debug)]
-struct CachedExecutors {
-    capacity: usize,
-    current_epoch: Epoch,
-    pub(self) executors: HashMap<Pubkey, CachedExecutorsEntry>,
-    stats: executor_cache::Stats,
-}
-
-impl Default for CachedExecutors {
-    fn default() -> Self {
-        Self {
-            capacity: MAX_CACHED_EXECUTORS,
-            current_epoch: Epoch::default(),
-            executors: HashMap::default(),
-            stats: executor_cache::Stats::default(),
-        }
-    }
-}
-
-#[cfg(RUSTC_WITH_SPECIALIZATION)]
-impl AbiExample for CachedExecutors {
-    fn example() -> Self {
-        // Delegate AbiExample impl to Default before going deep and stuck with
-        // not easily impl-able Arc<dyn Executor> due to rust's coherence issue
-        // This is safe because CachedExecutors isn't serializable by definition.
-        Self::default()
-    }
-}
-
-impl CachedExecutors {
-    fn new(max_capacity: usize, current_epoch: Epoch) -> Self {
-        Self {
-            capacity: max_capacity,
-            current_epoch,
-            executors: HashMap::new(),
-            stats: executor_cache::Stats::default(),
-        }
-    }
-
-    fn new_from_parent_bank_executors(
-        parent_bank_executors: &CachedExecutors,
-        current_epoch: Epoch,
-    ) -> Self {
-        let executors = if parent_bank_executors.current_epoch == current_epoch {
-            parent_bank_executors.executors.clone()
-        } else {
-            parent_bank_executors
-                .executors
-                .iter()
-                .map(|(&key, entry)| {
-                    let entry = CachedExecutorsEntry {
-                        prev_epoch_count: entry.epoch_count.load(Relaxed),
-                        epoch_count: AtomicU64::default(),
-                        executor: entry.executor.clone(),
-                        hit_count: AtomicU64::new(entry.hit_count.load(Relaxed)),
-                    };
-                    (key, entry)
-                })
-                .collect()
-        };
-
-        Self {
-            capacity: parent_bank_executors.capacity,
-            current_epoch,
-            executors,
-            stats: executor_cache::Stats::default(),
-        }
-    }
-
-    fn get(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>> {
-        if let Some(entry) = self.executors.get(pubkey) {
-            self.stats.hits.fetch_add(1, Relaxed);
-            entry.epoch_count.fetch_add(1, Relaxed);
-            entry.hit_count.fetch_add(1, Relaxed);
-            Some(entry.executor.clone())
-        } else {
-            self.stats.misses.fetch_add(1, Relaxed);
-            None
-        }
-    }
-
-    fn put(&mut self, executors: &[(&Pubkey, Arc<dyn Executor>)]) {
-        let mut new_executors: Vec<_> = executors
-            .iter()
-            .filter_map(|(key, executor)| {
-                if let Some(mut entry) = self.remove(key) {
-                    self.stats.replacements.fetch_add(1, Relaxed);
-                    entry.executor = executor.clone();
-                    let _ = self.executors.insert(**key, entry);
-                    None
-                } else {
-                    self.stats.insertions.fetch_add(1, Relaxed);
-                    Some((*key, executor))
-                }
-            })
-            .collect();
-
-        if !new_executors.is_empty() {
-            let mut counts = self
-                .executors
-                .iter()
-                .map(|(key, entry)| {
-                    let count = entry.prev_epoch_count + entry.epoch_count.load(Relaxed);
-                    (key, count)
-                })
-                .collect::<Vec<_>>();
-            counts.sort_unstable_by_key(|(_, count)| *count);
-
-            let primer_counts = Self::get_primer_counts(counts.as_slice(), new_executors.len());
-
-            if self.executors.len() >= self.capacity {
-                let mut least_keys = counts
-                    .iter()
-                    .take(new_executors.len())
-                    .map(|least| *least.0)
-                    .collect::<Vec<_>>();
-                for least_key in least_keys.drain(..) {
-                    let _ = self.remove(&least_key);
-                    self.stats
-                        .evictions
-                        .entry(least_key)
-                        .and_modify(|c| saturating_add_assign!(*c, 1))
-                        .or_insert(1);
-                }
-            }
-
-            for ((key, executor), primer_count) in new_executors.drain(..).zip(primer_counts) {
-                let entry = CachedExecutorsEntry {
-                    prev_epoch_count: 0,
-                    epoch_count: AtomicU64::new(primer_count),
-                    executor: executor.clone(),
-                    hit_count: AtomicU64::new(1),
-                };
-                let _ = self.executors.insert(*key, entry);
-            }
-        }
-    }
-
-    fn remove(&mut self, pubkey: &Pubkey) -> Option<CachedExecutorsEntry> {
-        let maybe_entry = self.executors.remove(pubkey);
-        if let Some(entry) = maybe_entry.as_ref() {
-            if entry.hit_count.load(Relaxed) == 1 {
-                self.stats.one_hit_wonders.fetch_add(1, Relaxed);
-            }
-        }
-        maybe_entry
-    }
-
-    fn clear(&mut self) {
-        *self = CachedExecutors::default();
-    }
-
-    fn get_primer_count_upper_bound_inclusive(counts: &[(&Pubkey, u64)]) -> u64 {
-        const PRIMER_COUNT_TARGET_PERCENTILE: u64 = 85;
-        #[allow(clippy::assertions_on_constants)]
-        {
-            assert!(PRIMER_COUNT_TARGET_PERCENTILE <= 100);
-        }
-        // Executor use-frequencies are assumed to fit a Pareto distribution. Choose an
-        // upper-bound for our primer count as the actual count at the target rank to avoid
-        // an upward bias
-
-        let target_index = u64::try_from(counts.len().saturating_sub(1))
-            .ok()
-            .and_then(|counts| {
-                let index = counts
-                    .saturating_mul(PRIMER_COUNT_TARGET_PERCENTILE)
-                    .div(100); // switch to u64::saturating_div once stable
-                usize::try_from(index).ok()
-            })
-            .unwrap_or(0);
-
-        counts
-            .get(target_index)
-            .map(|(_, count)| *count)
-            .unwrap_or(0)
-    }
-
-    fn get_primer_counts(counts: &[(&Pubkey, u64)], num_counts: usize) -> Vec<u64> {
-        let max_primer_count = Self::get_primer_count_upper_bound_inclusive(counts);
-        let mut rng = rand::thread_rng();
-
-        (0..num_counts)
-            .map(|_| rng.gen_range(0, max_primer_count.saturating_add(1)))
-            .collect::<Vec<_>>()
-    }
-}
-
 #[derive(Debug)]
 pub struct BankRc {
     /// where all the Accounts are stored
@@ -8136,8 +7868,10 @@ pub(crate) mod tests {
         status_cache::MAX_CACHE_ENTRIES,
     },
     crossbeam_channel::{bounded, unbounded},
+    rand::Rng,
     solana_program_runtime::{
        compute_budget::MAX_COMPUTE_UNIT_LIMIT,
+        executor_cache::Executor,
        invoke_context::InvokeContext,
        prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType},
    },
@@ -15337,244 +15071,6 @@ pub(crate) mod tests {
         }
     }
 
-    #[test]
-    fn test_cached_executors() {
-        let key1 = solana_sdk::pubkey::new_rand();
-        let key2 = solana_sdk::pubkey::new_rand();
-        let key3 = solana_sdk::pubkey::new_rand();
-        let key4 = solana_sdk::pubkey::new_rand();
-        let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
-        let mut cache = CachedExecutors::new(3, 0);
-
-        cache.put(&[(&key1, executor.clone())]);
-        cache.put(&[(&key2, executor.clone())]);
-        cache.put(&[(&key3, executor.clone())]);
-        assert!(cache.get(&key1).is_some());
-        assert!(cache.get(&key2).is_some());
-        assert!(cache.get(&key3).is_some());
-
-        assert!(cache.get(&key1).is_some());
-        assert!(cache.get(&key1).is_some());
-        assert!(cache.get(&key2).is_some());
-        cache.put(&[(&key4, executor.clone())]);
-        assert!(cache.get(&key4).is_some());
-        let num_retained = [&key1, &key2, &key3]
-            .iter()
-            .filter_map(|key| cache.get(key))
-            .count();
-        assert_eq!(num_retained, 2);
-
-        assert!(cache.get(&key4).is_some());
-        assert!(cache.get(&key4).is_some());
-        assert!(cache.get(&key4).is_some());
-        cache.put(&[(&key3, executor.clone())]);
-        assert!(cache.get(&key3).is_some());
-        let num_retained = [&key1, &key2, &key4]
-            .iter()
-            .filter_map(|key| cache.get(key))
-            .count();
-        assert_eq!(num_retained, 2);
-    }
-
-    #[test]
-    fn test_cached_executor_eviction() {
-        let key1 = solana_sdk::pubkey::new_rand();
-        let key2 = solana_sdk::pubkey::new_rand();
-        let key3 = solana_sdk::pubkey::new_rand();
-        let key4 = solana_sdk::pubkey::new_rand();
-        let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
-        let mut cache = CachedExecutors::new(3, 0);
-        assert!(cache.current_epoch == 0);
-
-        cache.put(&[(&key1, executor.clone())]);
-        cache.put(&[(&key2, executor.clone())]);
-        cache.put(&[(&key3, executor.clone())]);
-        assert!(cache.get(&key1).is_some());
-        assert!(cache.get(&key1).is_some());
-        assert!(cache.get(&key1).is_some());
-
-        let mut cache = CachedExecutors::new_from_parent_bank_executors(&cache, 1);
-        assert!(cache.current_epoch == 1);
-
-        assert!(cache.get(&key2).is_some());
-        assert!(cache.get(&key2).is_some());
-        assert!(cache.get(&key3).is_some());
-        cache.put(&[(&key4, executor.clone())]);
-
-        assert!(cache.get(&key4).is_some());
-        let num_retained = [&key1, &key2, &key3]
-            .iter()
-            .filter_map(|key| cache.get(key))
-            .count();
-        assert_eq!(num_retained, 2);
-
-        cache.put(&[(&key1, executor.clone())]);
-        cache.put(&[(&key3, executor.clone())]);
-        assert!(cache.get(&key1).is_some());
-        assert!(cache.get(&key3).is_some());
-        let num_retained = [&key2, &key4]
-            .iter()
-            .filter_map(|key| cache.get(key))
-            .count();
-        assert_eq!(num_retained, 1);
-
-        cache = CachedExecutors::new_from_parent_bank_executors(&cache, 2);
-        assert!(cache.current_epoch == 2);
-
-        cache.put(&[(&key3, executor.clone())]);
-        assert!(cache.get(&key3).is_some());
-    }
-
-    #[test]
-    fn test_cached_executors_evicts_smallest() {
-        let key1 = solana_sdk::pubkey::new_rand();
-        let key2 = solana_sdk::pubkey::new_rand();
-        let key3 = solana_sdk::pubkey::new_rand();
-        let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
-        let mut cache = CachedExecutors::new(2, 0);
-
-        cache.put(&[(&key1, executor.clone())]);
-        for _ in 0..5 {
-            let _ = cache.get(&key1);
-        }
-        cache.put(&[(&key2, executor.clone())]);
-        // make key1's use-count for sure greater than key2's
-        let _ = cache.get(&key1);
-
-        let mut entries = cache
-            .executors
-            .iter()
-            .map(|(k, v)| (*k, v.epoch_count.load(Relaxed)))
-            .collect::<Vec<_>>();
-        entries.sort_by_key(|(_, v)| *v);
-        assert!(entries[0].1 < entries[1].1);
-
-        cache.put(&[(&key3, executor.clone())]);
-        assert!(cache.get(&entries[0].0).is_none());
-        assert!(cache.get(&entries[1].0).is_some());
-    }
-
-    #[test]
-    fn test_cached_executors_one_hit_wonder_counter() {
-        let mut cache = CachedExecutors::new(1, 0);
-
-        let one_hit_wonder = Pubkey::new_unique();
-        let popular = Pubkey::new_unique();
-        let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
-
-        // make sure we're starting from where we think we are
-        assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 0);
-
-        // add our one-hit-wonder
-        cache.put(&[(&one_hit_wonder, executor.clone())]);
-        assert_eq!(cache.executors[&one_hit_wonder].hit_count.load(Relaxed), 1);
-        // displace the one-hit-wonder with "popular program"
-        cache.put(&[(&popular, executor.clone())]);
-        assert_eq!(cache.executors[&popular].hit_count.load(Relaxed), 1);
-
-        // one-hit-wonder counter incremented
-        assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 1);
-
-        // make "popular program" popular
-        cache.get(&popular).unwrap();
-        assert_eq!(cache.executors[&popular].hit_count.load(Relaxed), 2);
-
-        // evict "popular program"
-        cache.put(&[(&one_hit_wonder, executor.clone())]);
-        assert_eq!(cache.executors[&one_hit_wonder].hit_count.load(Relaxed), 1);
-
-        // one-hit-wonder counter not incremented
-        assert_eq!(cache.stats.one_hit_wonders.load(Relaxed), 1);
-    }
-
-    #[test]
-    fn test_cached_executors_stats() {
-        #[derive(Debug, Default, PartialEq)]
-        struct ComparableStats {
-            hits: u64,
-            misses: u64,
-            evictions: HashMap<Pubkey, u64>,
-            insertions: u64,
-            replacements: u64,
-            one_hit_wonders: u64,
-        }
-        impl From<&executor_cache::Stats> for ComparableStats {
-            fn from(stats: &executor_cache::Stats) -> Self {
-                let executor_cache::Stats {
-                    hits,
-                    misses,
-                    evictions,
-                    insertions,
-                    replacements,
-                    one_hit_wonders,
-                } = stats;
-                ComparableStats {
-                    hits: hits.load(Relaxed),
-                    misses: misses.load(Relaxed),
-                    evictions: evictions.clone(),
-                    insertions: insertions.load(Relaxed),
-                    replacements: replacements.load(Relaxed),
-                    one_hit_wonders: one_hit_wonders.load(Relaxed),
-                }
-            }
-        }
-
-        const CURRENT_EPOCH: Epoch = 0;
-        let mut cache = CachedExecutors::new(2, CURRENT_EPOCH);
-        let mut expected_stats = ComparableStats::default();
-
-        let program_id1 = Pubkey::new_unique();
-        let program_id2 = Pubkey::new_unique();
-        let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
-
-        // make sure we're starting from where we think we are
-        assert_eq!(ComparableStats::from(&cache.stats), expected_stats,);
-
-        // insert some executors
-        cache.put(&[(&program_id1, executor.clone())]);
-        cache.put(&[(&program_id2, executor.clone())]);
-        expected_stats.insertions += 2;
-        assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
-
-        // replace a one-hit-wonder executor
-        cache.put(&[(&program_id1, executor.clone())]);
-        expected_stats.replacements += 1;
-        expected_stats.one_hit_wonders += 1;
-        assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
-
-        // hit some executors
-        cache.get(&program_id1);
-        cache.get(&program_id1);
-        cache.get(&program_id2);
-        expected_stats.hits += 3;
-        assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
-
-        // miss an executor
-        cache.get(&Pubkey::new_unique());
-        expected_stats.misses += 1;
-        assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
-
-        // evict an executor
-        cache.put(&[(&Pubkey::new_unique(), executor.clone())]);
-        expected_stats.insertions += 1;
-        expected_stats.evictions.insert(program_id2, 1);
-        assert_eq!(ComparableStats::from(&cache.stats), expected_stats);
-
-        // make sure stats are cleared in new_from_parent
-        assert_eq!(
-            ComparableStats::from(
-                &CachedExecutors::new_from_parent_bank_executors(&cache, CURRENT_EPOCH).stats
-            ),
-            ComparableStats::default()
-        );
-        assert_eq!(
-            ComparableStats::from(
-                &CachedExecutors::new_from_parent_bank_executors(&cache, CURRENT_EPOCH + 1).stats
-            ),
-            ComparableStats::default()
-        );
-    }
-
     #[test]
     fn test_bank_executor_cache() {
         solana_logger::setup();
@@ -18129,26 +17625,6 @@ pub(crate) mod tests {
         );
     }
 
-    #[test]
-    fn test_executor_cache_get_primer_count_upper_bound_inclusive() {
-        let pubkey = Pubkey::default();
-        let v = [];
-        assert_eq!(
-            CachedExecutors::get_primer_count_upper_bound_inclusive(&v),
-            0
-        );
-        let v = [(&pubkey, 1)];
-        assert_eq!(
-            CachedExecutors::get_primer_count_upper_bound_inclusive(&v),
-            1
-        );
-        let v = (0u64..10).map(|i| (&pubkey, i)).collect::<Vec<_>>();
-        assert_eq!(
-            CachedExecutors::get_primer_count_upper_bound_inclusive(v.as_slice()),
-            7
-        );
-    }
-
     #[derive(Serialize, Deserialize)]
     enum MockTransferInstruction {
         Transfer(u64),
diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs
index 68507c96a212f1..ac8639c5c91190 100644
--- a/runtime/src/message_processor.rs
+++ b/runtime/src/message_processor.rs
@@ -3,7 +3,8 @@ use {
     solana_measure::measure::Measure,
     solana_program_runtime::{
         compute_budget::ComputeBudget,
-        invoke_context::{BuiltinProgram, Executors, InvokeContext},
+        executor_cache::Executors,
+        invoke_context::{BuiltinProgram, InvokeContext},
         log_collector::LogCollector,
         sysvar_cache::SysvarCache,
         timings::{ExecuteDetailsTimings, ExecuteTimings},

From 27cd2c324ed301ffe22888fa90d73e3c2945d880 Mon Sep 17 00:00:00 2001
From: Brooks Prumo
Date: Mon, 10 Oct 2022 10:16:13 -0400
Subject: [PATCH 42/65] Adds tests for EAH and snapshot interactions (#28304)

---
 core/tests/epoch_accounts_hash.rs | 176 ++++++++++++++++++++++--
 1 file changed, 165 insertions(+), 11 deletions(-)

diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs
index a170b891f9d9d2..305c0bf80fad21 100755
--- a/core/tests/epoch_accounts_hash.rs
+++ b/core/tests/epoch_accounts_hash.rs
@@ -11,13 +11,18 @@ use {
         AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver,
         PrunedBanksRequestHandler, SnapshotRequestHandler,
     },
+    accounts_db::AccountShrinkThreshold,
accounts_hash::CalcAccountsHashConfig, + accounts_index::AccountSecondaryIndexes, bank::Bank, bank_forks::BankForks, epoch_accounts_hash::{self, EpochAccountsHash}, genesis_utils::{self, GenesisConfigInfo}, + runtime_config::RuntimeConfig, + snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_config::SnapshotConfig, snapshot_package::{PendingAccountsPackage, PendingSnapshotPackage}, + snapshot_utils, }, solana_sdk::{ clock::Slot, @@ -39,6 +44,7 @@ use { time::Duration, }, tempfile::TempDir, + test_case::test_case, }; struct TestEnvironment { @@ -49,7 +55,7 @@ struct TestEnvironment { bank_forks: Arc>, background_services: BackgroundServices, genesis_config_info: GenesisConfigInfo, - _snapshot_config: SnapshotConfig, + snapshot_config: SnapshotConfig, _bank_snapshots_dir: TempDir, _full_snapshot_archives_dir: TempDir, _incremental_snapshot_archives_dir: TempDir, @@ -64,6 +70,24 @@ impl TestEnvironment { #[must_use] fn new() -> TestEnvironment { + Self::_new(SnapshotConfig::new_load_only()) + } + + #[must_use] + fn new_with_snapshots( + full_snapshot_archive_interval_slots: Slot, + incremental_snapshot_archive_interval_slots: Slot, + ) -> TestEnvironment { + let snapshot_config = SnapshotConfig { + full_snapshot_archive_interval_slots, + incremental_snapshot_archive_interval_slots, + ..SnapshotConfig::default() + }; + Self::_new(snapshot_config) + } + + #[must_use] + fn _new(snapshot_config: SnapshotConfig) -> TestEnvironment { let bank_snapshots_dir = TempDir::new().unwrap(); let full_snapshot_archives_dir = TempDir::new().unwrap(); let incremental_snapshot_archives_dir = TempDir::new().unwrap(); @@ -80,7 +104,7 @@ impl TestEnvironment { .path() .to_path_buf(), bank_snapshots_dir: bank_snapshots_dir.path().to_path_buf(), - ..SnapshotConfig::new_load_only() + ..snapshot_config }; let mut bank_forks = @@ -124,7 +148,7 @@ impl TestEnvironment { _bank_snapshots_dir: bank_snapshots_dir, _full_snapshot_archives_dir: full_snapshot_archives_dir, _incremental_snapshot_archives_dir: incremental_snapshot_archives_dir, - _snapshot_config: snapshot_config, + snapshot_config, background_services, } } @@ -222,20 +246,21 @@ impl Drop for BackgroundServices { } } -/// Run through a few epochs and ensure the Epoch Accounts Hash is calculated correctly -#[test] -fn test_epoch_accounts_hash() { +/// Ensure that EAHs are requested, calculated, and awaited correctly. +/// Test both with and without snapshots to make sure they don't interfere with EAH. 
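+///
+/// A rough sketch of the EAH window these tests assume (the concrete slots come
+/// from the comments in `test_snapshots_have_expected_epoch_accounts_hash` below;
+/// the `/ 4` arithmetic is an inference for illustration, not asserted here):
+///
+/// ```ignore
+/// // hypothetical helper values for slots_per_epoch = 100
+/// let eah_start = epoch_first_slot + slots_per_epoch / 4;     // slots 25, 125, ...
+/// let eah_stop = epoch_first_slot + 3 * slots_per_epoch / 4;  // slots 75, 175, ...
+/// ```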
+#[test_case(TestEnvironment::new() ; "without snapshots")] +#[test_case(TestEnvironment::new_with_snapshots(20, 10) ; "with snapshots")] +fn test_epoch_accounts_hash_basic(test_environment: TestEnvironment) { solana_logger::setup(); const NUM_EPOCHS_TO_TEST: u64 = 2; const SET_ROOT_INTERVAL: Slot = 3; - let test_config = TestEnvironment::new(); - let bank_forks = &test_config.bank_forks; + let bank_forks = &test_environment.bank_forks; let mut expected_epoch_accounts_hash = None; - let slots_per_epoch = test_config + let slots_per_epoch = test_environment .genesis_config_info .genesis_config .epoch_schedule @@ -250,7 +275,7 @@ fn test_epoch_accounts_hash() { )); let transaction = system_transaction::transfer( - &test_config.genesis_config_info.mint_keypair, + &test_environment.genesis_config_info.mint_keypair, &Pubkey::new_unique(), 1, bank.last_blockhash(), @@ -267,7 +292,7 @@ fn test_epoch_accounts_hash() { trace!("rooting bank {}", bank.slot()); bank_forks.write().unwrap().set_root( bank.slot(), - &test_config + &test_environment .background_services .accounts_background_request_sender, None, @@ -322,3 +347,132 @@ fn test_epoch_accounts_hash() { std::thread::yield_now(); } } + +/// Ensure that snapshots always have the expected EAH +/// +/// Generate snapshots: +/// - Before EAH start +/// - After EAH start but before EAH stop +/// - After EAH stop +/// +/// In Epoch 0, this will correspond to all three EAH states (invalid, in-flight, and valid). In +/// Epoch 1, this will correspond to a normal running cluster, where EAH will only be either +/// in-flight or valid. +#[test] +fn test_snapshots_have_expected_epoch_accounts_hash() { + solana_logger::setup(); + + const NUM_EPOCHS_TO_TEST: u64 = 2; + + // Since slots-per-epoch is 100, EAH start will be slots 25 and 125, and EAH stop will be slots + // 75 and 175. Pick a full snapshot interval that triggers in the three scenarios outlined in + // the test's description. + const FULL_SNAPSHOT_INTERVAL: Slot = 20; + + let test_environment = + TestEnvironment::new_with_snapshots(FULL_SNAPSHOT_INTERVAL, FULL_SNAPSHOT_INTERVAL); + let bank_forks = &test_environment.bank_forks; + + let slots_per_epoch = test_environment + .genesis_config_info + .genesis_config + .epoch_schedule + .slots_per_epoch; + for _ in 0..slots_per_epoch * NUM_EPOCHS_TO_TEST { + let bank = { + let parent = bank_forks.read().unwrap().working_bank(); + let bank = bank_forks.write().unwrap().insert(Bank::new_from_parent( + &parent, + &Pubkey::default(), + parent.slot() + 1, + )); + + let transaction = system_transaction::transfer( + &test_environment.genesis_config_info.mint_keypair, + &Pubkey::new_unique(), + 1, + bank.last_blockhash(), + ); + bank.process_transaction(&transaction).unwrap(); + bank.fill_bank_with_ticks_for_tests(); + + bank + }; + trace!("new bank {}", bank.slot()); + + // Root every bank. This is what a normal validator does as well. + // `set_root()` is also what requests snapshots and EAH calculations. + bank_forks.write().unwrap().set_root( + bank.slot(), + &test_environment + .background_services + .accounts_background_request_sender, + None, + ); + + // After submitting an EAH calculation request, wait until it gets handled by ABS so that + // subsequent snapshot requests are not swallowed. + if bank.slot() == epoch_accounts_hash::calculation_start(&bank) { + while dbg!(bank.epoch_accounts_hash()).is_none() { + std::thread::sleep(Duration::from_secs(1)); + } + } + + // After submitting a snapshot request... 
+ // - Wait until the snapshot archive has been generated + // - Deserialize the bank from the snapshot archive + // - Ensure the EAHs match + if bank.slot() % FULL_SNAPSHOT_INTERVAL == 0 { + let snapshot_config = &test_environment.snapshot_config; + let full_snapshot_archive_info = loop { + if let Some(full_snapshot_archive_info) = + snapshot_utils::get_highest_full_snapshot_archive_info( + &snapshot_config.full_snapshot_archives_dir, + ) + { + if full_snapshot_archive_info.slot() == bank.slot() { + break full_snapshot_archive_info; + } + } + + _ = dbg!(snapshot_utils::get_full_snapshot_archives( + &snapshot_config.full_snapshot_archives_dir + )); + std::thread::sleep(Duration::from_millis(1000)); + }; + + let accounts_dir = TempDir::new().unwrap(); + let deserialized_bank = snapshot_utils::bank_from_snapshot_archives( + &[accounts_dir.into_path()], + &snapshot_config.bank_snapshots_dir, + &full_snapshot_archive_info, + None, + &test_environment.genesis_config_info.genesis_config, + &RuntimeConfig::default(), + None, + None, + AccountSecondaryIndexes::default(), + false, + None, + AccountShrinkThreshold::default(), + true, + true, + true, + None, + None, + &Arc::new(AtomicBool::new(false)), + ) + .unwrap() + .0; + + assert_eq!(&deserialized_bank, bank.as_ref()); + assert_eq!( + deserialized_bank.epoch_accounts_hash(), + bank.epoch_accounts_hash(), + ); + } + + // Give the background services a chance to run + std::thread::yield_now(); + } +} From 5a08eed82d3492e0538a34cc4d5072dea91c0bf6 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Mon, 10 Oct 2022 12:07:55 -0400 Subject: [PATCH 43/65] Cleans up debugging code in EAH tests (#28324) --- core/tests/epoch_accounts_hash.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 305c0bf80fad21..569ed3c17310ae 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -413,7 +413,7 @@ fn test_snapshots_have_expected_epoch_accounts_hash() { // After submitting an EAH calculation request, wait until it gets handled by ABS so that // subsequent snapshot requests are not swallowed. 
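 // (per the wait loop below, epoch_accounts_hash() yields Some only once ABS has
 // completed the calculation, so this wait doubles as a barrier before any
 // further snapshot requests are issued)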
if bank.slot() == epoch_accounts_hash::calculation_start(&bank) { - while dbg!(bank.epoch_accounts_hash()).is_none() { + while bank.epoch_accounts_hash().is_none() { std::thread::sleep(Duration::from_secs(1)); } } @@ -434,11 +434,7 @@ fn test_snapshots_have_expected_epoch_accounts_hash() { break full_snapshot_archive_info; } } - - _ = dbg!(snapshot_utils::get_full_snapshot_archives( - &snapshot_config.full_snapshot_archives_dir - )); - std::thread::sleep(Duration::from_millis(1000)); + std::thread::sleep(Duration::from_secs(1)); }; let accounts_dir = TempDir::new().unwrap(); From d500391006475e16d870f81647096799a6a2fa4b Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Mon, 10 Oct 2022 13:11:38 -0400 Subject: [PATCH 44/65] Removes redundant accounts_package_type (#28325) --- runtime/src/accounts_background_service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 8b67a83c3ec657..0bac89b5b4a8ed 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -160,7 +160,6 @@ impl SnapshotRequestHandler { .map(|(snapshot_request, accounts_package_type)| { trace!("handling snapshot request: {:?}, {:?}", snapshot_request, accounts_package_type); let mut total_time = Measure::start("snapshot_request_receiver_total_time"); - let accounts_package_type = new_accounts_package_type(&snapshot_request, &self.snapshot_config, *last_full_snapshot_slot); let SnapshotRequest { snapshot_root_bank, status_cache_slot_deltas, From cc390f176c636b07928a2b18e49f66890ad326f1 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 11 Oct 2022 02:24:10 +0800 Subject: [PATCH 45/65] RPC: Fix ATA create instruction parsing (#28314) --- .../src/parse_associated_token.rs | 153 +++++++++--------- 1 file changed, 79 insertions(+), 74 deletions(-) diff --git a/transaction-status/src/parse_associated_token.rs b/transaction-status/src/parse_associated_token.rs index 1c2cd2c985ef7b..428d9b98e99b26 100644 --- a/transaction-status/src/parse_associated_token.rs +++ b/transaction-status/src/parse_associated_token.rs @@ -27,69 +27,56 @@ pub fn parse_associated_token( )); } } - if instruction.data.is_empty() { - check_num_associated_token_accounts(&instruction.accounts, 7)?; - Ok(ParsedInstructionEnum { - instruction_type: "create".to_string(), - info: json!({ - "source": account_keys[instruction.accounts[0] as usize].to_string(), - "account": account_keys[instruction.accounts[1] as usize].to_string(), - "wallet": account_keys[instruction.accounts[2] as usize].to_string(), - "mint": account_keys[instruction.accounts[3] as usize].to_string(), - "systemProgram": account_keys[instruction.accounts[4] as usize].to_string(), - "tokenProgram": account_keys[instruction.accounts[5] as usize].to_string(), - "rentSysvar": account_keys[instruction.accounts[6] as usize].to_string(), - }), - }) + let ata_instruction = if instruction.data.is_empty() { + AssociatedTokenAccountInstruction::Create } else { - let ata_instruction = AssociatedTokenAccountInstruction::try_from_slice(&instruction.data) - .map_err(|_| { - ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) - })?; - match ata_instruction { - AssociatedTokenAccountInstruction::Create => { - check_num_associated_token_accounts(&instruction.accounts, 6)?; - Ok(ParsedInstructionEnum { - instruction_type: "create".to_string(), - info: json!({ - "source": account_keys[instruction.accounts[0] as usize].to_string(), - "account": 
account_keys[instruction.accounts[1] as usize].to_string(), - "wallet": account_keys[instruction.accounts[2] as usize].to_string(), - "mint": account_keys[instruction.accounts[3] as usize].to_string(), - "systemProgram": account_keys[instruction.accounts[4] as usize].to_string(), - "tokenProgram": account_keys[instruction.accounts[5] as usize].to_string(), - }), - }) - } - AssociatedTokenAccountInstruction::CreateIdempotent => { - check_num_associated_token_accounts(&instruction.accounts, 6)?; - Ok(ParsedInstructionEnum { - instruction_type: "createIdempotent".to_string(), - info: json!({ - "source": account_keys[instruction.accounts[0] as usize].to_string(), - "account": account_keys[instruction.accounts[1] as usize].to_string(), - "wallet": account_keys[instruction.accounts[2] as usize].to_string(), - "mint": account_keys[instruction.accounts[3] as usize].to_string(), - "systemProgram": account_keys[instruction.accounts[4] as usize].to_string(), - "tokenProgram": account_keys[instruction.accounts[5] as usize].to_string(), - }), - }) - } - AssociatedTokenAccountInstruction::RecoverNested => { - check_num_associated_token_accounts(&instruction.accounts, 7)?; - Ok(ParsedInstructionEnum { - instruction_type: "recoverNested".to_string(), - info: json!({ - "nestedSource": account_keys[instruction.accounts[0] as usize].to_string(), - "nestedMint": account_keys[instruction.accounts[1] as usize].to_string(), - "destination": account_keys[instruction.accounts[2] as usize].to_string(), - "nestedOwner": account_keys[instruction.accounts[3] as usize].to_string(), - "ownerMint": account_keys[instruction.accounts[4] as usize].to_string(), - "wallet": account_keys[instruction.accounts[5] as usize].to_string(), - "tokenProgram": account_keys[instruction.accounts[6] as usize].to_string(), - }), - }) - } + AssociatedTokenAccountInstruction::try_from_slice(&instruction.data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? 
+ }; + + match ata_instruction { + AssociatedTokenAccountInstruction::Create => { + check_num_associated_token_accounts(&instruction.accounts, 6)?; + Ok(ParsedInstructionEnum { + instruction_type: "create".to_string(), + info: json!({ + "source": account_keys[instruction.accounts[0] as usize].to_string(), + "account": account_keys[instruction.accounts[1] as usize].to_string(), + "wallet": account_keys[instruction.accounts[2] as usize].to_string(), + "mint": account_keys[instruction.accounts[3] as usize].to_string(), + "systemProgram": account_keys[instruction.accounts[4] as usize].to_string(), + "tokenProgram": account_keys[instruction.accounts[5] as usize].to_string(), + }), + }) + } + AssociatedTokenAccountInstruction::CreateIdempotent => { + check_num_associated_token_accounts(&instruction.accounts, 6)?; + Ok(ParsedInstructionEnum { + instruction_type: "createIdempotent".to_string(), + info: json!({ + "source": account_keys[instruction.accounts[0] as usize].to_string(), + "account": account_keys[instruction.accounts[1] as usize].to_string(), + "wallet": account_keys[instruction.accounts[2] as usize].to_string(), + "mint": account_keys[instruction.accounts[3] as usize].to_string(), + "systemProgram": account_keys[instruction.accounts[4] as usize].to_string(), + "tokenProgram": account_keys[instruction.accounts[5] as usize].to_string(), + }), + }) + } + AssociatedTokenAccountInstruction::RecoverNested => { + check_num_associated_token_accounts(&instruction.accounts, 7)?; + Ok(ParsedInstructionEnum { + instruction_type: "recoverNested".to_string(), + info: json!({ + "nestedSource": account_keys[instruction.accounts[0] as usize].to_string(), + "nestedMint": account_keys[instruction.accounts[1] as usize].to_string(), + "destination": account_keys[instruction.accounts[2] as usize].to_string(), + "nestedOwner": account_keys[instruction.accounts[3] as usize].to_string(), + "ownerMint": account_keys[instruction.accounts[4] as usize].to_string(), + "wallet": account_keys[instruction.accounts[5] as usize].to_string(), + "tokenProgram": account_keys[instruction.accounts[6] as usize].to_string(), + }), + }) } } } @@ -158,25 +145,43 @@ mod test { ); let message = Message::new(&[create_ix], None); let mut compiled_instruction = convert_compiled_instruction(&message.instructions[0]); + let expected_parsed_ix = ParsedInstructionEnum { + instruction_type: "create".to_string(), + info: json!({ + "source": funder.to_string(), + "account": associated_account_address.to_string(), + "wallet": wallet_address.to_string(), + "mint": mint.to_string(), + "systemProgram": solana_sdk::system_program::id().to_string(), + "tokenProgram": spl_token::id().to_string(), + }), + }; assert_eq!( parse_associated_token( &compiled_instruction, &AccountKeys::new(&convert_account_keys(&message), None) ) .unwrap(), - ParsedInstructionEnum { - instruction_type: "create".to_string(), - info: json!({ - "source": funder.to_string(), - "account": associated_account_address.to_string(), - "wallet": wallet_address.to_string(), - "mint": mint.to_string(), - "systemProgram": solana_sdk::system_program::id().to_string(), - "tokenProgram": spl_token::id().to_string(), - "rentSysvar": sysvar::rent::id().to_string(), - }) - } + expected_parsed_ix, ); + + // after popping rent account, parsing should still succeed + let rent_account_index = compiled_instruction + .accounts + .iter() + .position(|index| message.account_keys[*index as usize] == sysvar::rent::id()) + .unwrap(); + compiled_instruction.accounts.remove(rent_account_index); + 
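+        // parse_associated_token reads accounts positionally (indices 0..=5 for
+        // "create"), so the expected JSON above is unchanged with or without the
+        // trailing rent sysvar account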
assert_eq!( + parse_associated_token( + &compiled_instruction, + &AccountKeys::new(&convert_account_keys(&message), None) + ) + .unwrap(), + expected_parsed_ix, + ); + + // after popping another account, parsing should fail compiled_instruction.accounts.pop(); assert!(parse_associated_token( &compiled_instruction, From 2929c8f7a20b3d8d910cebaf5c3bebaab13af5db Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 10 Oct 2022 15:37:23 -0400 Subject: [PATCH 46/65] Fix blockstore_processor::load_frozen_forks() halt_at_slot behavior (#28317) load_frozen_forks() finds new slots to process by creating new Banks for the children of the current slot in process_next_slots(). Prior to this change, we would then immediately check if we had reached the halt_at_slot and correctly halt processing when appropriate. As such, it would be possible for Banks to be created for slots beyond the halt_at_slot. While a potential child slot that is past halt_at_slot wouldn't be replayed, the Bank being created still alters some universal state in AccountsDb. So, this change moves the halt_at_slot check before we create children Banks in process_next_slots(). --- ledger/src/blockstore_processor.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index b57dc3b9d4d0b3..fc3ae1d5231dd4 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1440,9 +1440,8 @@ fn load_frozen_forks( &mut pending_slots, )?; - let halt_at_slot = opts.halt_at_slot.unwrap_or(std::u64::MAX); let on_halt_store_hash_raw_data_for_debug = opts.on_halt_store_hash_raw_data_for_debug; - if bank_forks.read().unwrap().root() != halt_at_slot { + if Some(bank_forks.read().unwrap().root()) != opts.halt_at_slot { let mut set_root_us = 0; let mut root_retain_us = 0; let mut process_single_slot_us = 0; @@ -1588,6 +1587,17 @@ fn load_frozen_forks( slot, ); + let done_processing = opts + .halt_at_slot + .map(|halt_at_slot| slot >= halt_at_slot) + .unwrap_or(false); + if done_processing { + if opts.run_final_accounts_hash_calc { + run_final_hash_calc(&bank, on_halt_store_hash_raw_data_for_debug); + } + break; + } + process_next_slots( &bank, &meta, @@ -1595,13 +1605,6 @@ fn load_frozen_forks( leader_schedule_cache, &mut pending_slots, )?; - - if slot >= halt_at_slot { - if opts.run_final_accounts_hash_calc { - run_final_hash_calc(&bank, on_halt_store_hash_raw_data_for_debug); - } - break; - } } } else if on_halt_store_hash_raw_data_for_debug { run_final_hash_calc( From 15050b14b99ad64e0fce3badf071adf376ea35aa Mon Sep 17 00:00:00 2001 From: Jeff Biseda Date: Mon, 10 Oct 2022 14:09:45 -0700 Subject: [PATCH 47/65] use signed repair request variants (#28283) --- core/src/ancestor_hashes_service.rs | 6 - core/src/repair_service.rs | 30 ++- core/src/serve_repair.rs | 271 ++++++---------------------- dos/src/main.rs | 6 +- sdk/src/feature_set.rs | 5 - 5 files changed, 73 insertions(+), 245 deletions(-) diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index 3f81d38e2a31b3..98972928cee7e5 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -643,7 +643,6 @@ impl AncestorHashesService { repair_stats, outstanding_requests, identity_keypair, - &root_bank, ) { request_throttle.push(timestamp()); repairable_dead_slot_pool.take(&slot).unwrap(); @@ -719,7 +718,6 @@ impl AncestorHashesService { repair_stats: &mut AncestorRepairRequestsStats, 
outstanding_requests: &RwLock, identity_keypair: &Keypair, - root_bank: &Bank, ) -> bool { let sampled_validators = serve_repair.repair_request_ancestor_hashes_sample_peers( duplicate_slot, @@ -738,7 +736,6 @@ impl AncestorHashesService { .add_request(AncestorHashesRepairType(duplicate_slot), timestamp()); let request_bytes = serve_repair.ancestor_repair_request_bytes( identity_keypair, - root_bank, pubkey, duplicate_slot, nonce, @@ -1164,7 +1161,6 @@ mod test { } = ManageAncestorHashesState::new(vote_simulator.bank_forks); let RepairInfo { - bank_forks, cluster_info: requester_cluster_info, cluster_slots, repair_validators, @@ -1181,7 +1177,6 @@ mod test { &mut repair_stats, &outstanding_requests, &requester_cluster_info.keypair(), - &bank_forks.read().unwrap().root_bank(), ); assert!(ancestor_hashes_request_statuses.is_empty()); @@ -1200,7 +1195,6 @@ mod test { &mut repair_stats, &outstanding_requests, &requester_cluster_info.keypair(), - &bank_forks.read().unwrap().root_bank(), ); assert_eq!(ancestor_hashes_request_statuses.len(), 1); diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 6e5047e3cd114d..07c4d89967dc43 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -272,9 +272,6 @@ impl RepairService { let mut add_votes_elapsed; let root_bank = repair_info.bank_forks.read().unwrap().root_bank(); - let sign_repair_requests_feature_epoch = - ServeRepair::sign_repair_requests_activated_epoch(&root_bank); - let repairs = { let new_root = root_bank.slot(); @@ -331,16 +328,6 @@ impl RepairService { repairs .iter() .filter_map(|repair_request| { - let sign_repair_request = ServeRepair::should_sign_repair_request( - repair_request.slot(), - &root_bank, - sign_repair_requests_feature_epoch, - ); - let maybe_keypair = if sign_repair_request { - Some(identity_keypair) - } else { - None - }; let (to, req) = serve_repair .repair_request( &repair_info.cluster_slots, @@ -349,7 +336,7 @@ impl RepairService { &mut repair_stats, &repair_info.repair_validators, &mut outstanding_requests, - maybe_keypair, + identity_keypair, ) .ok()?; Some((req, to)) @@ -617,6 +604,7 @@ impl RepairService { repair_socket: &UdpSocket, repair_validators: &Option>, outstanding_requests: &RwLock, + identity_keypair: &Keypair, ) { duplicate_slot_repair_statuses.retain(|slot, status| { Self::update_duplicate_slot_repair_addr( @@ -641,6 +629,7 @@ impl RepairService { serve_repair, repair_stats, nonce, + identity_keypair, ) { info!( "repair req send_to {} ({}) error {:?}", @@ -667,13 +656,14 @@ impl RepairService { serve_repair: &ServeRepair, repair_stats: &mut RepairStats, nonce: Nonce, + identity_keypair: &Keypair, ) -> Result<()> { let req = serve_repair.map_repair_request( repair_type, repair_pubkey, repair_stats, nonce, - None, + identity_keypair, )?; repair_socket.send_to(&req, to)?; Ok(()) @@ -1091,10 +1081,9 @@ mod test { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let cluster_slots = ClusterSlots::default(); - let serve_repair = ServeRepair::new( - Arc::new(new_test_cluster_info(Node::new_localhost().info)), - bank_forks, - ); + let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info)); + let identity_keypair = cluster_info.keypair().clone(); + let serve_repair = ServeRepair::new(cluster_info, bank_forks); let mut duplicate_slot_repair_statuses = HashMap::new(); let dead_slot = 9; let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -1129,6 +1118,7 @@ mod test { 
&UdpSocket::bind("0.0.0.0:0").unwrap(), &None, &RwLock::new(OutstandingRequests::default()), + &identity_keypair, ); assert!(duplicate_slot_repair_statuses .get(&dead_slot) @@ -1154,6 +1144,7 @@ mod test { &UdpSocket::bind("0.0.0.0:0").unwrap(), &None, &RwLock::new(OutstandingRequests::default()), + &identity_keypair, ); assert_eq!(duplicate_slot_repair_statuses.len(), 1); assert!(duplicate_slot_repair_statuses.get(&dead_slot).is_some()); @@ -1172,6 +1163,7 @@ mod test { &UdpSocket::bind("0.0.0.0:0").unwrap(), &None, &RwLock::new(OutstandingRequests::default()), + &identity_keypair, ); assert!(duplicate_slot_repair_statuses.is_empty()); } diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index cf349a3b99f5b3..f90489e4c09ef2 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -29,16 +29,14 @@ use { data_budget::DataBudget, packet::{Packet, PacketBatch, PacketBatchRecycler}, }, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::bank_forks::BankForks, solana_sdk::{ clock::Slot, - feature_set::sign_repair_requests, hash::{Hash, HASH_BYTES}, packet::PACKET_DATA_SIZE, pubkey::{Pubkey, PUBKEY_BYTES}, signature::{Signable, Signature, Signer, SIGNATURE_BYTES}, signer::keypair::Keypair, - stake_history::Epoch, timing::{duration_as_ms, timestamp}, }, solana_streamer::{ @@ -317,10 +315,6 @@ impl ServeRepair { } } - fn my_info(&self) -> ContactInfo { - self.cluster_info.my_contact_info() - } - pub(crate) fn my_id(&self) -> Pubkey { self.cluster_info.id() } @@ -429,24 +423,6 @@ impl ServeRepair { } } - pub(crate) fn sign_repair_requests_activated_epoch(root_bank: &Bank) -> Option { - root_bank - .feature_set - .activated_slot(&sign_repair_requests::id()) - .map(|slot| root_bank.epoch_schedule().get_epoch(slot)) - } - - pub(crate) fn should_sign_repair_request( - slot: Slot, - root_bank: &Bank, - sign_repairs_epoch: Option, - ) -> bool { - match sign_repairs_epoch { - None => false, - Some(feature_epoch) => feature_epoch < root_bank.epoch_schedule().get_epoch(slot), - } - } - /// Process messages from the network fn run_listen( &self, @@ -755,38 +731,22 @@ impl ServeRepair { pub fn ancestor_repair_request_bytes( &self, keypair: &Keypair, - root_bank: &Bank, repair_peer_id: &Pubkey, request_slot: Slot, nonce: Nonce, ) -> Result> { - let sign_repairs_epoch = Self::sign_repair_requests_activated_epoch(root_bank); - let require_sig = - Self::should_sign_repair_request(request_slot, root_bank, sign_repairs_epoch); - - let (request_proto, maybe_keypair) = if require_sig { - let header = RepairRequestHeader { - signature: Signature::default(), - sender: self.my_id(), - recipient: *repair_peer_id, - timestamp: timestamp(), - nonce, - }; - ( - RepairProtocol::AncestorHashes { - header, - slot: request_slot, - }, - Some(keypair), - ) - } else { - ( - RepairProtocol::LegacyAncestorHashes(self.my_info(), request_slot, nonce), - None, - ) + let header = RepairRequestHeader { + signature: Signature::default(), + sender: self.my_id(), + recipient: *repair_peer_id, + timestamp: timestamp(), + nonce, }; - - Self::repair_proto_to_bytes(&request_proto, maybe_keypair) + let request = RepairProtocol::AncestorHashes { + header, + slot: request_slot, + }; + Self::repair_proto_to_bytes(&request, keypair) } pub(crate) fn repair_request( @@ -797,7 +757,7 @@ impl ServeRepair { repair_stats: &mut RepairStats, repair_validators: &Option>, outstanding_requests: &mut OutstandingShredRepairs, - identity_keypair: Option<&Keypair>, + identity_keypair: &Keypair, ) -> 
Result<(SocketAddr, Vec)> { // find a peer that appears to be accepting replication and has the desired slot, as indicated // by a valid tvu port location @@ -873,67 +833,41 @@ impl ServeRepair { repair_peer_id: &Pubkey, repair_stats: &mut RepairStats, nonce: Nonce, - identity_keypair: Option<&Keypair>, + identity_keypair: &Keypair, ) -> Result> { - let header = if identity_keypair.is_some() { - Some(RepairRequestHeader { - signature: Signature::default(), - sender: self.my_id(), - recipient: *repair_peer_id, - timestamp: timestamp(), - nonce, - }) - } else { - None + let header = RepairRequestHeader { + signature: Signature::default(), + sender: self.my_id(), + recipient: *repair_peer_id, + timestamp: timestamp(), + nonce, }; let request_proto = match repair_request { ShredRepairType::Shred(slot, shred_index) => { repair_stats .shred .update(repair_peer_id, *slot, *shred_index); - if let Some(header) = header { - RepairProtocol::WindowIndex { - header, - slot: *slot, - shred_index: *shred_index, - } - } else { - RepairProtocol::LegacyWindowIndexWithNonce( - self.my_info(), - *slot, - *shred_index, - nonce, - ) + RepairProtocol::WindowIndex { + header, + slot: *slot, + shred_index: *shred_index, } } ShredRepairType::HighestShred(slot, shred_index) => { repair_stats .highest_shred .update(repair_peer_id, *slot, *shred_index); - if let Some(header) = header { - RepairProtocol::HighestWindowIndex { - header, - slot: *slot, - shred_index: *shred_index, - } - } else { - RepairProtocol::LegacyHighestWindowIndexWithNonce( - self.my_info(), - *slot, - *shred_index, - nonce, - ) + RepairProtocol::HighestWindowIndex { + header, + slot: *slot, + shred_index: *shred_index, } } ShredRepairType::Orphan(slot) => { repair_stats.orphan.update(repair_peer_id, *slot, 0); - if let Some(header) = header { - RepairProtocol::Orphan { - header, - slot: *slot, - } - } else { - RepairProtocol::LegacyOrphanWithNonce(self.my_info(), *slot, nonce) + RepairProtocol::Orphan { + header, + slot: *slot, } } }; @@ -994,17 +928,12 @@ impl ServeRepair { } } - pub fn repair_proto_to_bytes( - request: &RepairProtocol, - keypair: Option<&Keypair>, - ) -> Result> { + pub fn repair_proto_to_bytes(request: &RepairProtocol, keypair: &Keypair) -> Result> { + debug_assert!(request.supports_signature()); let mut payload = serialize(&request)?; - if let Some(keypair) = keypair { - debug_assert!(request.supports_signature()); - let signable_data = [&payload[..4], &payload[4 + SIGNATURE_BYTES..]].concat(); - let signature = keypair.sign_message(&signable_data[..]); - payload[4..4 + SIGNATURE_BYTES].copy_from_slice(signature.as_ref()); - } + let signable_data = [&payload[..4], &payload[4 + SIGNATURE_BYTES..]].concat(); + let signature = keypair.sign_message(&signable_data[..]); + payload[4..4 + SIGNATURE_BYTES].copy_from_slice(signature.as_ref()); Ok(payload) } @@ -1233,7 +1162,7 @@ mod tests { &repair_peer_id, &mut RepairStats::default(), 456, - Some(&keypair), + &keypair, ) .unwrap(); @@ -1257,7 +1186,7 @@ mod tests { #[test] fn test_serialize_deserialize_ancestor_hashes_request() { - let slot = 50; + let slot: Slot = 50; let nonce = 70; let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me)); @@ -1268,11 +1197,10 @@ mod tests { let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let serve_repair = ServeRepair::new(cluster_info.clone(), 
bank_forks.clone()); + let serve_repair = ServeRepair::new(cluster_info, bank_forks); - let root_bank = bank_forks.read().unwrap().root_bank(); let request_bytes = serve_repair - .ancestor_repair_request_bytes(&keypair, &root_bank, &repair_peer_id, slot, nonce) + .ancestor_repair_request_bytes(&keypair, &repair_peer_id, slot, nonce) .unwrap(); let mut cursor = Cursor::new(&request_bytes[..]); let deserialized_request: RepairProtocol = @@ -1294,35 +1222,10 @@ mod tests { } else { panic!("unexpected request type {:?}", &deserialized_request); } - - let mut bank = Bank::new_for_tests(&genesis_config); - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&sign_repair_requests::id()); - bank.feature_set = Arc::new(feature_set); - let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let serve_repair = ServeRepair::new(cluster_info, bank_forks.clone()); - - let root_bank = bank_forks.read().unwrap().root_bank(); - let request_bytes = serve_repair - .ancestor_repair_request_bytes(&keypair, &root_bank, &repair_peer_id, slot, nonce) - .unwrap(); - let mut cursor = Cursor::new(&request_bytes[..]); - let deserialized_request: RepairProtocol = - deserialize_from_with_limit(&mut cursor).unwrap(); - assert_eq!(cursor.position(), request_bytes.len() as u64); - if let RepairProtocol::LegacyAncestorHashes(ci, deserialized_slot, deserialized_nonce) = - deserialized_request - { - assert_eq!(slot, deserialized_slot); - assert_eq!(nonce, deserialized_nonce); - assert_eq!(&serve_repair.my_id(), &ci.id); - } else { - panic!("unexpected request type {:?}", &deserialized_request); - } } #[test] - fn test_map_requests_signed_unsigned() { + fn test_map_requests_signed() { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); @@ -1337,20 +1240,20 @@ mod tests { let nonce = 70; let request = ShredRepairType::Shred(slot, shred_index); - let rsp = serve_repair + let request_bytes = serve_repair .map_repair_request( &request, &repair_peer_id, &mut RepairStats::default(), nonce, - Some(&keypair), + &keypair, ) .unwrap(); - let mut cursor = Cursor::new(&rsp[..]); + let mut cursor = Cursor::new(&request_bytes[..]); let deserialized_request: RepairProtocol = deserialize_from_with_limit(&mut cursor).unwrap(); - assert_eq!(cursor.position(), rsp.len() as u64); + assert_eq!(cursor.position(), request_bytes.len() as u64); if let RepairProtocol::WindowIndex { header, slot: deserialized_slot, @@ -1362,7 +1265,7 @@ mod tests { assert_eq!(header.nonce, nonce); assert_eq!(&header.sender, &serve_repair.my_id()); assert_eq!(&header.recipient, &repair_peer_id); - let signed_data = [&rsp[..4], &rsp[4 + SIGNATURE_BYTES..]].concat(); + let signed_data = [&request_bytes[..4], &request_bytes[4 + SIGNATURE_BYTES..]].concat(); assert!(header .signature .verify(keypair.pubkey().as_ref(), &signed_data)); @@ -1370,50 +1273,21 @@ mod tests { panic!("unexpected request type {:?}", &deserialized_request); } - let rsp = serve_repair - .map_repair_request( - &request, - &repair_peer_id, - &mut RepairStats::default(), - nonce, - None, - ) - .unwrap(); - - let mut cursor = Cursor::new(&rsp[..]); - let deserialized_request: RepairProtocol = - deserialize_from_with_limit(&mut cursor).unwrap(); - assert_eq!(cursor.position(), rsp.len() as u64); - if let RepairProtocol::LegacyWindowIndexWithNonce( - ci, - deserialized_slot, - deserialized_shred_index, - deserialized_nonce, - ) = 
deserialized_request - { - assert_eq!(slot, deserialized_slot); - assert_eq!(shred_index, deserialized_shred_index); - assert_eq!(nonce, deserialized_nonce); - assert_eq!(&serve_repair.my_id(), &ci.id); - } else { - panic!("unexpected request type {:?}", &deserialized_request); - } - let request = ShredRepairType::HighestShred(slot, shred_index); - let rsp = serve_repair + let request_bytes = serve_repair .map_repair_request( &request, &repair_peer_id, &mut RepairStats::default(), nonce, - Some(&keypair), + &keypair, ) .unwrap(); - let mut cursor = Cursor::new(&rsp[..]); + let mut cursor = Cursor::new(&request_bytes[..]); let deserialized_request: RepairProtocol = deserialize_from_with_limit(&mut cursor).unwrap(); - assert_eq!(cursor.position(), rsp.len() as u64); + assert_eq!(cursor.position(), request_bytes.len() as u64); if let RepairProtocol::HighestWindowIndex { header, slot: deserialized_slot, @@ -1425,42 +1299,13 @@ mod tests { assert_eq!(header.nonce, nonce); assert_eq!(&header.sender, &serve_repair.my_id()); assert_eq!(&header.recipient, &repair_peer_id); - let signed_data = [&rsp[..4], &rsp[4 + SIGNATURE_BYTES..]].concat(); + let signed_data = [&request_bytes[..4], &request_bytes[4 + SIGNATURE_BYTES..]].concat(); assert!(header .signature .verify(keypair.pubkey().as_ref(), &signed_data)); } else { panic!("unexpected request type {:?}", &deserialized_request); } - - let rsp = serve_repair - .map_repair_request( - &request, - &repair_peer_id, - &mut RepairStats::default(), - nonce, - None, - ) - .unwrap(); - - let mut cursor = Cursor::new(&rsp[..]); - let deserialized_request: RepairProtocol = - deserialize_from_with_limit(&mut cursor).unwrap(); - assert_eq!(cursor.position(), rsp.len() as u64); - if let RepairProtocol::LegacyHighestWindowIndexWithNonce( - ci, - deserialized_slot, - deserialized_shred_index, - deserialized_nonce, - ) = deserialized_request - { - assert_eq!(slot, deserialized_slot); - assert_eq!(shred_index, deserialized_shred_index); - assert_eq!(nonce, deserialized_nonce); - assert_eq!(&serve_repair.my_id(), &ci.id); - } else { - panic!("unexpected request type {:?}", &deserialized_request); - } } #[test] @@ -1687,6 +1532,7 @@ mod tests { let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let cluster_info = Arc::new(new_test_cluster_info(me)); let serve_repair = ServeRepair::new(cluster_info.clone(), bank_forks); + let identity_keypair = cluster_info.keypair().clone(); let mut outstanding_requests = OutstandingShredRepairs::default(); let rv = serve_repair.repair_request( &cluster_slots, @@ -1695,7 +1541,7 @@ mod tests { &mut RepairStats::default(), &None, &mut outstanding_requests, - None, + &identity_keypair, ); assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers))); @@ -1724,7 +1570,7 @@ mod tests { &mut RepairStats::default(), &None, &mut outstanding_requests, - None, + &identity_keypair, ) .unwrap(); assert_eq!(nxt.serve_repair, serve_repair_addr); @@ -1759,7 +1605,7 @@ mod tests { &mut RepairStats::default(), &None, &mut outstanding_requests, - None, + &identity_keypair, ) .unwrap(); if rv.0 == serve_repair_addr { @@ -2015,6 +1861,7 @@ mod tests { ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); cluster_info.insert_info(contact_info2.clone()); cluster_info.insert_info(contact_info3.clone()); + let identity_keypair = cluster_info.keypair().clone(); let serve_repair = ServeRepair::new(cluster_info, bank_forks); // If: @@ -2032,7 +1879,7 @@ mod tests { &mut RepairStats::default(), 
&known_validators, &mut OutstandingShredRepairs::default(), - None, + &identity_keypair, ) .is_err()); } @@ -2050,7 +1897,7 @@ mod tests { &mut RepairStats::default(), &known_validators, &mut OutstandingShredRepairs::default(), - None, + &identity_keypair, ) .is_ok()); @@ -2072,7 +1919,7 @@ mod tests { &mut RepairStats::default(), &None, &mut OutstandingShredRepairs::default(), - None, + &identity_keypair, ) .is_ok()); } diff --git a/dos/src/main.rs b/dos/src/main.rs index bdebdac5f3cdc4..5db23d169762ed 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -646,7 +646,7 @@ fn run_dos( slot, shred_index: 0, }; - ServeRepair::repair_proto_to_bytes(&req, Some(&keypair)).unwrap() + ServeRepair::repair_proto_to_bytes(&req, &keypair).unwrap() } DataType::RepairShred => { let slot = 100; @@ -657,14 +657,14 @@ fn run_dos( slot, shred_index: 0, }; - ServeRepair::repair_proto_to_bytes(&req, Some(&keypair)).unwrap() + ServeRepair::repair_proto_to_bytes(&req, &keypair).unwrap() } DataType::RepairOrphan => { let slot = 100; let keypair = Keypair::new(); let header = RepairRequestHeader::new(keypair.pubkey(), target_id, timestamp(), 0); let req = RepairProtocol::Orphan { header, slot }; - ServeRepair::repair_proto_to_bytes(&req, Some(&keypair)).unwrap() + ServeRepair::repair_proto_to_bytes(&req, &keypair).unwrap() } DataType::Random => { vec![0; params.data_size] diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index b64d3f28709e62..f7c29af746706b 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -482,10 +482,6 @@ pub mod compact_vote_state_updates { solana_sdk::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); } -pub mod sign_repair_requests { - solana_sdk::declare_id!("sigrs6u1EWeHuoKFkY8RR7qcSsPmrAeBBPESyf5pnYe"); -} - pub mod incremental_snapshot_only_incremental_hash_calculation { solana_sdk::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); } @@ -641,7 +637,6 @@ lazy_static! { (loosen_cpi_size_restriction::id(), "loosen cpi size restrictions #26641"), (use_default_units_in_fee_calculation::id(), "use default units per instruction in fee calculation #26785"), (compact_vote_state_updates::id(), "Compact vote state updates to lower block size"), - (sign_repair_requests::id(), "sign repair requests #26834"), (incremental_snapshot_only_incremental_hash_calculation::id(), "only hash accounts in incremental snapshot during incremental snapshot creation #26799"), (disable_cpi_setting_executable_and_rent_epoch::id(), "disable setting is_executable and_rent_epoch in CPI #26987"), (relax_authority_signer_check_for_lookup_table_creation::id(), "relax authority signer check for lookup table creation #27205"), From 27a08571210b28ecbc7a8366b3aecc1ca5e65f5f Mon Sep 17 00:00:00 2001 From: Eloy Date: Tue, 11 Oct 2022 04:17:15 +0200 Subject: [PATCH 48/65] Documentation fixes and improvements (#28312) * Fix CONTRIBUTING.md docs, accepted/implemented proposals links * Add Fedora Linux needed packages in README.md * Remove trailing whitespace Co-authored-by: Michael Vines --- CONTRIBUTING.md | 4 ++-- README.md | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4a64afe79e7b3c..280ddecaf3471a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -255,7 +255,7 @@ directory and viewable on the official [Solana Documentation](https://docs.solan Current design proposals may be viewed on the docs site: -1. [Accepted Proposals](https://docs.solana.com/proposals/accepted-design-proposals.md). -2. 
[Implemented Proposals](https://docs.solana.com/implemented-proposals/implemented-proposals.md) +1. [Accepted Proposals](https://docs.solana.com/proposals/accepted-design-proposals) +2. [Implemented Proposals](https://docs.solana.com/implemented-proposals/implemented-proposals) New design proposals should follow this guide on [how to submit a design proposal](./docs/src/proposals.md#submit-a-design-proposal). \ No newline at end of file diff --git a/README.md b/README.md index e76cfc43a1d6ca..b5499fa0e5a4c6 100644 --- a/README.md +++ b/README.md @@ -31,13 +31,19 @@ $ rustup install VERSION ``` Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version. -On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc. On Ubuntu: +On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc. +On Ubuntu: ```bash $ sudo apt-get update $ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make libprotobuf-dev protobuf-compiler ``` +On Fedora: +```bash +$ sudo dnf install openssl-devel systemd-devel pkg-config zlib-devel llvm clang cmake make protobuf-devel protobuf-compiler perl-core +``` + ## **2. Download the source code.** ```bash From bcbf4c7fb34729a9303ca5375e67c7d4b508d5e1 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 11 Oct 2022 11:11:15 +0800 Subject: [PATCH 49/65] Explorer: Support displaying closed program accounts (#28336) --- .../UpgradeableLoaderAccountSection.tsx | 114 +++++++++--------- explorer/src/providers/accounts/index.tsx | 4 - 2 files changed, 58 insertions(+), 60 deletions(-) diff --git a/explorer/src/components/account/UpgradeableLoaderAccountSection.tsx b/explorer/src/components/account/UpgradeableLoaderAccountSection.tsx index 83f9a3e2755064..73450019cea818 100644 --- a/explorer/src/components/account/UpgradeableLoaderAccountSection.tsx +++ b/explorer/src/components/account/UpgradeableLoaderAccountSection.tsx @@ -12,7 +12,6 @@ import { import { Slot } from "components/common/Slot"; import { addressLabel } from "utils/tx"; import { useCluster } from "providers/cluster"; -import { ErrorCard } from "components/common/ErrorCard"; import { UnknownAccountCard } from "components/account/UnknownAccountCard"; import { Downloadable } from "components/common/Downloadable"; import { CheckingBadge, VerifiedBadge } from "components/common/VerifiedBadge"; @@ -31,9 +30,6 @@ export function UpgradeableLoaderAccountSection({ }) { switch (parsedData.type) { case "program": { - if (programData === undefined) { - return ; - } return (

-            Program Account
+            {programData === undefined && "Closed "}Program Account

diff --git a/explorer/src/providers/accounts/index.tsx b/explorer/src/providers/accounts/index.tsx index 74f5f1525729c5..77b29aa406b0c3 100644 --- a/explorer/src/providers/accounts/index.tsx +++ b/explorer/src/providers/accounts/index.tsx @@ -197,10 +197,6 @@ async function fetchAccountInfo( ) { const info = create(result.data.parsed, ParsedInfo); programData = create(info, ProgramDataAccount).info; - } else { - throw new Error( - `invalid program data account for program: ${pubkey.toBase58()}` - ); } } From 928730ac949c0a31e6d3023f27499a354bdb77d7 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 11 Oct 2022 14:27:58 +0800 Subject: [PATCH 50/65] Explorer: Support additional bpf upgradeable loader instructions (#28342) --- .../BpfUpgradeableLoaderDetailsCard.tsx | 30 +++++++--- .../bpf-upgradeable-loader/types.ts | 57 ++++++++++++------- 2 files changed, 59 insertions(+), 28 deletions(-) diff --git a/explorer/src/components/instruction/bpf-upgradeable-loader/BpfUpgradeableLoaderDetailsCard.tsx b/explorer/src/components/instruction/bpf-upgradeable-loader/BpfUpgradeableLoaderDetailsCard.tsx index b09c5a1ac535d6..03d6d81af9891c 100644 --- a/explorer/src/components/instruction/bpf-upgradeable-loader/BpfUpgradeableLoaderDetailsCard.tsx +++ b/explorer/src/components/instruction/bpf-upgradeable-loader/BpfUpgradeableLoaderDetailsCard.tsx @@ -13,7 +13,9 @@ import { ParsedInfo } from "validators"; import { InstructionCard } from "../InstructionCard"; import { UnknownDetailsCard } from "../UnknownDetailsCard"; import { + CloseInfo, DeployWithMaxDataLenInfo, + ExtendProgramInfo, InitializeBufferInfo, SetAuthorityInfo, UpgradeInfo, @@ -33,15 +35,16 @@ export function BpfUpgradeableLoaderDetailsCard(props: DetailsProps) { try { const parsed = create(props.ix.parsed, ParsedInfo); switch (parsed.type) { + case "initializeBuffer": { + return renderDetails( + props, + parsed, + InitializeBufferInfo + ); + } case "write": { return renderDetails(props, parsed, WriteInfo); } - case "upgrade": { - return renderDetails(props, parsed, UpgradeInfo); - } - case "setAuthority": { - return renderDetails(props, parsed, SetAuthorityInfo); - } case "deployWithMaxDataLen": { return renderDetails( props, @@ -49,11 +52,20 @@ export function BpfUpgradeableLoaderDetailsCard(props: DetailsProps) { DeployWithMaxDataLenInfo ); } - case "initializeBuffer": { - return renderDetails( + case "upgrade": { + return renderDetails(props, parsed, UpgradeInfo); + } + case "setAuthority": { + return renderDetails(props, parsed, SetAuthorityInfo); + } + case "close": { + return renderDetails(props, parsed, CloseInfo); + } + case "extendProgram": { + return renderDetails( props, parsed, - InitializeBufferInfo + ExtendProgramInfo ); } default: diff --git a/explorer/src/components/instruction/bpf-upgradeable-loader/types.ts b/explorer/src/components/instruction/bpf-upgradeable-loader/types.ts index 51c780686a0332..c8f06645379add 100644 --- a/explorer/src/components/instruction/bpf-upgradeable-loader/types.ts +++ b/explorer/src/components/instruction/bpf-upgradeable-loader/types.ts @@ -1,19 +1,32 @@ /* eslint-disable @typescript-eslint/no-redeclare */ -import { enums, nullable, number, type, string, Infer } from "superstruct"; +import { enums, number, type, string, Infer, optional } from "superstruct"; import { PublicKeyFromString } from "validators/pubkey"; +export type InitializeBufferInfo = Infer; +export const InitializeBufferInfo = type({ + account: PublicKeyFromString, + authority: PublicKeyFromString, +}); + export type WriteInfo 
= Infer; export const WriteInfo = type({ + offset: number(), + bytes: string(), account: PublicKeyFromString, authority: PublicKeyFromString, - bytes: string(), - offset: number(), }); -export type InitializeBufferInfo = Infer; -export const InitializeBufferInfo = type({ - account: PublicKeyFromString, +export type DeployWithMaxDataLenInfo = Infer; +export const DeployWithMaxDataLenInfo = type({ + maxDataLen: number(), + payerAccount: PublicKeyFromString, + programDataAccount: PublicKeyFromString, + programAccount: PublicKeyFromString, + bufferAccount: PublicKeyFromString, + rentSysvar: PublicKeyFromString, + clockSysvar: PublicKeyFromString, + systemProgram: PublicKeyFromString, authority: PublicKeyFromString, }); @@ -23,29 +36,33 @@ export const UpgradeInfo = type({ programAccount: PublicKeyFromString, bufferAccount: PublicKeyFromString, spillAccount: PublicKeyFromString, - authority: PublicKeyFromString, rentSysvar: PublicKeyFromString, clockSysvar: PublicKeyFromString, + authority: PublicKeyFromString, }); export type SetAuthorityInfo = Infer; export const SetAuthorityInfo = type({ account: PublicKeyFromString, authority: PublicKeyFromString, - newAuthority: nullable(PublicKeyFromString), + newAuthority: optional(PublicKeyFromString), }); -export type DeployWithMaxDataLenInfo = Infer; -export const DeployWithMaxDataLenInfo = type({ +export type CloseInfo = Infer; +export const CloseInfo = type({ + account: PublicKeyFromString, + recipient: PublicKeyFromString, + authority: PublicKeyFromString, + programAccount: optional(PublicKeyFromString), +}); + +export type ExtendProgramInfo = Infer; +export const ExtendProgramInfo = type({ + additionalBytes: number(), programDataAccount: PublicKeyFromString, programAccount: PublicKeyFromString, - payerAccount: PublicKeyFromString, - bufferAccount: PublicKeyFromString, - authority: PublicKeyFromString, - rentSysvar: PublicKeyFromString, - clockSysvar: PublicKeyFromString, - systemProgram: PublicKeyFromString, - maxDataLen: number(), + systemProgram: optional(PublicKeyFromString), + payerAccount: optional(PublicKeyFromString), }); export type UpgradeableBpfLoaderInstructionType = Infer< @@ -53,8 +70,10 @@ export type UpgradeableBpfLoaderInstructionType = Infer< >; export const UpgradeableBpfLoaderInstructionType = enums([ "initializeBuffer", + "write", "deployWithMaxDataLen", + "upgrade", "setAuthority", - "write", - "finalize", + "close", + "extendProgram", ]); From 33a328e5891408e2f3dcdc6cad83337ff2005544 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 11 Oct 2022 16:13:10 +0800 Subject: [PATCH 51/65] feat: add getParsedBlock method to Connection (#28345) * feat: add getParsedBlock method to Connection * Update web3.js/src/connection.ts Co-authored-by: Yihau Chen Co-authored-by: Yihau Chen --- web3.js/src/connection.ts | 90 +++++++++++++++++++++++++++++++++ web3.js/test/connection.test.ts | 18 +++++++ 2 files changed, 108 insertions(+) diff --git a/web3.js/src/connection.ts b/web3.js/src/connection.ts index 2741dea65e0294..411a2c3ab28154 100644 --- a/web3.js/src/connection.ts +++ b/web3.js/src/connection.ts @@ -1143,6 +1143,42 @@ export type BlockResponse = { blockTime: number | null; }; +/** + * A block with parsed transactions + */ +export type ParsedBlockResponse = { + /** Blockhash of this block */ + blockhash: Blockhash; + /** Blockhash of this block's parent */ + previousBlockhash: Blockhash; + /** Slot index of this block's parent */ + parentSlot: number; + /** Vector of transactions with status meta and original message */ + 
transactions: Array<{ + /** The details of the transaction */ + transaction: ParsedTransaction; + /** Metadata produced from the transaction */ + meta: ParsedTransactionMeta | null; + /** The transaction version */ + version?: TransactionVersion; + }>; + /** Vector of block rewards */ + rewards?: Array<{ + /** Public key of reward recipient */ + pubkey: string; + /** Reward value in lamports */ + lamports: number; + /** Account balance after reward is applied */ + postBalance: number | null; + /** Type of reward received */ + rewardType: string | null; + }>; + /** The unix timestamp of when the block was processed */ + blockTime: number | null; + /** The number of blocks beneath this block */ + blockHeight: number | null; +}; + /** * A processed block fetched from the RPC API */ @@ -2081,6 +2117,38 @@ const GetBlockRpcResult = jsonRpcResult( ), ); +/** + * Expected parsed JSON RPC response for the "getBlock" message + */ +const GetParsedBlockRpcResult = jsonRpcResult( + nullable( + pick({ + blockhash: string(), + previousBlockhash: string(), + parentSlot: number(), + transactions: array( + pick({ + transaction: ParsedConfirmedTransactionResult, + meta: nullable(ParsedConfirmedTransactionMetaResult), + version: optional(TransactionVersionStruct), + }), + ), + rewards: optional( + array( + pick({ + pubkey: string(), + lamports: number(), + postBalance: nullable(number()), + rewardType: nullable(string()), + }), + ), + ), + blockTime: nullable(number()), + blockHeight: nullable(number()), + }), + ), +); + /** * Expected JSON RPC response for the "getConfirmedBlock" message * @@ -3878,6 +3946,28 @@ export class Connection { }; } + /** + * Fetch parsed transaction details for a confirmed or finalized block + */ + async getParsedBlock( + slot: number, + rawConfig?: GetVersionedBlockConfig, + ): Promise { + const {commitment, config} = extractCommitmentFromConfig(rawConfig); + const args = this._buildArgsAtLeastConfirmed( + [slot], + commitment as Finality, + 'jsonParsed', + config, + ); + const unsafeRes = await this._rpcRequest('getBlock', args); + const res = create(unsafeRes, GetParsedBlockRpcResult); + if ('error' in res) { + throw new SolanaJSONRPCError(res.error, 'failed to get block'); + } + return res.result; + } + /* * Returns the current block height of the node */ diff --git a/web3.js/test/connection.test.ts b/web3.js/test/connection.test.ts index a5f8724a2cd44b..fc5599dea8374d 100644 --- a/web3.js/test/connection.test.ts +++ b/web3.js/test/connection.test.ts @@ -4621,6 +4621,24 @@ describe('Connection', function () { } expect(foundTx).to.be.true; }); + + it('getParsedBlock', async () => { + const block = await connection.getParsedBlock(transactionSlot, { + maxSupportedTransactionVersion: 0, + commitment: 'confirmed', + }); + expect(block).to.not.be.null; + if (block === null) throw new Error(); // unreachable + + let foundTx = false; + for (const tx of block.transactions) { + if (tx.transaction.signatures[0] === signature) { + foundTx = true; + expect(tx.version).to.eq(0); + } + } + expect(foundTx).to.be.true; + }); }).timeout(5 * 1000); } }); From 391c15bb5b994922b5eeb8493b28ee652470cbb3 Mon Sep 17 00:00:00 2001 From: Kevin Heavey <24635973+kevinheavey@users.noreply.github.com> Date: Tue, 11 Oct 2022 17:39:02 +0300 Subject: [PATCH 52/65] remove unused deps from solana-stake-program (#27929) Co-authored-by: Kevin Heavey --- Cargo.lock | 8 -------- programs/bpf/Cargo.lock | 8 -------- programs/stake/Cargo.toml | 8 -------- 3 files changed, 24 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index 43d921659fc3ee..4864e616ef42af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6327,22 +6327,14 @@ dependencies = [ "assert_matches", "bincode", "log", - "num-derive", - "num-traits", "proptest", "rustc_version 0.4.0", - "serde", - "serde_derive", "solana-config-program", - "solana-frozen-abi 1.15.0", - "solana-frozen-abi-macro 1.15.0", "solana-logger 1.15.0", - "solana-metrics", "solana-program-runtime", "solana-sdk 1.15.0", "solana-vote-program", "test-case", - "thiserror", ] [[package]] diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 0bad7e57f405c7..0ced4666c766bd 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -5637,19 +5637,11 @@ version = "1.15.0" dependencies = [ "bincode", "log", - "num-derive", - "num-traits", "rustc_version", - "serde", - "serde_derive", "solana-config-program", - "solana-frozen-abi 1.15.0", - "solana-frozen-abi-macro 1.15.0", - "solana-metrics", "solana-program-runtime", "solana-sdk 1.15.0", "solana-vote-program", - "thiserror", ] [[package]] diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 03f57937525939..ea4eb188a053cd 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -12,18 +12,10 @@ edition = "2021" [dependencies] bincode = "1.3.3" log = "0.4.17" -num-derive = "0.3" -num-traits = "0.2" -serde = "1.0.144" -serde_derive = "1.0.103" solana-config-program = { path = "../config", version = "=1.15.0" } -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.15.0" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.15.0" } -solana-metrics = { path = "../../metrics", version = "=1.15.0" } solana-program-runtime = { path = "../../program-runtime", version = "=1.15.0" } solana-sdk = { path = "../../sdk", version = "=1.15.0" } solana-vote-program = { path = "../vote", version = "=1.15.0" } -thiserror = "1.0" [dev-dependencies] assert_matches = "1.5.0" From 7a120b8b62c5d93d49fd84b7ef207883f0b66944 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 11 Oct 2022 07:43:03 -0700 Subject: [PATCH 53/65] accounts_db::load returns None for zero lamport accounts (#28311) --- runtime/src/accounts.rs | 15 +------ runtime/src/accounts_db.rs | 87 +++++++++++++++++++++++++++++++++----- 2 files changed, 78 insertions(+), 24 deletions(-) diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 91aebe47ff0711..2b41ddf03fa447 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -635,26 +635,15 @@ impl Accounts { } } - fn filter_zero_lamport_account( - account: AccountSharedData, - slot: Slot, - ) -> Option<(AccountSharedData, Slot)> { - if account.lamports() > 0 { - Some((account, slot)) - } else { - None - } - } - /// Slow because lock is held for 1 operation instead of many + /// This always returns None for zero-lamport accounts. 
fn load_slow( &self, ancestors: &Ancestors, pubkey: &Pubkey, load_hint: LoadHint, ) -> Option<(AccountSharedData, Slot)> { - let (account, slot) = self.accounts_db.load(ancestors, pubkey, load_hint)?; - Self::filter_zero_lamport_account(account, slot) + self.accounts_db.load(ancestors, pubkey, load_hint) } pub fn load_with_fixed_root( diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 9f84829bf921a6..20953f74c0c568 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -145,6 +145,20 @@ pub enum StoreReclaims { Ignore, } +/// specifies how to return zero lamport accounts from a load +#[derive(Clone, Copy)] +enum LoadZeroLamports { + /// return None if loaded account has zero lamports + None, + /// return Some(account with zero lamports) if loaded account has zero lamports + /// This used to be the only behavior. + /// Note that this is non-deterministic if clean is running asynchronously. + /// If a zero lamport account exists in the index, then Some is returned. + /// Once it is cleaned from the index, None is returned. + #[cfg(test)] + SomeWithZeroLamportAccountForTests, +} + // the current best way to add filler accounts is gradually. // In other scenarios, such as monitoring catchup with large # of accounts, it may be useful to be able to // add filler accounts at the beginning, so that code path remains but won't execute at the moment. @@ -4829,11 +4843,19 @@ impl AccountsDb { pubkey: &Pubkey, load_hint: LoadHint, ) -> Option<(AccountSharedData, Slot)> { - self.do_load(ancestors, pubkey, None, load_hint) + self.do_load(ancestors, pubkey, None, load_hint, LoadZeroLamports::None) } pub fn load_account_into_read_cache(&self, ancestors: &Ancestors, pubkey: &Pubkey) { - self.do_load_with_populate_read_cache(ancestors, pubkey, None, LoadHint::Unspecified, true); + self.do_load_with_populate_read_cache( + ancestors, + pubkey, + None, + LoadHint::Unspecified, + true, + // no return from this function, so irrelevant + LoadZeroLamports::None, + ); } /// note this returns None for accounts with zero lamports @@ -4843,7 +4865,6 @@ impl AccountsDb { pubkey: &Pubkey, ) -> Option<(AccountSharedData, Slot)> { self.load(ancestors, pubkey, LoadHint::FixedMaxRoot) - .filter(|(account, _)| !account.is_zero_lamport()) } fn read_index_for_accessor_or_load_slow<'a>( @@ -5172,8 +5193,16 @@ impl AccountsDb { pubkey: &Pubkey, max_root: Option, load_hint: LoadHint, + load_zero_lamports: LoadZeroLamports, ) -> Option<(AccountSharedData, Slot)> { - self.do_load_with_populate_read_cache(ancestors, pubkey, max_root, load_hint, false) + self.do_load_with_populate_read_cache( + ancestors, + pubkey, + max_root, + load_hint, + false, + load_zero_lamports, + ) } /// if 'load_into_read_cache_only', then return value is meaningless. 
@@ -5185,6 +5214,7 @@ impl AccountsDb { max_root: Option, load_hint: LoadHint, load_into_read_cache_only: bool, + load_zero_lamports: LoadZeroLamports, ) -> Option<(AccountSharedData, Slot)> { #[cfg(not(test))] assert!(max_root.is_none()); @@ -5199,6 +5229,11 @@ impl AccountsDb { if !in_write_cache { let result = self.read_only_accounts_cache.load(*pubkey, slot); if let Some(account) = result { + if matches!(load_zero_lamports, LoadZeroLamports::None) + && account.is_zero_lamport() + { + return None; + } return Some((account, slot)); } } @@ -5226,6 +5261,9 @@ impl AccountsDb { let loaded_account = account_accessor.check_and_get_loaded_account(); let is_cached = loaded_account.is_cached(); let account = loaded_account.take_account(); + if matches!(load_zero_lamports, LoadZeroLamports::None) && account.is_zero_lamport() { + return None; + } if self.caching_enabled && !is_cached { /* @@ -9665,7 +9703,14 @@ pub mod tests { ancestors: &Ancestors, pubkey: &Pubkey, ) -> Option<(AccountSharedData, Slot)> { - self.load(ancestors, pubkey, LoadHint::Unspecified) + self.do_load( + ancestors, + pubkey, + None, + LoadHint::Unspecified, + // callers of this expect zero lamport accounts that exist in the index to be returned as Some(empty) + LoadZeroLamports::SomeWithZeroLamportAccountForTests, + ) } } @@ -14080,6 +14125,9 @@ pub mod tests { assert_eq!(db.read_only_accounts_cache.cache_len(), 1); } + /// a test that will accept either answer + const LOAD_ZERO_LAMPORTS_ANY_TESTS: LoadZeroLamports = LoadZeroLamports::None; + #[test] fn test_flush_cache_clean() { let caching_enabled = true; @@ -14109,6 +14157,7 @@ pub mod tests { &account_key, Some(0), LoadHint::Unspecified, + LoadZeroLamports::SomeWithZeroLamportAccountForTests, ) .unwrap(); assert_eq!(account.0.lamports(), 0); @@ -14124,7 +14173,8 @@ pub mod tests { &Ancestors::default(), &account_key, Some(0), - LoadHint::Unspecified + LoadHint::Unspecified, + LOAD_ZERO_LAMPORTS_ANY_TESTS ) .is_none()); } @@ -14208,7 +14258,8 @@ pub mod tests { &Ancestors::default(), &zero_lamport_account_key, max_root, - load_hint + load_hint, + LoadZeroLamports::SomeWithZeroLamportAccountForTests, ) .unwrap() .0 @@ -14337,6 +14388,7 @@ pub mod tests { &account_key, Some(0), LoadHint::Unspecified, + LoadZeroLamports::SomeWithZeroLamportAccountForTests, ) .unwrap(); assert_eq!(account.0.lamports(), zero_lamport_account.lamports()); @@ -14350,6 +14402,7 @@ pub mod tests { &account_key, Some(max_scan_root), LoadHint::Unspecified, + LOAD_ZERO_LAMPORTS_ANY_TESTS, ) .unwrap(); assert_eq!(account.0.lamports(), slot1_account.lamports()); @@ -14364,6 +14417,7 @@ pub mod tests { &account_key, Some(max_scan_root), LoadHint::Unspecified, + LOAD_ZERO_LAMPORTS_ANY_TESTS, ) .unwrap(); assert_eq!(account.0.lamports(), slot1_account.lamports()); @@ -14376,7 +14430,8 @@ pub mod tests { &scan_ancestors, &account_key, Some(max_scan_root), - LoadHint::Unspecified + LoadHint::Unspecified, + LOAD_ZERO_LAMPORTS_ANY_TESTS ) .is_none()); } @@ -14540,7 +14595,8 @@ pub mod tests { &Ancestors::default(), key, Some(last_dead_slot), - LoadHint::Unspecified + LoadHint::Unspecified, + LOAD_ZERO_LAMPORTS_ANY_TESTS ) .is_some()); } @@ -14568,7 +14624,8 @@ pub mod tests { &Ancestors::default(), key, Some(last_dead_slot), - LoadHint::Unspecified + LoadHint::Unspecified, + LOAD_ZERO_LAMPORTS_ANY_TESTS ) .is_none()); } @@ -15101,7 +15158,15 @@ pub mod tests { .store(thread_rng().gen_range(0, 10) as u64, Ordering::Relaxed); // Load should never be unable to find this key - let loaded_account = 
db.do_load(&ancestors, &pubkey, None, load_hint).unwrap(); + let loaded_account = db + .do_load( + &ancestors, + &pubkey, + None, + load_hint, + LOAD_ZERO_LAMPORTS_ANY_TESTS, + ) + .unwrap(); // slot + 1 == account.lamports because of the account-cache-flush thread assert_eq!( loaded_account.0.lamports(), From cdf77bb824252fedabc9deaac643a26b803d65be Mon Sep 17 00:00:00 2001 From: Ian Macalinao Date: Tue, 11 Oct 2022 09:47:10 -0500 Subject: [PATCH 54/65] runtime: fix typo 'transaction' (#27948) --- runtime/src/bank.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 760639aa59a1ad..0e4595c212aaad 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1522,7 +1522,7 @@ impl Bank { let (transaction_debug_keys, transaction_debug_keys_time) = measure!( parent.transaction_debug_keys.clone(), - "transation_debug_keys_creation", + "transaction_debug_keys_creation", ); let (transaction_log_collector_config, transaction_log_collector_config_time) = measure!( From e2fc9d51ded89f791e4331e626d20e5bdb6c9382 Mon Sep 17 00:00:00 2001 From: Jason Davis Date: Wed, 5 Oct 2022 11:48:03 -0500 Subject: [PATCH 55/65] Increase cpu metric reporting interval from 1s to 10s --- core/src/system_monitor_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 57f11f7dff5196..a562784407a525 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -27,7 +27,7 @@ const MS_PER_H: u64 = MS_PER_M * 60; const SAMPLE_INTERVAL_UDP_MS: u64 = 2 * MS_PER_S; const SAMPLE_INTERVAL_OS_NETWORK_LIMITS_MS: u64 = MS_PER_H; const SAMPLE_INTERVAL_MEM_MS: u64 = MS_PER_S; -const SAMPLE_INTERVAL_CPU_MS: u64 = MS_PER_S; +const SAMPLE_INTERVAL_CPU_MS: u64 = 10 * MS_PER_S; const SAMPLE_INTERVAL_DISK_MS: u64 = MS_PER_S; const SLEEP_INTERVAL: Duration = Duration::from_millis(500); From 020cc090d55a2a505eaa1a1ba0e07a8c726b0de8 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Tue, 11 Oct 2022 13:17:06 -0400 Subject: [PATCH 56/65] Refactors ABS snapshot request handling (#28326) --- runtime/src/accounts_background_service.rs | 337 ++++++++++++--------- 1 file changed, 189 insertions(+), 148 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 0bac89b5b4a8ed..c0a5a60aa26ed5 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -150,163 +150,204 @@ impl SnapshotRequestHandler { non_snapshot_time_us: u128, last_full_snapshot_slot: &mut Option, ) -> Option> { - self.snapshot_request_receiver.try_iter() + self.snapshot_request_receiver + .try_iter() .map(|request| { - let accounts_package_type = new_accounts_package_type(&request, &self.snapshot_config, *last_full_snapshot_slot); + let accounts_package_type = new_accounts_package_type( + &request, + &self.snapshot_config, + *last_full_snapshot_slot, + ); (request, accounts_package_type) }) - .inspect(|(request, package_type)| trace!("outstanding snapshot request: {:?}, {:?}", request, package_type)) + .inspect(|(request, package_type)| { + trace!( + "outstanding snapshot request: {:?}, {:?}", + request, + package_type + ) + }) .max_by(cmp_snapshot_requests) .map(|(snapshot_request, accounts_package_type)| { - trace!("handling snapshot request: {:?}, {:?}", snapshot_request, accounts_package_type); - let mut total_time = Measure::start("snapshot_request_receiver_total_time"); - let 
SnapshotRequest { - snapshot_root_bank, - status_cache_slot_deltas, - request_type, - } = snapshot_request; - - // we should not rely on the state of this validator until startup verification is complete (unless handling an EAH request) - assert!(snapshot_root_bank.is_startup_verification_complete() || request_type == SnapshotRequestType::EpochAccountsHash); - - if accounts_package_type == AccountsPackageType::Snapshot(SnapshotType::FullSnapshot) { - *last_full_snapshot_slot = Some(snapshot_root_bank.slot()); - } - - let previous_hash = if test_hash_calculation { - // We have to use the index version here. - // We cannot calculate the non-index way because cache has not been flushed and stores don't match reality. This comment is out of date and can be re-evaluated. - snapshot_root_bank.update_accounts_hash_with_index_option(true, false, false) - } else { - Hash::default() - }; - - let mut shrink_time = Measure::start("shrink_time"); - if !accounts_db_caching_enabled { - snapshot_root_bank - .process_stale_slot_with_budget(0, SHRUNKEN_ACCOUNT_PER_INTERVAL); - } - shrink_time.stop(); - - let mut flush_accounts_cache_time = Measure::start("flush_accounts_cache_time"); - if accounts_db_caching_enabled { - // Forced cache flushing MUST flush all roots <= snapshot_root_bank.slot(). - // That's because `snapshot_root_bank.slot()` must be root at this point, - // and contains relevant updates because each bank has at least 1 account update due - // to sysvar maintenance. Otherwise, this would cause missing storages in the snapshot - snapshot_root_bank.force_flush_accounts_cache(); - // Ensure all roots <= `self.slot()` have been flushed. - // Note `max_flush_root` could be larger than self.slot() if there are - // `> MAX_CACHE_SLOT` cached and rooted slots which triggered earlier flushes. 
- assert!( - snapshot_root_bank.slot() - <= snapshot_root_bank - .rc - .accounts - .accounts_db - .accounts_cache - .fetch_max_flush_root() - ); - } - flush_accounts_cache_time.stop(); - - let hash_for_testing = if test_hash_calculation { - let use_index_hash_calculation = false; - let check_hash = false; - - let (this_hash, capitalization) = snapshot_root_bank.accounts().accounts_db.calculate_accounts_hash_helper( - use_index_hash_calculation, - snapshot_root_bank.slot(), - &CalcAccountsHashConfig { - use_bg_thread_pool: true, - check_hash, - ancestors: None, - epoch_schedule: snapshot_root_bank.epoch_schedule(), - rent_collector: snapshot_root_bank.rent_collector(), - store_detailed_debug_info_on_failure: false, - full_snapshot: None, - enable_rehashing: snapshot_root_bank.bank_enable_rehashing_on_accounts_hash(), - }, - ).unwrap(); - assert_eq!(previous_hash, this_hash); - assert_eq!(capitalization, snapshot_root_bank.capitalization()); - Some(this_hash) - } else { - None - }; - - let mut clean_time = Measure::start("clean_time"); - snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); - clean_time.stop(); - - if accounts_db_caching_enabled { - shrink_time = Measure::start("shrink_time"); - snapshot_root_bank.shrink_candidate_slots(); - shrink_time.stop(); - } - - // Snapshot the bank and send over an accounts package - let mut snapshot_time = Measure::start("snapshot_time"); - let result = snapshot_utils::snapshot_bank( - &snapshot_root_bank, - status_cache_slot_deltas, - &self.pending_accounts_package, - &self.snapshot_config.bank_snapshots_dir, - &self.snapshot_config.full_snapshot_archives_dir, - &self.snapshot_config.incremental_snapshot_archives_dir, - self.snapshot_config.snapshot_version, - self.snapshot_config.archive_format, - hash_for_testing, + self.handle_snapshot_request( + accounts_db_caching_enabled, + test_hash_calculation, + non_snapshot_time_us, + last_full_snapshot_slot, + snapshot_request, accounts_package_type, - ); - if let Err(e) = result { - warn!( - "Error taking bank snapshot. slot: {}, accounts package type: {:?}, err: {:?}", - snapshot_root_bank.slot(), - accounts_package_type, - e, - ); - - if Self::is_snapshot_error_fatal(&e) { - return Err(e); - } - } - snapshot_time.stop(); - info!("Took bank snapshot. 
accounts package type: {:?}, slot: {}, accounts hash: {}, bank hash: {}", - accounts_package_type, - snapshot_root_bank.slot(), - snapshot_root_bank.get_accounts_hash(), - snapshot_root_bank.hash(), - ); - - // Cleanup outdated snapshots - let mut purge_old_snapshots_time = Measure::start("purge_old_snapshots_time"); - snapshot_utils::purge_old_bank_snapshots(&self.snapshot_config.bank_snapshots_dir); - purge_old_snapshots_time.stop(); - total_time.stop(); - - datapoint_info!( - "handle_snapshot_requests-timing", - ( - "flush_accounts_cache_time", - flush_accounts_cache_time.as_us(), - i64 - ), - ("shrink_time", shrink_time.as_us(), i64), - ("clean_time", clean_time.as_us(), i64), - ("snapshot_time", snapshot_time.as_us(), i64), - ( - "purge_old_snapshots_time", - purge_old_snapshots_time.as_us(), - i64 - ), - ("total_us", total_time.as_us(), i64), - ("non_snapshot_time_us", non_snapshot_time_us, i64), - ); - Ok(snapshot_root_bank.block_height()) + ) }) } + fn handle_snapshot_request( + &self, + accounts_db_caching_enabled: bool, + test_hash_calculation: bool, + non_snapshot_time_us: u128, + last_full_snapshot_slot: &mut Option, + snapshot_request: SnapshotRequest, + accounts_package_type: AccountsPackageType, + ) -> Result { + trace!( + "handling snapshot request: {:?}, {:?}", + snapshot_request, + accounts_package_type + ); + let mut total_time = Measure::start("snapshot_request_receiver_total_time"); + let SnapshotRequest { + snapshot_root_bank, + status_cache_slot_deltas, + request_type, + } = snapshot_request; + + // we should not rely on the state of this validator until startup verification is complete (unless handling an EAH request) + assert!( + snapshot_root_bank.is_startup_verification_complete() + || request_type == SnapshotRequestType::EpochAccountsHash + ); + + if accounts_package_type == AccountsPackageType::Snapshot(SnapshotType::FullSnapshot) { + *last_full_snapshot_slot = Some(snapshot_root_bank.slot()); + } + + let previous_hash = if test_hash_calculation { + // We have to use the index version here. + // We cannot calculate the non-index way because cache has not been flushed and stores don't match reality. This comment is out of date and can be re-evaluated. + snapshot_root_bank.update_accounts_hash_with_index_option(true, false, false) + } else { + Hash::default() + }; + + let mut shrink_time = Measure::start("shrink_time"); + if !accounts_db_caching_enabled { + snapshot_root_bank.process_stale_slot_with_budget(0, SHRUNKEN_ACCOUNT_PER_INTERVAL); + } + shrink_time.stop(); + + let mut flush_accounts_cache_time = Measure::start("flush_accounts_cache_time"); + if accounts_db_caching_enabled { + // Forced cache flushing MUST flush all roots <= snapshot_root_bank.slot(). + // That's because `snapshot_root_bank.slot()` must be root at this point, + // and contains relevant updates because each bank has at least 1 account update due + // to sysvar maintenance. Otherwise, this would cause missing storages in the snapshot + snapshot_root_bank.force_flush_accounts_cache(); + // Ensure all roots <= `self.slot()` have been flushed. + // Note `max_flush_root` could be larger than self.slot() if there are + // `> MAX_CACHE_SLOT` cached and rooted slots which triggered earlier flushes. 
+ assert!( + snapshot_root_bank.slot() + <= snapshot_root_bank + .rc + .accounts + .accounts_db + .accounts_cache + .fetch_max_flush_root() + ); + } + flush_accounts_cache_time.stop(); + + let hash_for_testing = if test_hash_calculation { + let use_index_hash_calculation = false; + let check_hash = false; + + let (this_hash, capitalization) = snapshot_root_bank + .accounts() + .accounts_db + .calculate_accounts_hash_helper( + use_index_hash_calculation, + snapshot_root_bank.slot(), + &CalcAccountsHashConfig { + use_bg_thread_pool: true, + check_hash, + ancestors: None, + epoch_schedule: snapshot_root_bank.epoch_schedule(), + rent_collector: snapshot_root_bank.rent_collector(), + store_detailed_debug_info_on_failure: false, + full_snapshot: None, + enable_rehashing: snapshot_root_bank + .bank_enable_rehashing_on_accounts_hash(), + }, + ) + .unwrap(); + assert_eq!(previous_hash, this_hash); + assert_eq!(capitalization, snapshot_root_bank.capitalization()); + Some(this_hash) + } else { + None + }; + + let mut clean_time = Measure::start("clean_time"); + snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); + clean_time.stop(); + + if accounts_db_caching_enabled { + shrink_time = Measure::start("shrink_time"); + snapshot_root_bank.shrink_candidate_slots(); + shrink_time.stop(); + } + + // Snapshot the bank and send over an accounts package + let mut snapshot_time = Measure::start("snapshot_time"); + let result = snapshot_utils::snapshot_bank( + &snapshot_root_bank, + status_cache_slot_deltas, + &self.pending_accounts_package, + &self.snapshot_config.bank_snapshots_dir, + &self.snapshot_config.full_snapshot_archives_dir, + &self.snapshot_config.incremental_snapshot_archives_dir, + self.snapshot_config.snapshot_version, + self.snapshot_config.archive_format, + hash_for_testing, + accounts_package_type, + ); + if let Err(e) = result { + warn!( + "Error taking bank snapshot. slot: {}, accounts package type: {:?}, err: {:?}", + snapshot_root_bank.slot(), + accounts_package_type, + e, + ); + + if Self::is_snapshot_error_fatal(&e) { + return Err(e); + } + } + snapshot_time.stop(); + info!("Took bank snapshot. accounts package type: {:?}, slot: {}, accounts hash: {}, bank hash: {}", + accounts_package_type, + snapshot_root_bank.slot(), + snapshot_root_bank.get_accounts_hash(), + snapshot_root_bank.hash(), + ); + + // Cleanup outdated snapshots + let mut purge_old_snapshots_time = Measure::start("purge_old_snapshots_time"); + snapshot_utils::purge_old_bank_snapshots(&self.snapshot_config.bank_snapshots_dir); + purge_old_snapshots_time.stop(); + total_time.stop(); + + datapoint_info!( + "handle_snapshot_requests-timing", + ( + "flush_accounts_cache_time", + flush_accounts_cache_time.as_us(), + i64 + ), + ("shrink_time", shrink_time.as_us(), i64), + ("clean_time", clean_time.as_us(), i64), + ("snapshot_time", snapshot_time.as_us(), i64), + ( + "purge_old_snapshots_time", + purge_old_snapshots_time.as_us(), + i64 + ), + ("total_us", total_time.as_us(), i64), + ("non_snapshot_time_us", non_snapshot_time_us, i64), + ); + Ok(snapshot_root_bank.block_height()) + } + /// Check if a SnapshotError should be treated as 'fatal' by SnapshotRequestHandler, and /// `handle_snapshot_requests()` in particular. Fatal errors will cause the node to shutdown. /// Non-fatal errors are logged and then swallowed. 
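
Note on the refactor above: `handle_snapshot_requests()` still drains every queued request via `try_iter()` and services only the winner of `cmp_snapshot_requests`, so a backlog of requests collapses into a single snapshot. A minimal sketch of that drain-and-coalesce pattern, assuming an illustrative `Request` type and `priority` ranking in place of the real `(SnapshotRequest, AccountsPackageType)` comparator:

```rust
use std::sync::mpsc;

// Illustrative stand-in for SnapshotRequest; just enough fields to rank on.
#[derive(Debug)]
struct Request {
    slot: u64,
    is_full: bool,
}

// Rank pending requests: full snapshots beat incremental, then newest slot wins.
fn priority(request: &Request) -> (bool, u64) {
    (request.is_full, request.slot)
}

fn main() {
    let (sender, receiver) = mpsc::channel();
    sender.send(Request { slot: 10, is_full: false }).unwrap();
    sender.send(Request { slot: 12, is_full: true }).unwrap();
    sender.send(Request { slot: 11, is_full: false }).unwrap();

    // Drain all pending requests without blocking; the losers are simply
    // dropped, so only the most important request is actually handled.
    if let Some(request) = receiver.try_iter().max_by_key(priority) {
        println!("handling {request:?}");
    }
}
```
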
From db9e32d71d9c605edf8da07dab837876ac1312ee Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 11 Oct 2022 12:49:49 -0500 Subject: [PATCH 57/65] Revert "log adding root every 10s (#28280)" (#28334) This reverts commit 16853acf354ea855fbf1aa632304ff58689fba58. We get similar coverage from the slot field of bank-forks_set_root; additionally, we can see banks with bank-new_from_parent-heights. --- runtime/src/accounts_db.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 20953f74c0c568..d7520f985a2d1f 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1207,8 +1207,6 @@ pub struct AccountsDb { /// debug feature to scan every append vec and verify refcounts are equal exhaustively_verify_refcounts: bool, - last_add_root_log: AtomicInterval, - /// the full accounts hash calculation as of a predetermined block height 'N' /// to be included in the bank hash at a predetermined block height 'M' /// The cadence is once per epoch, all nodes calculate a full accounts hash as of a known slot calculated using 'N' @@ -2191,7 +2189,6 @@ impl AccountsDb { num_hash_scan_passes, log_dead_slots: AtomicBool::new(true), exhaustively_verify_refcounts: false, - last_add_root_log: AtomicInterval::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), } } @@ -8550,10 +8547,6 @@ impl AccountsDb { } store_time.stop(); - if self.last_add_root_log.should_update(10_000) { - datapoint_info!("add_root", ("root", slot, i64)); - } - AccountsAddRootTiming { index_us: index_time.as_us(), cache_us: cache_time.as_us(), From 0fa53df1dc26e21649d257c8c8a504627b896d7f Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 14 Jun 2022 17:06:13 +0530 Subject: [PATCH 58/65] Split out voting and banking threads in banking stage Additionally this allows us to aggressively prune the buffer for voting threads as with the new vote state only the latest vote from each validator is necessary. --- core/benches/banking_stage.rs | 7 +- core/src/banking_stage.rs | 763 +++++++++++------- core/src/latest_unprocessed_votes.rs | 29 +- core/src/leader_slot_banking_stage_metrics.rs | 77 +- core/src/unprocessed_transaction_storage.rs | 79 +- 5 files changed, 600 insertions(+), 355 deletions(-) diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index ae0bd6b07b1b80..e46138fe121f26 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -13,6 +13,7 @@ use { leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker, qos_service::QosService, unprocessed_packet_batches::*, + unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, }, solana_entry::entry::{next_hash, Entry}, solana_gossip::cluster_info::{ClusterInfo, Node}, @@ -83,8 +84,10 @@ fn bench_consume_buffered(bencher: &mut Bencher) { let transactions = vec![tx; 4194304]; let batches = transactions_to_deserialized_packets(&transactions).unwrap(); let batches_len = batches.len(); - let mut transaction_buffer = - UnprocessedPacketBatches::from_iter(batches.into_iter(), 2 * batches_len); + let mut transaction_buffer = UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::from_iter(batches.into_iter(), 2 * batches_len), + ThreadType::Transactions, + ); let (s, _r) = unbounded(); // This tests the performance of buffering packets. // If the packet buffers are copied, performance will be poor. 
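
The pruning claim in the commit message — with the new vote state, only the latest vote from each validator matters — is the invariant behind `LatestUnprocessedVotes`. A rough sketch of just that invariant, with simplified stand-in types (the real structure also tracks a `VoteSource` and uses finer-grained locking):

```rust
use std::collections::HashMap;

// Simplified stand-ins; a real pubkey is an ed25519 public key.
type Pubkey = [u8; 32];

struct VotePacket {
    validator: Pubkey,
    slot: u64,
}

#[derive(Default)]
struct LatestVotes {
    latest: HashMap<Pubkey, VotePacket>,
}

impl LatestVotes {
    // Keep a vote only if it is newer than what we already hold for
    // this validator; otherwise drop it as stale.
    fn update(&mut self, vote: VotePacket) {
        let is_newer = self
            .latest
            .get(&vote.validator)
            .map_or(true, |existing| vote.slot > existing.slot);
        if is_newer {
            self.latest.insert(vote.validator, vote);
        }
    }
}

fn main() {
    let mut votes = LatestVotes::default();
    let validator = [1u8; 32];
    votes.update(VotePacket { validator, slot: 5 });
    votes.update(VotePacket { validator, slot: 7 }); // replaces slot 5
    votes.update(VotePacket { validator, slot: 6 }); // stale, dropped
    // The buffer is bounded by the number of validators, not vote volume.
    assert_eq!(votes.latest.len(), 1);
    assert_eq!(votes.latest[&validator].slot, 7);
}
```
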
diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 70105973de206e..f43accee731477 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -3,9 +3,11 @@ //! can do its processing in parallel with signature verification on the GPU. use { + min_max_heap::MinMaxHeap, crate::{ forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, immutable_deserialized_packet::ImmutableDeserializedPacket, + latest_unprocessed_votes::{LatestUnprocessedVotes, VoteSource}, leader_slot_banking_stage_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary}, leader_slot_banking_stage_timing_metrics::{ LeaderExecuteAndCommitTimings, RecordTransactionsTimings, @@ -15,6 +17,7 @@ use { sigverify::SigverifyTracerPacketStats, tracer_packet_stats::TracerPacketStats, unprocessed_packet_batches::{self, *}, + unprocessed_transaction_storage::{self, ThreadType, UnprocessedTransactionStorage}, }, core::iter::repeat, crossbeam_channel::{ @@ -22,7 +25,6 @@ use { }, histogram::Histogram, itertools::Itertools, - min_max_heap::MinMaxHeap, solana_entry::entry::hash_transactions, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::{ @@ -54,6 +56,7 @@ use { Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU, }, + feature_set::allow_votes_to_directly_update_vote_state, pubkey::Pubkey, saturating_add_assign, timing::{duration_as_ms, timestamp, AtomicInterval}, @@ -442,21 +445,57 @@ impl BankingStage { let data_budget = Arc::new(DataBudget::default()); let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - NUM_VOTE_PROCESSING_THREADS) as usize); + // Keeps track of extraneous vote transactions for the vote threads + let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + let should_split_voting_threads = bank_forks + .read() + .map(|bank_forks| { + let bank = bank_forks.root_bank(); + bank.feature_set + .is_active(&allow_votes_to_directly_update_vote_state::id()) + }) + .unwrap_or(false); // Many banks that process transactions in parallel. let bank_thread_hdls: Vec> = (0..num_threads) .map(|i| { - let (verified_receiver, forward_option) = match i { - 0 => { - // Disable forwarding of vote transactions - // from gossip. 
Note - votes can also arrive from tpu - (verified_vote_receiver.clone(), ForwardOption::NotForward) - } - 1 => ( - tpu_verified_vote_receiver.clone(), - ForwardOption::ForwardTpuVote, - ), - _ => (verified_receiver.clone(), ForwardOption::ForwardTransaction), - }; + let (verified_receiver, unprocessed_transaction_storage) = + match (i, should_split_voting_threads) { + (0, false) => ( + verified_vote_receiver.clone(), + UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::with_capacity(batch_limit), + ThreadType::Voting(VoteSource::Gossip), + ), + ), + (0, true) => ( + verified_vote_receiver.clone(), + UnprocessedTransactionStorage::new_vote_storage( + latest_unprocessed_votes.clone(), + VoteSource::Gossip, + ), + ), + (1, false) => ( + tpu_verified_vote_receiver.clone(), + UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::with_capacity(batch_limit), + ThreadType::Voting(VoteSource::Tpu), + ), + ), + (1, true) => ( + tpu_verified_vote_receiver.clone(), + UnprocessedTransactionStorage::new_vote_storage( + latest_unprocessed_votes.clone(), + VoteSource::Tpu, + ), + ), + _ => ( + verified_receiver.clone(), + UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::with_capacity(batch_limit), + ThreadType::Transactions, + ), + ), + }; let mut packet_deserializer = PacketDeserializer::new(verified_receiver); let poh_recorder = poh_recorder.clone(); @@ -476,9 +515,7 @@ impl BankingStage { &poh_recorder, &cluster_info, &mut recv_start, - forward_option, i, - batch_limit, transaction_status_sender, gossip_vote_sender, &data_budget, @@ -486,6 +523,7 @@ impl BankingStage { log_messages_bytes_limit, connection_cache, &bank_forks, + unprocessed_transaction_storage, ); }) .unwrap() @@ -589,12 +627,111 @@ impl BankingStage { (Ok(()), packet_vec_len, Some(leader_pubkey)) } + #[allow(clippy::too_many_arguments)] + fn processing_function( + max_tx_ingestion_ns: u128, + poh_recorder: &Arc>, + slot_metrics_tracker: &mut LeaderSlotMetricsTracker, + recorder: &TransactionRecorder, + transaction_status_sender: &Option, + gossip_vote_sender: &ReplayVoteSender, + banking_stage_stats: &BankingStageStats, + qos_service: &QosService, + log_messages_bytes_limit: Option, + consumed_buffered_packets_count: &mut usize, + rebuffered_packet_count: &mut usize, + reached_end_of_slot: &mut bool, + test_fn: &Option, + packets_to_process: &Vec>, + ) -> Option> { + // TODO: Right now we iterate through buffer and try the highest weighted transaction once + // but we should retry the highest weighted transactions more often. 
+ let (bank_start, poh_recorder_lock_time) = measure!( + poh_recorder.read().unwrap().bank_start(), + "poh_recorder.read", + ); + slot_metrics_tracker.increment_consume_buffered_packets_poh_recorder_lock_us( + poh_recorder_lock_time.as_us(), + ); + + let packets_to_process_len = packets_to_process.len(); + if let Some(BankStart { + working_bank, + bank_creation_time, + }) = bank_start + { + let (process_transactions_summary, process_packets_transactions_time) = measure!( + Self::process_packets_transactions( + &working_bank, + &bank_creation_time, + recorder, + packets_to_process.iter().map(|p| &**p), + transaction_status_sender, + gossip_vote_sender, + banking_stage_stats, + qos_service, + slot_metrics_tracker, + log_messages_bytes_limit + ), + "process_packets_transactions", + ); + slot_metrics_tracker.increment_process_packets_transactions_us( + process_packets_transactions_time.as_us(), + ); + + let ProcessTransactionsSummary { + reached_max_poh_height, + retryable_transaction_indexes, + .. + } = process_transactions_summary; + + if reached_max_poh_height + || !Bank::should_bank_still_be_processing_txs( + &bank_creation_time, + max_tx_ingestion_ns, + ) + { + *reached_end_of_slot = true; + } + + // The difference between all transactions passed to execution and the ones that + // are retryable were the ones that were either: + // 1) Committed into the block + // 2) Dropped without being committed because they had some fatal error (too old, + // duplicate signature, etc.) + // + // Note: This assumes that every packet deserializes into one transaction! + *consumed_buffered_packets_count += + packets_to_process_len.saturating_sub(retryable_transaction_indexes.len()); + + // Out of the buffered packets just retried, collect any still unprocessed + // transactions in this batch for forwarding + *rebuffered_packet_count += retryable_transaction_indexes.len(); + if let Some(test_fn) = test_fn { + test_fn(); + } + + slot_metrics_tracker + .increment_retryable_packets_count(retryable_transaction_indexes.len() as u64); + + Some(retryable_transaction_indexes) + } else if *reached_end_of_slot { + None + } else { + // mark as end-of-slot to avoid aggressively lock poh for the remaining for + // packet batches in buffer + *reached_end_of_slot = true; + + None + } + } + #[allow(clippy::too_many_arguments)] pub fn consume_buffered_packets( _my_pubkey: &Pubkey, max_tx_ingestion_ns: u128, poh_recorder: &Arc>, - buffered_packet_batches: &mut UnprocessedPacketBatches, + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, test_fn: Option, @@ -607,122 +744,38 @@ impl BankingStage { ) { let mut rebuffered_packet_count = 0; let mut consumed_buffered_packets_count = 0; - let buffered_packets_len = buffered_packet_batches.len(); let mut proc_start = Measure::start("consume_buffered_process"); let mut reached_end_of_slot = false; - let mut retryable_packets = { - let capacity = buffered_packet_batches.capacity(); - std::mem::replace( - &mut buffered_packet_batches.packet_priority_queue, - MinMaxHeap::with_capacity(capacity), - ) - }; - let retryable_packets: MinMaxHeap> = retryable_packets - .drain_desc() - .chunks(num_packets_to_process_per_iteration) - .into_iter() - .flat_map(|packets_to_process| { - let packets_to_process = packets_to_process.into_iter().collect_vec(); - // TODO: Right now we iterate through buffer and try the highest weighted transaction once - // but we should retry the highest weighted transactions 
more often. - let (bank_start, poh_recorder_lock_time) = measure!( - poh_recorder.read().unwrap().bank_start(), - "poh_recorder.read", - ); - slot_metrics_tracker.increment_consume_buffered_packets_poh_recorder_lock_us( - poh_recorder_lock_time.as_us(), - ); + let num_packets_to_process = unprocessed_transaction_storage.len(); + let bank = poh_recorder.read().unwrap().bank(); - let packets_to_process_len = packets_to_process.len(); - if let Some(BankStart { - working_bank, - bank_creation_time, - }) = bank_start - { - let (process_transactions_summary, process_packets_transactions_time) = - measure!( - Self::process_packets_transactions( - &working_bank, - &bank_creation_time, - recorder, - packets_to_process.iter().map(|p| &**p), - transaction_status_sender.clone(), - gossip_vote_sender, - banking_stage_stats, - qos_service, - slot_metrics_tracker, - log_messages_bytes_limit - ), - "process_packets_transactions", - ); - slot_metrics_tracker.increment_process_packets_transactions_us( - process_packets_transactions_time.as_us(), - ); - - let ProcessTransactionsSummary { - reached_max_poh_height, - retryable_transaction_indexes, - .. - } = process_transactions_summary; - - if reached_max_poh_height - || !Bank::should_bank_still_be_processing_txs( - &bank_creation_time, - max_tx_ingestion_ns, - ) - { - reached_end_of_slot = true; - } - - // The difference between all transactions passed to execution and the ones that - // are retryable were the ones that were either: - // 1) Committed into the block - // 2) Dropped without being committed because they had some fatal error (too old, - // duplicate signature, etc.) - // - // Note: This assumes that every packet deserializes into one transaction! - consumed_buffered_packets_count += - packets_to_process_len.saturating_sub(retryable_transaction_indexes.len()); - - // Out of the buffered packets just retried, collect any still unprocessed - // transactions in this batch for forwarding - rebuffered_packet_count += retryable_transaction_indexes.len(); - if let Some(test_fn) = &test_fn { - test_fn(); - } - - slot_metrics_tracker.increment_retryable_packets_count( - retryable_transaction_indexes.len() as u64, - ); - - let result = retryable_transaction_indexes - .iter() - .map(|i| packets_to_process[*i].clone()) - .collect_vec(); - - // Remove the non-retryable packets, packets that were either: - // 1) Successfully processed - // 2) Failed but not retryable - Self::remove_non_retained_packets(buffered_packet_batches, &packets_to_process, &retryable_transaction_indexes); - - result - } else if reached_end_of_slot { - packets_to_process - } else { - // mark as end-of-slot to avoid aggressively lock poh for the remaining for - // packet batches in buffer - reached_end_of_slot = true; - - packets_to_process - } - }) - .collect(); - - buffered_packet_batches.packet_priority_queue = retryable_packets; + unprocessed_transaction_storage.process_packets( + bank, + num_packets_to_process_per_iteration, + |packets_to_process| { + Self::processing_function( + max_tx_ingestion_ns, + poh_recorder, + slot_metrics_tracker, + recorder, + &transaction_status_sender, + gossip_vote_sender, + banking_stage_stats, + qos_service, + log_messages_bytes_limit, + &mut consumed_buffered_packets_count, + &mut rebuffered_packet_count, + &mut reached_end_of_slot, + &test_fn, + packets_to_process, + ) + }, + ); if reached_end_of_slot { - slot_metrics_tracker - .set_end_of_slot_unprocessed_buffer_len(buffered_packet_batches.len() as u64); + 
slot_metrics_tracker.set_end_of_slot_unprocessed_buffer_len( + unprocessed_transaction_storage.len() as u64, + ); // We've hit the end of this slot, no need to perform more processing, // Packet filtering will be done before forwarding. @@ -732,17 +785,12 @@ impl BankingStage { debug!( "@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}", timestamp(), - buffered_packets_len, + num_packets_to_process, proc_start.as_ms(), consumed_buffered_packets_count, (consumed_buffered_packets_count as f32) / (proc_start.as_s()) ); - // Assert unprocessed queue is still consistent - assert_eq!( - buffered_packet_batches.packet_priority_queue.len(), - buffered_packet_batches.message_hash_to_transaction.len() - ); banking_stage_stats .consume_buffered_packets_elapsed .fetch_add(proc_start.as_us(), Ordering::Relaxed); @@ -793,8 +841,7 @@ impl BankingStage { socket: &UdpSocket, poh_recorder: &Arc>, cluster_info: &ClusterInfo, - buffered_packet_batches: &mut UnprocessedPacketBatches, - forward_option: &ForwardOption, + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, banking_stage_stats: &BankingStageStats, @@ -807,6 +854,9 @@ impl BankingStage { tracer_packet_stats: &mut TracerPacketStats, bank_forks: &Arc>, ) { + if unprocessed_transaction_storage.should_not_process() { + return; + } let ((metrics_action, decision), make_decision_time) = measure!( { let bank_start; @@ -856,7 +906,7 @@ impl BankingStage { my_pubkey, max_tx_ingestion_ns, poh_recorder, - buffered_packet_batches, + unprocessed_transaction_storage, transaction_status_sender, gossip_vote_sender, None::>, @@ -875,9 +925,8 @@ impl BankingStage { BufferedPacketsDecision::Forward => { let (_, forward_time) = measure!( Self::handle_forwarding( - forward_option, cluster_info, - buffered_packet_batches, + unprocessed_transaction_storage, poh_recorder, socket, false, @@ -898,9 +947,8 @@ impl BankingStage { BufferedPacketsDecision::ForwardAndHold => { let (_, forward_and_hold_time) = measure!( Self::handle_forwarding( - forward_option, cluster_info, - buffered_packet_batches, + unprocessed_transaction_storage, poh_recorder, socket, true, @@ -923,9 +971,8 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] fn handle_forwarding( - forward_option: &ForwardOption, cluster_info: &ClusterInfo, - buffered_packet_batches: &mut UnprocessedPacketBatches, + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, poh_recorder: &Arc>, socket: &UdpSocket, hold: bool, @@ -936,12 +983,7 @@ impl BankingStage { tracer_packet_stats: &mut TracerPacketStats, bank_forks: &Arc>, ) { - if let ForwardOption::NotForward = forward_option { - if !hold { - buffered_packet_batches.clear(); - } - return; - } + let forward_option = unprocessed_transaction_storage.forward_option(); // get current root bank from bank_forks, use it to sanitize transaction and // load all accounts from address loader; @@ -952,12 +994,11 @@ impl BankingStage { // sanitize and filter packets that are no longer valid (could be too old, a duplicate of something // already processed), then add to forwarding buffer. 
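With this change the forwarding decision is owned by the storage instead of being threaded through as a separate `forward_option` argument. A minimal sketch of the dispatch this implies, assuming the `ForwardOption` variants used elsewhere in this patch (the exact arms, and the `ForwardTpuVote` name, are illustrative assumptions, not the committed implementation):

impl UnprocessedTransactionStorage {
    pub fn forward_option(&self) -> ForwardOption {
        match self {
            // Votes that arrived over gossip are already disseminated by
            // gossip itself, so they are assumed not to be forwarded again.
            Self::VoteStorage(vote_storage) => match vote_storage.vote_source {
                VoteSource::Gossip => ForwardOption::NotForward,
                VoteSource::Tpu => ForwardOption::ForwardTpuVote,
            },
            // Ordinary transactions are forwarded to the upcoming leader.
            Self::LocalTransactionStorage(_) => ForwardOption::ForwardTransaction,
        }
    }
}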
- let filter_forwarding_result = Self::filter_and_forward_with_account_limits( - ¤t_bank, - buffered_packet_batches, - &mut forward_packet_batches_by_accounts, - UNPROCESSED_BUFFER_STEP_SIZE, - ); + let filter_forwarding_result = unprocessed_transaction_storage + .filter_forwardable_packets_and_add_batches( + current_bank, + &mut forward_packet_batches_by_accounts, + ); slot_metrics_tracker.increment_transactions_from_packets_us( filter_forwarding_result.total_packet_conversion_us, ); @@ -982,7 +1023,7 @@ impl BankingStage { let (_forward_result, sucessful_forwarded_packets_count, leader_pubkey) = Self::forward_buffered_packets( connection_cache, - forward_option, + &forward_option, cluster_info, poh_recorder, socket, @@ -1021,7 +1062,7 @@ impl BankingStage { tracer_packet_stats.increment_total_cleared_from_buffer_after_forward( filter_forwarding_result.total_tracer_packets_in_buffer, ); - buffered_packet_batches.clear(); + unprocessed_transaction_storage.clear_forwarded_packets(); } } @@ -1259,12 +1300,12 @@ impl BankingStage { /// remove packets from UnprocessedPacketBatches.message_hash_to_transaction after they have /// been removed from UnprocessedPacketBatches.packet_priority_queue - fn remove_non_retained_packets( + pub(crate) fn remove_non_retained_packets( buffered_packet_batches: &mut UnprocessedPacketBatches, packets_to_process: &[Arc], retained_packet_indexes: &[usize], ) { - Self::filter_processed_packets( + unprocessed_transaction_storage::filter_processed_packets( retained_packet_indexes .iter() .chain(std::iter::once(&packets_to_process.len())), @@ -1319,9 +1360,7 @@ impl BankingStage { poh_recorder: &Arc>, cluster_info: &ClusterInfo, recv_start: &mut Instant, - forward_option: ForwardOption, id: u32, - batch_limit: usize, transaction_status_sender: Option, gossip_vote_sender: ReplayVoteSender, data_budget: &DataBudget, @@ -1329,10 +1368,10 @@ impl BankingStage { log_messages_bytes_limit: Option, connection_cache: Arc, bank_forks: &Arc>, + mut unprocessed_transaction_storage: UnprocessedTransactionStorage, ) { let recorder = poh_recorder.read().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut buffered_packet_batches = UnprocessedPacketBatches::with_capacity(batch_limit); let mut banking_stage_stats = BankingStageStats::new(id); let mut tracer_packet_stats = TracerPacketStats::new(id); let qos_service = QosService::new(cost_model, id); @@ -1342,7 +1381,7 @@ impl BankingStage { loop { let my_pubkey = cluster_info.id(); - if !buffered_packet_batches.is_empty() + if !unprocessed_transaction_storage.is_empty() || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD { let (_, process_buffered_packets_time) = measure!( @@ -1351,8 +1390,7 @@ impl BankingStage { &socket, poh_recorder, cluster_info, - &mut buffered_packet_batches, - &forward_option, + &mut unprocessed_transaction_storage, transaction_status_sender.clone(), &gossip_vote_sender, &banking_stage_stats, @@ -1374,7 +1412,8 @@ impl BankingStage { tracer_packet_stats.report(1000); - let recv_timeout = if !buffered_packet_batches.is_empty() { + // Gossip thread will almost always not wait because the transaction storage will most likely not be empty + let recv_timeout = if !unprocessed_transaction_storage.is_empty() { // If there are buffered packets, run the equivalent of try_recv to try reading more // packets. 
This prevents starving BankingStage::consume_buffered_packets due to // buffered_packet_batches containing transactions that exceed the cost model for @@ -1391,7 +1430,7 @@ impl BankingStage { recv_start, recv_timeout, id, - &mut buffered_packet_batches, + &mut unprocessed_transaction_storage, &mut banking_stage_stats, &mut tracer_packet_stats, &mut slot_metrics_tracker, @@ -1470,7 +1509,7 @@ impl BankingStage { bank: &Arc, poh: &TransactionRecorder, batch: &TransactionBatch, - transaction_status_sender: Option, + transaction_status_sender: &Option, gossip_vote_sender: &ReplayVoteSender, log_messages_bytes_limit: Option, ) -> ExecuteAndCommitTransactionsOutput { @@ -1712,7 +1751,7 @@ impl BankingStage { txs: &[SanitizedTransaction], poh: &TransactionRecorder, chunk_offset: usize, - transaction_status_sender: Option, + transaction_status_sender: &Option, gossip_vote_sender: &ReplayVoteSender, qos_service: &QosService, log_messages_bytes_limit: Option, @@ -1905,7 +1944,7 @@ impl BankingStage { bank_creation_time: &Instant, transactions: &[SanitizedTransaction], poh: &TransactionRecorder, - transaction_status_sender: Option, + transaction_status_sender: &Option, gossip_vote_sender: &ReplayVoteSender, qos_service: &QosService, log_messages_bytes_limit: Option, @@ -1937,7 +1976,7 @@ impl BankingStage { &transactions[chunk_start..chunk_end], poh, chunk_start, - transaction_status_sender.clone(), + transaction_status_sender, gossip_vote_sender, qos_service, log_messages_bytes_limit, @@ -2105,32 +2144,13 @@ impl BankingStage { Self::filter_valid_transaction_indexes(&results, transaction_to_packet_indexes) } - fn filter_processed_packets<'a, F>( - retryable_transaction_indexes: impl Iterator, - mut f: F, - ) where - F: FnMut(usize, usize), - { - let mut prev_retryable_index = 0; - for (i, retryable_index) in retryable_transaction_indexes.enumerate() { - let start = if i == 0 { 0 } else { prev_retryable_index + 1 }; - - let end = *retryable_index; - prev_retryable_index = *retryable_index; - - if start < end { - f(start, end) - } - } - } - #[allow(clippy::too_many_arguments)] fn process_packets_transactions<'a>( bank: &'a Arc, bank_creation_time: &Instant, poh: &'a TransactionRecorder, deserialized_packets: impl Iterator, - transaction_status_sender: Option, + transaction_status_sender: &Option, gossip_vote_sender: &'a ReplayVoteSender, banking_stage_stats: &'a BankingStageStats, qos_service: &'a QosService, @@ -2238,7 +2258,7 @@ impl BankingStage { recv_start: &mut Instant, recv_timeout: Duration, id: u32, - buffered_packet_batches: &mut UnprocessedPacketBatches, + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, banking_stage_stats: &mut BankingStageStats, tracer_packet_stats: &mut TracerPacketStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, @@ -2251,7 +2271,7 @@ impl BankingStage { failed_sigverify_count, } = packet_deserializer.handle_received_packets( recv_timeout, - buffered_packet_batches.capacity() - buffered_packet_batches.len(), + unprocessed_transaction_storage.capacity() - unprocessed_transaction_storage.len(), )?; let packet_count = deserialized_packets.len(); debug!( @@ -2273,7 +2293,7 @@ impl BankingStage { let mut dropped_packets_count = 0; let mut newly_buffered_packets_count = 0; Self::push_unprocessed( - buffered_packet_batches, + unprocessed_transaction_storage, deserialized_packets, &mut dropped_packets_count, &mut newly_buffered_packets_count, @@ -2297,16 +2317,16 @@ impl BankingStage { .fetch_add(newly_buffered_packets_count, Ordering::Relaxed); 
banking_stage_stats .current_buffered_packet_batches_count - .swap(buffered_packet_batches.len(), Ordering::Relaxed); + .swap(unprocessed_transaction_storage.len(), Ordering::Relaxed); banking_stage_stats .current_buffered_packets_count - .swap(buffered_packet_batches.len(), Ordering::Relaxed); + .swap(unprocessed_transaction_storage.len(), Ordering::Relaxed); *recv_start = Instant::now(); Ok(()) } fn push_unprocessed( - unprocessed_packet_batches: &mut UnprocessedPacketBatches, + unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, deserialized_packets: Vec, dropped_packets_count: &mut usize, newly_buffered_packets_count: &mut usize, @@ -2323,20 +2343,17 @@ impl BankingStage { slot_metrics_tracker .increment_newly_buffered_packets_count(deserialized_packets.len() as u64); - let (number_of_dropped_packets, number_of_dropped_tracer_packets) = - unprocessed_packet_batches.insert_batch( - deserialized_packets - .into_iter() - .map(DeserializedPacket::from_immutable_section), - ); - - saturating_add_assign!(*dropped_packets_count, number_of_dropped_packets); - slot_metrics_tracker.increment_exceeded_buffer_limit_dropped_packets_count( - number_of_dropped_packets as u64, + let insert_packet_batches_summary = + unprocessed_transaction_storage.insert_batch(deserialized_packets); + slot_metrics_tracker + .accumulate_insert_packet_batches_summary(&insert_packet_batches_summary); + saturating_add_assign!( + *dropped_packets_count, + insert_packet_batches_summary.num_dropped_packets + ); + tracer_packet_stats.increment_total_exceeded_banking_stage_buffer( + insert_packet_batches_summary.num_dropped_tracer_packets, ); - - tracer_packet_stats - .increment_total_exceeded_banking_stage_buffer(number_of_dropped_tracer_packets); } } @@ -2411,7 +2428,7 @@ mod tests { }, solana_program_runtime::timings::ProgramTiming, solana_rpc::transaction_status_service::TransactionStatusService, - solana_runtime::bank_forks::BankForks, + solana_runtime::{bank_forks::BankForks, genesis_utils::activate_feature}, solana_sdk::{ account::AccountSharedData, hash::Hash, @@ -2427,9 +2444,13 @@ mod tests { }, solana_streamer::{recvmmsg::recv_mmsg, socket::SocketAddrSpace}, solana_transaction_status::{TransactionStatusMeta, VersionedTransactionWithStatusMeta}, + solana_vote_program::{ + vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction, + }, std::{ borrow::Cow, collections::HashSet, + error::Error, path::Path, sync::atomic::{AtomicBool, Ordering}, thread::sleep, @@ -3071,7 +3092,7 @@ mod tests { &transactions, &recorder, 0, - None, + &None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, @@ -3124,7 +3145,7 @@ mod tests { &transactions, &recorder, 0, - None, + &None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, @@ -3208,7 +3229,7 @@ mod tests { &transactions, &recorder, 0, - None, + &None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, @@ -3300,7 +3321,7 @@ mod tests { &transactions, &recorder, 0, - None, + &None, &gossip_vote_sender, &qos_service, None, @@ -3340,7 +3361,7 @@ mod tests { &transactions, &recorder, 0, - None, + &None, &gossip_vote_sender, &qos_service, None, @@ -3437,7 +3458,7 @@ mod tests { &transactions, &recorder, 0, - None, + &None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, @@ -3653,7 +3674,7 @@ mod tests { &Instant::now(), &transactions, &recorder, - None, + &None, 
&gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, @@ -3720,7 +3741,7 @@ mod tests { &Instant::now(), &transactions, &recorder, - None, + &None, &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, @@ -3948,7 +3969,7 @@ mod tests { &transactions, &recorder, 0, - Some(TransactionStatusSender { + &Some(TransactionStatusSender { sender: transaction_status_sender, }), &gossip_vote_sender, @@ -4117,7 +4138,7 @@ mod tests { &[sanitized_tx.clone()], &recorder, 0, - Some(TransactionStatusSender { + &Some(TransactionStatusSender { sender: transaction_status_sender, }), &gossip_vote_sender, @@ -4222,10 +4243,13 @@ mod tests { unprocessed_packet_batches::transactions_to_deserialized_packets(&transactions) .unwrap(); assert_eq!(deserialized_packets.len(), num_conflicting_transactions); - let mut buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter( - deserialized_packets.into_iter(), - num_conflicting_transactions, + let mut buffered_packet_batches = + UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::from_iter( + deserialized_packets.into_iter(), + num_conflicting_transactions, + ), + ThreadType::Transactions, ); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); @@ -4319,10 +4343,13 @@ mod tests { .unwrap(); assert_eq!(deserialized_packets.len(), num_conflicting_transactions); let num_packets_to_process_per_iteration = 1; - let mut buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter( - deserialized_packets.clone().into_iter(), - num_conflicting_transactions, + let mut buffered_packet_batches = + UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::from_iter( + deserialized_packets.clone().into_iter(), + num_conflicting_transactions, + ), + ThreadType::Transactions, ); let all_packet_message_hashes: HashSet = buffered_packet_batches .iter() @@ -4431,16 +4458,18 @@ mod tests { let connection_cache = ConnectionCache::default(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); for (name, data_budget, expected_num_forwarded) in test_cases { - let mut unprocessed_packet_batches: UnprocessedPacketBatches = + let unprocessed_packet_batches: UnprocessedPacketBatches = UnprocessedPacketBatches::from_iter( vec![deserialized_packet.clone()].into_iter(), 1, ); let stats = BankingStageStats::default(); BankingStage::handle_forwarding( - &ForwardOption::ForwardTransaction, &cluster_info, - &mut unprocessed_packet_batches, + &mut UnprocessedTransactionStorage::new_transaction_storage( + unprocessed_packet_batches, + ThreadType::Transactions, + ), &poh_recorder, &socket, true, @@ -4491,11 +4520,13 @@ mod tests { DeserializedPacket::new(packet).unwrap() }; - let mut unprocessed_packet_batches: UnprocessedPacketBatches = + let mut unprocessed_packet_batches = UnprocessedTransactionStorage::new_transaction_storage( UnprocessedPacketBatches::from_iter( vec![forwarded_packet, normal_packet].into_iter(), 2, - ); + ), + ThreadType::Transactions, + ); let genesis_config_info = create_slow_genesis_config(10_000); let GenesisConfigInfo { @@ -4528,35 +4559,15 @@ mod tests { let connection_cache = ConnectionCache::default(); let test_cases = vec![ - ("not-forward", ForwardOption::NotForward, true, vec![], 2), - ( - "fwd-normal", - ForwardOption::ForwardTransaction, - true, - vec![normal_block_hash], - 2, - ), - ( - "fwd-no-op", - ForwardOption::ForwardTransaction, - true, - vec![], - 2, - ), - ( 
- "fwd-no-hold", - ForwardOption::ForwardTransaction, - false, - vec![], - 0, - ), + ("fwd-normal", true, vec![normal_block_hash], 2), + ("fwd-no-op", true, vec![], 2), + ("fwd-no-hold", false, vec![], 0), ]; let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - for (name, forward_option, hold, expected_ids, expected_num_unprocessed) in test_cases { + for (name, hold, expected_ids, expected_num_unprocessed) in test_cases { let stats = BankingStageStats::default(); BankingStage::handle_forwarding( - &forward_option, &cluster_info, &mut unprocessed_packet_batches, &poh_recorder, @@ -4691,53 +4702,205 @@ mod tests { } #[test] - fn test_filter_processed_packets() { - let retryable_indexes = [0, 1, 2, 3]; - let mut non_retryable_indexes = vec![]; - let f = |start, end| { - non_retryable_indexes.push((start, end)); - }; - BankingStage::filter_processed_packets(retryable_indexes.iter(), f); - assert!(non_retryable_indexes.is_empty()); + fn test_unprocessed_transaction_storage_insert() -> Result<(), Box> { + let keypair = Keypair::new(); + let vote_keypair = Keypair::new(); + let pubkey = solana_sdk::pubkey::new_rand(); - let retryable_indexes = [0, 1, 2, 3, 5]; - let mut non_retryable_indexes = vec![]; - let f = |start, end| { - non_retryable_indexes.push((start, end)); - }; - BankingStage::filter_processed_packets(retryable_indexes.iter(), f); - assert_eq!(non_retryable_indexes, vec![(4, 5)]); + let small_transfer = Packet::from_data( + None, + system_transaction::transfer(&keypair, &pubkey, 1, Hash::new_unique()), + )?; + let mut vote = Packet::from_data( + None, + new_vote_state_update_transaction( + VoteStateUpdate::default(), + Hash::new_unique(), + &keypair, + &vote_keypair, + &vote_keypair, + None, + ), + )?; + vote.meta.flags.set(PacketFlags::SIMPLE_VOTE_TX, true); + let big_transfer = Packet::from_data( + None, + system_transaction::transfer(&keypair, &pubkey, 1000000, Hash::new_unique()), + )?; - let retryable_indexes = [1, 2, 3]; - let mut non_retryable_indexes = vec![]; - let f = |start, end| { - non_retryable_indexes.push((start, end)); - }; - BankingStage::filter_processed_packets(retryable_indexes.iter(), f); - assert_eq!(non_retryable_indexes, vec![(0, 1)]); + for thread_type in [ + ThreadType::Transactions, + ThreadType::Voting(VoteSource::Gossip), + ThreadType::Voting(VoteSource::Tpu), + ] { + let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( + UnprocessedPacketBatches::with_capacity(100), + thread_type, + ); + transaction_storage.insert_batch(vec![ + ImmutableDeserializedPacket::new(small_transfer.clone(), None)?, + ImmutableDeserializedPacket::new(vote.clone(), None)?, + ImmutableDeserializedPacket::new(big_transfer.clone(), None)?, + ]); + let deserialized_packets = transaction_storage + .iter() + .map(|packet| packet.immutable_section().original_packet().clone()) + .collect_vec(); + assert_eq!(3, deserialized_packets.len()); + assert!(deserialized_packets.contains(&small_transfer)); + assert!(deserialized_packets.contains(&vote)); + assert!(deserialized_packets.contains(&big_transfer)); + } - let retryable_indexes = [1, 2, 3, 5]; - let mut non_retryable_indexes = vec![]; - let f = |start, end| { - non_retryable_indexes.push((start, end)); - }; - BankingStage::filter_processed_packets(retryable_indexes.iter(), f); - assert_eq!(non_retryable_indexes, vec![(0, 1), (4, 5)]); + for vote_source in [VoteSource::Gossip, VoteSource::Tpu] { + let mut transaction_storage = UnprocessedTransactionStorage::new_vote_storage( + 
Arc::new(LatestUnprocessedVotes::new()),
+                vote_source,
+            );
+            transaction_storage.insert_batch(vec![
+                ImmutableDeserializedPacket::new(small_transfer.clone(), None)?,
+                ImmutableDeserializedPacket::new(vote.clone(), None)?,
+                ImmutableDeserializedPacket::new(big_transfer.clone(), None)?,
+            ]);
+            assert_eq!(1, transaction_storage.len());
+        }
+        Ok(())
+    }
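The `assert_eq!(1, transaction_storage.len())` above holds because vote storage rejects, at construction time, any packet that does not deserialize into a vote transaction, and keeps at most one vote, the freshest, per validator. A minimal sketch of that retention rule, with illustrative names (this helper does not exist in the patch):

use {
    solana_sdk::{clock::Slot, pubkey::Pubkey},
    std::collections::HashMap,
};

// Sketch only: one latest vote per validator, everything else is dropped.
fn retain_latest_votes(votes: impl Iterator<Item = (Pubkey, Slot)>) -> HashMap<Pubkey, Slot> {
    let mut latest: HashMap<Pubkey, Slot> = HashMap::new();
    for (validator, slot) in votes {
        // A newer vote replaces the buffered one; older votes are ignored.
        let entry = latest.entry(validator).or_insert(slot);
        if slot > *entry {
            *entry = slot;
        }
    }
    latest
}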
 
-        let retryable_indexes = [1, 2, 3, 5, 8];
-        let mut non_retryable_indexes = vec![];
-        let f = |start, end| {
-            non_retryable_indexes.push((start, end));
-        };
-        BankingStage::filter_processed_packets(retryable_indexes.iter(), f);
-        assert_eq!(non_retryable_indexes, vec![(0, 1), (4, 5), (6, 8)]);
 
-        let retryable_indexes = [1, 2, 3, 5, 8, 8];
-        let mut non_retryable_indexes = vec![];
-        let f = |start, end| {
-            non_retryable_indexes.push((start, end));
-        };
-        BankingStage::filter_processed_packets(retryable_indexes.iter(), f);
-        assert_eq!(non_retryable_indexes, vec![(0, 1), (4, 5), (6, 8)]);
+    #[test]
+    fn test_unprocessed_transaction_storage_full_send() {
+        solana_logger::setup();
+        let GenesisConfigInfo {
+            mut genesis_config,
+            mint_keypair,
+            ..
+        } = create_slow_genesis_config(10000);
+        activate_feature(
+            &mut genesis_config,
+            allow_votes_to_directly_update_vote_state::id(),
+        );
+        let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
+        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
+        let bank = Arc::new(bank_forks.read().unwrap().get(0).unwrap());
+        let start_hash = bank.last_blockhash();
+        let (verified_sender, verified_receiver) = unbounded();
+        let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
+        let (gossip_verified_vote_sender, gossip_verified_vote_receiver) = unbounded();
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        {
+            let blockstore = Arc::new(
+                Blockstore::open(ledger_path.path())
+                    .expect("Expected to be able to open database ledger"),
+            );
+            let poh_config = PohConfig {
+                // limit tick count to avoid clearing working_bank at PohRecord then
+                // PohRecorderError(MaxHeightReached) at BankingStage
+                target_tick_count: Some(bank.max_tick_height() - 1),
+                ..PohConfig::default()
+            };
+            let (exit, poh_recorder, poh_service, _entry_receiver) =
+                create_test_recorder(&bank, &blockstore, Some(poh_config), None);
+            let cluster_info = new_test_cluster_info(Node::new_localhost().info);
+            let cluster_info = Arc::new(cluster_info);
+            let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
 
+            let banking_stage = BankingStage::new(
+                &cluster_info,
+                &poh_recorder,
+                verified_receiver,
+                tpu_vote_receiver,
+                gossip_verified_vote_receiver,
+                None,
+                gossip_vote_sender,
+                Arc::new(RwLock::new(CostModel::default())),
+                None,
+                Arc::new(ConnectionCache::default()),
+                bank_forks,
+            );
+
+            let keypairs = (0..100).map(|_| Keypair::new()).collect_vec();
+            let vote_keypairs = (0..100).map(|_| Keypair::new()).collect_vec();
+            for keypair in keypairs.iter() {
+                bank.process_transaction(&system_transaction::transfer(
+                    &mint_keypair,
+                    &keypair.pubkey(),
+                    20,
+                    start_hash,
+                ))
+                .unwrap();
+            }
+
+            // Send a bunch of votes and transfers
+            let tpu_votes = (0..100_usize)
+                .map(|i| {
+                    new_vote_state_update_transaction(
+                        VoteStateUpdate::from(vec![
+                            (0, 8),
+                            (1, 7),
+                            (i as u64 + 10, 6),
+                            (i as u64 + 11, 1),
+                        ]),
+                        Hash::new_unique(),
+                        &keypairs[i],
+                        &vote_keypairs[i],
+                        &vote_keypairs[i],
+                        None,
+                    )
+                })
+                .collect_vec();
+            let gossip_votes = (0..100_usize)
+                .map(|i| {
+                    new_vote_state_update_transaction(
+                        VoteStateUpdate::from(vec![
+                            (0, 8),
+                            (1, 7),
+                            (i as u64 + 64 + 5, 6),
+                            (i as u64 + 7, 1),
+                        ]),
+                        Hash::new_unique(),
+                        &keypairs[i],
+                        &vote_keypairs[i],
+                        &vote_keypairs[i],
+                        None,
+                    )
+                })
+                .collect_vec();
+            let txs = (0..100_usize)
+                .map(|i| {
+                    system_transaction::transfer(
+                        &keypairs[i],
+                        &keypairs[(i + 1) % 100].pubkey(),
+                        10,
+                        start_hash,
+                    )
+                })
+                .collect_vec();
+
+            let tpu_packet_batches = to_packet_batches(&tpu_votes, 10);
+            let gossip_packet_batches = to_packet_batches(&gossip_votes, 10);
+            let tx_packet_batches = to_packet_batches(&txs, 10);
+
+            // Send em all
+            [
+                (tpu_packet_batches, tpu_vote_sender.clone()),
+                (gossip_packet_batches, gossip_verified_vote_sender.clone()),
+                (tx_packet_batches, verified_sender.clone()),
+            ]
+            .into_iter()
+            .map(|(packet_batches, sender)| {
+                Builder::new()
+                    .spawn(move || sender.send((packet_batches, None)).unwrap())
+                    .unwrap()
+            })
+            .for_each(|handle| handle.join().unwrap());
+
+            drop(verified_sender);
+            drop(tpu_vote_sender);
+            drop(gossip_verified_vote_sender);
+            banking_stage.join().unwrap();
+            exit.store(true, Ordering::Relaxed);
+            poh_service.join().unwrap();
+        }
+        Blockstore::destroy(ledger_path.path()).unwrap();
     }
 }
 
diff --git a/core/src/latest_unprocessed_votes.rs b/core/src/latest_unprocessed_votes.rs
index fe73582abd6de3..aeedc711b4c10f 100644
--- a/core/src/latest_unprocessed_votes.rs
+++ b/core/src/latest_unprocessed_votes.rs
@@ -1,4 +1,3 @@
-#![allow(dead_code)]
 use {
     crate::{
         forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts,
@@ -7,7 +6,7 @@ use {
     },
     itertools::Itertools,
     rand::{thread_rng, Rng},
-    solana_perf::packet::{Packet, PacketBatch},
+    solana_perf::packet::Packet,
     solana_runtime::bank::Bank,
     solana_sdk::{clock::Slot, program_utils::limited_deserialize, pubkey::Pubkey},
     solana_vote_program::vote_instruction::VoteInstruction,
@@ -56,7 +55,9 @@ impl LatestValidatorVotePacket {
 
         match limited_deserialize::<VoteInstruction>(&instruction.data) {
             Ok(VoteInstruction::UpdateVoteState(vote_state_update))
-            | Ok(VoteInstruction::UpdateVoteStateSwitch(vote_state_update, _)) => {
+            | Ok(VoteInstruction::UpdateVoteStateSwitch(vote_state_update, _))
+            | Ok(VoteInstruction::CompactUpdateVoteState(vote_state_update))
+            | Ok(VoteInstruction::CompactUpdateVoteStateSwitch(vote_state_update, _)) => {
                 let &pubkey = message
                     .message
                     .static_account_keys()
@@ -102,16 +103,6 @@ impl LatestValidatorVotePacket {
     }
 }
 
-pub fn deserialize_packets<'a>(
-    packet_batch: &'a PacketBatch,
-    packet_indexes: &'a [usize],
-    vote_source: VoteSource,
-) -> impl Iterator<Item = LatestValidatorVotePacket> + 'a {
-    packet_indexes.iter().filter_map(move |packet_index| {
-        LatestValidatorVotePacket::new(packet_batch[*packet_index].clone(), vote_source).ok()
-    })
-}
-
 // TODO: replace this with rand::seq::index::sample_weighted once we can update rand to 0.8+
 // This requires updating dependencies of ed25519-dalek as rand_core is not compatible cross
 // version https://github.com/dalek-cryptography/ed25519-dalek/pull/214
@@ -329,7 +320,7 @@ mod tests {
         super::*,
         itertools::Itertools,
         rand::{thread_rng, Rng},
-        solana_perf::packet::{Packet, PacketFlags},
+        solana_perf::packet::{Packet, PacketBatch, PacketFlags},
         solana_runtime::{
             bank::Bank,
             genesis_utils::{self, ValidatorVoteKeypairs},
@@ -361,6 +352,16 @@ mod tests {
         LatestValidatorVotePacket::new(packet, vote_source).unwrap()
     }
 
+    fn deserialize_packets<'a>(
+        packet_batch: &'a PacketBatch,
+        packet_indexes: &'a [usize],
+        vote_source: VoteSource,
+    ) -> impl Iterator<Item = LatestValidatorVotePacket> + 'a {
+        packet_indexes.iter().filter_map(move |packet_index| {
+            LatestValidatorVotePacket::new(packet_batch[*packet_index].clone(),
vote_source).ok() + }) + } + #[test] fn test_deserialize_vote_packets() { let keypairs = ValidatorVoteKeypairs::new_rand(); diff --git a/core/src/leader_slot_banking_stage_metrics.rs b/core/src/leader_slot_banking_stage_metrics.rs index ed556991e1d560..015f1fba0a8a29 100644 --- a/core/src/leader_slot_banking_stage_metrics.rs +++ b/core/src/leader_slot_banking_stage_metrics.rs @@ -1,5 +1,8 @@ use { - crate::leader_slot_banking_stage_timing_metrics::*, + crate::{ + leader_slot_banking_stage_timing_metrics::*, + unprocessed_transaction_storage::InsertPacketBatchesSummary, + }, solana_poh::poh_recorder::BankStart, solana_runtime::transaction_error_metrics::*, solana_sdk::{clock::Slot, saturating_add_assign}, @@ -270,6 +273,8 @@ pub(crate) struct LeaderSlotMetrics { transaction_error_metrics: TransactionErrorMetrics, + vote_packet_count_metrics: VotePacketCountMetrics, + timing_metrics: LeaderSlotTimingMetrics, // Used by tests to check if the `self.report()` method was called @@ -283,6 +288,7 @@ impl LeaderSlotMetrics { slot, packet_count_metrics: LeaderSlotPacketCountMetrics::new(), transaction_error_metrics: TransactionErrorMetrics::new(), + vote_packet_count_metrics: VotePacketCountMetrics::new(), timing_metrics: LeaderSlotTimingMetrics::new(bank_creation_time), is_reported: false, } @@ -294,6 +300,7 @@ impl LeaderSlotMetrics { self.timing_metrics.report(self.id, self.slot); self.transaction_error_metrics.report(self.id, self.slot); self.packet_count_metrics.report(self.id, self.slot); + self.vote_packet_count_metrics.report(self.id, self.slot); } /// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None @@ -310,6 +317,37 @@ impl LeaderSlotMetrics { } } +// Metrics describing vote tx packets that were processed in the tpu vote thread as well as +// extraneous votes that were filtered out +#[derive(Debug, Default)] +pub(crate) struct VotePacketCountMetrics { + // How many votes ingested from gossip were dropped + dropped_gossip_votes: u64, + + // How many votes ingested from tpu were dropped + dropped_tpu_votes: u64, +} + +impl VotePacketCountMetrics { + fn new() -> Self { + Self { ..Self::default() } + } + + fn report(&self, id: u32, slot: Slot) { + datapoint_info!( + "banking_stage-vote_packet_counts", + ("id", id as i64, i64), + ("slot", slot as i64, i64), + ( + "dropped_gossip_votes", + self.dropped_gossip_votes as i64, + i64 + ), + ("dropped_tpu_votes", self.dropped_tpu_votes as i64, i64) + ); + } +} + #[derive(Debug)] pub(crate) enum MetricsTrackerAction { Noop, @@ -498,6 +536,21 @@ impl LeaderSlotMetricsTracker { } } + pub(crate) fn accumulate_insert_packet_batches_summary( + &mut self, + insert_packet_batches_summary: &InsertPacketBatchesSummary, + ) { + self.increment_exceeded_buffer_limit_dropped_packets_count( + insert_packet_batches_summary.num_dropped_packets as u64, + ); + self.increment_dropped_gossip_vote_count( + insert_packet_batches_summary.num_dropped_gossip_vote_packets as u64, + ); + self.increment_dropped_tpu_vote_count( + insert_packet_batches_summary.num_dropped_tpu_vote_packets as u64, + ); + } + pub(crate) fn accumulate_transaction_errors( &mut self, error_metrics: &TransactionErrorMetrics, @@ -780,6 +833,28 @@ impl LeaderSlotMetricsTracker { ); } } + + pub(crate) fn increment_dropped_gossip_vote_count(&mut self, count: u64) { + if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { + saturating_add_assign!( + leader_slot_metrics + .vote_packet_count_metrics + .dropped_gossip_votes, + count + ); + } + } + + pub(crate) fn 
increment_dropped_tpu_vote_count(&mut self, count: u64) { + if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { + saturating_add_assign!( + leader_slot_metrics + .vote_packet_count_metrics + .dropped_tpu_votes, + count + ); + } + } } #[cfg(test)] diff --git a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs index 6642da00a4bdc5..15413236fe7d1f 100644 --- a/core/src/unprocessed_transaction_storage.rs +++ b/core/src/unprocessed_transaction_storage.rs @@ -1,18 +1,16 @@ -#![allow(dead_code)] use { crate::{ banking_stage::{self, BankingStage, FilterForwardingResults, ForwardOption}, forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, immutable_deserialized_packet::ImmutableDeserializedPacket, latest_unprocessed_votes::{ - self, LatestUnprocessedVotes, LatestValidatorVotePacket, VoteBatchInsertionMetrics, + LatestUnprocessedVotes, LatestValidatorVotePacket, VoteBatchInsertionMetrics, VoteSource, }, - unprocessed_packet_batches::{self, DeserializedPacket, UnprocessedPacketBatches}, + unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, }, itertools::Itertools, min_max_heap::MinMaxHeap, - solana_perf::packet::PacketBatch, solana_runtime::bank::Bank, std::sync::Arc, }; @@ -25,6 +23,9 @@ pub enum UnprocessedTransactionStorage { LocalTransactionStorage(ThreadLocalUnprocessedPackets), } +unsafe impl Send for UnprocessedTransactionStorage {} +unsafe impl Sync for UnprocessedTransactionStorage {} + #[derive(Debug)] pub struct ThreadLocalUnprocessedPackets { unprocessed_packet_batches: UnprocessedPacketBatches, @@ -51,7 +52,7 @@ pub struct InsertPacketBatchesSummary { pub(crate) num_dropped_tracer_packets: usize, } -fn filter_processed_packets<'a, F>( +pub(crate) fn filter_processed_packets<'a, F>( retryable_transaction_indexes: impl Iterator, mut f: F, ) where @@ -145,17 +146,16 @@ impl UnprocessedTransactionStorage { } } - pub fn deserialize_and_insert_batch( + pub fn insert_batch( &mut self, - packet_batch: &PacketBatch, - packet_indexes: &[usize], + deserialized_packets: Vec, ) -> InsertPacketBatchesSummary { match self { Self::VoteStorage(vote_storage) => { let VoteBatchInsertionMetrics { num_dropped_gossip, num_dropped_tpu, - } = vote_storage.deserialize_and_insert_batch(packet_batch, packet_indexes); + } = vote_storage.insert_batch(deserialized_packets); InsertPacketBatchesSummary { num_dropped_packets: num_dropped_gossip + num_dropped_tpu, num_dropped_gossip_vote_packets: num_dropped_gossip, @@ -165,7 +165,7 @@ impl UnprocessedTransactionStorage { } Self::LocalTransactionStorage(transaction_storage) => { let (num_dropped_packets, num_dropped_tracer_packets) = - transaction_storage.deserialize_and_insert_batch(packet_batch, packet_indexes); + transaction_storage.insert_batch(deserialized_packets); InsertPacketBatchesSummary { num_dropped_packets, num_dropped_tracer_packets, @@ -241,17 +241,22 @@ impl VoteStorage { self.latest_unprocessed_votes.clear_forwarded_packets(); } - fn deserialize_and_insert_batch( + fn insert_batch( &mut self, - packet_batch: &PacketBatch, - packet_indexes: &[usize], + deserialized_packets: Vec, ) -> VoteBatchInsertionMetrics { self.latest_unprocessed_votes - .insert_batch(latest_unprocessed_votes::deserialize_packets( - packet_batch, - packet_indexes, - self.vote_source, - )) + .insert_batch( + deserialized_packets + .into_iter() + .filter_map(|deserialized_packet| { + LatestValidatorVotePacket::new_from_immutable( + Rc::new(deserialized_packet), + self.vote_source, + ) + .ok() 
+ }), + ) } fn filter_forwardable_packets_and_add_batches( @@ -345,13 +350,14 @@ impl ThreadLocalUnprocessedPackets { self.unprocessed_packet_batches.clear(); } - fn deserialize_and_insert_batch( + fn insert_batch( &mut self, - packet_batch: &PacketBatch, - packet_indexes: &[usize], + deserialized_packets: Vec, ) -> (usize, usize) { self.unprocessed_packet_batches.insert_batch( - unprocessed_packet_batches::deserialize_packets(packet_batch, packet_indexes), + deserialized_packets + .into_iter() + .map(DeserializedPacket::from_immutable_section), ) } @@ -360,12 +366,18 @@ impl ThreadLocalUnprocessedPackets { bank: Arc, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { - BankingStage::filter_and_forward_with_account_limits( + let results = BankingStage::filter_and_forward_with_account_limits( &bank, &mut self.unprocessed_packet_batches, forward_packet_batches_by_accounts, banking_stage::UNPROCESSED_BUFFER_STEP_SIZE, - ) + ); + + for deserialized_packet in self.unprocessed_packet_batches.iter_mut() { + // Mark so we don't forward again + deserialized_packet.forwarded = true; + } + results } fn process_packets(&mut self, batch_size: usize, mut processing_function: F) @@ -388,20 +400,11 @@ impl ThreadLocalUnprocessedPackets { if let Some(retryable_transaction_indexes) = processing_function(&packets_to_process) { - // Remove the non-retryable packets, packets that were either: - // 1) Successfully processed - // 2) Failed but not retryable - filter_processed_packets( - retryable_transaction_indexes - .iter() - .chain(std::iter::once(&packets_to_process.len())), - |start, end| { - for processed_packet in &packets_to_process[start..end] { - self.unprocessed_packet_batches - .message_hash_to_transaction - .remove(processed_packet.message_hash()); - } - }, + // TODO: move this function tree into this module + BankingStage::remove_non_retained_packets( + &mut self.unprocessed_packet_batches, + &packets_to_process, + &retryable_transaction_indexes, ); retryable_transaction_indexes .iter() From a8a7e148a7aa6c1e3cc281de4644e65931e7397d Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 29 Sep 2022 14:53:39 -0700 Subject: [PATCH 59/65] Update local cluster test to use new Vote ix --- local-cluster/tests/local_cluster_slow_1.rs | 30 ++++++++++++++------- programs/vote/src/vote_transaction.rs | 30 +++++++++++++++++++++ 2 files changed, 50 insertions(+), 10 deletions(-) diff --git a/local-cluster/tests/local_cluster_slow_1.rs b/local-cluster/tests/local_cluster_slow_1.rs index 90e4f9bd4cc315..3e63d1bc0d3b8f 100644 --- a/local-cluster/tests/local_cluster_slow_1.rs +++ b/local-cluster/tests/local_cluster_slow_1.rs @@ -30,6 +30,7 @@ use { hash::Hash, pubkey::Pubkey, signature::Signer, + vote::state::VoteStateUpdate, }, solana_streamer::socket::SocketAddrSpace, solana_vote_program::{vote_state::MAX_LOCKOUT_HISTORY, vote_transaction}, @@ -541,22 +542,31 @@ fn test_duplicate_shreds_broadcast_leader() { // root by this validator, but we're not concerned with lockout violations // by this validator so it's fine. let leader_blockstore = open_blockstore(&bad_leader_ledger_path); - let mut vote_slots: Vec = AncestorIterator::new_inclusive( + let mut vote_slots: Vec<(Slot, u32)> = AncestorIterator::new_inclusive( latest_vote_slot, &leader_blockstore, ) .take(MAX_LOCKOUT_HISTORY) + .zip(1..) 
.collect(); vote_slots.reverse(); - let vote_tx = vote_transaction::new_vote_transaction( - vote_slots, - vote_hash, - leader_vote_tx.message.recent_blockhash, - &node_keypair, - &vote_keypair, - &vote_keypair, - None, - ); + let mut vote = VoteStateUpdate::from(vote_slots); + let root = AncestorIterator::new_inclusive( + latest_vote_slot, + &leader_blockstore, + ) + .nth(MAX_LOCKOUT_HISTORY); + vote.root = root; + vote.hash = vote_hash; + let vote_tx = + vote_transaction::new_compact_vote_state_update_transaction( + vote, + leader_vote_tx.message.recent_blockhash, + &node_keypair, + &vote_keypair, + &vote_keypair, + None, + ); gossip_vote_index += 1; gossip_vote_index %= MAX_LOCKOUT_HISTORY; cluster_info.push_vote_at_index(vote_tx, gossip_vote_index as u8) diff --git a/programs/vote/src/vote_transaction.rs b/programs/vote/src/vote_transaction.rs index 43cdbb11d6b37c..48316daff6e6d9 100644 --- a/programs/vote/src/vote_transaction.rs +++ b/programs/vote/src/vote_transaction.rs @@ -72,3 +72,33 @@ pub fn new_vote_state_update_transaction( vote_tx.partial_sign(&[authorized_voter_keypair], blockhash); vote_tx } + +pub fn new_compact_vote_state_update_transaction( + vote_state_update: VoteStateUpdate, + blockhash: Hash, + node_keypair: &Keypair, + vote_keypair: &Keypair, + authorized_voter_keypair: &Keypair, + switch_proof_hash: Option, +) -> Transaction { + let vote_ix = if let Some(switch_proof_hash) = switch_proof_hash { + vote::instruction::compact_update_vote_state_switch( + &vote_keypair.pubkey(), + &authorized_voter_keypair.pubkey(), + vote_state_update, + switch_proof_hash, + ) + } else { + vote::instruction::compact_update_vote_state( + &vote_keypair.pubkey(), + &authorized_voter_keypair.pubkey(), + vote_state_update, + ) + }; + + let mut vote_tx = Transaction::new_with_payer(&[vote_ix], Some(&node_keypair.pubkey())); + + vote_tx.partial_sign(&[node_keypair], blockhash); + vote_tx.partial_sign(&[authorized_voter_keypair], blockhash); + vote_tx +} From 9b960621d1c864c341836f7b2a3ca2f11977845f Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 29 Sep 2022 16:26:42 -0700 Subject: [PATCH 60/65] Encapsulate transaction storage filtering better --- Cargo.lock | 1 + core/benches/unprocessed_packet_batches.rs | 32 +- core/src/banking_stage.rs | 496 +----------------- core/src/fetch_stage.rs | 7 +- .../src/forward_packet_batches_by_accounts.rs | 47 +- core/src/immutable_deserialized_packet.rs | 30 +- core/src/latest_unprocessed_votes.rs | 20 +- core/src/unprocessed_packet_batches.rs | 53 +- core/src/unprocessed_transaction_storage.rs | 484 ++++++++++++++++- runtime/Cargo.toml | 1 + runtime/src/bank.rs | 35 +- sdk/program/src/clock.rs | 4 + 12 files changed, 609 insertions(+), 601 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4864e616ef42af..e66c411b127575 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6145,6 +6145,7 @@ dependencies = [ "solana-logger 1.15.0", "solana-measure", "solana-metrics", + "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", "solana-sdk 1.15.0", diff --git a/core/benches/unprocessed_packet_batches.rs b/core/benches/unprocessed_packet_batches.rs index 1fa13dde7eabee..c4dbed1f615fb4 100644 --- a/core/benches/unprocessed_packet_batches.rs +++ b/core/benches/unprocessed_packet_batches.rs @@ -6,8 +6,11 @@ extern crate test; use { rand::distributions::{Distribution, Uniform}, solana_core::{ - banking_stage::*, forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, + forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, 
unprocessed_packet_batches::*, + unprocessed_transaction_storage::{ + ThreadType, UnprocessedTransactionStorage, UNPROCESSED_BUFFER_STEP_SIZE, + }, }, solana_measure::measure::Measure, solana_perf::packet::{Packet, PacketBatch}, @@ -104,7 +107,7 @@ fn insert_packet_batches( #[allow(clippy::unit_arg)] fn bench_packet_clone(bencher: &mut Bencher) { let batch_count = 1000; - let packet_per_batch_count = 128; + let packet_per_batch_count = UNPROCESSED_BUFFER_STEP_SIZE; let packet_batches: Vec = (0..batch_count) .map(|_| build_packet_batch(packet_per_batch_count, None).0) @@ -134,9 +137,9 @@ fn bench_packet_clone(bencher: &mut Bencher) { #[bench] #[ignore] fn bench_unprocessed_packet_batches_within_limit(bencher: &mut Bencher) { - let buffer_capacity = 1_000 * 128; + let buffer_capacity = 1_000 * UNPROCESSED_BUFFER_STEP_SIZE; let batch_count = 1_000; - let packet_per_batch_count = 128; + let packet_per_batch_count = UNPROCESSED_BUFFER_STEP_SIZE; bencher.iter(|| { insert_packet_batches(buffer_capacity, batch_count, packet_per_batch_count, false); @@ -148,9 +151,9 @@ fn bench_unprocessed_packet_batches_within_limit(bencher: &mut Bencher) { #[bench] #[ignore] fn bench_unprocessed_packet_batches_beyond_limit(bencher: &mut Bencher) { - let buffer_capacity = 1_000 * 128; + let buffer_capacity = 1_000 * UNPROCESSED_BUFFER_STEP_SIZE; let batch_count = 1_100; - let packet_per_batch_count = 128; + let packet_per_batch_count = UNPROCESSED_BUFFER_STEP_SIZE; // this is the worst scenario testing: all batches are uniformly populated with packets from // priority 100..228, so in order to drop a batch, algo will have to drop all packets that has @@ -167,9 +170,9 @@ fn bench_unprocessed_packet_batches_beyond_limit(bencher: &mut Bencher) { #[bench] #[ignore] fn bench_unprocessed_packet_batches_randomized_within_limit(bencher: &mut Bencher) { - let buffer_capacity = 1_000 * 128; + let buffer_capacity = 1_000 * UNPROCESSED_BUFFER_STEP_SIZE; let batch_count = 1_000; - let packet_per_batch_count = 128; + let packet_per_batch_count = UNPROCESSED_BUFFER_STEP_SIZE; bencher.iter(|| { insert_packet_batches(buffer_capacity, batch_count, packet_per_batch_count, true); @@ -181,9 +184,9 @@ fn bench_unprocessed_packet_batches_randomized_within_limit(bencher: &mut Benche #[bench] #[ignore] fn bench_unprocessed_packet_batches_randomized_beyond_limit(bencher: &mut Bencher) { - let buffer_capacity = 1_000 * 128; + let buffer_capacity = 1_000 * UNPROCESSED_BUFFER_STEP_SIZE; let batch_count = 1_100; - let packet_per_batch_count = 128; + let packet_per_batch_count = UNPROCESSED_BUFFER_STEP_SIZE; bencher.iter(|| { insert_packet_batches(buffer_capacity, batch_count, packet_per_batch_count, true); @@ -198,7 +201,6 @@ fn buffer_iter_desc_and_forward( ) { solana_logger::setup(); let mut unprocessed_packet_batches = UnprocessedPacketBatches::with_capacity(buffer_max_size); - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new(bank); @@ -226,13 +228,15 @@ fn buffer_iter_desc_and_forward( // forward whole buffer { + let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( + unprocessed_packet_batches, + ThreadType::Transactions, + ); let mut forward_packet_batches_by_accounts = ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - let _ = BankingStage::filter_and_forward_with_account_limits( + let _ = transaction_storage.filter_forwardable_packets_and_add_batches( ¤t_bank, - &mut unprocessed_packet_batches, &mut forward_packet_batches_by_accounts, - 128usize, ); } } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index f43accee731477..f1ba329e07e34d 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -3,7 +3,6 @@ //! can do its processing in parallel with signature verification on the GPU. use { - min_max_heap::MinMaxHeap, crate::{ forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, immutable_deserialized_packet::ImmutableDeserializedPacket, @@ -16,8 +15,10 @@ use { qos_service::QosService, sigverify::SigverifyTracerPacketStats, tracer_packet_stats::TracerPacketStats, - unprocessed_packet_batches::{self, *}, - unprocessed_transaction_storage::{self, ThreadType, UnprocessedTransactionStorage}, + unprocessed_packet_batches::*, + unprocessed_transaction_storage::{ + ThreadType, UnprocessedTransactionStorage, UNPROCESSED_BUFFER_STEP_SIZE, + }, }, core::iter::repeat, crossbeam_channel::{ @@ -35,7 +36,6 @@ use { solana_perf::{ data_budget::DataBudget, packet::{Packet, PacketBatch, PACKETS_PER_BATCH}, - perf_libs, }, solana_poh::poh_recorder::{BankStart, PohRecorder, PohRecorderError, TransactionRecorder}, solana_program_runtime::timings::ExecuteTimings, @@ -53,8 +53,8 @@ use { }, solana_sdk::{ clock::{ - Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, - MAX_TRANSACTION_FORWARDING_DELAY_GPU, + Slot, DEFAULT_TICKS_PER_SLOT, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, + HOLD_TRANSACTIONS_SLOT_OFFSET, MAX_PROCESSING_AGE, }, feature_set::allow_votes_to_directly_update_vote_state, pubkey::Pubkey, @@ -80,10 +80,6 @@ use { }, }; -/// Transaction forwarding -pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2; -pub const HOLD_TRANSACTIONS_SLOT_OFFSET: u64 = 20; - // Fixed thread size seems to be fastest on GCP setup pub const NUM_THREADS: u32 = 6; @@ -94,7 +90,6 @@ const MAX_NUM_TRANSACTIONS_PER_BATCH: usize = 64; const NUM_VOTE_PROCESSING_THREADS: u32 = 2; const MIN_THREADS_BANKING: u32 = 1; const MIN_TOTAL_THREADS: u32 = NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING; -pub const UNPROCESSED_BUFFER_STEP_SIZE: usize = 128; const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10); pub type BankingPacketBatch = (Vec, Option); @@ -996,7 +991,7 @@ impl BankingStage { // already processed), then add to forwarding buffer. let filter_forwarding_result = unprocessed_transaction_storage .filter_forwardable_packets_and_add_batches( - current_bank, + ¤t_bank, &mut forward_packet_batches_by_accounts, ); slot_metrics_tracker.increment_transactions_from_packets_us( @@ -1066,294 +1061,6 @@ impl BankingStage { } } - /// Filter out packets that fail to sanitize, or are no longer valid (could be - /// too old, a duplicate of something already processed). 
Doing this in batches to avoid - /// checking bank's blockhash and status cache per transaction which could be bad for performance. - /// Added valid and sanitized packets to forwarding queue. - pub fn filter_and_forward_with_account_limits( - bank: &Arc, - buffered_packet_batches: &mut UnprocessedPacketBatches, - forward_buffer: &mut ForwardPacketBatchesByAccounts, - batch_size: usize, - ) -> FilterForwardingResults { - let mut total_forwardable_tracer_packets: usize = 0; - let mut total_tracer_packets_in_buffer: usize = 0; - let mut total_forwardable_packets: usize = 0; - let mut total_packet_conversion_us: u64 = 0; - let mut total_filter_packets_us: u64 = 0; - let mut dropped_tx_before_forwarding_count: usize = 0; - - let mut original_priority_queue = Self::swap_priority_queue(buffered_packet_batches); - - // indicates if `forward_buffer` still accept more packets, see details at - // `ForwardPacketBatchesByAccounts.rs`. - let mut accepting_packets = true; - // batch iterate through buffered_packet_batches in desc priority order - let retained_priority_queue: MinMaxHeap<_> = original_priority_queue - .drain_desc() - .chunks(batch_size) - .into_iter() - .flat_map(|packets_to_process| { - let packets_to_process = packets_to_process.into_iter().collect_vec(); - - // Vec of same size of `packets_to_process`, each indicates - // corresponding packet is tracer packet. - let tracer_packet_indexes = packets_to_process - .iter() - .map(|deserialized_packet| { - deserialized_packet - .original_packet() - .meta - .is_tracer_packet() - }) - .collect::>(); - saturating_add_assign!( - total_tracer_packets_in_buffer, - tracer_packet_indexes - .iter() - .filter(|is_tracer| **is_tracer) - .count() - ); - - if accepting_packets { - let ( - (sanitized_transactions, transaction_to_packet_indexes), - packet_conversion_time, - ): ((Vec, Vec), _) = measure!( - Self::sanitize_unforwarded_packets( - buffered_packet_batches, - &packets_to_process, - bank, - ), - "sanitize_packet", - ); - saturating_add_assign!( - total_packet_conversion_us, - packet_conversion_time.as_us() - ); - - let (forwardable_transaction_indexes, filter_packets_time) = measure!( - Self::filter_invalid_transactions(&sanitized_transactions, bank,), - "filter_packets", - ); - saturating_add_assign!(total_filter_packets_us, filter_packets_time.as_us()); - - for forwardable_transaction_index in &forwardable_transaction_indexes { - saturating_add_assign!(total_forwardable_packets, 1); - let forwardable_packet_index = - transaction_to_packet_indexes[*forwardable_transaction_index]; - if tracer_packet_indexes[forwardable_packet_index] { - saturating_add_assign!(total_forwardable_tracer_packets, 1); - } - } - - let accepted_packet_indexes = Self::add_filtered_packets_to_forward_buffer( - forward_buffer, - &packets_to_process, - &sanitized_transactions, - &transaction_to_packet_indexes, - &forwardable_transaction_indexes, - &mut dropped_tx_before_forwarding_count, - ); - accepting_packets = - accepted_packet_indexes.len() == forwardable_transaction_indexes.len(); - - UnprocessedPacketBatches::mark_accepted_packets_as_forwarded( - buffered_packet_batches, - &packets_to_process, - &accepted_packet_indexes, - ); - - Self::collect_retained_packets( - buffered_packet_batches, - &packets_to_process, - &Self::prepare_filtered_packet_indexes( - &transaction_to_packet_indexes, - &forwardable_transaction_indexes, - ), - ) - } else { - // skip sanitizing and filtering if not longer able to add more packets for forwarding - saturating_add_assign!( - 
dropped_tx_before_forwarding_count, - packets_to_process.len() - ); - packets_to_process - } - }) - .collect(); - - // replace packet priority queue - buffered_packet_batches.packet_priority_queue = retained_priority_queue; - - inc_new_counter_info!( - "banking_stage-dropped_tx_before_forwarding", - dropped_tx_before_forwarding_count - ); - - FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - total_packet_conversion_us, - total_filter_packets_us, - } - } - - /// Take buffered_packet_batches's priority_queue out, leave empty MinMaxHeap in its place. - fn swap_priority_queue( - buffered_packet_batches: &mut UnprocessedPacketBatches, - ) -> MinMaxHeap> { - let capacity = buffered_packet_batches.capacity(); - std::mem::replace( - &mut buffered_packet_batches.packet_priority_queue, - MinMaxHeap::with_capacity(capacity), - ) - } - - /// sanitize un-forwarded packet into SanitizedTransaction for validation and forwarding. - fn sanitize_unforwarded_packets( - buffered_packet_batches: &mut UnprocessedPacketBatches, - packets_to_process: &[Arc], - bank: &Arc, - ) -> (Vec, Vec) { - // Get ref of ImmutableDeserializedPacket - let deserialized_packets = packets_to_process.iter().map(|p| &**p); - let (transactions, transaction_to_packet_indexes): (Vec, Vec) = - deserialized_packets - .enumerate() - .filter_map(|(packet_index, deserialized_packet)| { - if !buffered_packet_batches.is_forwarded(deserialized_packet) { - unprocessed_packet_batches::transaction_from_deserialized_packet( - deserialized_packet, - &bank.feature_set, - bank.vote_only_bank(), - bank.as_ref(), - ) - .map(|transaction| (transaction, packet_index)) - } else { - None - } - }) - .unzip(); - - // report metrics - inc_new_counter_info!("banking_stage-packet_conversion", 1); - let unsanitized_packets_filtered_count = - packets_to_process.len().saturating_sub(transactions.len()); - inc_new_counter_info!( - "banking_stage-dropped_tx_before_forwarding", - unsanitized_packets_filtered_count - ); - - (transactions, transaction_to_packet_indexes) - } - - /// Checks sanitized transactions against bank, returns valid transaction indexes - fn filter_invalid_transactions( - transactions: &[SanitizedTransaction], - bank: &Arc, - ) -> Vec { - let filter = vec![Ok(()); transactions.len()]; - let results = Self::bank_check_transactions(bank, transactions, &filter); - // report metrics - let filtered_out_transactions_count = transactions.len().saturating_sub(results.len()); - inc_new_counter_info!( - "banking_stage-dropped_tx_before_forwarding", - filtered_out_transactions_count - ); - - results - .iter() - .enumerate() - .filter_map( - |(tx_index, (result, _))| if result.is_ok() { Some(tx_index) } else { None }, - ) - .collect_vec() - } - - fn prepare_filtered_packet_indexes( - transaction_to_packet_indexes: &[usize], - retained_transaction_indexes: &[usize], - ) -> Vec { - retained_transaction_indexes - .iter() - .map(|tx_index| transaction_to_packet_indexes[*tx_index]) - .collect_vec() - } - - fn collect_retained_packets( - buffered_packet_batches: &mut UnprocessedPacketBatches, - packets_to_process: &[Arc], - retained_packet_indexes: &[usize], - ) -> Vec> { - Self::remove_non_retained_packets( - buffered_packet_batches, - packets_to_process, - retained_packet_indexes, - ); - retained_packet_indexes - .iter() - .map(|i| packets_to_process[*i].clone()) - .collect_vec() - } - - /// remove packets from UnprocessedPacketBatches.message_hash_to_transaction after they have - /// 
been removed from UnprocessedPacketBatches.packet_priority_queue - pub(crate) fn remove_non_retained_packets( - buffered_packet_batches: &mut UnprocessedPacketBatches, - packets_to_process: &[Arc], - retained_packet_indexes: &[usize], - ) { - unprocessed_transaction_storage::filter_processed_packets( - retained_packet_indexes - .iter() - .chain(std::iter::once(&packets_to_process.len())), - |start, end| { - for processed_packet in &packets_to_process[start..end] { - buffered_packet_batches - .message_hash_to_transaction - .remove(processed_packet.message_hash()); - } - }, - ) - } - - /// try to add filtered forwardable and valid packets to forward buffer; - /// returns vector of packet indexes that were accepted for forwarding. - fn add_filtered_packets_to_forward_buffer( - forward_buffer: &mut ForwardPacketBatchesByAccounts, - packets_to_process: &[Arc], - transactions: &[SanitizedTransaction], - transaction_to_packet_indexes: &[usize], - forwardable_transaction_indexes: &[usize], - dropped_tx_before_forwarding_count: &mut usize, - ) -> Vec { - let mut added_packets_count: usize = 0; - let mut accepted_packet_indexes = Vec::with_capacity(transaction_to_packet_indexes.len()); - for forwardable_transaction_index in forwardable_transaction_indexes { - let sanitized_transaction = &transactions[*forwardable_transaction_index]; - let forwardable_packet_index = - transaction_to_packet_indexes[*forwardable_transaction_index]; - let immutable_deserialized_packet = - packets_to_process[forwardable_packet_index].clone(); - if !forward_buffer.try_add_packet(sanitized_transaction, immutable_deserialized_packet) - { - break; - } - accepted_packet_indexes.push(forwardable_packet_index); - saturating_add_assign!(added_packets_count, 1); - } - - // count the packets not being forwarded in this batch - saturating_add_assign!( - *dropped_tx_before_forwarding_count, - forwardable_transaction_indexes.len() - added_packets_count - ); - - accepted_packet_indexes - } - #[allow(clippy::too_many_arguments)] fn process_loop( packet_deserializer: &mut PacketDeserializer, @@ -2096,35 +1803,6 @@ impl BankingStage { .collect_vec() } - /// Checks a batch of sanitized transactions again bank for age and status - fn bank_check_transactions( - bank: &Arc, - transactions: &[SanitizedTransaction], - filter: &[transaction::Result<()>], - ) -> Vec { - let mut error_counters = TransactionErrorMetrics::default(); - // The following code also checks if the blockhash for a transaction is too old - // The check accounts for - // 1. Transaction forwarding delay - // 2. 
The slot at which the next leader will actually process the transaction - // Drop the transaction if it will expire by the time the next node receives and processes it - let api = perf_libs::api(); - let max_tx_fwd_delay = if api.is_none() { - MAX_TRANSACTION_FORWARDING_DELAY - } else { - MAX_TRANSACTION_FORWARDING_DELAY_GPU - }; - - bank.check_transactions( - transactions, - filter, - (MAX_PROCESSING_AGE) - .saturating_sub(max_tx_fwd_delay) - .saturating_sub(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET as usize), - &mut error_counters, - ) - } - /// This function filters pending packets that are still valid /// # Arguments /// * `transactions` - a batch of transactions deserialized from packets @@ -2139,7 +1817,11 @@ impl BankingStage { let filter = Self::prepare_filter_for_pending_transactions(transactions.len(), pending_indexes); - let results = Self::bank_check_transactions(bank, transactions, &filter); + let results = bank.check_transactions_with_forwarding_delay( + transactions, + &filter, + FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, + ); Self::filter_valid_transaction_indexes(&results, transaction_to_packet_indexes) } @@ -2165,13 +1847,13 @@ impl BankingStage { deserialized_packets .enumerate() .filter_map(|(i, deserialized_packet)| { - unprocessed_packet_batches::transaction_from_deserialized_packet( - deserialized_packet, - &bank.feature_set, - bank.vote_only_bank(), - bank.as_ref(), - ) - .map(|transaction| (transaction, i)) + deserialized_packet + .compute_sanitized_transaction( + &bank.feature_set, + bank.vote_only_bank(), + bank.as_ref(), + ) + .map(|transaction| (transaction, i)) }) .unzip(), "packet_conversion", @@ -2411,6 +2093,7 @@ where mod tests { use { super::*, + crate::unprocessed_packet_batches, crossbeam_channel::{unbounded, Receiver}, solana_address_lookup_table_program::state::{AddressLookupTable, LookupTableMeta}, solana_entry::entry::{next_entry, next_versioned_entry, Entry, EntrySlice}, @@ -2455,7 +2138,6 @@ mod tests { sync::atomic::{AtomicBool, Ordering}, thread::sleep, }, - unprocessed_packet_batches::DeserializedPacket, }; fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo { @@ -3487,144 +3169,6 @@ mod tests { Blockstore::destroy(ledger_path.path()).unwrap(); } - #[test] - fn test_filter_and_forward_with_account_limits() { - solana_logger::setup(); - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. - } = create_genesis_config(10); - let current_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - let simple_transactions: Vec = (0..256) - .map(|_id| { - // packets are deserialized upon receiving, failed packets will not be - // forwarded; Therefore we need to create real packets here. 
- let key1 = Keypair::new(); - system_transaction::transfer( - &mint_keypair, - &key1.pubkey(), - genesis_config.rent.minimum_balance(0), - genesis_config.hash(), - ) - }) - .collect_vec(); - - let mut packets: Vec = simple_transactions - .iter() - .enumerate() - .map(|(packets_id, transaction)| { - let mut p = Packet::from_data(None, transaction).unwrap(); - p.meta.port = packets_id as u16; - p.meta.set_tracer(true); - DeserializedPacket::new(p).unwrap() - }) - .collect_vec(); - - // all packets are forwarded - { - let mut buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - - let FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - .. - } = BankingStage::filter_and_forward_with_account_limits( - ¤t_bank, - &mut buffered_packet_batches, - &mut forward_packet_batches_by_accounts, - UNPROCESSED_BUFFER_STEP_SIZE, - ); - assert_eq!(total_forwardable_packets, 256); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_forwardable_tracer_packets, 256); - - // packets in a batch are forwarded in arbitrary order; verify the ports match after - // sorting - let expected_ports: Vec<_> = (0..256).collect(); - let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts - .iter_batches() - .flat_map(|batch| { - batch - .get_forwardable_packets() - .into_iter() - .map(|p| p.meta.port) - }) - .collect(); - forwarded_ports.sort_unstable(); - assert_eq!(expected_ports, forwarded_ports); - } - - // some packets are forwarded - { - let num_already_forwarded = 16; - for packet in &mut packets[0..num_already_forwarded] { - packet.forwarded = true; - } - let mut buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - let FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - .. - } = BankingStage::filter_and_forward_with_account_limits( - ¤t_bank, - &mut buffered_packet_batches, - &mut forward_packet_batches_by_accounts, - UNPROCESSED_BUFFER_STEP_SIZE, - ); - assert_eq!( - total_forwardable_packets, - packets.len() - num_already_forwarded - ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_forwarded - ); - } - - // some packets are invalid (already processed) - { - let num_already_processed = 16; - for tx in &simple_transactions[0..num_already_processed] { - assert_eq!(current_bank.process_transaction(tx), Ok(())); - } - let mut buffered_packet_batches: UnprocessedPacketBatches = - UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); - let mut forward_packet_batches_by_accounts = - ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); - let FilterForwardingResults { - total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, - .. 
- } = BankingStage::filter_and_forward_with_account_limits( - ¤t_bank, - &mut buffered_packet_batches, - &mut forward_packet_batches_by_accounts, - UNPROCESSED_BUFFER_STEP_SIZE, - ); - assert_eq!( - total_forwardable_packets, - packets.len() - num_already_processed - ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_processed - ); - } - } - #[test] fn test_process_transactions_returns_unprocessed_txs() { solana_logger::setup(); diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 701e4f58e85a0a..885f30ea882af3 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -1,16 +1,13 @@ //! The `fetch_stage` batches input from a UDP socket and sends it to a channel. use { - crate::{ - banking_stage::HOLD_TRANSACTIONS_SLOT_OFFSET, - result::{Error, Result}, - }, + crate::result::{Error, Result}, crossbeam_channel::{unbounded, RecvTimeoutError}, solana_metrics::{inc_new_counter_debug, inc_new_counter_info}, solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, solana_poh::poh_recorder::PohRecorder, solana_sdk::{ - clock::DEFAULT_TICKS_PER_SLOT, + clock::{DEFAULT_TICKS_PER_SLOT, HOLD_TRANSACTIONS_SLOT_OFFSET}, packet::{Packet, PacketFlags}, }, solana_streamer::streamer::{ diff --git a/core/src/forward_packet_batches_by_accounts.rs b/core/src/forward_packet_batches_by_accounts.rs index 9132b14a21c7a5..5f0ea000930a39 100644 --- a/core/src/forward_packet_batches_by_accounts.rs +++ b/core/src/forward_packet_batches_by_accounts.rs @@ -182,7 +182,7 @@ impl ForwardPacketBatchesByAccounts { mod tests { use { super::*, - crate::unprocessed_packet_batches::{self, DeserializedPacket}, + crate::unprocessed_packet_batches::DeserializedPacket, solana_runtime::transaction_priority_details::TransactionPriorityDetails, solana_sdk::{ feature_set::FeatureSet, hash::Hash, signature::Keypair, system_transaction, @@ -352,13 +352,14 @@ mod tests { // assert it is added, and buffer still accepts more packets { let packet = build_deserialized_packet_for_test(10, &hot_account, requested_cu); - let tx = unprocessed_packet_batches::transaction_from_deserialized_packet( - packet.immutable_section(), - &Arc::new(FeatureSet::default()), - false, //votes_only, - SimpleAddressLoader::Disabled, - ) - .unwrap(); + let tx = packet + .immutable_section() + .compute_sanitized_transaction( + &Arc::new(FeatureSet::default()), + false, //votes_only, + SimpleAddressLoader::Disabled, + ) + .unwrap(); assert!(forward_packet_batches_by_accounts .try_add_packet(&tx, packet.immutable_section().clone())); @@ -372,13 +373,14 @@ mod tests { { let packet = build_deserialized_packet_for_test(100, &hot_account, 1 /*requested_cu*/); - let tx = unprocessed_packet_batches::transaction_from_deserialized_packet( - packet.immutable_section(), - &Arc::new(FeatureSet::default()), - false, //votes_only, - SimpleAddressLoader::Disabled, - ) - .unwrap(); + let tx = packet + .immutable_section() + .compute_sanitized_transaction( + &Arc::new(FeatureSet::default()), + false, //votes_only, + SimpleAddressLoader::Disabled, + ) + .unwrap(); assert!(!forward_packet_batches_by_accounts .try_add_packet(&tx, packet.immutable_section().clone())); @@ -392,13 +394,14 @@ mod tests { { let packet = build_deserialized_packet_for_test(100, &other_account, 1 /*requested_cu*/); - let tx = unprocessed_packet_batches::transaction_from_deserialized_packet( - packet.immutable_section(), - &Arc::new(FeatureSet::default()), - false, //votes_only, - 
SimpleAddressLoader::Disabled, - ) - .unwrap(); + let tx = packet + .immutable_section() + .compute_sanitized_transaction( + &Arc::new(FeatureSet::default()), + false, //votes_only, + SimpleAddressLoader::Disabled, + ) + .unwrap(); assert!(!forward_packet_batches_by_accounts .try_add_packet(&tx, packet.immutable_section().clone())); diff --git a/core/src/immutable_deserialized_packet.rs b/core/src/immutable_deserialized_packet.rs index a8cefa993d6d69..111fe6e888040f 100644 --- a/core/src/immutable_deserialized_packet.rs +++ b/core/src/immutable_deserialized_packet.rs @@ -4,14 +4,18 @@ use { GetTransactionPriorityDetails, TransactionPriorityDetails, }, solana_sdk::{ + feature_set, hash::Hash, message::Message, sanitize::SanitizeError, short_vec::decode_shortu16_len, signature::Signature, - transaction::{SanitizedVersionedTransaction, VersionedTransaction}, + transaction::{ + AddressLoader, SanitizedTransaction, SanitizedVersionedTransaction, + VersionedTransaction, + }, }, - std::{cmp::Ordering, mem::size_of}, + std::{cmp::Ordering, mem::size_of, sync::Arc}, thiserror::Error, }; @@ -94,6 +98,28 @@ impl ImmutableDeserializedPacket { pub fn compute_unit_limit(&self) -> u64 { self.priority_details.compute_unit_limit } + + // This function deserializes the packet into a transaction, computes the blake3 hash of the + // transaction message, and verifies secp256k1 instructions. + pub fn compute_sanitized_transaction( + &self, + feature_set: &Arc<FeatureSet>, + votes_only: bool, + address_loader: impl AddressLoader, + ) -> Option<SanitizedTransaction> { + if votes_only && !self.is_simple_vote() { + return None; + } + let tx = SanitizedTransaction::try_new( + self.transaction().clone(), + *self.message_hash(), + self.is_simple_vote(), + address_loader, + ) + .ok()?; + tx.verify_precompiles(feature_set).ok()?; + Some(tx) + } } impl PartialOrd for ImmutableDeserializedPacket { diff --git a/core/src/latest_unprocessed_votes.rs b/core/src/latest_unprocessed_votes.rs index aeedc711b4c10f..402fef4b665a21 100644 --- a/core/src/latest_unprocessed_votes.rs +++ b/core/src/latest_unprocessed_votes.rs @@ -2,7 +2,6 @@ use { crate::{ forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, immutable_deserialized_packet::{DeserializedPacketError, ImmutableDeserializedPacket}, - unprocessed_packet_batches, }, itertools::Itertools, rand::{thread_rng, Rng}, @@ -231,7 +230,7 @@ impl LatestUnprocessedVotes { /// Votes from validators with 0 stakes are ignored pub fn get_and_insert_forwardable_packets( &self, - bank: Arc<Bank>, + bank: &Arc<Bank>, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> usize { let mut continue_forwarding = true; @@ -250,9 +249,8 @@ impl LatestUnprocessedVotes { let mut vote = lock.write().unwrap(); if !vote.is_vote_taken() && !vote.is_forwarded() { let deserialized_vote_packet = vote.vote.as_ref().unwrap().clone(); - if let Some(sanitized_vote_transaction) = - unprocessed_packet_batches::transaction_from_deserialized_packet( - &deserialized_vote_packet, + if let Some(sanitized_vote_transaction) = deserialized_vote_packet + .compute_sanitized_transaction( + &bank.feature_set, bank.vote_only_bank(), bank.as_ref(), @@ -599,7 +597,7 @@ mod tests { // Don't forward 0 stake accounts let forwarded = latest_unprocessed_votes - .get_and_insert_forwardable_packets(bank, &mut forward_packet_batches_by_accounts); + .get_and_insert_forwardable_packets(&bank, &mut forward_packet_batches_by_accounts); assert_eq!(0, forwarded); assert_eq!( 0, @@ -621,7 +619,7 @@ mod tests { // Don't forward votes from gossip let forwarded
= latest_unprocessed_votes.get_and_insert_forwardable_packets( - Arc::new(bank), + &Arc::new(bank), &mut forward_packet_batches_by_accounts, ); @@ -645,10 +643,8 @@ mod tests { ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); // Forward from TPU - let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( - bank.clone(), - &mut forward_packet_batches_by_accounts, - ); + let forwarded = latest_unprocessed_votes + .get_and_insert_forwardable_packets(&bank, &mut forward_packet_batches_by_accounts); assert_eq!(1, forwarded); assert_eq!( @@ -663,7 +659,7 @@ mod tests { let mut forward_packet_batches_by_accounts = ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); let forwarded = latest_unprocessed_votes - .get_and_insert_forwardable_packets(bank, &mut forward_packet_batches_by_accounts); + .get_and_insert_forwardable_packets(&bank, &mut forward_packet_batches_by_accounts); assert_eq!(0, forwarded); assert_eq!( diff --git a/core/src/unprocessed_packet_batches.rs b/core/src/unprocessed_packet_batches.rs index a72ba3dffbf8d5..5f9348c28aa24c 100644 --- a/core/src/unprocessed_packet_batches.rs +++ b/core/src/unprocessed_packet_batches.rs @@ -3,11 +3,7 @@ use { min_max_heap::MinMaxHeap, solana_perf::packet::{Packet, PacketBatch}, solana_runtime::transaction_priority_details::TransactionPriorityDetails, - solana_sdk::{ - feature_set, - hash::Hash, - transaction::{AddressLoader, SanitizedTransaction, Transaction}, - }, + solana_sdk::{hash::Hash, transaction::Transaction}, std::{ cmp::Ordering, collections::{hash_map::Entry, HashMap}, @@ -283,7 +279,7 @@ impl UnprocessedPacketBatches { } pub fn mark_accepted_packets_as_forwarded( - buffered_packet_batches: &mut UnprocessedPacketBatches, + &mut self, packets_to_process: &[Arc], accepted_packet_indexes: &[usize], ) { @@ -291,7 +287,7 @@ impl UnprocessedPacketBatches { .iter() .for_each(|accepted_packet_index| { let accepted_packet = packets_to_process[*accepted_packet_index].clone(); - if let Some(deserialized_packet) = buffered_packet_batches + if let Some(deserialized_packet) = self .message_hash_to_transaction .get_mut(accepted_packet.message_hash()) { @@ -322,30 +318,6 @@ pub fn transactions_to_deserialized_packets( .collect() } -// This function deserializes packets into transactions, computes the blake3 hash of transaction -// messages, and verifies secp256k1 instructions. A list of sanitized transactions are returned -// with their packet indexes. 
-#[allow(clippy::needless_collect)] -pub fn transaction_from_deserialized_packet( - deserialized_packet: &ImmutableDeserializedPacket, - feature_set: &Arc, - votes_only: bool, - address_loader: impl AddressLoader, -) -> Option { - if votes_only && !deserialized_packet.is_simple_vote() { - return None; - } - let tx = SanitizedTransaction::try_new( - deserialized_packet.transaction().clone(), - *deserialized_packet.message_hash(), - deserialized_packet.is_simple_vote(), - address_loader, - ) - .ok()?; - tx.verify_precompiles(feature_set).ok()?; - Some(tx) -} - #[cfg(test)] mod tests { use { @@ -357,6 +329,7 @@ mod tests { transaction::{SimpleAddressLoader, Transaction}, }, solana_vote_program::vote_transaction, + std::sync::Arc, }; fn simple_deserialized_packet() -> DeserializedPacket { @@ -529,8 +502,7 @@ mod tests { let mut votes_only = false; let txs = packet_vector.iter().filter_map(|tx| { - transaction_from_deserialized_packet( - tx.immutable_section(), + tx.immutable_section().compute_sanitized_transaction( &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, @@ -540,8 +512,7 @@ mod tests { votes_only = true; let txs = packet_vector.iter().filter_map(|tx| { - transaction_from_deserialized_packet( - tx.immutable_section(), + tx.immutable_section().compute_sanitized_transaction( &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, @@ -560,8 +531,7 @@ mod tests { let mut votes_only = false; let txs = packet_vector.iter().filter_map(|tx| { - transaction_from_deserialized_packet( - tx.immutable_section(), + tx.immutable_section().compute_sanitized_transaction( &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, @@ -571,8 +541,7 @@ mod tests { votes_only = true; let txs = packet_vector.iter().filter_map(|tx| { - transaction_from_deserialized_packet( - tx.immutable_section(), + tx.immutable_section().compute_sanitized_transaction( &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, @@ -591,8 +560,7 @@ mod tests { let mut votes_only = false; let txs = packet_vector.iter().filter_map(|tx| { - transaction_from_deserialized_packet( - tx.immutable_section(), + tx.immutable_section().compute_sanitized_transaction( &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, @@ -602,8 +570,7 @@ mod tests { votes_only = true; let txs = packet_vector.iter().filter_map(|tx| { - transaction_from_deserialized_packet( - tx.immutable_section(), + tx.immutable_section().compute_sanitized_transaction( &Arc::new(FeatureSet::default()), votes_only, SimpleAddressLoader::Disabled, diff --git a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs index 15413236fe7d1f..62f9a4b7bb5e96 100644 --- a/core/src/unprocessed_transaction_storage.rs +++ b/core/src/unprocessed_transaction_storage.rs @@ -1,6 +1,6 @@ use { crate::{ - banking_stage::{self, BankingStage, FilterForwardingResults, ForwardOption}, + banking_stage::{FilterForwardingResults, ForwardOption}, forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, immutable_deserialized_packet::ImmutableDeserializedPacket, latest_unprocessed_votes::{ @@ -11,10 +11,16 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, + solana_measure::measure, solana_runtime::bank::Bank, + solana_sdk::{ + clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, saturating_add_assign, + transaction::SanitizedTransaction, + }, std::sync::Arc, }; +pub const UNPROCESSED_BUFFER_STEP_SIZE: usize = 128; const 
MAX_STAKED_VALIDATORS: usize = 10_000; #[derive(Debug)] @@ -52,7 +58,7 @@ pub struct InsertPacketBatchesSummary { pub(crate) num_dropped_tracer_packets: usize, } -pub(crate) fn filter_processed_packets<'a, F>( +fn filter_processed_packets<'a, F>( retryable_transaction_indexes: impl Iterator<Item = &'a usize>, mut f: F, ) where @@ -177,7 +183,7 @@ impl UnprocessedTransactionStorage { pub fn filter_forwardable_packets_and_add_batches( &mut self, - bank: Arc<Bank>, + bank: &Arc<Bank>, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { match self { @@ -251,7 +257,7 @@ impl VoteStorage { .into_iter() .filter_map(|deserialized_packet| { LatestValidatorVotePacket::new_from_immutable( - Rc::new(deserialized_packet), + Arc::new(deserialized_packet), self.vote_source, ) .ok() @@ -261,7 +267,7 @@ impl VoteStorage { fn filter_forwardable_packets_and_add_batches( &mut self, - bank: Arc<Bank>, + bank: &Arc<Bank>, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { if matches!(self.vote_source, VoteSource::Tpu) { @@ -363,14 +369,13 @@ impl ThreadLocalUnprocessedPackets { fn filter_forwardable_packets_and_add_batches( &mut self, - bank: Arc<Bank>, + bank: &Arc<Bank>, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { - let results = BankingStage::filter_and_forward_with_account_limits( + let results = self.filter_and_forward_with_account_limits( &bank, - &mut self.unprocessed_packet_batches, forward_packet_batches_by_accounts, - banking_stage::UNPROCESSED_BUFFER_STEP_SIZE, + UNPROCESSED_BUFFER_STEP_SIZE, ); for deserialized_packet in self.unprocessed_packet_batches.iter_mut() { @@ -380,17 +385,299 @@ impl ThreadLocalUnprocessedPackets { results } + /// Filter out packets that fail to sanitize or are no longer valid (could be + /// too old, or a duplicate of something already processed). This is done in batches to avoid + /// checking the bank's blockhash and status cache per transaction, which could hurt performance. + /// Adds valid, sanitized packets to the forwarding queue. + fn filter_and_forward_with_account_limits( + &mut self, + bank: &Arc<Bank>, + forward_buffer: &mut ForwardPacketBatchesByAccounts, + batch_size: usize, + ) -> FilterForwardingResults { + let mut total_forwardable_tracer_packets: usize = 0; + let mut total_tracer_packets_in_buffer: usize = 0; + let mut total_forwardable_packets: usize = 0; + let mut total_packet_conversion_us: u64 = 0; + let mut total_filter_packets_us: u64 = 0; + let mut dropped_tx_before_forwarding_count: usize = 0; + + let mut original_priority_queue = self.swap_priority_queue(); + + // indicates whether `forward_buffer` still accepts more packets; see + // `ForwardPacketBatchesByAccounts` for details. + let mut accepting_packets = true; + // iterate through self.unprocessed_packet_batches in descending priority order, in batches + let retained_priority_queue: MinMaxHeap<Arc<ImmutableDeserializedPacket>> = + original_priority_queue + .drain_desc() + .chunks(batch_size) + .into_iter() + .flat_map(|packets_to_process| { + let packets_to_process = packets_to_process.into_iter().collect_vec(); + + // Vec of the same size as `packets_to_process`; each element indicates whether the + // corresponding packet is a tracer packet.
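+ // Tracer packets are counted separately so the returned FilterForwardingResults can + // report how many remain in the buffer and how many of those are forwardable.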
+ let tracer_packet_indexes = packets_to_process + .iter() + .map(|deserialized_packet| { + deserialized_packet + .original_packet() + .meta + .is_tracer_packet() + }) + .collect::<Vec<_>>(); + saturating_add_assign!( + total_tracer_packets_in_buffer, + tracer_packet_indexes + .iter() + .filter(|is_tracer| **is_tracer) + .count() + ); + + if accepting_packets { + let ( + (sanitized_transactions, transaction_to_packet_indexes), + packet_conversion_time, + ): ((Vec<SanitizedTransaction>, Vec<usize>), _) = measure!( + self.sanitize_unforwarded_packets(&packets_to_process, bank,), + "sanitize_packet", + ); + saturating_add_assign!( + total_packet_conversion_us, + packet_conversion_time.as_us() + ); + + let (forwardable_transaction_indexes, filter_packets_time) = measure!( + Self::filter_invalid_transactions(&sanitized_transactions, bank,), + "filter_packets", + ); + saturating_add_assign!( + total_filter_packets_us, + filter_packets_time.as_us() + ); + + for forwardable_transaction_index in &forwardable_transaction_indexes { + saturating_add_assign!(total_forwardable_packets, 1); + let forwardable_packet_index = + transaction_to_packet_indexes[*forwardable_transaction_index]; + if tracer_packet_indexes[forwardable_packet_index] { + saturating_add_assign!(total_forwardable_tracer_packets, 1); + } + } + + let accepted_packet_indexes = Self::add_filtered_packets_to_forward_buffer( + forward_buffer, + &packets_to_process, + &sanitized_transactions, + &transaction_to_packet_indexes, + &forwardable_transaction_indexes, + &mut dropped_tx_before_forwarding_count, + ); + accepting_packets = + accepted_packet_indexes.len() == forwardable_transaction_indexes.len(); + + self.unprocessed_packet_batches + .mark_accepted_packets_as_forwarded( + &packets_to_process, + &accepted_packet_indexes, + ); + + self.collect_retained_packets( + &packets_to_process, + &Self::prepare_filtered_packet_indexes( + &transaction_to_packet_indexes, + &forwardable_transaction_indexes, + ), + ) + } else { + // skip sanitizing and filtering if no longer able to add more packets for forwarding + saturating_add_assign!( + dropped_tx_before_forwarding_count, + packets_to_process.len() + ); + packets_to_process + } + }) + .collect(); + + // replace packet priority queue + self.unprocessed_packet_batches.packet_priority_queue = retained_priority_queue; + + inc_new_counter_info!( + "banking_stage-dropped_tx_before_forwarding", + dropped_tx_before_forwarding_count + ); + + FilterForwardingResults { + total_forwardable_packets, + total_tracer_packets_in_buffer, + total_forwardable_tracer_packets, + total_packet_conversion_us, + total_filter_packets_us, + } + } + + /// Take self.unprocessed_packet_batches's priority_queue out, leaving an empty MinMaxHeap in its place. + fn swap_priority_queue(&mut self) -> MinMaxHeap<Arc<ImmutableDeserializedPacket>> { + let capacity = self.unprocessed_packet_batches.capacity(); + std::mem::replace( + &mut self.unprocessed_packet_batches.packet_priority_queue, + MinMaxHeap::with_capacity(capacity), + ) + } + + /// Sanitize un-forwarded packets into SanitizedTransactions for validation and forwarding.
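+ /// Each returned transaction is paired, by index, with the packet it was built from; + /// packets already marked as forwarded are skipped.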
+ fn sanitize_unforwarded_packets( + &mut self, + packets_to_process: &[Arc], + bank: &Arc, + ) -> (Vec, Vec) { + // Get ref of ImmutableDeserializedPacket + let deserialized_packets = packets_to_process.iter().map(|p| &**p); + let (transactions, transaction_to_packet_indexes): (Vec, Vec) = + deserialized_packets + .enumerate() + .filter_map(|(packet_index, deserialized_packet)| { + if !self + .unprocessed_packet_batches + .is_forwarded(deserialized_packet) + { + deserialized_packet + .compute_sanitized_transaction( + &bank.feature_set, + bank.vote_only_bank(), + bank.as_ref(), + ) + .map(|transaction| (transaction, packet_index)) + } else { + None + } + }) + .unzip(); + + // report metrics + inc_new_counter_info!("banking_stage-packet_conversion", 1); + let unsanitized_packets_filtered_count = + packets_to_process.len().saturating_sub(transactions.len()); + inc_new_counter_info!( + "banking_stage-dropped_tx_before_forwarding", + unsanitized_packets_filtered_count + ); + + (transactions, transaction_to_packet_indexes) + } + + /// Checks sanitized transactions against bank, returns valid transaction indexes + fn filter_invalid_transactions( + transactions: &[SanitizedTransaction], + bank: &Arc, + ) -> Vec { + let filter = vec![Ok(()); transactions.len()]; + let results = bank.check_transactions_with_forwarding_delay( + transactions, + &filter, + FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, + ); + // report metrics + let filtered_out_transactions_count = transactions.len().saturating_sub(results.len()); + inc_new_counter_info!( + "banking_stage-dropped_tx_before_forwarding", + filtered_out_transactions_count + ); + + results + .iter() + .enumerate() + .filter_map( + |(tx_index, (result, _))| if result.is_ok() { Some(tx_index) } else { None }, + ) + .collect_vec() + } + + fn prepare_filtered_packet_indexes( + transaction_to_packet_indexes: &[usize], + retained_transaction_indexes: &[usize], + ) -> Vec { + retained_transaction_indexes + .iter() + .map(|tx_index| transaction_to_packet_indexes[*tx_index]) + .collect_vec() + } + + /// try to add filtered forwardable and valid packets to forward buffer; + /// returns vector of packet indexes that were accepted for forwarding. 
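+ /// Packets are offered in descending priority order; iteration stops at the first packet + /// the buffer rejects, and the remainder of the batch is counted as dropped.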
+ fn add_filtered_packets_to_forward_buffer( + forward_buffer: &mut ForwardPacketBatchesByAccounts, + packets_to_process: &[Arc], + transactions: &[SanitizedTransaction], + transaction_to_packet_indexes: &[usize], + forwardable_transaction_indexes: &[usize], + dropped_tx_before_forwarding_count: &mut usize, + ) -> Vec { + let mut added_packets_count: usize = 0; + let mut accepted_packet_indexes = Vec::with_capacity(transaction_to_packet_indexes.len()); + for forwardable_transaction_index in forwardable_transaction_indexes { + let sanitized_transaction = &transactions[*forwardable_transaction_index]; + let forwardable_packet_index = + transaction_to_packet_indexes[*forwardable_transaction_index]; + let immutable_deserialized_packet = + packets_to_process[forwardable_packet_index].clone(); + if !forward_buffer.try_add_packet(sanitized_transaction, immutable_deserialized_packet) + { + break; + } + accepted_packet_indexes.push(forwardable_packet_index); + saturating_add_assign!(added_packets_count, 1); + } + + // count the packets not being forwarded in this batch + saturating_add_assign!( + *dropped_tx_before_forwarding_count, + forwardable_transaction_indexes.len() - added_packets_count + ); + + accepted_packet_indexes + } + + fn collect_retained_packets( + &mut self, + packets_to_process: &[Arc], + retained_packet_indexes: &[usize], + ) -> Vec> { + self.remove_non_retained_packets(packets_to_process, retained_packet_indexes); + retained_packet_indexes + .iter() + .map(|i| packets_to_process[*i].clone()) + .collect_vec() + } + + /// remove packets from UnprocessedPacketBatches.message_hash_to_transaction after they have + /// been removed from UnprocessedPacketBatches.packet_priority_queue + fn remove_non_retained_packets( + &mut self, + packets_to_process: &[Arc], + retained_packet_indexes: &[usize], + ) { + filter_processed_packets( + retained_packet_indexes + .iter() + .chain(std::iter::once(&packets_to_process.len())), + |start, end| { + for processed_packet in &packets_to_process[start..end] { + self.unprocessed_packet_batches + .message_hash_to_transaction + .remove(processed_packet.message_hash()); + } + }, + ) + } + fn process_packets(&mut self, batch_size: usize, mut processing_function: F) where F: FnMut(&Vec>) -> Option>, { - let mut retryable_packets = { - let capacity = self.unprocessed_packet_batches.capacity(); - std::mem::replace( - &mut self.unprocessed_packet_batches.packet_priority_queue, - MinMaxHeap::with_capacity(capacity), - ) - }; + let mut retryable_packets = self.swap_priority_queue(); let retryable_packets: MinMaxHeap> = retryable_packets .drain_desc() .chunks(batch_size) @@ -400,16 +687,10 @@ impl ThreadLocalUnprocessedPackets { if let Some(retryable_transaction_indexes) = processing_function(&packets_to_process) { - // TODO: move this function tree into this module - BankingStage::remove_non_retained_packets( - &mut self.unprocessed_packet_batches, + self.collect_retained_packets( &packets_to_process, &retryable_transaction_indexes, - ); - retryable_transaction_indexes - .iter() - .map(|i| packets_to_process[*i].clone()) - .collect_vec() + ) } else { packets_to_process } @@ -430,7 +711,16 @@ impl ThreadLocalUnprocessedPackets { #[cfg(test)] mod tests { - use super::*; + use { + super::*, + solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, + solana_perf::packet::Packet, + solana_sdk::{ + signature::{Keypair, Signer}, + system_transaction, + transaction::Transaction, + }, + }; #[test] fn test_filter_processed_packets() { @@ -482,4 +772,148 
@@ mod tests { filter_processed_packets(retryable_indexes.iter(), f); assert_eq!(non_retryable_indexes, vec![(0, 1), (4, 5), (6, 8)]); } + + #[test] + fn test_filter_and_forward_with_account_limits() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10); + let current_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + let simple_transactions: Vec<Transaction> = (0..256) + .map(|_id| { + // packets are deserialized upon receiving; failed packets will not be + // forwarded, so we need to create real packets here. + let key1 = Keypair::new(); + system_transaction::transfer( + &mint_keypair, + &key1.pubkey(), + genesis_config.rent.minimum_balance(0), + genesis_config.hash(), + ) + }) + .collect_vec(); + + let mut packets: Vec<DeserializedPacket> = simple_transactions + .iter() + .enumerate() + .map(|(packets_id, transaction)| { + let mut p = Packet::from_data(None, transaction).unwrap(); + p.meta.port = packets_id as u16; + p.meta.set_tracer(true); + DeserializedPacket::new(p).unwrap() + }) + .collect_vec(); + + // all packets are forwarded + { + let buffered_packet_batches: UnprocessedPacketBatches = + UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); + let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( + buffered_packet_batches, + ThreadType::Transactions, + ); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); + + let FilterForwardingResults { + total_forwardable_packets, + total_tracer_packets_in_buffer, + total_forwardable_tracer_packets, + .. + } = transaction_storage.filter_forwardable_packets_and_add_batches( + &current_bank, + &mut forward_packet_batches_by_accounts, + ); + assert_eq!(total_forwardable_packets, 256); + assert_eq!(total_tracer_packets_in_buffer, 256); + assert_eq!(total_forwardable_tracer_packets, 256); + + // packets in a batch are forwarded in arbitrary order; verify the ports match after + // sorting + let expected_ports: Vec<_> = (0..256).collect(); + let mut forwarded_ports: Vec<_> = forward_packet_batches_by_accounts + .iter_batches() + .flat_map(|batch| { + batch + .get_forwardable_packets() + .into_iter() + .map(|p| p.meta.port) + }) + .collect(); + forwarded_ports.sort_unstable(); + assert_eq!(expected_ports, forwarded_ports); + } + + // some packets are forwarded + { + let num_already_forwarded = 16; + for packet in &mut packets[0..num_already_forwarded] { + packet.forwarded = true; + } + let buffered_packet_batches: UnprocessedPacketBatches = + UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); + let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( + buffered_packet_batches, + ThreadType::Transactions, + ); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); + let FilterForwardingResults { + total_forwardable_packets, + total_tracer_packets_in_buffer, + total_forwardable_tracer_packets, + ..
+ } = transaction_storage.filter_forwardable_packets_and_add_batches( + &current_bank, + &mut forward_packet_batches_by_accounts, + ); + assert_eq!( + total_forwardable_packets, + packets.len() - num_already_forwarded + ); + assert_eq!(total_tracer_packets_in_buffer, packets.len()); + assert_eq!( + total_forwardable_tracer_packets, + packets.len() - num_already_forwarded + ); + } + + // some packets are invalid (already processed) + { + let num_already_processed = 16; + for tx in &simple_transactions[0..num_already_processed] { + assert_eq!(current_bank.process_transaction(tx), Ok(())); + } + let buffered_packet_batches: UnprocessedPacketBatches = + UnprocessedPacketBatches::from_iter(packets.clone().into_iter(), packets.len()); + let mut transaction_storage = UnprocessedTransactionStorage::new_transaction_storage( + buffered_packet_batches, + ThreadType::Transactions, + ); + let mut forward_packet_batches_by_accounts = + ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); + let FilterForwardingResults { + total_forwardable_packets, + total_tracer_packets_in_buffer, + total_forwardable_tracer_packets, + .. + } = transaction_storage.filter_forwardable_packets_and_add_batches( + &current_bank, + &mut forward_packet_batches_by_accounts, + ); + assert_eq!( + total_forwardable_packets, + packets.len() - num_already_processed + ); + assert_eq!(total_tracer_packets_in_buffer, packets.len()); + assert_eq!( + total_forwardable_tracer_packets, + packets.len() - num_already_processed + ); + } + } } diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 609e57b23330d2..4081b1fd9c93a1 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -48,6 +48,7 @@ solana-frozen-abi = { path = "../frozen-abi", version = "=1.15.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.15.0" } solana-measure = { path = "../measure", version = "=1.15.0" } solana-metrics = { path = "../metrics", version = "=1.15.0" } +solana-perf = { path = "../perf", version = "=1.15.0" } solana-program-runtime = { path = "../program-runtime", version = "=1.15.0" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.15.0" } solana-sdk = { path = "../sdk", version = "=1.15.0" } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0e4595c212aaad..88584b0d28ac05 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -84,6 +84,7 @@ use { }, solana_measure::{measure, measure::Measure}, solana_metrics::{inc_new_counter_debug, inc_new_counter_info}, + solana_perf::perf_libs, solana_program_runtime::{ accounts_data_meter::MAX_ACCOUNTS_DATA_LEN, compute_budget::{self, ComputeBudget}, @@ -102,7 +103,7 @@ use { clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, - SECONDS_PER_DAY, + MAX_TRANSACTION_FORWARDING_DELAY_GPU, SECONDS_PER_DAY, }, ed25519_program, epoch_info::EpochInfo, @@ -138,7 +139,7 @@ use { sysvar::{self, Sysvar, SysvarId}, timing::years_as_slots, transaction::{ - MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, + self, MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, transaction_context::{ @@ -7693,6 +7694,36 @@ impl Bank { .epoch_accounts_hash_manager .try_get_epoch_accounts_hash() } + + /// Checks a batch of sanitized transactions against the bank for age and status + pub fn check_transactions_with_forwarding_delay( + &self, +
transactions: &[SanitizedTransaction], + filter: &[transaction::Result<()>], + forward_transactions_to_leader_at_slot_offset: u64, + ) -> Vec { + let mut error_counters = TransactionErrorMetrics::default(); + // The following code also checks if the blockhash for a transaction is too old + // The check accounts for + // 1. Transaction forwarding delay + // 2. The slot at which the next leader will actually process the transaction + // Drop the transaction if it will expire by the time the next node receives and processes it + let api = perf_libs::api(); + let max_tx_fwd_delay = if api.is_none() { + MAX_TRANSACTION_FORWARDING_DELAY + } else { + MAX_TRANSACTION_FORWARDING_DELAY_GPU + }; + + self.check_transactions( + transactions, + filter, + (MAX_PROCESSING_AGE) + .saturating_sub(max_tx_fwd_delay) + .saturating_sub(forward_transactions_to_leader_at_slot_offset as usize), + &mut error_counters, + ) + } } /// Compute how much an account has changed size. This function is useful when the data size delta diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index 408fdebe2e3db2..a28a7351093e7d 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -108,6 +108,10 @@ pub const MAX_TRANSACTION_FORWARDING_DELAY_GPU: usize = 2; /// More delay is expected if CUDA is not enabled (as signature verification takes longer) pub const MAX_TRANSACTION_FORWARDING_DELAY: usize = 6; +/// Transaction forwarding, which leader to forward to and how long to hold +pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2; +pub const HOLD_TRANSACTIONS_SLOT_OFFSET: u64 = 20; + /// The unit of time given to a leader for encoding a block. /// /// It is some some number of _ticks_ long. From cfd8b66da23f5f0c932cf553000dc393c3cbf132 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 29 Sep 2022 18:08:34 -0700 Subject: [PATCH 61/65] Address pr comments --- core/src/banking_stage.rs | 5 +- core/src/leader_slot_banking_stage_metrics.rs | 16 ++-- core/src/unprocessed_transaction_storage.rs | 87 +++++++++---------- 3 files changed, 48 insertions(+), 60 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index f1ba329e07e34d..dde4200d6456fe 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -623,7 +623,7 @@ impl BankingStage { } #[allow(clippy::too_many_arguments)] - fn processing_function( + fn do_process_packets( max_tx_ingestion_ns: u128, poh_recorder: &Arc>, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, @@ -748,7 +748,7 @@ impl BankingStage { bank, num_packets_to_process_per_iteration, |packets_to_process| { - Self::processing_function( + Self::do_process_packets( max_tx_ingestion_ns, poh_recorder, slot_metrics_tracker, @@ -1997,6 +1997,7 @@ impl BankingStage { banking_stage_stats .newly_buffered_packets_count .fetch_add(newly_buffered_packets_count, Ordering::Relaxed); + // TODO: One of these metrics can be deleted (duplicate) banking_stage_stats .current_buffered_packet_batches_count .swap(unprocessed_transaction_storage.len(), Ordering::Relaxed); diff --git a/core/src/leader_slot_banking_stage_metrics.rs b/core/src/leader_slot_banking_stage_metrics.rs index 015f1fba0a8a29..252529fb9b99c3 100644 --- a/core/src/leader_slot_banking_stage_metrics.rs +++ b/core/src/leader_slot_banking_stage_metrics.rs @@ -1,7 +1,7 @@ use { crate::{ leader_slot_banking_stage_timing_metrics::*, - unprocessed_transaction_storage::InsertPacketBatchesSummary, + unprocessed_transaction_storage::InsertPacketBatchSummary, }, 
solana_poh::poh_recorder::BankStart, solana_runtime::transaction_error_metrics::*, @@ -336,14 +336,10 @@ impl VotePacketCountMetrics { fn report(&self, id: u32, slot: Slot) { datapoint_info!( "banking_stage-vote_packet_counts", - ("id", id as i64, i64), - ("slot", slot as i64, i64), - ( - "dropped_gossip_votes", - self.dropped_gossip_votes as i64, - i64 - ), - ("dropped_tpu_votes", self.dropped_tpu_votes as i64, i64) + ("id", id, i64), + ("slot", slot, i64), + ("dropped_gossip_votes", self.dropped_gossip_votes, i64), + ("dropped_tpu_votes", self.dropped_tpu_votes, i64) ); } } @@ -538,7 +534,7 @@ impl LeaderSlotMetricsTracker { pub(crate) fn accumulate_insert_packet_batches_summary( &mut self, - insert_packet_batches_summary: &InsertPacketBatchesSummary, + insert_packet_batches_summary: &InsertPacketBatchSummary, ) { self.increment_exceeded_buffer_limit_dropped_packets_count( insert_packet_batches_summary.num_dropped_packets as u64, diff --git a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs index 62f9a4b7bb5e96..ae08eee7050543 100644 --- a/core/src/unprocessed_transaction_storage.rs +++ b/core/src/unprocessed_transaction_storage.rs @@ -51,7 +51,7 @@ pub enum ThreadType { } #[derive(Debug, Default)] -pub struct InsertPacketBatchesSummary { +pub struct InsertPacketBatchSummary { pub(crate) num_dropped_packets: usize, pub(crate) num_dropped_gossip_vote_packets: usize, pub(crate) num_dropped_tpu_vote_packets: usize, @@ -155,28 +155,11 @@ impl UnprocessedTransactionStorage { pub fn insert_batch( &mut self, deserialized_packets: Vec, - ) -> InsertPacketBatchesSummary { + ) -> InsertPacketBatchSummary { match self { - Self::VoteStorage(vote_storage) => { - let VoteBatchInsertionMetrics { - num_dropped_gossip, - num_dropped_tpu, - } = vote_storage.insert_batch(deserialized_packets); - InsertPacketBatchesSummary { - num_dropped_packets: num_dropped_gossip + num_dropped_tpu, - num_dropped_gossip_vote_packets: num_dropped_gossip, - num_dropped_tpu_vote_packets: num_dropped_tpu, - ..InsertPacketBatchesSummary::default() - } - } + Self::VoteStorage(vote_storage) => vote_storage.insert_batch(deserialized_packets), Self::LocalTransactionStorage(transaction_storage) => { - let (num_dropped_packets, num_dropped_tracer_packets) = - transaction_storage.insert_batch(deserialized_packets); - InsertPacketBatchesSummary { - num_dropped_packets, - num_dropped_tracer_packets, - ..InsertPacketBatchesSummary::default() - } + transaction_storage.insert_batch(deserialized_packets) } } } @@ -250,19 +233,27 @@ impl VoteStorage { fn insert_batch( &mut self, deserialized_packets: Vec, - ) -> VoteBatchInsertionMetrics { - self.latest_unprocessed_votes - .insert_batch( - deserialized_packets - .into_iter() - .filter_map(|deserialized_packet| { - LatestValidatorVotePacket::new_from_immutable( - Arc::new(deserialized_packet), - self.vote_source, - ) - .ok() - }), - ) + ) -> InsertPacketBatchSummary { + let VoteBatchInsertionMetrics { + num_dropped_gossip, + num_dropped_tpu, + } = self.latest_unprocessed_votes.insert_batch( + deserialized_packets + .into_iter() + .filter_map(|deserialized_packet| { + LatestValidatorVotePacket::new_from_immutable( + Arc::new(deserialized_packet), + self.vote_source, + ) + .ok() + }), + ); + InsertPacketBatchSummary { + num_dropped_packets: num_dropped_gossip + num_dropped_tpu, + num_dropped_gossip_vote_packets: num_dropped_gossip, + num_dropped_tpu_vote_packets: num_dropped_tpu, + ..InsertPacketBatchSummary::default() + } } fn 
filter_forwardable_packets_and_add_batches( @@ -359,12 +350,18 @@ impl ThreadLocalUnprocessedPackets { fn insert_batch( &mut self, deserialized_packets: Vec, - ) -> (usize, usize) { - self.unprocessed_packet_batches.insert_batch( - deserialized_packets - .into_iter() - .map(DeserializedPacket::from_immutable_section), - ) + ) -> InsertPacketBatchSummary { + let (num_dropped_packets, num_dropped_tracer_packets) = + self.unprocessed_packet_batches.insert_batch( + deserialized_packets + .into_iter() + .map(DeserializedPacket::from_immutable_section), + ); + InsertPacketBatchSummary { + num_dropped_packets, + num_dropped_tracer_packets, + ..InsertPacketBatchSummary::default() + } } fn filter_forwardable_packets_and_add_batches( @@ -372,17 +369,11 @@ impl ThreadLocalUnprocessedPackets { bank: &Arc, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { - let results = self.filter_and_forward_with_account_limits( + self.filter_and_forward_with_account_limits( &bank, forward_packet_batches_by_accounts, UNPROCESSED_BUFFER_STEP_SIZE, - ); - - for deserialized_packet in self.unprocessed_packet_batches.iter_mut() { - // Mark so we don't forward again - deserialized_packet.forwarded = true; - } - results + ) } /// Filter out packets that fail to sanitize, or are no longer valid (could be From b2ee69eec31f162fc843f19afacdedbdfdb6973d Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 29 Sep 2022 18:33:49 -0700 Subject: [PATCH 62/65] Commit cargo lock change --- programs/bpf/Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 0ced4666c766bd..9fa44a25cc0e1b 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -5478,6 +5478,7 @@ dependencies = [ "solana-frozen-abi-macro 1.15.0", "solana-measure", "solana-metrics", + "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", "solana-sdk 1.15.0", From 1f39f8165b7e2d6b094bd815c196fd106b53d672 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 29 Sep 2022 21:45:50 -0700 Subject: [PATCH 63/65] clippy --- core/src/latest_unprocessed_votes.rs | 2 +- core/src/unprocessed_transaction_storage.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/latest_unprocessed_votes.rs b/core/src/latest_unprocessed_votes.rs index 402fef4b665a21..be6b84b85689ec 100644 --- a/core/src/latest_unprocessed_votes.rs +++ b/core/src/latest_unprocessed_votes.rs @@ -235,7 +235,7 @@ impl LatestUnprocessedVotes { ) -> usize { let mut continue_forwarding = true; let pubkeys_by_stake = weighted_random_order_by_stake( - &bank, + bank, self.latest_votes_per_pubkey.read().unwrap().keys(), ) .collect_vec(); diff --git a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs index ae08eee7050543..25e5c11ad767cf 100644 --- a/core/src/unprocessed_transaction_storage.rs +++ b/core/src/unprocessed_transaction_storage.rs @@ -370,7 +370,7 @@ impl ThreadLocalUnprocessedPackets { forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { self.filter_and_forward_with_account_limits( - &bank, + bank, forward_packet_batches_by_accounts, UNPROCESSED_BUFFER_STEP_SIZE, ) From b2ef3c13c3c6cd9e1ae6913794832637f2d8fdee Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 5 Oct 2022 14:26:06 -0700 Subject: [PATCH 64/65] Remove unsafe impls --- core/src/unprocessed_transaction_storage.rs | 3 --- 1 file changed, 3 deletions(-) diff --git 
a/core/src/unprocessed_transaction_storage.rs b/core/src/unprocessed_transaction_storage.rs index 25e5c11ad767cf..a6abdb57c9fa97 100644 --- a/core/src/unprocessed_transaction_storage.rs +++ b/core/src/unprocessed_transaction_storage.rs @@ -29,9 +29,6 @@ pub enum UnprocessedTransactionStorage { LocalTransactionStorage(ThreadLocalUnprocessedPackets), } -unsafe impl Send for UnprocessedTransactionStorage {} -unsafe impl Sync for UnprocessedTransactionStorage {} - #[derive(Debug)] pub struct ThreadLocalUnprocessedPackets { unprocessed_packet_batches: UnprocessedPacketBatches, From 8d7f0c6db13add6be7b1f3597cbbd1a97da41ba0 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 5 Oct 2022 14:26:30 -0700 Subject: [PATCH 65/65] pr comments --- core/benches/banking_stage.rs | 2 +- core/src/banking_stage.rs | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index e46138fe121f26..5ad2d81d940f79 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -97,7 +97,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) { std::u128::MAX, &poh_recorder, &mut transaction_buffer, - None, + &None, &s, None::>, &BankingStageStats::default(), diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index dde4200d6456fe..6c9014e8066ebf 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -727,7 +727,7 @@ impl BankingStage { max_tx_ingestion_ns: u128, poh_recorder: &Arc>, unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, - transaction_status_sender: Option, + transaction_status_sender: &Option, gossip_vote_sender: &ReplayVoteSender, test_fn: Option, banking_stage_stats: &BankingStageStats, @@ -753,7 +753,7 @@ impl BankingStage { poh_recorder, slot_metrics_tracker, recorder, - &transaction_status_sender, + transaction_status_sender, gossip_vote_sender, banking_stage_stats, qos_service, @@ -837,7 +837,7 @@ impl BankingStage { poh_recorder: &Arc>, cluster_info: &ClusterInfo, unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, - transaction_status_sender: Option, + transaction_status_sender: &Option, gossip_vote_sender: &ReplayVoteSender, banking_stage_stats: &BankingStageStats, recorder: &TransactionRecorder, @@ -1098,7 +1098,7 @@ impl BankingStage { poh_recorder, cluster_info, &mut unprocessed_transaction_storage, - transaction_status_sender.clone(), + &transaction_status_sender, &gossip_vote_sender, &banking_stage_stats, &recorder, @@ -3807,7 +3807,7 @@ mod tests { max_tx_processing_ns, &poh_recorder, &mut buffered_packet_batches, - None, + &None, &gossip_vote_sender, None::>, &BankingStageStats::default(), @@ -3828,7 +3828,7 @@ mod tests { max_tx_processing_ns, &poh_recorder, &mut buffered_packet_batches, - None, + &None, &gossip_vote_sender, None::>, &BankingStageStats::default(), @@ -3905,7 +3905,7 @@ mod tests { std::u128::MAX, &poh_recorder_, &mut buffered_packet_batches, - None, + &None, &gossip_vote_sender, test_fn, &BankingStageStats::default(),
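The final commit changes several banking-stage entry points to borrow the optional status sender (`&Option<TransactionStatusSender>`) instead of taking it by value, which removes the `transaction_status_sender.clone()` previously needed at each call site. A minimal, self-contained sketch of that borrow-over-clone pattern, with `StatusSender` as an illustrative stand-in for the real `TransactionStatusSender` type:

```rust
// Hypothetical stand-in for TransactionStatusSender; the real type lives in the runtime.
#[derive(Clone)]
struct StatusSender;

// Taking `&Option<StatusSender>` lets a hot loop share one optional sender
// across iterations without cloning the Option on every call.
fn process_packets(status_sender: &Option<StatusSender>) {
    if let Some(_sender) = status_sender {
        // ... send transaction status updates here ...
    }
}

fn main() {
    let status_sender = Some(StatusSender);
    for _ in 0..3 {
        process_packets(&status_sender); // borrowed each iteration, never cloned or moved
    }
}
```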