diff --git a/.github/workflows/explorer_preview.yml b/.github/workflows/explorer_preview.yml index b0a6f1f5dc0b25..a864b012c37617 100644 --- a/.github/workflows/explorer_preview.yml +++ b/.github/workflows/explorer_preview.yml @@ -2,14 +2,16 @@ name : explorer_preview on: workflow_run: workflows: ["Explorer_build&test_on_PR"] - types: - - completed +# types: +# - completed jobs: explorer_preview: runs-on: ubuntu-latest if: ${{ github.event.workflow_run.conclusion == 'success' }} steps: - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} - uses: amondnet/vercel-action@v20 with: vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required diff --git a/Cargo.lock b/Cargo.lock index 702178919d125d..b1d675c0a814a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -147,9 +147,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2 1.0.32", "quote 1.0.10", @@ -761,6 +761,26 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen", +] + +[[package]] +name = "console_log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" +dependencies = [ + "log 0.4.14", + "web-sys", +] + [[package]] name = "const_fn" version = "0.4.8" @@ -1028,12 +1048,13 @@ dependencies = [ [[package]] name = "dashmap" -version = "4.0.2" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +checksum = "b799062aaf67eb976af3bdca031ee6f846d2f0a5710ddbb0d2efee33f3cc4760" dependencies = [ "cfg-if 1.0.0", "num_cpus", + "parking_lot 0.11.2", "rayon", ] @@ -4035,9 +4056,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "b4ad69dfbd3e45369132cc64e6748c2d65cdfb001a2b1c232d128b4ad60561c1" dependencies = [ "serde_derive", ] @@ -4063,9 +4084,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "b710a83c4e0dff6a3d511946b95274ad9ca9e5d3ae497b63fda866ac955358d2" dependencies = [ "proc-macro2 1.0.32", "quote 1.0.10", @@ -4196,6 +4217,16 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha3" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f935e31cf406e8c0e96c2815a5516181b7004ae8c5f296293221e9b1e356bd" +dependencies = [ + "digest 0.10.0", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -4213,9 +4244,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.10" +version = "0.3.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1" +checksum = "c35dfd12afb7828318348b8c408383cf5071a086c1d4ab1c0f9840ec92dbb922" dependencies = [ "libc", "signal-hook-registry", @@ -4429,6 +4460,35 @@ dependencies = [ "tokio-postgres", ] +[[package]] +name = "solana-address-lookup-table-program" +version = "1.10.0" +dependencies = [ + "bincode", + "bytemuck", + "log 0.4.14", + "num-derive", + "num-traits", + "rustc_version 0.4.0", + "serde", + "solana-frozen-abi 1.10.0", + "solana-frozen-abi-macro 1.10.0", + "solana-program-runtime", + "solana-sdk", + "thiserror", +] + +[[package]] +name = "solana-address-lookup-table-program-tests" +version = "1.10.0" +dependencies = [ + "assert_matches", + "bincode", + "solana-address-lookup-table-program", + "solana-program-test", + "solana-sdk", +] + [[package]] name = "solana-banking-bench" version = "1.10.0" @@ -5407,7 +5467,7 @@ dependencies = [ "serde_bytes", "serde_derive", "sha2 0.9.8", - "sha3", + "sha3 0.9.1", "solana-frozen-abi 1.8.2", "solana-frozen-abi-macro 1.8.2", "solana-logger 1.8.2", @@ -5430,8 +5490,12 @@ dependencies = [ "bs58 0.4.0", "bv", "bytemuck", + "console_error_panic_hook", + "console_log", "curve25519-dalek 3.2.0", + "getrandom 0.1.16", "itertools 0.10.3", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log 0.4.14", @@ -5446,13 +5510,14 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.0", - "sha3", + "sha3 0.10.0", "solana-frozen-abi 1.10.0", "solana-frozen-abi-macro 1.10.0", "solana-logger 1.10.0", "solana-sdk-macro 1.10.0", "static_assertions", "thiserror", + "wasm-bindgen", ] [[package]] @@ -5669,6 +5734,8 @@ dependencies = [ "libsecp256k1 0.6.0", "log 0.4.14", "memmap2 0.5.0", + "num-derive", + "num-traits", "num_cpus", "ouroboros", "rand 0.7.3", @@ -5677,6 +5744,7 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", + "solana-address-lookup-table-program", "solana-bucket-map", "solana-compute-budget-program", "solana-config-program", @@ -5711,12 +5779,13 @@ dependencies = [ "chrono", "curve25519-dalek 3.2.0", "derivation-path", - "digest 0.9.0", + "digest 0.10.0", "ed25519-dalek", "ed25519-dalek-bip32", "generic-array 0.14.4", "hmac 0.12.0", "itertools 0.10.3", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log 0.4.14", @@ -5734,7 +5803,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.0", - "sha3", + "sha3 0.10.0", "solana-frozen-abi 1.10.0", "solana-frozen-abi-macro 1.10.0", "solana-logger 1.10.0", @@ -5743,6 +5812,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", + "wasm-bindgen", ] [[package]] @@ -6112,9 +6182,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcd409d0fba8427ef41b5c1ef79dcabb592f1cff144b77e07094b66c010c2d52" +checksum = "5c7a237a92714db63de655e20af29a3b59c007881f2dfbdc2d3838ca3675f45f" dependencies = [ "byteorder", "combine", @@ -6834,9 +6904,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24203b79cf2d68909da91178db3026e77054effba0c5d93deb870d3ca7b35afa" +checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" dependencies = [ "async-stream", "async-trait", @@ -6866,9 +6936,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.6.0" +version = "0.6.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "88358bb1dcfeb62dcce85c63006cafb964b7be481d522b7e09589d4d1e718d2a" +checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ "proc-macro2 1.0.32", "prost-build", diff --git a/Cargo.toml b/Cargo.toml index 124e11a576f921..3a75258f22f3fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,8 @@ members = [ "poh", "poh-bench", "program-test", + "programs/address-lookup-table", + "programs/address-lookup-table-tests", "programs/bpf_loader", "programs/compute-budget", "programs/config", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index b0c88252e9eede..4549eaaf4dad86 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -16,7 +16,7 @@ bs58 = "0.4.0" bv = "0.11.1" Inflector = "0.11.4" lazy_static = "1.4.0" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" solana-config-program = { path = "../programs/config", version = "=1.10.0" } diff --git a/accountsdb-plugin-interface/Cargo.toml b/accountsdb-plugin-interface/Cargo.toml index b8525c238698b8..889560a214e740 100644 --- a/accountsdb-plugin-interface/Cargo.toml +++ b/accountsdb-plugin-interface/Cargo.toml @@ -7,7 +7,7 @@ version = "1.10.0" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" -documentation = "https://docs.rs/solana-validator" +documentation = "https://docs.rs/solana-accountsdb-plugin-interface" [dependencies] log = "0.4.11" diff --git a/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs b/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs index 3a6caa53a2881a..95e1d221fc5d22 100644 --- a/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs +++ b/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs @@ -12,16 +12,38 @@ use { impl Eq for ReplicaAccountInfo<'_> {} #[derive(Clone, PartialEq, Debug)] +/// Information about an account being updated pub struct ReplicaAccountInfo<'a> { + /// The Pubkey for the account pub pubkey: &'a [u8], + + /// The lamports for the account pub lamports: u64, + + /// The Pubkey of the owner program account pub owner: &'a [u8], + + /// This account's data contains a loaded program (and is now read-only) pub executable: bool, + + /// The epoch at which this account will next owe rent pub rent_epoch: u64, + + /// The data held in this account. pub data: &'a [u8], + + /// A global monotonically increasing atomic number, which can be used + /// to tell the order of the account update. For example, when an + /// account is updated in the same slot multiple times, the update + /// with higher write_version should supersede the one with lower + /// write_version. pub write_version: u64, } +/// A wrapper to future-proof ReplicaAccountInfo handling. +/// If there were a change to the structure of ReplicaAccountInfo, +/// there would be new enum entry for the newer version, forcing +/// plugin implementations to handle the change. pub enum ReplicaAccountInfoVersions<'a> { V0_0_1(&'a ReplicaAccountInfo<'a>), } @@ -38,28 +60,44 @@ pub enum ReplicaTransactionInfoVersions<'a> { V0_0_1(&'a ReplicaTransactionInfo<'a>), } +/// Errors returned by plugin calls #[derive(Error, Debug)] pub enum AccountsDbPluginError { + /// Error opening the configuration file; for example, when the file + /// is not found or when the validator process has no permission to read it. #[error("Error opening config file. 
Error detail: ({0}).")] ConfigFileOpenError(#[from] io::Error), + /// Error in reading the content of the config file or the content + /// is not in the expected format. #[error("Error reading config file. Error message: ({msg})")] ConfigFileReadError { msg: String }, + /// Error when updating the account. #[error("Error updating account. Error message: ({msg})")] AccountsUpdateError { msg: String }, + /// Error when updating the slot status #[error("Error updating slot status. Error message: ({msg})")] SlotStatusUpdateError { msg: String }, + /// Any custom error defined by the plugin. #[error("Plugin-defined custom error. Error message: ({0})")] Custom(Box), } +/// The current status of a slot #[derive(Debug, Clone)] pub enum SlotStatus { + /// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is + /// not derived from a confirmed or finalized block, but if multiple forks are present, is from + /// the fork the validator believes is most likely to finalize. Processed, + + /// The highest slot having reached max vote lockout. Rooted, + + /// The highest slot that has been voted on by supermajority of the cluster, ie. is confirmed. Confirmed, } @@ -75,6 +113,9 @@ impl SlotStatus { pub type Result = std::result::Result; +/// Defines an AccountsDb plugin, to stream data from the runtime. +/// AccountsDb plugins must describe desired behavior for load and unload, +/// as well as how they will handle streamed data. pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug { fn name(&self) -> &'static str; @@ -93,6 +134,9 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug { fn on_unload(&mut self) {} /// Called when an account is updated at a slot. + /// When `is_startup` is true, it indicates the account is loaded from + /// snapshots when the validator starts up. When `is_startup` is false, + /// the account is updated during transaction processing. 
#[allow(unused_variables)] fn update_account( &mut self, diff --git a/accountsdb-plugin-manager/Cargo.toml b/accountsdb-plugin-manager/Cargo.toml index e1fd8ade95c283..42786896d41343 100644 --- a/accountsdb-plugin-manager/Cargo.toml +++ b/accountsdb-plugin-manager/Cargo.toml @@ -14,7 +14,7 @@ bs58 = "0.4.0" crossbeam-channel = "0.5" libloading = "0.7.2" log = "0.4.11" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" } diff --git a/accountsdb-plugin-postgres/Cargo.toml b/accountsdb-plugin-postgres/Cargo.toml index faf9cdafb208f3..87b8cfc1d87105 100644 --- a/accountsdb-plugin-postgres/Cargo.toml +++ b/accountsdb-plugin-postgres/Cargo.toml @@ -19,7 +19,7 @@ crossbeam-channel = "0.5" log = "0.4.14" postgres = { version = "0.19.2", features = ["with-chrono-0_4"] } postgres-types = { version = "0.2.2", features = ["derive"] } -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" } diff --git a/accountsdb-plugin-postgres/scripts/create_schema.sql b/accountsdb-plugin-postgres/scripts/create_schema.sql index ec0de205dee6f7..58429fabb94cb4 100644 --- a/accountsdb-plugin-postgres/scripts/create_schema.sql +++ b/accountsdb-plugin-postgres/scripts/create_schema.sql @@ -113,9 +113,10 @@ CREATE TYPE "TransactionMessage" AS ( instructions "CompiledInstruction"[] ); -CREATE TYPE "AddressMapIndexes" AS ( - writable SMALLINT[], - readonly SMALLINT[] +CREATE TYPE "TransactionMessageAddressTableLookup" AS ( + account_key: BYTEA[], + writable_indexes SMALLINT[], + readonly_indexes SMALLINT[] ); CREATE TYPE "TransactionMessageV0" AS ( @@ -123,17 +124,17 @@ CREATE TYPE "TransactionMessageV0" AS ( account_keys BYTEA[], recent_blockhash BYTEA, instructions "CompiledInstruction"[], - address_map_indexes "AddressMapIndexes"[] + address_table_lookups "TransactionMessageAddressTableLookup"[] ); -CREATE TYPE "MappedAddresses" AS ( +CREATE TYPE "LoadedAddresses" AS ( writable BYTEA[], readonly BYTEA[] ); -CREATE TYPE "MappedMessage" AS ( +CREATE TYPE "LoadedMessageV0" AS ( message "TransactionMessageV0", - mapped_addresses "MappedAddresses" + loaded_addresses "LoadedAddresses" ); -- The table storing transactions @@ -143,7 +144,7 @@ CREATE TABLE transaction ( is_vote BOOL NOT NULL, message_type SMALLINT, -- 0: legacy, 1: v0 message legacy_message "TransactionMessage", - v0_mapped_message "MappedMessage", + v0_loaded_message "LoadedMessageV0", signatures BYTEA[], message_hash BYTEA, meta "TransactionStatusMeta", diff --git a/accountsdb-plugin-postgres/scripts/drop_schema.sql b/accountsdb-plugin-postgres/scripts/drop_schema.sql index 419ab44169cb95..e5b756870d1e5e 100644 --- a/accountsdb-plugin-postgres/scripts/drop_schema.sql +++ b/accountsdb-plugin-postgres/scripts/drop_schema.sql @@ -11,12 +11,12 @@ DROP TABLE transaction; DROP TYPE "TransactionError" CASCADE; DROP TYPE "TransactionErrorCode" CASCADE; -DROP TYPE "MappedMessage" CASCADE; -DROP TYPE "MappedAddresses" CASCADE; +DROP TYPE "LoadedMessageV0" CASCADE; +DROP TYPE "LoadedAddresses" CASCADE; DROP TYPE "TransactionMessageV0" CASCADE; -DROP TYPE "AddressMapIndexes" CASCADE; DROP TYPE "TransactionMessage" CASCADE; DROP TYPE "TransactionMessageHeader" CASCADE; +DROP TYPE "TransactionMessageAddressTableLookup" CASCADE; DROP TYPE "TransactionStatusMeta" CASCADE; DROP TYPE "RewardType" CASCADE; DROP TYPE 
"Reward" CASCADE; diff --git a/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs b/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs index bdc30b158c83ad..f48b2456cf8523 100644 --- a/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs +++ b/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs @@ -18,8 +18,8 @@ use { solana_sdk::{ instruction::CompiledInstruction, message::{ - v0::{self, AddressMapIndexes}, - MappedAddresses, MappedMessage, Message, MessageHeader, SanitizedMessage, + v0::{self, LoadedAddresses, MessageAddressTableLookup}, + Message, MessageHeader, SanitizedMessage, }, transaction::TransactionError, }, @@ -105,10 +105,11 @@ pub struct DbTransactionMessage { } #[derive(Clone, Debug, ToSql)] -#[postgres(name = "AddressMapIndexes")] -pub struct DbAddressMapIndexes { - pub writable: Vec, - pub readonly: Vec, +#[postgres(name = "TransactionMessageAddressTableLookup")] +pub struct DbTransactionMessageAddressTableLookup { + pub account_key: Vec, + pub writable_indexes: Vec, + pub readonly_indexes: Vec, } #[derive(Clone, Debug, ToSql)] @@ -118,21 +119,21 @@ pub struct DbTransactionMessageV0 { pub account_keys: Vec>, pub recent_blockhash: Vec, pub instructions: Vec, - pub address_map_indexes: Vec, + pub address_table_lookups: Vec, } #[derive(Clone, Debug, ToSql)] -#[postgres(name = "MappedAddresses")] -pub struct DbMappedAddresses { +#[postgres(name = "LoadedAddresses")] +pub struct DbLoadedAddresses { pub writable: Vec>, pub readonly: Vec>, } #[derive(Clone, Debug, ToSql)] -#[postgres(name = "MappedMessage")] -pub struct DbMappedMessage { +#[postgres(name = "LoadedMessageV0")] +pub struct DbLoadedMessageV0 { pub message: DbTransactionMessageV0, - pub mapped_addresses: DbMappedAddresses, + pub loaded_addresses: DbLoadedAddresses, } pub struct DbTransaction { @@ -141,7 +142,7 @@ pub struct DbTransaction { pub slot: i64, pub message_type: i16, pub legacy_message: Option, - pub v0_mapped_message: Option, + pub v0_loaded_message: Option, pub message_hash: Vec, pub meta: DbTransactionStatusMeta, pub signatures: Vec>, @@ -151,32 +152,33 @@ pub struct LogTransactionRequest { pub transaction_info: DbTransaction, } -impl From<&AddressMapIndexes> for DbAddressMapIndexes { - fn from(address_map_indexes: &AddressMapIndexes) -> Self { +impl From<&MessageAddressTableLookup> for DbTransactionMessageAddressTableLookup { + fn from(address_table_lookup: &MessageAddressTableLookup) -> Self { Self { - writable: address_map_indexes - .writable + account_key: address_table_lookup.account_key.as_ref().to_vec(), + writable_indexes: address_table_lookup + .writable_indexes .iter() - .map(|address_idx| *address_idx as i16) + .map(|idx| *idx as i16) .collect(), - readonly: address_map_indexes - .readonly + readonly_indexes: address_table_lookup + .readonly_indexes .iter() - .map(|address_idx| *address_idx as i16) + .map(|idx| *idx as i16) .collect(), } } } -impl From<&MappedAddresses> for DbMappedAddresses { - fn from(mapped_addresses: &MappedAddresses) -> Self { +impl From<&LoadedAddresses> for DbLoadedAddresses { + fn from(loaded_addresses: &LoadedAddresses) -> Self { Self { - writable: mapped_addresses + writable: loaded_addresses .writable .iter() .map(|pubkey| pubkey.as_ref().to_vec()) .collect(), - readonly: mapped_addresses + readonly: loaded_addresses .readonly .iter() .map(|pubkey| pubkey.as_ref().to_vec()) @@ -243,20 +245,20 @@ impl From<&v0::Message> for DbTransactionMessageV0 { .iter() 
.map(DbCompiledInstruction::from) .collect(), - address_map_indexes: message - .address_map_indexes + address_table_lookups: message + .address_table_lookups .iter() - .map(DbAddressMapIndexes::from) + .map(DbTransactionMessageAddressTableLookup::from) .collect(), } } } -impl From<&MappedMessage> for DbMappedMessage { - fn from(message: &MappedMessage) -> Self { +impl From<&v0::LoadedMessage> for DbLoadedMessageV0 { + fn from(message: &v0::LoadedMessage) -> Self { Self { message: DbTransactionMessageV0::from(&message.message), - mapped_addresses: DbMappedAddresses::from(&message.mapped_addresses), + loaded_addresses: DbLoadedAddresses::from(&message.loaded_addresses), } } } @@ -328,6 +330,7 @@ pub enum DbTransactionErrorCode { WouldExceedMaxBlockCostLimit, UnsupportedVersion, InvalidWritableAccount, + WouldExceedMaxAccountDataCostLimit, } impl From<&TransactionError> for DbTransactionErrorCode { @@ -356,6 +359,9 @@ impl From<&TransactionError> for DbTransactionErrorCode { TransactionError::WouldExceedMaxBlockCostLimit => Self::WouldExceedMaxBlockCostLimit, TransactionError::UnsupportedVersion => Self::UnsupportedVersion, TransactionError::InvalidWritableAccount => Self::InvalidWritableAccount, + TransactionError::WouldExceedMaxAccountDataCostLimit => { + Self::WouldExceedMaxAccountDataCostLimit + } } } } @@ -460,8 +466,8 @@ fn build_db_transaction(slot: u64, transaction_info: &ReplicaTransactionInfo) -> } _ => None, }, - v0_mapped_message: match transaction_info.transaction.message() { - SanitizedMessage::V0(mapped_message) => Some(DbMappedMessage::from(mapped_message)), + v0_loaded_message: match transaction_info.transaction.message() { + SanitizedMessage::V0(loaded_message) => Some(DbLoadedMessageV0::from(loaded_message)), _ => None, }, signatures: transaction_info @@ -485,7 +491,7 @@ impl SimplePostgresClient { config: &AccountsDbPluginPostgresConfig, ) -> Result { let stmt = "INSERT INTO transaction AS txn (signature, is_vote, slot, message_type, legacy_message, \ - v0_mapped_message, signatures, message_hash, meta, updated_on) \ + v0_loaded_message, signatures, message_hash, meta, updated_on) \ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)"; let stmt = client.prepare(stmt); @@ -521,7 +527,7 @@ impl SimplePostgresClient { &transaction_info.slot, &transaction_info.message_type, &transaction_info.legacy_message, - &transaction_info.v0_mapped_message, + &transaction_info.v0_loaded_message, &transaction_info.signatures, &transaction_info.message_hash, &transaction_info.meta, @@ -670,42 +676,44 @@ pub(crate) mod tests { check_inner_instructions_equality(&inner_instructions, &db_inner_instructions); } - fn check_address_map_indexes_equality( - address_map_indexes: &AddressMapIndexes, - db_address_map_indexes: &DbAddressMapIndexes, + fn check_address_table_lookups_equality( + address_table_lookups: &MessageAddressTableLookup, + db_address_table_lookups: &DbTransactionMessageAddressTableLookup, ) { assert_eq!( - address_map_indexes.writable.len(), - db_address_map_indexes.writable.len() + address_table_lookups.writable_indexes.len(), + db_address_table_lookups.writable_indexes.len() ); assert_eq!( - address_map_indexes.readonly.len(), - db_address_map_indexes.readonly.len() + address_table_lookups.readonly_indexes.len(), + db_address_table_lookups.readonly_indexes.len() ); - for i in 0..address_map_indexes.writable.len() { + for i in 0..address_table_lookups.writable_indexes.len() { assert_eq!( - address_map_indexes.writable[i], - db_address_map_indexes.writable[i] as u8 + 
address_table_lookups.writable_indexes[i], + db_address_table_lookups.writable_indexes[i] as u8 ) } - for i in 0..address_map_indexes.readonly.len() { + for i in 0..address_table_lookups.readonly_indexes.len() { assert_eq!( - address_map_indexes.readonly[i], - db_address_map_indexes.readonly[i] as u8 + address_table_lookups.readonly_indexes[i], + db_address_table_lookups.readonly_indexes[i] as u8 ) } } #[test] - fn test_transform_address_map_indexes() { - let address_map_indexes = AddressMapIndexes { - writable: vec![1, 2, 3], - readonly: vec![4, 5, 6], + fn test_transform_address_table_lookups() { + let address_table_lookups = MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], }; - let db_address_map_indexes = DbAddressMapIndexes::from(&address_map_indexes); - check_address_map_indexes_equality(&address_map_indexes, &db_address_map_indexes); + let db_address_table_lookups = + DbTransactionMessageAddressTableLookup::from(&address_table_lookups); + check_address_table_lookups_equality(&address_table_lookups, &db_address_table_lookups); } fn check_reward_equality(reward: &Reward, db_reward: &DbReward) { @@ -1089,7 +1097,7 @@ pub(crate) mod tests { check_transaction_message_equality(&message, &db_message); } - fn check_transaction_messagev0_equality( + fn check_transaction_message_v0_equality( message: &v0::Message, db_message: &DbTransactionMessageV0, ) { @@ -1106,18 +1114,18 @@ pub(crate) mod tests { ); } assert_eq!( - message.address_map_indexes.len(), - db_message.address_map_indexes.len() + message.address_table_lookups.len(), + db_message.address_table_lookups.len() ); - for i in 0..message.address_map_indexes.len() { - check_address_map_indexes_equality( - &message.address_map_indexes[i], - &db_message.address_map_indexes[i], + for i in 0..message.address_table_lookups.len() { + check_address_table_lookups_equality( + &message.address_table_lookups[i], + &db_message.address_table_lookups[i], ); } } - fn build_transaction_messagev0() -> v0::Message { + fn build_transaction_message_v0() -> v0::Message { v0::Message { header: MessageHeader { num_readonly_signed_accounts: 2, @@ -1144,71 +1152,76 @@ pub(crate) mod tests { data: vec![14, 15, 16], }, ], - address_map_indexes: vec![ - AddressMapIndexes { - writable: vec![0], - readonly: vec![1, 2], + address_table_lookups: vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![1, 2], }, - AddressMapIndexes { - writable: vec![1], - readonly: vec![0, 2], + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1], + readonly_indexes: vec![0, 2], }, ], } } #[test] - fn test_transform_transaction_messagev0() { - let message = build_transaction_messagev0(); + fn test_transform_transaction_message_v0() { + let message = build_transaction_message_v0(); let db_message = DbTransactionMessageV0::from(&message); - check_transaction_messagev0_equality(&message, &db_message); + check_transaction_message_v0_equality(&message, &db_message); } - fn check_mapped_addresses( - mapped_addresses: &MappedAddresses, - db_mapped_addresses: &DbMappedAddresses, + fn check_loaded_addresses( + loaded_addresses: &LoadedAddresses, + db_loaded_addresses: &DbLoadedAddresses, ) { assert_eq!( - mapped_addresses.writable.len(), - db_mapped_addresses.writable.len() + loaded_addresses.writable.len(), + db_loaded_addresses.writable.len() ); - for i in 0..mapped_addresses.writable.len() { + for 
i in 0..loaded_addresses.writable.len() { assert_eq!( - mapped_addresses.writable[i].as_ref(), - db_mapped_addresses.writable[i] + loaded_addresses.writable[i].as_ref(), + db_loaded_addresses.writable[i] ); } assert_eq!( - mapped_addresses.readonly.len(), - db_mapped_addresses.readonly.len() + loaded_addresses.readonly.len(), + db_loaded_addresses.readonly.len() ); - for i in 0..mapped_addresses.readonly.len() { + for i in 0..loaded_addresses.readonly.len() { assert_eq!( - mapped_addresses.readonly[i].as_ref(), - db_mapped_addresses.readonly[i] + loaded_addresses.readonly[i].as_ref(), + db_loaded_addresses.readonly[i] ); } } - fn check_mapped_message_equality(message: &MappedMessage, db_message: &DbMappedMessage) { - check_transaction_messagev0_equality(&message.message, &db_message.message); - check_mapped_addresses(&message.mapped_addresses, &db_message.mapped_addresses); + fn check_loaded_message_v0_equality( + message: &v0::LoadedMessage, + db_message: &DbLoadedMessageV0, + ) { + check_transaction_message_v0_equality(&message.message, &db_message.message); + check_loaded_addresses(&message.loaded_addresses, &db_message.loaded_addresses); } #[test] - fn test_transform_mapped_message() { - let message = MappedMessage { - message: build_transaction_messagev0(), - mapped_addresses: MappedAddresses { + fn test_transform_loaded_message_v0() { + let message = v0::LoadedMessage { + message: build_transaction_message_v0(), + loaded_addresses: LoadedAddresses { writable: vec![Pubkey::new_unique(), Pubkey::new_unique()], readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()], }, }; - let db_message = DbMappedMessage::from(&message); - check_mapped_message_equality(&message, &db_message); + let db_message = DbLoadedMessageV0::from(&message); + check_loaded_message_v0_equality(&message, &db_message); } fn check_transaction( @@ -1229,9 +1242,9 @@ pub(crate) mod tests { } SanitizedMessage::V0(message) => { assert_eq!(db_transaction.message_type, 1); - check_mapped_message_equality( + check_loaded_message_v0_equality( message, - db_transaction.v0_mapped_message.as_ref().unwrap(), + db_transaction.v0_loaded_message.as_ref().unwrap(), ); } } @@ -1298,7 +1311,7 @@ pub(crate) mod tests { Signature::new(&[2u8; 64]), Signature::new(&[3u8; 64]), ], - message: VersionedMessage::V0(build_transaction_messagev0()), + message: VersionedMessage::V0(build_transaction_message_v0()), } } @@ -1313,7 +1326,7 @@ pub(crate) mod tests { let transaction = SanitizedTransaction::try_create(transaction, message_hash, Some(true), |_message| { - Ok(MappedAddresses { + Ok(LoadedAddresses { writable: vec![Pubkey::new_unique(), Pubkey::new_unique()], readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()], }) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 24ba546676bb56..3d2f57f4bcf46f 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -13,7 +13,7 @@ use { get_tmp_ledger_path, }, solana_measure::measure::Measure, - solana_perf::packet::to_packets_chunked, + solana_perf::packet::to_packet_batches, solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry}, solana_runtime::{ accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, @@ -212,7 +212,7 @@ fn main() { bank.clear_signatures(); } - let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk); + let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new( @@ -364,7 
+364,7 @@ fn main() { let sig: Vec = (0..64).map(|_| thread_rng().gen::()).collect(); tx.signatures[0] = Signature::new(&sig[0..64]); } - verified = to_packets_chunked(&transactions.clone(), packets_per_chunk); + verified = to_packet_batches(&transactions.clone(), packets_per_chunk); } start += chunk_len; diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index 52f52233fd3350..6f5dd133f7ffba 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-banks-interface" edition = "2021" [dependencies] -serde = { version = "1.0.130", features = ["derive"] } +serde = { version = "1.0.131", features = ["derive"] } solana-sdk = { path = "../sdk", version = "=1.10.0" } tarpc = { version = "0.26.2", features = ["full"] } diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index bade7a943093ae..46eeeb761380e2 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -2,8 +2,8 @@ use { clap::{crate_description, crate_name, App, Arg}, solana_streamer::{ - packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE}, - streamer::{receiver, PacketReceiver}, + packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE}, + streamer::{receiver, PacketBatchReceiver}, }, std::{ cmp::max, @@ -20,19 +20,19 @@ use { fn producer(addr: &SocketAddr, exit: Arc) -> JoinHandle<()> { let send = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut msgs = Packets::default(); - msgs.packets.resize(10, Packet::default()); - for w in msgs.packets.iter_mut() { + let mut packet_batch = PacketBatch::default(); + packet_batch.packets.resize(10, Packet::default()); + for w in packet_batch.packets.iter_mut() { w.meta.size = PACKET_DATA_SIZE; w.meta.set_addr(addr); } - let msgs = Arc::new(msgs); + let packet_batch = Arc::new(packet_batch); spawn(move || loop { if exit.load(Ordering::Relaxed) { return; } let mut num = 0; - for p in &msgs.packets { + for p in &packet_batch.packets { let a = p.meta.addr(); assert!(p.meta.size <= PACKET_DATA_SIZE); send.send_to(&p.data[..p.meta.size], &a).unwrap(); @@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc) -> JoinHandle<()> { }) } -fn sink(exit: Arc, rvs: Arc, r: PacketReceiver) -> JoinHandle<()> { +fn sink(exit: Arc, rvs: Arc, r: PacketBatchReceiver) -> JoinHandle<()> { spawn(move || loop { if exit.load(Ordering::Relaxed) { return; } let timer = Duration::new(1, 0); - if let Ok(msgs) = r.recv_timeout(timer) { - rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed); + if let Ok(packet_batch) = r.recv_timeout(timer) { + rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed); } }) } @@ -81,7 +81,7 @@ fn main() -> Result<()> { let mut read_channels = Vec::new(); let mut read_threads = Vec::new(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); for _ in 0..num_sockets { let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap(); read.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index c289df33541462..667a30302ad1b3 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -226,6 +226,19 @@ EOF annotate --style info \ "downstream-projects skipped as no relevant files were modified" fi + + # Wasm support + if affects \ + ^ci/test-wasm.sh \ + ^ci/test-stable.sh \ + ^sdk/ \ + ; then + command_step wasm ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + else + annotate --style info \ + "wasm skipped as no relevant files were modified" + fi + # Benches... if affects \ .rs$ \ diff --git a/ci/dependabot-pr.sh b/ci/dependabot-pr.sh index 3ce48fe524fc7a..9ef6816cec5b96 100755 --- a/ci/dependabot-pr.sh +++ b/ci/dependabot-pr.sh @@ -30,8 +30,6 @@ EMAIL="dependabot-buildkite@noreply.solana.com" \ GIT_AUTHOR_NAME="$name" \ GIT_COMMITTER_NAME="$name" \ git commit -m "[auto-commit] Update all Cargo lock files" -git config pull.rebase false -git pull origin master --allow-unrelated-histories git push origin "HEAD:$branch" echo "Source branch is updated; failing this build for the next" diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index 2e5bac5808818c..4bd0ee82f8eef2 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.56.1 +FROM solanalabs/rust:1.57.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 5d768a81dbc7bf..5b482909d55812 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.56.1 +FROM rust:1.57.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 @@ -11,6 +11,7 @@ RUN set -x \ && apt-get install apt-transport-https \ && echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \ && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \ + && curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \ && apt update \ && apt install -y \ buildkite-agent \ @@ -19,15 +20,20 @@ RUN set -x \ lcov \ libudev-dev \ mscgen \ + nodejs \ net-tools \ rsync \ sudo \ golang \ unzip \ \ + && apt remove -y libcurl4-openssl-dev \ && rm -rf /var/lib/apt/lists/* \ + && node --version \ + && npm --version \ && rustup component add rustfmt \ && rustup component add clippy \ + && rustup target add wasm32-unknown-unknown \ && cargo install cargo-audit \ && cargo install mdbook \ && cargo install mdbook-linkcheck \ diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 630afbac65256c..ea0eca05246fa7 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.56.1 + stable_version=1.57.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2021-11-30 + nightly_version=2021-12-03 fi diff --git a/ci/test-stable.sh b/ci/test-stable.sh index 8f36b68b888233..177175e874d28a 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -103,6 +103,19 @@ test-local-cluster) _ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1 exit 0 ;; +test-wasm) + _ node --version + _ npm --version + for dir in sdk/{program,}; do + if [[ -r "$dir"/package.json ]]; then + pushd "$dir" + _ npm install + _ npm test + popd + fi + done + exit 0 + ;; *) echo "Error: Unknown test: $testName" ;; diff --git a/ci/test-wasm.sh b/ci/test-wasm.sh new file mode 120000 index 00000000000000..0c92a5c7bd6fd4 --- /dev/null +++ b/ci/test-wasm.sh @@ -0,0 +1 @@ +test-stable.sh \ No newline at end of file diff --git a/cli-config/Cargo.toml 
b/cli-config/Cargo.toml index 05e65618ca6f20..e978c93329aaac 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ -12,7 +12,7 @@ documentation = "https://docs.rs/solana-cli-config" [dependencies] dirs-next = "2.0.0" lazy_static = "1.4.0" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_yaml = "0.8.21" url = "2.2.2" diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 7350a2ab120a26..9bda1dd6e47897 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -17,7 +17,7 @@ console = "0.15.0" humantime = "2.0.1" Inflector = "0.11.4" indicatif = "0.16.2" -serde = "1.0.130" +serde = "1.0.131" serde_json = "1.0.72" solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" } solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 2a647402669aa7..3d261058923e2d 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -23,7 +23,7 @@ num-traits = "0.2" pretty-hex = "0.2.1" reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } semver = "1.0.4" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" } @@ -36,7 +36,7 @@ solana-config-program = { path = "../programs/config", version = "=1.10.0" } solana-faucet = { path = "../faucet", version = "=1.10.0" } solana-logger = { path = "../logger", version = "=1.10.0" } solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" } -solana_rbpf = "=0.2.17" +solana_rbpf = "=0.2.18" solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0" } solana-sdk = { path = "../sdk", version = "=1.10.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" } diff --git a/cli/src/feature.rs b/cli/src/feature.rs index d2dd5b6f4458db..391b83ce46609b 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -45,7 +45,7 @@ pub enum FeatureCliCommand { }, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")] pub enum CliFeatureStatus { Inactive, @@ -53,7 +53,29 @@ pub enum CliFeatureStatus { Active(Slot), } -#[derive(Serialize, Deserialize)] +impl PartialOrd for CliFeatureStatus { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CliFeatureStatus { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Inactive, Self::Inactive) => Ordering::Equal, + (Self::Inactive, _) => Ordering::Greater, + (_, Self::Inactive) => Ordering::Less, + (Self::Pending, Self::Pending) => Ordering::Equal, + (Self::Pending, _) => Ordering::Greater, + (_, Self::Pending) => Ordering::Less, + (Self::Active(self_active_slot), Self::Active(other_active_slot)) => { + self_active_slot.cmp(other_active_slot) + } + } + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct CliFeature { pub id: String, @@ -62,6 +84,21 @@ pub struct CliFeature { pub status: CliFeatureStatus, } +impl PartialOrd for CliFeature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CliFeature { + fn cmp(&self, other: &Self) -> Ordering { + match self.status.cmp(&other.status) { + Ordering::Equal => self.id.cmp(&other.id), + ordering => ordering, + } + } +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub 
struct CliFeatures { @@ -93,7 +130,7 @@ impl fmt::Display for CliFeatures { CliFeatureStatus::Inactive => style("inactive".to_string()).red(), CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(), CliFeatureStatus::Active(activation_slot) => - style(format!("active since slot {}", activation_slot)).green(), + style(format!("active since slot {:>9}", activation_slot)).green(), }, feature.description, )?; @@ -550,6 +587,8 @@ fn process_status( }); } + features.sort_unstable(); + let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?; let feature_set = CliFeatures { features, diff --git a/client/Cargo.toml b/client/Cargo.toml index 241ae57b4971df..dacf4533dac9b2 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -20,7 +20,7 @@ log = "0.4.14" rayon = "1.5.1" reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } semver = "1.0.4" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" } diff --git a/core/Cargo.toml b/core/Cargo.toml index f20c4e2685a65c..8438ec6bac2218 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -20,7 +20,7 @@ bincode = "1.3.3" bs58 = "0.4.0" chrono = { version = "0.4.11", features = ["serde"] } crossbeam-channel = "0.5" -dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } +dashmap = { version = "5.0.0", features = ["rayon", "raw-api"] } etcd-client = { version = "0.8.1", features = ["tls"]} fs_extra = "1.2.0" histogram = "0.6.9" @@ -32,7 +32,7 @@ rand_chacha = "0.2.2" raptorq = "1.6.4" rayon = "1.5.1" retain_mut = "0.1.5" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.10.0" } solana-client = { path = "../client", version = "=1.10.0" } diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 1b9e79a8568482..dbb0961af114d8 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -20,7 +20,7 @@ use { genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, }, - solana_perf::{packet::to_packets_chunked, test_tx::test_tx}, + solana_perf::{packet::to_packet_batches, test_tx::test_tx}, solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry}, solana_runtime::{bank::Bank, cost_model::CostModel}, solana_sdk::{ @@ -77,11 +77,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) { let tx = test_tx(); let len = 4096; let chunk_size = 1024; - let batches = to_packets_chunked(&vec![tx; len], chunk_size); - let mut packets = VecDeque::new(); + let batches = to_packet_batches(&vec![tx; len], chunk_size); + let mut packet_batches = VecDeque::new(); for batch in batches { let batch_len = batch.packets.len(); - packets.push_back((batch, vec![0usize; batch_len], false)); + packet_batches.push_back((batch, vec![0usize; batch_len], false)); } let (s, _r) = unbounded(); // This tests the performance of buffering packets. 
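For context on the pattern the bench above exercises: `to_packet_batches` (the renamed `to_packets_chunked`) splits a slice of transactions into `PacketBatch`es of at most `chunk_size` packets, and the banking stage buffers each batch together with its unprocessed packet indexes and a forwarded flag — the `(PacketBatch, Vec<usize>, bool)` tuples that `UnprocessedPacketBatches` holds. A minimal sketch of that shape, assuming only the `solana_perf` paths already used in this diff; `build_buffered_batches` is a hypothetical helper (not part of the change), and unlike the bench, which uses placeholder zero offsets, it marks every packet in a batch as unprocessed:

    use std::collections::VecDeque;
    use solana_perf::{
        packet::{to_packet_batches, PacketBatch},
        test_tx::test_tx,
    };

    /// Chunk transactions into PacketBatches and pair each batch with its
    /// unprocessed packet indexes and a "forwarded" flag.
    fn build_buffered_batches(
        len: usize,
        chunk_size: usize,
    ) -> VecDeque<(PacketBatch, Vec<usize>, bool)> {
        let transactions = vec![test_tx(); len];
        let batches = to_packet_batches(&transactions, chunk_size);
        let mut buffered = VecDeque::with_capacity(batches.len());
        for batch in batches {
            // Initially every packet in the batch is unprocessed and nothing
            // has been forwarded yet.
            let indexes: Vec<usize> = (0..batch.packets.len()).collect();
            buffered.push_back((batch, indexes, false));
        }
        buffered
    }
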
@@ -91,7 +91,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) { &my_pubkey, std::u128::MAX, &poh_recorder, - &mut packets, + &mut packet_batches, None, &s, None::>, @@ -206,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { assert!(r.is_ok(), "sanity parallel execution"); } bank.clear_signatures(); - let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH); + let verified: Vec<_> = to_packet_batches(&transactions, PACKETS_PER_BATCH); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new( diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index e48ab9301c1690..894c474ce8bf62 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -8,7 +8,7 @@ use { log::*, rand::{thread_rng, Rng}, solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage}, - solana_perf::{packet::to_packets_chunked, test_tx::test_tx}, + solana_perf::{packet::to_packet_batches, test_tx::test_tx}, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, @@ -28,7 +28,7 @@ fn bench_packet_discard(bencher: &mut Bencher) { let len = 30 * 1000; let chunk_size = 1024; let tx = test_tx(); - let mut batches = to_packets_chunked(&vec![tx; len], chunk_size); + let mut batches = to_packet_batches(&vec![tx; len], chunk_size); let mut total = 0; @@ -74,7 +74,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) { let chunk_size = 1024; let mut batches = if use_same_tx { let tx = test_tx(); - to_packets_chunked(&vec![tx; len], chunk_size) + to_packet_batches(&vec![tx; len], chunk_size) } else { let from_keypair = Keypair::new(); let to_keypair = Keypair::new(); @@ -89,7 +89,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) { ) }) .collect(); - to_packets_chunked(&txs, chunk_size) + to_packet_batches(&txs, chunk_size) }; trace!( diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index c824036cd85fed..4ccdb33338e6cc 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -14,7 +14,7 @@ use { solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE}, solana_measure::measure::Measure, solana_perf::{ - packet::{limited_deserialize, Packet, Packets}, + packet::{limited_deserialize, Packet, PacketBatch}, recycler::Recycler, }, solana_runtime::bank::Bank, @@ -23,7 +23,7 @@ use { pubkey::Pubkey, timing::timestamp, }, - solana_streamer::streamer::{self, PacketReceiver}, + solana_streamer::streamer::{self, PacketBatchReceiver}, std::{ collections::HashSet, net::UdpSocket, @@ -197,7 +197,7 @@ impl AncestorHashesService { /// Listen for responses to our ancestors hashes repair requests fn run_responses_listener( ancestor_hashes_request_statuses: Arc>, - response_receiver: PacketReceiver, + response_receiver: PacketBatchReceiver, blockstore: Arc, outstanding_requests: Arc>, exit: Arc, @@ -240,7 +240,7 @@ impl AncestorHashesService { /// Process messages from the network fn process_new_packets_from_channel( ancestor_hashes_request_statuses: &DashMap, - response_receiver: &PacketReceiver, + response_receiver: &PacketBatchReceiver, blockstore: &Blockstore, outstanding_requests: &RwLock, stats: &mut AncestorHashesResponsesStats, @@ -249,17 +249,17 @@ impl AncestorHashesService { retryable_slots_sender: &RetryableSlotsSender, ) -> Result<()> { let timeout = Duration::new(1, 0); - let mut responses = vec![response_receiver.recv_timeout(timeout)?]; - let mut total_packets = responses[0].packets.len(); + let mut packet_batches = 
vec![response_receiver.recv_timeout(timeout)?]; + let mut total_packets = packet_batches[0].packets.len(); let mut dropped_packets = 0; - while let Ok(more) = response_receiver.try_recv() { - total_packets += more.packets.len(); + while let Ok(batch) = response_receiver.try_recv() { + total_packets += batch.packets.len(); if total_packets < *max_packets { // Drop the rest in the channel in case of DOS - responses.push(more); + packet_batches.push(batch); } else { - dropped_packets += more.packets.len(); + dropped_packets += batch.packets.len(); } } @@ -267,10 +267,10 @@ impl AncestorHashesService { stats.total_packets += total_packets; let mut time = Measure::start("ancestor_hashes::handle_packets"); - for response in responses { - Self::process_single_packets( + for packet_batch in packet_batches { + Self::process_packet_batch( ancestor_hashes_request_statuses, - response, + packet_batch, stats, outstanding_requests, blockstore, @@ -289,16 +289,16 @@ impl AncestorHashesService { Ok(()) } - fn process_single_packets( + fn process_packet_batch( ancestor_hashes_request_statuses: &DashMap, - packets: Packets, + packet_batch: PacketBatch, stats: &mut AncestorHashesResponsesStats, outstanding_requests: &RwLock, blockstore: &Blockstore, duplicate_slots_reset_sender: &DuplicateSlotsResetSender, retryable_slots_sender: &RetryableSlotsSender, ) { - packets.packets.iter().for_each(|packet| { + packet_batch.packets.iter().for_each(|packet| { let decision = Self::verify_and_process_ancestor_response( packet, ancestor_hashes_request_statuses, @@ -871,7 +871,7 @@ mod test { t_listen: JoinHandle<()>, exit: Arc, responder_info: ContactInfo, - response_receiver: PacketReceiver, + response_receiver: PacketBatchReceiver, correct_bank_hashes: HashMap, } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index d1c7e5eb086eec..15898d4ff6d51b 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -15,7 +15,7 @@ use { solana_perf::{ cuda_runtime::PinnedVec, data_budget::DataBudget, - packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH}, + packet::{limited_deserialize, Packet, PacketBatch, PACKETS_PER_BATCH}, perf_libs, }, solana_poh::poh_recorder::{BankStart, PohRecorder, PohRecorderError, TransactionRecorder}, @@ -64,10 +64,10 @@ use { }; /// (packets, valid_indexes, forwarded) -/// Set of packets with a list of which are valid and if this batch has been forwarded. -type PacketsAndOffsets = (Packets, Vec, bool); +/// Batch of packets with a list of which are valid and if this batch has been forwarded. 
+type PacketBatchAndOffsets = (PacketBatch, Vec, bool); -pub type UnprocessedPackets = VecDeque; +pub type UnprocessedPacketBatches = VecDeque; /// Transaction forwarding pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2; @@ -255,9 +255,9 @@ impl BankingStage { pub fn new( cluster_info: &Arc, poh_recorder: &Arc>, - verified_receiver: CrossbeamReceiver>, - tpu_verified_vote_receiver: CrossbeamReceiver>, - verified_vote_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, + tpu_verified_vote_receiver: CrossbeamReceiver>, + verified_vote_receiver: CrossbeamReceiver>, transaction_status_sender: Option, gossip_vote_sender: ReplayVoteSender, cost_model: Arc>, @@ -278,9 +278,9 @@ impl BankingStage { fn new_num_threads( cluster_info: &Arc, poh_recorder: &Arc>, - verified_receiver: CrossbeamReceiver>, - tpu_verified_vote_receiver: CrossbeamReceiver>, - verified_vote_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, + tpu_verified_vote_receiver: CrossbeamReceiver>, + verified_vote_receiver: CrossbeamReceiver>, num_threads: u32, transaction_status_sender: Option, gossip_vote_sender: ReplayVoteSender, @@ -346,12 +346,12 @@ impl BankingStage { } fn filter_valid_packets_for_forwarding<'a>( - all_packets: impl Iterator, + packet_batches: impl Iterator, ) -> Vec<&'a Packet> { - all_packets - .filter(|(_p, _indexes, forwarded)| !forwarded) - .flat_map(|(p, valid_indexes, _forwarded)| { - valid_indexes.iter().map(move |x| &p.packets[*x]) + packet_batches + .filter(|(_batch, _indexes, forwarded)| !forwarded) + .flat_map(|(batch, valid_indexes, _forwarded)| { + valid_indexes.iter().map(move |x| &batch.packets[*x]) }) .collect() } @@ -359,10 +359,10 @@ impl BankingStage { fn forward_buffered_packets( socket: &std::net::UdpSocket, tpu_forwards: &std::net::SocketAddr, - unprocessed_packets: &UnprocessedPackets, + buffered_packet_batches: &UnprocessedPacketBatches, data_budget: &DataBudget, ) -> std::io::Result<()> { - let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets.iter()); + let packets = Self::filter_valid_packets_for_forwarding(buffered_packet_batches.iter()); inc_new_counter_info!("banking_stage-forwarded_packets", packets.len()); const INTERVAL_MS: u64 = 100; const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200; @@ -385,7 +385,7 @@ impl BankingStage { Ok(()) } - // Returns whether the given `Packets` has any more remaining unprocessed + // Returns whether the given `PacketBatch` has any more remaining unprocessed // transactions fn update_buffered_packets_with_new_unprocessed( original_unprocessed_indexes: &mut Vec, @@ -404,7 +404,7 @@ impl BankingStage { my_pubkey: &Pubkey, max_tx_ingestion_ns: u128, poh_recorder: &Arc>, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, test_fn: Option, @@ -412,19 +412,21 @@ impl BankingStage { recorder: &TransactionRecorder, qos_service: &Arc, ) { - let mut rebuffered_packets_len = 0; + let mut rebuffered_packet_count = 0; let mut new_tx_count = 0; - let buffered_len = buffered_packets.len(); + let buffered_packet_batches_len = buffered_packet_batches.len(); let mut proc_start = Measure::start("consume_buffered_process"); let mut reached_end_of_slot = None; - buffered_packets.retain_mut(|(msgs, ref mut original_unprocessed_indexes, _forwarded)| { + buffered_packet_batches.retain_mut(|buffered_packet_batch_and_offsets| { + let (packet_batch, ref mut 
original_unprocessed_indexes, _forwarded) = + buffered_packet_batch_and_offsets; if let Some((next_leader, bank)) = &reached_end_of_slot { // We've hit the end of this slot, no need to perform more processing, // just filter the remaining packets for the invalid (e.g. too old) ones let new_unprocessed_indexes = Self::filter_unprocessed_packets( bank, - msgs, + packet_batch, original_unprocessed_indexes, my_pubkey, *next_leader, @@ -446,7 +448,7 @@ impl BankingStage { &working_bank, &bank_creation_time, recorder, - msgs, + packet_batch, original_unprocessed_indexes.to_owned(), transaction_status_sender.clone(), gossip_vote_sender, @@ -467,7 +469,7 @@ impl BankingStage { new_tx_count += processed; // Out of the buffered packets just retried, collect any still unprocessed // transactions in this batch for forwarding - rebuffered_packets_len += new_unprocessed_indexes.len(); + rebuffered_packet_count += new_unprocessed_indexes.len(); let has_more_unprocessed_transactions = Self::update_buffered_packets_with_new_unprocessed( original_unprocessed_indexes, @@ -478,7 +480,7 @@ impl BankingStage { } has_more_unprocessed_transactions } else { - rebuffered_packets_len += original_unprocessed_indexes.len(); + rebuffered_packet_count += original_unprocessed_indexes.len(); // `original_unprocessed_indexes` must have remaining packets to process // if not yet processed. assert!(Self::packet_has_more_unprocessed_transactions( @@ -494,7 +496,7 @@ impl BankingStage { debug!( "@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}", timestamp(), - buffered_len, + buffered_packet_batches_len, proc_start.as_ms(), new_tx_count, (new_tx_count as f32) / (proc_start.as_s()) @@ -505,7 +507,7 @@ impl BankingStage { .fetch_add(proc_start.as_us(), Ordering::Relaxed); banking_stage_stats .rebuffered_packets_count - .fetch_add(rebuffered_packets_len, Ordering::Relaxed); + .fetch_add(rebuffered_packet_count, Ordering::Relaxed); banking_stage_stats .consumed_buffered_packets_count .fetch_add(new_tx_count, Ordering::Relaxed); @@ -550,7 +552,7 @@ impl BankingStage { socket: &std::net::UdpSocket, poh_recorder: &Arc>, cluster_info: &ClusterInfo, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, forward_option: &ForwardOption, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -592,7 +594,7 @@ impl BankingStage { my_pubkey, max_tx_ingestion_ns, poh_recorder, - buffered_packets, + buffered_packet_batches, transaction_status_sender, gossip_vote_sender, None::>, @@ -605,7 +607,7 @@ impl BankingStage { Self::handle_forwarding( forward_option, cluster_info, - buffered_packets, + buffered_packet_batches, poh_recorder, socket, false, @@ -616,7 +618,7 @@ impl BankingStage { Self::handle_forwarding( forward_option, cluster_info, - buffered_packets, + buffered_packet_batches, poh_recorder, socket, true, @@ -631,7 +633,7 @@ impl BankingStage { fn handle_forwarding( forward_option: &ForwardOption, cluster_info: &ClusterInfo, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, poh_recorder: &Arc>, socket: &UdpSocket, hold: bool, @@ -640,7 +642,7 @@ impl BankingStage { let addr = match forward_option { ForwardOption::NotForward => { if !hold { - buffered_packets.clear(); + buffered_packet_batches.clear(); } return; } @@ -653,20 +655,20 @@ impl BankingStage { Some(addr) => addr, None => return, }; - let _ = Self::forward_buffered_packets(socket, &addr, buffered_packets, data_budget); + let _ 
= Self::forward_buffered_packets(socket, &addr, buffered_packet_batches, data_budget); if hold { - buffered_packets.retain(|(_, index, _)| !index.is_empty()); - for (_, _, forwarded) in buffered_packets.iter_mut() { + buffered_packet_batches.retain(|(_, index, _)| !index.is_empty()); + for (_, _, forwarded) in buffered_packet_batches.iter_mut() { *forwarded = true; } } else { - buffered_packets.clear(); + buffered_packet_batches.clear(); } } #[allow(clippy::too_many_arguments)] fn process_loop( - verified_receiver: &CrossbeamReceiver>, + verified_receiver: &CrossbeamReceiver>, poh_recorder: &Arc>, cluster_info: &ClusterInfo, recv_start: &mut Instant, @@ -681,17 +683,17 @@ impl BankingStage { ) { let recorder = poh_recorder.lock().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut buffered_packets = VecDeque::with_capacity(batch_limit); + let mut buffered_packet_batches = VecDeque::with_capacity(batch_limit); let banking_stage_stats = BankingStageStats::new(id); loop { let my_pubkey = cluster_info.id(); - while !buffered_packets.is_empty() { + while !buffered_packet_batches.is_empty() { let decision = Self::process_buffered_packets( &my_pubkey, &socket, poh_recorder, cluster_info, - &mut buffered_packets, + &mut buffered_packet_batches, &forward_option, transaction_status_sender.clone(), &gossip_vote_sender, @@ -709,7 +711,7 @@ impl BankingStage { } } - let recv_timeout = if !buffered_packets.is_empty() { + let recv_timeout = if !buffered_packet_batches.is_empty() { // If packets are buffered, let's wait for less time on recv from the channel. // This helps detect the next leader faster, and processing the buffered // packets quickly @@ -729,7 +731,7 @@ impl BankingStage { batch_limit, transaction_status_sender.clone(), &gossip_vote_sender, - &mut buffered_packets, + &mut buffered_packet_batches, &banking_stage_stats, duplicates, &recorder, @@ -947,8 +949,8 @@ impl BankingStage { bank.prepare_sanitized_batch_with_results(txs, transactions_qos_results.into_iter()); lock_time.stop(); - // retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit and - // WouldExceedMaxAccountCostLimit + // retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit + // WouldExceedMaxAccountCostLimit, and WouldExceedMaxAccountDataCostLimit let (result, mut retryable_txs) = Self::process_and_record_transactions_locked( bank, poh, @@ -1076,7 +1078,7 @@ impl BankingStage { // with their packet indexes. 
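// A minimal sketch (not lines from this patch), assuming the tuple layout the hunks
// above already use: after the rename, each buffered entry pairs a `PacketBatch` with
// the indexes of its still-unprocessed packets and a `forwarded` flag, which is why
// forwarding keeps only entries whose index list is non-empty. The alias definition
// and the helper name `prune_after_forwarding` are illustrative assumptions.
use solana_perf::packet::PacketBatch;
use std::collections::VecDeque;

type UnprocessedPacketBatches = VecDeque<(PacketBatch, Vec<usize>, bool)>;

fn prune_after_forwarding(buffered: &mut UnprocessedPacketBatches) {
    // Drop batches with nothing left to process, then mark the survivors as forwarded,
    // mirroring the retain/iter_mut pair in handle_forwarding above.
    buffered.retain(|(_, unprocessed_indexes, _)| !unprocessed_indexes.is_empty());
    for (_, _, forwarded) in buffered.iter_mut() {
        *forwarded = true;
    }
}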
#[allow(clippy::needless_collect)] fn transactions_from_packets( - msgs: &Packets, + packet_batch: &PacketBatch, transaction_indexes: &[usize], feature_set: &Arc, votes_only: bool, @@ -1084,7 +1086,7 @@ impl BankingStage { transaction_indexes .iter() .filter_map(|tx_index| { - let p = &msgs.packets[*tx_index]; + let p = &packet_batch.packets[*tx_index]; if votes_only && !p.meta.is_simple_vote_tx { return None; } @@ -1149,7 +1151,7 @@ impl BankingStage { bank: &Arc, bank_creation_time: &Instant, poh: &TransactionRecorder, - msgs: &Packets, + packet_batch: &PacketBatch, packet_indexes: Vec, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -1158,7 +1160,7 @@ impl BankingStage { ) -> (usize, usize, Vec) { let mut packet_conversion_time = Measure::start("packet_conversion"); let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( - msgs, + packet_batch, &packet_indexes, &bank.feature_set, bank.vote_only_bank(), @@ -1214,7 +1216,7 @@ impl BankingStage { fn filter_unprocessed_packets( bank: &Arc, - msgs: &Packets, + packet_batch: &PacketBatch, transaction_indexes: &[usize], my_pubkey: &Pubkey, next_leader: Option, @@ -1232,7 +1234,7 @@ impl BankingStage { let mut unprocessed_packet_conversion_time = Measure::start("unprocessed_packet_conversion"); let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( - msgs, + packet_batch, transaction_indexes, &bank.feature_set, bank.vote_only_bank(), @@ -1282,7 +1284,7 @@ impl BankingStage { /// Process the incoming packets fn process_packets( my_pubkey: &Pubkey, - verified_receiver: &CrossbeamReceiver>, + verified_receiver: &CrossbeamReceiver>, poh: &Arc>, recv_start: &mut Instant, recv_timeout: Duration, @@ -1290,41 +1292,41 @@ impl BankingStage { batch_limit: usize, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, banking_stage_stats: &BankingStageStats, duplicates: &Arc, PacketHasher)>>, recorder: &TransactionRecorder, qos_service: &Arc, ) -> Result<(), RecvTimeoutError> { let mut recv_time = Measure::start("process_packets_recv"); - let mms = verified_receiver.recv_timeout(recv_timeout)?; + let packet_batches = verified_receiver.recv_timeout(recv_timeout)?; recv_time.stop(); - let mms_len = mms.len(); - let count: usize = mms.iter().map(|x| x.packets.len()).sum(); + let packet_batches_len = packet_batches.len(); + let packet_count: usize = packet_batches.iter().map(|x| x.packets.len()).sum(); debug!( "@{:?} process start stalled for: {:?}ms txs: {} id: {}", timestamp(), duration_as_ms(&recv_start.elapsed()), - count, + packet_count, id, ); - inc_new_counter_debug!("banking_stage-transactions_received", count); + inc_new_counter_debug!("banking_stage-transactions_received", packet_count); let mut proc_start = Measure::start("process_packets_transactions_process"); let mut new_tx_count = 0; - let mut mms_iter = mms.into_iter(); + let mut packet_batch_iter = packet_batches.into_iter(); let mut dropped_packets_count = 0; let mut dropped_packet_batches_count = 0; let mut newly_buffered_packets_count = 0; - while let Some(msgs) = mms_iter.next() { - let packet_indexes = Self::generate_packet_indexes(&msgs.packets); + while let Some(packet_batch) = packet_batch_iter.next() { + let packet_indexes = Self::generate_packet_indexes(&packet_batch.packets); let poh_recorder_bank = poh.lock().unwrap().get_poh_recorder_bank(); let working_bank_start = 
poh_recorder_bank.working_bank_start(); if PohRecorder::get_working_bank_if_not_expired(&working_bank_start).is_none() { Self::push_unprocessed( - buffered_packets, - msgs, + buffered_packet_batches, + packet_batch, packet_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -1347,7 +1349,7 @@ impl BankingStage { working_bank, bank_creation_time, recorder, - &msgs, + &packet_batch, packet_indexes, transaction_status_sender.clone(), gossip_vote_sender, @@ -1359,8 +1361,8 @@ impl BankingStage { // Collect any unprocessed transactions in this batch for forwarding Self::push_unprocessed( - buffered_packets, - msgs, + buffered_packet_batches, + packet_batch, unprocessed_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -1376,19 +1378,19 @@ impl BankingStage { let next_leader = poh.lock().unwrap().next_slot_leader(); // Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones #[allow(clippy::while_let_on_iterator)] - while let Some(msgs) = mms_iter.next() { - let packet_indexes = Self::generate_packet_indexes(&msgs.packets); + while let Some(packet_batch) = packet_batch_iter.next() { + let packet_indexes = Self::generate_packet_indexes(&packet_batch.packets); let unprocessed_indexes = Self::filter_unprocessed_packets( working_bank, - &msgs, + &packet_batch, &packet_indexes, my_pubkey, next_leader, banking_stage_stats, ); Self::push_unprocessed( - buffered_packets, - msgs, + buffered_packet_batches, + packet_batch, unprocessed_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -1409,11 +1411,11 @@ impl BankingStage { debug!( "@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {} total count: {} id: {}", timestamp(), - mms_len, + packet_batches_len, proc_start.as_ms(), new_tx_count, (new_tx_count as f32) / (proc_start.as_s()), - count, + packet_count, id, ); banking_stage_stats @@ -1421,7 +1423,7 @@ impl BankingStage { .fetch_add(proc_start.as_us(), Ordering::Relaxed); banking_stage_stats .process_packets_count - .fetch_add(count, Ordering::Relaxed); + .fetch_add(packet_count, Ordering::Relaxed); banking_stage_stats .new_tx_count .fetch_add(new_tx_count, Ordering::Relaxed); @@ -1436,9 +1438,12 @@ impl BankingStage { .fetch_add(newly_buffered_packets_count, Ordering::Relaxed); banking_stage_stats .current_buffered_packet_batches_count - .swap(buffered_packets.len(), Ordering::Relaxed); + .swap(buffered_packet_batches.len(), Ordering::Relaxed); banking_stage_stats.current_buffered_packets_count.swap( - buffered_packets.iter().map(|packets| packets.1.len()).sum(), + buffered_packet_batches + .iter() + .map(|packets| packets.1.len()) + .sum(), Ordering::Relaxed, ); *recv_start = Instant::now(); @@ -1446,8 +1451,8 @@ impl BankingStage { } fn push_unprocessed( - unprocessed_packets: &mut UnprocessedPackets, - packets: Packets, + unprocessed_packet_batches: &mut UnprocessedPacketBatches, + packet_batch: PacketBatch, mut packet_indexes: Vec, dropped_packet_batches_count: &mut usize, dropped_packets_count: &mut usize, @@ -1462,7 +1467,7 @@ impl BankingStage { let mut duplicates = duplicates.lock().unwrap(); let (cache, hasher) = duplicates.deref_mut(); packet_indexes.retain(|i| { - let packet_hash = hasher.hash_packet(&packets.packets[*i]); + let packet_hash = hasher.hash_packet(&packet_batch.packets[*i]); match cache.get_mut(&packet_hash) { Some(_hash) => false, None => { @@ -1483,14 +1488,14 @@ impl BankingStage { ); } if 
Self::packet_has_more_unprocessed_transactions(&packet_indexes) { - if unprocessed_packets.len() >= batch_limit { + if unprocessed_packet_batches.len() >= batch_limit { *dropped_packet_batches_count += 1; - if let Some(dropped_batch) = unprocessed_packets.pop_front() { + if let Some(dropped_batch) = unprocessed_packet_batches.pop_front() { *dropped_packets_count += dropped_batch.1.len(); } } *newly_buffered_packets_count += packet_indexes.len(); - unprocessed_packets.push_back((packets, packet_indexes, false)); + unprocessed_packet_batches.push_back((packet_batch, packet_indexes, false)); } } @@ -1560,7 +1565,7 @@ mod tests { get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, }, - solana_perf::packet::to_packets_chunked, + solana_perf::packet::to_packet_batches, solana_poh::{ poh_recorder::{create_test_recorder, Record, WorkingBankEntry}, poh_service::PohService, @@ -1697,7 +1702,9 @@ mod tests { Blockstore::destroy(&ledger_path).unwrap(); } - pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec)>) -> Vec { + pub fn convert_from_old_verified( + mut with_vers: Vec<(PacketBatch, Vec)>, + ) -> Vec { with_vers.iter_mut().for_each(|(b, v)| { b.packets .iter_mut() @@ -1769,18 +1776,18 @@ mod tests { let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash); // send 'em over - let packets = to_packets_chunked(&[tx_no_ver, tx_anf, tx], 3); + let packet_batches = to_packet_batches(&[tx_no_ver, tx_anf, tx], 3); // glad they all fit - assert_eq!(packets.len(), 1); + assert_eq!(packet_batches.len(), 1); - let packets = packets + let packet_batches = packet_batches .into_iter() - .map(|packets| (packets, vec![0u8, 1u8, 1u8])) + .map(|batch| (batch, vec![0u8, 1u8, 1u8])) .collect(); - let packets = convert_from_old_verified(packets); + let packet_batches = convert_from_old_verified(packet_batches); verified_sender // no_ver, anf, tx - .send(packets) + .send(packet_batches) .unwrap(); drop(verified_sender); @@ -1846,24 +1853,24 @@ mod tests { let tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash()); - let packets = to_packets_chunked(&[tx], 1); - let packets = packets + let packet_batches = to_packet_batches(&[tx], 1); + let packet_batches = packet_batches .into_iter() - .map(|packets| (packets, vec![1u8])) + .map(|batch| (batch, vec![1u8])) .collect(); - let packets = convert_from_old_verified(packets); - verified_sender.send(packets).unwrap(); + let packet_batches = convert_from_old_verified(packet_batches); + verified_sender.send(packet_batches).unwrap(); // Process a second batch that uses the same from account, so conflicts with above TX let tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash()); - let packets = to_packets_chunked(&[tx], 1); - let packets = packets + let packet_batches = to_packet_batches(&[tx], 1); + let packet_batches = packet_batches .into_iter() - .map(|packets| (packets, vec![1u8])) + .map(|batch| (batch, vec![1u8])) .collect(); - let packets = convert_from_old_verified(packets); - verified_sender.send(packets).unwrap(); + let packet_batches = convert_from_old_verified(packet_batches); + verified_sender.send(packet_batches).unwrap(); let (vote_sender, vote_receiver) = unbounded(); let (tpu_vote_sender, tpu_vote_receiver) = unbounded(); @@ -2381,9 +2388,9 @@ mod tests { fn test_filter_valid_packets() { solana_logger::setup(); - let mut all_packets = (0..16) + let mut packet_batches = (0..16) .map(|packets_id| { - let packets = Packets::new( + let packet_batch = 
PacketBatch::new( (0..32) .map(|packet_id| { let mut p = Packet::default(); @@ -2395,11 +2402,11 @@ mod tests { let valid_indexes = (0..32) .filter_map(|x| if x % 2 != 0 { Some(x as usize) } else { None }) .collect_vec(); - (packets, valid_indexes, false) + (packet_batch, valid_indexes, false) }) .collect_vec(); - let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter()); + let result = BankingStage::filter_valid_packets_for_forwarding(packet_batches.iter()); assert_eq!(result.len(), 256); @@ -2413,8 +2420,8 @@ mod tests { }) .collect_vec(); - all_packets[0].2 = true; - let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter()); + packet_batches[0].2 = true; + let result = BankingStage::filter_valid_packets_for_forwarding(packet_batches.iter()); assert_eq!(result.len(), 240); } @@ -2666,12 +2673,15 @@ mod tests { setup_conflicting_transactions(&ledger_path); let recorder = poh_recorder.lock().unwrap().recorder(); let num_conflicting_transactions = transactions.len(); - let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions); - assert_eq!(packets_vec.len(), 1); - assert_eq!(packets_vec[0].packets.len(), num_conflicting_transactions); - let all_packets = packets_vec.pop().unwrap(); - let mut buffered_packets: UnprocessedPackets = vec![( - all_packets, + let mut packet_batches = to_packet_batches(&transactions, num_conflicting_transactions); + assert_eq!(packet_batches.len(), 1); + assert_eq!( + packet_batches[0].packets.len(), + num_conflicting_transactions + ); + let packet_batch = packet_batches.pop().unwrap(); + let mut buffered_packet_batches: UnprocessedPacketBatches = vec![( + packet_batch, (0..num_conflicting_transactions).into_iter().collect(), false, )] @@ -2687,7 +2697,7 @@ mod tests { &Pubkey::default(), max_tx_processing_ns, &poh_recorder, - &mut buffered_packets, + &mut buffered_packet_batches, None, &gossip_vote_sender, None::>, @@ -2695,7 +2705,10 @@ mod tests { &recorder, &Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))), ); - assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions); + assert_eq!( + buffered_packet_batches[0].1.len(), + num_conflicting_transactions + ); // When the poh recorder has a bank, should process all non conflicting buffered packets. 
// Processes one packet per iteration of the loop for num_expected_unprocessed in (0..num_conflicting_transactions).rev() { @@ -2704,7 +2717,7 @@ mod tests { &Pubkey::default(), max_tx_processing_ns, &poh_recorder, - &mut buffered_packets, + &mut buffered_packet_batches, None, &gossip_vote_sender, None::>, @@ -2713,9 +2726,9 @@ mod tests { &Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))), ); if num_expected_unprocessed == 0 { - assert!(buffered_packets.is_empty()) + assert!(buffered_packet_batches.is_empty()) } else { - assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed); + assert_eq!(buffered_packet_batches[0].1.len(), num_expected_unprocessed); } } poh_recorder @@ -2735,12 +2748,12 @@ mod tests { let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = setup_conflicting_transactions(&ledger_path); let num_conflicting_transactions = transactions.len(); - let packets_vec = to_packets_chunked(&transactions, 1); - assert_eq!(packets_vec.len(), num_conflicting_transactions); - for single_packets in &packets_vec { - assert_eq!(single_packets.packets.len(), 1); + let packet_batches = to_packet_batches(&transactions, 1); + assert_eq!(packet_batches.len(), num_conflicting_transactions); + for single_packet_batch in &packet_batches { + assert_eq!(single_packet_batch.packets.len(), 1); } - let mut buffered_packets: UnprocessedPackets = packets_vec + let mut buffered_packet_batches: UnprocessedPacketBatches = packet_batches .clone() .into_iter() .map(|single_packets| (single_packets, vec![0], false)) @@ -2754,8 +2767,8 @@ mod tests { continue_receiver.recv().unwrap(); }); // When the poh recorder has a bank, it should process all non conflicting buffered packets. - // Because each conflicting transaction is in it's own `Packet` within `packets_vec`, then - // each iteration of this loop will process one element of `packets_vec`per iteration of the + // Because each conflicting transaction is in it's own `Packet` within a `PacketBatch`, then + // each iteration of this loop will process one element of the batch per iteration of the // loop. let interrupted_iteration = 1; poh_recorder.lock().unwrap().set_bank(&bank); @@ -2770,7 +2783,7 @@ mod tests { &Pubkey::default(), std::u128::MAX, &poh_recorder_, - &mut buffered_packets, + &mut buffered_packet_batches, None, &gossip_vote_sender, test_fn, @@ -2782,13 +2795,13 @@ mod tests { // Check everything is correct. 
All indexes after `interrupted_iteration` // should still be unprocessed assert_eq!( - buffered_packets.len(), - packets_vec[interrupted_iteration + 1..].len() + buffered_packet_batches.len(), + packet_batches[interrupted_iteration + 1..].len() ); for ((remaining_unprocessed_packet, _, _forwarded), original_packet) in - buffered_packets + buffered_packet_batches .iter() - .zip(&packets_vec[interrupted_iteration + 1..]) + .zip(&packet_batches[interrupted_iteration + 1..]) { assert_eq!( remaining_unprocessed_packet.packets[0], @@ -2823,10 +2836,10 @@ mod tests { #[test] fn test_forwarder_budget() { solana_logger::setup(); - // Create `Packets` with 1 unprocessed element - let single_element_packets = Packets::new(vec![Packet::default()]); - let mut unprocessed_packets: UnprocessedPackets = - vec![(single_element_packets, vec![0], false)] + // Create `PacketBatch` with 1 unprocessed packet + let single_packet_batch = PacketBatch::new(vec![Packet::default()]); + let mut unprocessed_packets: UnprocessedPacketBatches = + vec![(single_packet_batch, vec![0], false)] .into_iter() .collect(); @@ -2872,14 +2885,16 @@ mod tests { #[test] fn test_push_unprocessed_batch_limit() { solana_logger::setup(); - // Create `Packets` with 2 unprocessed elements - let new_packets = Packets::new(vec![Packet::default(); 2]); - let mut unprocessed_packets: UnprocessedPackets = - vec![(new_packets, vec![0, 1], false)].into_iter().collect(); + // Create `PacketBatch` with 2 unprocessed packets + let new_packet_batch = PacketBatch::new(vec![Packet::default(); 2]); + let mut unprocessed_packets: UnprocessedPacketBatches = + vec![(new_packet_batch, vec![0, 1], false)] + .into_iter() + .collect(); // Set the limit to 2 let batch_limit = 2; - // Create some new unprocessed packets - let new_packets = Packets::new(vec![Packet::default()]); + // Create new unprocessed packets and add to a batch + let new_packet_batch = PacketBatch::new(vec![Packet::default()]); let packet_indexes = vec![]; let duplicates = Arc::new(Mutex::new(( @@ -2894,7 +2909,7 @@ mod tests { // packets are not added to the unprocessed queue BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets.clone(), + new_packet_batch.clone(), packet_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2913,7 +2928,7 @@ mod tests { let packet_indexes = vec![0]; BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets, + new_packet_batch, packet_indexes.clone(), &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2929,7 +2944,7 @@ mod tests { // Because we've reached the batch limit, old unprocessed packets are // dropped and the new one is appended to the end - let new_packets = Packets::new(vec![Packet::from_data( + let new_packet_batch = PacketBatch::new(vec![Packet::from_data( Some(&SocketAddr::from(([127, 0, 0, 1], 8001))), 42, ) @@ -2937,7 +2952,7 @@ mod tests { assert_eq!(unprocessed_packets.len(), batch_limit); BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets.clone(), + new_packet_batch.clone(), packet_indexes.clone(), &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2947,7 +2962,10 @@ mod tests { &banking_stage_stats, ); assert_eq!(unprocessed_packets.len(), 2); - assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]); + assert_eq!( + unprocessed_packets[1].0.packets[0], + new_packet_batch.packets[0] + ); assert_eq!(dropped_packet_batches_count, 1); assert_eq!(dropped_packets_count, 2); assert_eq!(newly_buffered_packets_count, 2); @@ 
-2955,7 +2973,7 @@ mod tests { // Check duplicates are dropped (newly buffered shouldn't change) BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets.clone(), + new_packet_batch.clone(), packet_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2965,7 +2983,10 @@ mod tests { &banking_stage_stats, ); assert_eq!(unprocessed_packets.len(), 2); - assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]); + assert_eq!( + unprocessed_packets[1].0.packets[0], + new_packet_batch.packets[0] + ); assert_eq!(dropped_packet_batches_count, 1); assert_eq!(dropped_packets_count, 2); assert_eq!(newly_buffered_packets_count, 2); @@ -2988,19 +3009,19 @@ mod tests { fn make_test_packets( transactions: Vec, vote_indexes: Vec, - ) -> (Packets, Vec) { + ) -> (PacketBatch, Vec) { let capacity = transactions.len(); - let mut packets = Packets::with_capacity(capacity); + let mut packet_batch = PacketBatch::with_capacity(capacity); let mut packet_indexes = Vec::with_capacity(capacity); - packets.packets.resize(capacity, Packet::default()); + packet_batch.packets.resize(capacity, Packet::default()); for (index, tx) in transactions.iter().enumerate() { - Packet::populate_packet(&mut packets.packets[index], None, tx).ok(); + Packet::populate_packet(&mut packet_batch.packets[index], None, tx).ok(); packet_indexes.push(index); } for index in vote_indexes.iter() { - packets.packets[*index].meta.is_simple_vote_tx = true; + packet_batch.packets[*index].meta.is_simple_vote_tx = true; } - (packets, packet_indexes) + (packet_batch, packet_indexes) } #[test] @@ -3022,12 +3043,12 @@ mod tests { // packets with no votes { let vote_indexes = vec![]; - let (packets, packet_indexes) = + let (packet_batch, packet_indexes) = make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes); let mut votes_only = false; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3037,7 +3058,7 @@ mod tests { votes_only = true; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3049,14 +3070,14 @@ mod tests { // packets with some votes { let vote_indexes = vec![0, 2]; - let (packets, packet_indexes) = make_test_packets( + let (packet_batch, packet_indexes) = make_test_packets( vec![vote_tx.clone(), transfer_tx, vote_tx.clone()], vote_indexes, ); let mut votes_only = false; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3066,7 +3087,7 @@ mod tests { votes_only = true; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3078,14 +3099,14 @@ mod tests { // packets with all votes { let vote_indexes = vec![0, 1, 2]; - let (packets, packet_indexes) = make_test_packets( + let (packet_batch, packet_indexes) = make_test_packets( vec![vote_tx.clone(), vote_tx.clone(), vote_tx], vote_indexes, ); let mut votes_only = false; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3095,7 +3116,7 @@ mod tests { votes_only = true; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, 
&packet_indexes, &Arc::new(FeatureSet::default()), votes_only, diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 7a0dbdfa560f80..877a234bd8791e 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -596,7 +596,7 @@ mod test { .expect("Expected a shred that signals an interrupt"); // Validate the shred - assert_eq!(shred.parent(), Some(parent)); + assert_eq!(shred.parent().unwrap(), parent); assert_eq!(shred.slot(), slot); assert_eq!(shred.index(), next_shred_index); assert!(shred.is_data()); diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index bae24e80da39f3..25944880857865 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -22,7 +22,7 @@ use { solana_ledger::blockstore::Blockstore, solana_measure::measure::Measure, solana_metrics::inc_new_counter_debug, - solana_perf::packet::{self, Packets}, + solana_perf::packet::{self, PacketBatch}, solana_poh::poh_recorder::PohRecorder, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, @@ -299,7 +299,7 @@ impl ClusterInfoVoteListener { pub fn new( exit: &Arc, cluster_info: Arc, - verified_packets_sender: CrossbeamSender>, + verified_packets_sender: CrossbeamSender>, poh_recorder: &Arc>, vote_tracker: Arc, bank_forks: Arc>, @@ -396,14 +396,14 @@ impl ClusterInfoVoteListener { #[allow(clippy::type_complexity)] fn verify_votes(votes: Vec) -> (Vec, Vec) { - let mut msgs = packet::to_packets_chunked(&votes, 1); + let mut packet_batches = packet::to_packet_batches(&votes, 1); // Votes should already be filtered by this point. let reject_non_vote = false; - sigverify::ed25519_verify_cpu(&mut msgs, reject_non_vote); + sigverify::ed25519_verify_cpu(&mut packet_batches, reject_non_vote); - let (vote_txs, vote_metadata) = izip!(votes.into_iter(), msgs,) - .filter_map(|(vote_tx, packet)| { + let (vote_txs, vote_metadata) = izip!(votes.into_iter(), packet_batches) + .filter_map(|(vote_tx, packet_batch)| { let (vote, vote_account_key) = vote_transaction::parse_vote_transaction(&vote_tx) .and_then(|(vote_account_key, vote, _)| { if vote.slots().is_empty() { @@ -413,16 +413,16 @@ impl ClusterInfoVoteListener { } })?; - // to_packets_chunked() above split into 1 packet long chunks - assert_eq!(packet.packets.len(), 1); - if !packet.packets[0].meta.discard { + // to_packet_batches() above splits into 1 packet long batches + assert_eq!(packet_batch.packets.len(), 1); + if !packet_batch.packets[0].meta.discard { if let Some(signature) = vote_tx.signatures.first().cloned() { return Some(( vote_tx, VerifiedVoteMetadata { vote_account_key, vote, - packet, + packet_batch, signature, }, )); @@ -438,7 +438,7 @@ impl ClusterInfoVoteListener { exit: Arc, verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver, poh_recorder: Arc>, - verified_packets_sender: &CrossbeamSender>, + verified_packets_sender: &CrossbeamSender>, ) -> Result<()> { let mut verified_vote_packets = VerifiedVotePackets::default(); let mut time_since_lock = Instant::now(); @@ -486,7 +486,7 @@ impl ClusterInfoVoteListener { fn check_for_leader_bank_and_send_votes( bank_vote_sender_state_option: &mut Option, current_working_bank: Arc, - verified_packets_sender: &CrossbeamSender>, + verified_packets_sender: &CrossbeamSender>, verified_vote_packets: &VerifiedVotePackets, ) -> Result<()> { // We will take this lock at 
most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS` @@ -986,9 +986,9 @@ mod tests { use bincode::serialized_size; info!("max vote size {}", serialized_size(&vote_tx).unwrap()); - let msgs = packet::to_packets_chunked(&[vote_tx], 1); // panics if won't fit + let packet_batches = packet::to_packet_batches(&[vote_tx], 1); // panics if won't fit - assert_eq!(msgs.len(), 1); + assert_eq!(packet_batches.len(), 1); } fn run_vote_contains_authorized_voter(hash: Option) { @@ -1819,7 +1819,7 @@ mod tests { fn verify_packets_len(packets: &[VerifiedVoteMetadata], ref_value: usize) { let num_packets: usize = packets .iter() - .map(|vote_metadata| vote_metadata.packet.packets.len()) + .map(|vote_metadata| vote_metadata.packet_batch.packets.len()) .sum(); assert_eq!(num_packets, ref_value); } diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 248d3bf65d6181..9a52e2f6b4dc33 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -6,10 +6,10 @@ use { result::{Error, Result}, }, solana_metrics::{inc_new_counter_debug, inc_new_counter_info}, - solana_perf::{packet::PacketsRecycler, recycler::Recycler}, + solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, solana_poh::poh_recorder::PohRecorder, solana_sdk::clock::DEFAULT_TICKS_PER_SLOT, - solana_streamer::streamer::{self, PacketReceiver, PacketSender}, + solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender}, std::{ net::UdpSocket, sync::{ @@ -34,7 +34,7 @@ impl FetchStage { exit: &Arc, poh_recorder: &Arc>, coalesce_ms: u64, - ) -> (Self, PacketReceiver, PacketReceiver) { + ) -> (Self, PacketBatchReceiver, PacketBatchReceiver) { let (sender, receiver) = channel(); let (vote_sender, vote_receiver) = channel(); ( @@ -58,8 +58,8 @@ impl FetchStage { tpu_forwards_sockets: Vec, tpu_vote_sockets: Vec, exit: &Arc, - sender: &PacketSender, - vote_sender: &PacketSender, + sender: &PacketBatchSender, + vote_sender: &PacketBatchSender, poh_recorder: &Arc>, coalesce_ms: u64, ) -> Self { @@ -79,18 +79,18 @@ impl FetchStage { } fn handle_forwarded_packets( - recvr: &PacketReceiver, - sendr: &PacketSender, + recvr: &PacketBatchReceiver, + sendr: &PacketBatchSender, poh_recorder: &Arc>, ) -> Result<()> { - let msgs = recvr.recv()?; - let mut len = msgs.packets.len(); - let mut batch = vec![msgs]; - while let Ok(more) = recvr.try_recv() { - len += more.packets.len(); - batch.push(more); + let packet_batch = recvr.recv()?; + let mut num_packets = packet_batch.packets.len(); + let mut packet_batches = vec![packet_batch]; + while let Ok(packet_batch) = recvr.try_recv() { + num_packets += packet_batch.packets.len(); + packet_batches.push(packet_batch); // Read at most 1K transactions in a loop - if len > 1024 { + if num_packets > 1024 { break; } } @@ -100,15 +100,15 @@ impl FetchStage { .unwrap() .would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET.saturating_mul(DEFAULT_TICKS_PER_SLOT)) { - inc_new_counter_debug!("fetch_stage-honor_forwards", len); - for packets in batch { + inc_new_counter_debug!("fetch_stage-honor_forwards", num_packets); + for packet_batch in packet_batches { #[allow(clippy::question_mark)] - if sendr.send(packets).is_err() { + if sendr.send(packet_batch).is_err() { return Err(Error::Send); } } } else { - inc_new_counter_info!("fetch_stage-discard_forwards", len); + inc_new_counter_info!("fetch_stage-discard_forwards", num_packets); } Ok(()) @@ -119,12 +119,12 @@ impl FetchStage { tpu_forwards_sockets: Vec>, tpu_vote_sockets: Vec>, exit: &Arc, - sender: &PacketSender, - vote_sender: &PacketSender, + sender: 
&PacketBatchSender, + vote_sender: &PacketBatchSender, poh_recorder: &Arc>, coalesce_ms: u64, ) -> Self { - let recycler: PacketsRecycler = Recycler::warmed(1000, 1024); + let recycler: PacketBatchRecycler = Recycler::warmed(1000, 1024); let tpu_threads = sockets.into_iter().map(|socket| { streamer::receiver( diff --git a/core/src/qos_service.rs b/core/src/qos_service.rs index 435ed564411801..de50031ce430c4 100644 --- a/core/src/qos_service.rs +++ b/core/src/qos_service.rs @@ -133,6 +133,10 @@ impl QosService { self.metrics.retried_txs_per_account_limit_count.fetch_add(1, Ordering::Relaxed); Err(TransactionError::WouldExceedMaxAccountCostLimit) } + CostTrackerError::WouldExceedAccountDataMaxLimit => { + self.metrics.retried_txs_per_account_data_limit_count.fetch_add(1, Ordering::Relaxed); + Err(TransactionError::WouldExceedMaxAccountDataCostLimit) + } } } }) @@ -165,6 +169,7 @@ struct QosServiceMetrics { selected_txs_count: AtomicU64, retried_txs_per_block_limit_count: AtomicU64, retried_txs_per_account_limit_count: AtomicU64, + retried_txs_per_account_data_limit_count: AtomicU64, } impl QosServiceMetrics { @@ -204,6 +209,12 @@ impl QosServiceMetrics { .swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "retried_txs_per_account_data_limit_count", + self.retried_txs_per_account_data_limit_count + .swap(0, Ordering::Relaxed) as i64, + i64 + ), ); } } diff --git a/core/src/repair_generic_traversal.rs b/core/src/repair_generic_traversal.rs index 8f35f67498e852..b5d78667828f3a 100644 --- a/core/src/repair_generic_traversal.rs +++ b/core/src/repair_generic_traversal.rs @@ -57,7 +57,7 @@ pub fn get_unknown_last_index( .entry(slot) .or_insert_with(|| blockstore.meta(slot).unwrap()); if let Some(slot_meta) = slot_meta { - if slot_meta.known_last_index().is_none() { + if slot_meta.last_index.is_none() { let shred_index = blockstore.get_index(slot).unwrap(); let num_processed_shreds = if let Some(shred_index) = shred_index { shred_index.data().num_shreds() as u64 @@ -123,7 +123,7 @@ pub fn get_closest_completion( if slot_meta.is_full() { continue; } - if let Some(last_index) = slot_meta.known_last_index() { + if let Some(last_index) = slot_meta.last_index { let shred_index = blockstore.get_index(slot).unwrap(); let dist = if let Some(shred_index) = shred_index { let shred_count = shred_index.data().num_shreds() as u64; diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index fecd40b322966c..ee91be1cd47128 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -27,7 +27,7 @@ use { shred::{Shred, ShredType}, }, solana_measure::measure::Measure, - solana_perf::packet::Packets, + solana_perf::packet::PacketBatch, solana_rayon_threadlimit::get_thread_count, solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions}, solana_runtime::{bank::Bank, bank_forks::BankForks}, @@ -433,7 +433,7 @@ impl RetransmitStage { cluster_info: Arc, retransmit_sockets: Arc>, repair_socket: Arc, - verified_receiver: Receiver>, + verified_receiver: Receiver>, exit: Arc, cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, epoch_schedule: EpochSchedule, @@ -610,10 +610,10 @@ mod tests { let shred = Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0); // it should send this over the sockets. 
retransmit_sender.send(vec![shred]).unwrap(); - let mut packets = Packets::new(vec![]); - solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap(); - assert_eq!(packets.packets.len(), 1); - assert!(!packets.packets[0].meta.repair); + let mut packet_batch = PacketBatch::new(vec![]); + solana_streamer::packet::recv_from(&mut packet_batch, &me_retransmit, 1).unwrap(); + assert_eq!(packet_batch.packets.len(), 1); + assert!(!packet_batch.packets[0].meta.repair); } #[test] diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 2086661e785818..d668dda68e1fad 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -25,11 +25,11 @@ use { }, solana_measure::measure::Measure, solana_metrics::inc_new_counter_debug, - solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler}, + solana_perf::packet::{limited_deserialize, PacketBatch, PacketBatchRecycler}, solana_sdk::{ clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms, }, - solana_streamer::streamer::{PacketReceiver, PacketSender}, + solana_streamer::streamer::{PacketBatchReceiver, PacketBatchSender}, std::{ collections::HashSet, net::SocketAddr, @@ -229,12 +229,12 @@ impl ServeRepair { fn handle_repair( me: &Arc>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, request: RepairProtocol, stats: &mut ServeRepairStats, - ) -> Option { + ) -> Option { let now = Instant::now(); let my_id = me.read().unwrap().my_id(); @@ -317,10 +317,10 @@ impl ServeRepair { /// Process messages from the network fn run_listen( obj: &Arc>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, blockstore: Option<&Arc>, - requests_receiver: &PacketReceiver, - response_sender: &PacketSender, + requests_receiver: &PacketBatchReceiver, + response_sender: &PacketBatchSender, stats: &mut ServeRepairStats, max_packets: &mut usize, ) -> Result<()> { @@ -392,12 +392,12 @@ impl ServeRepair { pub fn listen( me: Arc>, blockstore: Option>, - requests_receiver: PacketReceiver, - response_sender: PacketSender, + requests_receiver: PacketBatchReceiver, + response_sender: PacketBatchSender, exit: &Arc, ) -> JoinHandle<()> { let exit = exit.clone(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); Builder::new() .name("solana-repair-listen".to_string()) .spawn(move || { @@ -432,14 +432,14 @@ impl ServeRepair { fn handle_packets( me: &Arc>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, blockstore: Option<&Arc>, - packets: Packets, - response_sender: &PacketSender, + packet_batch: PacketBatch, + response_sender: &PacketBatchSender, stats: &mut ServeRepairStats, ) { // iter over the packets - packets.packets.iter().for_each(|packet| { + packet_batch.packets.iter().for_each(|packet| { let from_addr = packet.meta.addr(); limited_deserialize(&packet.data[..packet.meta.size]) .into_iter() @@ -609,7 +609,7 @@ impl ServeRepair { } fn run_window_request( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from: &ContactInfo, from_addr: &SocketAddr, blockstore: Option<&Arc>, @@ -617,7 +617,7 @@ impl ServeRepair { slot: Slot, shred_index: u64, nonce: Nonce, - ) -> Option { + ) -> Option { if let Some(blockstore) = blockstore { // Try to find the requested index in one of the slots let packet = repair_response::repair_response_packet( @@ -630,7 +630,7 @@ impl ServeRepair { if let Some(packet) = packet { 
inc_new_counter_debug!("serve_repair-window-request-ledger", 1); - return Some(Packets::new_unpinned_with_recycler_data( + return Some(PacketBatch::new_unpinned_with_recycler_data( recycler, "run_window_request", vec![packet], @@ -651,13 +651,13 @@ impl ServeRepair { } fn run_highest_window_request( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, slot: Slot, highest_index: u64, nonce: Nonce, - ) -> Option { + ) -> Option { let blockstore = blockstore?; // Try to find the requested index in one of the slots let meta = blockstore.meta(slot).ok()??; @@ -670,7 +670,7 @@ impl ServeRepair { from_addr, nonce, )?; - return Some(Packets::new_unpinned_with_recycler_data( + return Some(PacketBatch::new_unpinned_with_recycler_data( recycler, "run_highest_window_request", vec![packet], @@ -680,14 +680,14 @@ impl ServeRepair { } fn run_orphan( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, mut slot: Slot, max_responses: usize, nonce: Nonce, - ) -> Option { - let mut res = Packets::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan"); + ) -> Option { + let mut res = PacketBatch::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan"); if let Some(blockstore) = blockstore { // Try to find the next "n" parent slots of the input slot while let Ok(Some(meta)) = blockstore.meta(slot) { @@ -720,12 +720,12 @@ impl ServeRepair { } fn run_ancestor_hashes( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, slot: Slot, nonce: Nonce, - ) -> Option { + ) -> Option { let blockstore = blockstore?; let ancestor_slot_hashes = if blockstore.is_duplicate_confirmed(slot) { let ancestor_iterator = @@ -746,7 +746,7 @@ impl ServeRepair { from_addr, nonce, )?; - Some(Packets::new_unpinned_with_recycler_data( + Some(PacketBatch::new_unpinned_with_recycler_data( recycler, "run_ancestor_hashes", vec![packet], @@ -778,7 +778,7 @@ mod tests { /// test run_window_request responds with the right shred, and do not overrun fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) { - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { @@ -848,7 +848,7 @@ mod tests { /// test window requests respond with the right shred, and do not overrun fn run_window_request(slot: Slot, nonce: Nonce) { - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { @@ -1017,7 +1017,7 @@ mod tests { fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) { solana_logger::setup(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); @@ -1091,7 +1091,7 @@ mod tests { #[test] fn run_orphan_corrupted_shred_size() { solana_logger::setup(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); @@ -1152,7 +1152,7 @@ mod tests { #[test] fn test_run_ancestor_hashes() { solana_logger::setup(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { let slot 
= 0; diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index a0d02ba4a14caa..a580561870cdb6 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -6,12 +6,12 @@ use { solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats}, solana_perf::{ cuda_runtime::PinnedVec, - packet::{Packet, PacketsRecycler}, + packet::{Packet, PacketBatchRecycler}, recycler::Recycler, }, solana_runtime::bank_forks::BankForks, solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT}, - solana_streamer::streamer::{self, PacketReceiver, PacketSender}, + solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender}, std::{ net::UdpSocket, sync::{atomic::AtomicBool, mpsc::channel, Arc, RwLock}, @@ -63,8 +63,8 @@ impl ShredFetchStage { // updates packets received on a channel and sends them on another channel fn modify_packets( - recvr: PacketReceiver, - sendr: PacketSender, + recvr: PacketBatchReceiver, + sendr: PacketBatchSender, bank_forks: Option>>, name: &'static str, modify: F, @@ -83,7 +83,7 @@ impl ShredFetchStage { let mut stats = ShredFetchStats::default(); let mut packet_hasher = PacketHasher::default(); - while let Some(mut p) = recvr.iter().next() { + while let Some(mut packet_batch) = recvr.iter().next() { if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT { last_updated = Instant::now(); packet_hasher.reset(); @@ -97,8 +97,8 @@ impl ShredFetchStage { slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch()); } } - stats.shred_count += p.packets.len(); - p.packets.iter_mut().for_each(|packet| { + stats.shred_count += packet_batch.packets.len(); + packet_batch.packets.iter_mut().for_each(|packet| { Self::process_packet( packet, &mut shreds_received, @@ -124,7 +124,7 @@ impl ShredFetchStage { stats = ShredFetchStats::default(); last_stats = Instant::now(); } - if sendr.send(p).is_err() { + if sendr.send(packet_batch).is_err() { break; } } @@ -133,7 +133,7 @@ impl ShredFetchStage { fn packet_modifier( sockets: Vec>, exit: &Arc, - sender: PacketSender, + sender: PacketBatchSender, recycler: Recycler>, bank_forks: Option>>, name: &'static str, @@ -169,11 +169,11 @@ impl ShredFetchStage { sockets: Vec>, forward_sockets: Vec>, repair_socket: Arc, - sender: &PacketSender, + sender: &PacketBatchSender, bank_forks: Option>>, exit: &Arc, ) -> Self { - let recycler: PacketsRecycler = Recycler::warmed(100, 1024); + let recycler: PacketBatchRecycler = Recycler::warmed(100, 1024); let (mut tvu_threads, tvu_filter) = Self::packet_modifier( sockets, diff --git a/core/src/sigverify.rs b/core/src/sigverify.rs index 8ffa30bb84168a..74dbf5bdfc80ff 100644 --- a/core/src/sigverify.rs +++ b/core/src/sigverify.rs @@ -5,11 +5,11 @@ //! 
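// A rough sketch (not part of this patch), assuming the renamed counting helper simply
// totals packets across batches, i.e. the same `.packets.len()` sum the banking-stage
// hunks compute inline. `total_packets` is an illustrative name, not the real
// `count_packets_in_batches` re-exported below.
use solana_perf::packet::PacketBatch;

fn total_packets(batches: &[PacketBatch]) -> usize {
    // After this rename a "batch" is a PacketBatch, so counting means summing its packets.
    batches.iter().map(|batch| batch.packets.len()).sum()
}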
pub use solana_perf::sigverify::{ - batch_size, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset, + count_packets_in_batches, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset, }; use { crate::sigverify_stage::SigVerifier, - solana_perf::{cuda_runtime::PinnedVec, packet::Packets, recycler::Recycler, sigverify}, + solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify}, }; #[derive(Clone)] @@ -40,13 +40,13 @@ impl Default for TransactionSigVerifier { } impl SigVerifier for TransactionSigVerifier { - fn verify_batch(&self, mut batch: Vec) -> Vec { + fn verify_batches(&self, mut batches: Vec) -> Vec { sigverify::ed25519_verify( - &mut batch, + &mut batches, &self.recycler, &self.recycler_out, self.reject_non_vote, ); - batch + batches } } diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index 08ebae0bb22d8d..85078f510f3c6a 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -5,7 +5,7 @@ use { leader_schedule_cache::LeaderScheduleCache, shred::Shred, sigverify_shreds::verify_shreds_gpu, }, - solana_perf::{self, packet::Packets, recycler_cache::RecyclerCache}, + solana_perf::{self, packet::PacketBatch, recycler_cache::RecyclerCache}, solana_runtime::bank_forks::BankForks, std::{ collections::{HashMap, HashSet}, @@ -32,7 +32,7 @@ impl ShredSigVerifier { recycler_cache: RecyclerCache::warmed(), } } - fn read_slots(batches: &[Packets]) -> HashSet { + fn read_slots(batches: &[PacketBatch]) -> HashSet { batches .iter() .flat_map(|batch| batch.packets.iter().filter_map(Shred::get_slot_from_packet)) @@ -41,7 +41,7 @@ impl ShredSigVerifier { } impl SigVerifier for ShredSigVerifier { - fn verify_batch(&self, mut batches: Vec) -> Vec { + fn verify_batches(&self, mut batches: Vec) -> Vec { let r_bank = self.bank_forks.read().unwrap().working_bank(); let slots: HashSet = Self::read_slots(&batches); let mut leader_slots: HashMap = slots @@ -88,13 +88,13 @@ pub mod tests { 0, 0xc0de, ); - let mut batch = [Packets::default(), Packets::default()]; + let mut batches = [PacketBatch::default(), PacketBatch::default()]; let keypair = Keypair::new(); Shredder::sign_shred(&keypair, &mut shred); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let mut shred = Shred::new_from_data( 0xc0de_dead, @@ -108,16 +108,16 @@ pub mod tests { 0xc0de, ); Shredder::sign_shred(&keypair, &mut shred); - batch[1].packets.resize(1, Packet::default()); - batch[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[1].packets[0].meta.size = shred.payload.len(); + batches[1].packets.resize(1, Packet::default()); + batches[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[1].packets[0].meta.size = shred.payload.len(); let expected: HashSet = [0xc0de_dead, 0xdead_c0de].iter().cloned().collect(); - assert_eq!(ShredSigVerifier::read_slots(&batch), expected); + assert_eq!(ShredSigVerifier::read_slots(&batches), expected); } #[test] - fn test_sigverify_shreds_verify_batch() { + fn test_sigverify_shreds_verify_batches() { let leader_keypair = Arc::new(Keypair::new()); let leader_pubkey = leader_keypair.pubkey(); let bank = Bank::new_for_tests( @@ 
-127,8 +127,8 @@ pub mod tests { let bf = Arc::new(RwLock::new(BankForks::new(bank))); let verifier = ShredSigVerifier::new(bf, cache); - let mut batch = vec![Packets::default()]; - batch[0].packets.resize(2, Packet::default()); + let mut batches = vec![PacketBatch::default()]; + batches[0].packets.resize(2, Packet::default()); let mut shred = Shred::new_from_data( 0, @@ -142,8 +142,8 @@ pub mod tests { 0xc0de, ); Shredder::sign_shred(&leader_keypair, &mut shred); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let mut shred = Shred::new_from_data( 0, @@ -158,10 +158,10 @@ pub mod tests { ); let wrong_keypair = Keypair::new(); Shredder::sign_shred(&wrong_keypair, &mut shred); - batch[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[1].meta.size = shred.payload.len(); + batches[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[1].meta.size = shred.payload.len(); - let rv = verifier.verify_batch(batch); + let rv = verifier.verify_batches(batches); assert!(!rv[0].packets[0].meta.discard); assert!(rv[0].packets[1].meta.discard); } diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index 9810c5dc270852..9b63ba2b83c7b4 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -9,9 +9,9 @@ use { crate::sigverify, crossbeam_channel::{SendError, Sender as CrossbeamSender}, solana_measure::measure::Measure, - solana_perf::packet::Packets, + solana_perf::packet::PacketBatch, solana_sdk::timing, - solana_streamer::streamer::{self, PacketReceiver, StreamerError}, + solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError}, std::{ collections::HashMap, sync::mpsc::{Receiver, RecvTimeoutError}, @@ -26,7 +26,7 @@ const MAX_SIGVERIFY_BATCH: usize = 10_000; #[derive(Error, Debug)] pub enum SigVerifyServiceError { #[error("send packets batch error")] - Send(#[from] SendError>), + Send(#[from] SendError>), #[error("streamer error")] Streamer(#[from] StreamerError), @@ -39,7 +39,7 @@ pub struct SigVerifyStage { } pub trait SigVerifier { - fn verify_batch(&self, batch: Vec) -> Vec; + fn verify_batches(&self, batches: Vec) -> Vec; } #[derive(Default, Clone)] @@ -49,7 +49,7 @@ pub struct DisabledSigVerifier {} struct SigVerifierStats { recv_batches_us_hist: histogram::Histogram, // time to call recv_batch verify_batches_pp_us_hist: histogram::Histogram, // per-packet time to call verify_batch - batches_hist: histogram::Histogram, // number of Packets structures per verify call + batches_hist: histogram::Histogram, // number of packet batches per verify call packets_hist: histogram::Histogram, // number of packets per verify call total_batches: usize, total_packets: usize, @@ -122,24 +122,24 @@ impl SigVerifierStats { } impl SigVerifier for DisabledSigVerifier { - fn verify_batch(&self, mut batch: Vec) -> Vec { - sigverify::ed25519_verify_disabled(&mut batch); - batch + fn verify_batches(&self, mut batches: Vec) -> Vec { + sigverify::ed25519_verify_disabled(&mut batches); + batches } } impl SigVerifyStage { #[allow(clippy::new_ret_no_self)] pub fn new( - packet_receiver: Receiver, - verified_sender: CrossbeamSender>, + packet_receiver: Receiver, + verified_sender: CrossbeamSender>, verifier: T, ) -> Self { let thread_hdl = 
Self::verifier_services(packet_receiver, verified_sender, verifier); Self { thread_hdl } } - pub fn discard_excess_packets(batches: &mut Vec, max_packets: usize) { + pub fn discard_excess_packets(batches: &mut Vec, max_packets: usize) { let mut received_ips = HashMap::new(); for (batch_index, batch) in batches.iter().enumerate() { for (packet_index, packets) in batch.packets.iter().enumerate() { @@ -169,12 +169,12 @@ impl SigVerifyStage { } fn verifier( - recvr: &PacketReceiver, - sendr: &CrossbeamSender>, + recvr: &PacketBatchReceiver, + sendr: &CrossbeamSender>, verifier: &T, stats: &mut SigVerifierStats, ) -> Result<()> { - let (mut batches, num_packets, recv_duration) = streamer::recv_batch(recvr)?; + let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?; let batches_len = batches.len(); debug!( @@ -187,7 +187,7 @@ impl SigVerifyStage { } let mut verify_batch_time = Measure::start("sigverify_batch_time"); - sendr.send(verifier.verify_batch(batches))?; + sendr.send(verifier.verify_batches(batches))?; verify_batch_time.stop(); debug!( @@ -216,8 +216,8 @@ impl SigVerifyStage { } fn verifier_service( - packet_receiver: PacketReceiver, - verified_sender: CrossbeamSender>, + packet_receiver: PacketBatchReceiver, + verified_sender: CrossbeamSender>, verifier: &T, ) -> JoinHandle<()> { let verifier = verifier.clone(); @@ -252,8 +252,8 @@ impl SigVerifyStage { } fn verifier_services( - packet_receiver: PacketReceiver, - verified_sender: CrossbeamSender>, + packet_receiver: PacketBatchReceiver, + verified_sender: CrossbeamSender>, verifier: T, ) -> JoinHandle<()> { Self::verifier_service(packet_receiver, verified_sender, &verifier) @@ -268,11 +268,12 @@ impl SigVerifyStage { mod tests { use {super::*, solana_perf::packet::Packet}; - fn count_non_discard(packets: &[Packets]) -> usize { - packets + fn count_non_discard(packet_batches: &[PacketBatch]) -> usize { + packet_batches .iter() - .map(|pp| { - pp.packets + .map(|batch| { + batch + .packets .iter() .map(|p| if p.meta.discard { 0 } else { 1 }) .sum::() @@ -283,14 +284,14 @@ mod tests { #[test] fn test_packet_discard() { solana_logger::setup(); - let mut p = Packets::default(); - p.packets.resize(10, Packet::default()); - p.packets[3].meta.addr = [1u16; 8]; - let mut packets = vec![p]; + let mut batch = PacketBatch::default(); + batch.packets.resize(10, Packet::default()); + batch.packets[3].meta.addr = [1u16; 8]; + let mut batches = vec![batch]; let max = 3; - SigVerifyStage::discard_excess_packets(&mut packets, max); - assert_eq!(count_non_discard(&packets), max); - assert!(!packets[0].packets[0].meta.discard); - assert!(!packets[0].packets[3].meta.discard); + SigVerifyStage::discard_excess_packets(&mut batches, max); + assert_eq!(count_non_discard(&batches), max); + assert!(!batches[0].packets[0].meta.discard); + assert!(!batches[0].packets[3].meta.discard); } } diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs index a50cf9033e68d3..b11b1c0e7ccbbf 100644 --- a/core/src/verified_vote_packets.rs +++ b/core/src/verified_vote_packets.rs @@ -1,7 +1,7 @@ use { crate::{cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, result::Result}, crossbeam_channel::Select, - solana_perf::packet::Packets, + solana_perf::packet::PacketBatch, solana_runtime::bank::Bank, solana_sdk::{ account::from_account, clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signature, @@ -20,7 +20,7 @@ const MAX_VOTES_PER_VALIDATOR: usize = 1000; pub struct VerifiedVoteMetadata { pub vote_account_key: 
Pubkey, pub vote: Box, - pub packet: Packets, + pub packet_batch: PacketBatch, pub signature: Signature, } @@ -70,7 +70,7 @@ impl<'a> ValidatorGossipVotesIterator<'a> { /// /// Iterator is done after iterating through all vote accounts impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { - type Item = Vec; + type Item = Vec; fn next(&mut self) -> Option { // TODO: Maybe prioritize by stake weight @@ -116,7 +116,7 @@ impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { None } }) - .collect::>() + .collect::>() }) }) }); @@ -130,7 +130,7 @@ impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { } } -pub type SingleValidatorVotes = BTreeMap<(Slot, Hash), (Packets, Signature)>; +pub type SingleValidatorVotes = BTreeMap<(Slot, Hash), (PacketBatch, Signature)>; #[derive(Default)] pub struct VerifiedVotePackets(HashMap); @@ -150,7 +150,7 @@ impl VerifiedVotePackets { let VerifiedVoteMetadata { vote_account_key, vote, - packet, + packet_batch, signature, } = verfied_vote_metadata; if vote.is_empty() { @@ -161,7 +161,7 @@ impl VerifiedVotePackets { let hash = vote.hash(); let validator_votes = self.0.entry(vote_account_key).or_default(); - validator_votes.insert((slot, hash), (packet, signature)); + validator_votes.insert((slot, hash), (packet_batch, signature)); if validator_votes.len() > MAX_VOTES_PER_VALIDATOR { let smallest_key = validator_votes.keys().next().cloned().unwrap(); @@ -200,7 +200,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote.clone()), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -220,7 +220,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -242,7 +242,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -265,7 +265,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[2u8; 64]), }]) .unwrap(); @@ -304,7 +304,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -341,7 +341,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new_unique(), }]) .unwrap(); @@ -395,7 +395,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: Box::new(vote), - packet: Packets::new(vec![Packet::default(); num_packets]), + packet_batch: PacketBatch::new(vec![Packet::default(); num_packets]), signature: Signature::new_unique(), }]) .unwrap(); @@ -428,12 +428,12 @@ mod tests { // Get and verify batches let num_expected_batches = 2; for _ in 0..num_expected_batches { - let validator_batch: Vec = gossip_votes_iterator.next().unwrap(); + let validator_batch: Vec = gossip_votes_iterator.next().unwrap(); assert_eq!(validator_batch.len(), slot_hashes.slot_hashes().len()); let expected_len = validator_batch[0].packets.len(); assert!(validator_batch .iter() - .all(|p| p.packets.len() == expected_len)); + 
.all(|batch| batch.packets.len() == expected_len)); } // Should be empty now @@ -462,7 +462,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new_unique(), }]) .unwrap(); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 919faaab1336c7..978d39c3ed2549 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -22,7 +22,7 @@ use { }, solana_measure::measure::Measure, solana_metrics::{inc_new_counter_debug, inc_new_counter_error}, - solana_perf::packet::{Packet, Packets}, + solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, pubkey::Pubkey}, @@ -164,8 +164,8 @@ fn verify_shred_slot(shred: &Shred, root: u64) -> bool { match shred.shred_type() { // Only data shreds have parent information ShredType::Data => match shred.parent() { - Some(parent) => blockstore::verify_shred_slots(shred.slot(), parent, root), - None => false, + Ok(parent) => blockstore::verify_shred_slots(shred.slot(), parent, root), + Err(_) => false, }, // Filter out outdated coding shreds ShredType::Code => shred.slot() >= root, @@ -353,7 +353,7 @@ fn recv_window( blockstore: &Blockstore, bank_forks: &RwLock, insert_shred_sender: &CrossbeamSender<(Vec, Vec>)>, - verified_receiver: &CrossbeamReceiver>, + verified_receiver: &CrossbeamReceiver>, retransmit_sender: &Sender>, shred_filter: F, thread_pool: &ThreadPool, @@ -458,7 +458,7 @@ impl WindowService { #[allow(clippy::too_many_arguments)] pub(crate) fn new( blockstore: Arc, - verified_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, retransmit_sender: Sender>, repair_socket: Arc, exit: Arc, @@ -629,7 +629,7 @@ impl WindowService { exit: Arc, blockstore: Arc, insert_sender: CrossbeamSender<(Vec, Vec>)>, - verified_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, shred_filter: F, bank_forks: Arc>, retransmit_sender: Sender>, diff --git a/core/tests/ledger_cleanup.rs b/core/tests/ledger_cleanup.rs index cf43483d0cbe64..c8e6c909c31f4e 100644 --- a/core/tests/ledger_cleanup.rs +++ b/core/tests/ledger_cleanup.rs @@ -9,7 +9,6 @@ mod tests { solana_ledger::{ blockstore::{make_many_slot_entries, Blockstore}, get_tmp_ledger_path, - shred::Shred, }, solana_measure::measure::Measure, std::{ @@ -48,6 +47,7 @@ mod tests { assert_compaction: bool, compaction_interval: Option, no_compaction: bool, + num_writers: u64, } #[derive(Clone, Copy, Debug)] @@ -167,6 +167,7 @@ mod tests { non_zero => Some(non_zero), }; let no_compaction = read_env("NO_COMPACTION", false); + let num_writers = read_env("NUM_WRITERS", 1); BenchmarkConfig { benchmark_slots, @@ -180,6 +181,7 @@ mod tests { assert_compaction, compaction_interval, no_compaction, + num_writers, } } @@ -221,6 +223,12 @@ mod tests { *storage_previous = storage_now; } + /** + * Example run command: + * BENCHMARK_SLOTS=10000 BATCH_SIZE=10 ENTRIES_PER_SLOT=1000 NUM_WRITERS=1 \ + * PRE_GENERATE_DATA=true cargo test --release tests::test_ledger_cleanup_compaction \ + * -- --exact --nocapture + */ #[test] fn test_ledger_cleanup_compaction() { solana_logger::setup(); @@ -243,6 +251,7 @@ mod tests { let stop_size_iterations = config.stop_size_iterations; let pre_generate_data = config.pre_generate_data; let compaction_interval = config.compaction_interval; + let num_writers = config.num_writers; let batches = 
benchmark_slots / batch_size; @@ -260,18 +269,20 @@ mod tests { let exit_cpu = Arc::new(AtomicBool::new(false)); let sys = CpuStatsUpdater::new(&exit_cpu); - let mut generated_batches = VecDeque::>::new(); + let mut shreds = VecDeque::new(); if pre_generate_data { let t0 = Instant::now(); eprintln!("PRE_GENERATE_DATA: (this may take a while)"); for i in 0..batches { let start_slot = i * batch_size; - let (shreds, _) = make_many_slot_entries(start_slot, batch_size, entries_per_slot); - generated_batches.push_back(shreds); + let (new_shreds, _) = + make_many_slot_entries(start_slot, batch_size, entries_per_slot); + shreds.push_back(new_shreds); } eprintln!("PRE_GENERATE_DATA: took {} ms", t0.elapsed().as_millis()); - }; + } + let shreds = Arc::new(Mutex::new(shreds)); let time_initial = Instant::now(); let mut time_previous = time_initial; @@ -296,63 +307,70 @@ mod tests { let mut total_slots = 0; let mut time = Instant::now(); let mut start = Measure::start("start"); - let shreds: Arc>>> = Arc::new(Mutex::new(VecDeque::new())); - let shreds1 = shreds.clone(); let insert_exit = Arc::new(AtomicBool::new(false)); - let insert_exit1 = insert_exit.clone(); - let blockstore1 = blockstore.clone(); - let insert_thread = Builder::new() - .name("insert_shreds".to_string()) - .spawn(move || { - let start = Instant::now(); - let mut now = Instant::now(); - let mut total = 0; - let mut total_batches = 0; - let mut total_inserted_shreds = 0; - let mut num_shreds = 0; - let mut max_speed = 0f32; - let mut min_speed = f32::MAX; - loop { - let (new_shreds, len) = { - let mut sl = shreds1.lock().unwrap(); - (sl.pop_front(), sl.len()) - }; - if now.elapsed().as_secs() > 0 { - let shreds_per_second = num_shreds as f32 / now.elapsed().as_secs() as f32; - warn!( - "tried: {} inserted: {} batches: {} len: {} shreds_per_second: {}", - total, total_inserted_shreds, total_batches, len, shreds_per_second, - ); - let average_speed = - total_inserted_shreds as f32 / start.elapsed().as_secs() as f32; - max_speed = max_speed.max(shreds_per_second); - min_speed = min_speed.min(shreds_per_second); - warn!( - "highest: {} lowest: {} avg: {}", - max_speed, min_speed, average_speed - ); - now = Instant::now(); - num_shreds = 0; - } - if let Some(new_shreds) = new_shreds { - total += new_shreds.len(); - total_batches += 1; - let br = blockstore1.insert_shreds(new_shreds, None, false).unwrap(); - total_inserted_shreds += br.1.len(); - num_shreds += br.1.len(); - } else { - thread::sleep(Duration::from_millis(200)); - } - if insert_exit1.load(Ordering::Relaxed) { - info!( - "insert exiting... 
highest shreds/s: {} lowest shreds/s: {}", - max_speed, min_speed - ); - break; + let mut insert_threads = vec![]; + + for i in 0..num_writers { + let cloned_insert_exit = insert_exit.clone(); + let cloned_blockstore = blockstore.clone(); + let cloned_shreds = shreds.clone(); + let insert_thread = Builder::new() + .name(format!("insert_shreds-{}", i)) + .spawn(move || { + let start = Instant::now(); + let mut now = Instant::now(); + let mut total = 0; + let mut total_batches = 0; + let mut total_inserted_shreds = 0; + let mut num_shreds = 0; + let mut max_speed = 0f32; + let mut min_speed = f32::MAX; + loop { + let (new_shreds, len) = { + let mut sl = cloned_shreds.lock().unwrap(); + (sl.pop_front(), sl.len()) + }; + if now.elapsed().as_secs() > 0 { + let shreds_per_second = num_shreds as f32 / now.elapsed().as_secs() as f32; + warn!( + "T{} tried: {} inserted: {} batches: {} len: {} shreds_per_second: {}", + i, total, total_inserted_shreds, total_batches, len, shreds_per_second, + ); + let average_speed = + total_inserted_shreds as f32 / start.elapsed().as_secs() as f32; + max_speed = max_speed.max(shreds_per_second); + min_speed = min_speed.min(shreds_per_second); + warn!( + "highest: {} lowest: {} avg: {}", + max_speed, min_speed, average_speed + ); + now = Instant::now(); + num_shreds = 0; + } + if let Some(new_shreds) = new_shreds { + total += new_shreds.len(); + total_batches += 1; + let br = cloned_blockstore.insert_shreds( + new_shreds, None, false).unwrap(); + total_inserted_shreds += br.1.len(); + num_shreds += br.1.len(); + } else { + warn!("Thread {} sleeps for 200 millis", i); + thread::sleep(Duration::from_millis(200)); + } + if cloned_insert_exit.load(Ordering::Relaxed) { + info!( + "insert exiting... highest shreds/s: {} lowest shreds/s: {}", + max_speed, min_speed + ); + break; + } } - } - }) - .unwrap(); + }) + .unwrap(); + insert_threads.push(insert_thread); + } + let mut entries_batch = make_many_slot_entries(0, batch_size, entries_per_slot).0; info!( "batch size: {} entries_per_slot: {} shreds_per_slot: {}", @@ -360,7 +378,7 @@ mod tests { entries_per_slot, entries_batch.len() ); - shreds.lock().unwrap().push_back(entries_batch.clone()); + for i in 0..batches { let start_slot = i * batch_size; @@ -377,23 +395,17 @@ mod tests { time = Instant::now(); } - if shreds.lock().unwrap().len() < 50 { + if !pre_generate_data && shreds.lock().unwrap().len() < 50 { let mut make_time = Measure::start("make_entries"); - let new_shreds = if pre_generate_data { - generated_batches.pop_front().unwrap() - } else { - num_slots += batch_size; - total_slots += batch_size; - entries_batch - .iter_mut() - .for_each(|shred| shred.set_slot(shred.slot() + batch_size)); - entries_batch.clone() - }; + num_slots += batch_size; + total_slots += batch_size; + entries_batch + .iter_mut() + .for_each(|shred| shred.set_slot(shred.slot() + batch_size)); + let new_shreds = entries_batch.clone(); shreds.lock().unwrap().push_back(new_shreds); make_time.stop(); total_make += make_time.as_us(); - } else { - thread::sleep(Duration::from_millis(200)); } sender.send(start_slot).unwrap(); @@ -422,7 +434,6 @@ mod tests { } } } - start.stop(); let mut now = Instant::now(); loop { if now.elapsed().as_secs() > 1 { @@ -439,12 +450,16 @@ mod tests { } } insert_exit.store(true, Ordering::Relaxed); - insert_thread.join().unwrap(); - info!( - "done {} {} shreds/s", + while let Some(thread) = insert_threads.pop() { + thread.join().unwrap(); + } + start.stop(); + + eprintln!( + "done {} {} slots/s", start, - (batches * 
batch_size) as f32 / start.as_s() + benchmark_slots as f32 / start.as_s() ); let u1 = storage_previous; diff --git a/docs/sidebars.js b/docs/sidebars.js index 3eb4c75df9f74d..b8e03e7dbb1082 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -91,6 +91,7 @@ module.exports = { }, "developing/test-validator", "developing/backwards-compatibility", + "developing/plugins/accountsdb_plugin" ], Integrating: ["integrations/exchange"], Validating: [ diff --git a/docs/src/cluster/overview.md b/docs/src/cluster/overview.md index fc73263ac34fa9..22f3acd00a056f 100644 --- a/docs/src/cluster/overview.md +++ b/docs/src/cluster/overview.md @@ -20,18 +20,15 @@ Clients send transactions to any validator's Transaction Processing Unit \(TPU\) ## Confirming Transactions -A Solana cluster is capable of subsecond _confirmation_ for up to 150 nodes with plans to scale up to hundreds of thousands of nodes. Once fully implemented, confirmation times are expected to increase only with the logarithm of the number of validators, where the logarithm's base is very high. If the base is one thousand, for example, it means that for the first thousand nodes, confirmation will be the duration of three network hops plus the time it takes the slowest validator of a supermajority to vote. For the next million nodes, confirmation increases by only one network hop. +A Solana cluster is capable of subsecond _confirmation_ for thousands of nodes with plans to scale up to hundreds of thousands of nodes. Confirmation times are expected to increase only with the logarithm of the number of validators, where the logarithm's base is very high. If the base is one thousand, for example, it means that for the first thousand nodes, confirmation will be the duration of three network hops plus the time it takes the slowest validator of a supermajority to vote. For the next million nodes, confirmation increases by only one network hop. Solana defines confirmation as the duration of time from when the leader timestamps a new entry to the moment when it recognizes a supermajority of ledger votes. -A gossip network is much too slow to achieve subsecond confirmation once the network grows beyond a certain size. The time it takes to send messages to all nodes is proportional to the square of the number of nodes. If a blockchain wants to achieve low confirmation and attempts to do it using a gossip network, it will be forced to centralize to just a handful of nodes. - Scalable confirmation can be achieved using the follow combination of techniques: 1. Timestamp transactions with a VDF sample and sign the timestamp. -2. Split the transactions into batches, send each to separate nodes and have - each node share its batch with its peers. +2. Split the transactions into batches, send each to separate nodes and have each node share its batch with its peers. 3. Repeat the previous step recursively until all nodes have all batches. @@ -39,4 +36,4 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. 
Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions. -A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propogation_](turbine-block-propagation.md). +A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing (December, 2021), the approach is scaling well up to about 1,250 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propogation_](turbine-block-propagation.md). diff --git a/docs/src/developing/plugins/accountsdb_plugin.md b/docs/src/developing/plugins/accountsdb_plugin.md new file mode 100644 index 00000000000000..f99c84fa5e4dd5 --- /dev/null +++ b/docs/src/developing/plugins/accountsdb_plugin.md @@ -0,0 +1,363 @@ +--- +title: AccountsDb Plugins +--- + +Overview +======== + +Validators under heavy RPC loads, such as when serving getProgramAccounts calls, +can fall behind the network. To solve this problem, the validator has been +enhanced to support a plugin mechanism through which the information about +accounts and slots can be transmitted to external data stores such as relational +databases, NoSQL databases or Kafka. RPC services then can be developed to +consume data from these external data stores with the possibility of more +flexible and targeted optimizations such as caching and indexing. This allows +the validator to focus on processing transactions without being slowed down by +busy RPC requests. + +This document describes the interfaces of the plugin and the referential plugin +implementation for the PostgreSQL database. + +[crates.io]: https://crates.io/search?q=solana- +[docs.rs]: https://docs.rs/releases/search?query=solana- + +### Important Crates: + +- [`solana-accountsdb-plugin-interface`] — This crate defines the plugin +interfaces. + +- [`solana-accountsdb-plugin-postgres`] — The crate for the referential +plugin implementation for the PostgreSQL database. + +[`solana-accountsdb-plugin-interface`]: https://docs.rs/solana-accountsdb-plugin-interface +[`solana-accountsdb-plugin-postgres`]: https://docs.rs/solana-accountsdb-plugin-postgres + + +The Plugin Interface +==================== + +The Plugin interface is declared in [`solana-accountsdb-plugin-interface`]. It +is defined by the trait `AccountsDbPlugin`. The plugin should implement the +trait and expose a "C" function `_create_plugin` to return the pointer to this +trait. For example, in the referential implementation, the following code +instantiates the PostgreSQL plugin `AccountsDbPluginPostgres ` and returns its +pointer. + +``` +#[no_mangle] +#[allow(improper_ctypes_definitions)] +/// # Safety +/// +/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin. 
+pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin { + let plugin = AccountsDbPluginPostgres::new(); + let plugin: Box = Box::new(plugin); + Box::into_raw(plugin) +} +``` + +A plugin implementation can implement the `on_load` method to initialize itself. +This function is invoked after a plugin is dynamically loaded into the validator +when it starts. The configuration of the plugin is controlled by a configuration +file in JSON format. The JSON file must have a field `libpath` that points +to the full path name of the shared library implementing the plugin, and may +have other configuration information, like connection parameters for the external +database. The plugin configuration file is specified by the validator's CLI +parameter `--accountsdb-plugin-config` and the file must be readable to the +validator process. + +Please see the [config file](#config) for the referential +PostgreSQL plugin below for an example. + +The plugin can implement the `on_unload` method to do any cleanup before the +plugin is unloaded when the validator is gracefully shutdown. + +The following method is used for notifying on an account update: + +``` + fn update_account( + &mut self, + account: ReplicaAccountInfoVersions, + slot: u64, + is_startup: bool, + ) -> Result<()> +``` + +The `ReplicaAccountInfoVersions` struct contains the metadata and data of the account +streamed. The `slot` points to the slot the account is being updated at. When +`is_startup` is true, it indicates the account is loaded from snapshots when +the validator starts up. When `is_startup` is false, the account is updated +when processing a transaction. + + +The following method is called when all accounts have been notified when the +validator restores the AccountsDb from snapshots at startup. + +``` +fn notify_end_of_startup(&mut self) -> Result<()> +``` + +When `update_account` is called during processing transactions, the plugin +should process the notification as fast as possible because any delay may +cause the validator to fall behind the network. Persistence to external data +store is best to be done asynchronously. + +The following method is used for notifying slot status changes: + +``` + fn update_slot_status( + &mut self, + slot: u64, + parent: Option, + status: SlotStatus, + ) -> Result<()> +``` + +To ensure data consistency, the plugin implementation can choose to abort +the validator in case of error persisting to external stores. When the +validator restarts the account data will be re-transmitted. + +For more details, please refer to the Rust documentation in +[`solana-accountsdb-plugin-interface`]. + +Example PostgreSQL Plugin +========================= + +The [`solana-accountsdb-plugin-postgres`] crate implements a plugin storing +account data to a PostgreSQL database to illustrate how a plugin can be +developed. + + +## Configuration File Format + + +The plugin is configured using the input configuration file. An example +configuration file looks like the following: + + +``` +{ + "libpath": "/solana/target/release/libsolana_accountsdb_plugin_postgres.so", + "host": "postgres-server", + "user": "solana", + "port": 5433, + "threads": 20, + "batch_size": 20, + "panic_on_db_errors": true, + "accounts_selector" : { + "accounts" : ["*"] + } +} +``` + +The `host`, `user`, and `port` control the PostgreSQL configuration +information. For more advanced connection options, please use the +`connection_str` field. 
Please see the [Rust postgres configuration](https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html). + +To improve the throughput to the database, the plugin supports connection pooling +using multiple threads, each maintaining a connection to the PostgreSQL database. +The count of the threads is controlled by the `threads` field. A higher thread +count usually offers better performance. + +To further improve performance when saving large numbers of accounts at +startup, the plugin uses bulk inserts. The batch size is controlled by the +`batch_size` parameter. This can help reduce the round trips to the database. + +The `panic_on_db_errors` field can be used to panic the validator in case of database +errors to ensure data consistency. +
+## Account Selection + +The `accounts_selector` can be used to filter the accounts that should be persisted. + +For example, one can use the following to persist only the accounts with particular +Base58-encoded Pubkeys: + +``` + "accounts_selector" : { + "accounts" : ["pubkey-1", "pubkey-2", ..., "pubkey-n"], + } +``` + +Or use the following to select accounts with certain program owners: + +``` + "accounts_selector" : { + "owners" : ["pubkey-owner-1", "pubkey-owner-2", ..., "pubkey-owner-m"], + } +``` + +To select all accounts, use the wildcard character (*): + +``` + "accounts_selector" : { + "accounts" : ["*"], + } +``` + +
+## Database Setup + +### Install PostgreSQL Server + +Please follow the [PostgreSQL Ubuntu Installation](https://www.postgresql.org/download/linux/ubuntu/) +instructions to install the PostgreSQL database server. For example, to +install postgresql-14: + +``` +sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' +wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - +sudo apt-get update +sudo apt-get -y install postgresql-14 +``` +### Control the Database Access + +Modify pg_hba.conf as necessary to grant the plugin access to the database. +For example, in /etc/postgresql/14/main/pg_hba.conf, the following entry allows +nodes with IPs in the CIDR 10.138.0.0/24 to access all databases. The validator +runs on a node with an IP in the specified range. + +``` +host all all 10.138.0.0/24 trust +``` + +It is recommended to run the database server on a separate node from the validator for +better performance. +
+### Configure the Database Performance Parameters + +Please refer to the [PostgreSQL Server Configuration](https://www.postgresql.org/docs/14/runtime-config.html) +for configuration details. The referential implementation uses the following +configuration in /etc/postgresql/14/main/postgresql.conf for better database performance; +these values differ from the default postgresql-14 installation. + +``` +max_connections = 200 # (change requires restart) +shared_buffers = 1GB # min 128kB +effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching +wal_level = minimal # minimal, replica, or logical +fsync = off # flush data to disk for crash safety +synchronous_commit = off # synchronization level; +full_page_writes = off # recover from partial page writes +max_wal_senders = 0 # max number of walsender processes +``` + +The sample [postgresql.conf](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/postgresql.conf) +can be used for reference.
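+ +Once the server is running (see the next section), you can optionally confirm that the tuned values are +active. This is only a suggested sanity check, not part of the reference setup; it assumes the port and +the Ubuntu postgresql-14 installation used elsewhere in this guide. Note that settings such as +`shared_buffers` and `wal_level` only take effect after a server restart. + +``` +sudo -u postgres psql -p 5433 -c "SHOW shared_buffers;" +sudo -u postgres psql -p 5433 -c "SHOW wal_level;" +```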
+ +### Create the Database Instance and the Role + +Start the server: + +``` +sudo systemctl start postgresql@14-main +``` + +Create the database. For example, the following creates a database named 'solana': + +``` +sudo -u postgres createdb solana -p 5433 +``` + +Create the database user. For example, the following creates a regular user named 'solana': + +``` +sudo -u postgres createuser -p 5433 solana +``` + +Verify the database is working using psql. For example, assuming the node running +PostgreSQL has the IP 10.138.0.9, the following command will land in a shell where +SQL commands can be entered: + +``` +psql -U solana -p 5433 -h 10.138.0.9 -w -d solana +``` +
+### Create the Schema Objects + +Use [create_schema.sql](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/create_schema.sql) +to create the objects for storing accounts and slots. + +Download the script from GitHub: + +``` +wget https://raw.githubusercontent.com/solana-labs/solana/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/create_schema.sql +``` + +Then run the script: + +``` +psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f create_schema.sql +``` + +After this, start the validator with the plugin by using the `--accountsdb-plugin-config` +argument mentioned above. + +### Destroy the Schema Objects + +To destroy the database objects created by `create_schema.sql`, use +[drop_schema.sql](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/drop_schema.sql). +For example, + +``` +psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f drop_schema.sql +``` +
+## Capture Historical Account Data + +The historical account data is captured using a database trigger, as shown in +`create_schema.sql`: + +``` +CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$ + BEGIN + INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on) + VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot, + OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on); + RETURN NEW; + END; + +$audit_account_update$ LANGUAGE plpgsql; + +CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account + FOR EACH ROW EXECUTE PROCEDURE audit_account_update(); +``` + +The historical data is stored in the account_audit table. + +The trigger can be dropped to disable this feature, for example: + + +``` +DROP TRIGGER account_update_trigger ON account; +``` + +Over time, the account_audit table can accumulate a large amount of data. You may choose to +limit that by deleting older historical data. + + +For example, the following SQL statement can be used to keep up to 1000 of the most +recent records for an account: + +``` +delete from account_audit a2 where (pubkey, write_version) in + (select pubkey, write_version from + (select a.pubkey, a.updated_on, a.slot, a.write_version, a.lamports, + rank() OVER ( partition by pubkey order by write_version desc) as rnk + from account_audit a) ranked + where ranked.rnk > 1000) +``` +
+## Performance Considerations + +When a validator lacks sufficient compute power, the overhead of saving the +account data can cause it to fall behind the network, especially when all +accounts or a large number of accounts are selected. The node hosting the +PostgreSQL database needs to be powerful enough to handle the database load +as well.
It has been found that using the GCP n2-standard-64 machine type for the +validator and n2-highmem-32 for the PostgreSQL node is adequate for +transmitting all accounts while keeping up with the network. In addition, it is +best to keep the validator and the PostgreSQL node in the same local network to +reduce latency. You may need to size the validator and database nodes +differently if serving other loads.
diff --git a/entry/Cargo.toml b/entry/Cargo.toml index 5fbfb68ed16309..53aa7615882add 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -15,7 +15,7 @@ dlopen_derive = "0.1.4" log = "0.4.11" rand = "0.7.0" rayon = "1.5.1" -serde = "1.0.130" +serde = "1.0.131" solana-measure = { path = "../measure", version = "=1.10.0" } solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.0" } solana-metrics = { path = "../metrics", version = "=1.10.0" } diff --git a/entry/src/entry.rs b/entry/src/entry.rs index 59a607838caa88..a0d5337b76365a 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -15,7 +15,7 @@ use { solana_metrics::*, solana_perf::{ cuda_runtime::PinnedVec, - packet::{Packet, Packets, PacketsRecycler, PACKETS_PER_BATCH}, + packet::{Packet, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH}, perf_libs, recycler::Recycler, sigverify, @@ -308,7 +308,7 @@ impl<'a> EntrySigVerificationState { pub struct VerifyRecyclers { hash_recycler: Recycler<PinnedVec<Hash>>, tick_count_recycler: Recycler<PinnedVec<u64>>, - packet_recycler: PacketsRecycler, + packet_recycler: PacketBatchRecycler, out_recycler: Recycler<PinnedVec<u8>>, tx_offset_recycler: Recycler<sigverify::TxOffset>, } @@ -499,12 +499,12 @@ pub fn start_verify_transactions( }) .flatten() .collect::<Vec<_>>(); - let mut packets_vec = entry_txs + let mut packet_batches = entry_txs .par_iter() .chunks(PACKETS_PER_BATCH) .map(|slice| { let vec_size = slice.len(); - let mut packets = Packets::new_with_recycler( + let mut packet_batch = PacketBatch::new_with_recycler( verify_recyclers.packet_recycler.clone(), vec_size, "entry-sig-verify", @@ -515,13 +515,13 @@ pub fn start_verify_transactions( // uninitialized anyway, so the initilization would simply write junk into // the vector anyway.
unsafe { - packets.packets.set_len(vec_size); + packet_batch.packets.set_len(vec_size); } let entry_tx_iter = slice .into_par_iter() .map(|tx| tx.to_versioned_transaction()); - let res = packets + let res = packet_batch .packets .par_iter_mut() .zip(entry_tx_iter) @@ -530,7 +530,7 @@ pub fn start_verify_transactions( Packet::populate_packet(pair.0, None, &pair.1).is_ok() }); if res { - Ok(packets) + Ok(packet_batch) } else { Err(TransactionError::SanitizeFailure) } @@ -542,14 +542,14 @@ pub fn start_verify_transactions( let gpu_verify_thread = thread::spawn(move || { let mut verify_time = Measure::start("sigverify"); sigverify::ed25519_verify( - &mut packets_vec, + &mut packet_batches, &tx_offset_recycler, &out_recycler, false, ); - let verified = packets_vec + let verified = packet_batches .iter() - .all(|packets| packets.packets.iter().all(|p| !p.meta.discard)); + .all(|batch| batch.packets.iter().all(|p| !p.meta.discard)); verify_time.stop(); (verified, verify_time.as_us()) }); diff --git a/explorer/package-lock.json b/explorer/package-lock.json index 23b01f469768b8..2344278def91a3 100644 --- a/explorer/package-lock.json +++ b/explorer/package-lock.json @@ -8,7 +8,7 @@ "name": "explorer", "version": "0.1.0", "dependencies": { - "@blockworks-foundation/mango-client": "^3.2.14", + "@blockworks-foundation/mango-client": "^3.2.15", "@bonfida/bot": "^0.5.3", "@bonfida/spl-name-service": "^0.1.22", "@cloudflare/stream-react": "^1.2.0", @@ -16,8 +16,8 @@ "@metaplex/js": "4.9.0", "@project-serum/serum": "^0.13.61", "@react-hook/debounce": "^4.0.0", - "@sentry/react": "^6.15.0", - "@solana/spl-token-registry": "^0.2.662", + "@sentry/react": "^6.16.1", + "@solana/spl-token-registry": "^0.2.801", "@solana/web3.js": "^1.31.0", "@testing-library/jest-dom": "^5.16.1", "@testing-library/react": "^12.1.2", @@ -57,7 +57,7 @@ "react-select": "^4.3.1", "sass": "^1.44.0", "superstruct": "^0.15.3", - "typescript": "^4.5.2" + "typescript": "^4.5.3" } }, "node_modules/@babel/code-frame": { @@ -1579,24 +1579,19 @@ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==" }, "node_modules/@blockworks-foundation/mango-client": { - "version": "3.2.14", - "resolved": "https://registry.npmjs.org/@blockworks-foundation/mango-client/-/mango-client-3.2.14.tgz", - "integrity": "sha512-EBO39zx3wtX4p/BmkEznVOPvuKI8dm33zxDYRvDVLoVGDSnNMBMpRao3bgHiQpGAUp962wX9ScYmLfiZ7qdoEQ==", + "version": "3.2.15", + "resolved": "https://registry.npmjs.org/@blockworks-foundation/mango-client/-/mango-client-3.2.15.tgz", + "integrity": "sha512-oyTMgQ7t6CjUIfJ26RygLg1eSu4zmMypD9iPWAglzFqJkmrwc0HOrr309ylEeUT1P5PxUxl3E3AaxjnhzLjMRw==", "dependencies": { "@project-serum/anchor": "^0.16.2", "@project-serum/serum": "0.13.55", "@project-serum/sol-wallet-adapter": "^0.2.0", "@solana/spl-token": "^0.1.6", "@solana/web3.js": "1.21.0", - "axios": "^0.21.1", "big.js": "^6.1.1", - "bigint-buffer": "^1.1.5", "bn.js": "^5.2.0", "buffer-layout": "^1.2.1", - "cross-fetch": "^3.1.4", "dotenv": "^10.0.0", - "dotenv-expand": "^5.1.0", - "encoding": "^0.1.13", "yargs": "^17.0.1" }, "engines": { @@ -4448,13 +4443,13 @@ "integrity": "sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==" }, "node_modules/@sentry/browser": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-6.15.0.tgz", - "integrity": "sha512-ZiqfHK5DMVgDsgMTuSwxilWIqEnZzy4yuJ9Sr6Iap1yZddPSiKHYjbBieSHn57UsWHViRB3ojbwu44LfvXKJdQ==", + "version": "6.16.1", + 
"resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-6.16.1.tgz", + "integrity": "sha512-F2I5RL7RTLQF9CccMrqt73GRdK3FdqaChED3RulGQX5lH6U3exHGFxwyZxSrY4x6FedfBFYlfXWWCJXpLnFkow==", "dependencies": { - "@sentry/core": "6.15.0", - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "@sentry/core": "6.16.1", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "tslib": "^1.9.3" }, "engines": { @@ -4462,14 +4457,14 @@ } }, "node_modules/@sentry/core": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/core/-/core-6.15.0.tgz", - "integrity": "sha512-mCbKyqvD1G3Re6gv6N8tRkBz84gvVWDfLtC6d1WBArIopzter6ktEbvq0cMT6EOvGI2OLXuJ6mtHA93/Q0gGpw==", - "dependencies": { - "@sentry/hub": "6.15.0", - "@sentry/minimal": "6.15.0", - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/core/-/core-6.16.1.tgz", + "integrity": "sha512-UFI0264CPUc5cR1zJH+S2UPOANpm6dLJOnsvnIGTjsrwzR0h8Hdl6rC2R/GPq+WNbnipo9hkiIwDlqbqvIU5vw==", + "dependencies": { + "@sentry/hub": "6.16.1", + "@sentry/minimal": "6.16.1", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "tslib": "^1.9.3" }, "engines": { @@ -4477,12 +4472,12 @@ } }, "node_modules/@sentry/hub": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/hub/-/hub-6.15.0.tgz", - "integrity": "sha512-cUbHPeG6kKpGBaEMgbTWeU03Y1Up5T3urGF+cgtrn80PmPYYSUPvVvWlZQWPb8CJZ1yQ0gySWo5RUTatBFrEHA==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/hub/-/hub-6.16.1.tgz", + "integrity": "sha512-4PGtg6AfpqMkreTpL7ymDeQ/U1uXv03bKUuFdtsSTn/FRf9TLS4JB0KuTZCxfp1IRgAA+iFg6B784dDkT8R9eg==", "dependencies": { - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "tslib": "^1.9.3" }, "engines": { @@ -4490,12 +4485,12 @@ } }, "node_modules/@sentry/minimal": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/minimal/-/minimal-6.15.0.tgz", - "integrity": "sha512-7RJIvZsjBa1qFUfMrAzQsWdfZT6Gm4t6ZTYfkpsXPBA35hkzglKbBrhhsUvkxGIhUGw/PiCUqxBUjcmzQP0vfg==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/minimal/-/minimal-6.16.1.tgz", + "integrity": "sha512-dq+mI1EQIvUM+zJtGCVgH3/B3Sbx4hKlGf2Usovm9KoqWYA+QpfVBholYDe/H2RXgO7LFEefDLvOdHDkqeJoyA==", "dependencies": { - "@sentry/hub": "6.15.0", - "@sentry/types": "6.15.0", + "@sentry/hub": "6.16.1", + "@sentry/types": "6.16.1", "tslib": "^1.9.3" }, "engines": { @@ -4503,14 +4498,14 @@ } }, "node_modules/@sentry/react": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/react/-/react-6.15.0.tgz", - "integrity": "sha512-vrrFF/KtPJQ41tmDCWpaR+bN+/TqPwqncsGLfbClE2irY3x3eCJjT2qPstlB7NQ6rTYtScyekbB0fOoNkq9FFg==", - "dependencies": { - "@sentry/browser": "6.15.0", - "@sentry/minimal": "6.15.0", - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/react/-/react-6.16.1.tgz", + "integrity": "sha512-n8fOEKbym4kBi946q3AWXBNy1UKTmABj/hE2nAJbTWhi5IwdM7WBG6QCT2yq7oTHLuTxQrAwgKQc+A6zFTyVHg==", + "dependencies": { + "@sentry/browser": "6.16.1", + "@sentry/minimal": "6.16.1", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "hoist-non-react-statics": "^3.3.2", "tslib": "^1.9.3" }, @@ -4522,19 +4517,19 @@ } }, "node_modules/@sentry/types": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/types/-/types-6.15.0.tgz", - "integrity": 
"sha512-zBw5gPUsofXUSpS3ZAXqRNedLRBvirl3sqkj2Lez7X2EkKRgn5D8m9fQIrig/X3TsKcXUpijDW5Buk5zeCVzJA==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/types/-/types-6.16.1.tgz", + "integrity": "sha512-Wh354g30UsJ5kYJbercektGX4ZMc9MHU++1NjeN2bTMnbofEcpUDWIiKeulZEY65IC1iU+1zRQQgtYO+/hgCUQ==", "engines": { "node": ">=6" } }, "node_modules/@sentry/utils": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-6.15.0.tgz", - "integrity": "sha512-gnhKKyFtnNmKWjDizo7VKD0/Vx8cgW1lCusM6WI7jy2jlO3bQA0+Dzgmr4mIReZ74mq4VpOd2Vfrx7ZldW1DMw==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-6.16.1.tgz", + "integrity": "sha512-7ngq/i4R8JZitJo9Sl8PDnjSbDehOxgr1vsoMmerIsyRZ651C/8B+jVkMhaAPgSdyJ0AlE3O7DKKTP1FXFw9qw==", "dependencies": { - "@sentry/types": "6.15.0", + "@sentry/types": "6.16.1", "tslib": "^1.9.3" }, "engines": { @@ -4616,9 +4611,9 @@ } }, "node_modules/@solana/spl-token-registry": { - "version": "0.2.662", - "resolved": "https://registry.npmjs.org/@solana/spl-token-registry/-/spl-token-registry-0.2.662.tgz", - "integrity": "sha512-0TDs5GcR4judBvND0BTfwcZOtexyUclyR8d71QAi9G75R1JE9ENPD2mGbwBy98FVy+hOEI9m5ZmgGuxu8ORBYA==", + "version": "0.2.801", + "resolved": "https://registry.npmjs.org/@solana/spl-token-registry/-/spl-token-registry-0.2.801.tgz", + "integrity": "sha512-v1Tndn4iDHvp9WSiWypUHnKUcR33qXjgjKjWJ0w0yG9M8zc3/WAaSWsj6YllRhxaBqLP4kMsjqpGyJyZzIAOaA==", "dependencies": { "cross-fetch": "3.0.6" }, @@ -7518,18 +7513,6 @@ "node": "*" } }, - "node_modules/bigint-buffer": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/bigint-buffer/-/bigint-buffer-1.1.5.tgz", - "integrity": "sha512-trfYco6AoZ+rKhKnxA0hgX0HAbVP/s808/EuDSe2JDzUnCp/xAsli35Orvk67UrTEcwuxZqYZDmfA2RXJgxVvA==", - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.3.0" - }, - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/bignumber.js": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz", @@ -9904,25 +9887,6 @@ "node": ">= 0.8" } }, - "node_modules/encoding": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", - "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "dependencies": { - "iconv-lite": "^0.6.2" - } - }, - "node_modules/encoding/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/end-of-stream": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", @@ -24672,9 +24636,9 @@ "integrity": "sha512-7uc1O8h1M1g0rArakJdf0uLRSSgFcYexrVoKo+bzJd32gd4gDy2L/Z+8/FjPnU9ydY3pEnVPtr9FyscYY60K1g==" }, "node_modules/typescript": { - "version": "4.5.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.2.tgz", - "integrity": "sha512-5BlMof9H1yGt0P8/WF+wPNw6GfctgGjXp5hkblpyT+8rkASSmkUKMXrxR0Xg8ThVCi/JnHQiKXeBaEwCeQwMFw==", + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.3.tgz", + "integrity": "sha512-eVYaEHALSt+s9LbvgEv4Ef+Tdq7hBiIZgii12xXJnukryt3pMgJf6aKhoCZ3FWQsu6sydEnkg11fYXLzhLBjeQ==", "bin": { "tsc": "bin/tsc", "tsserver": 
"bin/tsserver" @@ -28099,24 +28063,19 @@ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==" }, "@blockworks-foundation/mango-client": { - "version": "3.2.14", - "resolved": "https://registry.npmjs.org/@blockworks-foundation/mango-client/-/mango-client-3.2.14.tgz", - "integrity": "sha512-EBO39zx3wtX4p/BmkEznVOPvuKI8dm33zxDYRvDVLoVGDSnNMBMpRao3bgHiQpGAUp962wX9ScYmLfiZ7qdoEQ==", + "version": "3.2.15", + "resolved": "https://registry.npmjs.org/@blockworks-foundation/mango-client/-/mango-client-3.2.15.tgz", + "integrity": "sha512-oyTMgQ7t6CjUIfJ26RygLg1eSu4zmMypD9iPWAglzFqJkmrwc0HOrr309ylEeUT1P5PxUxl3E3AaxjnhzLjMRw==", "requires": { "@project-serum/anchor": "^0.16.2", "@project-serum/serum": "0.13.55", "@project-serum/sol-wallet-adapter": "^0.2.0", "@solana/spl-token": "^0.1.6", "@solana/web3.js": "1.21.0", - "axios": "^0.21.1", "big.js": "^6.1.1", - "bigint-buffer": "^1.1.5", "bn.js": "^5.2.0", "buffer-layout": "^1.2.1", - "cross-fetch": "^3.1.4", "dotenv": "^10.0.0", - "dotenv-expand": "^5.1.0", - "encoding": "^0.1.13", "yargs": "^17.0.1" }, "dependencies": { @@ -30257,72 +30216,72 @@ } }, "@sentry/browser": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-6.15.0.tgz", - "integrity": "sha512-ZiqfHK5DMVgDsgMTuSwxilWIqEnZzy4yuJ9Sr6Iap1yZddPSiKHYjbBieSHn57UsWHViRB3ojbwu44LfvXKJdQ==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-6.16.1.tgz", + "integrity": "sha512-F2I5RL7RTLQF9CccMrqt73GRdK3FdqaChED3RulGQX5lH6U3exHGFxwyZxSrY4x6FedfBFYlfXWWCJXpLnFkow==", "requires": { - "@sentry/core": "6.15.0", - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "@sentry/core": "6.16.1", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "tslib": "^1.9.3" } }, "@sentry/core": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/core/-/core-6.15.0.tgz", - "integrity": "sha512-mCbKyqvD1G3Re6gv6N8tRkBz84gvVWDfLtC6d1WBArIopzter6ktEbvq0cMT6EOvGI2OLXuJ6mtHA93/Q0gGpw==", - "requires": { - "@sentry/hub": "6.15.0", - "@sentry/minimal": "6.15.0", - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/core/-/core-6.16.1.tgz", + "integrity": "sha512-UFI0264CPUc5cR1zJH+S2UPOANpm6dLJOnsvnIGTjsrwzR0h8Hdl6rC2R/GPq+WNbnipo9hkiIwDlqbqvIU5vw==", + "requires": { + "@sentry/hub": "6.16.1", + "@sentry/minimal": "6.16.1", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "tslib": "^1.9.3" } }, "@sentry/hub": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/hub/-/hub-6.15.0.tgz", - "integrity": "sha512-cUbHPeG6kKpGBaEMgbTWeU03Y1Up5T3urGF+cgtrn80PmPYYSUPvVvWlZQWPb8CJZ1yQ0gySWo5RUTatBFrEHA==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/hub/-/hub-6.16.1.tgz", + "integrity": "sha512-4PGtg6AfpqMkreTpL7ymDeQ/U1uXv03bKUuFdtsSTn/FRf9TLS4JB0KuTZCxfp1IRgAA+iFg6B784dDkT8R9eg==", "requires": { - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "tslib": "^1.9.3" } }, "@sentry/minimal": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/minimal/-/minimal-6.15.0.tgz", - "integrity": "sha512-7RJIvZsjBa1qFUfMrAzQsWdfZT6Gm4t6ZTYfkpsXPBA35hkzglKbBrhhsUvkxGIhUGw/PiCUqxBUjcmzQP0vfg==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/minimal/-/minimal-6.16.1.tgz", + "integrity": 
"sha512-dq+mI1EQIvUM+zJtGCVgH3/B3Sbx4hKlGf2Usovm9KoqWYA+QpfVBholYDe/H2RXgO7LFEefDLvOdHDkqeJoyA==", "requires": { - "@sentry/hub": "6.15.0", - "@sentry/types": "6.15.0", + "@sentry/hub": "6.16.1", + "@sentry/types": "6.16.1", "tslib": "^1.9.3" } }, "@sentry/react": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/react/-/react-6.15.0.tgz", - "integrity": "sha512-vrrFF/KtPJQ41tmDCWpaR+bN+/TqPwqncsGLfbClE2irY3x3eCJjT2qPstlB7NQ6rTYtScyekbB0fOoNkq9FFg==", - "requires": { - "@sentry/browser": "6.15.0", - "@sentry/minimal": "6.15.0", - "@sentry/types": "6.15.0", - "@sentry/utils": "6.15.0", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/react/-/react-6.16.1.tgz", + "integrity": "sha512-n8fOEKbym4kBi946q3AWXBNy1UKTmABj/hE2nAJbTWhi5IwdM7WBG6QCT2yq7oTHLuTxQrAwgKQc+A6zFTyVHg==", + "requires": { + "@sentry/browser": "6.16.1", + "@sentry/minimal": "6.16.1", + "@sentry/types": "6.16.1", + "@sentry/utils": "6.16.1", "hoist-non-react-statics": "^3.3.2", "tslib": "^1.9.3" } }, "@sentry/types": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/types/-/types-6.15.0.tgz", - "integrity": "sha512-zBw5gPUsofXUSpS3ZAXqRNedLRBvirl3sqkj2Lez7X2EkKRgn5D8m9fQIrig/X3TsKcXUpijDW5Buk5zeCVzJA==" + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/types/-/types-6.16.1.tgz", + "integrity": "sha512-Wh354g30UsJ5kYJbercektGX4ZMc9MHU++1NjeN2bTMnbofEcpUDWIiKeulZEY65IC1iU+1zRQQgtYO+/hgCUQ==" }, "@sentry/utils": { - "version": "6.15.0", - "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-6.15.0.tgz", - "integrity": "sha512-gnhKKyFtnNmKWjDizo7VKD0/Vx8cgW1lCusM6WI7jy2jlO3bQA0+Dzgmr4mIReZ74mq4VpOd2Vfrx7ZldW1DMw==", + "version": "6.16.1", + "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-6.16.1.tgz", + "integrity": "sha512-7ngq/i4R8JZitJo9Sl8PDnjSbDehOxgr1vsoMmerIsyRZ651C/8B+jVkMhaAPgSdyJ0AlE3O7DKKTP1FXFw9qw==", "requires": { - "@sentry/types": "6.15.0", + "@sentry/types": "6.16.1", "tslib": "^1.9.3" } }, @@ -30431,9 +30390,9 @@ } }, "@solana/spl-token-registry": { - "version": "0.2.662", - "resolved": "https://registry.npmjs.org/@solana/spl-token-registry/-/spl-token-registry-0.2.662.tgz", - "integrity": "sha512-0TDs5GcR4judBvND0BTfwcZOtexyUclyR8d71QAi9G75R1JE9ENPD2mGbwBy98FVy+hOEI9m5ZmgGuxu8ORBYA==", + "version": "0.2.801", + "resolved": "https://registry.npmjs.org/@solana/spl-token-registry/-/spl-token-registry-0.2.801.tgz", + "integrity": "sha512-v1Tndn4iDHvp9WSiWypUHnKUcR33qXjgjKjWJ0w0yG9M8zc3/WAaSWsj6YllRhxaBqLP4kMsjqpGyJyZzIAOaA==", "requires": { "cross-fetch": "3.0.6" }, @@ -32650,14 +32609,6 @@ "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==" }, - "bigint-buffer": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/bigint-buffer/-/bigint-buffer-1.1.5.tgz", - "integrity": "sha512-trfYco6AoZ+rKhKnxA0hgX0HAbVP/s808/EuDSe2JDzUnCp/xAsli35Orvk67UrTEcwuxZqYZDmfA2RXJgxVvA==", - "requires": { - "bindings": "^1.3.0" - } - }, "bignumber.js": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz", @@ -34609,24 +34560,6 @@ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" }, - "encoding": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", - "integrity": 
"sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "requires": { - "iconv-lite": "^0.6.2" - }, - "dependencies": { - "iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - } - } - } - }, "end-of-stream": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", @@ -46088,9 +46021,9 @@ "integrity": "sha512-7uc1O8h1M1g0rArakJdf0uLRSSgFcYexrVoKo+bzJd32gd4gDy2L/Z+8/FjPnU9ydY3pEnVPtr9FyscYY60K1g==" }, "typescript": { - "version": "4.5.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.2.tgz", - "integrity": "sha512-5BlMof9H1yGt0P8/WF+wPNw6GfctgGjXp5hkblpyT+8rkASSmkUKMXrxR0Xg8ThVCi/JnHQiKXeBaEwCeQwMFw==" + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.3.tgz", + "integrity": "sha512-eVYaEHALSt+s9LbvgEv4Ef+Tdq7hBiIZgii12xXJnukryt3pMgJf6aKhoCZ3FWQsu6sydEnkg11fYXLzhLBjeQ==" }, "ua-parser-js": { "version": "0.7.28", diff --git a/explorer/package.json b/explorer/package.json index baaf63ef9c79a4..b58839ae3c9625 100644 --- a/explorer/package.json +++ b/explorer/package.json @@ -3,7 +3,7 @@ "version": "0.1.0", "private": true, "dependencies": { - "@blockworks-foundation/mango-client": "^3.2.14", + "@blockworks-foundation/mango-client": "^3.2.15", "@bonfida/bot": "^0.5.3", "@bonfida/spl-name-service": "^0.1.22", "@cloudflare/stream-react": "^1.2.0", @@ -11,8 +11,8 @@ "@metaplex/js": "4.9.0", "@project-serum/serum": "^0.13.61", "@react-hook/debounce": "^4.0.0", - "@sentry/react": "^6.15.0", - "@solana/spl-token-registry": "^0.2.662", + "@sentry/react": "^6.16.1", + "@solana/spl-token-registry": "^0.2.801", "@solana/web3.js": "^1.31.0", "@testing-library/jest-dom": "^5.16.1", "@testing-library/react": "^12.1.2", @@ -52,7 +52,7 @@ "react-select": "^4.3.1", "sass": "^1.44.0", "superstruct": "^0.15.3", - "typescript": "^4.5.2" + "typescript": "^4.5.3" }, "scripts": { "start": "react-scripts start", diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 9b9f5a97a6984d..776f3e86736a23 100644 --- a/faucet/Cargo.toml +++ b/faucet/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" byteorder = "1.4.3" clap = "2.33" log = "0.4.14" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" } solana-cli-config = { path = "../cli-config", version = "=1.10.0" } diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 5c81e8e2a061a7..ca8329053c8e69 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" bs58 = "0.4.0" bv = { version = "0.11.1", features = ["serde"] } log = "0.4.14" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" sha2 = "0.10.0" solana-frozen-abi-macro = { path = "macro", version = "=1.10.0" } diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index 0ed6c9732233d0..3c4439bca03fbf 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -12,7 +12,7 @@ documentation = "https://docs.rs/solana-genesis" [dependencies] base64 = "0.12.3" clap = "2.33.1" -serde = "1.0.130" +serde = "1.0.131" serde_json = "1.0.72" serde_yaml = "0.8.21" solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" } diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 62b833c35d7354..29f8eddd47bcb3 
100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -23,7 +23,7 @@ num-traits = "0.2" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.1" -serde = "1.0.130" +serde = "1.0.131" serde_bytes = "0.11" serde_derive = "1.0.103" solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 071d31779f7c4a..9c6fc9dd7fdb24 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -47,8 +47,8 @@ use { solana_perf::{ data_budget::DataBudget, packet::{ - limited_deserialize, to_packets_with_destination, Packet, Packets, PacketsRecycler, - PACKET_DATA_SIZE, + limited_deserialize, to_packet_batch_with_destination, Packet, PacketBatch, + PacketBatchRecycler, PACKET_DATA_SIZE, }, }, solana_rayon_threadlimit::get_thread_count, @@ -67,7 +67,7 @@ use { packet, sendmmsg::{multi_target_send, SendPktsError}, socket::SocketAddrSpace, - streamer::{PacketReceiver, PacketSender}, + streamer::{PacketBatchReceiver, PacketBatchSender}, }, solana_vote_program::{ vote_state::MAX_LOCKOUT_HISTORY, vote_transaction::parse_vote_transaction, @@ -1588,9 +1588,9 @@ impl ClusterInfo { &self, thread_pool: &ThreadPool, gossip_validators: Option<&HashSet>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, stakes: &HashMap, - sender: &PacketSender, + sender: &PacketBatchSender, generate_pull_requests: bool, ) -> Result<(), GossipError> { let reqs = self.generate_new_gossip_requests( @@ -1600,11 +1600,11 @@ impl ClusterInfo { generate_pull_requests, ); if !reqs.is_empty() { - let packets = to_packets_with_destination(recycler.clone(), &reqs); + let packet_batch = to_packet_batch_with_destination(recycler.clone(), &reqs); self.stats .packets_sent_gossip_requests_count - .add_relaxed(packets.packets.len() as u64); - sender.send(packets)?; + .add_relaxed(packet_batch.packets.len() as u64); + sender.send(packet_batch)?; } Ok(()) } @@ -1699,7 +1699,7 @@ impl ClusterInfo { pub fn gossip( self: Arc, bank_forks: Option>>, - sender: PacketSender, + sender: PacketBatchSender, gossip_validators: Option>, exit: Arc, ) -> JoinHandle<()> { @@ -1715,7 +1715,7 @@ impl ClusterInfo { let mut last_contact_info_trace = timestamp(); let mut last_contact_info_save = timestamp(); let mut entrypoints_processed = false; - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let crds_data = vec![ CrdsData::Version(Version::new(self.id())), CrdsData::NodeInstance( @@ -1840,9 +1840,9 @@ impl ClusterInfo { // from address, crds filter, caller contact info requests: Vec<(SocketAddr, CrdsFilter, CrdsValue)>, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, stakes: &HashMap, - response_sender: &PacketSender, + response_sender: &PacketBatchSender, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_pull_requests_time); if requests.is_empty() { @@ -1904,7 +1904,7 @@ impl ClusterInfo { &'a self, now: Instant, mut rng: &'a mut R, - packets: &'a mut Packets, + packet_batch: &'a mut PacketBatch, ) -> impl FnMut(&PullData) -> bool + 'a where R: Rng + CryptoRng, @@ -1917,7 +1917,7 @@ impl ClusterInfo { if let Some(ping) = ping { let ping = Protocol::PingMessage(ping); match Packet::from_data(Some(&node.1), ping) { - Ok(packet) => packets.packets.push(packet), + Ok(packet) => packet_batch.packets.push(packet), Err(err) => error!("failed to write ping packet: {:?}", err), }; } @@ -1944,10 +1944,10 @@ impl ClusterInfo { fn handle_pull_requests( &self, thread_pool: 
&ThreadPool, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, requests: Vec, stakes: &HashMap, - ) -> Packets { + ) -> PacketBatch { const DEFAULT_EPOCH_DURATION_MS: u64 = DEFAULT_SLOTS_PER_EPOCH * DEFAULT_MS_PER_SLOT; let mut time = Measure::start("handle_pull_requests"); let callers = crds_value::filter_current(requests.iter().map(|r| &r.caller)); @@ -1958,12 +1958,12 @@ impl ClusterInfo { } let output_size_limit = self.update_data_budget(stakes.len()) / PULL_RESPONSE_MIN_SERIALIZED_SIZE; - let mut packets = - Packets::new_unpinned_with_recycler(recycler.clone(), 64, "handle_pull_requests"); + let mut packet_batch = + PacketBatch::new_unpinned_with_recycler(recycler.clone(), 64, "handle_pull_requests"); let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = { let mut rng = rand::thread_rng(); let check_pull_request = - self.check_pull_request(Instant::now(), &mut rng, &mut packets); + self.check_pull_request(Instant::now(), &mut rng, &mut packet_batch); requests .into_iter() .filter(check_pull_request) @@ -2009,7 +2009,7 @@ impl ClusterInfo { }) .unzip(); if responses.is_empty() { - return packets; + return packet_batch; } let mut rng = rand::thread_rng(); let shuffle = WeightedShuffle::new(&mut rng, &scores).unwrap(); @@ -2023,7 +2023,7 @@ impl ClusterInfo { Ok(packet) => { if self.outbound_budget.take(packet.meta.size) { total_bytes += packet.meta.size; - packets.packets.push(packet); + packet_batch.packets.push(packet); sent += 1; } else { inc_new_counter_info!("gossip_pull_request-no_budget", 1); @@ -2043,7 +2043,7 @@ impl ClusterInfo { responses.len(), total_bytes ); - packets + packet_batch } fn handle_batch_pull_responses( @@ -2164,8 +2164,8 @@ impl ClusterInfo { fn handle_batch_ping_messages( &self, pings: I, - recycler: &PacketsRecycler, - response_sender: &PacketSender, + recycler: &PacketBatchRecycler, + response_sender: &PacketBatchSender, ) where I: IntoIterator, { @@ -2175,7 +2175,11 @@ impl ClusterInfo { } } - fn handle_ping_messages(&self, pings: I, recycler: &PacketsRecycler) -> Option + fn handle_ping_messages( + &self, + pings: I, + recycler: &PacketBatchRecycler, + ) -> Option where I: IntoIterator, { @@ -2197,9 +2201,12 @@ impl ClusterInfo { if packets.is_empty() { None } else { - let packets = - Packets::new_unpinned_with_recycler_data(recycler, "handle_ping_messages", packets); - Some(packets) + let packet_batch = PacketBatch::new_unpinned_with_recycler_data( + recycler, + "handle_ping_messages", + packets, + ); + Some(packet_batch) } } @@ -2222,9 +2229,9 @@ impl ClusterInfo { &self, messages: Vec<(Pubkey, Vec)>, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, stakes: &HashMap, - response_sender: &PacketSender, + response_sender: &PacketBatchSender, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_push_messages_time); if messages.is_empty() { @@ -2301,17 +2308,17 @@ impl ClusterInfo { if prune_messages.is_empty() { return; } - let mut packets = to_packets_with_destination(recycler.clone(), &prune_messages); - let num_prune_packets = packets.packets.len(); + let mut packet_batch = to_packet_batch_with_destination(recycler.clone(), &prune_messages); + let num_prune_packets = packet_batch.packets.len(); self.stats .push_response_count - .add_relaxed(packets.packets.len() as u64); + .add_relaxed(packet_batch.packets.len() as u64); let new_push_requests = self.new_push_requests(stakes); inc_new_counter_debug!("cluster_info-push_message-pushes", new_push_requests.len()); for (address, request) in 
new_push_requests { if ContactInfo::is_valid_address(&address, &self.socket_addr_space) { match Packet::from_data(Some(&address), &request) { - Ok(packet) => packets.packets.push(packet), + Ok(packet) => packet_batch.packets.push(packet), Err(err) => error!("failed to write push-request packet: {:?}", err), } } else { @@ -2323,8 +2330,8 @@ impl ClusterInfo { .add_relaxed(num_prune_packets as u64); self.stats .packets_sent_push_messages_count - .add_relaxed((packets.packets.len() - num_prune_packets) as u64); - let _ = response_sender.send(packets); + .add_relaxed((packet_batch.packets.len() - num_prune_packets) as u64); + let _ = response_sender.send(packet_batch); } fn require_stake_for_gossip(&self, stakes: &HashMap) -> bool { @@ -2342,8 +2349,8 @@ impl ClusterInfo { &self, packets: VecDeque<(/*from:*/ SocketAddr, Protocol)>, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, - response_sender: &PacketSender, + recycler: &PacketBatchRecycler, + response_sender: &PacketBatchSender, stakes: &HashMap, _feature_set: Option<&FeatureSet>, epoch_duration: Duration, @@ -2460,15 +2467,15 @@ impl ClusterInfo { // handling of requests/messages. fn run_socket_consume( &self, - receiver: &PacketReceiver, + receiver: &PacketBatchReceiver, sender: &Sender>, thread_pool: &ThreadPool, ) -> Result<(), GossipError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); let packets: Vec<_> = receiver.recv_timeout(RECV_TIMEOUT)?.packets.into(); let mut packets = VecDeque::from(packets); - for payload in receiver.try_iter() { - packets.extend(payload.packets.iter().cloned()); + for packet_batch in receiver.try_iter() { + packets.extend(packet_batch.packets.iter().cloned()); let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC); if excess_count > 0 { packets.drain(0..excess_count); @@ -2500,10 +2507,10 @@ impl ClusterInfo { /// Process messages from the network fn run_listen( &self, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, bank_forks: Option<&RwLock>, receiver: &Receiver>, - response_sender: &PacketSender, + response_sender: &PacketBatchSender, thread_pool: &ThreadPool, last_print: &mut Instant, should_check_duplicate_instance: bool, @@ -2551,7 +2558,7 @@ impl ClusterInfo { pub(crate) fn start_socket_consume_thread( self: Arc, - receiver: PacketReceiver, + receiver: PacketBatchReceiver, sender: Sender>, exit: Arc, ) -> JoinHandle<()> { @@ -2581,12 +2588,12 @@ impl ClusterInfo { self: Arc, bank_forks: Option>>, requests_receiver: Receiver>, - response_sender: PacketSender, + response_sender: PacketBatchSender, should_check_duplicate_instance: bool, exit: Arc, ) -> JoinHandle<()> { let mut last_print = Instant::now(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let thread_pool = ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) .thread_name(|i| format!("sol-gossip-work-{}", i)) @@ -2955,9 +2962,9 @@ pub fn push_messages_to_peer( let reqs: Vec<_> = ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, messages) .map(move |payload| (peer_gossip, Protocol::PushMessage(self_id, payload))) .collect(); - let packets = to_packets_with_destination(PacketsRecycler::default(), &reqs); + let packet_batch = to_packet_batch_with_destination(PacketBatchRecycler::default(), &reqs); let sock = UdpSocket::bind("0.0.0.0:0").unwrap(); - packet::send_to(&packets, &sock, socket_addr_space)?; + packet::send_to(&packet_batch, &sock, socket_addr_space)?; Ok(()) } @@ -3206,7 +3213,7 @@ mod tests { .iter() .map(|ping| 
Pong::new(ping, &this_node).unwrap()) .collect(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let packets = cluster_info .handle_ping_messages( remote_nodes diff --git a/install/Cargo.toml b/install/Cargo.toml index 71d78fb373df8b..3a7944b8528df5 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -22,7 +22,7 @@ indicatif = "0.16.2" lazy_static = "1.4.0" nix = "0.23.0" reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } -serde = { version = "1.0.130", features = ["derive"] } +serde = { version = "1.0.131", features = ["derive"] } serde_yaml = "0.8.21" solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" } solana-client = { path = "../client", version = "=1.10.0" } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 81480056ecdb60..e350862cdc3ab4 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -14,7 +14,7 @@ bs58 = "0.4.0" bytecount = "0.6.2" clap = "2.33.1" csv = "1.1.6" -dashmap = "4.0.2" +dashmap = "5.0.0" histogram = "*" itertools = "0.10.3" log = { version = "0.4.14" } @@ -45,7 +45,7 @@ jemallocator = {package = "tikv-jemallocator", version = "0.4.1", features = ["u assert_cmd = "2.0" [target."cfg(unix)".dependencies] -signal-hook = "0.3.10" +signal-hook = "0.3.12" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 21d6d069160301..9b86f93ce922ec 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -28,7 +28,7 @@ prost = "0.9.0" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.1" -serde = "1.0.130" +serde = "1.0.131" serde_bytes = "0.11.5" sha2 = "0.10.0" solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" } diff --git a/ledger/benches/sigverify_shreds.rs b/ledger/benches/sigverify_shreds.rs index 4a3de44fffe70e..16bfd7200ef5ec 100644 --- a/ledger/benches/sigverify_shreds.rs +++ b/ledger/benches/sigverify_shreds.rs @@ -7,7 +7,7 @@ use { sigverify_shreds::{sign_shreds_cpu, sign_shreds_gpu, sign_shreds_gpu_pinned_keypair}, }, solana_perf::{ - packet::{Packet, Packets}, + packet::{Packet, PacketBatch}, recycler_cache::RecyclerCache, }, solana_sdk::signature::Keypair, @@ -21,13 +21,13 @@ const NUM_BATCHES: usize = 1; fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) { let recycler_cache = RecyclerCache::default(); - let mut packets = Packets::default(); - packets.packets.set_pinnable(); + let mut packet_batch = PacketBatch::default(); + packet_batch.packets.set_pinnable(); let slot = 0xdead_c0de; // need to pin explicitly since the resize will not cause re-allocation - packets.packets.reserve_and_pin(NUM_PACKETS); - packets.packets.resize(NUM_PACKETS, Packet::default()); - for p in packets.packets.iter_mut() { + packet_batch.packets.reserve_and_pin(NUM_PACKETS); + packet_batch.packets.resize(NUM_PACKETS, Packet::default()); + for p in packet_batch.packets.iter_mut() { let shred = Shred::new_from_data( slot, 0xc0de, @@ -41,25 +41,25 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) { ); shred.copy_to_packet(p); } - let mut batch = vec![packets; NUM_BATCHES]; + let mut batches = vec![packet_batch; NUM_BATCHES]; let keypair = Keypair::new(); let pinned_keypair = sign_shreds_gpu_pinned_keypair(&keypair, &recycler_cache); let pinned_keypair = Some(Arc::new(pinned_keypair)); //warmup for _ in 0..100 { - sign_shreds_gpu(&keypair, &pinned_keypair, &mut batch, &recycler_cache); + sign_shreds_gpu(&keypair, 
&pinned_keypair, &mut batches, &recycler_cache); } bencher.iter(|| { - sign_shreds_gpu(&keypair, &pinned_keypair, &mut batch, &recycler_cache); + sign_shreds_gpu(&keypair, &pinned_keypair, &mut batches, &recycler_cache); }) } #[bench] fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) { - let mut packets = Packets::default(); + let mut packet_batch = PacketBatch::default(); let slot = 0xdead_c0de; - packets.packets.resize(NUM_PACKETS, Packet::default()); - for p in packets.packets.iter_mut() { + packet_batch.packets.resize(NUM_PACKETS, Packet::default()); + for p in packet_batch.packets.iter_mut() { let shred = Shred::new_from_data( slot, 0xc0de, @@ -73,9 +73,9 @@ fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) { ); shred.copy_to_packet(p); } - let mut batch = vec![packets; NUM_BATCHES]; + let mut batches = vec![packet_batch; NUM_BATCHES]; let keypair = Keypair::new(); bencher.iter(|| { - sign_shreds_cpu(&keypair, &mut batch); + sign_shreds_cpu(&keypair, &mut batches); }) } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 8495024d2fdb38..e89e02091b401a 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -10,13 +10,9 @@ use { IteratorMode, LedgerColumn, Result, WriteBatch, }, blockstore_meta::*, - erasure::ErasureConfig, leader_schedule_cache::LeaderScheduleCache, next_slots_iterator::NextSlotsIterator, - shred::{ - Result as ShredResult, Shred, ShredType, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, - SHRED_PAYLOAD_SIZE, - }, + shred::{Result as ShredResult, Shred, ShredType, Shredder, SHRED_PAYLOAD_SIZE}, }, bincode::deserialize, log::*, @@ -1059,21 +1055,16 @@ impl Blockstore { } } - let set_index = u64::from(shred.common_header.fec_set_index); - let erasure_config = ErasureConfig::new( - shred.coding_header.num_data_shreds as usize, - shred.coding_header.num_coding_shreds as usize, - ); - + let set_index = u64::from(shred.fec_set_index()); let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| { self.erasure_meta(slot, set_index) .expect("Expect database get to succeed") - .unwrap_or_else(|| ErasureMeta::new(set_index, erasure_config)) + .unwrap_or_else(|| ErasureMeta::from_coding_shred(&shred).unwrap()) }); // TODO: handle_duplicate is not invoked and so duplicate shreds are // not gossiped to the rest of cluster. 
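
The hunk above replaces the manual ErasureConfig comparison with ErasureMeta::from_coding_shred and check_coding_shred: the first coding shred seen for an FEC set seeds the erasure meta, and every later coding shred is validated against it. A minimal standalone sketch of that flow, using simplified stand-in types (CodingShred and ErasureMeta here are hypothetical, not the real ledger structs):

use std::collections::HashMap;

#[derive(Clone, Copy)]
struct CodingShred {
    slot: u64,
    fec_set_index: u64,
    num_data_shreds: u16,
    num_coding_shreds: u16,
}

#[derive(Clone, Copy, PartialEq, Eq)]
struct ErasureMeta {
    set_index: u64,
    num_data: usize,
    num_coding: usize,
}

impl ErasureMeta {
    // Derive the meta from the first coding shred of the FEC set.
    fn from_coding_shred(shred: &CodingShred) -> Self {
        Self {
            set_index: shred.fec_set_index,
            num_data: usize::from(shred.num_data_shreds),
            num_coding: usize::from(shred.num_coding_shreds),
        }
    }

    // A later coding shred is consistent iff it would produce the same meta.
    fn check_coding_shred(&self, shred: &CodingShred) -> bool {
        *self == Self::from_coding_shred(shred)
    }
}

fn main() {
    let mut erasure_metas: HashMap<(u64, u64), ErasureMeta> = HashMap::new();
    let first = CodingShred { slot: 7, fec_set_index: 64, num_data_shreds: 32, num_coding_shreds: 32 };
    let conflicting = CodingShred { num_coding_shreds: 16, ..first };

    // Seed the working-set entry from the shred itself, as the new insert path does.
    let meta = *erasure_metas
        .entry((first.slot, first.fec_set_index))
        .or_insert_with(|| ErasureMeta::from_coding_shred(&first));

    assert!(meta.check_coding_shred(&first));
    // A shred advertising a different erasure config for the same set is rejected.
    assert!(!meta.check_coding_shred(&conflicting));
}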
- if erasure_config != erasure_meta.config() { + if !erasure_meta.check_coding_shred(&shred) { metrics.num_coding_shreds_invalid_erasure_config += 1; let conflicting_shred = self.find_conflicting_coding_shred( &shred, @@ -1096,7 +1087,7 @@ impl Blockstore { warn!("Received multiple erasure configs for the same erasure set!!!"); warn!( "Slot: {}, shred index: {}, set_index: {}, is_duplicate: {}, stored config: {:#?}, new config: {:#?}", - slot, shred.index(), set_index, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config(), erasure_config + slot, shred.index(), set_index, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config(), shred.coding_header, ); return false; @@ -1187,7 +1178,9 @@ impl Blockstore { &self.db, slot_meta_working_set, slot, - shred.parent().ok_or(InsertDataShredError::InvalidShred)?, + shred + .parent() + .map_err(|_| InsertDataShredError::InvalidShred)?, ); let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); @@ -1224,7 +1217,7 @@ impl Blockstore { } } - let set_index = u64::from(shred.common_header.fec_set_index); + let set_index = u64::from(shred.fec_set_index()); let newly_completed_data_sets = self.insert_data_shred( slot_meta, index_meta.data_mut(), @@ -1244,16 +1237,7 @@ impl Blockstore { } fn should_insert_coding_shred(shred: &Shred, last_root: &RwLock) -> bool { - let shred_index = shred.index(); - let fec_set_index = shred.common_header.fec_set_index; - let num_coding_shreds = shred.coding_header.num_coding_shreds as u32; - shred.is_code() - && shred_index >= fec_set_index - && shred_index - fec_set_index < num_coding_shreds - && num_coding_shreds != 0 - && num_coding_shreds <= 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK - && num_coding_shreds - 1 <= u32::MAX - fec_set_index - && shred.slot() > *last_root.read().unwrap() + shred.is_code() && shred.sanitize() && shred.slot() > *last_root.read().unwrap() } fn insert_coding_shred( @@ -1267,7 +1251,7 @@ impl Blockstore { // Assert guaranteed by integrity checks on the shred that happen before // `insert_coding_shred` is called - assert!(shred.is_code() && shred_index >= shred.common_header.fec_set_index as u64); + assert!(shred.is_code() && shred.sanitize()); // Commit step: commit all changes to the mutable structures at once, or none at all. // We don't want only a subset of these changes going through. 
@@ -1354,14 +1338,14 @@ impl Blockstore { // Check that we do not receive shred_index >= than the last_index // for the slot let last_index = slot_meta.last_index; - if shred_index >= last_index { + if last_index.map(|ix| shred_index >= ix).unwrap_or_default() { let leader_pubkey = leader_schedule .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None)); let ending_shred: Cow> = self.get_data_shred_from_just_inserted_or_db( just_inserted_data_shreds, slot, - last_index, + last_index.unwrap(), ); if self @@ -1380,7 +1364,7 @@ impl Blockstore { ( "error", format!( - "Leader {:?}, slot {}: received index {} >= slot.last_index {}, shred_source: {:?}", + "Leader {:?}, slot {}: received index {} >= slot.last_index {:?}, shred_source: {:?}", leader_pubkey, slot, shred_index, last_index, shred_source ), String @@ -1525,7 +1509,14 @@ impl Blockstore { i64 ), ("slot", slot_meta.slot, i64), - ("last_index", slot_meta.last_index, i64), + ( + "last_index", + slot_meta + .last_index + .and_then(|ix| i64::try_from(ix).ok()) + .unwrap_or(-1), + i64 + ), ("num_repaired", num_repaired, i64), ("num_recovered", num_recovered, i64), ); @@ -1557,7 +1548,8 @@ impl Blockstore { .collect() } - pub fn get_data_shreds( + #[cfg(test)] + fn get_data_shreds( &self, slot: Slot, from_index: u64, @@ -3186,20 +3178,11 @@ fn update_slot_meta( slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed; } slot_meta.consumed = new_consumed; - slot_meta.last_index = { - // If the last index in the slot hasn't been set before, then - // set it to this shred index - if slot_meta.last_index == std::u64::MAX { - if is_last_in_slot { - u64::from(index) - } else { - std::u64::MAX - } - } else { - slot_meta.last_index - } - }; - + // If the last index in the slot hasn't been set before, then + // set it to this shred index + if is_last_in_slot && slot_meta.last_index.is_none() { + slot_meta.last_index = Some(u64::from(index)); + } update_completed_data_indexes( is_last_in_slot || is_last_in_data, index, @@ -4037,7 +4020,7 @@ pub mod tests { let num_shreds = shreds_per_slot[i as usize]; assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); if i == num_slots - 1 { assert!(meta.next_slots.is_empty()); } else { @@ -4262,7 +4245,7 @@ pub mod tests { assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); assert_eq!(meta.parent_slot, 0); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); assert!(meta.next_slots.is_empty()); assert!(meta.is_connected); } @@ -4287,7 +4270,7 @@ pub mod tests { .meta(0) .unwrap() .expect("Expected metadata object to exist"); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); if i != 0 { assert_eq!(result.len(), 0); assert!(meta.consumed == 0 && meta.received == num_shreds as u64); @@ -4465,9 +4448,9 @@ pub mod tests { } assert_eq!(meta.consumed, 0); if num_shreds % 2 == 0 { - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); } else { - assert_eq!(meta.last_index, std::u64::MAX); + assert_eq!(meta.last_index, None); } blockstore.insert_shreds(even_shreds, None, false).unwrap(); @@ -4481,7 +4464,7 @@ pub mod tests { assert_eq!(meta.received, num_shreds); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.parent_slot, parent_slot); - assert_eq!(meta.last_index, num_shreds - 1); + 
assert_eq!(meta.last_index, Some(num_shreds - 1)); } } @@ -4735,7 +4718,7 @@ pub mod tests { // Slot 1 is not trunk because slot 0 hasn't been inserted yet assert!(!s1.is_connected); assert_eq!(s1.parent_slot, 0); - assert_eq!(s1.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s1.last_index, Some(shreds_per_slot as u64 - 1)); // 2) Write to the second slot let shreds2 = shreds @@ -4747,7 +4730,7 @@ pub mod tests { // Slot 2 is not trunk because slot 0 hasn't been inserted yet assert!(!s2.is_connected); assert_eq!(s2.parent_slot, 1); - assert_eq!(s2.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s2.last_index, Some(shreds_per_slot as u64 - 1)); // Check the first slot again, it should chain to the second slot, // but still isn't part of the trunk @@ -4755,7 +4738,7 @@ pub mod tests { assert_eq!(s1.next_slots, vec![2]); assert!(!s1.is_connected); assert_eq!(s1.parent_slot, 0); - assert_eq!(s1.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s1.last_index, Some(shreds_per_slot as u64 - 1)); // 3) Write to the zeroth slot, check that every slot // is now part of the trunk @@ -4771,7 +4754,7 @@ pub mod tests { } else { assert_eq!(s.parent_slot, i - 1); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); assert!(s.is_connected); } } @@ -4852,7 +4835,7 @@ pub mod tests { } else { assert_eq!(s.parent_slot, i - 1); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); assert!(s.is_connected); } } @@ -4901,7 +4884,7 @@ pub mod tests { assert_eq!(s.parent_slot, i - 1); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); // Other than slot 0, no slots should be part of the trunk if i != 0 { @@ -4937,7 +4920,7 @@ pub mod tests { assert_eq!(s.parent_slot, i - 1); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); } } } @@ -5191,7 +5174,7 @@ pub mod tests { let meta = blockstore.meta(i).unwrap().unwrap(); assert_eq!(meta.received, 1); - assert_eq!(meta.last_index, 0); + assert_eq!(meta.last_index, Some(0)); if i != 0 { assert_eq!(meta.parent_slot, i - 1); assert_eq!(meta.consumed, 1); @@ -5448,7 +5431,7 @@ pub mod tests { true, // is_last_in_slot 0, // reference_tick shred5.common_header.version, - shred5.common_header.fec_set_index, + shred5.fec_set_index(), ); assert!(blockstore.should_insert_data_shred( &empty_shred, @@ -5500,7 +5483,7 @@ pub mod tests { // Trying to insert a shred with index > the "is_last" shred should fail if shred8.is_data() { - shred8.set_slot(slot_meta.last_index + 1); + shred8.set_slot(slot_meta.last_index.unwrap() + 1); } else { panic!("Shred in unexpected format") } @@ -5664,7 +5647,7 @@ pub mod tests { DataShredHeader::default(), coding.clone(), ); - let index = coding_shred.index() - coding_shred.common_header.fec_set_index - 1; + let index = coding_shred.index() - coding_shred.fec_set_index() - 1; coding_shred.set_index(index as u32); assert!(!Blockstore::should_insert_coding_shred( @@ -5694,8 +5677,7 @@ pub mod tests { DataShredHeader::default(), coding.clone(), ); - let num_coding_shreds = - coding_shred.common_header.index - coding_shred.common_header.fec_set_index; + let num_coding_shreds = coding_shred.index() - coding_shred.fec_set_index(); coding_shred.coding_header.num_coding_shreds = num_coding_shreds as u16; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, @@ 
-5712,7 +5694,9 @@ pub mod tests { coding.clone(), ); coding_shred.common_header.fec_set_index = std::u32::MAX - 1; + coding_shred.coding_header.num_data_shreds = 2; coding_shred.coding_header.num_coding_shreds = 3; + coding_shred.coding_header.position = 1; coding_shred.common_header.index = std::u32::MAX - 1; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, @@ -5763,7 +5747,7 @@ pub mod tests { assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); - assert_eq!(slot_meta.last_index, num_shreds - 1); + assert_eq!(slot_meta.last_index, Some(num_shreds - 1)); assert!(slot_meta.is_full()); let (shreds, _) = make_slot_entries(0, 0, 22); @@ -5772,7 +5756,7 @@ pub mod tests { assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); - assert_eq!(slot_meta.last_index, num_shreds - 1); + assert_eq!(slot_meta.last_index, Some(num_shreds - 1)); assert!(slot_meta.is_full()); assert!(blockstore.has_duplicate_shreds_in_slot(0)); @@ -8459,7 +8443,7 @@ pub mod tests { assert_eq!(meta.consumed, 0); assert_eq!(meta.received, last_index + 1); assert_eq!(meta.parent_slot, 0); - assert_eq!(meta.last_index, last_index); + assert_eq!(meta.last_index, Some(last_index)); assert!(!blockstore.is_full(0)); } @@ -8475,7 +8459,7 @@ pub mod tests { assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); assert_eq!(meta.parent_slot, 0); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); assert!(blockstore.is_full(0)); assert!(!blockstore.is_dead(0)); } diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index c1b1f4c63494b1..be5864595be9df 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -1,6 +1,9 @@ use { - crate::erasure::ErasureConfig, - serde::{Deserialize, Serialize}, + crate::{ + erasure::ErasureConfig, + shred::{Shred, ShredType}, + }, + serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_sdk::{clock::Slot, hash::Hash}, std::{ collections::BTreeSet, @@ -24,8 +27,10 @@ pub struct SlotMeta { // The timestamp of the first time a shred was added for this slot pub first_shred_timestamp: u64, // The index of the shred that is flagged as the last shred for this slot. - pub last_index: u64, + #[serde(with = "serde_compat")] + pub last_index: Option, // The slot height of the block this one derives from. + // TODO use Option instead. pub parent_slot: Slot, // The list of slots, each of which contains a block that derives // from this one. @@ -37,6 +42,27 @@ pub struct SlotMeta { pub completed_data_indexes: BTreeSet, } +// Serde implementation of serialize and deserialize for Option +// where None is represented as u64::MAX; for backward compatibility. +mod serde_compat { + use super::*; + + pub(super) fn serialize(val: &Option, serializer: S) -> Result + where + S: Serializer, + { + val.unwrap_or(u64::MAX).serialize(serializer) + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let val = u64::deserialize(deserializer)?; + Ok((val != u64::MAX).then(|| val)) + } +} + #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)] /// Index recording presence/absence of shreds pub struct Index { @@ -56,9 +82,8 @@ pub struct ShredIndex { pub struct ErasureMeta { /// Which erasure set in the slot this is set_index: u64, - /// Deprecated field. 
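
The serde_compat module added above keeps SlotMeta::last_index readable by older software by encoding None as u64::MAX on disk. A self-contained sketch of the round trip, assuming the serde (with derive) and bincode crates; LegacyMeta and NewMeta are hypothetical stand-ins for the stored blockstore type before and after the change:

use serde::{Deserialize, Deserializer, Serialize, Serializer};

mod serde_compat {
    use super::*;

    // None is written as the u64::MAX sentinel so old readers still parse it.
    pub fn serialize<S: Serializer>(val: &Option<u64>, s: S) -> Result<S::Ok, S::Error> {
        val.unwrap_or(u64::MAX).serialize(s)
    }

    // The sentinel maps back to None; any other value maps to Some(value).
    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<u64>, D::Error> {
        let val = u64::deserialize(d)?;
        Ok((val != u64::MAX).then(|| val))
    }
}

#[derive(Serialize, Deserialize)]
struct LegacyMeta {
    last_index: u64, // u64::MAX meant "unknown"
}

#[derive(Serialize, Deserialize)]
struct NewMeta {
    #[serde(with = "serde_compat")]
    last_index: Option<u64>,
}

fn main() {
    // An old entry carrying the sentinel deserializes to None.
    let old = bincode::serialize(&LegacyMeta { last_index: u64::MAX }).unwrap();
    let new: NewMeta = bincode::deserialize(&old).unwrap();
    assert_eq!(new.last_index, None);

    // A known last_index survives in both directions.
    let bytes = bincode::serialize(&NewMeta { last_index: Some(31) }).unwrap();
    let legacy: LegacyMeta = bincode::deserialize(&bytes).unwrap();
    assert_eq!(legacy.last_index, 31);
}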
- #[serde(rename = "first_coding_index")] - __unused_first_coding_index: u64, + /// First coding index in the FEC set + first_coding_index: u64, /// Size of shards in this erasure set #[serde(rename = "size")] __unused_size: usize, @@ -166,38 +191,30 @@ impl ShredIndex { impl SlotMeta { pub fn is_full(&self) -> bool { - // last_index is std::u64::MAX when it has no information about how + // last_index is None when it has no information about how // many shreds will fill this slot. // Note: A full slot with zero shreds is not possible. - if self.last_index == std::u64::MAX { - return false; - } - // Should never happen - if self.consumed > self.last_index + 1 { + if self + .last_index + .map(|ix| self.consumed > ix + 1) + .unwrap_or_default() + { datapoint_error!( "blockstore_error", ( "error", format!( - "Observed a slot meta with consumed: {} > meta.last_index + 1: {}", + "Observed a slot meta with consumed: {} > meta.last_index + 1: {:?}", self.consumed, - self.last_index + 1 + self.last_index.map(|ix| ix + 1), ), String ) ); } - self.consumed == self.last_index + 1 - } - - pub fn known_last_index(&self) -> Option { - if self.last_index == std::u64::MAX { - None - } else { - Some(self.last_index) - } + Some(self.consumed) == self.last_index.map(|ix| ix + 1) } pub fn is_parent_set(&self) -> bool { @@ -215,7 +232,6 @@ impl SlotMeta { slot, parent_slot, is_connected: slot == 0, - last_index: std::u64::MAX, ..SlotMeta::default() } } @@ -226,15 +242,41 @@ impl SlotMeta { } impl ErasureMeta { - pub(crate) fn new(set_index: u64, config: ErasureConfig) -> ErasureMeta { - ErasureMeta { - set_index, - config, - __unused_first_coding_index: 0, - __unused_size: 0, + pub(crate) fn from_coding_shred(shred: &Shred) -> Option { + match shred.shred_type() { + ShredType::Data => None, + ShredType::Code => { + let config = ErasureConfig::new( + usize::from(shred.coding_header.num_data_shreds), + usize::from(shred.coding_header.num_coding_shreds), + ); + let first_coding_index = u64::from(shred.first_coding_index()?); + let erasure_meta = ErasureMeta { + set_index: u64::from(shred.fec_set_index()), + config, + first_coding_index, + __unused_size: 0, + }; + Some(erasure_meta) + } } } + // Returns true if the erasure fields on the shred + // are consistent with the erasure-meta. + pub(crate) fn check_coding_shred(&self, shred: &Shred) -> bool { + let mut other = match Self::from_coding_shred(shred) { + Some(erasure_meta) => erasure_meta, + None => return false, + }; + other.__unused_size = self.__unused_size; + // Ignore first_coding_index field for now to be backward compatible. + // TODO remove this once cluster is upgraded to always populate + // first_coding_index field. + other.first_coding_index = self.first_coding_index; + self == &other + } + pub(crate) fn config(&self) -> ErasureConfig { self.config } @@ -246,7 +288,16 @@ impl ErasureMeta { pub(crate) fn coding_shreds_indices(&self) -> Range { let num_coding = self.config.num_coding() as u64; - self.set_index..self.set_index + num_coding + // first_coding_index == 0 may imply that the field is not populated. + // self.set_index to be backward compatible. + // TODO remove this once cluster is upgraded to always populate + // first_coding_index field. 
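
For reference, the backward-compatibility fallback that the surrounding coding_shreds_indices hunk implements reduces to the following worked example; coding_shreds_indices here is a hypothetical free function over plain integers, not the ledger API:

use std::ops::Range;

fn coding_shreds_indices(set_index: u64, first_coding_index: u64, num_coding: u64) -> Range<u64> {
    // Metas written before first_coding_index was populated store 0 there,
    // so fall back to set_index in that case.
    let first_coding_index = if first_coding_index == 0 {
        set_index
    } else {
        first_coding_index
    };
    first_coding_index..first_coding_index + num_coding
}

fn main() {
    // Legacy meta: first_coding_index == 0, so the range starts at set_index.
    assert_eq!(coding_shreds_indices(64, 0, 32), 64..96);
    // Newly written meta: the stored first_coding_index wins.
    assert_eq!(coding_shreds_indices(64, 64, 32), 64..96);
}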
+ let first_coding_index = if self.first_coding_index == 0 { + self.set_index + } else { + self.first_coding_index + }; + first_coding_index..first_coding_index + num_coding } pub(crate) fn status(&self, index: &Index) -> ErasureMetaStatus { @@ -316,7 +367,12 @@ mod test { let set_index = 0; let erasure_config = ErasureConfig::new(8, 16); - let e_meta = ErasureMeta::new(set_index, erasure_config); + let e_meta = ErasureMeta { + set_index, + first_coding_index: set_index, + config: erasure_config, + __unused_size: 0, + }; let mut rng = thread_rng(); let mut index = Index::new(0); diff --git a/ledger/src/erasure.rs b/ledger/src/erasure.rs index 546139812135ec..cf4052e41c0ed6 100644 --- a/ledger/src/erasure.rs +++ b/ledger/src/erasure.rs @@ -53,18 +53,18 @@ pub struct ErasureConfig { } impl ErasureConfig { - pub fn new(num_data: usize, num_coding: usize) -> ErasureConfig { + pub(crate) fn new(num_data: usize, num_coding: usize) -> ErasureConfig { ErasureConfig { num_data, num_coding, } } - pub fn num_data(self) -> usize { + pub(crate) fn num_data(self) -> usize { self.num_data } - pub fn num_coding(self) -> usize { + pub(crate) fn num_coding(self) -> usize { self.num_coding } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 48ab6efec725ce..68c9a25a015024 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -161,6 +161,9 @@ pub enum ShredError { "invalid parent offset; parent_offset {parent_offset} must be larger than slot {slot}" )] InvalidParentOffset { slot: Slot, parent_offset: u16 }, + + #[error("invalid payload")] + InvalidPayload, } pub type Result = std::result::Result; @@ -340,41 +343,32 @@ impl Shred { let common_header: ShredCommonHeader = Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?; - let slot = common_header.slot; // Shreds should be padded out to SHRED_PAYLOAD_SIZE // so that erasure generation/recovery works correctly // But only the data_header.size is stored in blockstore. payload.resize(SHRED_PAYLOAD_SIZE, 0); - let shred = match common_header.shred_type { + let (data_header, coding_header) = match common_header.shred_type { ShredType::Code => { let coding_header: CodingShredHeader = Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?; - Self { - common_header, - data_header: DataShredHeader::default(), - coding_header, - payload, - } + (DataShredHeader::default(), coding_header) } ShredType::Data => { let data_header: DataShredHeader = Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?; - if u64::from(data_header.parent_offset) > common_header.slot { - return Err(ShredError::InvalidParentOffset { - slot, - parent_offset: data_header.parent_offset, - }); - } - Self { - common_header, - data_header, - coding_header: CodingShredHeader::default(), - payload, - } + (data_header, CodingShredHeader::default()) } }; - - Ok(shred) + let shred = Self { + common_header, + data_header, + coding_header, + payload, + }; + shred + .sanitize() + .then(|| shred) + .ok_or(ShredError::InvalidPayload) } pub fn new_empty_coding( @@ -448,13 +442,24 @@ impl Shred { self.common_header.slot } - pub fn parent(&self) -> Option { + pub fn parent(&self) -> Result { match self.shred_type() { ShredType::Data => { - let parent_offset = Slot::try_from(self.data_header.parent_offset); - self.slot().checked_sub(parent_offset.ok()?) 
+ let slot = self.slot(); + let parent_offset = Slot::from(self.data_header.parent_offset); + if parent_offset == 0 && slot != 0 { + return Err(ShredError::InvalidParentOffset { + slot, + parent_offset: 0, + }); + } + slot.checked_sub(parent_offset) + .ok_or(ShredError::InvalidParentOffset { + slot, + parent_offset: self.data_header.parent_offset, + }) } - ShredType::Code => None, + ShredType::Code => Err(ShredError::InvalidShredType), } } @@ -462,19 +467,57 @@ impl Shred { self.common_header.index } + pub(crate) fn fec_set_index(&self) -> u32 { + self.common_header.fec_set_index + } + + pub(crate) fn first_coding_index(&self) -> Option { + match self.shred_type() { + ShredType::Data => None, + // TODO should be: self.index() - self.coding_header.position + // once position field is populated. + ShredType::Code => Some(self.fec_set_index()), + } + } + + // Returns true if the shred passes sanity checks. + pub(crate) fn sanitize(&self) -> bool { + self.erasure_block_index().is_some() + && match self.shred_type() { + ShredType::Data => { + self.parent().is_ok() + && usize::from(self.data_header.size) <= self.payload.len() + } + ShredType::Code => { + u32::from(self.coding_header.num_coding_shreds) + <= 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK + } + } + } + pub fn version(&self) -> u16 { self.common_header.version } // Returns the block index within the erasure coding set. fn erasure_block_index(&self) -> Option { - let fec_set_index = self.common_header.fec_set_index; - let index = self.index().checked_sub(fec_set_index)? as usize; + let index = self.index().checked_sub(self.fec_set_index())?; + let index = usize::try_from(index).ok()?; match self.shred_type() { ShredType::Data => Some(index), ShredType::Code => { - let num_data_shreds = self.coding_header.num_data_shreds as usize; - let num_coding_shreds = self.coding_header.num_coding_shreds as usize; + // TODO should use first_coding_index once position field is + // populated. + // Assert that the last shred index in the erasure set does not + // overshoot u32. 
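
The reworked Shred::parent above now returns a Result and rejects a zero parent_offset on any slot other than slot 0, in addition to the existing underflow check. A minimal sketch of that rule over plain integers, with a simplified error type (not the real ShredError):

#[derive(Debug, PartialEq)]
enum ParentError {
    InvalidParentOffset { slot: u64, parent_offset: u16 },
}

fn parent(slot: u64, parent_offset: u16) -> Result<u64, ParentError> {
    // A zero offset is only legal for the genesis slot.
    if parent_offset == 0 && slot != 0 {
        return Err(ParentError::InvalidParentOffset { slot, parent_offset });
    }
    // Otherwise the parent is slot - parent_offset, which must not underflow.
    slot.checked_sub(u64::from(parent_offset))
        .ok_or(ParentError::InvalidParentOffset { slot, parent_offset })
}

fn main() {
    assert_eq!(parent(10, 1), Ok(9));   // normal case
    assert_eq!(parent(0, 0), Ok(0));    // slot 0 is its own parent
    assert!(parent(10, 0).is_err());    // zero offset on a non-zero slot
    assert!(parent(10, 1000).is_err()); // offset larger than the slot
}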
+ self.fec_set_index().checked_add(u32::from( + self.coding_header + .num_data_shreds + .max(self.coding_header.num_coding_shreds) + .checked_sub(1)?, + ))?; + let num_data_shreds = usize::from(self.coding_header.num_data_shreds); + let num_coding_shreds = usize::from(self.coding_header.num_coding_shreds); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = index.checked_add(num_data_shreds)?; (index < fec_set_size).then(|| index) @@ -849,7 +892,7 @@ impl Shredder { assert_eq!(fec_set_index, index); assert!(data.iter().all(|shred| shred.common_header.slot == slot && shred.common_header.version == version - && shred.common_header.fec_set_index == fec_set_index)); + && shred.fec_set_index() == fec_set_index)); let num_data = data.len(); let num_coding = if is_last_in_slot { (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) @@ -895,7 +938,7 @@ impl Shredder { Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?; let (slot, fec_set_index) = match shreds.first() { None => return Ok(Vec::default()), - Some(shred) => (shred.slot(), shred.common_header.fec_set_index), + Some(shred) => (shred.slot(), shred.fec_set_index()), }; let (num_data_shreds, num_coding_shreds) = match shreds.iter().find(|shred| shred.is_code()) { @@ -905,9 +948,9 @@ impl Shredder { shred.coding_header.num_coding_shreds, ), }; - debug_assert!(shreds.iter().all( - |shred| shred.slot() == slot && shred.common_header.fec_set_index == fec_set_index - )); + debug_assert!(shreds + .iter() + .all(|shred| shred.slot() == slot && shred.fec_set_index() == fec_set_index)); debug_assert!(shreds .iter() .filter(|shred| shred.is_code()) @@ -1108,7 +1151,7 @@ pub fn verify_test_data_shred( assert!(shred.is_data()); assert_eq!(shred.index(), index); assert_eq!(shred.slot(), slot); - assert_eq!(shred.parent(), Some(parent)); + assert_eq!(shred.parent().unwrap(), parent); assert_eq!(verify, shred.verify(pk)); if is_last_in_slot { assert!(shred.last_in_slot()); @@ -1750,7 +1793,7 @@ pub mod tests { let max_per_block = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize; data_shreds.iter().enumerate().for_each(|(i, s)| { let expected_fec_set_index = start_index + ((i / max_per_block) * max_per_block) as u32; - assert_eq!(s.common_header.fec_set_index, expected_fec_set_index); + assert_eq!(s.fec_set_index(), expected_fec_set_index); }); coding_shreds.iter().enumerate().for_each(|(i, s)| { @@ -1758,7 +1801,7 @@ pub mod tests { while expected_fec_set_index as usize > data_shreds.len() { expected_fec_set_index -= max_per_block as u32; } - assert_eq!(s.common_header.fec_set_index, expected_fec_set_index); + assert_eq!(s.fec_set_index(), expected_fec_set_index); }); } @@ -1845,12 +1888,13 @@ pub mod tests { shred.copy_to_packet(&mut packet); let shred_res = Shred::new_from_serialized_shred(packet.data.to_vec()); assert_matches!( - shred_res, + shred.parent(), Err(ShredError::InvalidParentOffset { slot: 10, parent_offset: 1000 }) ); + assert_matches!(shred_res, Err(ShredError::InvalidPayload)); } #[test] diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 0df3d7b6b5cab3..a8e8868cf0be09 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -12,10 +12,10 @@ use { solana_metrics::inc_new_counter_debug, solana_perf::{ cuda_runtime::PinnedVec, - packet::{limited_deserialize, Packet, Packets}, + packet::{limited_deserialize, Packet, PacketBatch}, perf_libs, recycler_cache::RecyclerCache, - sigverify::{self, batch_size, TxOffset}, + sigverify::{self, 
count_packets_in_batches, TxOffset}, }, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ @@ -76,22 +76,26 @@ pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap) Some(1) } -fn verify_shreds_cpu(batches: &[Packets], slot_leaders: &HashMap) -> Vec> { +fn verify_shreds_cpu( + batches: &[PacketBatch], + slot_leaders: &HashMap, +) -> Vec> { use rayon::prelude::*; - let count = batch_size(batches); - debug!("CPU SHRED ECDSA for {}", count); + let packet_count = count_packets_in_batches(batches); + debug!("CPU SHRED ECDSA for {}", packet_count); let rv = SIGVERIFY_THREAD_POOL.install(|| { batches .into_par_iter() - .map(|p| { - p.packets + .map(|batch| { + batch + .packets .par_iter() .map(|p| verify_shred_cpu(p, slot_leaders).unwrap_or(0)) .collect() }) .collect() }); - inc_new_counter_debug!("ed25519_shred_verify_cpu", count); + inc_new_counter_debug!("ed25519_shred_verify_cpu", packet_count); rv } @@ -99,7 +103,7 @@ fn slot_key_data_for_gpu< T: Sync + Sized + Default + std::fmt::Debug + Eq + std::hash::Hash + Clone + Copy + AsRef<[u8]>, >( offset_start: usize, - batches: &[Packets], + batches: &[PacketBatch], slot_keys: &HashMap, recycler_cache: &RecyclerCache, ) -> (PinnedVec, TxOffset, usize) { @@ -108,8 +112,9 @@ fn slot_key_data_for_gpu< let slots: Vec> = SIGVERIFY_THREAD_POOL.install(|| { batches .into_par_iter() - .map(|p| { - p.packets + .map(|batch| { + batch + .packets .iter() .map(|packet| { let slot_start = size_of::() + size_of::(); @@ -173,7 +178,7 @@ fn vec_size_in_packets(keyvec: &PinnedVec) -> usize { } fn resize_vec(keyvec: &mut PinnedVec) -> usize { - //HACK: Pubkeys vector is passed along as a `Packets` buffer to the GPU + //HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU //TODO: GPU needs a more opaque interface, which can handle variable sized structures for data //Pad the Pubkeys buffer such that it is bigger than a buffer of Packet sized elems let num_in_packets = (keyvec.len() + (size_of::() - 1)) / size_of::(); @@ -183,7 +188,7 @@ fn resize_vec(keyvec: &mut PinnedVec) -> usize { fn shred_gpu_offsets( mut pubkeys_end: usize, - batches: &[Packets], + batches: &[PacketBatch], recycler_cache: &RecyclerCache, ) -> (TxOffset, TxOffset, TxOffset, Vec>) { let mut signature_offsets = recycler_cache.offsets().allocate("shred_signatures"); @@ -221,7 +226,7 @@ fn shred_gpu_offsets( } pub fn verify_shreds_gpu( - batches: &[Packets], + batches: &[PacketBatch], slot_leaders: &HashMap, recycler_cache: &RecyclerCache, ) -> Vec> { @@ -233,10 +238,10 @@ pub fn verify_shreds_gpu( let mut elems = Vec::new(); let mut rvs = Vec::new(); - let count = batch_size(batches); + let packet_count = count_packets_in_batches(batches); let (pubkeys, pubkey_offsets, mut num_packets) = slot_key_data_for_gpu(0, batches, slot_leaders, recycler_cache); - //HACK: Pubkeys vector is passed along as a `Packets` buffer to the GPU + //HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU //TODO: GPU needs a more opaque interface, which can handle variable sized structures for data let pubkeys_len = num_packets * size_of::(); trace!("num_packets: {}", num_packets); @@ -251,15 +256,15 @@ pub fn verify_shreds_gpu( num: num_packets as u32, }); - for p in batches { + for batch in batches { elems.push(perf_libs::Elems { - elems: p.packets.as_ptr(), - num: p.packets.len() as u32, + elems: batch.packets.as_ptr(), + num: batch.packets.len() as u32, }); let mut v = Vec::new(); - v.resize(p.packets.len(), 0); + v.resize(batch.packets.len(), 0); rvs.push(v); 
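
The renamed verify_shreds_cpu above keeps the same shape as before: an outer parallel iterator over PacketBatch values and an inner one over each batch's packets, yielding one 0/1 result per packet. A minimal sketch assuming the rayon crate, with simplified stand-in types and a placeholder verify predicate in place of the real signature check:

use rayon::prelude::*;

struct Packet {
    size: usize,
}

struct PacketBatch {
    packets: Vec<Packet>,
}

// Placeholder for verify_shred_cpu: 1 for "looks signed", 0 otherwise.
fn verify(packet: &Packet) -> u8 {
    u8::from(packet.size > 0)
}

fn verify_batches_cpu(batches: &[PacketBatch]) -> Vec<Vec<u8>> {
    batches
        .par_iter()
        .map(|batch| batch.packets.par_iter().map(verify).collect())
        .collect()
}

fn main() {
    let batches = vec![PacketBatch {
        packets: vec![Packet { size: 1232 }, Packet { size: 0 }],
    }];
    assert_eq!(verify_batches_cpu(&batches), vec![vec![1, 0]]);
}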
- num_packets += p.packets.len(); + num_packets += batch.packets.len(); } out.resize(signature_offsets.len(), 0); @@ -290,7 +295,7 @@ pub fn verify_shreds_gpu( sigverify::copy_return_values(&v_sig_lens, &out, &mut rvs); - inc_new_counter_debug!("ed25519_shred_verify_gpu", count); + inc_new_counter_debug!("ed25519_shred_verify_gpu", packet_count); rvs } @@ -316,18 +321,18 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) { packet.data[0..sig_end].copy_from_slice(signature.as_ref()); } -pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) { +pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [PacketBatch]) { use rayon::prelude::*; - let count = batch_size(batches); - debug!("CPU SHRED ECDSA for {}", count); + let packet_count = count_packets_in_batches(batches); + debug!("CPU SHRED ECDSA for {}", packet_count); SIGVERIFY_THREAD_POOL.install(|| { - batches.par_iter_mut().for_each(|p| { - p.packets[..] + batches.par_iter_mut().for_each(|batch| { + batch.packets[..] .par_iter_mut() .for_each(|p| sign_shred_cpu(keypair, p)); }); }); - inc_new_counter_debug!("ed25519_shred_verify_cpu", count); + inc_new_counter_debug!("ed25519_shred_verify_cpu", packet_count); } pub fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache) -> PinnedVec { @@ -350,14 +355,14 @@ pub fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache) pub fn sign_shreds_gpu( keypair: &Keypair, pinned_keypair: &Option>>, - batches: &mut [Packets], + batches: &mut [PacketBatch], recycler_cache: &RecyclerCache, ) { let sig_size = size_of::(); let pubkey_size = size_of::(); let api = perf_libs::api(); - let count = batch_size(batches); - if api.is_none() || count < SIGN_SHRED_GPU_MIN || pinned_keypair.is_none() { + let packet_count = count_packets_in_batches(batches); + if api.is_none() || packet_count < SIGN_SHRED_GPU_MIN || pinned_keypair.is_none() { return sign_shreds_cpu(keypair, batches); } let api = api.unwrap(); @@ -370,10 +375,10 @@ pub fn sign_shreds_gpu( //should be zero let mut pubkey_offsets = recycler_cache.offsets().allocate("pubkey offsets"); - pubkey_offsets.resize(count, 0); + pubkey_offsets.resize(packet_count, 0); let mut secret_offsets = recycler_cache.offsets().allocate("secret_offsets"); - secret_offsets.resize(count, pubkey_size as u32); + secret_offsets.resize(packet_count, pubkey_size as u32); trace!("offset: {}", offset); let (signature_offsets, msg_start_offsets, msg_sizes, _v_sig_lens) = @@ -388,14 +393,14 @@ pub fn sign_shreds_gpu( num: num_keypair_packets as u32, }); - for p in batches.iter() { + for batch in batches.iter() { elems.push(perf_libs::Elems { - elems: p.packets.as_ptr(), - num: p.packets.len() as u32, + elems: batch.packets.as_ptr(), + num: batch.packets.len() as u32, }); let mut v = Vec::new(); - v.resize(p.packets.len(), 0); - num_packets += p.packets.len(); + v.resize(batch.packets.len(), 0); + num_packets += batch.packets.len(); } trace!("Starting verify num packets: {}", num_packets); @@ -447,7 +452,7 @@ pub fn sign_shreds_gpu( }); }); }); - inc_new_counter_debug!("ed25519_shred_sign_gpu", count); + inc_new_counter_debug!("ed25519_shred_sign_gpu", packet_count); } #[cfg(test)] @@ -506,7 +511,7 @@ pub mod tests { fn run_test_sigverify_shreds_cpu(slot: Slot) { solana_logger::setup(); - let mut batch = [Packets::default()]; + let mut batches = [PacketBatch::default()]; let mut shred = Shred::new_from_data( slot, 0xc0de, @@ -520,15 +525,15 @@ pub mod tests { ); let keypair = Keypair::new(); Shredder::sign_shred(&keypair, 
&mut shred); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let leader_slots = [(slot, keypair.pubkey().to_bytes())] .iter() .cloned() .collect(); - let rv = verify_shreds_cpu(&batch, &leader_slots); + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![1]]); let wrong_keypair = Keypair::new(); @@ -536,19 +541,19 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_cpu(&batch, &leader_slots); + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![0]]); let leader_slots = HashMap::new(); - let rv = verify_shreds_cpu(&batch, &leader_slots); + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![0]]); let leader_slots = [(slot, keypair.pubkey().to_bytes())] .iter() .cloned() .collect(); - batch[0].packets[0].meta.size = 0; - let rv = verify_shreds_cpu(&batch, &leader_slots); + batches[0].packets[0].meta.size = 0; + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![0]]); } @@ -561,7 +566,7 @@ pub mod tests { solana_logger::setup(); let recycler_cache = RecyclerCache::default(); - let mut batch = [Packets::default()]; + let mut batches = [PacketBatch::default()]; let mut shred = Shred::new_from_data( slot, 0xc0de, @@ -575,9 +580,9 @@ pub mod tests { ); let keypair = Keypair::new(); Shredder::sign_shred(&keypair, &mut shred); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let leader_slots = [ (std::u64::MAX, Pubkey::default().to_bytes()), @@ -586,7 +591,7 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![1]]); let wrong_keypair = Keypair::new(); @@ -597,14 +602,14 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![0]]); let leader_slots = [(std::u64::MAX, [0u8; 32])].iter().cloned().collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![0]]); - batch[0].packets[0].meta.size = 0; + batches[0].packets[0].meta.size = 0; let leader_slots = [ (std::u64::MAX, Pubkey::default().to_bytes()), (slot, keypair.pubkey().to_bytes()), @@ -612,7 +617,7 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![0]]); } @@ -625,11 +630,11 @@ pub mod tests { solana_logger::setup(); let recycler_cache = RecyclerCache::default(); - let mut packets = Packets::default(); + let mut packet_batch = PacketBatch::default(); let num_packets = 
32; let num_batches = 100; - packets.packets.resize(num_packets, Packet::default()); - for (i, p) in packets.packets.iter_mut().enumerate() { + packet_batch.packets.resize(num_packets, Packet::default()); + for (i, p) in packet_batch.packets.iter_mut().enumerate() { let shred = Shred::new_from_data( slot, 0xc0de, @@ -643,7 +648,7 @@ pub mod tests { ); shred.copy_to_packet(p); } - let mut batch = vec![packets; num_batches]; + let mut batches = vec![packet_batch; num_batches]; let keypair = Keypair::new(); let pinned_keypair = sign_shreds_gpu_pinned_keypair(&keypair, &recycler_cache); let pinned_keypair = Some(Arc::new(pinned_keypair)); @@ -655,14 +660,14 @@ pub mod tests { .cloned() .collect(); //unsigned - let rv = verify_shreds_gpu(&batch, &pubkeys, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &pubkeys, &recycler_cache); assert_eq!(rv, vec![vec![0; num_packets]; num_batches]); //signed - sign_shreds_gpu(&keypair, &pinned_keypair, &mut batch, &recycler_cache); - let rv = verify_shreds_cpu(&batch, &pubkeys); + sign_shreds_gpu(&keypair, &pinned_keypair, &mut batches, &recycler_cache); + let rv = verify_shreds_cpu(&batches, &pubkeys); assert_eq!(rv, vec![vec![1; num_packets]; num_batches]); - let rv = verify_shreds_gpu(&batch, &pubkeys, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &pubkeys, &recycler_cache); assert_eq!(rv, vec![vec![1; num_packets]; num_batches]); } @@ -674,7 +679,7 @@ pub mod tests { fn run_test_sigverify_shreds_sign_cpu(slot: Slot) { solana_logger::setup(); - let mut batch = [Packets::default()]; + let mut batches = [PacketBatch::default()]; let keypair = Keypair::new(); let shred = Shred::new_from_data( slot, @@ -687,9 +692,9 @@ pub mod tests { 0, 0xc0de, ); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let pubkeys = [ (slot, keypair.pubkey().to_bytes()), (std::u64::MAX, Pubkey::default().to_bytes()), @@ -698,11 +703,11 @@ pub mod tests { .cloned() .collect(); //unsigned - let rv = verify_shreds_cpu(&batch, &pubkeys); + let rv = verify_shreds_cpu(&batches, &pubkeys); assert_eq!(rv, vec![vec![0]]); //signed - sign_shreds_cpu(&keypair, &mut batch); - let rv = verify_shreds_cpu(&batch, &pubkeys); + sign_shreds_cpu(&keypair, &mut batches); + let rv = verify_shreds_cpu(&batches, &pubkeys); assert_eq!(rv, vec![vec![1]]); } diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml index e9b83dfc0f7f79..bcd3c178dcc603 100644 --- a/log-analyzer/Cargo.toml +++ b/log-analyzer/Cargo.toml @@ -12,7 +12,7 @@ publish = false [dependencies] byte-unit = "4.0.13" clap = "2.33.1" -serde = "1.0.130" +serde = "1.0.131" serde_json = "1.0.72" solana-logger = { path = "../logger", version = "=1.10.0" } solana-version = { path = "../version", version = "=1.10.0" } diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml index 4f0bbc2b334d1b..7c529599229011 100644 --- a/net-shaper/Cargo.toml +++ b/net-shaper/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] clap = "2.33.1" -serde = "1.0.130" +serde = "1.0.131" serde_json = "1.0.72" solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" } solana-logger = { path = "../logger", version = "=1.10.0" } diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index 
423aed16602c08..6a51fa17556881 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -15,7 +15,7 @@ clap = "2.33.1" log = "0.4.14" nix = "0.23.0" rand = "0.7.0" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" socket2 = "0.4.2" solana-logger = { path = "../logger", version = "=1.10.0" } diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 91d7248497ada9..fd0cc2a6059f0e 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -18,7 +18,7 @@ lazy_static = "1.4.0" log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" -serde = "1.0.130" +serde = "1.0.131" solana-logger = { path = "../logger", version = "=1.10.0" } solana-metrics = { path = "../metrics", version = "=1.10.0" } solana-sdk = { path = "../sdk", version = "=1.10.0" } diff --git a/perf/benches/recycler.rs b/perf/benches/recycler.rs index 63410ffc856b8d..0533e4a11eb3a2 100644 --- a/perf/benches/recycler.rs +++ b/perf/benches/recycler.rs @@ -3,7 +3,7 @@ extern crate test; use { - solana_perf::{packet::PacketsRecycler, recycler::Recycler}, + solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, test::Bencher, }; @@ -11,7 +11,7 @@ use { fn bench_recycler(bencher: &mut Bencher) { solana_logger::setup(); - let recycler: PacketsRecycler = Recycler::default(); + let recycler: PacketBatchRecycler = Recycler::default(); for _ in 0..1000 { let _packet = recycler.allocate(""); diff --git a/perf/benches/sigverify.rs b/perf/benches/sigverify.rs index a3211cade62b18..7c60f362b7a1a8 100644 --- a/perf/benches/sigverify.rs +++ b/perf/benches/sigverify.rs @@ -3,7 +3,7 @@ extern crate test; use { - solana_perf::{packet::to_packets_chunked, recycler::Recycler, sigverify, test_tx::test_tx}, + solana_perf::{packet::to_packet_batches, recycler::Recycler, sigverify, test_tx::test_tx}, test::Bencher, }; @@ -12,7 +12,7 @@ fn bench_sigverify(bencher: &mut Bencher) { let tx = test_tx(); // generate packet vector - let mut batches = to_packets_chunked(&std::iter::repeat(tx).take(128).collect::>(), 128); + let mut batches = to_packet_batches(&std::iter::repeat(tx).take(128).collect::>(), 128); let recycler = Recycler::default(); let recycler_out = Recycler::default(); @@ -28,7 +28,7 @@ fn bench_get_offsets(bencher: &mut Bencher) { // generate packet vector let mut batches = - to_packets_chunked(&std::iter::repeat(tx).take(1024).collect::>(), 1024); + to_packet_batches(&std::iter::repeat(tx).take(1024).collect::>(), 1024); let recycler = Recycler::default(); // verify packets diff --git a/perf/src/packet.rs b/perf/src/packet.rs index 59f9d8f7dfce34..d8c163a7af0ac2 100644 --- a/perf/src/packet.rs +++ b/perf/src/packet.rs @@ -13,13 +13,13 @@ pub const PACKETS_PER_BATCH: usize = 128; pub const NUM_RCVMMSGS: usize = 128; #[derive(Debug, Default, Clone)] -pub struct Packets { +pub struct PacketBatch { pub packets: PinnedVec, } -pub type PacketsRecycler = Recycler>; +pub type PacketBatchRecycler = Recycler>; -impl Packets { +impl PacketBatch { pub fn new(packets: Vec) -> Self { let packets = PinnedVec::from_vec(packets); Self { packets } @@ -27,48 +27,52 @@ impl Packets { pub fn with_capacity(capacity: usize) -> Self { let packets = PinnedVec::with_capacity(capacity); - Packets { packets } + PacketBatch { packets } } pub fn new_unpinned_with_recycler( - recycler: PacketsRecycler, + recycler: PacketBatchRecycler, size: usize, name: &'static str, ) -> Self { let mut packets = recycler.allocate(name); packets.reserve(size); - Packets { packets } + PacketBatch { packets } } - pub fn new_with_recycler(recycler: PacketsRecycler, size: usize, name: 
&'static str) -> Self { + pub fn new_with_recycler( + recycler: PacketBatchRecycler, + size: usize, + name: &'static str, + ) -> Self { let mut packets = recycler.allocate(name); packets.reserve_and_pin(size); - Packets { packets } + PacketBatch { packets } } pub fn new_with_recycler_data( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, name: &'static str, mut packets: Vec, ) -> Self { - let mut vec = Self::new_with_recycler(recycler.clone(), packets.len(), name); - vec.packets.append(&mut packets); - vec + let mut batch = Self::new_with_recycler(recycler.clone(), packets.len(), name); + batch.packets.append(&mut packets); + batch } pub fn new_unpinned_with_recycler_data( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, name: &'static str, mut packets: Vec, ) -> Self { - let mut vec = Self::new_unpinned_with_recycler(recycler.clone(), packets.len(), name); - vec.packets.append(&mut packets); - vec + let mut batch = Self::new_unpinned_with_recycler(recycler.clone(), packets.len(), name); + batch.packets.append(&mut packets); + batch } pub fn set_addr(&mut self, addr: &SocketAddr) { - for m in self.packets.iter_mut() { - m.meta.set_addr(addr); + for p in self.packets.iter_mut() { + p.meta.set_addr(addr); } } @@ -77,32 +81,32 @@ impl Packets { } } -pub fn to_packets_chunked(xs: &[T], chunks: usize) -> Vec { +pub fn to_packet_batches(xs: &[T], chunks: usize) -> Vec { let mut out = vec![]; for x in xs.chunks(chunks) { - let mut p = Packets::with_capacity(x.len()); - p.packets.resize(x.len(), Packet::default()); - for (i, o) in x.iter().zip(p.packets.iter_mut()) { - Packet::populate_packet(o, None, i).expect("serialize request"); + let mut batch = PacketBatch::with_capacity(x.len()); + batch.packets.resize(x.len(), Packet::default()); + for (i, packet) in x.iter().zip(batch.packets.iter_mut()) { + Packet::populate_packet(packet, None, i).expect("serialize request"); } - out.push(p); + out.push(batch); } out } #[cfg(test)] -pub fn to_packets(xs: &[T]) -> Vec { - to_packets_chunked(xs, NUM_PACKETS) +pub fn to_packet_batches_for_tests(xs: &[T]) -> Vec { + to_packet_batches(xs, NUM_PACKETS) } -pub fn to_packets_with_destination( - recycler: PacketsRecycler, +pub fn to_packet_batch_with_destination( + recycler: PacketBatchRecycler, dests_and_data: &[(SocketAddr, T)], -) -> Packets { - let mut out = Packets::new_unpinned_with_recycler( +) -> PacketBatch { + let mut out = PacketBatch::new_unpinned_with_recycler( recycler, dests_and_data.len(), - "to_packets_with_destination", + "to_packet_batch_with_destination", ); out.packets.resize(dests_and_data.len(), Packet::default()); for (dest_and_data, o) in dests_and_data.iter().zip(out.packets.iter_mut()) { @@ -143,21 +147,21 @@ mod tests { }; #[test] - fn test_to_packets() { + fn test_to_packet_batches() { let keypair = Keypair::new(); let hash = Hash::new(&[1; 32]); let tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, hash); - let rv = to_packets(&[tx.clone(); 1]); + let rv = to_packet_batches_for_tests(&[tx.clone(); 1]); assert_eq!(rv.len(), 1); assert_eq!(rv[0].packets.len(), 1); #[allow(clippy::useless_vec)] - let rv = to_packets(&vec![tx.clone(); NUM_PACKETS]); + let rv = to_packet_batches_for_tests(&vec![tx.clone(); NUM_PACKETS]); assert_eq!(rv.len(), 1); assert_eq!(rv[0].packets.len(), NUM_PACKETS); #[allow(clippy::useless_vec)] - let rv = to_packets(&vec![tx; NUM_PACKETS + 1]); + let rv = to_packet_batches_for_tests(&vec![tx; NUM_PACKETS + 1]); assert_eq!(rv.len(), 2); assert_eq!(rv[0].packets.len(), 
NUM_PACKETS); assert_eq!(rv[1].packets.len(), 1); @@ -165,9 +169,10 @@ mod tests { #[test] fn test_to_packets_pinning() { - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); for i in 0..2 { - let _first_packets = Packets::new_with_recycler(recycler.clone(), i + 1, "first one"); + let _first_packets = + PacketBatch::new_with_recycler(recycler.clone(), i + 1, "first one"); } } } diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 154d8832802f6b..a8169ab5564b6a 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -182,7 +182,7 @@ impl RecyclerX { #[cfg(test)] mod tests { - use {super::*, crate::packet::PacketsRecycler, std::iter::repeat_with}; + use {super::*, crate::packet::PacketBatchRecycler, std::iter::repeat_with}; impl Reset for u64 { fn reset(&mut self) { @@ -209,7 +209,7 @@ mod tests { #[test] fn test_recycler_shrink() { let mut rng = rand::thread_rng(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); // Allocate a burst of packets. const NUM_PACKETS: usize = RECYCLER_SHRINK_SIZE * 2; { diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 6102c69af1d05f..e7e47c59bba1b6 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -9,7 +9,7 @@ use solana_sdk::transaction::Transaction; use { crate::{ cuda_runtime::PinnedVec, - packet::{Packet, Packets}, + packet::{Packet, PacketBatch}, perf_libs, recycler::Recycler, }, @@ -158,8 +158,8 @@ fn verify_packet(packet: &mut Packet, reject_non_vote: bool) { } } -pub fn batch_size(batches: &[Packets]) -> usize { - batches.iter().map(|p| p.packets.len()).sum() +pub fn count_packets_in_batches(batches: &[PacketBatch]) -> usize { + batches.iter().map(|batch| batch.packets.len()).sum() } // internal function to be unit-tested; should be used only by get_packet_offsets @@ -366,7 +366,7 @@ fn check_for_simple_vote_transaction( } pub fn generate_offsets( - batches: &mut [Packets], + batches: &mut [PacketBatch], recycler: &Recycler, reject_non_vote: bool, ) -> TxOffsets { @@ -381,9 +381,9 @@ pub fn generate_offsets( msg_sizes.set_pinnable(); let mut current_offset: usize = 0; let mut v_sig_lens = Vec::new(); - batches.iter_mut().for_each(|p| { + batches.iter_mut().for_each(|batch| { let mut sig_lens = Vec::new(); - p.packets.iter_mut().for_each(|packet| { + batch.packets.iter_mut().for_each(|packet| { let packet_offsets = get_packet_offsets(packet, current_offset, reject_non_vote); sig_lens.push(packet_offsets.sig_len); @@ -418,30 +418,32 @@ pub fn generate_offsets( ) } -pub fn ed25519_verify_cpu(batches: &mut [Packets], reject_non_vote: bool) { +pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool) { use rayon::prelude::*; - let count = batch_size(batches); - debug!("CPU ECDSA for {}", batch_size(batches)); + let packet_count = count_packets_in_batches(batches); + debug!("CPU ECDSA for {}", packet_count); PAR_THREAD_POOL.install(|| { - batches.into_par_iter().for_each(|p| { - p.packets + batches.into_par_iter().for_each(|batch| { + batch + .packets .par_iter_mut() .for_each(|p| verify_packet(p, reject_non_vote)) }) }); - inc_new_counter_debug!("ed25519_verify_cpu", count); + inc_new_counter_debug!("ed25519_verify_cpu", packet_count); } -pub fn ed25519_verify_disabled(batches: &mut [Packets]) { +pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) { use rayon::prelude::*; - let count = batch_size(batches); - debug!("disabled ECDSA for {}", batch_size(batches)); - 
batches.into_par_iter().for_each(|p| { - p.packets + let packet_count = count_packets_in_batches(batches); + debug!("disabled ECDSA for {}", packet_count); + batches.into_par_iter().for_each(|batch| { + batch + .packets .par_iter_mut() .for_each(|p| p.meta.discard = false) }); - inc_new_counter_debug!("ed25519_verify_disabled", count); + inc_new_counter_debug!("ed25519_verify_disabled", packet_count); } pub fn copy_return_values(sig_lens: &[Vec], out: &PinnedVec, rvs: &mut Vec>) { @@ -495,7 +497,7 @@ pub fn get_checked_scalar(scalar: &[u8; 32]) -> Result<[u8; 32], PacketError> { Ok(out) } -pub fn mark_disabled(batches: &mut [Packets], r: &[Vec]) { +pub fn mark_disabled(batches: &mut [PacketBatch], r: &[Vec]) { batches.iter_mut().zip(r).for_each(|(b, v)| { b.packets.iter_mut().zip(v).for_each(|(p, f)| { p.meta.discard = *f == 0; @@ -504,7 +506,7 @@ pub fn mark_disabled(batches: &mut [Packets], r: &[Vec]) { } pub fn ed25519_verify( - batches: &mut [Packets], + batches: &mut [PacketBatch], recycler: &Recycler, recycler_out: &Recycler>, reject_non_vote: bool, @@ -516,21 +518,21 @@ pub fn ed25519_verify( let api = api.unwrap(); use crate::packet::PACKET_DATA_SIZE; - let count = batch_size(batches); + let packet_count = count_packets_in_batches(batches); // micro-benchmarks show GPU time for smallest batch around 15-20ms // and CPU speed for 64-128 sigverifies around 10-20ms. 64 is a nice // power-of-two number around that accounting for the fact that the CPU // may be busy doing other things while being a real validator // TODO: dynamically adjust this crossover - if count < 64 { + if packet_count < 64 { return ed25519_verify_cpu(batches, reject_non_vote); } let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, sig_lens) = generate_offsets(batches, recycler, reject_non_vote); - debug!("CUDA ECDSA for {}", batch_size(batches)); + debug!("CUDA ECDSA for {}", packet_count); debug!("allocating out.."); let mut out = recycler_out.allocate("out_buffer"); out.set_pinnable(); @@ -538,15 +540,15 @@ pub fn ed25519_verify( let mut rvs = Vec::new(); let mut num_packets: usize = 0; - for p in batches.iter() { + for batch in batches.iter() { elems.push(perf_libs::Elems { - elems: p.packets.as_ptr(), - num: p.packets.len() as u32, + elems: batch.packets.as_ptr(), + num: batch.packets.len() as u32, }); let mut v = Vec::new(); - v.resize(p.packets.len(), 0); + v.resize(batch.packets.len(), 0); rvs.push(v); - num_packets = num_packets.saturating_add(p.packets.len()); + num_packets = num_packets.saturating_add(batch.packets.len()); } out.resize(signature_offsets.len(), 0); trace!("Starting verify num packets: {}", num_packets); @@ -575,7 +577,7 @@ pub fn ed25519_verify( trace!("done verify"); copy_return_values(&sig_lens, &out, &mut rvs); mark_disabled(batches, &rvs); - inc_new_counter_debug!("ed25519_verify_gpu", count); + inc_new_counter_debug!("ed25519_verify_gpu", packet_count); } #[cfg(test)] @@ -595,7 +597,7 @@ mod tests { use { super::*, crate::{ - packet::{Packet, Packets}, + packet::{Packet, PacketBatch}, sigverify::{self, PacketOffsets}, test_tx::{test_multisig_tx, test_tx, vote_tx}, }, @@ -623,9 +625,9 @@ mod tests { #[test] fn test_mark_disabled() { - let mut batch = Packets::default(); + let mut batch = PacketBatch::default(); batch.packets.push(Packet::default()); - let mut batches: Vec = vec![batch]; + let mut batches: Vec = vec![batch]; mark_disabled(&mut batches, &[vec![0]]); assert!(batches[0].packets[0].meta.discard); mark_disabled(&mut batches, &[vec![1]]); @@ -731,7 +733,7 @@ 
mod tests { assert!(packet.meta.discard); packet.meta.discard = false; - let mut batches = generate_packet_vec(&packet, 1, 1); + let mut batches = generate_packet_batches(&packet, 1, 1); ed25519_verify(&mut batches); assert!(batches[0].packets[0].meta.discard); } @@ -767,7 +769,7 @@ mod tests { assert!(packet.meta.discard); packet.meta.discard = false; - let mut batches = generate_packet_vec(&packet, 1, 1); + let mut batches = generate_packet_batches(&packet, 1, 1); ed25519_verify(&mut batches); assert!(batches[0].packets[0].meta.discard); } @@ -929,21 +931,21 @@ mod tests { ); } - fn generate_packet_vec( + fn generate_packet_batches( packet: &Packet, num_packets_per_batch: usize, num_batches: usize, - ) -> Vec { + ) -> Vec { // generate packet vector let batches: Vec<_> = (0..num_batches) .map(|_| { - let mut packets = Packets::default(); - packets.packets.resize(0, Packet::default()); + let mut packet_batch = PacketBatch::default(); + packet_batch.packets.resize(0, Packet::default()); for _ in 0..num_packets_per_batch { - packets.packets.push(packet.clone()); + packet_batch.packets.push(packet.clone()); } - assert_eq!(packets.packets.len(), num_packets_per_batch); - packets + assert_eq!(packet_batch.packets.len(), num_packets_per_batch); + packet_batch }) .collect(); assert_eq!(batches.len(), num_batches); @@ -960,7 +962,7 @@ mod tests { packet.data[20] = packet.data[20].wrapping_add(10); } - let mut batches = generate_packet_vec(&packet, n, 2); + let mut batches = generate_packet_batches(&packet, n, 2); // verify packets ed25519_verify(&mut batches); @@ -969,11 +971,11 @@ mod tests { let should_discard = modify_data; assert!(batches .iter() - .flat_map(|p| &p.packets) + .flat_map(|batch| &batch.packets) .all(|p| p.meta.discard == should_discard)); } - fn ed25519_verify(batches: &mut [Packets]) { + fn ed25519_verify(batches: &mut [PacketBatch]) { let recycler = Recycler::default(); let recycler_out = Recycler::default(); sigverify::ed25519_verify(batches, &recycler, &recycler_out, false); @@ -986,13 +988,13 @@ mod tests { tx.signatures.pop(); let packet = sigverify::make_packet_from_transaction(tx); - let mut batches = generate_packet_vec(&packet, 1, 1); + let mut batches = generate_packet_batches(&packet, 1, 1); // verify packets ed25519_verify(&mut batches); assert!(batches .iter() - .flat_map(|p| &p.packets) + .flat_map(|batch| &batch.packets) .all(|p| p.meta.discard)); } @@ -1020,7 +1022,7 @@ mod tests { let n = 4; let num_batches = 3; - let mut batches = generate_packet_vec(&packet, n, num_batches); + let mut batches = generate_packet_batches(&packet, n, num_batches); packet.data[40] = packet.data[40].wrapping_add(8); @@ -1035,7 +1037,7 @@ mod tests { ref_vec[0].push(0u8); assert!(batches .iter() - .flat_map(|p| &p.packets) + .flat_map(|batch| &batch.packets) .zip(ref_vec.into_iter().flatten()) .all(|(p, discard)| { if discard == 0 { @@ -1059,7 +1061,7 @@ mod tests { for _ in 0..50 { let n = thread_rng().gen_range(1, 30); let num_batches = thread_rng().gen_range(2, 30); - let mut batches = generate_packet_vec(&packet, n, num_batches); + let mut batches = generate_packet_batches(&packet, n, num_batches); let num_modifications = thread_rng().gen_range(0, 5); for _ in 0..num_modifications { @@ -1080,8 +1082,8 @@ mod tests { // check result batches .iter() - .flat_map(|p| &p.packets) - .zip(batches_cpu.iter().flat_map(|p| &p.packets)) + .flat_map(|batch| &batch.packets) + .zip(batches_cpu.iter().flat_map(|batch| &batch.packets)) .for_each(|(p1, p2)| assert_eq!(p1, p2)); } } @@ -1233,7 
+1235,7 @@ mod tests { solana_logger::setup(); let mut current_offset = 0usize; - let mut batch = Packets::default(); + let mut batch = PacketBatch::default(); batch .packets .push(sigverify::make_packet_from_transaction(test_tx())); diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index bcff3fa614ea05..3285eddb7cf45c 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -25,6 +25,9 @@ use { std::{cell::RefCell, collections::HashMap, fmt::Debug, rc::Rc, sync::Arc}, }; +pub type TransactionAccountRefCell = (Pubkey, Rc>); +pub type TransactionAccountRefCells = Vec; + pub type ProcessInstructionWithContext = fn(usize, &[u8], &mut InvokeContext) -> Result<(), InstructionError>; @@ -138,7 +141,7 @@ pub struct InvokeContext<'a> { invoke_stack: Vec>, rent: Rent, pre_accounts: Vec, - accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [TransactionAccountRefCell], builtin_programs: &'a [BuiltinProgram], pub sysvars: &'a [(Pubkey, Vec)], log_collector: Option>>, @@ -158,7 +161,7 @@ impl<'a> InvokeContext<'a> { #[allow(clippy::too_many_arguments)] pub fn new( rent: Rent, - accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [TransactionAccountRefCell], builtin_programs: &'a [BuiltinProgram], sysvars: &'a [(Pubkey, Vec)], log_collector: Option>>, @@ -190,7 +193,7 @@ impl<'a> InvokeContext<'a> { } pub fn new_mock( - accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [TransactionAccountRefCell], builtin_programs: &'a [BuiltinProgram], ) -> Self { Self::new( @@ -828,7 +831,7 @@ impl<'a> InvokeContext<'a> { } pub struct MockInvokeContextPreparation { - pub accounts: Vec<(Pubkey, Rc>)>, + pub accounts: TransactionAccountRefCells, pub message: Message, pub account_indices: Vec, } @@ -839,10 +842,7 @@ pub fn prepare_mock_invoke_context( keyed_accounts: &[(bool, bool, Pubkey, Rc>)], ) -> MockInvokeContextPreparation { #[allow(clippy::type_complexity)] - let (accounts, mut metas): ( - Vec<(Pubkey, Rc>)>, - Vec, - ) = keyed_accounts + let (accounts, mut metas): (TransactionAccountRefCells, Vec) = keyed_accounts .iter() .map(|(is_signer, is_writable, pubkey, account)| { ( diff --git a/program-runtime/src/log_collector.rs b/program-runtime/src/log_collector.rs index 0618291db2140b..1cb09259c16d44 100644 --- a/program-runtime/src/log_collector.rs +++ b/program-runtime/src/log_collector.rs @@ -3,17 +3,36 @@ use std::{cell::RefCell, rc::Rc}; const LOG_MESSAGES_BYTES_LIMIT: usize = 10 * 1000; -#[derive(Default)] pub struct LogCollector { messages: Vec, bytes_written: usize, + bytes_limit: Option, limit_warning: bool, } +impl Default for LogCollector { + fn default() -> Self { + Self { + messages: Vec::new(), + bytes_written: 0, + bytes_limit: Some(LOG_MESSAGES_BYTES_LIMIT), + limit_warning: false, + } + } +} + impl LogCollector { pub fn log(&mut self, message: &str) { + let limit = match self.bytes_limit { + Some(limit) => limit, + None => { + self.messages.push(message.to_string()); + return; + } + }; + let bytes_written = self.bytes_written.saturating_add(message.len()); - if bytes_written >= LOG_MESSAGES_BYTES_LIMIT { + if bytes_written >= limit { if !self.limit_warning { self.limit_warning = true; self.messages.push(String::from("Log truncated")); @@ -31,6 +50,13 @@ impl LogCollector { pub fn new_ref() -> Rc> { Rc::new(RefCell::new(Self::default())) } + + pub fn new_ref_with_limit(bytes_limit: Option) -> Rc> { + Rc::new(RefCell::new(Self { + bytes_limit, + ..Self::default() + })) + } } impl From for Vec { diff --git a/program-test/Cargo.toml 
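Note on the LogCollector change above: the message-byte cap is now stored per collector as an Option, so callers can raise or drop the limit instead of always truncating at LOG_MESSAGES_BYTES_LIMIT. Below is a minimal sketch (not part of the diff) of the resulting behavior; it assumes log_collector stays a public module of solana-program-runtime and only uses the constructors shown in the hunk above.

// Illustrative sketch only: compares a default (capped) collector against one
// built with `new_ref_with_limit(None)`, which never truncates.
use solana_program_runtime::log_collector::LogCollector;

fn main() {
    let capped = LogCollector::new_ref(); // default limit of 10_000 bytes
    let uncapped = LogCollector::new_ref_with_limit(None);

    for _ in 0..10_000 {
        capped.borrow_mut().log("spam");
        uncapped.borrow_mut().log("spam");
    }

    // The capped collector stops recording once the byte limit is crossed and
    // appends a single "Log truncated" marker; the uncapped one keeps everything.
    let capped_logs: Vec<String> = capped.take().into();
    let uncapped_logs: Vec<String> = uncapped.take().into();
    assert!(capped_logs.len() < uncapped_logs.len());
    assert_eq!(uncapped_logs.len(), 10_000);
}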
b/program-test/Cargo.toml index 24bde080ad01aa..185802cb782633 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -8,12 +8,12 @@ repository = "https://github.com/solana-labs/solana" version = "1.10.0" [dependencies] -async-trait = "0.1.51" +async-trait = "0.1.52" base64 = "0.12.3" bincode = "1.3.3" chrono-humanize = "0.2.1" log = "0.4.14" -serde = "1.0.130" +serde = "1.0.131" solana-banks-client = { path = "../banks-client", version = "=1.10.0" } solana-banks-server = { path = "../banks-server", version = "=1.10.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index eebb76b7c0dc79..4bd9e33a87199d 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -41,7 +41,7 @@ use { sysvar::{ clock, epoch_schedule, fees::{self}, - rent, Sysvar, + rent, Sysvar, SysvarId, }, }, solana_vote_program::vote_state::{VoteState, VoteStateVersions}, @@ -1045,6 +1045,18 @@ impl ProgramTestContext { bank.store_account(address, account); } + /// Create or overwrite a sysvar, subverting normal runtime checks. + /// + /// This method exists to make it easier to set up artificial situations + /// that would be difficult to replicate on a new test cluster. Beware + /// that it can be used to create states that would not be reachable + /// under normal conditions! + pub fn set_sysvar(&self, sysvar: &T) { + let bank_forks = self.bank_forks.read().unwrap(); + let bank = bank_forks.working_bank(); + bank.set_sysvar_for_tests(sysvar); + } + /// Force the working bank ahead to a new slot pub fn warp_to_slot(&mut self, warp_slot: Slot) -> Result<(), ProgramTestError> { let mut bank_forks = self.bank_forks.write().unwrap(); diff --git a/programs/address-lookup-table-tests/Cargo.toml b/programs/address-lookup-table-tests/Cargo.toml new file mode 100644 index 00000000000000..c89377eb218857 --- /dev/null +++ b/programs/address-lookup-table-tests/Cargo.toml @@ -0,0 +1,22 @@ +# This package only exists to avoid circular dependencies during cargo publish: +# solana-runtime -> solana-address-program-runtime -> solana-program-test -> solana-runtime + +[package] +name = "solana-address-lookup-table-program-tests" +version = "1.10.0" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +edition = "2021" +publish = false + +[dev-dependencies] +assert_matches = "1.5.0" +bincode = "1.3.3" +solana-address-lookup-table-program = { path = "../address-lookup-table", version = "=1.10.0" } +solana-program-test = { path = "../../program-test", version = "=1.10.0" } +solana-sdk = { path = "../../sdk", version = "=1.10.0" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs new file mode 100644 index 00000000000000..0ffc88455e28f2 --- /dev/null +++ b/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs @@ -0,0 +1,151 @@ +use { + assert_matches::assert_matches, + common::{ + add_lookup_table_account, assert_ix_error, new_address_lookup_table, + overwrite_slot_hashes_with_slots, setup_test_context, + }, + solana_address_lookup_table_program::instruction::close_lookup_table, + solana_program_test::*, + solana_sdk::{ + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, + }, +}; + +mod 
common; + +#[tokio::test] +async fn test_close_lookup_table() { + let mut context = setup_test_context().await; + overwrite_slot_hashes_with_slots(&mut context, &[]); + + let authority_keypair = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority_keypair.pubkey()), 0); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let transaction = Transaction::new_signed_with_payer( + &[close_lookup_table( + lookup_table_address, + authority_keypair.pubkey(), + context.payer.pubkey(), + )], + Some(&payer.pubkey()), + &[payer, &authority_keypair], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + assert!(client + .get_account(lookup_table_address) + .await + .unwrap() + .is_none()); +} + +#[tokio::test] +async fn test_close_lookup_table_too_recent() { + let mut context = setup_test_context().await; + + let authority_keypair = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority_keypair.pubkey()), 0); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = close_lookup_table( + lookup_table_address, + authority_keypair.pubkey(), + context.payer.pubkey(), + ); + + // Context sets up the slot hashes sysvar to have an entry + // for slot 0 which is what the default initialized table + // has as its derivation slot. Because that slot is present, + // the ix should fail. + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn test_close_immutable_lookup_table() { + let mut context = setup_test_context().await; + + let initialized_table = new_address_lookup_table(None, 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let authority = Keypair::new(); + let ix = close_lookup_table( + lookup_table_address, + authority.pubkey(), + Pubkey::new_unique(), + ); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::Immutable, + ) + .await; +} + +#[tokio::test] +async fn test_close_lookup_table_with_wrong_authority() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let wrong_authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = close_lookup_table( + lookup_table_address, + wrong_authority.pubkey(), + Pubkey::new_unique(), + ); + + assert_ix_error( + &mut context, + ix, + Some(&wrong_authority), + InstructionError::IncorrectAuthority, + ) + .await; +} + +#[tokio::test] +async fn test_close_lookup_table_without_signing() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let mut ix = close_lookup_table( + lookup_table_address, + authority.pubkey(), + Pubkey::new_unique(), + ); + 
ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} diff --git a/programs/address-lookup-table-tests/tests/common.rs b/programs/address-lookup-table-tests/tests/common.rs new file mode 100644 index 00000000000000..a29fd6010f6174 --- /dev/null +++ b/programs/address-lookup-table-tests/tests/common.rs @@ -0,0 +1,103 @@ +#![allow(dead_code)] +use { + solana_address_lookup_table_program::{ + id, + processor::process_instruction, + state::{AddressLookupTable, LookupTableMeta}, + }, + solana_program_test::*, + solana_sdk::{ + account::AccountSharedData, + clock::Slot, + hash::Hash, + instruction::Instruction, + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + slot_hashes::SlotHashes, + transaction::{Transaction, TransactionError}, + }, + std::borrow::Cow, +}; + +pub async fn setup_test_context() -> ProgramTestContext { + let program_test = ProgramTest::new("", id(), Some(process_instruction)); + program_test.start_with_context().await +} + +pub async fn assert_ix_error( + context: &mut ProgramTestContext, + ix: Instruction, + authority_keypair: Option<&Keypair>, + expected_err: InstructionError, +) { + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + + let mut signers = vec![payer]; + if let Some(authority) = authority_keypair { + signers.push(authority); + } + + let transaction = Transaction::new_signed_with_payer( + &[ix], + Some(&payer.pubkey()), + &signers, + recent_blockhash, + ); + + assert_eq!( + client + .process_transaction(transaction) + .await + .unwrap_err() + .unwrap(), + TransactionError::InstructionError(0, expected_err), + ); +} + +pub fn new_address_lookup_table( + authority: Option, + num_addresses: usize, +) -> AddressLookupTable<'static> { + let mut addresses = Vec::with_capacity(num_addresses); + addresses.resize_with(num_addresses, Pubkey::new_unique); + AddressLookupTable { + meta: LookupTableMeta { + authority, + ..LookupTableMeta::default() + }, + addresses: Cow::Owned(addresses), + } +} + +pub async fn add_lookup_table_account( + context: &mut ProgramTestContext, + account_address: Pubkey, + address_lookup_table: AddressLookupTable<'static>, +) -> AccountSharedData { + let mut data = Vec::new(); + address_lookup_table.serialize_for_tests(&mut data).unwrap(); + + let rent = context.banks_client.get_rent().await.unwrap(); + let rent_exempt_balance = rent.minimum_balance(data.len()); + + let mut account = AccountSharedData::new( + rent_exempt_balance, + data.len(), + &solana_address_lookup_table_program::id(), + ); + account.set_data(data); + context.set_account(&account_address, &account); + + account +} + +pub fn overwrite_slot_hashes_with_slots(context: &mut ProgramTestContext, slots: &[Slot]) { + let mut slot_hashes = SlotHashes::default(); + for slot in slots { + slot_hashes.add(*slot, Hash::new_unique()); + } + context.set_sysvar(&slot_hashes); +} diff --git a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs new file mode 100644 index 00000000000000..7f4da6f279dddf --- /dev/null +++ b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs @@ -0,0 +1,158 @@ +use { + assert_matches::assert_matches, + common::{assert_ix_error, overwrite_slot_hashes_with_slots, setup_test_context}, + solana_address_lookup_table_program::{ + id, + instruction::create_lookup_table, + 
state::{AddressLookupTable, LOOKUP_TABLE_META_SIZE}, + }, + solana_program_test::*, + solana_sdk::{ + clock::Slot, instruction::InstructionError, pubkey::Pubkey, rent::Rent, signature::Signer, + signer::keypair::Keypair, transaction::Transaction, + }, +}; + +mod common; + +#[tokio::test] +async fn test_create_lookup_table() { + let mut context = setup_test_context().await; + + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + let (create_lookup_table_ix, lookup_table_address) = + create_lookup_table(authority_address, payer.pubkey(), test_recent_slot); + + // First create should succeed + { + let transaction = Transaction::new_signed_with_payer( + &[create_lookup_table_ix.clone()], + Some(&payer.pubkey()), + &[payer, &authority_keypair], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + let lookup_table_account = client + .get_account(lookup_table_address) + .await + .unwrap() + .unwrap(); + assert_eq!(lookup_table_account.owner, crate::id()); + assert_eq!(lookup_table_account.data.len(), LOOKUP_TABLE_META_SIZE); + assert_eq!( + lookup_table_account.lamports, + Rent::default().minimum_balance(LOOKUP_TABLE_META_SIZE) + ); + let lookup_table = AddressLookupTable::deserialize(&lookup_table_account.data).unwrap(); + assert_eq!(lookup_table.meta.derivation_slot, test_recent_slot); + assert_eq!(lookup_table.meta.authority, Some(authority_address)); + assert_eq!(lookup_table.meta.last_extended_slot, 0); + assert_eq!(lookup_table.meta.last_extended_slot_start_index, 0); + assert_eq!(lookup_table.addresses.len(), 0); + } + + // Second create should fail + { + context.last_blockhash = client + .get_new_latest_blockhash(&recent_blockhash) + .await + .unwrap(); + assert_ix_error( + &mut context, + create_lookup_table_ix, + Some(&authority_keypair), + InstructionError::AccountAlreadyInitialized, + ) + .await; + } +} + +#[tokio::test] +async fn test_create_lookup_table_use_payer_as_authority() { + let mut context = setup_test_context().await; + + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let authority_address = payer.pubkey(); + let transaction = Transaction::new_signed_with_payer( + &[create_lookup_table(authority_address, payer.pubkey(), test_recent_slot).0], + Some(&payer.pubkey()), + &[payer], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); +} + +#[tokio::test] +async fn test_create_lookup_table_without_signer() { + let mut context = setup_test_context().await; + let unsigned_authority_address = Pubkey::new_unique(); + + let mut ix = create_lookup_table( + unsigned_authority_address, + context.payer.pubkey(), + Slot::MAX, + ) + .0; + ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} + +#[tokio::test] +async fn test_create_lookup_table_not_recent_slot() { + let mut context = setup_test_context().await; + let payer = &context.payer; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + + let ix = 
create_lookup_table(authority_address, payer.pubkey(), Slot::MAX).0; + + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidInstructionData, + ) + .await; +} + +#[tokio::test] +async fn test_create_lookup_table_pda_mismatch() { + let mut context = setup_test_context().await; + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + let payer = &context.payer; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + + let mut ix = create_lookup_table(authority_address, payer.pubkey(), test_recent_slot).0; + ix.accounts[0].pubkey = Pubkey::new_unique(); + + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidArgument, + ) + .await; +} diff --git a/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs new file mode 100644 index 00000000000000..ffed5c619f66a0 --- /dev/null +++ b/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs @@ -0,0 +1,214 @@ +use { + assert_matches::assert_matches, + common::{add_lookup_table_account, new_address_lookup_table, setup_test_context}, + solana_address_lookup_table_program::{ + instruction::extend_lookup_table, + state::{AddressLookupTable, LookupTableMeta}, + }, + solana_program_test::*, + solana_sdk::{ + account::ReadableAccount, + instruction::Instruction, + instruction::InstructionError, + pubkey::{Pubkey, PUBKEY_BYTES}, + signature::{Keypair, Signer}, + transaction::{Transaction, TransactionError}, + }, + std::borrow::Cow, + std::result::Result, +}; + +mod common; + +struct ExpectedTableAccount { + lamports: u64, + data_len: usize, + state: AddressLookupTable<'static>, +} + +struct TestCase<'a> { + lookup_table_address: Pubkey, + instruction: Instruction, + extra_signer: Option<&'a Keypair>, + expected_result: Result, +} + +async fn run_test_case(context: &mut ProgramTestContext, test_case: TestCase<'_>) { + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + + let mut signers = vec![payer]; + if let Some(extra_signer) = test_case.extra_signer { + signers.push(extra_signer); + } + + let transaction = Transaction::new_signed_with_payer( + &[test_case.instruction], + Some(&payer.pubkey()), + &signers, + recent_blockhash, + ); + + let process_result = client.process_transaction(transaction).await; + + match test_case.expected_result { + Ok(expected_account) => { + assert_matches!(process_result, Ok(())); + + let table_account = client + .get_account(test_case.lookup_table_address) + .await + .unwrap() + .unwrap(); + + let lookup_table = AddressLookupTable::deserialize(&table_account.data).unwrap(); + assert_eq!(lookup_table, expected_account.state); + assert_eq!(table_account.lamports(), expected_account.lamports); + assert_eq!(table_account.data().len(), expected_account.data_len); + } + Err(expected_err) => { + assert_eq!( + process_result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, expected_err), + ); + } + } +} + +#[tokio::test] +async fn test_extend_lookup_table() { + let mut context = setup_test_context().await; + let authority = Keypair::new(); + let current_bank_slot = 1; + let rent = context.banks_client.get_rent().await.unwrap(); + + for extend_same_slot in [true, false] { + for (num_existing_addresses, num_new_addresses, expected_result) in [ + (0, 0, 
Err(InstructionError::InvalidInstructionData)), + (0, 1, Ok(())), + (0, 10, Ok(())), + (1, 1, Ok(())), + (1, 10, Ok(())), + (255, 1, Ok(())), + (255, 2, Err(InstructionError::InvalidInstructionData)), + (246, 10, Ok(())), + (256, 1, Err(InstructionError::InvalidArgument)), + ] { + let mut lookup_table = + new_address_lookup_table(Some(authority.pubkey()), num_existing_addresses); + if extend_same_slot { + lookup_table.meta.last_extended_slot = current_bank_slot; + } + + let lookup_table_address = Pubkey::new_unique(); + let lookup_table_account = + add_lookup_table_account(&mut context, lookup_table_address, lookup_table.clone()) + .await; + + let mut new_addresses = Vec::with_capacity(num_new_addresses); + new_addresses.resize_with(num_new_addresses, Pubkey::new_unique); + let instruction = extend_lookup_table( + lookup_table_address, + authority.pubkey(), + context.payer.pubkey(), + new_addresses.clone(), + ); + + let mut expected_addresses: Vec = lookup_table.addresses.to_vec(); + expected_addresses.extend(new_addresses); + + let expected_result = expected_result.map(|_| { + let expected_data_len = + lookup_table_account.data().len() + num_new_addresses * PUBKEY_BYTES; + let expected_lamports = rent.minimum_balance(expected_data_len); + let expected_lookup_table = AddressLookupTable { + meta: LookupTableMeta { + last_extended_slot: current_bank_slot, + last_extended_slot_start_index: if extend_same_slot { + 0u8 + } else { + num_existing_addresses as u8 + }, + derivation_slot: lookup_table.meta.derivation_slot, + authority: lookup_table.meta.authority, + _padding: 0u16, + }, + addresses: Cow::Owned(expected_addresses), + }; + ExpectedTableAccount { + lamports: expected_lamports, + data_len: expected_data_len, + state: expected_lookup_table, + } + }); + + let test_case = TestCase { + lookup_table_address, + instruction, + extra_signer: Some(&authority), + expected_result, + }; + + run_test_case(&mut context, test_case).await; + } + } +} + +#[tokio::test] +async fn test_extend_addresses_authority_errors() { + let mut context = setup_test_context().await; + let authority = Keypair::new(); + + for (existing_authority, ix_authority, use_signer, expected_err) in [ + ( + Some(authority.pubkey()), + Keypair::new(), + true, + InstructionError::IncorrectAuthority, + ), + ( + Some(authority.pubkey()), + authority, + false, + InstructionError::MissingRequiredSignature, + ), + (None, Keypair::new(), true, InstructionError::Immutable), + ] { + let lookup_table = new_address_lookup_table(existing_authority, 0); + let lookup_table_address = Pubkey::new_unique(); + let _ = add_lookup_table_account(&mut context, lookup_table_address, lookup_table.clone()) + .await; + + let num_new_addresses = 1; + let mut new_addresses = Vec::with_capacity(num_new_addresses); + new_addresses.resize_with(num_new_addresses, Pubkey::new_unique); + let mut instruction = extend_lookup_table( + lookup_table_address, + ix_authority.pubkey(), + context.payer.pubkey(), + new_addresses.clone(), + ); + if !use_signer { + instruction.accounts[1].is_signer = false; + } + + let mut expected_addresses: Vec = lookup_table.addresses.to_vec(); + expected_addresses.extend(new_addresses); + + let extra_signer = if use_signer { + Some(&ix_authority) + } else { + None + }; + + let test_case = TestCase { + lookup_table_address, + instruction, + extra_signer, + expected_result: Err(expected_err), + }; + + run_test_case(&mut context, test_case).await; + } +} diff --git a/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs 
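The extend tests above mirror what a client does off-chain: derive the table address from an authority and a recent slot, create the table, then extend it with new addresses. The sketch below shows that flow using the builders added in programs/address-lookup-table/src/instruction.rs; build_create_and_extend is a hypothetical helper, and the keypairs, recent slot, and blockhash are assumed to be supplied by the caller.

// Illustrative client-side sketch: create a lookup table and extend it in one
// transaction, signed by both the fee payer and the table authority.
use {
    solana_address_lookup_table_program::instruction::{create_lookup_table, extend_lookup_table},
    solana_sdk::{
        clock::Slot,
        hash::Hash,
        pubkey::Pubkey,
        signature::{Keypair, Signer},
        transaction::Transaction,
    },
};

fn build_create_and_extend(
    payer: &Keypair,
    authority: &Keypair,
    recent_slot: Slot,
    recent_blockhash: Hash,
    new_addresses: Vec<Pubkey>,
) -> (Transaction, Pubkey) {
    // `create_lookup_table` returns both the instruction and the program-derived
    // address the table will live at (derived from the authority and recent slot).
    let (create_ix, table_address) =
        create_lookup_table(authority.pubkey(), payer.pubkey(), recent_slot);
    let extend_ix = extend_lookup_table(
        table_address,
        authority.pubkey(),
        payer.pubkey(),
        new_addresses,
    );
    let tx = Transaction::new_signed_with_payer(
        &[create_ix, extend_ix],
        Some(&payer.pubkey()),
        &[payer, authority],
        recent_blockhash,
    );
    (tx, table_address)
}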
b/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs new file mode 100644 index 00000000000000..acb638a39fb4ab --- /dev/null +++ b/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs @@ -0,0 +1,141 @@ +use { + assert_matches::assert_matches, + common::{ + add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, + }, + solana_address_lookup_table_program::{ + instruction::freeze_lookup_table, state::AddressLookupTable, + }, + solana_program_test::*, + solana_sdk::{ + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, + }, +}; + +mod common; + +#[tokio::test] +async fn test_freeze_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let mut initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account( + &mut context, + lookup_table_address, + initialized_table.clone(), + ) + .await; + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let transaction = Transaction::new_signed_with_payer( + &[freeze_lookup_table( + lookup_table_address, + authority.pubkey(), + )], + Some(&payer.pubkey()), + &[payer, &authority], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + let table_account = client + .get_account(lookup_table_address) + .await + .unwrap() + .unwrap(); + let lookup_table = AddressLookupTable::deserialize(&table_account.data).unwrap(); + assert_eq!(lookup_table.meta.authority, None); + + // Check that only the authority changed + initialized_table.meta.authority = None; + assert_eq!(initialized_table, lookup_table); +} + +#[tokio::test] +async fn test_freeze_immutable_lookup_table() { + let mut context = setup_test_context().await; + + let initialized_table = new_address_lookup_table(None, 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let authority = Keypair::new(); + let ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::Immutable, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_lookup_table_with_wrong_authority() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let wrong_authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = freeze_lookup_table(lookup_table_address, wrong_authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&wrong_authority), + InstructionError::IncorrectAuthority, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_lookup_table_without_signing() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let mut ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + 
InstructionError::MissingRequiredSignature, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_empty_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 0); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::InvalidInstructionData, + ) + .await; +} diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml new file mode 100644 index 00000000000000..8062cb31b874f0 --- /dev/null +++ b/programs/address-lookup-table/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "solana-address-lookup-table-program" +version = "1.10.0" +description = "Solana address lookup table program" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +documentation = "https://docs.rs/solana-address-loookup-table-program" +edition = "2021" + +[dependencies] +bincode = "1.3.3" +bytemuck = "1.7.2" +log = "0.4.14" +num-derive = "0.3" +num-traits = "0.2" +serde = { version = "1.0.127", features = ["derive"] } +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.10.0" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.10.0" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.10.0" } +solana-sdk = { path = "../../sdk", version = "=1.10.0" } +thiserror = "1.0" + +[build-dependencies] +rustc_version = "0.4" + +[lib] +crate-type = ["lib"] +name = "solana_address_lookup_table_program" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/address-lookup-table/build.rs b/programs/address-lookup-table/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/programs/address-lookup-table/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/programs/address-lookup-table/src/instruction.rs b/programs/address-lookup-table/src/instruction.rs new file mode 100644 index 00000000000000..6ba3dfe808bd46 --- /dev/null +++ b/programs/address-lookup-table/src/instruction.rs @@ -0,0 +1,147 @@ +use { + crate::id, + serde::{Deserialize, Serialize}, + solana_sdk::{ + clock::Slot, + instruction::{AccountMeta, Instruction}, + pubkey::Pubkey, + system_program, + }, +}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub enum ProgramInstruction { + /// Create an address lookup table + /// + /// # Account references + /// 0. `[WRITE]` Uninitialized address lookup table account + /// 1. `[SIGNER]` Account used to derive and control the new address lookup table. + /// 2. `[SIGNER, WRITE]` Account that will fund the new address lookup table. + /// 3. `[]` System program for CPI. + CreateLookupTable { + /// A recent slot must be used in the derivation path + /// for each initialized table. When closing table accounts, + /// the initialization slot must no longer be "recent" to prevent + /// address tables from being recreated with reordered or + /// otherwise malicious addresses. + recent_slot: Slot, + /// Address tables are always initialized at program-derived + /// addresses using the funding address, recent blockhash, and + /// the user-passed `bump_seed`. 
+ bump_seed: u8, + }, + + /// Permanently freeze a address lookup table, making it immutable. + /// + /// # Account references + /// 0. `[WRITE]` Address lookup table account to freeze + /// 1. `[SIGNER]` Current authority + FreezeLookupTable, + + /// Extend an address lookup table with new addresses + /// + /// # Account references + /// 0. `[WRITE]` Address lookup table account to extend + /// 1. `[SIGNER]` Current authority + /// 2. `[SIGNER, WRITE]` Account that will fund the table reallocation + /// 3. `[]` System program for CPI. + ExtendLookupTable { new_addresses: Vec }, + + /// Close an address lookup table account + /// + /// # Account references + /// 0. `[WRITE]` Address lookup table account to close + /// 1. `[SIGNER]` Current authority + /// 2. `[WRITE]` Recipient of closed account lamports + CloseLookupTable, +} + +/// Derives the address of an address table account from a wallet address and a recent block's slot. +pub fn derive_lookup_table_address( + authority_address: &Pubkey, + recent_block_slot: Slot, +) -> (Pubkey, u8) { + Pubkey::find_program_address( + &[authority_address.as_ref(), &recent_block_slot.to_le_bytes()], + &id(), + ) +} + +/// Constructs an instruction to create a table account and returns +/// the instruction and the table account's derived address. +pub fn create_lookup_table( + authority_address: Pubkey, + payer_address: Pubkey, + recent_slot: Slot, +) -> (Instruction, Pubkey) { + let (lookup_table_address, bump_seed) = + derive_lookup_table_address(&authority_address, recent_slot); + let instruction = Instruction::new_with_bincode( + id(), + &ProgramInstruction::CreateLookupTable { + recent_slot, + bump_seed, + }, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + AccountMeta::new(payer_address, true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + (instruction, lookup_table_address) +} + +/// Constructs an instruction that freezes an address lookup +/// table so that it can never be closed or extended again. Empty +/// lookup tables cannot be frozen. +pub fn freeze_lookup_table(lookup_table_address: Pubkey, authority_address: Pubkey) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::FreezeLookupTable, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + ], + ) +} + +/// Constructs an instruction which extends an address lookup +/// table account with new addresses. +pub fn extend_lookup_table( + lookup_table_address: Pubkey, + authority_address: Pubkey, + payer_address: Pubkey, + new_addresses: Vec, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::ExtendLookupTable { new_addresses }, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + AccountMeta::new(payer_address, true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ) +} + +/// Returns an instruction that closes an address lookup table +/// account. The account will be deallocated and the lamports +/// will be drained to the recipient address. 
+pub fn close_lookup_table( + lookup_table_address: Pubkey, + authority_address: Pubkey, + recipient_address: Pubkey, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::CloseLookupTable, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + AccountMeta::new(recipient_address, false), + ], + ) +} diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs new file mode 100644 index 00000000000000..11433e64cabd0c --- /dev/null +++ b/programs/address-lookup-table/src/lib.rs @@ -0,0 +1,11 @@ +#![allow(incomplete_features)] +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] + +use solana_sdk::declare_id; + +pub mod instruction; +pub mod processor; +pub mod state; + +declare_id!("AddressLookupTab1e1111111111111111111111111"); diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs new file mode 100644 index 00000000000000..ce2b9f161af8ae --- /dev/null +++ b/programs/address-lookup-table/src/processor.rs @@ -0,0 +1,388 @@ +use { + crate::{ + instruction::ProgramInstruction, + state::{ + AddressLookupTable, LookupTableMeta, ProgramState, LOOKUP_TABLE_MAX_ADDRESSES, + LOOKUP_TABLE_META_SIZE, + }, + }, + solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, + solana_sdk::{ + account::{ReadableAccount, WritableAccount}, + account_utils::State, + clock::Slot, + instruction::InstructionError, + keyed_account::keyed_account_at_index, + program_utils::limited_deserialize, + pubkey::{Pubkey, PUBKEY_BYTES}, + slot_hashes::{SlotHashes, MAX_ENTRIES}, + system_instruction, + sysvar::{ + clock::{self, Clock}, + rent::{self, Rent}, + slot_hashes, + }, + }, + std::convert::TryFrom, +}; + +pub fn process_instruction( + first_instruction_account: usize, + instruction_data: &[u8], + invoke_context: &mut InvokeContext, +) -> Result<(), InstructionError> { + match limited_deserialize(instruction_data)? { + ProgramInstruction::CreateLookupTable { + recent_slot, + bump_seed, + } => Processor::create_lookup_table( + invoke_context, + first_instruction_account, + recent_slot, + bump_seed, + ), + ProgramInstruction::FreezeLookupTable => { + Processor::freeze_lookup_table(invoke_context, first_instruction_account) + } + ProgramInstruction::ExtendLookupTable { new_addresses } => { + Processor::extend_lookup_table(invoke_context, first_instruction_account, new_addresses) + } + ProgramInstruction::CloseLookupTable => { + Processor::close_lookup_table(invoke_context, first_instruction_account) + } + } +} + +fn checked_add(a: usize, b: usize) -> Result { + a.checked_add(b).ok_or(InstructionError::ArithmeticOverflow) +} + +pub struct Processor; +impl Processor { + fn create_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + untrusted_recent_slot: Slot, + bump_seed: u8, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.data_len()? 
> 0 { + ic_msg!(invoke_context, "Table account must not be allocated"); + return Err(InstructionError::AccountAlreadyInitialized); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + let authority_key = *authority_account.signer_key().ok_or_else(|| { + ic_msg!(invoke_context, "Authority account must be a signer"); + InstructionError::MissingRequiredSignature + })?; + + let payer_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 2)?)?; + let payer_key = *payer_account.signer_key().ok_or_else(|| { + ic_msg!(invoke_context, "Payer account must be a signer"); + InstructionError::MissingRequiredSignature + })?; + + let derivation_slot = { + let slot_hashes: SlotHashes = invoke_context.get_sysvar(&slot_hashes::id())?; + if slot_hashes.get(&untrusted_recent_slot).is_some() { + Ok(untrusted_recent_slot) + } else { + ic_msg!( + invoke_context, + "{} is not a recent slot", + untrusted_recent_slot + ); + Err(InstructionError::InvalidInstructionData) + } + }?; + + // Use a derived address to ensure that an address table can never be + // initialized more than once at the same address. + let derived_table_key = Pubkey::create_program_address( + &[ + authority_key.as_ref(), + &derivation_slot.to_le_bytes(), + &[bump_seed], + ], + &crate::id(), + )?; + + let table_key = *lookup_table_account.unsigned_key(); + if table_key != derived_table_key { + ic_msg!( + invoke_context, + "Table address must match derived address: {}", + derived_table_key + ); + return Err(InstructionError::InvalidArgument); + } + + let table_account_data_len = LOOKUP_TABLE_META_SIZE; + let rent: Rent = invoke_context.get_sysvar(&rent::id())?; + let required_lamports = rent + .minimum_balance(table_account_data_len) + .max(1) + .saturating_sub(lookup_table_account.lamports()?); + + if required_lamports > 0 { + invoke_context.native_invoke( + system_instruction::transfer(&payer_key, &table_key, required_lamports), + &[payer_key], + )?; + } + + invoke_context.native_invoke( + system_instruction::allocate(&table_key, table_account_data_len as u64), + &[table_key], + )?; + + invoke_context.native_invoke( + system_instruction::assign(&table_key, &crate::id()), + &[table_key], + )?; + + let keyed_accounts = invoke_context.get_keyed_accounts()?; + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + lookup_table_account.set_state(&ProgramState::LookupTable(LookupTableMeta::new( + authority_key, + derivation_slot, + )))?; + + Ok(()) + } + + fn freeze_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? 
!= crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + ic_msg!(invoke_context, "Lookup table is already frozen"); + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + if lookup_table.addresses.is_empty() { + ic_msg!(invoke_context, "Empty lookup tables cannot be frozen"); + return Err(InstructionError::InvalidInstructionData); + } + + let mut lookup_table_meta = lookup_table.meta; + drop(lookup_table_account_ref); + + lookup_table_meta.authority = None; + AddressLookupTable::overwrite_meta_data( + lookup_table_account + .try_account_ref_mut()? + .data_as_mut_slice(), + lookup_table_meta, + )?; + + Ok(()) + } + + fn extend_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + new_addresses: Vec, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? != crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let payer_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 2)?)?; + let payer_key = if let Some(payer_key) = payer_account.signer_key() { + *payer_key + } else { + ic_msg!(invoke_context, "Payer account must be a signer"); + return Err(InstructionError::MissingRequiredSignature); + }; + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let mut lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + if lookup_table.addresses.len() >= LOOKUP_TABLE_MAX_ADDRESSES { + ic_msg!( + invoke_context, + "Lookup table is full and cannot contain more addresses" + ); + return Err(InstructionError::InvalidArgument); + } + + if new_addresses.is_empty() { + ic_msg!(invoke_context, "Must extend with at least one address"); + return Err(InstructionError::InvalidInstructionData); + } + + let new_table_addresses_len = lookup_table + .addresses + .len() + .saturating_add(new_addresses.len()); + if new_table_addresses_len > LOOKUP_TABLE_MAX_ADDRESSES { + ic_msg!( + invoke_context, + "Extended lookup table length {} would exceed max capacity of {}", + new_table_addresses_len, + LOOKUP_TABLE_MAX_ADDRESSES + ); + return Err(InstructionError::InvalidInstructionData); + } + + let clock: Clock = invoke_context.get_sysvar(&clock::id())?; + if clock.slot != 
lookup_table.meta.last_extended_slot { + lookup_table.meta.last_extended_slot = clock.slot; + lookup_table.meta.last_extended_slot_start_index = + u8::try_from(lookup_table.addresses.len()).map_err(|_| { + // This is impossible as long as the length of new_addresses + // is non-zero and LOOKUP_TABLE_MAX_ADDRESSES == u8::MAX + 1. + InstructionError::InvalidAccountData + })?; + } + + let lookup_table_meta = lookup_table.meta; + drop(lookup_table_account_ref); + + let new_table_data_len = checked_add( + LOOKUP_TABLE_META_SIZE, + new_table_addresses_len.saturating_mul(PUBKEY_BYTES), + )?; + + { + let mut lookup_table_account_ref_mut = lookup_table_account.try_account_ref_mut()?; + AddressLookupTable::overwrite_meta_data( + lookup_table_account_ref_mut.data_as_mut_slice(), + lookup_table_meta, + )?; + + let table_data = lookup_table_account_ref_mut.data_mut(); + for new_address in new_addresses { + table_data.extend_from_slice(new_address.as_ref()); + } + } + + let rent: Rent = invoke_context.get_sysvar(&rent::id())?; + let required_lamports = rent + .minimum_balance(new_table_data_len) + .max(1) + .saturating_sub(lookup_table_account.lamports()?); + + let table_key = *lookup_table_account.unsigned_key(); + if required_lamports > 0 { + invoke_context.native_invoke( + system_instruction::transfer(&payer_key, &table_key, required_lamports), + &[payer_key], + )?; + } + + Ok(()) + } + + fn close_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? != crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let recipient_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 2)?)?; + if recipient_account.unsigned_key() == lookup_table_account.unsigned_key() { + ic_msg!( + invoke_context, + "Lookup table cannot be the recipient of reclaimed lamports" + ); + return Err(InstructionError::InvalidArgument); + } + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + + // Assert that the slot used in the derivation path of the lookup table address + // is no longer recent and can't be reused to initialize an account at the same address. + let slot_hashes: SlotHashes = invoke_context.get_sysvar(&slot_hashes::id())?; + if let Some(position) = slot_hashes.position(&lookup_table.meta.derivation_slot) { + let expiration = MAX_ENTRIES.saturating_sub(position); + ic_msg!( + invoke_context, + "Table cannot be closed until its derivation slot expires in {} blocks", + expiration + ); + return Err(InstructionError::InvalidArgument); + } + + drop(lookup_table_account_ref); + + let withdrawn_lamports = lookup_table_account.lamports()?; + recipient_account + .try_account_ref_mut()? 
+ .checked_add_lamports(withdrawn_lamports)?; + + let mut lookup_table_account = lookup_table_account.try_account_ref_mut()?; + lookup_table_account.set_data(Vec::new()); + lookup_table_account.set_lamports(0); + + Ok(()) + } +} diff --git a/programs/address-lookup-table/src/state.rs b/programs/address-lookup-table/src/state.rs new file mode 100644 index 00000000000000..906487962bebed --- /dev/null +++ b/programs/address-lookup-table/src/state.rs @@ -0,0 +1,198 @@ +use { + serde::{Deserialize, Serialize}, + solana_frozen_abi_macro::{AbiEnumVisitor, AbiExample}, + solana_sdk::{clock::Slot, instruction::InstructionError, pubkey::Pubkey}, + std::borrow::Cow, +}; + +/// The maximum number of addresses that a lookup table can hold +pub const LOOKUP_TABLE_MAX_ADDRESSES: usize = 256; + +/// The serialized size of lookup table metadata +pub const LOOKUP_TABLE_META_SIZE: usize = 56; + +/// Program account states +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, AbiExample, AbiEnumVisitor)] +#[allow(clippy::large_enum_variant)] +pub enum ProgramState { + /// Account is not initialized. + Uninitialized, + /// Initialized `LookupTable` account. + LookupTable(LookupTableMeta), +} + +/// Address lookup table metadata +#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Clone, AbiExample)] +pub struct LookupTableMeta { + /// The slot used to derive the table's address. The table cannot + /// be closed until the derivation slot is no longer "recent" + /// (not accessible in the `SlotHashes` sysvar). + pub derivation_slot: Slot, + /// The slot that the table was last extended. Address tables may + /// only be used to lookup addresses that were extended before + /// the current bank's slot. + pub last_extended_slot: Slot, + /// The start index where the table was last extended from during + /// the `last_extended_slot`. + pub last_extended_slot_start_index: u8, + /// Authority address which must sign for each modification. + pub authority: Option, + // Padding to keep addresses 8-byte aligned + pub _padding: u16, + // Raw list of addresses follows this serialized structure in + // the account's data, starting from `LOOKUP_TABLE_META_SIZE`. +} + +impl LookupTableMeta { + pub fn new(authority: Pubkey, derivation_slot: Slot) -> Self { + LookupTableMeta { + derivation_slot, + authority: Some(authority), + ..LookupTableMeta::default() + } + } +} + +#[derive(Debug, PartialEq, Clone, AbiExample)] +pub struct AddressLookupTable<'a> { + pub meta: LookupTableMeta, + pub addresses: Cow<'a, [Pubkey]>, +} + +impl<'a> AddressLookupTable<'a> { + /// Serialize an address table's updated meta data and zero + /// any leftover bytes. + pub fn overwrite_meta_data( + data: &mut [u8], + lookup_table_meta: LookupTableMeta, + ) -> Result<(), InstructionError> { + let meta_data = data + .get_mut(0..LOOKUP_TABLE_META_SIZE) + .ok_or(InstructionError::InvalidAccountData)?; + meta_data.fill(0); + bincode::serialize_into(meta_data, &ProgramState::LookupTable(lookup_table_meta)) + .map_err(|_| InstructionError::GenericError)?; + Ok(()) + } + + /// Serialize an address table including its addresses + pub fn serialize_for_tests(self, data: &mut Vec) -> Result<(), InstructionError> { + data.resize(LOOKUP_TABLE_META_SIZE, 0); + Self::overwrite_meta_data(data, self.meta)?; + self.addresses.iter().for_each(|address| { + data.extend_from_slice(address.as_ref()); + }); + Ok(()) + } + + /// Efficiently deserialize an address table without allocating + /// for stored addresses. 
+ pub fn deserialize(data: &'a [u8]) -> Result<AddressLookupTable<'a>, InstructionError> { + let program_state: ProgramState = + bincode::deserialize(data).map_err(|_| InstructionError::InvalidAccountData)?; + + let meta = match program_state { + ProgramState::LookupTable(meta) => Ok(meta), + ProgramState::Uninitialized => Err(InstructionError::UninitializedAccount), + }?; + + let raw_addresses_data = data.get(LOOKUP_TABLE_META_SIZE..).ok_or({ + // Should be impossible because table accounts must + // always be LOOKUP_TABLE_META_SIZE in length + InstructionError::InvalidAccountData + })?; + let addresses: &[Pubkey] = bytemuck::try_cast_slice(raw_addresses_data).map_err(|_| { + // Should be impossible because raw address data + // should be aligned and sized in multiples of 32 bytes + InstructionError::InvalidAccountData + })?; + + Ok(Self { + meta, + addresses: Cow::Borrowed(addresses), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + impl AddressLookupTable<'_> { + fn new_for_tests(meta: LookupTableMeta, num_addresses: usize) -> Self { + let mut addresses = Vec::with_capacity(num_addresses); + addresses.resize_with(num_addresses, Pubkey::new_unique); + AddressLookupTable { + meta, + addresses: Cow::Owned(addresses), + } + } + } + + impl LookupTableMeta { + fn new_for_tests() -> Self { + Self { + authority: Some(Pubkey::new_unique()), + ..LookupTableMeta::default() + } + } + } + + #[test] + fn test_lookup_table_meta_size() { + let lookup_table = ProgramState::LookupTable(LookupTableMeta::new_for_tests()); + let meta_size = bincode::serialized_size(&lookup_table).unwrap(); + assert!(meta_size as usize <= LOOKUP_TABLE_META_SIZE); + assert_eq!(meta_size as usize, 56); + + let lookup_table = ProgramState::LookupTable(LookupTableMeta::default()); + let meta_size = bincode::serialized_size(&lookup_table).unwrap(); + assert!(meta_size as usize <= LOOKUP_TABLE_META_SIZE); + assert_eq!(meta_size as usize, 24); + } + + #[test] + fn test_overwrite_meta_data() { + let meta = LookupTableMeta::new_for_tests(); + let empty_table = ProgramState::LookupTable(meta.clone()); + let mut serialized_table_1 = bincode::serialize(&empty_table).unwrap(); + serialized_table_1.resize(LOOKUP_TABLE_META_SIZE, 0); + + let address_table = AddressLookupTable::new_for_tests(meta, 0); + let mut serialized_table_2 = Vec::new(); + serialized_table_2.resize(LOOKUP_TABLE_META_SIZE, 0); + AddressLookupTable::overwrite_meta_data(&mut serialized_table_2, address_table.meta) + .unwrap(); + + assert_eq!(serialized_table_1, serialized_table_2); + } + + #[test] + fn test_deserialize() { + assert_eq!( + AddressLookupTable::deserialize(&[]).err(), + Some(InstructionError::InvalidAccountData), + ); + + assert_eq!( + AddressLookupTable::deserialize(&[0u8; LOOKUP_TABLE_META_SIZE]).err(), + Some(InstructionError::UninitializedAccount), + ); + + fn test_case(num_addresses: usize) { + let lookup_table_meta = LookupTableMeta::new_for_tests(); + let address_table = AddressLookupTable::new_for_tests(lookup_table_meta, num_addresses); + let mut address_table_data = Vec::new(); + AddressLookupTable::serialize_for_tests(address_table.clone(), &mut address_table_data) + .unwrap(); + assert_eq!( + AddressLookupTable::deserialize(&address_table_data).unwrap(), + address_table, + ); + } + + for case in [0, 1, 10, 255, 256] { + test_case(case); + } + } +} diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index c63a059d0c035a..793c726eaeefd4 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -95,9 +95,9 @@ checksum =
"9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.6", @@ -458,6 +458,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen", +] + +[[package]] +name = "console_log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" +dependencies = [ + "log", + "web-sys", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -618,12 +638,13 @@ dependencies = [ [[package]] name = "dashmap" -version = "4.0.2" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +checksum = "b799062aaf67eb976af3bdca031ee6f846d2f0a5710ddbb0d2efee33f3cc4760" dependencies = [ "cfg-if 1.0.0", "num_cpus", + "parking_lot", "rayon", ] @@ -1419,9 +1440,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.49" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -2363,9 +2384,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "b4ad69dfbd3e45369132cc64e6748c2d65cdfb001a2b1c232d128b4ad60561c1" dependencies = [ "serde_derive", ] @@ -2381,9 +2402,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "b710a83c4e0dff6a3d511946b95274ad9ca9e5d3ae497b63fda866ac955358d2" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.6", @@ -2474,6 +2495,16 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f935e31cf406e8c0e96c2815a5516181b7004ae8c5f296293221e9b1e356bd" +dependencies = [ + "digest 0.10.0", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.1" @@ -2541,6 +2572,24 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-address-lookup-table-program" +version = "1.10.0" +dependencies = [ + "bincode", + "bytemuck", + "log", + "num-derive", + "num-traits", + "rustc_version 0.4.0", + "serde", + "solana-frozen-abi 1.10.0", + "solana-frozen-abi-macro 1.10.0", + "solana-program-runtime", + "solana-sdk", + "thiserror", +] + [[package]] name = "solana-banks-client" version = "1.10.0" @@ -3239,7 +3288,7 @@ dependencies = [ "serde_bytes", "serde_derive", "sha2 0.9.8", - "sha3", + "sha3 0.9.1", "solana-frozen-abi 
1.7.6", "solana-frozen-abi-macro 1.7.6", "solana-logger 1.7.6", @@ -3260,8 +3309,12 @@ dependencies = [ "bs58 0.4.0", "bv", "bytemuck", + "console_error_panic_hook", + "console_log", "curve25519-dalek 3.2.0", + "getrandom 0.1.14", "itertools 0.10.3", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log", @@ -3275,12 +3328,13 @@ dependencies = [ "serde_bytes", "serde_derive", "sha2 0.10.0", - "sha3", + "sha3 0.10.0", "solana-frozen-abi 1.10.0", "solana-frozen-abi-macro 1.10.0", "solana-logger 1.10.0", "solana-sdk-macro 1.10.0", "thiserror", + "wasm-bindgen", ] [[package]] @@ -3370,6 +3424,8 @@ dependencies = [ "lazy_static", "log", "memmap2 0.5.0", + "num-derive", + "num-traits", "num_cpus", "ouroboros", "rand 0.7.3", @@ -3378,6 +3434,7 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", + "solana-address-lookup-table-program", "solana-bucket-map", "solana-compute-budget-program", "solana-config-program", @@ -3411,12 +3468,13 @@ dependencies = [ "byteorder 1.4.3", "chrono", "derivation-path", - "digest 0.9.0", + "digest 0.10.0", "ed25519-dalek", "ed25519-dalek-bip32", "generic-array 0.14.4", "hmac 0.12.0", "itertools 0.10.3", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log", @@ -3434,7 +3492,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.0", - "sha3", + "sha3 0.10.0", "solana-frozen-abi 1.10.0", "solana-frozen-abi-macro 1.10.0", "solana-logger 1.10.0", @@ -3442,6 +3500,7 @@ dependencies = [ "solana-sdk-macro 1.10.0", "thiserror", "uriparse", + "wasm-bindgen", ] [[package]] @@ -3561,9 +3620,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcd409d0fba8427ef41b5c1ef79dcabb592f1cff144b77e07094b66c010c2d52" +checksum = "5c7a237a92714db63de655e20af29a3b59c007881f2dfbdc2d3838ca3675f45f" dependencies = [ "byteorder 1.4.3", "combine", @@ -4186,9 +4245,9 @@ checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4196,9 +4255,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", @@ -4223,9 +4282,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote 1.0.6", "wasm-bindgen-macro-support", @@ -4233,9 +4292,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2 1.0.24", 
"quote 1.0.6", @@ -4246,9 +4305,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml index 04f1d2c916bb37..dbd7908540de16 100644 --- a/programs/bpf/Cargo.toml +++ b/programs/bpf/Cargo.toml @@ -33,7 +33,7 @@ solana-bpf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.1 solana-cli-output = { path = "../../cli-output", version = "=1.10.0" } solana-logger = { path = "../../logger", version = "=1.10.0" } solana-measure = { path = "../../measure", version = "=1.10.0" } -solana_rbpf = "=0.2.17" +solana_rbpf = "=0.2.18" solana-runtime = { path = "../../runtime", version = "=1.10.0" } solana-program-runtime = { path = "../../program-runtime", version = "=1.10.0" } solana-sdk = { path = "../../sdk", version = "=1.10.0" } diff --git a/programs/bpf/rust/alloc/src/lib.rs b/programs/bpf/rust/alloc/src/lib.rs index 9f59d6aa9a97f8..5e98c4a13be465 100644 --- a/programs/bpf/rust/alloc/src/lib.rs +++ b/programs/bpf/rust/alloc/src/lib.rs @@ -2,7 +2,7 @@ #[macro_use] extern crate alloc; -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64, msg}; use std::{alloc::Layout, mem}; #[no_mangle] @@ -46,7 +46,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for i in 0..ITERS { assert_eq!(*ptr.add(i as usize), i as u8); } - msg!(0x3, 0, 0, 0, u64::from(*ptr.add(42))); + sol_log_64(0x3, 0, 0, 0, u64::from(*ptr.add(42))); assert_eq!(*ptr.add(42), 42); alloc::alloc::dealloc(ptr, layout); } @@ -61,7 +61,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for v in ones.iter() { sum += ones[*v]; } - msg!(0x0, 0, 0, 0, sum as u64); + sol_log_64(0x0, 0, 0, 0, sum as u64); assert_eq!(sum, ITERS); } @@ -74,7 +74,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for i in 0..ITERS { v.push(i); } - msg!(0x4, 0, 0, 0, v.len() as u64); + sol_log_64(0x4, 0, 0, 0, v.len() as u64); assert_eq!(v.len(), ITERS); } diff --git a/programs/bpf/rust/call_depth/src/lib.rs b/programs/bpf/rust/call_depth/src/lib.rs index 7ab2c69b58bf8e..888c491d98a2f6 100644 --- a/programs/bpf/rust/call_depth/src/lib.rs +++ b/programs/bpf/rust/call_depth/src/lib.rs @@ -1,6 +1,6 @@ //! 
Example Rust-based BPF program that tests call depth and stack usage -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64, msg}; #[inline(never)] pub fn recurse(data: &mut [u8]) { @@ -8,7 +8,7 @@ pub fn recurse(data: &mut [u8]) { return; } recurse(&mut data[1..]); - msg!(line!(), 0, 0, 0, data[0]); + sol_log_64(line!() as u64, 0, 0, 0, data[0] as u64); } /// # Safety @@ -17,7 +17,7 @@ pub fn recurse(data: &mut [u8]) { pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 { msg!("Call depth"); let depth = *(input.add(16) as *mut u8); - msg!(line!(), 0, 0, 0, depth); + sol_log_64(line!() as u64, 0, 0, 0, depth as u64); let mut data = Vec::with_capacity(depth as usize); for i in 0_u8..depth { data.push(i); diff --git a/programs/bpf/rust/invoked/src/processor.rs b/programs/bpf/rust/invoked/src/processor.rs index 3b94a69c0c5667..5053203688f26d 100644 --- a/programs/bpf/rust/invoked/src/processor.rs +++ b/programs/bpf/rust/invoked/src/processor.rs @@ -7,6 +7,7 @@ use solana_program::{ account_info::AccountInfo, bpf_loader, entrypoint, entrypoint::{ProgramResult, MAX_PERMITTED_DATA_INCREASE}, + log::sol_log_64, msg, program::{get_return_data, invoke, invoke_signed, set_return_data}, program_error::ProgramError, @@ -105,7 +106,7 @@ fn process_instruction( assert!(accounts[INVOKED_PROGRAM_DUP_INDEX] .try_borrow_mut_data() .is_err()); - msg!(data[0], 0, 0, 0, 0); + sol_log_64(data[0] as u64, 0, 0, 0, 0); } } RETURN_OK => { diff --git a/programs/bpf/rust/iter/src/lib.rs b/programs/bpf/rust/iter/src/lib.rs index 09ce81e5ef1c77..a262deb89f42f7 100644 --- a/programs/bpf/rust/iter/src/lib.rs +++ b/programs/bpf/rust/iter/src/lib.rs @@ -1,7 +1,7 @@ //! Example Rust-based BPF program tests loop iteration extern crate solana_program; -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64}; #[no_mangle] pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { @@ -12,7 +12,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for v in ones.iter() { sum += *v; } - msg!(0xff, 0, 0, 0, sum); + sol_log_64(0xff, 0, 0, 0, sum); assert_eq!(sum, ITERS as u64); SUCCESS diff --git a/programs/bpf/rust/many_args_dep/src/lib.rs b/programs/bpf/rust/many_args_dep/src/lib.rs index 98d9279c14bdef..d43d6f9a301897 100644 --- a/programs/bpf/rust/many_args_dep/src/lib.rs +++ b/programs/bpf/rust/many_args_dep/src/lib.rs @@ -1,7 +1,7 @@ //! 
Solana Rust-based BPF program utility functions and types extern crate solana_program; -use solana_program::msg; +use solana_program::{log::sol_log_64, msg}; pub fn many_args( arg1: u64, @@ -15,8 +15,8 @@ pub fn many_args( arg9: u64, ) -> u64 { msg!("Another package - many_args"); - msg!(arg1, arg2, arg3, arg4, arg5); - msg!(arg6, arg7, arg8, arg9, 0); + sol_log_64(arg1, arg2, arg3, arg4, arg5); + sol_log_64(arg6, arg7, arg8, arg9, 0); arg1 + arg2 + arg3 + arg4 + arg5 + arg6 + arg7 + arg8 + arg9 } @@ -39,8 +39,8 @@ pub fn many_args_sret( arg9: u64, ) -> Ret { msg!("Another package - many_args_sret"); - msg!(arg1, arg2, arg3, arg4, arg5); - msg!(arg6, arg7, arg8, arg9, 0); + sol_log_64(arg1, arg2, arg3, arg4, arg5); + sol_log_64(arg6, arg7, arg8, arg9, 0); Ret { group1: u128::from(arg1) + u128::from(arg2) + u128::from(arg3), group2: u128::from(arg4) + u128::from(arg5) + u128::from(arg6), diff --git a/programs/bpf/rust/param_passing/src/lib.rs b/programs/bpf/rust/param_passing/src/lib.rs index fa81fe5d459246..f3c1c30ee334b1 100644 --- a/programs/bpf/rust/param_passing/src/lib.rs +++ b/programs/bpf/rust/param_passing/src/lib.rs @@ -2,7 +2,7 @@ extern crate solana_program; use solana_bpf_rust_param_passing_dep::{Data, TestDep}; -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64}; #[no_mangle] pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { @@ -17,7 +17,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { }; let test_dep = TestDep::new(&data, 1, 2, 3, 4, 5); - msg!(0, 0, 0, 0, test_dep.thirty); + sol_log_64(0, 0, 0, 0, test_dep.thirty as u64); assert!(test_dep.thirty == 30); SUCCESS diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index fca2ea48955555..efa655679345c4 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -20,7 +20,7 @@ libsecp256k1 = "0.6.0" solana-measure = { path = "../../measure", version = "=1.10.0" } solana-program-runtime = { path = "../../program-runtime", version = "=1.10.0" } solana-sdk = { path = "../../sdk", version = "=1.10.0" } -solana_rbpf = "=0.2.17" +solana_rbpf = "=0.2.18" thiserror = "1.0" [dev-dependencies] diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml index 9d23e5b1cc37d2..64c7573b2c3658 100644 --- a/programs/config/Cargo.toml +++ b/programs/config/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] bincode = "1.3.3" chrono = { version = "0.4.11", features = ["serde"] } -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" solana-program-runtime = { path = "../../program-runtime", version = "=1.10.0" } solana-sdk = { path = "../../sdk", version = "=1.10.0" } diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index bf69c62cf210a4..1fb53aa51004e2 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" log = "0.4.14" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" solana-frozen-abi = { path = "../../frozen-abi", version = "=1.10.0" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.10.0" } diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index d16724d859feea..16641aa7a69b88 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" log = "0.4.14" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" 
solana-frozen-abi = { path = "../../frozen-abi", version = "=1.10.0" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.10.0" } diff --git a/programs/vote/src/vote_instruction.rs b/programs/vote/src/vote_instruction.rs index 9b4c46dd5d138e..2e114cbfa7f0c0 100644 --- a/programs/vote/src/vote_instruction.rs +++ b/programs/vote/src/vote_instruction.rs @@ -161,18 +161,14 @@ pub enum VoteInstruction { /// /// # Account references /// 0. `[Write]` Vote account to vote with - /// 1. `[]` Slot hashes sysvar - /// 2. `[]` Clock sysvar - /// 3. `[SIGNER]` Vote authority + /// 1. `[SIGNER]` Vote authority UpdateVoteState(VoteStateUpdate), /// Update the onchain vote state for the signer along with a switching proof. /// /// # Account references /// 0. `[Write]` Vote account to vote with - /// 1. `[]` Slot hashes sysvar - /// 2. `[]` Clock sysvar - /// 3. `[SIGNER]` Vote authority + /// 1. `[SIGNER]` Vote authority UpdateVoteStateSwitch(VoteStateUpdate, Hash), } @@ -338,8 +334,6 @@ pub fn update_vote_state( ) -> Instruction { let account_metas = vec![ AccountMeta::new(*vote_pubkey, false), - AccountMeta::new_readonly(sysvar::slot_hashes::id(), false), - AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(*authorized_voter_pubkey, true), ]; @@ -358,8 +352,6 @@ pub fn update_vote_state_switch( ) -> Instruction { let account_metas = vec![ AccountMeta::new(*vote_pubkey, false), - AccountMeta::new_readonly(sysvar::slot_hashes::id(), false), - AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(*authorized_voter_pubkey, true), ]; @@ -465,20 +457,23 @@ pub fn process_instruction( } VoteInstruction::UpdateVoteState(vote_state_update) | VoteInstruction::UpdateVoteStateSwitch(vote_state_update, _) => { - inc_new_counter_info!("vote-state-native", 1); - vote_state::process_vote_state_update( - me, - &from_keyed_account::(keyed_account_at_index( - keyed_accounts, - first_instruction_account + 1, - )?)?, - &from_keyed_account::(keyed_account_at_index( - keyed_accounts, - first_instruction_account + 2, - )?)?, - &vote_state_update, - &signers, - ) + if invoke_context + .feature_set + .is_active(&feature_set::allow_votes_to_directly_update_vote_state::id()) + { + inc_new_counter_info!("vote-state-native", 1); + let slot_hashes: SlotHashes = + invoke_context.get_sysvar(&sysvar::slot_hashes::id())?; + vote_state::process_vote_state_update( + me, + slot_hashes.slot_hashes(), + &invoke_context.get_sysvar(&sysvar::clock::id())?, + vote_state_update, + &signers, + ) + } else { + Err(InstructionError::InvalidInstructionData) + } } VoteInstruction::Withdraw(lamports) => { let to = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?; @@ -585,12 +580,19 @@ mod tests { let rent = Rent::default(); let rent_sysvar = (sysvar::rent::id(), bincode::serialize(&rent).unwrap()); + let clock = Clock::default(); + let clock_sysvar = (sysvar::clock::id(), bincode::serialize(&clock).unwrap()); + let slot_hashes = SlotHashes::default(); + let slot_hashes_sysvar = ( + sysvar::slot_hashes::id(), + bincode::serialize(&slot_hashes).unwrap(), + ); solana_program_runtime::invoke_context::mock_process_instruction_with_sysvars( &id(), Vec::new(), &instruction.data, &keyed_accounts, - &[rent_sysvar], + &[rent_sysvar, clock_sysvar, slot_hashes_sysvar], super::process_instruction, ) } diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index eab08f0ac704fe..2435cc47fee346 100644 --- 
a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -38,7 +38,7 @@ pub const INITIAL_LOCKOUT: usize = 2; // Maximum number of credits history to keep around pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64; -// Offset of VoteState::pri : Clone + Debug {or_voters, for determining initialization status without deserialization +// Offset of VoteState::prior_voters, for determining initialization status without deserialization const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82; // VoteTransactionClone hack is done so that we can derive clone on the tower that uses the @@ -1156,14 +1156,14 @@ pub fn process_vote_state_update( vote_account: &KeyedAccount, slot_hashes: &[SlotHash], clock: &Clock, - vote_state_update: &VoteStateUpdate, + vote_state_update: VoteStateUpdate, signers: &HashSet, ) -> Result<(), InstructionError> { let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; - vote_state.check_slots_are_valid(vote_state_update, slot_hashes)?; + vote_state.check_slots_are_valid(&vote_state_update, slot_hashes)?; vote_state.process_new_vote_state( - vote_state_update.lockouts.clone(), + vote_state_update.lockouts, vote_state_update.root, vote_state_update.timestamp, clock.epoch, diff --git a/rbpf-cli/Cargo.toml b/rbpf-cli/Cargo.toml index 3a7bf6751a19b3..e5021af023a2b7 100644 --- a/rbpf-cli/Cargo.toml +++ b/rbpf-cli/Cargo.toml @@ -10,11 +10,11 @@ edition = "2021" [dependencies] clap = "3.0.0-beta.2" -serde = "1.0.130" +serde = "1.0.131" serde_json = "1.0.72" solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" } solana-logger = { path = "../logger", version = "=1.10.0" } solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" } solana-sdk = { path = "../sdk", version = "=1.10.0" } -solana_rbpf = "=0.2.17" +solana_rbpf = "=0.2.18" time = "0.3.5" diff --git a/replica-lib/Cargo.toml b/replica-lib/Cargo.toml index d03eb2aa278152..e0e9ec60819d63 100644 --- a/replica-lib/Cargo.toml +++ b/replica-lib/Cargo.toml @@ -18,10 +18,10 @@ solana-rpc = { path = "../rpc", version = "=1.10.0" } solana-runtime = { path = "../runtime", version = "=1.10.0" } solana-sdk = { path = "../sdk", version = "=1.10.0" } tokio = { version = "1", features = ["full"] } -tonic = { version = "0.6.1", features = ["tls", "transport"] } +tonic = { version = "0.6.2", features = ["tls", "transport"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -tonic-build = "0.6.0" +tonic-build = "0.6.2" diff --git a/replica-node/Cargo.toml b/replica-node/Cargo.toml index e183bae6f5cf01..e76faf2d7c1ee4 100644 --- a/replica-node/Cargo.toml +++ b/replica-node/Cargo.toml @@ -41,4 +41,4 @@ tempfile = "3.2.0" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -tonic-build = "0.6.0" +tonic-build = "0.6.2" diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index 8481121e50adeb..440bedcc18ce53 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -16,7 +16,7 @@ jsonrpc-core = "18.0.0" jsonrpc-core-client = { version = "18.0.0", features = ["ipc", "ws"] } log = "0.4.11" reqwest = { version = "0.11.5", default-features = false, features = ["blocking", "rustls-tls", "json"] } -serde = "1.0.130" +serde = "1.0.131" serde_json = "1.0.72" solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" } solana-client = { path = "../client", version = "=1.10.0" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 5ce13b4e802df9..577caf1c8c94e9 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml 
@@ -14,7 +14,7 @@ base64 = "0.12.3" bincode = "1.3.3" bs58 = "0.4.0" crossbeam-channel = "0.5" -dashmap = "4.0.2" +dashmap = "5.0.0" itertools = "0.10.3" jsonrpc-core = "18.0.0" jsonrpc-core-client = { version = "18.0.0", features = ["ipc", "ws"] } @@ -26,7 +26,7 @@ libc = "0.2.109" log = "0.4.14" rayon = "1.5.1" regex = "1.5.4" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" soketto = "0.7" diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index d898d81cf315d0..ffb1eb24971feb 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -16,7 +16,7 @@ blake3 = "1.2.0" bv = { version = "0.11.1", features = ["serde"] } byteorder = "1.4.3" bzip2 = "0.4.3" -dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } +dashmap = { version = "5.0.0", features = ["rayon", "raw-api"] } crossbeam-channel = "0.5" dir-diff = "0.3.2" flate2 = "1.0.22" @@ -26,12 +26,15 @@ lazy_static = "1.4.0" log = "0.4.14" memmap2 = "0.5.0" num_cpus = "1.13.0" +num-derive = { version = "0.3" } +num-traits = { version = "0.2" } ouroboros = "0.13.0" rand = "0.7.0" rayon = "1.5.1" regex = "1.5.4" -serde = { version = "1.0.130", features = ["rc"] } +serde = { version = "1.0.131", features = ["rc"] } serde_derive = "1.0.103" +solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.10.0" } solana-config-program = { path = "../programs/config", version = "=1.10.0" } solana-compute-budget-program = { path = "../programs/compute-budget", version = "=1.10.0" } solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" } diff --git a/runtime/benches/accounts_index.rs b/runtime/benches/accounts_index.rs index 322e6674640144..0a5215726cb8dc 100644 --- a/runtime/benches/accounts_index.rs +++ b/runtime/benches/accounts_index.rs @@ -5,7 +5,7 @@ extern crate test; use { rand::{thread_rng, Rng}, solana_runtime::{ - accounts_db::AccountInfo, + account_info::AccountInfo, accounts_index::{ AccountSecondaryIndexes, AccountsIndex, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, }, diff --git a/runtime/src/account_info.rs b/runtime/src/account_info.rs new file mode 100644 index 00000000000000..8f522547e7e29c --- /dev/null +++ b/runtime/src/account_info.rs @@ -0,0 +1,43 @@ +use crate::{accounts_db::AppendVecId, accounts_index::ZeroLamport}; + +#[derive(Default, Debug, PartialEq, Clone, Copy)] +pub struct AccountInfo { + /// index identifying the append storage + pub store_id: AppendVecId, + + /// offset into the storage + offset: usize, + + /// needed to track shrink candidacy in bytes. Used to update the number + /// of alive bytes in an AppendVec as newer slots purge outdated entries + stored_size: usize, + + /// lamports in the account used when squashing kept for optimization + /// purposes to remove accounts with zero balance. 
+ lamports: u64, +} + +impl ZeroLamport for AccountInfo { + fn is_zero_lamport(&self) -> bool { + self.lamports == 0 + } +} + +impl AccountInfo { + pub fn new(store_id: AppendVecId, offset: usize, stored_size: usize, lamports: u64) -> Self { + Self { + store_id, + offset, + stored_size, + lamports, + } + } + + pub fn offset(&self) -> usize { + self.offset + } + + pub fn stored_size(&self) -> usize { + self.stored_size + } +} diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 514a97fefc6e93..1370d415e0ca52 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1032,11 +1032,12 @@ impl Accounts { let keys: Vec<_> = txs .zip(results) .filter_map(|(tx, res)| match res { - Err(TransactionError::AccountInUse) => None, - Err(TransactionError::SanitizeFailure) => None, - Err(TransactionError::AccountLoadedTwice) => None, - Err(TransactionError::WouldExceedMaxBlockCostLimit) => None, - Err(TransactionError::WouldExceedMaxAccountCostLimit) => None, + Err(TransactionError::AccountInUse) + | Err(TransactionError::SanitizeFailure) + | Err(TransactionError::AccountLoadedTwice) + | Err(TransactionError::WouldExceedMaxBlockCostLimit) + | Err(TransactionError::WouldExceedMaxAccountCostLimit) + | Err(TransactionError::WouldExceedMaxAccountDataCostLimit) => None, _ => Some(tx.get_account_locks(demote_program_write_locks)), }) .collect(); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index df9078bd64a12e..f0cac08fd4fe49 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -22,6 +22,7 @@ use std::{thread::sleep, time::Duration}; use { crate::{ + account_info::AccountInfo, accounts_background_service::{DroppedSlotsSender, SendDroppedBankCallback}, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats, PreviousPass}, @@ -113,7 +114,7 @@ const MAX_ITEMS_PER_CHUNK: Slot = 2_500; // operations that take a storage entry can maintain a common interface // when interacting with cached accounts. This id is "virtual" in that it // doesn't actually refer to an actual storage entry. -const CACHE_VIRTUAL_STORAGE_ID: usize = AppendVecId::MAX; +const CACHE_VIRTUAL_STORAGE_ID: AppendVecId = AppendVecId::MAX; // A specially reserved write version (identifier for ordering writes in an AppendVec) // for entries in the cache, so that operations that take a storage entry can maintain @@ -220,6 +221,19 @@ pub struct ErrorCounters { pub invalid_writable_account: usize, } +#[derive(Debug, Default, Clone, Copy)] +pub struct IndexGenerationInfo { + pub accounts_data_len: u64, +} + +#[derive(Debug, Default, Clone, Copy)] +struct SlotIndexGenerationInfo { + insert_time_us: u64, + num_accounts: u64, + num_accounts_rent_exempt: u64, + accounts_data_len: u64, +} + #[derive(Default, Debug)] struct GenerateIndexTimings { pub index_time: u64, @@ -234,6 +248,7 @@ struct GenerateIndexTimings { pub index_flush_us: u64, pub rent_exempt: u64, pub total_duplicates: u64, + pub accounts_data_len_dedup_time_us: u64, } #[derive(Default, Debug, PartialEq)] @@ -280,26 +295,15 @@ impl GenerateIndexTimings { i64 ), ("total_items", self.total_items as i64, i64), + ( + "accounts_data_len_dedup_time_us", + self.accounts_data_len_dedup_time_us as i64, + i64 + ), ); } } -#[derive(Default, Debug, PartialEq, Clone, Copy)] -pub struct AccountInfo { - /// index identifying the append storage - store_id: AppendVecId, - - /// offset into the storage - offset: usize, - - /// needed to track shrink candidacy in bytes. 
Used to update the number - /// of alive bytes in an AppendVec as newer slots purge outdated entries - stored_size: usize, - - /// lamports in the account used when squashing kept for optimization - /// purposes to remove accounts with zero balance. - lamports: u64, -} impl IsCached for AccountInfo { fn is_cached(&self) -> bool { self.store_id == CACHE_VIRTUAL_STORAGE_ID @@ -308,12 +312,6 @@ impl IsCached for AccountInfo { impl IndexValue for AccountInfo {} -impl ZeroLamport for AccountInfo { - fn is_zero_lamport(&self) -> bool { - self.lamports == 0 - } -} - impl ZeroLamport for AccountSharedData { fn is_zero_lamport(&self) -> bool { self.lamports() == 0 @@ -367,12 +365,13 @@ impl<'a> MultiThreadProgress<'a> { } /// An offset into the AccountsDb::storage vector +pub type AtomicAppendVecId = AtomicUsize; pub type AppendVecId = usize; pub type SnapshotStorage = Vec>; pub type SnapshotStorages = Vec; // Each slot has a set of storage entries. -pub(crate) type SlotStores = Arc>>>; +pub(crate) type SlotStores = Arc>>>; type AccountSlots = HashMap>; type AppendVecOffsets = HashMap>; @@ -629,7 +628,7 @@ struct CleanKeyTimings { /// Persistent storage structure holding the accounts #[derive(Debug)] pub struct AccountStorageEntry { - pub(crate) id: AtomicUsize, + pub(crate) id: AtomicAppendVecId, pub(crate) slot: AtomicU64, @@ -655,13 +654,13 @@ pub struct AccountStorageEntry { } impl AccountStorageEntry { - pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self { + pub fn new(path: &Path, slot: Slot, id: AppendVecId, file_size: u64) -> Self { let tail = AppendVec::file_name(slot, id); let path = Path::new(path).join(tail); let accounts = AppendVec::new(&path, true, file_size as usize); Self { - id: AtomicUsize::new(id), + id: AtomicAppendVecId::new(id), slot: AtomicU64::new(slot), accounts, count_and_status: RwLock::new((0, AccountStorageStatus::Available)), @@ -677,7 +676,7 @@ impl AccountStorageEntry { num_accounts: usize, ) -> Self { Self { - id: AtomicUsize::new(id), + id: AtomicAppendVecId::new(id), slot: AtomicU64::new(slot), accounts, count_and_status: RwLock::new((0, AccountStorageStatus::Available)), @@ -707,7 +706,7 @@ impl AccountStorageEntry { *count_and_status = (count, status); } - pub fn recycle(&self, slot: Slot, id: usize) { + pub fn recycle(&self, slot: Slot, id: AppendVecId) { let mut count_and_status = self.count_and_status.write().unwrap(); self.accounts.reset(); *count_and_status = (0, AccountStorageStatus::Available); @@ -981,7 +980,7 @@ pub struct AccountsDb { recycle_stores: RwLock, /// distribute the accounts across storage lists - pub next_id: AtomicUsize, + pub next_id: AtomicAppendVecId, /// Set of shrinkable stores organized by map of slot to append_vec_id pub shrink_candidate_slots: Mutex, @@ -1572,7 +1571,7 @@ impl AccountsDb { read_only_accounts_cache: ReadOnlyAccountsCache::new(MAX_READ_ONLY_CACHE_DATA_SIZE), recycle_stores: RwLock::new(RecycleStores::default()), uncleaned_pubkeys: DashMap::new(), - next_id: AtomicUsize::new(0), + next_id: AtomicAppendVecId::new(0), shrink_candidate_slots_v1: Mutex::new(Vec::new()), shrink_candidate_slots: Mutex::new(HashMap::new()), write_cache_limit_bytes: None, @@ -1844,7 +1843,7 @@ impl AccountsDb { no_delete }; if no_delete { - let mut pending_store_ids: HashSet = HashSet::new(); + let mut pending_store_ids = HashSet::new(); for (_bank_id, account_info) in account_infos { if !already_counted.contains(&account_info.store_id) { pending_store_ids.insert(account_info.store_id); @@ -2130,7 +2129,7 @@ impl 
AccountsDb { // found info relative to max_clean_root let (slot, account_info) = &slot_list[index_in_slot_list]; - if account_info.lamports == 0 { + if account_info.is_zero_lamport() { useless = false; purges_zero_lamports.insert( *pubkey, @@ -2234,7 +2233,7 @@ impl AccountsDb { // `clean_accounts_older_than_root()` let was_reclaimed = removed_accounts .get(&account_info.store_id) - .map(|store_removed| store_removed.contains(&account_info.offset)) + .map(|store_removed| store_removed.contains(&account_info.offset())) .unwrap_or(false); if was_reclaimed { return false; @@ -2590,7 +2589,7 @@ impl AccountsDb { if let Some(locked_entry) = lookup { let is_alive = locked_entry.slot_list().iter().any(|(_slot, i)| { i.store_id == stored_account.store_id - && i.offset == stored_account.account.offset + && i.offset() == stored_account.account.offset }); if !is_alive { // This pubkey was found in the storage, but no longer exists in the index. @@ -3088,7 +3087,12 @@ impl AccountsDb { bank_id, |pubkey, (account_info, slot)| { let account_slot = self - .get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset) + .get_account_accessor( + slot, + pubkey, + account_info.store_id, + account_info.offset(), + ) .get_loaded_account() .map(|loaded_account| (pubkey, loaded_account.take_account(), slot)); scan_func(&mut collector, account_slot) @@ -3116,7 +3120,12 @@ impl AccountsDb { ancestors, |pubkey, (account_info, slot)| { if let Some(loaded_account) = self - .get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset) + .get_account_accessor( + slot, + pubkey, + account_info.store_id, + account_info.offset(), + ) .get_loaded_account() { scan_func(&mut collector, (pubkey, loaded_account, slot)); @@ -3157,7 +3166,12 @@ impl AccountsDb { // changes to the index entry. // For details, see the comment in retry_to_get_account_accessor() let account_slot = self - .get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset) + .get_account_accessor( + slot, + pubkey, + account_info.store_id, + account_info.offset(), + ) .get_loaded_account() .map(|loaded_account| (pubkey, loaded_account.take_account(), slot)) .unwrap(); @@ -3198,7 +3212,12 @@ impl AccountsDb { index_key, |pubkey, (account_info, slot)| { let account_slot = self - .get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset) + .get_account_accessor( + slot, + pubkey, + account_info.store_id, + account_info.offset(), + ) .get_loaded_account() .map(|loaded_account| (pubkey, loaded_account.take_account(), slot)); scan_func(&mut collector, account_slot) @@ -3328,13 +3347,9 @@ impl AccountsDb { }; let slot_list = lock.slot_list(); - let ( - slot, - AccountInfo { - store_id, offset, .. - }, - ) = slot_list[index]; - + let (slot, info) = slot_list[index]; + let store_id = info.store_id; + let offset = info.offset(); let some_from_slow_path = if clone_in_lock { // the fast path must have failed.... so take the slower approach // of copying potentially large Account::data inside the lock. 
@@ -3359,7 +3374,7 @@ impl AccountsDb { fn retry_to_get_account_accessor<'a>( &'a self, mut slot: Slot, - mut store_id: usize, + mut store_id: AppendVecId, mut offset: usize, ancestors: &'a Ancestors, pubkey: &'a Pubkey, @@ -3697,7 +3712,7 @@ impl AccountsDb { &'a self, slot: Slot, pubkey: &'a Pubkey, - store_id: usize, + store_id: AppendVecId, offset: usize, ) -> LoadedAccountAccessor<'a> { if store_id == CACHE_VIRTUAL_STORAGE_ID { @@ -4460,14 +4475,14 @@ impl AccountsDb { let stored_size = offsets[1] - offsets[0]; storage.add_account(stored_size); - infos.push(AccountInfo { - store_id: storage.append_vec_id(), - offset: offsets[0], + infos.push(AccountInfo::new( + storage.append_vec_id(), + offsets[0], stored_size, - lamports: account + account .map(|account| account.lamports()) .unwrap_or_default(), - }); + )); } // restore the state to available storage.set_status(AccountStorageStatus::Available); @@ -4879,12 +4894,12 @@ impl AccountsDb { let account = account .map(|account| account.to_account_shared_data()) .unwrap_or_default(); - let account_info = AccountInfo { - store_id: CACHE_VIRTUAL_STORAGE_ID, - offset: CACHE_VIRTUAL_OFFSET, - stored_size: CACHE_VIRTUAL_STORED_SIZE, - lamports: account.lamports(), - }; + let account_info = AccountInfo::new( + CACHE_VIRTUAL_STORAGE_ID, + CACHE_VIRTUAL_OFFSET, + CACHE_VIRTUAL_STORED_SIZE, + account.lamports(), + ); self.notify_account_at_accounts_update(slot, meta, &account); @@ -5107,7 +5122,7 @@ impl AccountsDb { self.accounts_index.get(pubkey, Some(ancestors), Some(slot)) { let (slot, account_info) = &lock.slot_list()[index]; - if account_info.lamports != 0 { + if !account_info.is_zero_lamport() { // Because we're keeping the `lock' here, there is no need // to use retry_to_get_account_accessor() // In other words, flusher/shrinker/cleaner is blocked to @@ -5121,13 +5136,13 @@ impl AccountsDb { *slot, pubkey, account_info.store_id, - account_info.offset, + account_info.offset(), ) .get_loaded_account() .and_then( |loaded_account| { let loaded_hash = loaded_account.loaded_hash(); - let balance = account_info.lamports; + let balance = loaded_account.lamports(); if check_hash && !self.is_filler_account(pubkey) { let computed_hash = loaded_account.compute_hash(*slot, pubkey); @@ -6003,7 +6018,7 @@ impl AccountsDb { reclaimed_offsets .entry(account_info.store_id) .or_default() - .insert(account_info.offset); + .insert(account_info.offset()); } if let Some(expected_slot) = expected_slot { assert_eq!(*slot, expected_slot); @@ -6017,7 +6032,7 @@ impl AccountsDb { "AccountDB::accounts_index corrupted. 
Storage pointed to: {}, expected: {}, should only point to one slot", store.slot(), *slot ); - let count = store.remove_account(account_info.stored_size, reset_accounts); + let count = store.remove_account(account_info.stored_size(), reset_accounts); if count == 0 { self.dirty_stores .insert((*slot, store.append_vec_id()), store.clone()); @@ -6657,21 +6672,21 @@ impl AccountsDb { accounts_map } - /// return time_us, # accts rent exempt, total # accts fn generate_index_for_slot<'a>( &self, accounts_map: GenerateIndexAccountsMap<'a>, slot: &Slot, rent_collector: &RentCollector, - ) -> (u64, u64, u64) { + ) -> SlotIndexGenerationInfo { if accounts_map.is_empty() { - return (0, 0, 0); + return SlotIndexGenerationInfo::default(); } let secondary = !self.account_indexes.is_empty(); - let mut rent_exempt = 0; - let len = accounts_map.len(); + let mut accounts_data_len = 0; + let mut num_accounts_rent_exempt = 0; + let num_accounts = accounts_map.len(); let items = accounts_map.into_iter().map( |( pubkey, @@ -6689,29 +6704,30 @@ impl AccountsDb { &self.account_indexes, ); } + accounts_data_len += stored_account.data().len() as u64; if !rent_collector.should_collect_rent(&pubkey, &stored_account, false) || { let (_rent_due, exempt) = rent_collector.get_rent_due(&stored_account); exempt } { - rent_exempt += 1; + num_accounts_rent_exempt += 1; } ( pubkey, - AccountInfo { + AccountInfo::new( store_id, - offset: stored_account.offset, - stored_size: stored_account.stored_size, - lamports: stored_account.account_meta.lamports, - }, + stored_account.offset, + stored_account.stored_size, + stored_account.account_meta.lamports, + ), ) }, ); - let (dirty_pubkeys, insert_us) = self + let (dirty_pubkeys, insert_time_us) = self .accounts_index - .insert_new_if_missing_into_primary_index(*slot, len, items); + .insert_new_if_missing_into_primary_index(*slot, num_accounts, items); // dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for // a given pubkey. 
If there is just a single item, there is no cleaning to @@ -6719,7 +6735,12 @@ impl AccountsDb { if !dirty_pubkeys.is_empty() { self.uncleaned_pubkeys.insert(*slot, dirty_pubkeys); } - (insert_us, rent_exempt, len as u64) + SlotIndexGenerationInfo { + insert_time_us, + num_accounts: num_accounts as u64, + num_accounts_rent_exempt, + accounts_data_len, + } } fn filler_unique_id_bytes() -> usize { @@ -6798,51 +6819,49 @@ impl AccountsDb { .skip(pass * per_pass) .take(per_pass) .collect::>(); - self.thread_pool.install(|| { - roots_in_this_pass.into_par_iter().for_each(|slot| { - let storage_maps: Vec> = self - .storage - .get_slot_storage_entries(*slot) - .unwrap_or_default(); - if storage_maps.is_empty() { - return; - } + roots_in_this_pass.into_par_iter().for_each(|slot| { + let storage_maps: Vec> = self + .storage + .get_slot_storage_entries(*slot) + .unwrap_or_default(); + if storage_maps.is_empty() { + return; + } - let partition = crate::bank::Bank::variable_cycle_partition_from_previous_slot( - epoch_schedule, - *slot, - ); - let subrange = crate::bank::Bank::pubkey_range_from_partition(partition); - - let idx = overall_index.fetch_add(1, Ordering::Relaxed); - let filler_entries = (idx + 1) * self.filler_account_count / root_count - - idx * self.filler_account_count / root_count; - let accounts = (0..filler_entries) - .map(|_| { - let my_id = added.fetch_add(1, Ordering::Relaxed); - let my_id_bytes = u32::to_be_bytes(my_id as u32); - - // pubkey begins life as entire filler 'suffix' pubkey - let mut key = self.filler_account_suffix.unwrap(); - let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes(); - // first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes - key.as_mut()[0..rent_prefix_bytes] - .copy_from_slice(&subrange.start().as_ref()[0..rent_prefix_bytes]); - // next bytes are replaced with my_id: filler_unique_id_bytes - key.as_mut()[rent_prefix_bytes - ..(rent_prefix_bytes + Self::filler_unique_id_bytes())] - .copy_from_slice(&my_id_bytes); - assert!(subrange.contains(&key)); - key - }) - .collect::>(); - let add = accounts - .iter() - .map(|key| (key, &account)) - .collect::>(); - let hashes = (0..filler_entries).map(|_| hash).collect::>(); - self.store_accounts_frozen(*slot, &add[..], Some(&hashes[..]), None, None); - }) + let partition = crate::bank::Bank::variable_cycle_partition_from_previous_slot( + epoch_schedule, + *slot, + ); + let subrange = crate::bank::Bank::pubkey_range_from_partition(partition); + + let idx = overall_index.fetch_add(1, Ordering::Relaxed); + let filler_entries = (idx + 1) * self.filler_account_count / root_count + - idx * self.filler_account_count / root_count; + let accounts = (0..filler_entries) + .map(|_| { + let my_id = added.fetch_add(1, Ordering::Relaxed); + let my_id_bytes = u32::to_be_bytes(my_id as u32); + + // pubkey begins life as entire filler 'suffix' pubkey + let mut key = self.filler_account_suffix.unwrap(); + let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes(); + // first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes + key.as_mut()[0..rent_prefix_bytes] + .copy_from_slice(&subrange.start().as_ref()[0..rent_prefix_bytes]); + // next bytes are replaced with my_id: filler_unique_id_bytes + key.as_mut()[rent_prefix_bytes + ..(rent_prefix_bytes + Self::filler_unique_id_bytes())] + .copy_from_slice(&my_id_bytes); + assert!(subrange.contains(&key)); + key + }) + .collect::>(); + let add = accounts + .iter() + .map(|key| (key, &account)) + 
.collect::>(); + let hashes = (0..filler_entries).map(|_| hash).collect::>(); + self.store_accounts_frozen(*slot, &add[..], Some(&hashes[..]), None, None); }); self.accounts_index.set_startup(false); } @@ -6855,7 +6874,7 @@ impl AccountsDb { limit_load_slot_count_from_snapshot: Option, verify: bool, genesis_config: &GenesisConfig, - ) { + ) -> IndexGenerationInfo { let mut slots = self.storage.all_slots(); #[allow(clippy::stable_sort_primitive)] slots.sort(); @@ -6870,6 +6889,7 @@ impl AccountsDb { genesis_config.slots_per_year(), &genesis_config.rent, ); + let accounts_data_len = AtomicU64::new(0); // pass == 0 always runs and generates the index // pass == 1 only runs if verify == true. @@ -6882,7 +6902,14 @@ impl AccountsDb { let storage_info = StorageSizeAndCountMap::default(); let total_processed_slots_across_all_threads = AtomicU64::new(0); let outer_slots_len = slots.len(); - let chunk_size = (outer_slots_len / 7) + 1; // approximately 400k slots in a snapshot + let threads = if self.accounts_index.is_disk_index_enabled() { + // these write directly to disk, so the more threads, the better + num_cpus::get() + } else { + // seems to be a good hueristic given varying # cpus for in-mem disk index + 8 + }; + let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot let mut index_time = Measure::start("index"); let insertion_time_us = AtomicU64::new(0); let rent_exempt = AtomicU64::new(0); @@ -6915,10 +6942,16 @@ impl AccountsDb { let insert_us = if pass == 0 { // generate index - let (insert_us, rent_exempt_this_slot, total_this_slot) = - self.generate_index_for_slot(accounts_map, slot, &rent_collector); + let SlotIndexGenerationInfo { + insert_time_us: insert_us, + num_accounts: total_this_slot, + num_accounts_rent_exempt: rent_exempt_this_slot, + accounts_data_len: accounts_data_len_this_slot, + } = self.generate_index_for_slot(accounts_map, slot, &rent_collector); rent_exempt.fetch_add(rent_exempt_this_slot, Ordering::Relaxed); total_duplicates.fetch_add(total_this_slot, Ordering::Relaxed); + accounts_data_len + .fetch_add(accounts_data_len_this_slot, Ordering::Relaxed); insert_us } else { // verify index matches expected and measure the time to get all items @@ -6933,15 +6966,12 @@ impl AccountsDb { for (slot2, account_info2) in sl.iter() { if slot2 == slot { count += 1; - let ai = AccountInfo { - store_id: account_info.store_id, - offset: account_info.stored_account.offset, - stored_size: account_info.stored_account.stored_size, - lamports: account_info - .stored_account - .account_meta - .lamports, - }; + let ai = AccountInfo::new( + account_info.store_id, + account_info.stored_account.offset, + account_info.stored_account.stored_size, + account_info.stored_account.account_meta.lamports, + ); assert_eq!(&ai, account_info2); } } @@ -6972,6 +7002,30 @@ impl AccountsDb { }) .sum(); + // subtract data.len() from accounts_data_len for all old accounts that are in the index twice + let mut accounts_data_len_dedup_timer = + Measure::start("handle accounts data len duplicates"); + if pass == 0 { + let mut unique_pubkeys = HashSet::::default(); + self.uncleaned_pubkeys.iter().for_each(|entry| { + entry.value().iter().for_each(|pubkey| { + unique_pubkeys.insert(*pubkey); + }) + }); + let accounts_data_len_from_duplicates = unique_pubkeys + .into_iter() + .collect::>() + .par_chunks(4096) + .map(|pubkeys| self.pubkeys_to_duplicate_accounts_data_len(pubkeys)) + .sum(); + 
accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed); + info!( + "accounts data len: {}", + accounts_data_len.load(Ordering::Relaxed) + ); + } + accounts_data_len_dedup_timer.stop(); + let storage_info_timings = storage_info_timings.into_inner().unwrap(); let mut index_flush_us = 0; @@ -6996,6 +7050,7 @@ impl AccountsDb { storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us, storage_size_accounts_map_flatten_us: storage_info_timings .storage_size_accounts_map_flatten_us, + accounts_data_len_dedup_time_us: accounts_data_len_dedup_timer.as_us(), ..GenerateIndexTimings::default() }; @@ -7009,6 +7064,43 @@ impl AccountsDb { } timings.report(); } + + IndexGenerationInfo { + accounts_data_len: accounts_data_len.load(Ordering::Relaxed), + } + } + + /// Used during generate_index() to get the _duplicate_ accounts data len from the given pubkeys + fn pubkeys_to_duplicate_accounts_data_len(&self, pubkeys: &[Pubkey]) -> u64 { + let mut accounts_data_len_from_duplicates = 0; + pubkeys.iter().for_each(|pubkey| { + if let Some(entry) = self.accounts_index.get_account_read_entry(pubkey) { + let slot_list = entry.slot_list(); + if slot_list.len() < 2 { + return; + } + // Only the account data len in the highest slot should be used, and the rest are + // duplicates. So sort the slot list in descending slot order, skip the first + // item, then sum up the remaining data len, which are the duplicates. + let mut slot_list = slot_list.clone(); + slot_list + .select_nth_unstable_by(0, |a, b| b.0.cmp(&a.0)) + .2 + .iter() + .for_each(|(slot, account_info)| { + let maybe_storage_entry = self + .storage + .get_account_storage_entry(*slot, account_info.store_id); + let mut accessor = LoadedAccountAccessor::Stored( + maybe_storage_entry.map(|entry| (entry, account_info.offset())), + ); + let loaded_account = accessor.check_and_get_loaded_account(); + let account = loaded_account.take_account(); + accounts_data_len_from_duplicates += account.data().len(); + }); + } + }); + accounts_data_len_from_duplicates as u64 } fn update_storage_info( @@ -10920,30 +11012,10 @@ pub mod tests { let key0 = Pubkey::new_from_array([0u8; 32]); let key1 = Pubkey::new_from_array([1u8; 32]); let key2 = Pubkey::new_from_array([2u8; 32]); - let info0 = AccountInfo { - store_id: 0, - offset: 0, - stored_size: 0, - lamports: 0, - }; - let info1 = AccountInfo { - store_id: 1, - offset: 0, - stored_size: 0, - lamports: 0, - }; - let info2 = AccountInfo { - store_id: 2, - offset: 0, - stored_size: 0, - lamports: 0, - }; - let info3 = AccountInfo { - store_id: 3, - offset: 0, - stored_size: 0, - lamports: 0, - }; + let info0 = AccountInfo::new(0, 0, 0, 0); + let info1 = AccountInfo::new(1, 0, 0, 0); + let info2 = AccountInfo::new(2, 0, 0, 0); + let info3 = AccountInfo::new(3, 0, 0, 0); let mut reclaims = vec![]; accounts_index.upsert( 0, @@ -11796,7 +11868,7 @@ pub mod tests { locked_entry.slot_list()[0] }) .unwrap(); - let removed_data_size = account_info.1.stored_size; + let removed_data_size = account_info.1.stored_size(); // Fetching the account from storage should return the same // stored size as in the index. 
assert_eq!(removed_data_size, account.stored_size); @@ -13252,12 +13324,7 @@ } let do_test = |test_params: TestParameters| { - let account_info = AccountInfo { - store_id: 42, - offset: 123, - stored_size: 234, - lamports: 0, - }; + let account_info = AccountInfo::new(42, 123, 234, 0); let pubkey = solana_sdk::pubkey::new_rand(); let mut key_set = HashSet::default(); key_set.insert(pubkey); diff --git a/runtime/src/ancestors.rs b/runtime/src/ancestors.rs index 7960018cdf54bd..830076d0b056f5 100644 --- a/runtime/src/ancestors.rs +++ b/runtime/src/ancestors.rs @@ -1,12 +1,23 @@ -use {crate::accounts_index::RollingBitField, solana_sdk::clock::Slot, std::collections::HashMap}; +use { + crate::accounts_index::RollingBitField, + core::fmt::{Debug, Formatter}, + solana_sdk::clock::Slot, + std::collections::HashMap, +}; pub type AncestorsForSerialization = HashMap<Slot, usize>; -#[derive(Debug, Clone, PartialEq, AbiExample)] +#[derive(Clone, PartialEq, AbiExample)] pub struct Ancestors { ancestors: RollingBitField, } +impl Debug for Ancestors { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{:?}", self.keys()) + } +} + // some tests produce ancestors ranges that are too large such // that we prefer to implement them in a sparse HashMap const ANCESTORS_HASH_MAP_SIZE: u64 = 8192; diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 1f49dfe111f6f2..29f13ee270bc78 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -301,7 +301,7 @@ impl AppendVec { self.file_size } - pub fn file_name(slot: Slot, id: usize) -> String { + pub fn file_name(slot: Slot, id: impl std::fmt::Display) -> String { format!("{}.{}", slot, id) } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 22ec96492dec28..5beef12b9afbed 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -56,7 +56,7 @@ use { calculate_stake_weighted_timestamp, MaxAllowableDrift, MAX_ALLOWABLE_DRIFT_PERCENTAGE, MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW, }, - stakes::Stakes, + stakes::{InvalidCacheEntryReason, Stakes, StakesCache}, status_cache::{SlotDelta, StatusCache}, system_instruction_processor::{get_system_account_kind, SystemAccountKind}, transaction_batch::TransactionBatch, @@ -74,7 +74,10 @@ use { solana_metrics::{inc_new_counter_debug, inc_new_counter_info}, solana_program_runtime::{ instruction_recorder::InstructionRecorder, - invoke_context::{BuiltinProgram, Executor, Executors, ProcessInstructionWithContext}, + invoke_context::{ + BuiltinProgram, Executor, Executors, ProcessInstructionWithContext, + TransactionAccountRefCells, + }, log_collector::LogCollector, timings::ExecuteDetailsTimings, }, @@ -118,7 +121,7 @@ use { slot_hashes::SlotHashes, slot_history::SlotHistory, system_transaction, - sysvar::{self}, + sysvar::{self, Sysvar, SysvarId}, timing::years_as_slots, transaction::{ Result, SanitizedTransaction, Transaction, TransactionError, @@ -143,7 +146,10 @@ use { ptr, rc::Rc, sync::{ - atomic::{AtomicBool, AtomicU64, Ordering::Relaxed}, + atomic::{ + AtomicBool, AtomicU64, + Ordering::{Acquire, Relaxed, Release}, + }, Arc, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, }, time::{Duration, Instant}, @@ -234,9 +240,8 @@ impl ExecuteTimings { } type BankStatusCache = StatusCache<Result<()>>; -#[frozen_abi(digest = "32EjVUc6shHHVPpsnBAVfyBziMgyFzH8qxisLwmwwdS1")] +#[frozen_abi(digest = "GcfJc94Hb3s7gzF7Uh4YxLSiQf1MvUtMmtU45BvinkVT")] pub type BankSlotDelta = SlotDelta<Result<()>>; -type TransactionAccountRefCells = Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>; // Eager
rent collection repeats in cyclic manner. // Each cycle is composed of number of tiny pubkey subranges @@ -769,7 +774,7 @@ pub(crate) struct BankFieldsToSerialize<'a> { pub(crate) rent_collector: RentCollector, pub(crate) epoch_schedule: EpochSchedule, pub(crate) inflation: Inflation, - pub(crate) stakes: &'a RwLock, + pub(crate) stakes: &'a StakesCache, pub(crate) epoch_stakes: &'a HashMap, pub(crate) is_delta: bool, } @@ -808,7 +813,7 @@ impl PartialEq for Bank { && self.rent_collector == other.rent_collector && self.epoch_schedule == other.epoch_schedule && *self.inflation.read().unwrap() == *other.inflation.read().unwrap() - && *self.stakes.read().unwrap() == *other.stakes.read().unwrap() + && *self.stakes_cache.stakes() == *other.stakes_cache.stakes() && self.epoch_stakes == other.epoch_stakes && self.is_delta.load(Relaxed) == other.is_delta.load(Relaxed) } @@ -988,7 +993,7 @@ pub struct Bank { inflation: Arc>, /// cache of vote_account and stake_account state for this fork - stakes: RwLock, + stakes_cache: StakesCache, /// staked nodes on epoch boundaries, saved off when a bank.slot() is at /// a leader schedule calculation boundary @@ -1040,6 +1045,10 @@ pub struct Bank { pub cost_tracker: RwLock, sysvar_cache: RwLock)>>, + + /// Current size of the accounts data. Used when processing messages to enforce a limit on its + /// maximum size. + accounts_data_len: AtomicU64, } impl Default for BlockhashQueue { @@ -1054,6 +1063,12 @@ struct VoteWithStakeDelegations { delegations: Vec<(Pubkey, (StakeState, AccountSharedData))>, } +struct LoadVoteAndStakeAccountsResult { + vote_with_stake_delegations_map: DashMap, + invalid_stake_keys: DashMap, + invalid_vote_keys: DashMap, +} + #[derive(Debug, Default)] pub struct NewBankOptions { pub vote_only_bank: bool, @@ -1119,7 +1134,7 @@ impl Bank { } fn default_with_accounts(accounts: Accounts) -> Self { - Self { + let bank = Self { rc: BankRc::new(accounts, Slot::default()), src: StatusCacheRc::default(), blockhash_queue: RwLock::::default(), @@ -1154,7 +1169,7 @@ impl Bank { rent_collector: RentCollector::default(), epoch_schedule: EpochSchedule::default(), inflation: Arc::>::default(), - stakes: RwLock::::default(), + stakes_cache: StakesCache::default(), epoch_stakes: HashMap::::default(), is_delta: AtomicBool::default(), builtin_programs: BuiltinPrograms::default(), @@ -1175,7 +1190,14 @@ impl Bank { vote_only_bank: false, cost_tracker: RwLock::::default(), sysvar_cache: RwLock::new(Vec::new()), - } + accounts_data_len: AtomicU64::default(), + }; + + let total_accounts_stats = bank.get_total_accounts_stats().unwrap(); + bank.accounts_data_len + .store(total_accounts_stats.data_len as u64, Release); + + bank } pub fn new_with_paths_for_tests( @@ -1263,7 +1285,7 @@ impl Bank { // genesis needs stakes for all epochs up to the epoch implied by // slot = 0 and genesis configuration { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) { bank.epoch_stakes .insert(epoch, EpochStakes::new(&stakes, epoch)); @@ -1383,7 +1405,7 @@ impl Bank { transaction_entries_count: AtomicU64::new(0), transactions_per_entry_max: AtomicU64::new(0), // we will .clone_with_epoch() this soon after stake data update; so just .clone() for now - stakes: RwLock::new(parent.stakes.read().unwrap().clone()), + stakes_cache: StakesCache::new(parent.stakes_cache.stakes().clone()), epoch_stakes: parent.epoch_stakes.clone(), parent_hash: parent.hash(), parent_slot: parent.slot(), @@ -1421,6 
+1443,7 @@ impl Bank { freeze_started: AtomicBool::new(false), cost_tracker: RwLock::new(CostTracker::default()), sysvar_cache: RwLock::new(Vec::new()), + accounts_data_len: AtomicU64::new(parent.accounts_data_len.load(Acquire)), }; let mut ancestors = Vec::with_capacity(1 + new.parents().len()); @@ -1440,10 +1463,7 @@ impl Bank { // Add new entry to stakes.stake_history, set appropriate epoch and // update vote accounts with warmed up stakes before saving a // snapshot of stakes in epoch stakes - new.stakes - .write() - .unwrap() - .activate_epoch(epoch, &thread_pool); + new.stakes_cache.activate_epoch(epoch, &thread_pool); // Save a snapshot of stakes for use in consensus and stake weighted networking let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot); @@ -1536,6 +1556,7 @@ impl Bank { debug_keys: Option>>, additional_builtins: Option<&Builtins>, debug_do_not_add_builtins: bool, + accounts_data_len: u64, ) -> Self { fn new() -> T { T::default() @@ -1576,7 +1597,7 @@ impl Bank { rent_collector: fields.rent_collector.clone_with_epoch(fields.epoch), epoch_schedule: fields.epoch_schedule, inflation: Arc::new(RwLock::new(fields.inflation)), - stakes: RwLock::new(fields.stakes), + stakes_cache: StakesCache::new(fields.stakes), epoch_stakes: fields.epoch_stakes, is_delta: AtomicBool::new(fields.is_delta), builtin_programs: new(), @@ -1598,6 +1619,7 @@ impl Bank { vote_only_bank: false, cost_tracker: RwLock::new(CostTracker::default()), sysvar_cache: RwLock::new(Vec::new()), + accounts_data_len: AtomicU64::new(accounts_data_len), }; bank.finish_init( genesis_config, @@ -1677,7 +1699,7 @@ impl Bank { rent_collector: self.rent_collector.clone(), epoch_schedule: self.epoch_schedule, inflation: *self.inflation.read().unwrap(), - stakes: &self.stakes, + stakes: &self.stakes_cache, epoch_stakes: &self.epoch_stakes, is_delta: self.is_delta.load(Relaxed), } @@ -1893,6 +1915,18 @@ impl Bank { }); } + pub fn set_sysvar_for_tests(&self, sysvar: &T) + where + T: Sysvar + SysvarId, + { + self.update_sysvar_account(&T::id(), |account| { + create_account( + sysvar, + self.inherit_specially_retained_account_fields(account), + ) + }); + } + fn update_slot_history(&self) { self.update_sysvar_account(&sysvar::slot_history::id(), |account| { let mut slot_history = account @@ -1946,12 +1980,11 @@ impl Bank { }); let new_epoch_stakes = - EpochStakes::new(&self.stakes.read().unwrap(), leader_schedule_epoch); + EpochStakes::new(&self.stakes_cache.stakes(), leader_schedule_epoch); { let vote_stakes: HashMap<_, _> = self - .stakes - .read() - .unwrap() + .stakes_cache + .stakes() .vote_accounts() .iter() .map(|(pubkey, (stake, _))| (*pubkey, *stake)) @@ -2008,7 +2041,7 @@ impl Bank { // if I'm the first Bank in an epoch, ensure stake_history is updated self.update_sysvar_account(&sysvar::stake_history::id(), |account| { create_account::( - self.stakes.read().unwrap().history(), + self.stakes_cache.stakes().history(), self.inherit_specially_retained_account_fields(account), ) }); @@ -2081,7 +2114,7 @@ impl Bank { let validator_rewards = (validator_rate * capitalization as f64 * epoch_duration_in_years) as u64; - let old_vote_balance_and_staked = self.stakes.read().unwrap().vote_balance_and_staked(); + let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); let validator_point_value = self.pay_validator_rewards_with_thread_pool( prev_epoch, @@ -2104,7 +2137,7 @@ impl Bank { }); } - let new_vote_balance_and_staked = 
self.stakes.read().unwrap().vote_balance_and_staked(); + let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked; assert_eq!( validator_rewards_paid, @@ -2136,7 +2169,7 @@ impl Bank { .fetch_add(validator_rewards_paid, Relaxed); let active_stake = if let Some(stake_history_entry) = - self.stakes.read().unwrap().history().get(prev_epoch) + self.stakes_cache.stakes().history().get(prev_epoch) { stake_history_entry.effective } else { @@ -2166,9 +2199,12 @@ impl Bank { &self, thread_pool: &ThreadPool, reward_calc_tracer: Option, - ) -> DashMap { - let stakes = self.stakes.read().unwrap(); - let accounts = DashMap::with_capacity(stakes.vote_accounts().as_ref().len()); + ) -> LoadVoteAndStakeAccountsResult { + let stakes = self.stakes_cache.stakes(); + let vote_with_stake_delegations_map = + DashMap::with_capacity(stakes.vote_accounts().as_ref().len()); + let invalid_stake_keys: DashMap = DashMap::new(); + let invalid_vote_keys: DashMap = DashMap::new(); thread_pool.install(|| { stakes @@ -2176,87 +2212,93 @@ impl Bank { .par_iter() .for_each(|(stake_pubkey, delegation)| { let vote_pubkey = &delegation.voter_pubkey; - let stake_account = match self.get_account_with_fixed_root(stake_pubkey) { - Some(stake_account) => stake_account, - None => return, - }; + if invalid_vote_keys.contains_key(vote_pubkey) { + return; + } - // fetch vote account from stakes cache if it hasn't been cached locally - let fetched_vote_account = if !accounts.contains_key(vote_pubkey) { - let vote_account = match self.get_account_with_fixed_root(vote_pubkey) { - Some(vote_account) => vote_account, - None => return, - }; + let stake_delegation = match self.get_account_with_fixed_root(stake_pubkey) { + Some(stake_account) => { + if stake_account.owner() != &solana_stake_program::id() { + invalid_stake_keys + .insert(*stake_pubkey, InvalidCacheEntryReason::WrongOwner); + return; + } - let vote_state: VoteState = - match StateMut::::state(&vote_account) { - Ok(vote_state) => vote_state.convert_to_current(), - Err(err) => { - debug!( - "failed to deserialize vote account {}: {}", - vote_pubkey, err - ); + match stake_account.state().ok() { + Some(stake_state) => (*stake_pubkey, (stake_state, stake_account)), + None => { + invalid_stake_keys + .insert(*stake_pubkey, InvalidCacheEntryReason::BadState); return; } - }; + } + } + None => { + invalid_stake_keys + .insert(*stake_pubkey, InvalidCacheEntryReason::Missing); + return; + } + }; - Some((vote_state, vote_account)) + let mut vote_delegations = if let Some(vote_delegations) = + vote_with_stake_delegations_map.get_mut(vote_pubkey) + { + vote_delegations } else { - None - }; + let vote_account = match self.get_account_with_fixed_root(vote_pubkey) { + Some(vote_account) => { + if vote_account.owner() != &solana_vote_program::id() { + invalid_vote_keys + .insert(*vote_pubkey, InvalidCacheEntryReason::WrongOwner); + return; + } + vote_account + } + None => { + invalid_vote_keys + .insert(*vote_pubkey, InvalidCacheEntryReason::Missing); + return; + } + }; - let fetched_vote_account_owner = fetched_vote_account - .as_ref() - .map(|(_vote_state, vote_account)| vote_account.owner()); + let vote_state = if let Ok(vote_state) = + StateMut::::state(&vote_account) + { + vote_state.convert_to_current() + } else { + invalid_vote_keys + .insert(*vote_pubkey, InvalidCacheEntryReason::BadState); + return; + }; + + vote_with_stake_delegations_map + .entry(*vote_pubkey) + 
.or_insert_with(|| VoteWithStakeDelegations { + vote_state: Arc::new(vote_state), + vote_account, + delegations: vec![], + }) + }; if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() { reward_calc_tracer(&RewardCalculationEvent::Staking( stake_pubkey, &InflationPointCalculationEvent::Delegation( *delegation, - fetched_vote_account_owner - .cloned() - .unwrap_or_else(solana_vote_program::id), + solana_vote_program::id(), ), )); } - // filter invalid delegation accounts - if stake_account.owner() != &solana_stake_program::id() - || (fetched_vote_account_owner.is_some() - && fetched_vote_account_owner != Some(&solana_vote_program::id())) - { - datapoint_warn!( - "bank-stake_delegation_accounts-invalid-account", - ("slot", self.slot() as i64, i64), - ("stake-address", format!("{:?}", stake_pubkey), String), - ("vote-address", format!("{:?}", vote_pubkey), String), - ); - return; - } - - let stake_delegation = match stake_account.state().ok() { - Some(stake_state) => (*stake_pubkey, (stake_state, stake_account)), - None => return, - }; - - if let Some((vote_state, vote_account)) = fetched_vote_account { - accounts - .entry(*vote_pubkey) - .or_insert_with(|| VoteWithStakeDelegations { - vote_state: Arc::new(vote_state), - vote_account, - delegations: vec![], - }); - } - - if let Some(mut stake_delegation_accounts) = accounts.get_mut(vote_pubkey) { - stake_delegation_accounts.delegations.push(stake_delegation); - } + vote_delegations.delegations.push(stake_delegation); }); }); - accounts + LoadVoteAndStakeAccountsResult { + vote_with_stake_delegations_map, + invalid_vote_keys, + invalid_stake_keys, + } } /// iterate over all stakes, redeem vote credits for each stake we can @@ -2269,14 +2311,31 @@ impl Bank { fix_activating_credits_observed: bool, thread_pool: &ThreadPool, ) -> f64 { - let stake_history = self.stakes.read().unwrap().history().clone(); - let vote_and_stake_accounts = self.load_vote_and_stake_accounts_with_thread_pool( - thread_pool, - reward_calc_tracer.as_ref(), - ); + let stake_history = self.stakes_cache.stakes().history().clone(); + let vote_with_stake_delegations_map = { + let LoadVoteAndStakeAccountsResult { + vote_with_stake_delegations_map, + invalid_stake_keys, + invalid_vote_keys, + } = self.load_vote_and_stake_accounts_with_thread_pool( + thread_pool, + reward_calc_tracer.as_ref(), + ); + + let evict_invalid_stakes_cache_entries = self + .feature_set + .is_active(&feature_set::evict_invalid_stakes_cache_entries::id()); + self.stakes_cache.handle_invalid_keys( + invalid_stake_keys, + invalid_vote_keys, + evict_invalid_stakes_cache_entries, + self.slot(), + ); + vote_with_stake_delegations_map + }; let points: u128 = thread_pool.install(|| { - vote_and_stake_accounts + vote_with_stake_delegations_map .par_iter() .map(|entry| { let VoteWithStakeDelegations { @@ -2307,8 +2366,8 @@ impl Bank { // pay according to point value let point_value = PointValue { rewards, points }; let vote_account_rewards: DashMap = - DashMap::with_capacity(vote_and_stake_accounts.len()); - let stake_delegation_iterator = vote_and_stake_accounts.into_par_iter().flat_map( + DashMap::with_capacity(vote_with_stake_delegations_map.len()); + let stake_delegation_iterator = vote_with_stake_delegations_map.into_par_iter().flat_map( |( vote_pubkey, VoteWithStakeDelegations { @@ -2696,9 +2755,8 @@ impl Bank { // highest staked node is the first collector self.collector_id = self - .stakes - .read() - .unwrap() + .stakes_cache + .stakes() .highest_staked_node() .unwrap_or_default(); @@ -3467,7 
+3525,8 @@ impl Bank { Some(index) } Err(TransactionError::WouldExceedMaxBlockCostLimit) - | Err(TransactionError::WouldExceedMaxAccountCostLimit) => Some(index), + | Err(TransactionError::WouldExceedMaxAccountCostLimit) + | Err(TransactionError::WouldExceedMaxAccountDataCostLimit) => Some(index), Err(_) => None, Ok(_) => None, }) @@ -4704,13 +4763,11 @@ impl Bank { .accounts .store_slow_cached(self.slot(), pubkey, account); - if Stakes::is_stake(account) { - self.stakes.write().unwrap().store( - pubkey, - account, - self.stakes_remove_delegation_if_inactive_enabled(), - ); - } + self.stakes_cache.check_and_store( + pubkey, + account, + self.stakes_remove_delegation_if_inactive_enabled(), + ); } pub fn force_flush_accounts_cache(&self) { @@ -5332,9 +5389,9 @@ impl Bank { accounts_db_skip_shrink: bool, last_full_snapshot_slot: Option, ) -> bool { - info!("cleaning.."); let mut clean_time = Measure::start("clean"); - if self.slot() > 0 { + if !accounts_db_skip_shrink && self.slot() > 0 { + info!("cleaning.."); self.clean_accounts(true, true, last_full_snapshot_slot); } clean_time.stop(); @@ -5449,11 +5506,10 @@ impl Bank { let message = tx.message(); let loaded_transaction = raccs.as_ref().unwrap(); - for (_i, (pubkey, account)) in (0..message.account_keys_len()) - .zip(loaded_transaction.accounts.iter()) - .filter(|(_i, (_pubkey, account))| (Stakes::is_stake(account))) + for (_i, (pubkey, account)) in + (0..message.account_keys_len()).zip(loaded_transaction.accounts.iter()) { - self.stakes.write().unwrap().store( + self.stakes_cache.check_and_store( pubkey, account, self.stakes_remove_delegation_if_inactive_enabled(), @@ -5463,19 +5519,19 @@ impl Bank { } pub fn staked_nodes(&self) -> Arc> { - self.stakes.read().unwrap().staked_nodes() + self.stakes_cache.stakes().staked_nodes() } /// current vote accounts for this bank along with the stake /// attributed to each account pub fn vote_accounts(&self) -> Arc> { - let stakes = self.stakes.read().unwrap(); + let stakes = self.stakes_cache.stakes(); Arc::from(stakes.vote_accounts()) } /// Vote account for the given vote account pubkey along with the stake. 
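The churn in bank.rs here is mostly mechanical: every `self.stakes.read().unwrap()` call becomes `self.stakes_cache.stakes()`, and writes are funneled through `StakesCache::check_and_store` instead of `Stakes::store`. A minimal, self-contained sketch of that newtype-over-RwLock accessor pattern (a hypothetical `Cache` type for illustration, not the actual solana-runtime `StakesCache`) might look like:

```rust
use std::sync::{RwLock, RwLockReadGuard};

/// Simplified stand-in for the StakesCache pattern in this diff:
/// a newtype over RwLock<T> so callers never touch the lock directly.
struct Cache<T>(RwLock<T>);

impl<T> Cache<T> {
    fn new(value: T) -> Self {
        Self(RwLock::new(value))
    }

    /// Read access hands out the guard; callers clone out what they need
    /// and drop the guard quickly, mirroring `stakes_cache.stakes()`.
    fn get(&self) -> RwLockReadGuard<'_, T> {
        self.0.read().unwrap()
    }

    /// Writes go through one checked entry point instead of ad-hoc
    /// `write().unwrap()` calls scattered across the codebase.
    fn update(&self, f: impl FnOnce(&mut T)) {
        let mut guard = self.0.write().unwrap();
        f(&mut guard);
    }
}

fn main() {
    let cache = Cache::new(vec![1u64, 2, 3]);
    let total: u64 = cache.get().iter().sum();
    cache.update(|v| v.push(total));
    assert_eq!(cache.get().len(), 4);
}
```

Centralizing the lock behind the wrapper keeps lock scope short at every call site, which is what most of the one-line replacements below are doing.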
pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<(/*stake:*/ u64, VoteAccount)> { - let stakes = self.stakes.read().unwrap(); + let stakes = self.stakes_cache.stakes(); stakes.vote_accounts().get(vote_account).cloned() } @@ -6215,7 +6271,7 @@ pub(crate) mod tests { impl Bank { fn cloned_stake_delegations(&self) -> StakeDelegations { - self.stakes.read().unwrap().stake_delegations().clone() + self.stakes_cache.stakes().stake_delegations().clone() } } @@ -8282,6 +8338,7 @@ pub(crate) mod tests { let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); let validator_points: u128 = bank0 .load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()) + .vote_with_stake_delegations_map .into_iter() .map( |( @@ -10501,10 +10558,10 @@ pub(crate) mod tests { // Non-builtin loader accounts can not be used for instruction processing { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(stakes.vote_accounts().as_ref().is_empty()); } - assert!(bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); let ((vote_id, vote_account), (stake_id, stake_account)) = @@ -10514,19 +10571,19 @@ pub(crate) mod tests { bank.store_account(&vote_id, &vote_account); bank.store_account(&stake_id, &stake_account); { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(!stakes.vote_accounts().as_ref().is_empty()); } - assert!(!bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(!bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); bank.add_builtin("mock_program1", &vote_id, mock_ix_processor); bank.add_builtin("mock_program2", &stake_id, mock_ix_processor); { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(stakes.vote_accounts().as_ref().is_empty()); } - assert!(bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", @@ -10546,10 +10603,10 @@ pub(crate) mod tests { let new_hash = bank.get_accounts_hash(); assert_eq!(old_hash, new_hash); { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(stakes.vote_accounts().as_ref().is_empty()); } - assert!(bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", @@ -14287,8 +14344,9 @@ pub(crate) mod tests { ); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let vote_and_stake_accounts = - bank.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()); + let vote_and_stake_accounts = bank + .load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()) + .vote_with_stake_delegations_map; assert_eq!(vote_and_stake_accounts.len(), 2); let mut vote_account = bank @@ -14328,8 +14386,9 @@ pub(crate) mod tests { // Accounts must be valid stake and vote accounts let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let vote_and_stake_accounts = - 
bank.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()); + let vote_and_stake_accounts = bank + .load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()) + .vote_with_stake_delegations_map; assert_eq!(vote_and_stake_accounts.len(), 0); } diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index 97be8a062d9c2f..4c6846d6363d62 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -58,3 +58,6 @@ pub const MAX_BLOCK_UNITS: u64 = /// limit is to prevent too many transactions write to same account, threrefore /// reduce block's paralellism. pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_UNIT_TO_US_RATIO; + +/// max len of account data in a slot (bytes) +pub const MAX_ACCOUNT_DATA_LEN: u64 = 100_000_000; diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 3299820cf9c93c..87eb17660346db 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -172,6 +172,15 @@ fn feature_builtins() -> Vec<(Builtin, Pubkey, ActivationType)> { feature_set::prevent_calling_precompiles_as_programs::id(), ActivationType::RemoveProgram, ), + ( + Builtin::new( + "address_lookup_table_program", + solana_address_lookup_table_program::id(), + solana_address_lookup_table_program::processor::process_instruction, + ), + feature_set::versioned_tx_message_enabled::id(), + ActivationType::NewProgram, + ), ] } diff --git a/runtime/src/cost_model.rs b/runtime/src/cost_model.rs index a69b4c9f41ec09..ab74f9de58554a 100644 --- a/runtime/src/cost_model.rs +++ b/runtime/src/cost_model.rs @@ -10,7 +10,10 @@ use { execute_cost_table::ExecuteCostTable, }, log::*, - solana_sdk::{pubkey::Pubkey, transaction::SanitizedTransaction}, + solana_sdk::{ + instruction::CompiledInstruction, program_utils::limited_deserialize, pubkey::Pubkey, + system_instruction::SystemInstruction, system_program, transaction::SanitizedTransaction, + }, std::collections::HashMap, }; @@ -27,6 +30,7 @@ pub struct TransactionCost { // `cost_weight` is a multiplier could be applied to transaction cost, // if set to zero allows the transaction to bypass cost limit check. 
pub cost_weight: u32, + pub account_data_size: u64, } impl Default for TransactionCost { @@ -38,6 +42,7 @@ impl Default for TransactionCost { data_bytes_cost: 0u64, execution_cost: 0u64, cost_weight: 1u32, + account_data_size: 0u64, } } } @@ -118,6 +123,7 @@ impl CostModel { tx_cost.data_bytes_cost = self.get_data_bytes_cost(transaction); tx_cost.execution_cost = self.get_transaction_cost(transaction); tx_cost.cost_weight = self.calculate_cost_weight(transaction); + tx_cost.account_data_size = self.calculate_account_data_size(transaction); debug!("transaction {:?} has cost {:?}", transaction, tx_cost); tx_cost @@ -201,6 +207,59 @@ impl CostModel { } } + fn calculate_account_data_size_on_deserialized_system_instruction( + instruction: SystemInstruction, + ) -> u64 { + match instruction { + SystemInstruction::CreateAccount { + lamports: _lamports, + space, + owner: _owner, + } => space, + SystemInstruction::CreateAccountWithSeed { + base: _base, + seed: _seed, + lamports: _lamports, + space, + owner: _owner, + } => space, + SystemInstruction::Allocate { space } => space, + SystemInstruction::AllocateWithSeed { + base: _base, + seed: _seed, + space, + owner: _owner, + } => space, + _ => 0, + } + } + + fn calculate_account_data_size_on_instruction( + program_id: &Pubkey, + instruction: &CompiledInstruction, + ) -> u64 { + if program_id == &system_program::id() { + if let Ok(instruction) = limited_deserialize(&instruction.data) { + return Self::calculate_account_data_size_on_deserialized_system_instruction( + instruction, + ); + } + } + 0 + } + + /// eventually, potentially determine account data size of all writable accounts + /// at the moment, calculate account data size of account creation + fn calculate_account_data_size(&self, transaction: &SanitizedTransaction) -> u64 { + transaction + .message() + .program_instructions_iter() + .map(|(program_id, instruction)| { + Self::calculate_account_data_size_on_instruction(program_id, instruction) + }) + .sum() + } + fn calculate_cost_weight(&self, transaction: &SanitizedTransaction) -> u32 { if is_simple_vote_transaction(transaction) { // vote has zero cost weight, so it bypasses block cost limit checking @@ -272,6 +331,53 @@ mod tests { ); } + #[test] + fn test_cost_model_data_len_cost() { + let lamports = 0; + let owner = Pubkey::default(); + let seed = String::default(); + let space = 100; + let base = Pubkey::default(); + for instruction in [ + SystemInstruction::CreateAccount { + lamports, + space, + owner, + }, + SystemInstruction::CreateAccountWithSeed { + base, + seed: seed.clone(), + lamports, + space, + owner, + }, + SystemInstruction::Allocate { space }, + SystemInstruction::AllocateWithSeed { + base, + seed, + space, + owner, + }, + ] { + assert_eq!( + space, + CostModel::calculate_account_data_size_on_deserialized_system_instruction( + instruction + ) + ); + } + assert_eq!( + 0, + CostModel::calculate_account_data_size_on_deserialized_system_instruction( + SystemInstruction::TransferWithSeed { + lamports, + from_seed: String::default(), + from_owner: Pubkey::default(), + } + ) + ); + } + #[test] fn test_cost_model_simple_transaction() { let (mint_keypair, start_hash) = test_setup(); diff --git a/runtime/src/cost_tracker.rs b/runtime/src/cost_tracker.rs index 4ad4525fe384cb..d682b4119d240c 100644 --- a/runtime/src/cost_tracker.rs +++ b/runtime/src/cost_tracker.rs @@ -18,6 +18,8 @@ pub enum CostTrackerError { /// would exceed account max limit WouldExceedAccountMaxLimit, + + WouldExceedAccountDataMaxLimit, } #[derive(AbiExample, 
Debug)] @@ -27,6 +29,7 @@ pub struct CostTracker { cost_by_writable_accounts: HashMap, block_cost: u64, transaction_count: u64, + account_data_size: u64, } impl Default for CostTracker { @@ -44,6 +47,7 @@ impl CostTracker { cost_by_writable_accounts: HashMap::with_capacity(WRITABLE_ACCOUNTS_PER_BLOCK), block_cost: 0, transaction_count: 0, + account_data_size: 0, } } @@ -58,7 +62,11 @@ impl CostTracker { _transaction: &SanitizedTransaction, tx_cost: &TransactionCost, ) -> Result<(), CostTrackerError> { - self.would_fit(&tx_cost.writable_accounts, &tx_cost.sum()) + self.would_fit( + &tx_cost.writable_accounts, + tx_cost.sum(), + tx_cost.account_data_size, + ) } pub fn add_transaction_cost( @@ -66,7 +74,11 @@ impl CostTracker { _transaction: &SanitizedTransaction, tx_cost: &TransactionCost, ) { - self.add_transaction(&tx_cost.writable_accounts, &tx_cost.sum()); + self.add_transaction( + &tx_cost.writable_accounts, + tx_cost.sum(), + tx_cost.account_data_size, + ); } pub fn try_add( @@ -75,8 +87,8 @@ impl CostTracker { tx_cost: &TransactionCost, ) -> Result { let cost = tx_cost.sum() * tx_cost.cost_weight as u64; - self.would_fit(&tx_cost.writable_accounts, &cost)?; - self.add_transaction(&tx_cost.writable_accounts, &cost); + self.would_fit(&tx_cost.writable_accounts, cost, tx_cost.account_data_size)?; + self.add_transaction(&tx_cost.writable_accounts, cost, tx_cost.account_data_size); Ok(self.block_cost) } @@ -100,6 +112,7 @@ impl CostTracker { ), ("costliest_account", costliest_account.to_string(), String), ("costliest_account_cost", costliest_account_cost as i64, i64), + ("account_data_size", self.account_data_size, i64), ); } @@ -116,17 +129,26 @@ impl CostTracker { (costliest_account, costliest_account_cost) } - fn would_fit(&self, keys: &[Pubkey], cost: &u64) -> Result<(), CostTrackerError> { + fn would_fit( + &self, + keys: &[Pubkey], + cost: u64, + account_data_len: u64, + ) -> Result<(), CostTrackerError> { // check against the total package cost if self.block_cost + cost > self.block_cost_limit { return Err(CostTrackerError::WouldExceedBlockMaxLimit); } // check if the transaction itself is more costly than the account_cost_limit - if *cost > self.account_cost_limit { + if cost > self.account_cost_limit { return Err(CostTrackerError::WouldExceedAccountMaxLimit); } + if self.account_data_size.saturating_add(account_data_len) > MAX_ACCOUNT_DATA_LEN { + return Err(CostTrackerError::WouldExceedAccountDataMaxLimit); + } + // check each account against account_cost_limit, for account_key in keys.iter() { match self.cost_by_writable_accounts.get(account_key) { @@ -144,7 +166,7 @@ impl CostTracker { Ok(()) } - fn add_transaction(&mut self, keys: &[Pubkey], cost: &u64) { + fn add_transaction(&mut self, keys: &[Pubkey], cost: u64, account_data_size: u64) { for account_key in keys.iter() { *self .cost_by_writable_accounts @@ -153,6 +175,7 @@ impl CostTracker { } self.block_cost += cost; self.transaction_count += 1; + self.account_data_size = self.account_data_size.saturating_add(account_data_size); } } @@ -212,11 +235,24 @@ mod tests { // build testee to have capacity for one simple transaction let mut testee = CostTracker::new(cost, cost); - assert!(testee.would_fit(&keys, &cost).is_ok()); - testee.add_transaction(&keys, &cost); + assert!(testee.would_fit(&keys, cost, 0).is_ok()); + testee.add_transaction(&keys, cost, 0); assert_eq!(cost, testee.block_cost); } + #[test] + fn test_cost_tracker_add_data() { + let (mint_keypair, start_hash) = test_setup(); + let (_tx, keys, cost) = 
build_simple_transaction(&mint_keypair, &start_hash); + + // build testee to have capacity for one simple transaction + let mut testee = CostTracker::new(cost, cost); + assert!(testee.would_fit(&keys, cost, 0).is_ok()); + let old = testee.account_data_size; + testee.add_transaction(&keys, cost, 1); + assert_eq!(old + 1, testee.account_data_size); + } + #[test] fn test_cost_tracker_ok_add_two_same_accounts() { let (mint_keypair, start_hash) = test_setup(); @@ -227,12 +263,12 @@ mod tests { // build testee to have capacity for two simple transactions, with same accounts let mut testee = CostTracker::new(cost1 + cost2, cost1 + cost2); { - assert!(testee.would_fit(&keys1, &cost1).is_ok()); - testee.add_transaction(&keys1, &cost1); + assert!(testee.would_fit(&keys1, cost1, 0).is_ok()); + testee.add_transaction(&keys1, cost1, 0); } { - assert!(testee.would_fit(&keys2, &cost2).is_ok()); - testee.add_transaction(&keys2, &cost2); + assert!(testee.would_fit(&keys2, cost2, 0).is_ok()); + testee.add_transaction(&keys2, cost2, 0); } assert_eq!(cost1 + cost2, testee.block_cost); assert_eq!(1, testee.cost_by_writable_accounts.len()); @@ -249,12 +285,12 @@ mod tests { // build testee to have capacity for two simple transactions, with same accounts let mut testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2); { - assert!(testee.would_fit(&keys1, &cost1).is_ok()); - testee.add_transaction(&keys1, &cost1); + assert!(testee.would_fit(&keys1, cost1, 0).is_ok()); + testee.add_transaction(&keys1, cost1, 0); } { - assert!(testee.would_fit(&keys2, &cost2).is_ok()); - testee.add_transaction(&keys2, &cost2); + assert!(testee.would_fit(&keys2, cost2, 0).is_ok()); + testee.add_transaction(&keys2, cost2, 0); } assert_eq!(cost1 + cost2, testee.block_cost); assert_eq!(2, testee.cost_by_writable_accounts.len()); @@ -271,12 +307,12 @@ mod tests { let mut testee = CostTracker::new(cmp::min(cost1, cost2), cost1 + cost2); // should have room for first transaction { - assert!(testee.would_fit(&keys1, &cost1).is_ok()); - testee.add_transaction(&keys1, &cost1); + assert!(testee.would_fit(&keys1, cost1, 0).is_ok()); + testee.add_transaction(&keys1, cost1, 0); } // but no more sapce on the same chain (same signer account) { - assert!(testee.would_fit(&keys2, &cost2).is_err()); + assert!(testee.would_fit(&keys2, cost2, 0).is_err()); } } @@ -292,15 +328,34 @@ mod tests { let mut testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2 - 1); // should have room for first transaction { - assert!(testee.would_fit(&keys1, &cost1).is_ok()); - testee.add_transaction(&keys1, &cost1); + assert!(testee.would_fit(&keys1, cost1, 0).is_ok()); + testee.add_transaction(&keys1, cost1, 0); } // but no more room for package as whole { - assert!(testee.would_fit(&keys2, &cost2).is_err()); + assert!(testee.would_fit(&keys2, cost2, 0).is_err()); } } + #[test] + fn test_cost_tracker_reach_data_limit() { + let (mint_keypair, start_hash) = test_setup(); + // build two transactions with diff accounts + let (_tx1, _keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash); + let second_account = Keypair::new(); + let (_tx2, keys2, cost2) = build_simple_transaction(&second_account, &start_hash); + + // build testee that passes + let testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2 - 1); + assert!(testee + .would_fit(&keys2, cost2, MAX_ACCOUNT_DATA_LEN) + .is_ok()); + // data is too big + assert!(testee + .would_fit(&keys2, cost2, MAX_ACCOUNT_DATA_LEN + 1) + .is_err()); + } + #[test] fn 
test_cost_tracker_try_add_is_atomic() { let (mint_keypair, start_hash) = test_setup(); diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index 8a07fd6d3e2f38..d3daa9b0d82779 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -372,7 +372,10 @@ impl InMemAccountsIndex { } Entry::Vacant(vacant) => { // not in cache, look on disk - let directly_to_disk = self.storage.get_startup(); + + // desired to be this for filler accounts: self.storage.get_startup(); + // but, this has proven to be far too slow at high account counts + let directly_to_disk = false; if directly_to_disk { // We may like this to always run, but it is unclear. // If disk bucket needs to resize, then this call can stall for a long time. diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 81a5ada5fc34f6..2ff16a50afe6ef 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1,5 +1,6 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::integer_arithmetic)] +pub mod account_info; pub mod accounts; pub mod accounts_background_service; pub mod accounts_cache; diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index debe01f3e90605..45a7d76a44baf5 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -3,12 +3,12 @@ use { solana_measure::measure::Measure, solana_program_runtime::{ instruction_recorder::InstructionRecorder, - invoke_context::{BuiltinProgram, Executors, InvokeContext}, + invoke_context::{BuiltinProgram, Executors, InvokeContext, TransactionAccountRefCell}, log_collector::LogCollector, timings::ExecuteDetailsTimings, }, solana_sdk::{ - account::{AccountSharedData, WritableAccount}, + account::WritableAccount, compute_budget::ComputeBudget, feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, hash::Hash, @@ -45,7 +45,7 @@ impl MessageProcessor { builtin_programs: &[BuiltinProgram], message: &Message, program_indices: &[Vec], - accounts: &[(Pubkey, Rc>)], + accounts: &[TransactionAccountRefCell], rent: Rent, log_collector: Option>>, executors: Rc>, @@ -89,9 +89,9 @@ impl MessageProcessor { // Fixup the special instructions key if present // before the account pre-values are taken care of - for (pubkey, accont) in accounts.iter().take(message.account_keys.len()) { + for (pubkey, account) in accounts.iter().take(message.account_keys.len()) { if instructions::check_id(pubkey) { - let mut mut_account_ref = accont.borrow_mut(); + let mut mut_account_ref = account.borrow_mut(); instructions::store_current_index( mut_account_ref.data_as_mut_slice(), instruction_index as u16, @@ -128,7 +128,7 @@ mod tests { super::*, crate::rent_collector::RentCollector, solana_sdk::{ - account::ReadableAccount, + account::{AccountSharedData, ReadableAccount}, instruction::{AccountMeta, Instruction, InstructionError}, keyed_account::keyed_account_at_index, message::Message, diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 600d8b823a2d79..e5423c07a9f6c5 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -5,7 +5,7 @@ use { accounts::Accounts, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, AppendVecId, - BankHashInfo, + AtomicAppendVecId, BankHashInfo, IndexGenerationInfo, }, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -334,7 +334,7 @@ fn reconstruct_bank_from_fields( where E: 
SerializableStorage + std::marker::Sync, { - let accounts_db = reconstruct_accountsdb_from_fields( + let (accounts_db, reconstructed_accounts_db_info) = reconstruct_accountsdb_from_fields( snapshot_accounts_db_fields, account_paths, unpacked_append_vec_map, @@ -359,6 +359,7 @@ where debug_keys, additional_builtins, debug_do_not_add_builtins, + reconstructed_accounts_db_info.accounts_data_len, ); info!("rent_collector: {:?}", bank.rent_collector()); @@ -386,6 +387,12 @@ where Ok(()) } +/// This struct contains side-info while reconstructing the accounts DB from fields. +#[derive(Debug, Default, Copy, Clone)] +struct ReconstructedAccountsDbInfo { + accounts_data_len: u64, +} + #[allow(clippy::too_many_arguments)] fn reconstruct_accountsdb_from_fields( snapshot_accounts_db_fields: SnapshotAccountsDbFields, @@ -399,7 +406,7 @@ fn reconstruct_accountsdb_from_fields( verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, -) -> Result +) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error> where E: SerializableStorage + std::marker::Sync, { @@ -430,7 +437,7 @@ where // Remap the deserialized AppendVec paths to point to correct local paths let num_collisions = AtomicUsize::new(0); - let next_append_vec_id = AtomicUsize::new(0); + let next_append_vec_id = AtomicAppendVecId::new(0); let mut measure_remap = Measure::start("remap"); let mut storage = (0..snapshot_storages.len()) .into_par_iter() @@ -536,11 +543,12 @@ where }) .unwrap(); - accounts_db.generate_index( + let IndexGenerationInfo { accounts_data_len } = accounts_db.generate_index( limit_load_slot_count_from_snapshot, verify_index, genesis_config, ); + accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule); handle.join().unwrap(); @@ -557,5 +565,8 @@ where ("accountsdb-notify-at-start-us", measure_notify.as_us(), i64), ); - Ok(Arc::try_unwrap(accounts_db).unwrap()) + Ok(( + Arc::try_unwrap(accounts_db).unwrap(), + ReconstructedAccountsDbInfo { accounts_data_len }, + )) } diff --git a/runtime/src/serde_snapshot/future.rs b/runtime/src/serde_snapshot/future.rs index 8fd4a9a455ff04..bc279e355148b3 100644 --- a/runtime/src/serde_snapshot/future.rs +++ b/runtime/src/serde_snapshot/future.rs @@ -2,9 +2,9 @@ use solana_frozen_abi::abi_example::IgnoreAsHelper; use { super::{common::UnusedAccounts, *}, - crate::ancestors::AncestorsForSerialization, + crate::{ancestors::AncestorsForSerialization, stakes::StakesCache}, solana_measure::measure::Measure, - std::cell::RefCell, + std::{cell::RefCell, sync::RwLock}, }; type AccountsDbFields = super::AccountsDbFields; @@ -42,7 +42,6 @@ impl From<&AccountStorageEntry> for SerializableAccountStorageEntry { } } -use std::sync::RwLock; // Deserializable version of Bank which need not be serializable, // because it's handled by SerializableVersionedBank. // So, sync fields with it! 
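Stepping back from the snapshot plumbing for a moment: the cost_model and cost_tracker hunks earlier in this diff add a per-block budget for newly allocated account data. `CostModel::calculate_account_data_size` estimates the bytes a transaction may allocate (from system-program CreateAccount/Allocate instructions), and `CostTracker::would_fit` / `add_transaction` check and accumulate that estimate against `MAX_ACCOUNT_DATA_LEN` with saturating arithmetic. A simplified, standalone sketch of that check-then-commit flow (a hypothetical `DataBudget` type, not the real CostTracker) is:

```rust
/// Per-block cap on newly allocated account data, mirroring the constant
/// added in block_cost_limits.rs above.
const MAX_ACCOUNT_DATA_LEN: u64 = 100_000_000;

#[derive(Default)]
struct DataBudget {
    account_data_size: u64,
}

#[derive(Debug, PartialEq)]
enum BudgetError {
    WouldExceedAccountDataMaxLimit,
}

impl DataBudget {
    /// Check-before-commit, like `would_fit`: reject the transaction if its
    /// estimated allocation would push the running total past the cap.
    fn would_fit(&self, account_data_len: u64) -> Result<(), BudgetError> {
        if self.account_data_size.saturating_add(account_data_len) > MAX_ACCOUNT_DATA_LEN {
            return Err(BudgetError::WouldExceedAccountDataMaxLimit);
        }
        Ok(())
    }

    /// Commit step, like `add_transaction`: saturating_add keeps the counter
    /// from wrapping even with adversarial size estimates.
    fn add(&mut self, account_data_len: u64) {
        self.account_data_size = self.account_data_size.saturating_add(account_data_len);
    }
}

fn main() {
    let mut budget = DataBudget::default();
    assert!(budget.would_fit(MAX_ACCOUNT_DATA_LEN).is_ok());
    budget.add(10);
    assert_eq!(
        budget.would_fit(MAX_ACCOUNT_DATA_LEN),
        Err(BudgetError::WouldExceedAccountDataMaxLimit)
    );
}
```

The separate would-fit/add steps match how the tracker is driven from banking: candidates are screened first, and only the ones actually packed into the block are committed to the running totals.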
@@ -153,7 +152,7 @@ pub(crate) struct SerializableVersionedBank<'a> { pub(crate) rent_collector: RentCollector, pub(crate) epoch_schedule: EpochSchedule, pub(crate) inflation: Inflation, - pub(crate) stakes: &'a RwLock, + pub(crate) stakes: &'a StakesCache, pub(crate) unused_accounts: UnusedAccounts, pub(crate) epoch_stakes: &'a HashMap, pub(crate) is_delta: bool, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 5de81e824280c1..c4975827bdff63 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -89,6 +89,7 @@ where Some(crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, ) + .map(|(accounts_db, _)| accounts_db) } #[cfg(test)] @@ -312,7 +313,7 @@ mod test_bank_serialize { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "Fv5AFJSnZi9sssiE7Jn8bH2iTPnqu3UNc3np62r1sTsr")] + #[frozen_abi(digest = "EuYcD3JCEWRnQaFHW1CAy2bBqLkakc88iLJtZH6kYeVF")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperFuture { #[serde(serialize_with = "wrapper_future")] diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index c58f0c2b93f965..970889e3d18516 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -6,13 +6,16 @@ use { stake_history::StakeHistory, vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, }, + dashmap::DashMap, + num_derive::ToPrimitive, + num_traits::ToPrimitive, rayon::{ iter::{IntoParallelRefIterator, ParallelIterator}, ThreadPool, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, - clock::Epoch, + clock::{Epoch, Slot}, pubkey::Pubkey, stake::{ self, @@ -21,9 +24,139 @@ use { }, solana_stake_program::stake_state, solana_vote_program::vote_state::VoteState, - std::{collections::HashMap, sync::Arc}, + std::{ + collections::HashMap, + sync::{Arc, RwLock, RwLockReadGuard}, + }, }; +#[derive(Debug, Clone, PartialEq, ToPrimitive)] +pub enum InvalidCacheEntryReason { + Missing, + BadState, + WrongOwner, +} + +#[derive(Default, Debug, Deserialize, Serialize, AbiExample)] +pub struct StakesCache(RwLock); + +impl StakesCache { + pub fn new(stakes: Stakes) -> Self { + Self(RwLock::new(stakes)) + } + + pub fn stakes(&self) -> RwLockReadGuard { + self.0.read().unwrap() + } + + pub fn is_stake(account: &AccountSharedData) -> bool { + solana_vote_program::check_id(account.owner()) + || stake::program::check_id(account.owner()) + && account.data().len() >= std::mem::size_of::() + } + + pub fn check_and_store( + &self, + pubkey: &Pubkey, + account: &AccountSharedData, + remove_delegation_on_inactive: bool, + ) { + if solana_vote_program::check_id(account.owner()) { + let new_vote_account = if account.lamports() != 0 + && VoteState::is_correct_size_and_initialized(account.data()) + { + let vote_account = VoteAccount::from(account.clone()); + { + // Called to eagerly deserialize vote state + let _res = vote_account.vote_state(); + } + Some(vote_account) + } else { + None + }; + + self.0 + .write() + .unwrap() + .update_vote_account(pubkey, new_vote_account); + } else if solana_stake_program::check_id(account.owner()) { + let new_delegation = stake_state::delegation_from(account).map(|delegation| { + let stakes = self.stakes(); + let stake = if account.lamports() != 0 { + delegation.stake(stakes.epoch, Some(&stakes.stake_history)) + } else { + // when account is removed (lamports == 0), this special `else` clause ensures + // resetting cached stake value below, even if the 
account happens to be + // still staked for some (odd) reason + 0 + }; + (stake, delegation) + }); + + let remove_delegation = if remove_delegation_on_inactive { + new_delegation.is_none() + } else { + account.lamports() == 0 + }; + + self.0.write().unwrap().update_stake_delegation( + pubkey, + new_delegation, + remove_delegation, + ); + } + } + + pub fn activate_epoch(&self, next_epoch: Epoch, thread_pool: &ThreadPool) { + let mut stakes = self.0.write().unwrap(); + stakes.activate_epoch(next_epoch, thread_pool) + } + + pub fn handle_invalid_keys( + &self, + invalid_stake_keys: DashMap, + invalid_vote_keys: DashMap, + should_evict_invalid_entries: bool, + current_slot: Slot, + ) { + if invalid_stake_keys.is_empty() && invalid_vote_keys.is_empty() { + return; + } + + // Prune invalid stake delegations and vote accounts that were + // not properly evicted in normal operation. + let mut maybe_stakes = if should_evict_invalid_entries { + Some(self.0.write().unwrap()) + } else { + None + }; + + for (stake_pubkey, reason) in invalid_stake_keys { + if let Some(stakes) = maybe_stakes.as_mut() { + stakes.remove_stake_delegation(&stake_pubkey); + } + datapoint_warn!( + "bank-stake_delegation_accounts-invalid-account", + ("slot", current_slot as i64, i64), + ("stake-address", format!("{:?}", stake_pubkey), String), + ("reason", reason.to_i64().unwrap_or_default(), i64), + ); + } + + for (vote_pubkey, reason) in invalid_vote_keys { + if let Some(stakes) = maybe_stakes.as_mut() { + stakes.remove_vote_account(&vote_pubkey); + } + datapoint_warn!( + "bank-stake_delegation_accounts-invalid-account", + ("slot", current_slot as i64, i64), + ("vote-address", format!("{:?}", vote_pubkey), String), + ("reason", reason.to_i64().unwrap_or_default(), i64), + ); + } + } +} + #[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize, AbiExample)] pub struct Stakes { /// vote accounts @@ -144,89 +277,72 @@ impl Stakes { + self.vote_accounts.iter().map(get_lamports).sum::() } - pub fn is_stake(account: &AccountSharedData) -> bool { - solana_vote_program::check_id(account.owner()) - || stake::program::check_id(account.owner()) - && account.data().len() >= std::mem::size_of::() + pub fn remove_vote_account(&mut self, vote_pubkey: &Pubkey) { + self.vote_accounts.remove(vote_pubkey); } - pub fn store( + pub fn remove_stake_delegation(&mut self, stake_pubkey: &Pubkey) { + if let Some(removed_delegation) = self.stake_delegations.remove(stake_pubkey) { + let removed_stake = removed_delegation.stake(self.epoch, Some(&self.stake_history)); + self.vote_accounts + .sub_stake(&removed_delegation.voter_pubkey, removed_stake); + } + } + + pub fn update_vote_account( &mut self, - pubkey: &Pubkey, - account: &AccountSharedData, - remove_delegation_on_inactive: bool, + vote_pubkey: &Pubkey, + new_vote_account: Option, ) { - if solana_vote_program::check_id(account.owner()) { - // unconditionally remove existing at first; there is no dependent calculated state for - // votes, not like stakes (stake codepath maintains calculated stake value grouped by - // delegated vote pubkey) - let old = self.vote_accounts.remove(pubkey); - // when account is removed (lamports == 0 or data uninitialized), don't read so that - // given `pubkey` can be used for any owner in the future, while not affecting Stakes. 
- if account.lamports() != 0 && VoteState::is_correct_size_and_initialized(account.data()) - { - let stake = old.as_ref().map_or_else( - || self.calculate_stake(pubkey, self.epoch, &self.stake_history), - |v| v.0, - ); - - self.vote_accounts - .insert(*pubkey, (stake, VoteAccount::from(account.clone()))); - } - } else if stake::program::check_id(account.owner()) { - // old_stake is stake lamports and voter_pubkey from the pre-store() version - let old_stake = self.stake_delegations.get(pubkey).map(|delegation| { - ( - delegation.voter_pubkey, - delegation.stake(self.epoch, Some(&self.stake_history)), - ) - }); + // unconditionally remove existing at first; there is no dependent calculated state for + // votes, not like stakes (stake codepath maintains calculated stake value grouped by + // delegated vote pubkey) + let old_entry = self.vote_accounts.remove(vote_pubkey); + if let Some(new_vote_account) = new_vote_account { + debug_assert!(new_vote_account.is_deserialized()); + let new_stake = old_entry.as_ref().map_or_else( + || self.calculate_stake(vote_pubkey, self.epoch, &self.stake_history), + |(old_stake, _old_vote_account)| *old_stake, + ); - let delegation = stake_state::delegation_from(account); - - let stake = delegation.map(|delegation| { - ( - delegation.voter_pubkey, - if account.lamports() != 0 { - delegation.stake(self.epoch, Some(&self.stake_history)) - } else { - // when account is removed (lamports == 0), this special `else` clause ensures - // resetting cached stake value below, even if the account happens to be - // still staked for some (odd) reason - 0 - }, - ) - }); + self.vote_accounts + .insert(*vote_pubkey, (new_stake, new_vote_account)); + } + } - // if adjustments need to be made... - if stake != old_stake { - if let Some((voter_pubkey, stake)) = old_stake { - self.vote_accounts.sub_stake(&voter_pubkey, stake); - } - if let Some((voter_pubkey, stake)) = stake { - self.vote_accounts.add_stake(&voter_pubkey, stake); - } - } + pub fn update_stake_delegation( + &mut self, + stake_pubkey: &Pubkey, + new_delegation: Option<(u64, Delegation)>, + remove_delegation: bool, + ) { + // old_stake is stake lamports and voter_pubkey from the pre-store() version + let old_stake = self.stake_delegations.get(stake_pubkey).map(|delegation| { + ( + delegation.voter_pubkey, + delegation.stake(self.epoch, Some(&self.stake_history)), + ) + }); - let remove_delegation = if remove_delegation_on_inactive { - delegation.is_none() - } else { - account.lamports() == 0 - }; + let new_stake = new_delegation.map(|(stake, delegation)| (delegation.voter_pubkey, stake)); - if remove_delegation { - // when account is removed (lamports == 0), remove it from Stakes as well - // so that given `pubkey` can be used for any owner in the future, while not - // affecting Stakes. - self.stake_delegations.remove(pubkey); - } else if let Some(delegation) = delegation { - self.stake_delegations.insert(*pubkey, delegation); + // check if adjustments need to be made... + if new_stake != old_stake { + if let Some((voter_pubkey, stake)) = old_stake { + self.vote_accounts.sub_stake(&voter_pubkey, stake); } - } else { - // there is no need to remove possibly existing Stakes cache entries with given - // `pubkey` because this isn't possible, first of all. - // Runtime always enforces an intermediary write of account.lamports == 0, - // when not-System111-owned account.owner is swapped. 
+ if let Some((voter_pubkey, stake)) = new_stake { + self.vote_accounts.add_stake(&voter_pubkey, stake); + } + } + + if remove_delegation { + // when account is removed (lamports == 0), remove it from Stakes as well + // so that given `pubkey` can be used for any owner in the future, while not + // affecting Stakes. + self.stake_delegations.remove(stake_pubkey); + } else if let Some((_stake, delegation)) = new_delegation { + self.stake_delegations.insert(*stake_pubkey, delegation); } } @@ -326,18 +442,19 @@ pub mod tests { #[test] fn test_stakes_basic() { for i in 0..4 { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: i, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, mut stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -347,8 +464,9 @@ pub mod tests { } stake_account.set_lamports(42); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -359,9 +477,10 @@ pub mod tests { // activate more let (_stake_pubkey, mut stake_account) = create_stake_account(42, &vote_pubkey); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -371,8 +490,9 @@ pub mod tests { } stake_account.set_lamports(0); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0); @@ -382,58 +502,62 @@ pub mod tests { #[test] fn test_stakes_highest() { - let mut stakes = Stakes::default(); + let stakes_cache = StakesCache::default(); - assert_eq!(stakes.highest_staked_node(), None); + assert_eq!(stakes_cache.stakes().highest_staked_node(), None); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let ((vote11_pubkey, vote11_account), (stake11_pubkey, stake11_account)) = create_staked_node_accounts(20); - stakes.store(&vote11_pubkey, &vote11_account, true); - stakes.store(&stake11_pubkey, &stake11_account, true); + stakes_cache.check_and_store(&vote11_pubkey, &vote11_account, true); + stakes_cache.check_and_store(&stake11_pubkey, &stake11_account, true); let vote11_node_pubkey = VoteState::from(&vote11_account).unwrap().node_pubkey; - assert_eq!(stakes.highest_staked_node(), 
Some(vote11_node_pubkey)) + let highest_staked_node = stakes_cache.stakes().highest_staked_node(); + assert_eq!(highest_staked_node, Some(vote11_node_pubkey)); } #[test] fn test_stakes_vote_account_disappear_reappear() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, mut vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); } vote_account.set_lamports(0); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_none()); } vote_account.set_lamports(1); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); @@ -444,9 +568,10 @@ pub mod tests { let mut pushed = vote_account.data().to_vec(); pushed.push(0); vote_account.set_data(pushed); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_none()); } @@ -455,17 +580,19 @@ pub mod tests { let default_vote_state = VoteState::default(); let versioned = VoteStateVersions::new_current(default_vote_state); VoteState::to(&versioned, &mut vote_account).unwrap(); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_none()); } vote_account.set_data(cache_data); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); @@ -474,10 +601,10 @@ pub mod tests { #[test] fn test_stakes_change_delegate() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); @@ -485,15 +612,16 @@ pub mod tests { let ((vote_pubkey2, vote_account2), (_stake_pubkey2, stake_account2)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&vote_pubkey2, &vote_account2, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey2, &vote_account2, true); // delegates to vote_pubkey - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = 
stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -505,9 +633,10 @@ pub mod tests { } // delegates to vote_pubkey2 - stakes.store(&stake_pubkey, &stake_account2, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account2, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0); @@ -520,23 +649,24 @@ pub mod tests { } #[test] fn test_stakes_multiple_stakers() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); let (stake_pubkey2, stake_account2) = create_stake_account(10, &vote_pubkey); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); // delegates to vote_pubkey - stakes.store(&stake_pubkey, &stake_account, true); - stakes.store(&stake_pubkey2, &stake_account2, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey2, &stake_account2, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 20); @@ -545,16 +675,17 @@ pub mod tests { #[test] fn test_activate_epoch() { - let mut stakes = Stakes::default(); + let stakes_cache = StakesCache::default(); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert_eq!( vote_accounts.get(&vote_pubkey).unwrap().0, @@ -562,8 +693,9 @@ pub mod tests { ); } let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - stakes.activate_epoch(3, &thread_pool); + stakes_cache.activate_epoch(3, &thread_pool); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert_eq!( vote_accounts.get(&vote_pubkey).unwrap().0, @@ -574,30 +706,32 @@ pub mod tests { #[test] fn test_stakes_not_delegate() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); } // not a stake account, and whacks above entry - stakes.store( + stakes_cache.check_and_store( &stake_pubkey, &AccountSharedData::new(1, 0, &stake::program::id()), true, ); { + let stakes = stakes_cache.stakes(); let 
vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0); @@ -612,7 +746,7 @@ pub mod tests { #[test] fn test_vote_balance_and_staked_normal() { - let mut stakes = Stakes::default(); + let stakes_cache = StakesCache::default(); impl Stakes { pub fn vote_balance_and_warmed_staked(&self) -> u64 { self.vote_accounts @@ -625,17 +759,21 @@ pub mod tests { let genesis_epoch = 0; let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_warming_staked_node_accounts(10, genesis_epoch); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); - assert_eq!(stakes.vote_balance_and_staked(), 11); - assert_eq!(stakes.vote_balance_and_warmed_staked(), 1); + { + let stakes = stakes_cache.stakes(); + assert_eq!(stakes.vote_balance_and_staked(), 11); + assert_eq!(stakes.vote_balance_and_warmed_staked(), 1); + } let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); for (epoch, expected_warmed_stake) in ((genesis_epoch + 1)..=3).zip(&[2, 3, 4]) { - stakes.activate_epoch(epoch, &thread_pool); + stakes_cache.activate_epoch(epoch, &thread_pool); // vote_balance_and_staked() always remain to return same lamports // while vote_balance_and_warmed_staked() gradually increases + let stakes = stakes_cache.stakes(); assert_eq!(stakes.vote_balance_and_staked(), 11); assert_eq!( stakes.vote_balance_and_warmed_staked(), diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index 453e54258f8ca3..daaae5af9abbf6 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -69,6 +69,10 @@ impl VoteAccount { inner.vote_state.read().unwrap() } + pub fn is_deserialized(&self) -> bool { + self.0.vote_state_once.is_completed() + } + /// VoteState.node_pubkey of this vote-account. 
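The stakes.rs test changes above all follow one mechanical conversion: direct mutation of a `Stakes` value becomes `StakesCache::check_and_store`, and every read goes through a short-lived guard returned by `stakes_cache.stakes()`, scoped in a block so the lock is dropped before the next write. A minimal sketch of that access pattern, using stand-in types (an `RwLock` wrapper around a single counter) rather than the real runtime structs:

```rust
use std::sync::{RwLock, RwLockReadGuard};

// Stand-in types that model only the access pattern, not the real runtime structs.
#[derive(Default)]
struct Stakes {
    total_stake: u64,
}

#[derive(Default)]
struct StakesCache(RwLock<Stakes>);

impl StakesCache {
    // Writers take `&self`; interior mutability replaces the old `&mut Stakes` calls.
    fn check_and_store(&self, delta: u64) {
        self.0.write().unwrap().total_stake += delta;
    }

    // Readers get a short-lived guard, mirroring `stakes_cache.stakes()`.
    fn stakes(&self) -> RwLockReadGuard<'_, Stakes> {
        self.0.read().unwrap()
    }
}

fn main() {
    let stakes_cache = StakesCache::default();
    stakes_cache.check_and_store(10);
    {
        // Scope the guard as the tests do, so the read lock is released
        // before the next check_and_store call.
        let stakes = stakes_cache.stakes();
        assert_eq!(stakes.total_stake, 10);
    }
    stakes_cache.check_and_store(5);
    assert_eq!(stakes_cache.stakes().total_stake, 15);
}
```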
fn node_pubkey(&self) -> Option { Some(self.vote_state().as_ref().ok()?.node_pubkey) diff --git a/sdk/.gitignore b/sdk/.gitignore index 5404b132dba6e1..14bd5d17098f2f 100644 --- a/sdk/.gitignore +++ b/sdk/.gitignore @@ -1,2 +1,4 @@ -/target/ /farf/ +/node_modules/ +/package-lock.json +/target/ diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 49f1c46ded20e0..6d888ba7444ac2 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -47,7 +47,7 @@ byteorder = { version = "1.4.3", optional = true } chrono = { default-features = false, features = ["alloc"], version = "0.4", optional = true } curve25519-dalek = { version = "3.2.0", optional = true } derivation-path = { version = "0.1.3", default-features = false } -digest = { version = "0.9.0", optional = true } +digest = { version = "0.10.0", optional = true } ed25519-dalek = { version = "=1.0.1", optional = true } ed25519-dalek-bip32 = { version = "0.1.1", optional = true } generic-array = { version = "0.14.4", default-features = false, features = ["serde", "more_lengths"], optional = true } @@ -64,12 +64,12 @@ qstring = "0.7.2" rand = { version = "0.7.0", optional = true } rand_chacha = { version = "0.2.2", optional = true } rustversion = "1.0.6" -serde = "1.0.130" +serde = "1.0.131" serde_bytes = "0.11" serde_derive = "1.0.103" serde_json = { version = "1.0.72", optional = true } sha2 = "0.10.0" -sha3 = { version = "0.9.1", optional = true } +sha3 = { version = "0.10.0", optional = true } solana-logger = { path = "../logger", version = "=1.10.0", optional = true } solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" } @@ -77,6 +77,10 @@ solana-program = { path = "program", version = "=1.10.0" } solana-sdk-macro = { path = "macro", version = "=1.10.0" } thiserror = "1.0" uriparse = "0.6.3" +wasm-bindgen = "0.2" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = "0.3.55" [dev-dependencies] curve25519-dalek = "3.2.0" diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index 8ffb6f54673cb9..7c240f4c7598b1 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -373,3 +373,31 @@ pub fn pubkeys(input: TokenStream) -> TokenStream { let pubkeys = parse_macro_input!(input as Pubkeys); TokenStream::from(quote! {#pubkeys}) } + +// The normal `wasm_bindgen` macro generates a .bss section which causes the resulting +// BPF program to fail to load, so for now this stub should be used when building for BPF +#[proc_macro_attribute] +pub fn wasm_bindgen_stub(_attr: TokenStream, item: TokenStream) -> TokenStream { + match parse_macro_input!(item as syn::Item) { + syn::Item::Struct(mut item_struct) => { + if let syn::Fields::Named(fields) = &mut item_struct.fields { + // Strip out any `#[wasm_bindgen]` added to struct fields. This is custom + // syntax supplied by the normal `wasm_bindgen` macro. + for field in fields.named.iter_mut() { + field.attrs.retain(|attr| { + !attr + .path + .segments + .iter() + .any(|segment| segment.ident == "wasm_bindgen") + }); + } + } + quote! 
{ #item_struct } + } + item => { + quote!(#item) + } + } + .into() +} diff --git a/sdk/package.json b/sdk/package.json new file mode 120000 index 00000000000000..aa87faef28d8ce --- /dev/null +++ b/sdk/package.json @@ -0,0 +1 @@ +program/package.json \ No newline at end of file diff --git a/sdk/program/.gitignore b/sdk/program/.gitignore new file mode 100644 index 00000000000000..936e5c57af9478 --- /dev/null +++ b/sdk/program/.gitignore @@ -0,0 +1,2 @@ +/node_modules/ +/package-lock.json diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 23a888dc70c4e6..7f4408eab5a330 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -28,7 +28,7 @@ serde = "1.0.112" serde_bytes = "0.11" serde_derive = "1.0.103" sha2 = "0.10.0" -sha3 = "0.9.1" +sha3 = "0.10.0" solana-frozen-abi = { path = "../../frozen-abi", version = "=1.10.0" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.10.0" } solana-sdk-macro = { path = "../macro", version = "=1.10.0" } @@ -42,6 +42,13 @@ libsecp256k1 = "0.6.0" rand = "0.7.0" solana-logger = { path = "../../logger", version = "=1.10.0" } itertools = "0.10.1" +wasm-bindgen = "0.2" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +console_error_panic_hook = "0.1.7" +console_log = "0.2.0" +js-sys = "0.3.55" +getrandom = { version = "0.1", features = ["wasm-bindgen"] } [target.'cfg(not(target_pointer_width = "64"))'.dependencies] parking_lot = "0.11" diff --git a/sdk/program/package.json b/sdk/program/package.json new file mode 100644 index 00000000000000..f1f074ff205daf --- /dev/null +++ b/sdk/program/package.json @@ -0,0 +1,14 @@ +{ + "devDependencies": { + "chai": "^4.3.4", + "mocha": "^9.1.2", + "prettier": "^2.4.1" + }, + "scripts": { + "postinstall": "npm run build", + "build": "wasm-pack build --target nodejs --dev --out-dir node_modules/crate --out-name crate", + "pretty": "prettier --check 'tests/*.mjs'", + "pretty:fix": "prettier --write 'tests/*.mjs'", + "test": "mocha 'tests/*.mjs'" + } +} diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index c150268cd36e78..66f5ecaa9801f0 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -1,7 +1,7 @@ //! The `hash` module provides functions for creating SHA-256 hashes. use { - crate::sanitize::Sanitize, + crate::{sanitize::Sanitize, wasm_bindgen}, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, sha2::{Digest, Sha256}, std::{convert::TryFrom, fmt, mem, str::FromStr}, @@ -11,6 +11,8 @@ use { pub const HASH_BYTES: usize = 32; /// Maximum string length of a base58 encoded hash const MAX_BASE58_LEN: usize = 44; + +#[wasm_bindgen] #[derive( Serialize, Deserialize, diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index 51324ab60953b3..803e95596d34b5 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -2,7 +2,7 @@ //! Defines a composable Instruction type and a memory-efficient CompiledInstruction. 
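The `wasm_bindgen_stub` attribute defined earlier in this diff only strips `#[wasm_bindgen(...)]` attributes from named struct fields and re-emits the item unchanged, so BPF builds avoid the `.bss`-producing codegen of the real macro. Consumers select between the two with a `cfg`-gated alias, as the `sdk/program/src/lib.rs` hunk further below does. A library-side sketch of how a crate might wire that up, assuming it depends on both `solana-sdk-macro` and `wasm-bindgen`:

```rust
// On BPF the stub macro is aliased in place of the real `wasm_bindgen` attribute;
// it strips `#[wasm_bindgen(...)]` from named fields and emits the item unchanged.
#[cfg(target_arch = "bpf")]
use solana_sdk_macro::wasm_bindgen_stub as wasm_bindgen;
#[cfg(not(target_arch = "bpf"))]
use wasm_bindgen::prelude::wasm_bindgen;

#[wasm_bindgen]
pub struct Example {
    // `skip` tells the real macro not to generate a JS accessor for this field;
    // the stub simply removes the attribute so the struct stays plain Rust on BPF.
    #[wasm_bindgen(skip)]
    pub payload: Vec<u8>,
}
```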
use { - crate::{pubkey::Pubkey, sanitize::Sanitize, short_vec}, + crate::{pubkey::Pubkey, sanitize::Sanitize, short_vec, wasm_bindgen}, bincode::serialize, borsh::BorshSerialize, serde::Serialize, @@ -240,13 +240,17 @@ pub enum InstructionError { // conversions must also be added } +#[wasm_bindgen] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Instruction { /// Pubkey of the instruction processor that executes this instruction + #[wasm_bindgen(skip)] pub program_id: Pubkey, /// Metadata for what accounts should be passed to the instruction processor + #[wasm_bindgen(skip)] pub accounts: Vec, /// Opaque data passed to the instruction processor + #[wasm_bindgen(skip)] pub data: Vec, } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 3d5768a6d909e4..8ac5140ad8374f 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -52,6 +52,12 @@ pub mod stake_history; pub mod system_instruction; pub mod system_program; pub mod sysvar; +pub mod wasm; + +#[cfg(target_arch = "bpf")] +pub use solana_sdk_macro::wasm_bindgen_stub as wasm_bindgen; +#[cfg(not(target_arch = "bpf"))] +pub use wasm_bindgen::prelude::wasm_bindgen; pub mod config { pub mod program { diff --git a/sdk/program/src/log.rs b/sdk/program/src/log.rs index a205241929c50c..4655c1b1fdc054 100644 --- a/sdk/program/src/log.rs +++ b/sdk/program/src/log.rs @@ -21,12 +21,10 @@ macro_rules! info { /// Print a message to the log /// -/// There are two fast forms: +/// Fast form: /// 1. Single string: `msg!("hi")` -/// 2. 5 integers: `msg!(1, 2, 3, 4, 5)` /// -/// The third form is more generic and incurs a very large runtime overhead so it should be used -/// with care: +/// The generic form incurs a very large runtime overhead so it should be used with care: /// 3. Generalized format string: `msg!("Hello {}: 1, 2, {}", "World", 3)` /// #[macro_export] @@ -34,15 +32,6 @@ macro_rules! 
msg { ($msg:expr) => { $crate::log::sol_log($msg) }; - ($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => { - $crate::log::sol_log_64( - $arg1 as u64, - $arg2 as u64, - $arg3 as u64, - $arg4 as u64, - $arg5 as u64, - ) - }; ($($arg:tt)*) => ($crate::log::sol_log(&format!($($arg)*))); } @@ -108,7 +97,7 @@ pub fn sol_log_data(data: &[&[u8]]) { #[allow(dead_code)] pub fn sol_log_slice(slice: &[u8]) { for (i, s) in slice.iter().enumerate() { - msg!(0, 0, 0, i, *s); + sol_log_64(0, 0, 0, i as u64, *s as u64); } } @@ -120,15 +109,15 @@ pub fn sol_log_slice(slice: &[u8]) { pub fn sol_log_params(accounts: &[AccountInfo], data: &[u8]) { for (i, account) in accounts.iter().enumerate() { msg!("AccountInfo"); - msg!(0, 0, 0, 0, i); + sol_log_64(0, 0, 0, 0, i as u64); msg!("- Is signer"); - msg!(0, 0, 0, 0, account.is_signer); + sol_log_64(0, 0, 0, 0, account.is_signer as u64); msg!("- Key"); account.key.log(); msg!("- Lamports"); - msg!(0, 0, 0, 0, account.lamports()); + sol_log_64(0, 0, 0, 0, account.lamports()); msg!("- Account data length"); - msg!(0, 0, 0, 0, account.data_len()); + sol_log_64(0, 0, 0, 0, account.data_len() as u64); msg!("- Owner"); account.owner.log(); } diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index 986b94b310ef7f..a02ff8d6a32a6d 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -12,7 +12,7 @@ use { serialize_utils::{ append_slice, append_u16, append_u8, read_pubkey, read_slice, read_u16, read_u8, }, - short_vec, system_instruction, system_program, sysvar, + short_vec, system_instruction, system_program, sysvar, wasm_bindgen, }, lazy_static::lazy_static, std::{collections::BTreeSet, convert::TryFrom, str::FromStr}, @@ -168,15 +168,18 @@ fn get_program_ids(instructions: &[Instruction]) -> Vec { // NOTE: Serialization-related changes must be paired with the custom serialization // for versioned messages in the `RemainingLegacyMessage` struct. +#[wasm_bindgen] #[frozen_abi(digest = "2KnLEqfLcTBQqitE22Pp8JYkaqVVbAkGbCfdeHoyxcAU")] #[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] #[serde(rename_all = "camelCase")] pub struct Message { /// The message header, identifying signed and read-only `account_keys` /// NOTE: Serialization-related changes must be paired with the direct read at sigverify. + #[wasm_bindgen(skip)] pub header: MessageHeader, /// All the account keys used by this transaction + #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub account_keys: Vec, @@ -185,6 +188,7 @@ pub struct Message { /// Programs that will be executed in sequence and committed in one atomic transaction if all /// succeed. 
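With the five-integer form of `msg!` removed in the log.rs hunk above, call sites that logged raw integers switch to `sol_log_64` with explicit `u64` casts, as `sol_log_slice` and `sol_log_params` now do; the single-string and format-string forms of `msg!` are unchanged, and the format-string form still carries the large runtime cost noted in the doc. A small sketch of the migration, assuming a crate that depends on `solana-program`:

```rust
use solana_program::{log::sol_log_64, msg};

fn log_slice(slice: &[u8]) {
    for (i, s) in slice.iter().enumerate() {
        // Previously: msg!(0, 0, 0, i, *s); the macro arm that accepted five
        // integers is gone, so the cast to u64 is now explicit at the call site.
        sol_log_64(0, 0, 0, i as u64, u64::from(*s));
    }
    // The single-string and format-string forms of msg! still work; the
    // format-string form remains expensive, as the updated doc comment notes.
    msg!("logged {} bytes", slice.len());
}
```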
+ #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub instructions: Vec, } diff --git a/sdk/program/src/message/mod.rs b/sdk/program/src/message/mod.rs index 8c5737d38378d9..910a03c39cadbe 100644 --- a/sdk/program/src/message/mod.rs +++ b/sdk/program/src/message/mod.rs @@ -5,12 +5,10 @@ pub mod legacy; #[cfg(not(target_arch = "bpf"))] #[path = ""] mod non_bpf_modules { - mod mapped; mod sanitized; - pub mod v0; mod versions; - pub use {mapped::*, sanitized::*, versions::*}; + pub use {sanitized::*, versions::*}; } pub use legacy::Message; diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 7a600e002243ea..25ef25c0adbfdc 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -2,7 +2,7 @@ use { crate::{ hash::Hash, instruction::{CompiledInstruction, Instruction}, - message::{MappedAddresses, MappedMessage, Message, MessageHeader}, + message::{v0::{self, LoadedAddresses}, legacy::Message as LegacyMessage, MessageHeader}, pubkey::Pubkey, sanitize::{Sanitize, SanitizeError}, serialize_utils::{append_slice, append_u16, append_u8}, @@ -17,9 +17,9 @@ use { #[derive(Debug, Clone)] pub enum SanitizedMessage { /// Sanitized legacy message - Legacy(Message), + Legacy(LegacyMessage), /// Sanitized version #0 message with mapped addresses - V0(MappedMessage), + V0(v0::LoadedMessage), } #[derive(PartialEq, Debug, Error, Eq, Clone)] @@ -44,9 +44,9 @@ impl From for SanitizeMessageError { } } -impl TryFrom for SanitizedMessage { +impl TryFrom for SanitizedMessage { type Error = SanitizeMessageError; - fn try_from(message: Message) -> Result { + fn try_from(message: LegacyMessage) -> Result { message.sanitize()?; let sanitized_msg = Self::Legacy(message); @@ -80,12 +80,12 @@ impl SanitizedMessage { pub fn header(&self) -> &MessageHeader { match self { Self::Legacy(message) => &message.header, - Self::V0(mapped_msg) => &mapped_msg.message.header, + Self::V0(message) => &message.header, } } /// Returns a legacy message if this sanitized message wraps one - pub fn legacy_message(&self) -> Option<&Message> { + pub fn legacy_message(&self) -> Option<&LegacyMessage> { if let Self::Legacy(message) = &self { Some(message) } else { @@ -103,7 +103,7 @@ impl SanitizedMessage { pub fn recent_blockhash(&self) -> &Hash { match self { Self::Legacy(message) => &message.recent_blockhash, - Self::V0(mapped_msg) => &mapped_msg.message.recent_blockhash, + Self::V0(message) => &message.recent_blockhash, } } @@ -112,7 +112,7 @@ impl SanitizedMessage { pub fn instructions(&self) -> &[CompiledInstruction] { match self { Self::Legacy(message) => &message.instructions, - Self::V0(mapped_msg) => &mapped_msg.message.instructions, + Self::V0(message) => &message.instructions, } } @@ -123,7 +123,7 @@ impl SanitizedMessage { ) -> impl Iterator { match self { Self::Legacy(message) => message.instructions.iter(), - Self::V0(mapped_msg) => mapped_msg.message.instructions.iter(), + Self::V0(message) => message.instructions.iter(), } .map(move |ix| { ( @@ -138,7 +138,7 @@ impl SanitizedMessage { pub fn account_keys_iter(&self) -> Box + '_> { match self { Self::Legacy(message) => Box::new(message.account_keys.iter()), - Self::V0(mapped_msg) => Box::new(mapped_msg.account_keys_iter()), + Self::V0(message) => Box::new(message.account_keys_iter()), } } @@ -146,7 +146,7 @@ impl SanitizedMessage { pub fn account_keys_len(&self) -> usize { match self { Self::Legacy(message) => message.account_keys.len(), - Self::V0(mapped_msg) => mapped_msg.account_keys_len(), + 
Self::V0(message) => message.account_keys_len(), } } @@ -251,10 +251,10 @@ impl SanitizedMessage { data } - /// Return the mapped addresses for this message if it has any. - fn mapped_addresses(&self) -> Option<&MappedAddresses> { + /// Return the resolved addresses for this message if it has any. + fn loaded_lookup_table_addresses(&self) -> Option<&LoadedAddresses> { match &self { - SanitizedMessage::V0(message) => Some(&message.mapped_addresses), + SanitizedMessage::V0(message) => Some(&message.loaded_addresses), _ => None, } } @@ -262,7 +262,7 @@ impl SanitizedMessage { /// Return the number of readonly accounts loaded by this message. pub fn num_readonly_accounts(&self) -> usize { let mapped_readonly_addresses = self - .mapped_addresses() + .loaded_lookup_table_addresses() .map(|keys| keys.readonly.len()) .unwrap_or_default(); mapped_readonly_addresses @@ -311,13 +311,13 @@ mod tests { #[test] fn test_try_from_message() { let dupe_key = Pubkey::new_unique(); - let legacy_message_with_dupes = Message { + let legacy_message_with_dupes = LegacyMessage { header: MessageHeader { num_required_signatures: 1, ..MessageHeader::default() }, account_keys: vec![dupe_key, dupe_key], - ..Message::default() + ..LegacyMessage::default() }; assert_eq!( @@ -325,9 +325,9 @@ mod tests { Some(SanitizeMessageError::DuplicateAccountKey), ); - let legacy_message_with_no_signers = Message { + let legacy_message_with_no_signers = LegacyMessage { account_keys: vec![Pubkey::new_unique()], - ..Message::default() + ..LegacyMessage::default() }; assert_eq!( @@ -346,7 +346,7 @@ mod tests { CompiledInstruction::new(2, &(), vec![0, 1]), ]; - let message = SanitizedMessage::try_from(Message::new_with_compiled_instructions( + let message = SanitizedMessage::try_from(LegacyMessage::new_with_compiled_instructions( 1, 0, 2, @@ -370,20 +370,20 @@ mod tests { let key4 = Pubkey::new_unique(); let key5 = Pubkey::new_unique(); - let legacy_message = SanitizedMessage::try_from(Message { + let legacy_message = SanitizedMessage::try_from(LegacyMessage { header: MessageHeader { num_required_signatures: 2, num_readonly_signed_accounts: 1, num_readonly_unsigned_accounts: 1, }, account_keys: vec![key0, key1, key2, key3], - ..Message::default() + ..LegacyMessage::default() }) .unwrap(); assert_eq!(legacy_message.num_readonly_accounts(), 2); - let mapped_message = SanitizedMessage::V0(MappedMessage { + let v0_message = SanitizedMessage::V0(v0::LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 2, @@ -393,13 +393,13 @@ mod tests { account_keys: vec![key0, key1, key2, key3], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key4], readonly: vec![key5], }, }); - assert_eq!(mapped_message.num_readonly_accounts(), 3); + assert_eq!(v0_message.num_readonly_accounts(), 3); } #[test] @@ -427,7 +427,7 @@ mod tests { ]; let demote_program_write_locks = true; - let message = Message::new(&instructions, Some(&id1)); + let message = LegacyMessage::new(&instructions, Some(&id1)); let sanitized_message = SanitizedMessage::try_from(message.clone()).unwrap(); let serialized = sanitized_message.serialize_instructions(demote_program_write_locks); @@ -438,7 +438,7 @@ mod tests { // assert that Message::deserialize_instruction is compatible with SanitizedMessage::serialize_instructions for (i, instruction) in instructions.iter().enumerate() { assert_eq!( - Message::deserialize_instruction(i, &serialized).unwrap(), + 
LegacyMessage::deserialize_instruction(i, &serialized).unwrap(), *instruction ); } @@ -481,18 +481,18 @@ mod tests { data: vec![], }; - let legacy_message = SanitizedMessage::try_from(Message { + let legacy_message = SanitizedMessage::try_from(LegacyMessage { header: MessageHeader { num_required_signatures: 1, num_readonly_signed_accounts: 0, num_readonly_unsigned_accounts: 0, }, account_keys: vec![key0, key1, key2, program_id], - ..Message::default() + ..LegacyMessage::default() }) .unwrap(); - let mapped_message = SanitizedMessage::V0(MappedMessage { + let v0_message = SanitizedMessage::V0(v0::LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 1, @@ -502,13 +502,13 @@ mod tests { account_keys: vec![key0, key1], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key2], readonly: vec![program_id], }, }); - for message in vec![legacy_message, mapped_message] { + for message in vec![legacy_message, v0_message] { assert_eq!( message.try_compile_instruction(&valid_instruction), Some(CompiledInstruction { diff --git a/sdk/program/src/message/v0.rs b/sdk/program/src/message/v0.rs deleted file mode 100644 index 9319d32157181d..00000000000000 --- a/sdk/program/src/message/v0.rs +++ /dev/null @@ -1,396 +0,0 @@ -use crate::{ - hash::Hash, - instruction::CompiledInstruction, - message::{MessageHeader, MESSAGE_VERSION_PREFIX}, - pubkey::Pubkey, - sanitize::{Sanitize, SanitizeError}, - short_vec, -}; - -/// Indexes that are mapped to addresses using an on-chain address map for -/// succinctly loading readonly and writable accounts. -#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] -#[serde(rename_all = "camelCase")] -pub struct AddressMapIndexes { - #[serde(with = "short_vec")] - pub writable: Vec, - #[serde(with = "short_vec")] - pub readonly: Vec, -} - -/// Transaction message format which supports succinct account loading with -/// indexes for on-chain address maps. -#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] -#[serde(rename_all = "camelCase")] -pub struct Message { - /// The message header, identifying signed and read-only `account_keys` - pub header: MessageHeader, - - /// List of accounts loaded by this transaction. - #[serde(with = "short_vec")] - pub account_keys: Vec, - - /// The blockhash of a recent block. - pub recent_blockhash: Hash, - - /// Instructions that invoke a designated program, are executed in sequence, - /// and committed in one atomic transaction if all succeed. - /// - /// # Notes - /// - /// Account and program indexes will index into the list of addresses - /// constructed from the concatenation of `account_keys`, flattened list of - /// `writable` address map indexes, and the flattened `readonly` address - /// map indexes. - #[serde(with = "short_vec")] - pub instructions: Vec, - - /// List of address map indexes used to succinctly load additional accounts - /// for this transaction. - /// - /// # Notes - /// - /// The last `address_map_indexes.len()` accounts of the read-only unsigned - /// accounts are loaded as address maps. 
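For the `num_readonly_accounts` change a few hunks above: a v0 message now contributes the readonly addresses it loaded from lookup tables on top of the readonly counts in the header, which is why the updated test expects 2 for the legacy message and 3 for the v0 message that loaded one extra readonly key. A stand-in sketch of just that arithmetic (field names mirror the real header; the free function is illustrative, not this diff's API):

```rust
// Stand-ins that mirror only the counting performed by
// SanitizedMessage::num_readonly_accounts.
struct Header {
    num_readonly_signed_accounts: u8,
    num_readonly_unsigned_accounts: u8,
}

/// `loaded_readonly` is the number of readonly keys pulled in from address
/// lookup tables (None for a legacy message, which cannot load any).
fn num_readonly_accounts(header: &Header, loaded_readonly: Option<usize>) -> usize {
    loaded_readonly.unwrap_or_default()
        + usize::from(header.num_readonly_signed_accounts)
        + usize::from(header.num_readonly_unsigned_accounts)
}

fn main() {
    let header = Header {
        num_readonly_signed_accounts: 1,
        num_readonly_unsigned_accounts: 1,
    };
    assert_eq!(num_readonly_accounts(&header, None), 2);    // legacy message
    assert_eq!(num_readonly_accounts(&header, Some(1)), 3); // v0 with one loaded readonly key
}
```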
- #[serde(with = "short_vec")] - pub address_map_indexes: Vec, -} - -impl Sanitize for Message { - fn sanitize(&self) -> Result<(), SanitizeError> { - // signing area and read-only non-signing area should not - // overlap - if usize::from(self.header.num_required_signatures) - .saturating_add(usize::from(self.header.num_readonly_unsigned_accounts)) - > self.account_keys.len() - { - return Err(SanitizeError::IndexOutOfBounds); - } - - // there should be at least 1 RW fee-payer account. - if self.header.num_readonly_signed_accounts >= self.header.num_required_signatures { - return Err(SanitizeError::IndexOutOfBounds); - } - - // there cannot be more address maps than read-only unsigned accounts. - let num_address_map_indexes = self.address_map_indexes.len(); - if num_address_map_indexes > usize::from(self.header.num_readonly_unsigned_accounts) { - return Err(SanitizeError::IndexOutOfBounds); - } - - // each map must load at least one entry - let mut num_loaded_accounts = self.account_keys.len(); - for indexes in &self.address_map_indexes { - let num_loaded_map_entries = indexes - .writable - .len() - .saturating_add(indexes.readonly.len()); - - if num_loaded_map_entries == 0 { - return Err(SanitizeError::InvalidValue); - } - - num_loaded_accounts = num_loaded_accounts.saturating_add(num_loaded_map_entries); - } - - // the number of loaded accounts must be <= 256 since account indices are - // encoded as `u8` - if num_loaded_accounts > 256 { - return Err(SanitizeError::IndexOutOfBounds); - } - - for ci in &self.instructions { - if usize::from(ci.program_id_index) >= num_loaded_accounts { - return Err(SanitizeError::IndexOutOfBounds); - } - // A program cannot be a payer. - if ci.program_id_index == 0 { - return Err(SanitizeError::IndexOutOfBounds); - } - for ai in &ci.accounts { - if usize::from(*ai) >= num_loaded_accounts { - return Err(SanitizeError::IndexOutOfBounds); - } - } - } - - Ok(()) - } -} - -impl Message { - /// Serialize this message with a version #0 prefix using bincode encoding. 
- pub fn serialize(&self) -> Vec { - bincode::serialize(&(MESSAGE_VERSION_PREFIX, self)).unwrap() - } -} - -#[cfg(test)] -mod tests { - use {super::*, crate::message::VersionedMessage}; - - fn simple_message() -> Message { - Message { - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 1, - }, - account_keys: vec![Pubkey::new_unique(), Pubkey::new_unique()], - address_map_indexes: vec![AddressMapIndexes { - writable: vec![], - readonly: vec![0], - }], - ..Message::default() - } - } - - fn two_map_message() -> Message { - Message { - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 2, - }, - account_keys: vec![ - Pubkey::new_unique(), - Pubkey::new_unique(), - Pubkey::new_unique(), - ], - address_map_indexes: vec![ - AddressMapIndexes { - writable: vec![1], - readonly: vec![0], - }, - AddressMapIndexes { - writable: vec![0], - readonly: vec![1], - }, - ], - ..Message::default() - } - } - - #[test] - fn test_sanitize_account_indices() { - assert!(Message { - account_keys: (0..=u8::MAX).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..u8::MAX).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - account_keys: (0..u8::MAX).map(|_| Pubkey::new_unique()).collect(), - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..u8::MAX - 1).map(|_| Pubkey::new_unique()).collect(), - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..53).step_by(2).collect(), - readonly: (1..53).step_by(2).collect(), - }, - ], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..two_map_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..52).step_by(2).collect(), - readonly: (1..52).step_by(2).collect(), - }, - ], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..two_map_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_sanitize_excessive_loaded_accounts() { - assert!(Message { - account_keys: (0..=u8::MAX).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..257).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - account_keys: 
(0..u8::MAX).map(|_| Pubkey::new_unique()).collect(), - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..256).map(|_| Pubkey::new_unique()).collect(), - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..53).step_by(2).collect(), - readonly: (1..53).step_by(2).collect(), - } - ], - ..two_map_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - } - ], - ..two_map_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_sanitize_excessive_maps() { - assert!(Message { - header: MessageHeader { - num_readonly_unsigned_accounts: 1, - ..simple_message().header - }, - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - header: MessageHeader { - num_readonly_unsigned_accounts: 0, - ..simple_message().header - }, - ..simple_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_sanitize_address_map() { - assert!(Message { - address_map_indexes: vec![AddressMapIndexes { - writable: vec![0], - readonly: vec![], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![AddressMapIndexes { - writable: vec![], - readonly: vec![0], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![AddressMapIndexes { - writable: vec![], - readonly: vec![], - }], - ..simple_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_serialize() { - let message = simple_message(); - let versioned_msg = VersionedMessage::V0(message.clone()); - assert_eq!(message.serialize(), versioned_msg.serialize()); - } -} diff --git a/sdk/program/src/message/versions.rs b/sdk/program/src/message/versions/mod.rs similarity index 91% rename from sdk/program/src/message/versions.rs rename to sdk/program/src/message/versions/mod.rs index 1ec621d74c0d50..9242731af611bb 100644 --- a/sdk/program/src/message/versions.rs +++ b/sdk/program/src/message/versions/mod.rs @@ -2,7 +2,7 @@ use { crate::{ hash::Hash, instruction::CompiledInstruction, - message::{v0, Message, MessageHeader}, + message::{legacy::Message as LegacyMessage, MessageHeader}, pubkey::Pubkey, sanitize::{Sanitize, SanitizeError}, short_vec, @@ -15,6 +15,8 @@ use { std::fmt, }; +pub mod v0; + /// Bit mask that indicates whether a serialized message is versioned. pub const MESSAGE_VERSION_PREFIX: u8 = 0x80; @@ -26,10 +28,10 @@ pub const MESSAGE_VERSION_PREFIX: u8 = 0x80; /// which message version is serialized starting from version `0`. If the first /// is bit is not set, all bytes are used to encode the legacy `Message` /// format. 
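The version-prefix framing described above shows up directly in the bytes: a versioned message is bincode-encoded as the tuple `(MESSAGE_VERSION_PREFIX, message)`, so its first byte has the top bit set, while a legacy message begins with its required-signature count, which stays below `0x80`. A sketch assuming bincode 1.x with its default fixed-width integer encoding and a toy struct standing in for the real message:

```rust
// Sketch of the version-prefix framing: a v0 message is encoded as the tuple
// (MESSAGE_VERSION_PREFIX, message), so its first byte has the top bit set,
// while a legacy message starts with its (always < 0x80) signature count.
use serde::Serialize;

const MESSAGE_VERSION_PREFIX: u8 = 0x80;

#[derive(Serialize)]
struct ToyMessage {
    num_required_signatures: u8,
}

fn main() {
    let msg = ToyMessage { num_required_signatures: 1 };

    let legacy_bytes = bincode::serialize(&msg).unwrap();
    let v0_bytes = bincode::serialize(&(MESSAGE_VERSION_PREFIX, &msg)).unwrap();

    // Legacy: first byte is the signature count, top bit clear.
    assert_eq!(legacy_bytes[0] & MESSAGE_VERSION_PREFIX, 0);
    // Versioned: first byte is 0x80 | version, i.e. 0x80 for version 0.
    assert_eq!(v0_bytes[0], MESSAGE_VERSION_PREFIX);
    assert_eq!(&v0_bytes[1..], &legacy_bytes[..]);
}
```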
-#[frozen_abi(digest = "x2F3RG2RhJQWN6L2N3jebvcAvNYFrhE3sKTPJ4sENvL")] +#[frozen_abi(digest = "G4EAiqmGgBprgf5ePYemLJcoFfx4R7rhC1Weo2FVJ7fn")] #[derive(Debug, PartialEq, Eq, Clone, AbiEnumVisitor, AbiExample)] pub enum VersionedMessage { - Legacy(Message), + Legacy(LegacyMessage), V0(v0::Message), } @@ -98,7 +100,7 @@ impl VersionedMessage { impl Default for VersionedMessage { fn default() -> Self { - Self::Legacy(Message::default()) + Self::Legacy(LegacyMessage::default()) } } @@ -206,7 +208,7 @@ impl<'de> Deserialize<'de> for VersionedMessage { de::Error::invalid_length(1, &self) })?; - Ok(VersionedMessage::Legacy(Message { + Ok(VersionedMessage::Legacy(LegacyMessage { header: MessageHeader { num_required_signatures, num_readonly_signed_accounts: message.num_readonly_signed_accounts, @@ -247,7 +249,7 @@ mod tests { super::*, crate::{ instruction::{AccountMeta, Instruction}, - message::v0::AddressMapIndexes, + message::v0::MessageAddressTableLookup, }, }; @@ -274,7 +276,7 @@ mod tests { ), ]; - let mut message = Message::new(&instructions, Some(&id1)); + let mut message = LegacyMessage::new(&instructions, Some(&id1)); message.recent_blockhash = Hash::new_unique(); let bytes1 = bincode::serialize(&message).unwrap(); @@ -282,7 +284,7 @@ mod tests { assert_eq!(bytes1, bytes2); - let message1: Message = bincode::deserialize(&bytes1).unwrap(); + let message1: LegacyMessage = bincode::deserialize(&bytes1).unwrap(); let message2: VersionedMessage = bincode::deserialize(&bytes2).unwrap(); if let VersionedMessage::Legacy(message2) = message2 { @@ -299,27 +301,27 @@ mod tests { header: MessageHeader { num_required_signatures: 1, num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 2, + num_readonly_unsigned_accounts: 0, }, recent_blockhash: Hash::new_unique(), account_keys: vec![ Pubkey::new_unique(), - Pubkey::new_unique(), - Pubkey::new_unique(), ], - address_map_indexes: vec![ - AddressMapIndexes { - writable: vec![1], - readonly: vec![0], + address_table_lookups: vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1], + readonly_indexes: vec![0], }, - AddressMapIndexes { - writable: vec![0], - readonly: vec![1], + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![1], }, ], instructions: vec![CompiledInstruction { program_id_index: 1, - accounts: vec![0], + accounts: vec![0, 2, 3, 4], data: vec![], }], }; diff --git a/sdk/program/src/message/mapped.rs b/sdk/program/src/message/versions/v0/loaded.rs similarity index 81% rename from sdk/program/src/message/mapped.rs rename to sdk/program/src/message/versions/v0/loaded.rs index 4c60e4fd5d3c6a..d0b39fcbdcaa5d 100644 --- a/sdk/program/src/message/mapped.rs +++ b/sdk/program/src/message/versions/v0/loaded.rs @@ -5,37 +5,44 @@ use { pubkey::Pubkey, sysvar, }, - std::{collections::HashSet, convert::TryFrom}, + std::{collections::HashSet, ops::Deref, convert::TryFrom}, }; -/// Combination of a version #0 message and its mapped addresses +/// Combination of a version #0 message and its loaded addresses #[derive(Debug, Clone)] -pub struct MappedMessage { - /// Message which loaded a collection of mapped addresses +pub struct LoadedMessage { + /// Message which loaded a collection of lookup table addresses pub message: v0::Message, - /// Collection of mapped addresses loaded by this message - pub mapped_addresses: MappedAddresses, + /// Addresses loaded with on-chain address lookup tables + pub loaded_addresses: LoadedAddresses, } 
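`LoadedMessage` resolves instruction account indexes against the concatenation of the static `account_keys`, then the lookup-loaded `writable` addresses, then the lookup-loaded `readonly` addresses, in that order; this is the ordering `account_keys_segment_iter` below encodes and must not change. A stand-in sketch of the index resolution, using strings instead of `Pubkey`s purely for illustration:

```rust
// Illustrates the account-index resolution order used by LoadedMessage:
// static `account_keys`, then lookup-table `writable`, then `readonly`.
fn get_account_key<'a>(
    account_keys: &'a [&'a str],
    loaded_writable: &'a [&'a str],
    loaded_readonly: &'a [&'a str],
    index: usize,
) -> Option<&'a str> {
    account_keys
        .iter()
        .chain(loaded_writable)
        .chain(loaded_readonly)
        .nth(index)
        .copied()
}

fn main() {
    let static_keys = ["payer", "program"];
    let writable = ["loaded_w0"];
    let readonly = ["loaded_r0"];
    assert_eq!(get_account_key(&static_keys, &writable, &readonly, 0), Some("payer"));
    assert_eq!(get_account_key(&static_keys, &writable, &readonly, 2), Some("loaded_w0"));
    assert_eq!(get_account_key(&static_keys, &writable, &readonly, 3), Some("loaded_r0"));
    assert_eq!(get_account_key(&static_keys, &writable, &readonly, 4), None);
}
```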
-/// Collection of mapped addresses loaded succinctly by a transaction using -/// on-chain address map accounts. +impl Deref for LoadedMessage { + type Target = v0::Message; + fn deref(&self) -> &Self::Target { + &self.message + } +} + +/// Collection of addresses loaded from on-chain lookup tables, split +/// by readonly and writable. #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct MappedAddresses { +pub struct LoadedAddresses { /// List of addresses for writable loaded accounts pub writable: Vec, /// List of addresses for read-only loaded accounts pub readonly: Vec, } -impl MappedMessage { +impl LoadedMessage { /// Returns an iterator of account key segments. The ordering of segments /// affects how account indexes from compiled instructions are resolved and /// so should not be changed. fn account_keys_segment_iter(&self) -> impl Iterator> { vec![ &self.message.account_keys, - &self.mapped_addresses.writable, - &self.mapped_addresses.readonly, + &self.loaded_addresses.writable, + &self.loaded_addresses.readonly, ] .into_iter() } @@ -82,7 +89,7 @@ impl MappedMessage { let num_signed_accounts = usize::from(header.num_required_signatures); if key_index >= num_account_keys { let mapped_addresses_index = key_index.saturating_sub(num_account_keys); - mapped_addresses_index < self.mapped_addresses.writable.len() + mapped_addresses_index < self.loaded_addresses.writable.len() } else if key_index >= num_signed_accounts { let num_unsigned_accounts = num_account_keys.saturating_sub(num_signed_accounts); let num_writable_unsigned_accounts = num_unsigned_accounts @@ -138,7 +145,7 @@ mod tests { itertools::Itertools, }; - fn create_test_mapped_message() -> (MappedMessage, [Pubkey; 6]) { + fn check_test_loaded_message() -> (LoadedMessage, [Pubkey; 6]) { let key0 = Pubkey::new_unique(); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); @@ -146,7 +153,7 @@ mod tests { let key4 = Pubkey::new_unique(); let key5 = Pubkey::new_unique(); - let message = MappedMessage { + let message = LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 2, @@ -156,7 +163,7 @@ mod tests { account_keys: vec![key0, key1, key2, key3], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key4], readonly: vec![key5], }, @@ -167,7 +174,7 @@ mod tests { #[test] fn test_account_keys_segment_iter() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); let expected_segments = vec![ vec![keys[0], keys[1], keys[2], keys[3]], @@ -183,14 +190,14 @@ mod tests { #[test] fn test_account_keys_len() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); assert_eq!(message.account_keys_len(), keys.len()); } #[test] fn test_account_keys_iter() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); let mut iter = message.account_keys_iter(); for expected_key in keys { @@ -200,19 +207,19 @@ mod tests { #[test] fn test_has_duplicates() { - let message = create_test_mapped_message().0; + let message = check_test_loaded_message().0; assert!(!message.has_duplicates()); } #[test] fn test_has_duplicates_with_dupe_keys() { - let create_message_with_dupe_keys = |mut keys: Vec| MappedMessage { + let create_message_with_dupe_keys = |mut keys: Vec| LoadedMessage { message: v0::Message { account_keys: keys.split_off(2), ..v0::Message::default() }, - 
mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: keys.split_off(2), readonly: keys, }, @@ -234,7 +241,7 @@ mod tests { #[test] fn test_get_account_key() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); assert_eq!(message.get_account_key(0), Some(&keys[0])); assert_eq!(message.get_account_key(1), Some(&keys[1])); @@ -246,7 +253,7 @@ mod tests { #[test] fn test_is_writable_index() { - let message = create_test_mapped_message().0; + let message = check_test_loaded_message().0; assert!(message.is_writable_index(0)); assert!(!message.is_writable_index(1)); @@ -258,15 +265,15 @@ mod tests { #[test] fn test_is_writable() { - let mut mapped_msg = create_test_mapped_message().0; + let mut message = check_test_loaded_message().0; - mapped_msg.message.account_keys[0] = sysvar::clock::id(); - assert!(mapped_msg.is_writable_index(0)); - assert!(!mapped_msg.is_writable(0, /*demote_program_write_locks=*/ true)); + message.message.account_keys[0] = sysvar::clock::id(); + assert!(message.is_writable_index(0)); + assert!(!message.is_writable(0, /*demote_program_write_locks=*/ true)); - mapped_msg.message.account_keys[0] = system_program::id(); - assert!(mapped_msg.is_writable_index(0)); - assert!(!mapped_msg.is_writable(0, /*demote_program_write_locks=*/ true)); + message.message.account_keys[0] = system_program::id(); + assert!(message.is_writable_index(0)); + assert!(!message.is_writable(0, /*demote_program_write_locks=*/ true)); } #[test] @@ -274,7 +281,7 @@ mod tests { let key0 = Pubkey::new_unique(); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); - let mapped_msg = MappedMessage { + let message = LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 1, @@ -289,13 +296,13 @@ mod tests { }], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key1, key2], readonly: vec![], }, }; - assert!(mapped_msg.is_writable_index(2)); - assert!(!mapped_msg.is_writable(2, /*demote_program_write_locks=*/ true)); + assert!(message.is_writable_index(2)); + assert!(!message.is_writable(2, /*demote_program_write_locks=*/ true)); } } diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs new file mode 100644 index 00000000000000..ac0e59919971b2 --- /dev/null +++ b/sdk/program/src/message/versions/v0/mod.rs @@ -0,0 +1,374 @@ +use crate::{ + hash::Hash, + instruction::CompiledInstruction, + message::{MessageHeader, MESSAGE_VERSION_PREFIX}, + pubkey::Pubkey, + sanitize::{Sanitize, SanitizeError}, + short_vec, +}; + +mod loaded; + +pub use loaded::*; + +/// Address table lookups describe an on-chain address lookup table to use +/// for loading more readonly and writable accounts in a single tx. +#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] +#[serde(rename_all = "camelCase")] +pub struct MessageAddressTableLookup { + /// Address lookup table account key + pub account_key: Pubkey, + /// List of indexes used to load writable account addresses + #[serde(with = "short_vec")] + pub writable_indexes: Vec, + /// List of indexes used to load readonly account addresses + #[serde(with = "short_vec")] + pub readonly_indexes: Vec, +} + +/// Transaction message format which supports succinct account loading with +/// on-chain address lookup tables. 
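A `MessageAddressTableLookup`, as defined just above, names an on-chain lookup-table account plus the index lists used to pull writable and readonly addresses out of it; the loading itself happens in the runtime rather than in this file. A hedged, stand-in sketch of what resolving one lookup against a table's address list amounts to (all names below are illustrative, not this diff's runtime API):

```rust
// Illustrative resolution of a lookup against the address list stored in an
// on-chain lookup table account; String keys stand in for Pubkeys.
struct AddressTableLookup {
    writable_indexes: Vec<u8>,
    readonly_indexes: Vec<u8>,
}

struct LoadedAddresses {
    writable: Vec<String>,
    readonly: Vec<String>,
}

fn resolve(table: &[String], lookup: &AddressTableLookup) -> Option<LoadedAddresses> {
    // Any out-of-range index makes the whole lookup fail.
    let pick = |indexes: &[u8]| -> Option<Vec<String>> {
        indexes
            .iter()
            .map(|&i| table.get(usize::from(i)).cloned())
            .collect()
    };
    Some(LoadedAddresses {
        writable: pick(&lookup.writable_indexes)?,
        readonly: pick(&lookup.readonly_indexes)?,
    })
}

fn main() {
    let table: Vec<String> = (0..4).map(|i| format!("addr{}", i)).collect();
    let lookup = AddressTableLookup {
        writable_indexes: vec![1],
        readonly_indexes: vec![0, 3],
    };
    let loaded = resolve(&table, &lookup).unwrap();
    assert_eq!(loaded.writable, vec!["addr1"]);
    assert_eq!(loaded.readonly, vec!["addr0", "addr3"]);
}
```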
+#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] +#[serde(rename_all = "camelCase")] +pub struct Message { + /// The message header, identifying signed and read-only `account_keys` + pub header: MessageHeader, + + /// List of accounts loaded by this transaction. + #[serde(with = "short_vec")] + pub account_keys: Vec, + + /// The blockhash of a recent block. + pub recent_blockhash: Hash, + + /// Instructions that invoke a designated program, are executed in sequence, + /// and committed in one atomic transaction if all succeed. + /// + /// # Notes + /// + /// Account and program indexes will index into the list of addresses + /// constructed from the concatenation of three key lists: + /// 1) message `account_keys` + /// 2) ordered list of keys loaded from `writable` lookup table indexes + /// 3) ordered list of keys loaded from `readable` lookup table indexes + #[serde(with = "short_vec")] + pub instructions: Vec, + + /// List of address table lookups used to load additional accounts + /// for this transaction. + #[serde(with = "short_vec")] + pub address_table_lookups: Vec, +} + +impl Sanitize for Message { + fn sanitize(&self) -> Result<(), SanitizeError> { + // signing area and read-only non-signing area should not + // overlap + if usize::from(self.header.num_required_signatures) + .saturating_add(usize::from(self.header.num_readonly_unsigned_accounts)) + > self.account_keys.len() + { + return Err(SanitizeError::IndexOutOfBounds); + } + + // there should be at least 1 RW fee-payer account. + if self.header.num_readonly_signed_accounts >= self.header.num_required_signatures { + return Err(SanitizeError::InvalidValue); + } + + let mut num_loaded_accounts = self.account_keys.len(); + for lookup in &self.address_table_lookups { + let num_table_loaded_accounts = lookup + .writable_indexes + .len() + .saturating_add(lookup.readonly_indexes.len()); + + // each lookup table must be used to load at least one account + if num_table_loaded_accounts == 0 { + return Err(SanitizeError::InvalidValue); + } + + num_loaded_accounts = num_loaded_accounts.saturating_add(num_table_loaded_accounts); + } + + // the number of loaded accounts must be <= 256 since account indices are + // encoded as `u8` + if num_loaded_accounts > 256 { + return Err(SanitizeError::IndexOutOfBounds); + } + + for ci in &self.instructions { + if usize::from(ci.program_id_index) >= num_loaded_accounts { + return Err(SanitizeError::IndexOutOfBounds); + } + // A program cannot be a payer. + if ci.program_id_index == 0 { + return Err(SanitizeError::IndexOutOfBounds); + } + for ai in &ci.accounts { + if usize::from(*ai) >= num_loaded_accounts { + return Err(SanitizeError::IndexOutOfBounds); + } + } + } + + Ok(()) + } +} + +impl Message { + /// Serialize this message with a version #0 prefix using bincode encoding. 
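One consequence of the `sanitize()` rules above worth spelling out: because instruction account indexes are `u8`, the static `account_keys` plus everything loaded through lookup tables may total at most 256 addresses. A tiny sketch of that bound (the helper is invented for illustration); it matches the `test_sanitize_with_max_table_loaded_keys` case further below, which packs 255 table-loaded keys next to a single static fee payer:

```rust
// The sanitize() rules cap the total number of addresses a v0 message can
// reference at 256, because instruction account indexes are stored as u8.
fn max_table_loaded_keys(static_account_keys: usize) -> usize {
    256usize.saturating_sub(static_account_keys)
}

fn main() {
    // One static key (the fee payer) leaves room for 255 table-loaded addresses.
    assert_eq!(max_table_loaded_keys(1), 255);
    // 256 static keys leave no room for lookup-table addresses at all.
    assert_eq!(max_table_loaded_keys(256), 0);
}
```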
+ pub fn serialize(&self) -> Vec { + bincode::serialize(&(MESSAGE_VERSION_PREFIX, self)).unwrap() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::message::VersionedMessage, + }; + + #[test] + fn test_sanitize() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_instruction() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique(), Pubkey::new_unique()], + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_table_lookup() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![0], + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_table_lookup_and_ix() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![0], + }], + instructions: vec![CompiledInstruction { + program_id_index: 4, + accounts: vec![0, 1, 2, 3], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_without_signer() { + assert!(Message { + header: MessageHeader::default(), + account_keys: vec![Pubkey::new_unique()], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_without_writable_signer() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_empty_table_lookup() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![], + readonly_indexes: vec![], + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + + + #[test] + fn test_sanitize_with_max_account_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: (0..=u8::MAX).map(|_| Pubkey::new_unique()).collect(), + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_too_many_account_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: (0..=256).map(|_| Pubkey::new_unique()).collect(), + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_max_table_loaded_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: 
vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..=254).step_by(2).collect(), + readonly_indexes: (1..=254).step_by(2).collect(), + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_too_many_table_loaded_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..=255).step_by(2).collect(), + readonly_indexes: (1..=255).step_by(2).collect(), + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_invalid_ix_program_id() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![], + }], + instructions: vec![CompiledInstruction { + program_id_index: 2, + accounts: vec![], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_invalid_ix_account() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![], + readonly_indexes: vec![0], + }], + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + #[test] + fn test_serialize() { + let message = Message::default(); + let versioned_msg = VersionedMessage::V0(message.clone()); + assert_eq!(message.serialize(), versioned_msg.serialize()); + } +} diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 6e97f179b61be9..35a6a25eb36ece 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -1,6 +1,6 @@ #![allow(clippy::integer_arithmetic)] use { - crate::{decode_error::DecodeError, hash::hashv}, + crate::{decode_error::DecodeError, hash::hashv, wasm_bindgen}, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, bytemuck::{Pod, Zeroable}, num_derive::{FromPrimitive, ToPrimitive}, @@ -48,6 +48,7 @@ impl From for PubkeyError { } } +#[wasm_bindgen] #[repr(transparent)] #[derive( AbiExample, @@ -67,7 +68,7 @@ impl From for PubkeyError { Serialize, Zeroable, )] -pub struct Pubkey([u8; 32]); +pub struct Pubkey(pub(crate) [u8; 32]); impl crate::sanitize::Sanitize for Pubkey {} @@ -222,7 +223,11 @@ impl Pubkey { /// derived in off-chain client programs, avoiding the compute cost of /// generating the address on-chain. The address may or may not then be /// verified by re-deriving it on-chain, depending on the requirements of - /// the program. + /// the program. This verification may be performed without the overhead of + /// re-searching for the bump key by using the [`create_program_address`] + /// function. + /// + /// [`create_program_address`]: Pubkey::create_program_address /// /// **Warning**: Because of the way the seeds are hashed there is a potential /// for program address collisions for the same program id. 
The seeds are @@ -481,17 +486,48 @@ impl Pubkey { } } - /// Create a valid [program derived address][pda] without a bump seed. + /// Create a valid [program derived address][pda] without searching for a bump seed. /// /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses /// - /// **Because this function does not create a bump seed, it may unpredictably - /// return an error and should not be used. It exists for backwards - /// compatibility reasons.** + /// Because this function does not create a bump seed, it may unpredictably + /// return an error for any given set of seeds and is not generally suitable + /// for creating program derived addresses. /// - /// See the documentation for [`find_program_address`] for a full description. + /// However, it can be used for efficiently verifying that a set of seeds plus + /// bump seed generated by [`find_program_address`] derives a particular + /// address as expected. See the example for details. + /// + /// See the documentation for [`find_program_address`] for a full description + /// of program derived addresses and bump seeds. /// /// [`find_program_address`]: Pubkey::find_program_address + /// + /// # Examples + /// + /// Creating a program derived address involves iteratively searching for a + /// bump seed for which the derived [`Pubkey`] does not lie on the ed25519 + /// curve. This search process is generally performed off-chain, with the + /// [`find_program_address`] function, after which the client passes the + /// bump seed to the program as instruction data. + /// + /// Depending on the application requirements, a program may wish to verify + /// that the set of seeds, plus the bump seed, do correctly generate an + /// expected address. + /// + /// The verification is performed by appending to the other seeds one + /// additional seed slice that contains the single `u8` bump seed, calling + /// `create_program_address`, checking that the return value is `Ok`, and + /// that the returned `Pubkey` has the expected value. 
+ /// + /// ``` + /// # use solana_program::pubkey::Pubkey; + /// # let program_id = Pubkey::new_unique(); + /// let (expected_pda, bump_seed) = Pubkey::find_program_address(&[b"vault"], &program_id); + /// let actual_pda = Pubkey::create_program_address(&[b"vault", &[bump_seed]], &program_id)?; + /// assert_eq!(expected_pda, actual_pda); + /// # Ok::<(), anyhow::Error>(()) + /// ``` pub fn create_program_address( seeds: &[&[u8]], program_id: &Pubkey, diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index 1b1e65d8ebefa9..ae9efd7c5d9dbc 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -25,6 +25,9 @@ impl SlotHashes { } (self.0).truncate(MAX_ENTRIES); } + pub fn position(&self, slot: &Slot) -> Option { + self.binary_search_by(|(probe, _)| slot.cmp(probe)).ok() + } #[allow(clippy::trivially_copy_pass_by_ref)] pub fn get(&self, slot: &Slot) -> Option<&Hash> { self.binary_search_by(|(probe, _)| slot.cmp(probe)) diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 59c6255146671c..1ae02021d9bd26 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -1,5 +1,4 @@ #[allow(deprecated)] -use crate::sysvar::recent_blockhashes; use { crate::{ decode_error::DecodeError, @@ -7,7 +6,7 @@ use { nonce, pubkey::Pubkey, system_program, - sysvar::rent, + sysvar::{recent_blockhashes, rent}, }, num_derive::{FromPrimitive, ToPrimitive}, thiserror::Error, diff --git a/sdk/program/src/wasm/hash.rs b/sdk/program/src/wasm/hash.rs new file mode 100644 index 00000000000000..add1e6bbe80657 --- /dev/null +++ b/sdk/program/src/wasm/hash.rs @@ -0,0 +1,57 @@ +//! `Hash` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{hash::*, wasm::display_to_jsvalue}, + js_sys::{Array, Uint8Array}, + wasm_bindgen::{prelude::*, JsCast}, +}; + +#[wasm_bindgen] +impl Hash { + /// Create a new Hash object + /// + /// * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result { + if let Some(base58_str) = value.as_string() { + base58_str.parse::().map_err(display_to_jsvalue) + } else if let Some(uint8_array) = value.dyn_ref::() { + Ok(Hash::new(&uint8_array.to_vec())) + } else if let Some(array) = value.dyn_ref::() { + let mut bytes = vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if n >= 0. && n <= 255. { + bytes.push(n as u8); + continue; + } + } + return Err(format!("Invalid array argument: {:?}", x).into()); + } + Ok(Hash::new(&bytes)) + } else if value.is_undefined() { + Ok(Hash::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base58 string representation of the hash + pub fn toString(&self) -> String { + self.to_string() + } + + /// Checks if two `Hash`s are equal + pub fn equals(&self, other: &Hash) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the hash + pub fn toBytes(&self) -> Box<[u8]> { + self.0.clone().into() + } +} diff --git a/sdk/program/src/wasm/instructions.rs b/sdk/program/src/wasm/instructions.rs new file mode 100644 index 00000000000000..36abe05c6f4d8d --- /dev/null +++ b/sdk/program/src/wasm/instructions.rs @@ -0,0 +1,28 @@ +//! The `Instructions` struct is a workaround for the lack of Vec support in wasm-bindgen +//! 
(ref: https://github.com/rustwasm/wasm-bindgen/issues/111) +#![cfg(target_arch = "wasm32")] +use {crate::instruction::Instruction, wasm_bindgen::prelude::*}; + +#[wasm_bindgen] +#[derive(Default)] +pub struct Instructions { + instructions: Vec<Instruction>, +} + +#[wasm_bindgen] +impl Instructions { + #[wasm_bindgen(constructor)] + pub fn constructor() -> Instructions { + Instructions::default() + } + + pub fn push(&mut self, instruction: Instruction) { + self.instructions.push(instruction); + } +} + +impl From<Instructions> for Vec<Instruction> { + fn from(instructions: Instructions) -> Self { + instructions.instructions + } +} diff --git a/sdk/program/src/wasm/mod.rs b/sdk/program/src/wasm/mod.rs new file mode 100644 index 00000000000000..801142b487f10e --- /dev/null +++ b/sdk/program/src/wasm/mod.rs @@ -0,0 +1,24 @@ +//! solana-program Javascript interface +#![cfg(target_arch = "wasm32")] +use wasm_bindgen::prelude::*; + +pub mod hash; +pub mod instructions; +pub mod pubkey; +pub mod system_instruction; + +/// Initialize Javascript logging and panic handler +#[wasm_bindgen] +pub fn init() { + use std::sync::Once; + static INIT: Once = Once::new(); + + INIT.call_once(|| { + std::panic::set_hook(Box::new(console_error_panic_hook::hook)); + console_log::init_with_level(log::Level::Info).unwrap(); + }); +} + +pub fn display_to_jsvalue<T: std::fmt::Display>(display: T) -> JsValue { + display.to_string().into() +} diff --git a/sdk/program/src/wasm/pubkey.rs b/sdk/program/src/wasm/pubkey.rs new file mode 100644 index 00000000000000..a3aa27941927eb --- /dev/null +++ b/sdk/program/src/wasm/pubkey.rs @@ -0,0 +1,121 @@ +//! `Pubkey` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{pubkey::*, wasm::display_to_jsvalue}, + js_sys::{Array, Uint8Array}, + wasm_bindgen::{prelude::*, JsCast}, +}; + +fn js_value_to_seeds_vec(array_of_uint8_arrays: &[JsValue]) -> Result<Vec<Vec<u8>>, JsValue> { + let vec_vec_u8 = array_of_uint8_arrays + .iter() + .filter_map(|u8_array| { + u8_array + .dyn_ref::<Uint8Array>() + .map(|u8_array| u8_array.to_vec()) + }) + .collect::<Vec<_>>(); + + if vec_vec_u8.len() != array_of_uint8_arrays.len() { + Err("Invalid Array of Uint8Arrays".into()) + } else { + Ok(vec_vec_u8) + } +} + +#[wasm_bindgen] +impl Pubkey { + /// Create a new Pubkey object + /// + /// * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result<Pubkey, JsValue> { + if let Some(base58_str) = value.as_string() { + base58_str.parse::<Pubkey>().map_err(display_to_jsvalue) + } else if let Some(uint8_array) = value.dyn_ref::<Uint8Array>() { + Ok(Pubkey::new(&uint8_array.to_vec())) + } else if let Some(array) = value.dyn_ref::<Array>() { + let mut bytes = vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if n >= 0. && n <= 255. { + bytes.push(n as u8); + continue; + } + } + return Err(format!("Invalid array argument: {:?}", x).into()); + } + Ok(Pubkey::new(&bytes)) + } else if value.is_undefined() { + Ok(Pubkey::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base58 string representation of the public key + pub fn toString(&self) -> String { + self.to_string() + } + + /// Check if a `Pubkey` is on the ed25519 curve. 
+ pub fn isOnCurve(&self) -> bool { + self.is_on_curve() + } + + /// Checks if two `Pubkey`s are equal + pub fn equals(&self, other: &Pubkey) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the public key + pub fn toBytes(&self) -> Box<[u8]> { + self.0.clone().into() + } + + /// Derive a Pubkey from another Pubkey, string seed, and a program id + pub fn createWithSeed(base: &Pubkey, seed: &str, owner: &Pubkey) -> Result<Pubkey, JsValue> { + Pubkey::create_with_seed(base, seed, owner).map_err(display_to_jsvalue) + } + + /// Derive a program address from seeds and a program id + pub fn createProgramAddress( + seeds: Box<[JsValue]>, + program_id: &Pubkey, + ) -> Result<Pubkey, JsValue> { + let seeds_vec = js_value_to_seeds_vec(&seeds)?; + let seeds_slice = seeds_vec + .iter() + .map(|seed| seed.as_slice()) + .collect::<Vec<_>>(); + + Pubkey::create_program_address(seeds_slice.as_slice(), program_id) + .map_err(display_to_jsvalue) + } + + /// Find a valid program address + /// + /// Returns: + /// * `[PubKey, number]` - the program address and bump seed + pub fn findProgramAddress( + seeds: Box<[JsValue]>, + program_id: &Pubkey, + ) -> Result<JsValue, JsValue> { + let seeds_vec = js_value_to_seeds_vec(&seeds)?; + let seeds_slice = seeds_vec + .iter() + .map(|seed| seed.as_slice()) + .collect::<Vec<_>>(); + + let (address, bump_seed) = Pubkey::find_program_address(seeds_slice.as_slice(), program_id); + + let result = Array::new_with_length(2); + result.set(0, address.into()); + result.set(1, bump_seed.into()); + Ok(result.into()) + } +} diff --git a/sdk/program/src/wasm/system_instruction.rs b/sdk/program/src/wasm/system_instruction.rs new file mode 100644 index 00000000000000..94dd636788092c --- /dev/null +++ b/sdk/program/src/wasm/system_instruction.rs @@ -0,0 +1,112 @@ +//! `SystemInstruction` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{instruction::Instruction, pubkey::Pubkey, system_instruction::*}, + wasm_bindgen::prelude::*, +}; + +#[wasm_bindgen] +impl SystemInstruction { + pub fn createAccount( + from_pubkey: &Pubkey, + to_pubkey: &Pubkey, + lamports: u64, + space: u64, + owner: &Pubkey, + ) -> Instruction { + create_account(from_pubkey, to_pubkey, lamports, space, owner) + } + + pub fn createAccountWithSeed( + from_pubkey: &Pubkey, + to_pubkey: &Pubkey, + base: &Pubkey, + seed: &str, + lamports: u64, + space: u64, + owner: &Pubkey, + ) -> Instruction { + create_account_with_seed(from_pubkey, to_pubkey, base, seed, lamports, space, owner) + } + + pub fn assign(pubkey: &Pubkey, owner: &Pubkey) -> Instruction { + assign(pubkey, owner) + } + + pub fn assignWithSeed( + pubkey: &Pubkey, + base: &Pubkey, + seed: &str, + owner: &Pubkey, + ) -> Instruction { + assign_with_seed(pubkey, base, seed, owner) + } + + pub fn transfer(from_pubkey: &Pubkey, to_pubkey: &Pubkey, lamports: u64) -> Instruction { + transfer(from_pubkey, to_pubkey, lamports) + } + + pub fn transferWithSeed( + from_pubkey: &Pubkey, + from_base: &Pubkey, + from_seed: String, + from_owner: &Pubkey, + to_pubkey: &Pubkey, + lamports: u64, + ) -> Instruction { + transfer_with_seed( + from_pubkey, + from_base, + from_seed, + from_owner, + to_pubkey, + lamports, + ) + } + + pub fn allocate(pubkey: &Pubkey, space: u64) -> Instruction { + allocate(pubkey, space) + } + + pub fn allocateWithSeed( + address: &Pubkey, + base: &Pubkey, + seed: &str, + space: u64, + owner: &Pubkey, + ) -> Instruction { + allocate_with_seed(address, base, seed, space, owner) + } + + pub fn createNonceAccount( + from_pubkey: &Pubkey, + 
nonce_pubkey: &Pubkey, + authority: &Pubkey, + lamports: u64, + ) -> js_sys::Array { + let instructions = create_nonce_account(from_pubkey, nonce_pubkey, authority, lamports); + instructions.into_iter().map(JsValue::from).collect() + } + + pub fn advanceNonceAccount(nonce_pubkey: &Pubkey, authorized_pubkey: &Pubkey) -> Instruction { + advance_nonce_account(nonce_pubkey, authorized_pubkey) + } + + pub fn withdrawNonceAccount( + nonce_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + to_pubkey: &Pubkey, + lamports: u64, + ) -> Instruction { + withdraw_nonce_account(nonce_pubkey, authorized_pubkey, to_pubkey, lamports) + } + + pub fn authorizeNonceAccount( + nonce_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + new_authority: &Pubkey, + ) -> Instruction { + authorize_nonce_account(nonce_pubkey, authorized_pubkey, new_authority) + } +} diff --git a/sdk/program/tests/hash.mjs b/sdk/program/tests/hash.mjs new file mode 100644 index 00000000000000..4b25857a49d727 --- /dev/null +++ b/sdk/program/tests/hash.mjs @@ -0,0 +1,81 @@ +import { expect } from "chai"; +import { init, Hash } from "crate"; +init(); + +// TODO: wasm_bindgen doesn't currently support exporting constants +const HASH_BYTES = 32; + +describe("Hash", function () { + it("invalid", () => { + expect(() => { + new Hash([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Hash([ + 'invalid', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Hash( + "0x300000000000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Hash( + "0x300000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Hash( + "135693854574979916511997248057056142015550763280047535983739356259273198796800000" + ); + }).to.throw(); + + expect(() => { + new Hash("12345"); + }).to.throw(); + }); + + it("toString", () => { + const key = new Hash("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toString()).to.eq("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + + const key2 = new Hash("1111111111111111111111111111BukQL"); + expect(key2.toString()).to.eq("1111111111111111111111111111BukQL"); + + const key3 = new Hash("11111111111111111111111111111111"); + expect(key3.toString()).to.eq("11111111111111111111111111111111"); + + const key4 = new Hash([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + expect(key4.toString()).to.eq("11111111111111111111111111111111"); + }); + + it("toBytes", () => { + const key = new Hash("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toBytes()).to.deep.equal( + new Uint8Array([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + + const key2 = new Hash(); + expect(key2.toBytes()).to.deep.equal( + new Uint8Array([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + }); +}); diff --git a/sdk/program/tests/pubkey.mjs b/sdk/program/tests/pubkey.mjs new file mode 100644 index 00000000000000..67ee73ba3de0c6 --- /dev/null +++ b/sdk/program/tests/pubkey.mjs @@ -0,0 +1,185 @@ +import { expect } from "chai"; +import { init, Pubkey } from "crate"; +init(); + +// TODO: wasm_bindgen doesn't currently support exporting constants +const MAX_SEED_LEN = 32; + 
+describe("Pubkey", function () { + it("invalid", () => { + expect(() => { + new Pubkey([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Pubkey([ + 'invalid', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Pubkey( + "0x300000000000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Pubkey( + "0x300000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Pubkey( + "135693854574979916511997248057056142015550763280047535983739356259273198796800000" + ); + }).to.throw(); + + expect(() => { + new Pubkey("12345"); + }).to.throw(); + }); + + it("toString", () => { + const key = new Pubkey("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toString()).to.eq("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + + const key2 = new Pubkey("1111111111111111111111111111BukQL"); + expect(key2.toString()).to.eq("1111111111111111111111111111BukQL"); + + const key3 = new Pubkey("11111111111111111111111111111111"); + expect(key3.toString()).to.eq("11111111111111111111111111111111"); + + const key4 = new Pubkey([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + expect(key4.toString()).to.eq("11111111111111111111111111111111"); + }); + + it("toBytes", () => { + const key = new Pubkey("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toBytes()).to.deep.equal( + new Uint8Array([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + + const key2 = new Pubkey(); + expect(key2.toBytes()).to.deep.equal( + new Uint8Array([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + }); + + it("isOnCurve", () => { + let onCurve = new Pubkey("J4NYrSRccTUGXP7wmFwiByakqWKZb5RwpiAoskpgAQRb"); + expect(onCurve.isOnCurve()).to.be.true; + + let offCurve = new Pubkey("12rqwuEgBYiGhBrDJStCiqEtzQpTTiZbh7teNVLuYcFA"); + expect(offCurve.isOnCurve()).to.be.false; + }); + + it("equals", () => { + const arrayKey = new Pubkey([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + const base58Key = new Pubkey("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + + expect(arrayKey.equals(base58Key)).to.be.true; + }); + + it("createWithSeed", async () => { + const defaultPublicKey = new Pubkey("11111111111111111111111111111111"); + const derivedKey = Pubkey.createWithSeed( + defaultPublicKey, + "limber chicken: 4/45", + defaultPublicKey + ); + + expect( + derivedKey.equals( + new Pubkey("9h1HyLCW5dZnBVap8C5egQ9Z6pHyjsh5MNy83iPqqRuq") + ) + ).to.be.true; + }); + + it("createProgramAddress", async () => { + const programId = new Pubkey("BPFLoader1111111111111111111111111111111111"); + const publicKey = new Pubkey("SeedPubey1111111111111111111111111111111111"); + + let programAddress = Pubkey.createProgramAddress( + [Buffer.from("", "utf8"), Buffer.from([1])], + programId + ); + expect( + programAddress.equals( + new Pubkey("3gF2KMe9KiC6FNVBmfg9i267aMPvK37FewCip4eGBFcT") + ) + ).to.be.true; + + programAddress = Pubkey.createProgramAddress( + [Buffer.from("☉", "utf8")], + programId + ); + expect( + programAddress.equals( + new Pubkey("7ytmC1nT1xY4RfxCV2ZgyA7UakC93do5ZdyhdF3EtPj7") + ) + ).to.be.true; + + 
programAddress = Pubkey.createProgramAddress( + [Buffer.from("Talking", "utf8"), Buffer.from("Squirrels", "utf8")], + programId + ); + expect( + programAddress.equals( + new Pubkey("HwRVBufQ4haG5XSgpspwKtNd3PC9GM9m1196uJW36vds") + ) + ).to.be.true; + + programAddress = Pubkey.createProgramAddress( + [publicKey.toBytes()], + programId + ); + expect( + programAddress.equals( + new Pubkey("GUs5qLUfsEHkcMB9T38vjr18ypEhRuNWiePW2LoK4E3K") + ) + ).to.be.true; + + const programAddress2 = Pubkey.createProgramAddress( + [Buffer.from("Talking", "utf8")], + programId + ); + expect(programAddress.equals(programAddress2)).to.eq(false); + + expect(() => { + Pubkey.createProgramAddress([Buffer.alloc(MAX_SEED_LEN + 1)], programId); + }).to.throw(); + }); + + it("findProgramAddress", async () => { + const programId = new Pubkey("BPFLoader1111111111111111111111111111111111"); + let [programAddress, nonce] = Pubkey.findProgramAddress( + [Buffer.from("", "utf8")], + programId + ); + expect( + programAddress.equals( + Pubkey.createProgramAddress( + [Buffer.from("", "utf8"), Buffer.from([nonce])], + programId + ) + ) + ).to.be.true; + }); +}); diff --git a/sdk/src/account.rs b/sdk/src/account.rs index ca19f91a857437..2e8e2fc34ad216 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -103,6 +103,7 @@ pub trait WritableAccount: ReadableAccount { ); Ok(()) } + fn data_mut(&mut self) -> &mut Vec; fn data_as_mut_slice(&mut self) -> &mut [u8]; fn set_owner(&mut self, owner: Pubkey); fn copy_into_owner_from_slice(&mut self, source: &[u8]); @@ -156,6 +157,9 @@ impl WritableAccount for Account { fn set_lamports(&mut self, lamports: u64) { self.lamports = lamports; } + fn data_mut(&mut self) -> &mut Vec { + &mut self.data + } fn data_as_mut_slice(&mut self) -> &mut [u8] { &mut self.data } @@ -192,9 +196,11 @@ impl WritableAccount for AccountSharedData { fn set_lamports(&mut self, lamports: u64) { self.lamports = lamports; } + fn data_mut(&mut self) -> &mut Vec { + Arc::make_mut(&mut self.data) + } fn data_as_mut_slice(&mut self) -> &mut [u8] { - let data = Arc::make_mut(&mut self.data); - &mut data[..] + &mut self.data_mut()[..] } fn set_owner(&mut self, owner: Pubkey) { self.owner = owner; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 71e64b961233c3..24bf8c529d1f71 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -271,6 +271,14 @@ pub mod reject_non_rent_exempt_vote_withdraws { solana_sdk::declare_id!("7txXZZD6Um59YoLMF7XUNimbMjsqsWhc7g2EniiTrmp1"); } +pub mod evict_invalid_stakes_cache_entries { + solana_sdk::declare_id!("EMX9Q7TVFAmQ9V1CggAkhMzhXSg8ECp7fHrWQX2G1chf"); +} + +pub mod allow_votes_to_directly_update_vote_state { + solana_sdk::declare_id!("Ff8b1fBeB86q8cjq47ZhsQLgv5EkHu3G1C99zjUfAzrq"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -333,6 +341,8 @@ lazy_static! 
{ (reject_empty_instruction_without_program::id(), "fail instructions which have native_loader as program_id directly"), (fixed_memcpy_nonoverlapping_check::id(), "use correct check for nonoverlapping regions in memcpy syscall"), (reject_non_rent_exempt_vote_withdraws::id(), "fail vote withdraw instructions which leave the account non-rent-exempt"), + (evict_invalid_stakes_cache_entries::id(), "evict invalid stakes cache entries on epoch boundaries"), + (allow_votes_to_directly_update_vote_state::id(), "enable direct vote state update"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index af9aa182034775..2df94726a17601 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -47,6 +47,7 @@ pub mod system_transaction; pub mod timing; pub mod transaction; pub mod transport; +pub mod wasm; /// Same as `declare_id` except report that this id has been deprecated pub use solana_sdk_macro::declare_deprecated_id; diff --git a/sdk/src/log.rs b/sdk/src/log.rs index 4cc45cf413cc0c..78a45afaf4a1e8 100644 --- a/sdk/src/log.rs +++ b/sdk/src/log.rs @@ -11,13 +11,4 @@ macro_rules! info { ($msg:expr) => { $crate::log::sol_log($msg) }; - ($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => { - $crate::log::sol_log_64( - $arg1 as u64, - $arg2 as u64, - $arg3 as u64, - $arg4 as u64, - $arg5 as u64, - ) - }; } diff --git a/sdk/src/signer/keypair.rs b/sdk/src/signer/keypair.rs index 3a80cf308e1fe4..63283d403e6eb6 100644 --- a/sdk/src/signer/keypair.rs +++ b/sdk/src/signer/keypair.rs @@ -17,9 +17,11 @@ use { io::{Read, Write}, path::Path, }, + wasm_bindgen::prelude::*, }; /// A vanilla Ed25519 key pair +#[wasm_bindgen] #[derive(Debug)] pub struct Keypair(ed25519_dalek::Keypair); diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs index acf064b9f47a2d..60ed8c39f5563d 100644 --- a/sdk/src/transaction/error.rs +++ b/sdk/src/transaction/error.rs @@ -101,6 +101,10 @@ pub enum TransactionError { /// Transaction would exceed max account limit within the block #[error("Transaction would exceed max account limit within the block")] WouldExceedMaxAccountCostLimit, + + /// Transaction would exceed max account data limit within the block + #[error("Transaction would exceed max account data limit within the block")] + WouldExceedMaxAccountDataCostLimit, } impl From for TransactionError { diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 300ebb1fc5f417..e81bbfef77abea 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -15,6 +15,7 @@ use { short_vec, signature::{Signature, SignerError}, signers::Signers, + wasm_bindgen, }, serde::Serialize, solana_program::{system_instruction::SystemInstruction, system_program}, @@ -38,6 +39,7 @@ pub enum TransactionVerificationMode { pub type Result = result::Result; /// An atomic transaction +#[wasm_bindgen] #[frozen_abi(digest = "FZtncnS1Xk8ghHfKiXE5oGiUbw2wJhmfXQuNgQR3K6Mc")] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize, AbiExample)] pub struct Transaction { @@ -47,10 +49,12 @@ pub struct Transaction { /// [`account_keys`]: Message::account_keys /// // NOTE: Serialization-related changes must be paired with the direct read at sigverify. + #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub signatures: Vec, /// The message to sign. 
+ #[wasm_bindgen(skip)] pub message: Message, } diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 2768e665da6238..a6db1587b4a312 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -3,7 +3,10 @@ use { crate::{ hash::Hash, - message::{v0, MappedAddresses, MappedMessage, SanitizedMessage, VersionedMessage}, + message::{ + v0::{self, LoadedAddresses}, + SanitizedMessage, VersionedMessage, + }, nonce::NONCED_TX_MARKER_IX_INDEX, precompiles::verify_if_precompile, program_utils::limited_deserialize, @@ -37,21 +40,21 @@ pub struct TransactionAccountLocks<'a> { impl SanitizedTransaction { /// Create a sanitized transaction from an unsanitized transaction. - /// If the input transaction uses address maps, attempt to map the - /// transaction keys to full addresses. + /// If the input transaction uses address tables, attempt to lookup + /// the address for each table index. pub fn try_create( tx: VersionedTransaction, message_hash: Hash, is_simple_vote_tx: Option, - address_mapper: impl Fn(&v0::Message) -> Result, + address_loader: impl Fn(&v0::Message) -> Result, ) -> Result { tx.sanitize()?; let signatures = tx.signatures; let message = match tx.message { VersionedMessage::Legacy(message) => SanitizedMessage::Legacy(message), - VersionedMessage::V0(message) => SanitizedMessage::V0(MappedMessage { - mapped_addresses: address_mapper(&message)?, + VersionedMessage::V0(message) => SanitizedMessage::V0(v0::LoadedMessage { + loaded_addresses: address_loader(&message)?, message, }), }; @@ -125,9 +128,9 @@ impl SanitizedTransaction { pub fn to_versioned_transaction(&self) -> VersionedTransaction { let signatures = self.signatures.clone(); match &self.message { - SanitizedMessage::V0(mapped_msg) => VersionedTransaction { + SanitizedMessage::V0(sanitized_msg) => VersionedTransaction { signatures, - message: VersionedMessage::V0(mapped_msg.message.clone()), + message: VersionedMessage::V0(sanitized_msg.message.clone()), }, SanitizedMessage::Legacy(message) => VersionedTransaction { signatures, @@ -193,7 +196,7 @@ impl SanitizedTransaction { fn message_data(&self) -> Vec { match &self.message { SanitizedMessage::Legacy(message) => message.serialize(), - SanitizedMessage::V0(mapped_msg) => mapped_msg.message.serialize(), + SanitizedMessage::V0(message) => message.serialize(), } } diff --git a/sdk/src/wasm/keypair.rs b/sdk/src/wasm/keypair.rs new file mode 100644 index 00000000000000..6f2ffebbb7ccf5 --- /dev/null +++ b/sdk/src/wasm/keypair.rs @@ -0,0 +1,34 @@ +//! 
`Keypair` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::signer::{keypair::Keypair, Signer}, + solana_program::{pubkey::Pubkey, wasm::display_to_jsvalue}, + wasm_bindgen::prelude::*, +}; + +#[wasm_bindgen] +impl Keypair { + /// Create a new `Keypair ` + #[wasm_bindgen(constructor)] + pub fn constructor() -> Keypair { + Keypair::new() + } + + /// Convert a `Keypair` to a `Uint8Array` + pub fn toBytes(&self) -> Box<[u8]> { + self.to_bytes().into() + } + + /// Recover a `Keypair` from a `Uint8Array` + pub fn fromBytes(bytes: &[u8]) -> Result<Keypair, JsValue> { + Keypair::from_bytes(bytes).map_err(display_to_jsvalue) + } + + /// Return the `Pubkey` for this `Keypair` + #[wasm_bindgen(js_name = pubkey)] + pub fn js_pubkey(&self) -> Pubkey { + // `wasm_bindgen` does not support traits (`Signer) yet + self.pubkey() + } +} diff --git a/sdk/src/wasm/mod.rs b/sdk/src/wasm/mod.rs new file mode 100644 index 00000000000000..6946e730f79fde --- /dev/null +++ b/sdk/src/wasm/mod.rs @@ -0,0 +1,5 @@ +//! solana-sdk Javascript interface +#![cfg(target_arch = "wasm32")] + +pub mod keypair; +pub mod transaction; diff --git a/sdk/src/wasm/transaction.rs b/sdk/src/wasm/transaction.rs new file mode 100644 index 00000000000000..4b8bc6f82534a6 --- /dev/null +++ b/sdk/src/wasm/transaction.rs @@ -0,0 +1,58 @@ +//! `Transaction` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{ + hash::Hash, + signer::keypair::Keypair, + {message::Message, transaction::Transaction}, + }, + solana_program::{ + pubkey::Pubkey, + wasm::{display_to_jsvalue, instructions::Instructions}, + }, + wasm_bindgen::prelude::*, +}; + +#[wasm_bindgen] +impl Transaction { + /// Create a new `Transaction` + #[wasm_bindgen(constructor)] + pub fn constructor(instructions: Instructions, payer: Option<Pubkey>) -> Transaction { + let instructions: Vec<_> = instructions.into(); + Transaction::new_with_payer(&instructions, payer.as_ref()) + } + + /// Return a message containing all data that should be signed. + #[wasm_bindgen(js_name = message)] + pub fn js_message(&self) -> Message { + self.message.clone() + } + + /// Return the serialized message data to sign. 
+ pub fn messageData(&self) -> Box<[u8]> { + self.message_data().into() + } + + /// Verify the transaction + #[wasm_bindgen(js_name = verify)] + pub fn js_verify(&self) -> Result<(), JsValue> { + self.verify().map_err(display_to_jsvalue) + } + + pub fn partialSign(&mut self, keypair: &Keypair, recent_blockhash: &Hash) { + self.partial_sign(&[keypair], *recent_blockhash); + } + + pub fn isSigned(&self) -> bool { + self.is_signed() + } + + pub fn toBytes(&self) -> Box<[u8]> { + bincode::serialize(self).unwrap().into() + } + + pub fn fromBytes(bytes: &[u8]) -> Result { + bincode::deserialize(bytes).map_err(display_to_jsvalue) + } +} diff --git a/sdk/tests/keypair.mjs b/sdk/tests/keypair.mjs new file mode 100644 index 00000000000000..092ba511ba0a9f --- /dev/null +++ b/sdk/tests/keypair.mjs @@ -0,0 +1,14 @@ +import { expect } from "chai"; +import { init, Keypair } from "crate"; +init(); + +describe("Keypair", function () { + it("works", () => { + const keypair = new Keypair(); + let bytes = keypair.toBytes(); + expect(bytes).to.have.length(64); + + const recoveredKeypair = Keypair.fromBytes(bytes); + expect(keypair.pubkey().equals(recoveredKeypair.pubkey())); + }); +}); diff --git a/sdk/tests/transaction.mjs b/sdk/tests/transaction.mjs new file mode 100644 index 00000000000000..c672b8c460848d --- /dev/null +++ b/sdk/tests/transaction.mjs @@ -0,0 +1,56 @@ +import { expect } from "chai"; +import { + init, + Pubkey, + Keypair, + Hash, + SystemInstruction, + Instructions, + Transaction, +} from "crate"; +init(); + +describe("Transaction", function () { + it("SystemInstruction::Transfer", () => { + const payer = Keypair.fromBytes( + new Uint8Array([ + 241, 230, 222, 64, 184, 48, 232, 92, 156, 210, 229, 183, 154, 251, 5, + 227, 98, 184, 34, 234, 39, 106, 62, 210, 166, 187, 31, 44, 40, 96, 24, + 51, 252, 28, 2, 120, 234, 212, 139, 111, 96, 8, 168, 204, 34, 72, 199, + 205, 117, 165, 82, 51, 32, 93, 211, 36, 239, 245, 139, 218, 99, 211, + 207, 177, + ]) + ); + + const src = Keypair.fromBytes( + new Uint8Array([ + 172, 219, 139, 103, 154, 105, 92, 23, 227, 108, 174, 80, 215, 227, 62, + 8, 66, 38, 151, 239, 148, 184, 180, 148, 149, 18, 106, 94, 73, 143, 27, + 132, 193, 64, 199, 93, 222, 83, 172, 224, 116, 205, 54, 38, 191, 178, + 149, 71, 65, 132, 46, 71, 126, 81, 63, 254, 21, 101, 90, 52, 67, 204, + 128, 199, + ]) + ); + + const dst = new Pubkey("11111111111111111111111111111112"); + + const recent_blockhash = new Hash( + "EETubP5AKHgjPAhzPAFcb8BAY1hMH639CWCFTqi3hq1k" + ); + + let instructions = new Instructions(); + instructions.push( + SystemInstruction.transfer(src.pubkey(), dst, BigInt(123)) + ); + + let transaction = new Transaction(instructions, payer.pubkey()); + transaction.partialSign(payer, recent_blockhash); + transaction.partialSign(src, recent_blockhash); + expect(transaction.isSigned()).to.be.true; + transaction.verify(); + + expect(Buffer.from(transaction.toBytes()).toString("base64")).to.equal( + "AoZrVzP93eyp3vbl6CU9XQjQfm4Xp/7nSiBlsX/kJmfTQZsGTOrFnt6EUqHVte97fGZ71UAXDfLbR5B31OtRdgdab57BOU8mq0ztMutZAVBPtGJHVly8RPz4TYa+OFU7EIk3Wrv4WUMCb/NR+LxELLH+tQt5SrkvB7rCE2DniM8JAgABBPwcAnjq1ItvYAiozCJIx811pVIzIF3TJO/1i9pj08+xwUDHXd5TrOB0zTYmv7KVR0GELkd+UT/+FWVaNEPMgMcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxJrndgN4IFTxep3s6kO0ROug7bEsbx0xxuDkqEvwUusBAwIBAgwCAAAAewAAAAAAAAA=" + ); + }); +}); diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index a7f9d718158376..d5b191772934a1 100644 --- a/storage-bigtable/Cargo.toml +++ 
b/storage-bigtable/Cargo.toml @@ -19,7 +19,7 @@ goauth = "0.10.0" log = "0.4.14" prost = "0.9.0" prost-types = "0.9.0" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" smpl_jwt = "0.6.1" solana-metrics = { path = "../metrics", version = "=1.10.0" } @@ -27,7 +27,7 @@ solana-sdk = { path = "../sdk", version = "=1.10.0" } solana-storage-proto = { path = "../storage-proto", version = "=1.10.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" } thiserror = "1.0" -tonic = { version = "0.6.1", features = ["tls", "transport"] } +tonic = { version = "0.6.2", features = ["tls", "transport"] } zstd = "0.9.0" # openssl is a dependency of the goauth and smpl_jwt crates, but explicitly diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index 0f9978a8ef5030..7d388637ca550f 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -353,9 +353,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88358bb1dcfeb62dcce85c63006cafb964b7be481d522b7e09589d4d1e718d2a" +checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ "proc-macro2", "prost-build", diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml index 43f3d742e1bb24..c8d1d21406cdba 100644 --- a/storage-proto/Cargo.toml +++ b/storage-proto/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" bincode = "1.3.3" bs58 = "0.4.0" prost = "0.9.0" -serde = "1.0.130" +serde = "1.0.131" solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" } solana-sdk = { path = "../sdk", version = "=1.10.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" } @@ -29,4 +29,4 @@ name = "solana_storage_proto" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -tonic-build = "0.6.0" +tonic-build = "0.6.2" diff --git a/storage-proto/proto/transaction_by_addr.proto b/storage-proto/proto/transaction_by_addr.proto index 36c17832ee9b85..d1a40cfd7cc5f6 100644 --- a/storage-proto/proto/transaction_by_addr.proto +++ b/storage-proto/proto/transaction_by_addr.proto @@ -45,6 +45,7 @@ enum TransactionErrorType { UNSUPPORTED_VERSION = 18; INVALID_WRITABLE_ACCOUNT = 19; WOULD_EXCEED_MAX_ACCOUNT_COST_LIMIT = 20; + WOULD_EXCEED_MAX_ACCOUNT_DATA_COST_LIMIT = 21; } message InstructionError { diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index e8511867c6b3a8..636dcc3f9dea44 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -567,6 +567,7 @@ impl TryFrom for TransactionError { 18 => TransactionError::UnsupportedVersion, 19 => TransactionError::InvalidWritableAccount, 20 => TransactionError::WouldExceedMaxAccountCostLimit, + 21 => TransactionError::WouldExceedMaxAccountDataCostLimit, _ => return Err("Invalid TransactionError"), }) } @@ -637,6 +638,9 @@ impl From for tx_by_addr::TransactionError { TransactionError::WouldExceedMaxAccountCostLimit => { tx_by_addr::TransactionErrorType::WouldExceedMaxAccountCostLimit } + TransactionError::WouldExceedMaxAccountDataCostLimit => { + tx_by_addr::TransactionErrorType::WouldExceedMaxAccountDataCostLimit + } } as i32, instruction_error: match transaction_error { TransactionError::InstructionError(index, ref instruction_error) => { diff --git a/streamer/src/packet.rs b/streamer/src/packet.rs index 58688ef80ec98a..b0abe551a3a2ef 100644 --- 
a/streamer/src/packet.rs +++ b/streamer/src/packet.rs @@ -9,13 +9,13 @@ use { }; pub use { solana_perf::packet::{ - limited_deserialize, to_packets_chunked, Packets, PacketsRecycler, NUM_PACKETS, + limited_deserialize, to_packet_batches, PacketBatch, PacketBatchRecycler, NUM_PACKETS, PACKETS_PER_BATCH, }, solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE}, }; -pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Result { +pub fn recv_from(batch: &mut PacketBatch, socket: &UdpSocket, max_wait_ms: u64) -> Result { let mut i = 0; //DOCUMENTED SIDE-EFFECT //Performance out of the IO without poll @@ -27,11 +27,11 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res trace!("receiving on {}", socket.local_addr().unwrap()); let start = Instant::now(); loop { - obj.packets.resize( + batch.packets.resize( std::cmp::min(i + NUM_RCVMMSGS, PACKETS_PER_BATCH), Packet::default(), ); - match recv_mmsg(socket, &mut obj.packets[i..]) { + match recv_mmsg(socket, &mut batch.packets[i..]) { Err(_) if i > 0 => { if start.elapsed().as_millis() as u64 > max_wait_ms { break; @@ -55,17 +55,17 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res } } } - obj.packets.truncate(i); + batch.packets.truncate(i); inc_new_counter_debug!("packets-recv_count", i); Ok(i) } pub fn send_to( - obj: &Packets, + batch: &PacketBatch, socket: &UdpSocket, socket_addr_space: &SocketAddrSpace, ) -> Result<()> { - for p in &obj.packets { + for p in &batch.packets { let addr = p.meta.addr(); if socket_addr_space.check(&addr) { socket.send_to(&p.data[..p.meta.size], &addr)?; @@ -90,9 +90,9 @@ mod tests { // test that the address is actually being updated let send_addr: SocketAddr = "127.0.0.1:123".parse().unwrap(); let packets = vec![Packet::default()]; - let mut msgs = Packets::new(packets); - msgs.set_addr(&send_addr); - assert_eq!(msgs.packets[0].meta.addr(), send_addr); + let mut packet_batch = PacketBatch::new(packets); + packet_batch.set_addr(&send_addr); + assert_eq!(packet_batch.packets[0].meta.addr(), send_addr); } #[test] @@ -102,21 +102,21 @@ mod tests { let addr = recv_socket.local_addr().unwrap(); let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr = send_socket.local_addr().unwrap(); - let mut p = Packets::default(); + let mut batch = PacketBatch::default(); - p.packets.resize(10, Packet::default()); + batch.packets.resize(10, Packet::default()); - for m in p.packets.iter_mut() { + for m in batch.packets.iter_mut() { m.meta.set_addr(&addr); m.meta.size = PACKET_DATA_SIZE; } - send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); + send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); - let recvd = recv_from(&mut p, &recv_socket, 1).unwrap(); + let recvd = recv_from(&mut batch, &recv_socket, 1).unwrap(); - assert_eq!(recvd, p.packets.len()); + assert_eq!(recvd, batch.packets.len()); - for m in &p.packets { + for m in &batch.packets { assert_eq!(m.meta.size, PACKET_DATA_SIZE); assert_eq!(m.meta.addr(), saddr); } @@ -125,7 +125,7 @@ mod tests { #[test] pub fn debug_trait() { write!(io::sink(), "{:?}", Packet::default()).unwrap(); - write!(io::sink(), "{:?}", Packets::default()).unwrap(); + write!(io::sink(), "{:?}", PacketBatch::default()).unwrap(); } #[test] @@ -151,25 +151,25 @@ mod tests { let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); let addr = recv_socket.local_addr().unwrap(); let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); - let mut p = 
Packets::default(); - p.packets.resize(PACKETS_PER_BATCH, Packet::default()); + let mut batch = PacketBatch::default(); + batch.packets.resize(PACKETS_PER_BATCH, Packet::default()); // Should only get PACKETS_PER_BATCH packets per iteration even // if a lot more were sent, and regardless of packet size for _ in 0..2 * PACKETS_PER_BATCH { - let mut p = Packets::default(); - p.packets.resize(1, Packet::default()); - for m in p.packets.iter_mut() { + let mut batch = PacketBatch::default(); + batch.packets.resize(1, Packet::default()); + for m in batch.packets.iter_mut() { m.meta.set_addr(&addr); m.meta.size = 1; } - send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); + send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); } - let recvd = recv_from(&mut p, &recv_socket, 100).unwrap(); + let recvd = recv_from(&mut batch, &recv_socket, 100).unwrap(); // Check we only got PACKETS_PER_BATCH packets assert_eq!(recvd, PACKETS_PER_BATCH); - assert_eq!(p.packets.capacity(), PACKETS_PER_BATCH); + assert_eq!(batch.packets.capacity(), PACKETS_PER_BATCH); } } diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 9bd857203cf4e2..58365f190f7ab3 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -3,7 +3,7 @@ use { crate::{ - packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH}, + packet::{self, send_to, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH}, recvmmsg::NUM_RCVMMSGS, socket::SocketAddrSpace, }, @@ -24,8 +24,8 @@ use { thiserror::Error, }; -pub type PacketReceiver = Receiver; -pub type PacketSender = Sender; +pub type PacketBatchReceiver = Receiver; +pub type PacketBatchSender = Sender; #[derive(Error, Debug)] pub enum StreamerError { @@ -36,7 +36,7 @@ pub enum StreamerError { RecvTimeout(#[from] RecvTimeoutError), #[error("send packets error")] - Send(#[from] SendError), + Send(#[from] SendError), } pub type Result = std::result::Result; @@ -44,8 +44,8 @@ pub type Result = std::result::Result; fn recv_loop( sock: &UdpSocket, exit: Arc, - channel: &PacketSender, - recycler: &PacketsRecycler, + channel: &PacketBatchSender, + recycler: &PacketBatchRecycler, name: &'static str, coalesce_ms: u64, use_pinned_memory: bool, @@ -55,10 +55,10 @@ fn recv_loop( let mut now = Instant::now(); let mut num_max_received = 0; // Number of times maximum packets were received loop { - let mut msgs = if use_pinned_memory { - Packets::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, name) + let mut packet_batch = if use_pinned_memory { + PacketBatch::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, name) } else { - Packets::with_capacity(PACKETS_PER_BATCH) + PacketBatch::with_capacity(PACKETS_PER_BATCH) }; loop { // Check for exit signal, even if socket is busy @@ -66,14 +66,14 @@ fn recv_loop( if exit.load(Ordering::Relaxed) { return Ok(()); } - if let Ok(len) = packet::recv_from(&mut msgs, sock, coalesce_ms) { + if let Ok(len) = packet::recv_from(&mut packet_batch, sock, coalesce_ms) { if len == NUM_RCVMMSGS { num_max_received += 1; } recv_count += len; call_count += 1; if len > 0 { - channel.send(msgs)?; + channel.send(packet_batch)?; } break; } @@ -97,8 +97,8 @@ fn recv_loop( pub fn receiver( sock: Arc, exit: &Arc, - packet_sender: PacketSender, - recycler: PacketsRecycler, + packet_sender: PacketBatchSender, + recycler: PacketBatchRecycler, name: &'static str, coalesce_ms: u64, use_pinned_memory: bool, @@ -233,40 +233,46 @@ impl StreamerSendStats { fn recv_send( sock: &UdpSocket, - r: &PacketReceiver, + r: 
&PacketBatchReceiver, socket_addr_space: &SocketAddrSpace, stats: &mut Option, ) -> Result<()> { let timer = Duration::new(1, 0); - let msgs = r.recv_timeout(timer)?; + let packet_batch = r.recv_timeout(timer)?; if let Some(stats) = stats { - msgs.packets.iter().for_each(|p| stats.record(p)); + packet_batch.packets.iter().for_each(|p| stats.record(p)); } - send_to(&msgs, sock, socket_addr_space)?; + send_to(&packet_batch, sock, socket_addr_space)?; Ok(()) } -pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec, usize, Duration)> { +pub fn recv_packet_batches( + recvr: &PacketBatchReceiver, +) -> Result<(Vec, usize, Duration)> { let timer = Duration::new(1, 0); - let msgs = recvr.recv_timeout(timer)?; + let packet_batch = recvr.recv_timeout(timer)?; let recv_start = Instant::now(); - trace!("got msgs"); - let mut len = msgs.packets.len(); - let mut batch = vec![msgs]; - while let Ok(more) = recvr.try_recv() { - trace!("got more msgs"); - len += more.packets.len(); - batch.push(more); + trace!("got packets"); + let mut num_packets = packet_batch.packets.len(); + let mut packet_batches = vec![packet_batch]; + while let Ok(packet_batch) = recvr.try_recv() { + trace!("got more packets"); + num_packets += packet_batch.packets.len(); + packet_batches.push(packet_batch); } let recv_duration = recv_start.elapsed(); - trace!("batch len {}", batch.len()); - Ok((batch, len, recv_duration)) + trace!( + "packet batches len: {}, num packets: {}", + packet_batches.len(), + num_packets + ); + Ok((packet_batches, num_packets, recv_duration)) } pub fn responder( name: &'static str, sock: Arc, - r: PacketReceiver, + r: PacketBatchReceiver, socket_addr_space: SocketAddrSpace, stats_reporter_sender: Option>>, ) -> JoinHandle<()> { @@ -315,7 +321,7 @@ mod test { use { super::*, crate::{ - packet::{Packet, Packets, PACKET_DATA_SIZE}, + packet::{Packet, PacketBatch, PACKET_DATA_SIZE}, streamer::{receiver, responder}, }, solana_perf::recycler::Recycler, @@ -332,16 +338,16 @@ mod test { }, }; - fn get_msgs(r: PacketReceiver, num: &mut usize) { + fn get_packet_batches(r: PacketBatchReceiver, num_packets: &mut usize) { for _ in 0..10 { - let m = r.recv_timeout(Duration::new(1, 0)); - if m.is_err() { + let packet_batch_res = r.recv_timeout(Duration::new(1, 0)); + if packet_batch_res.is_err() { continue; } - *num -= m.unwrap().packets.len(); + *num_packets -= packet_batch_res.unwrap().packets.len(); - if *num == 0 { + if *num_packets == 0 { break; } } @@ -350,7 +356,7 @@ mod test { #[test] fn streamer_debug() { write!(io::sink(), "{:?}", Packet::default()).unwrap(); - write!(io::sink(), "{:?}", Packets::default()).unwrap(); + write!(io::sink(), "{:?}", PacketBatch::default()).unwrap(); } #[test] fn streamer_send_test() { @@ -379,23 +385,23 @@ mod test { SocketAddrSpace::Unspecified, None, ); - let mut msgs = Packets::default(); + let mut packet_batch = PacketBatch::default(); for i in 0..5 { - let mut b = Packet::default(); + let mut p = Packet::default(); { - b.data[0] = i as u8; - b.meta.size = PACKET_DATA_SIZE; - b.meta.set_addr(&addr); + p.data[0] = i as u8; + p.meta.size = PACKET_DATA_SIZE; + p.meta.set_addr(&addr); } - msgs.packets.push(b); + packet_batch.packets.push(p); } - s_responder.send(msgs).expect("send"); + s_responder.send(packet_batch).expect("send"); t_responder }; - let mut num = 5; - get_msgs(r_reader, &mut num); - assert_eq!(num, 0); + let mut packets_remaining = 5; + get_packet_batches(r_reader, &mut packets_remaining); + assert_eq!(packets_remaining, 0); exit.store(true, Ordering::Relaxed); 
t_receiver.join().expect("join"); t_responder.join().expect("join"); diff --git a/system-test/testnet-automation.sh b/system-test/testnet-automation.sh index 7d74c4d35ea80a..09af11a0a6ec81 100755 --- a/system-test/testnet-automation.sh +++ b/system-test/testnet-automation.sh @@ -105,8 +105,8 @@ function launch_testnet() { execution_step "Fetch reusable testnet keypairs" if [[ ! -d "${REPO_ROOT}"/net/keypairs ]]; then - git clone https://github.com/solana-labs/testnet-keypairs.git "${REPO_ROOT}"/net/keypairs - #git clone git@github.com:solana-labs/testnet-keypairs.git "${REPO_ROOT}"/net/keypairs +# git clone https://github.com/solana-labs/testnet-keypairs.git "${REPO_ROOT}"/net/keypairs + git clone git@github.com:solana-labs/testnet-keypairs.git "${REPO_ROOT}"/net/keypairs # If we have provider-specific keys (CoLo*, GCE*, etc) use them instead of generic val* if [[ -d "${REPO_ROOT}"/net/keypairs/"${CLOUD_PROVIDER}" ]]; then cp "${REPO_ROOT}"/net/keypairs/"${CLOUD_PROVIDER}"/* "${REPO_ROOT}"/net/keypairs/ diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 08519885495e02..65cbbf7b6d5e15 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -16,7 +16,7 @@ bs58 = "0.4.0" Inflector = "0.11.4" lazy_static = "1.4.0" log = "0.4.14" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" serde_json = "1.0.72" solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" } diff --git a/transaction-status/src/extract_memos.rs b/transaction-status/src/extract_memos.rs index cb6f7a94451b07..0fb0564805637c 100644 --- a/transaction-status/src/extract_memos.rs +++ b/transaction-status/src/extract_memos.rs @@ -76,7 +76,10 @@ mod test { solana_sdk::{ hash::Hash, instruction::CompiledInstruction, - message::{v0, MappedAddresses, MappedMessage, MessageHeader}, + message::{ + v0::{self, LoadedAddresses}, + MessageHeader, + }, }, }; @@ -125,7 +128,7 @@ mod test { let sanitized_message = SanitizedMessage::Legacy(message); assert_eq!(sanitized_message.extract_memos(), expected_memos); - let mapped_message = MappedMessage { + let sanitized_message = SanitizedMessage::V0(v0::LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 1, @@ -136,12 +139,11 @@ mod test { instructions: memo_instructions, ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![], readonly: vec![spl_memo_id_v1(), another_program_id, spl_memo_id_v3()], }, - }; - let sanitized_mapped_message = SanitizedMessage::V0(mapped_message); - assert_eq!(sanitized_mapped_message.extract_memos(), expected_memos); + }); + assert_eq!(sanitized_message.extract_memos(), expected_memos); } } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 543701d96aeea9..75147464c15bbe 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -56,7 +56,7 @@ jemallocator = {package = "tikv-jemallocator", version = "0.4.1", features = ["u [target."cfg(unix)".dependencies] libc = "0.2.109" -signal-hook = "0.3.10" +signal-hook = "0.3.12" sysctl = "0.4.3" [package.metadata.docs.rs] diff --git a/version/Cargo.toml b/version/Cargo.toml index 7c7b9222a85ab6..03b2b50f57bf50 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -11,7 +11,7 @@ edition = "2021" [dependencies] log = "0.4.14" -serde = "1.0.130" +serde = "1.0.131" serde_derive = "1.0.103" solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = 
"=1.10.0" }