diff --git a/core/lib/types/src/storage/writes.rs b/core/lib/types/src/storage/writes.rs deleted file mode 100644 index 9bdacebbbc3..00000000000 --- a/core/lib/types/src/storage/writes.rs +++ /dev/null @@ -1,71 +0,0 @@ -use crate::H256; -use serde::{Deserialize, Serialize}; -use zksync_basic_types::U256; - -/// In vm there are two types of writes Initial and Repeated. After the first write to the leaf, -/// we assign an index to it and in the future we should use index instead of full key. -/// It allows us to compress the data. -#[derive(Clone, Debug, Default, Eq, PartialEq)] -pub struct InitialStorageWrite { - pub index: u64, - pub key: U256, - pub value: H256, -} - -#[derive(Clone, Debug, Deserialize, Serialize, Default, Eq, PartialEq)] -pub struct RepeatedStorageWrite { - pub index: u64, - pub value: H256, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::commitment::serialize_commitments; - use crate::{H256, U256}; - - #[test] - fn calculate_hash_for_storage_writes() { - let initial_writes = vec![ - InitialStorageWrite { - index: 1, - key: U256::from(1u32), - value: H256::from([1; 32]), - }, - InitialStorageWrite { - index: 2, - key: U256::from(2u32), - value: H256::from([3; 32]), - }, - ]; - let bytes = serialize_commitments(&initial_writes); - - let expected_bytes = "00000002\ - 0100000000000000000000000000000000000000000000000000000000000000\ - 0101010101010101010101010101010101010101010101010101010101010101\ - 0200000000000000000000000000000000000000000000000000000000000000\ - 0303030303030303030303030303030303030303030303030303030303030303"; - let expected_bytes = hex::decode(expected_bytes).unwrap(); - assert_eq!(expected_bytes, bytes); - - let repeated_writes = vec![ - RepeatedStorageWrite { - index: 1, - value: H256::from([1; 32]), - }, - RepeatedStorageWrite { - index: 2, - value: H256::from([3; 32]), - }, - ]; - let bytes = serialize_commitments(&repeated_writes); - - let expected_bytes = "00000002\ - 0000000000000001\ - 
0101010101010101010101010101010101010101010101010101010101010101\
- 0000000000000002\
- 0303030303030303030303030303030303030303030303030303030303030303";
- let expected_bytes = hex::decode(expected_bytes).unwrap();
- assert_eq!(expected_bytes, bytes);
- }
-}
diff --git a/core/lib/types/src/storage/writes/compression.rs b/core/lib/types/src/storage/writes/compression.rs
new file mode 100644
index 00000000000..a325801b8a8
--- /dev/null
+++ b/core/lib/types/src/storage/writes/compression.rs
@@ -0,0 +1,394 @@
+use zksync_basic_types::U256;
+
+// Starting with version 1 for this compression strategy. Any modifications to our current strategy MUST
+// increment this number.
+pub const COMPRESSION_VERSION_NUMBER: u8 = 1;
+
+// Trait used to define functionality for different compression modes. Defines functions for
+// output size, what type of operation was performed, and value/extended compression.
+trait CompressionMode: 'static {
+    /// Id of the operation being performed.
+    fn operation_id(&self) -> usize;
+    /// Gets the diff and size of value
+    fn get_diff_and_size(&self) -> Option<(U256, usize)>;
+    /// Number of bytes the compressed value requires. None indicates that compression cannot be performed for the
+    /// given strategy.
+    fn output_size(&self) -> Option<usize> {
+        self.get_diff_and_size().map(|(_, size)| size)
+    }
+    /// Compress the value.
+    fn compress_value_only(&self) -> Option<Vec<u8>> {
+        let (diff, size) = self.get_diff_and_size()?;
+
+        let mut buffer = [0u8; 32];
+        diff.to_big_endian(&mut buffer);
+
+        let diff = buffer[(32 - size)..].to_vec();
+
+        Some(diff)
+    }
+    /// Concatenation of the metadata byte (5 bits for len and 3 bits for operation type) and the compressed value.
+    fn compress_extended(&self) -> Option<Vec<u8>> {
+        self.compress_value_only().map(|compressed_value| {
+            let mut res: Vec<u8> = vec![];
+            res.push(metadata_byte(
+                self.output_size().unwrap(),
+                self.operation_id(),
+            ));
+            res.extend(compressed_value);
+            res
+        })
+    }
+}
+
+struct CompressionByteAdd {
+    pub prev_value: U256,
+    pub new_value: U256,
+}
+
+impl CompressionMode for CompressionByteAdd {
+    fn operation_id(&self) -> usize {
+        1
+    }
+
+    fn get_diff_and_size(&self) -> Option<(U256, usize)> {
+        let diff = self.new_value.overflowing_sub(self.prev_value).0;
+        // Ceiling division
+        let size = (diff.bits() + 7) / 8;
+
+        if size >= 31 {
+            None
+        } else {
+            Some((diff, size))
+        }
+    }
+
+    fn output_size(&self) -> Option<usize> {
+        self.get_diff_and_size().map(|(_, size)| size)
+    }
+
+    fn compress_value_only(&self) -> Option<Vec<u8>> {
+        let (diff, size) = self.get_diff_and_size()?;
+
+        let mut buffer = [0u8; 32];
+        diff.to_big_endian(&mut buffer);
+
+        let diff = buffer[(32 - size)..].to_vec();
+
+        Some(diff)
+    }
+}
+
+struct CompressionByteSub {
+    pub prev_value: U256,
+    pub new_value: U256,
+}
+
+impl CompressionMode for CompressionByteSub {
+    fn operation_id(&self) -> usize {
+        2
+    }
+
+    fn get_diff_and_size(&self) -> Option<(U256, usize)> {
+        let diff = self.prev_value.overflowing_sub(self.new_value).0;
+        // Ceiling division
+        let size = (diff.bits() + 7) / 8;
+
+        if size >= 31 {
+            None
+        } else {
+            Some((diff, size))
+        }
+    }
+
+    fn output_size(&self) -> Option<usize> {
+        self.get_diff_and_size().map(|(_, size)| size)
+    }
+}
+
+struct CompressionByteTransform {
+    pub new_value: U256,
+}
+
+impl CompressionMode for CompressionByteTransform {
+    fn operation_id(&self) -> usize {
+        3
+    }
+
+    fn get_diff_and_size(&self) -> Option<(U256, usize)> {
+        // Ceiling division
+        let size = (self.new_value.bits() + 7) / 8;
+
+        if size >= 31 {
+            None
+        } else {
+            Some((self.new_value, size))
+        }
+    }
+
+    fn output_size(&self) -> Option<usize> {
+        self.get_diff_and_size().map(|(_, size)| size)
+    }
+}
+
+struct
CompressionByteNone {
+    pub new_value: U256,
+}
+
+impl CompressionByteNone {
+    fn new(new_value: U256) -> Self {
+        Self { new_value }
+    }
+}
+
+impl CompressionMode for CompressionByteNone {
+    fn operation_id(&self) -> usize {
+        0
+    }
+
+    fn get_diff_and_size(&self) -> Option<(U256, usize)> {
+        None
+    }
+
+    fn output_size(&self) -> Option<usize> {
+        Some(32)
+    }
+
+    fn compress_value_only(&self) -> Option<Vec<u8>> {
+        let mut buffer = [0u8; 32];
+        self.new_value.to_big_endian(&mut buffer);
+
+        Some(buffer.to_vec())
+    }
+
+    fn compress_extended(&self) -> Option<Vec<u8>> {
+        let mut res = [0u8; 33];
+
+        self.new_value.to_big_endian(&mut res[1..33]);
+        Some(res.to_vec())
+    }
+}
+
+fn default_passes(prev_value: U256, new_value: U256) -> Vec<Box<dyn CompressionMode>> {
+    vec![
+        Box::new(CompressionByteAdd {
+            prev_value,
+            new_value,
+        }),
+        Box::new(CompressionByteSub {
+            prev_value,
+            new_value,
+        }),
+        Box::new(CompressionByteTransform { new_value }),
+    ]
+}
+
+/// Generates the metadata byte for a given compression strategy.
+/// The metadata byte is structured as:
+/// First 5 bits: length of the compressed value
+/// Last 3 bits: operation id corresponding to the given compression used.
+fn metadata_byte(output_size: usize, operation_id: usize) -> u8 {
+    ((output_size << 3) | operation_id) as u8
+}
+
+/// For a given previous value and new value, try each compression strategy selecting the most
+/// efficient one. Using that strategy, generate the extended compression (metadata byte and compressed value).
+/// If none are found then use the full 32 byte new value with the metadata byte being `0x00`
+pub fn compress_with_best_strategy(prev_value: U256, new_value: U256) -> Vec<u8> {
+    let compressors = default_passes(prev_value, new_value);
+
+    compressors
+        .iter()
+        .filter_map(|e| e.compress_extended())
+        .min_by_key(|bytes| bytes.len())
+        .unwrap_or_else(|| {
+            CompressionByteNone::new(new_value)
+                .compress_extended()
+                .unwrap()
+        })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::ops::{Add, BitAnd, Shr, Sub};
+
+    #[test]
+    fn test_compress_addition() {
+        let initial_val = U256::from(255438218);
+        let final_val = U256::from(255438638);
+
+        let compress_add_strategy = CompressionByteAdd {
+            prev_value: initial_val,
+            new_value: final_val,
+        };
+
+        verify_sub_is_none(initial_val, final_val);
+
+        assert!(compress_add_strategy.output_size() == Some(2));
+
+        assert!(compress_add_strategy.compress_value_only() == Some(vec![1, 164]));
+
+        let compressed_val = compress_with_best_strategy(initial_val, final_val);
+
+        assert!(compressed_val == vec![17, 1, 164]);
+
+        let (metadata, compressed_val) = compressed_val.split_at(1);
+
+        let metadata = U256::from(metadata);
+        let operation = metadata.bitand(U256::from(7u8));
+        assert!(operation == U256::from(compress_add_strategy.operation_id()));
+        let len = metadata.shr(U256::from(3u8));
+        assert!(len == U256::from(2));
+
+        let compressed_val = U256::from(compressed_val);
+        assert!(
+            (((compressed_val.bits() as f64) / 8f64).ceil() as usize) == (len.as_u128() as usize)
+        );
+        assert!(initial_val.add(compressed_val) == final_val);
+    }
+
+    #[test]
+    fn test_compress_subtraction() {
+        let initial_val = U256::from(580481589);
+        let final_val = U256::from(229496100);
+
+        let compression_sub_strategy = CompressionByteSub {
+            prev_value: initial_val,
+            new_value: final_val,
+        };
+
+        verify_add_is_none(initial_val, final_val);
+
+        assert!(compression_sub_strategy.output_size() == Some(4));
+
+
assert!(compression_sub_strategy.compress_value_only() == Some(vec![20, 235, 157, 17])); + assert!(compression_sub_strategy.compress_extended() == Some(vec![34, 20, 235, 157, 17])); + + let compressed_value = compress_with_best_strategy(initial_val, final_val); + + assert!(compressed_value == vec![34, 20, 235, 157, 17]); + + let (metadata, compressed_val) = compressed_value.split_at(1); + + let metadata = U256::from(metadata); + let operation = metadata.bitand(U256::from(7u8)); + assert!(operation == U256::from(compression_sub_strategy.operation_id())); + let len = metadata.shr(U256::from(3u8)); + assert!(len == U256::from(4)); + + let compressed_val = U256::from(compressed_val); + assert!( + (((compressed_val.bits() as f64) / 8f64).ceil() as usize) == (len.as_u128() as usize) + ); + assert!(initial_val.sub(compressed_val) == final_val); + } + + #[test] + fn test_compress_transform() { + let initial_val = U256::from(580481589); + let final_val = U256::from(1337); + + let compressed_value = compress_with_best_strategy(initial_val, final_val); + assert!(compressed_value == vec![19, 5, 57]); + + let (metadata, compressed_val) = compressed_value.split_at(1); + + let metadata = U256::from(metadata); + let operation = metadata.bitand(U256::from(7u8)); + assert!(operation == U256::from(3)); + let len = metadata.shr(U256::from(3u8)); + assert!(len == U256::from(2)); + + let compressed_val = U256::from(compressed_val); + assert!( + (((compressed_val.bits() as f64) / 8f64).ceil() as usize) == (len.as_u128() as usize) + ); + assert!(compressed_val == final_val); + } + + #[test] + fn test_compress_transform_to_zero() { + let initial_val = U256::from(580481589); + let final_val = U256::from(0); + + let compressed_value = compress_with_best_strategy(initial_val, final_val); + assert!(compressed_value == vec![3]); + + let (metadata, compressed_val) = compressed_value.split_at(1); + + let metadata = U256::from(metadata); + let operation = metadata.bitand(U256::from(7u8)); + 
assert!(operation == U256::from(3)); + let len = metadata.shr(U256::from(3u8)); + assert!(len == U256::from(0)); + + let compressed_val = U256::from(compressed_val); + assert!( + (((compressed_val.bits() as f64) / 8f64).ceil() as usize) == (len.as_u128() as usize) + ); + assert!(compressed_val == final_val); + } + + #[test] + fn test_compress_transform_to_one_from_max() { + let initial_val = U256::MAX; + let final_val = U256::from(1); + + let compressed_value = compress_with_best_strategy(initial_val, final_val); + assert!(compressed_value == vec![9, 2]); + + let (metadata, compressed_val) = compressed_value.split_at(1); + + let metadata = U256::from(metadata); + let operation = metadata.bitand(U256::from(7u8)); + assert!(operation == U256::from(1)); + let len = metadata.shr(U256::from(3u8)); + assert!(len == U256::from(1)); + + let compressed_val = U256::from(compressed_val); + assert!( + (((compressed_val.bits() as f64) / 8f64).ceil() as usize) == (len.as_u128() as usize) + ); + } + + #[test] + fn test_compress_transform_to_u256_max() { + let initial_val = U256::from(0); + let final_val = U256::MAX; + + let compressed_value = compress_with_best_strategy(initial_val, final_val); + assert!(compressed_value == vec![10, 1]); + + let (metadata, compressed_val) = compressed_value.split_at(1); + + let metadata = U256::from(metadata); + let operation = metadata.bitand(U256::from(7u8)); + assert!(operation == U256::from(2)); + let len = metadata.shr(U256::from(3u8)); + assert!(len == U256::from(1)); + + let compressed_val = U256::from(compressed_val); + assert!((((compressed_val.bits() as f64) / 8f64).ceil() as usize) == 1); + } + + fn verify_add_is_none(initial_val: U256, final_val: U256) { + let compression_add_strategy = CompressionByteAdd { + prev_value: initial_val, + new_value: final_val, + }; + + assert!(compression_add_strategy.compress_value_only().is_none()); + assert!(compression_add_strategy.compress_extended().is_none()); + } + + fn 
verify_sub_is_none(initial_val: U256, final_val: U256) { + let compression_sub_strategy = CompressionByteSub { + prev_value: initial_val, + new_value: final_val, + }; + + assert!(compression_sub_strategy.compress_value_only().is_none()); + assert!(compression_sub_strategy.compress_extended().is_none()); + } +} diff --git a/core/lib/types/src/storage/writes/mod.rs b/core/lib/types/src/storage/writes/mod.rs new file mode 100644 index 00000000000..0826af82000 --- /dev/null +++ b/core/lib/types/src/storage/writes/mod.rs @@ -0,0 +1,509 @@ +use std::convert::TryInto; + +use crate::H256; +use serde::{Deserialize, Serialize}; +use zksync_basic_types::{Address, U256}; + +use self::compression::{compress_with_best_strategy, COMPRESSION_VERSION_NUMBER}; + +mod compression; + +const BYTES_PER_ENUMERATION_INDEX: u8 = 4; +// Total byte size of all fields in StateDiffRecord struct +// 20 + 32 + 32 +8 + 32 + 32 +const STATE_DIFF_RECORD_SIZE: usize = 156; + +// 2 * 136 - the size that allows for two keccak rounds. +pub const PADDED_ENCODED_STORAGE_DIFF_LEN_BYTES: usize = 272; + +/// In vm there are two types of writes Initial and Repeated. After the first write to the key, +/// we assign an index to it and in the future we should use index instead of full key. +/// It allows us to compress the data, as the full key would use 32 bytes, and the index can be +/// represented only as BYTES_PER_ENUMERATION_INDEX bytes +#[derive(Clone, Debug, Default, Eq, PartialEq)] +pub struct InitialStorageWrite { + pub index: u64, + pub key: U256, + pub value: H256, +} + +/// For repeated writes, we can substitute the 32 byte key for a BYTES_PER_ENUMERATION_INDEX byte index +/// representing its leaf index in the tree. 
+#[derive(Clone, Debug, Deserialize, Serialize, Default, Eq, PartialEq)]
+pub struct RepeatedStorageWrite {
+    pub index: u64,
+    pub value: H256,
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize, Default, Eq, PartialEq)]
+pub struct StateDiffRecord {
+    /// address state diff occured at
+    pub address: Address,
+    /// storage slot key updated
+    pub key: U256,
+    /// derived_key == Blake2s(bytes32(address), key)
+    pub derived_key: [u8; 32],
+    /// index in tree of state diff
+    pub enumeration_index: u64,
+    /// previous value
+    pub initial_value: U256,
+    /// updated value
+    pub final_value: U256,
+}
+
+impl StateDiffRecord {
+    // Serialize into byte representation.
+    fn encode(&self) -> [u8; STATE_DIFF_RECORD_SIZE] {
+        let mut encoding = [0u8; STATE_DIFF_RECORD_SIZE];
+        let mut offset = 0;
+        let mut end = 0;
+
+        end += 20;
+        encoding[offset..end].copy_from_slice(self.address.as_fixed_bytes());
+        offset = end;
+
+        end += 32;
+        self.key.to_big_endian(&mut encoding[offset..end]);
+        offset = end;
+
+        end += 32;
+        encoding[offset..end].copy_from_slice(&self.derived_key);
+        offset = end;
+
+        end += 8;
+        encoding[offset..end].copy_from_slice(&self.enumeration_index.to_be_bytes());
+        offset = end;
+
+        end += 32;
+        self.initial_value.to_big_endian(&mut encoding[offset..end]);
+        offset = end;
+
+        end += 32;
+        self.final_value.to_big_endian(&mut encoding[offset..end]);
+        offset = end;
+
+        debug_assert_eq!(offset, encoding.len());
+
+        encoding
+    }
+
+    pub fn encode_padded(&self) -> [u8; PADDED_ENCODED_STORAGE_DIFF_LEN_BYTES] {
+        let mut extended_state_diff_encoding = [0u8; PADDED_ENCODED_STORAGE_DIFF_LEN_BYTES];
+        let packed_encoding = self.encode();
+        extended_state_diff_encoding[0..packed_encoding.len()].copy_from_slice(&packed_encoding);
+
+        extended_state_diff_encoding
+    }
+
+    /// Decode bytes into StateDiffRecord
+    pub fn try_from_slice(data: &[u8]) -> Option<Self> {
+        if data.len() == 156 {
+            Some(Self {
+                address: Address::from_slice(&data[0..20]),
+                key:
U256::from(&data[20..52]),
+                derived_key: data[52..84].try_into().unwrap(),
+                enumeration_index: u64::from_be_bytes(data[84..92].try_into().unwrap()),
+                initial_value: U256::from(&data[92..124]),
+                final_value: U256::from(&data[124..156]),
+            })
+        } else {
+            None
+        }
+    }
+
+    /// compression follows the following algo:
+    /// 1. if repeated write:
+    ///     entry <- enumeration_index || compressed value
+    /// 2. if initial write:
+    ///     entry <- blake2(bytes32(address), key) || compressed value
+    /// size:
+    ///     initial: max of 65 bytes
+    ///     repeated: max of 38 bytes
+    ///     before: 156 bytes for each
+    pub fn compress(&self) -> Vec<u8> {
+        let mut comp_state_diff = match self.enumeration_index {
+            0 => self.derived_key.to_vec(),
+            enumeration_index if enumeration_index <= u32::MAX.into() => {
+                (self.enumeration_index as u32).to_be_bytes().to_vec()
+            }
+            enumeration_index => panic!("enumeration_index is too large: {}", enumeration_index),
+        };
+
+        comp_state_diff.extend(compress_with_best_strategy(
+            self.initial_value,
+            self.final_value,
+        ));
+
+        comp_state_diff
+    }
+}
+
+/// Compresses a vector of state diff records according to the following:
+/// num_initial writes (u16) || compressed initial writes || compressed repeated writes
+pub fn compress_state_diffs(mut state_diffs: Vec<StateDiffRecord>) -> Vec<u8> {
+    let mut res = vec![];
+
+    // IMPORTANT: Sorting here is determined by the order expected in the circuits.
+    state_diffs.sort_by_key(|rec| (rec.address, rec.key));
+
+    let (initial_writes, repeated_writes): (Vec<_>, Vec<_>) = state_diffs
+        .iter()
+        .partition(|rec| rec.enumeration_index == 0);
+
+    res.extend((initial_writes.len() as u16).to_be_bytes());
+    for state_diff in initial_writes {
+        res.extend(state_diff.compress());
+    }
+
+    for state_diff in repeated_writes {
+        res.extend(state_diff.compress());
+    }
+
+    prepend_header(res)
+}
+
+/// Adds the header to the beginning of the compressed state diffs so it can be used as part of the overall
+/// pubdata.
Need to prepend: compression version || number of compressed state diffs || number of bytes used for
+/// enumeration index.
+fn prepend_header(compressed_state_diffs: Vec<u8>) -> Vec<u8> {
+    let mut res = vec![0u8; 5];
+    res[0] = COMPRESSION_VERSION_NUMBER;
+
+    res[1..4].copy_from_slice(&(compressed_state_diffs.len() as u32).to_be_bytes()[1..4]);
+
+    res[4] = BYTES_PER_ENUMERATION_INDEX;
+
+    res.extend(compressed_state_diffs);
+
+    res.to_vec()
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ops::{Add, Sub};
+    use std::str::FromStr;
+
+    use super::*;
+    use crate::commitment::serialize_commitments;
+    use crate::{H256, U256};
+
+    #[test]
+    fn calculate_hash_for_storage_writes() {
+        let initial_writes = vec![
+            InitialStorageWrite {
+                index: 1,
+                key: U256::from(1u32),
+                value: H256::from([1; 32]),
+            },
+            InitialStorageWrite {
+                index: 2,
+                key: U256::from(2u32),
+                value: H256::from([3; 32]),
+            },
+        ];
+        let bytes = serialize_commitments(&initial_writes);
+
+        let expected_bytes = "00000002\
+            0100000000000000000000000000000000000000000000000000000000000000\
+            0101010101010101010101010101010101010101010101010101010101010101\
+            0200000000000000000000000000000000000000000000000000000000000000\
+            0303030303030303030303030303030303030303030303030303030303030303";
+        let expected_bytes = hex::decode(expected_bytes).unwrap();
+        assert_eq!(expected_bytes, bytes);
+
+        let repeated_writes = vec![
+            RepeatedStorageWrite {
+                index: 1,
+                value: H256::from([1; 32]),
+            },
+            RepeatedStorageWrite {
+                index: 2,
+                value: H256::from([3; 32]),
+            },
+        ];
+        let bytes = serialize_commitments(&repeated_writes);
+
+        let expected_bytes = "00000002\
+            0000000000000001\
+            0101010101010101010101010101010101010101010101010101010101010101\
+            0000000000000002\
+            0303030303030303030303030303030303030303030303030303030303030303";
+        let expected_bytes = hex::decode(expected_bytes).unwrap();
+        assert_eq!(expected_bytes, bytes);
+    }
+
+    #[test]
+    fn test_compression() {
+        let initial_add = StateDiffRecord {
+            address:
Address::from_str("0x09610c49cfe4a0509dbe319886eb0cfc01f2cfd1").unwrap(), + key: U256::from(1u8), + derived_key: [1u8; 32], + enumeration_index: 0u64, + initial_value: U256::default(), + final_value: U256::from(64u8), + }; + + let initial_sub = StateDiffRecord { + address: Address::from_str("0x1c915d9b098ecf548d978ef9931a09e4e2167fab").unwrap(), + key: U256::from(2u8), + derived_key: [2u8; 32], + enumeration_index: 0u64, + initial_value: U256::from(64u8), + final_value: U256::from(20u8), + }; + + let initial_transform = StateDiffRecord { + address: Address::from_str("0x3859d669dcc980c3ba68806a8d49bbc998da781d").unwrap(), + key: U256::from(3u8), + derived_key: [3u8; 32], + enumeration_index: 0u64, + initial_value: U256::MAX, + final_value: U256::from(255u8), + }; + + let initial_none = StateDiffRecord { + address: Address::from_str("0x3f441bf60f4d8f7704b262f41b7b015c21623f46").unwrap(), + key: U256::from(5u8), + derived_key: [4u8; 32], + enumeration_index: 0u64, + initial_value: U256::MAX / 2, + final_value: U256::MAX, + }; + + let repeated_add = StateDiffRecord { + address: Address::from_str("0x5e52dd6d60c2f89b5ced2bddf53794f0d8c58254").unwrap(), + key: U256::from(1u8), + derived_key: [5u8; 32], + enumeration_index: 1u64, + initial_value: U256::default(), + final_value: U256::from(64u8), + }; + + let repeated_sub = StateDiffRecord { + address: Address::from_str("0x9bac6b5cb15aa5f80f9480af6b530ecd93a30a41").unwrap(), + key: U256::from(2u8), + derived_key: [6u8; 32], + enumeration_index: 2u64, + initial_value: U256::from(64u8), + final_value: U256::from(20u8), + }; + + let repeated_transform = StateDiffRecord { + address: Address::from_str("0xaf21caa263eefa213301522c1062d22a890b2b6d").unwrap(), + key: U256::from(3u8), + derived_key: [7u8; 32], + enumeration_index: 3u64, + initial_value: U256::MAX, + final_value: U256::from(255u8), + }; + + let repeated_none = StateDiffRecord { + address: Address::from_str("0xb21058b7c589c49871a295575418e9e3edaf44b0").unwrap(), + key: 
U256::from(5u8), + derived_key: [8u8; 32], + enumeration_index: 5u64, + initial_value: U256::MAX / 2, + final_value: U256::MAX, + }; + + let storage_diffs = vec![ + initial_add, + initial_sub, + initial_transform, + initial_none, + repeated_add, + repeated_sub, + repeated_transform, + repeated_none, + ]; + + let compressed_state_diffs = compress_state_diffs(storage_diffs.clone()); + + let mut storage_diffs = storage_diffs.clone(); + storage_diffs.sort_by_key(|rec| (rec.address, rec.key)); + + let (header, compressed_state_diffs) = compressed_state_diffs.split_at(5); + + assert!(header[0] == COMPRESSION_VERSION_NUMBER); + assert!(U256::from(&header[1..4]) == U256::from(compressed_state_diffs.len())); + assert!(header[4] == 4u8); + + let (num_initial, compressed_state_diffs) = compressed_state_diffs.split_at(2); + assert!(num_initial[0] == 0u8); + assert!(num_initial[1] == 4u8); + + let (initial, repeated): (Vec<_>, Vec<_>) = + storage_diffs.iter().partition(|v| v.enumeration_index == 0); + assert!((initial.len() as u8) == num_initial[1]); + + // Initial + let (key, compressed_state_diffs) = compressed_state_diffs.split_at(32); + assert!(U256::from(key) == U256::from(initial[0].derived_key)); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + let len = (metadata >> 3) as usize; + verify_value( + initial[0].initial_value, + initial[0].final_value, + operation, + &compressed_state_diffs.to_vec()[..len], + ); + let compressed_state_diffs = &compressed_state_diffs[len..]; + + let (key, compressed_state_diffs) = compressed_state_diffs.split_at(32); + assert!(U256::from(key) == U256::from(initial[1].derived_key)); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + let len = (metadata >> 3) as usize; + verify_value( + initial[1].initial_value, + initial[1].final_value, + operation, + 
&compressed_state_diffs.to_vec()[..len], + ); + let compressed_state_diffs = &compressed_state_diffs[len..]; + + let (key, compressed_state_diffs) = compressed_state_diffs.split_at(32); + assert!(U256::from(key) == U256::from(initial[2].derived_key)); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + let len = (metadata >> 3) as usize; + verify_value( + initial[2].initial_value, + initial[2].final_value, + operation, + &compressed_state_diffs.to_vec()[..len], + ); + let compressed_state_diffs = &compressed_state_diffs[len..]; + + let (key, compressed_state_diffs) = compressed_state_diffs.split_at(32); + assert!(U256::from(key) == U256::from(initial[3].derived_key)); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + verify_value( + initial[3].initial_value, + initial[3].final_value, + operation, + &compressed_state_diffs.to_vec()[..32], + ); + let compressed_state_diffs = &compressed_state_diffs[32..]; + + // Repeated + let (enum_index, compressed_state_diffs) = compressed_state_diffs.split_at(4); + assert!((enum_index[3] as u64) == repeated[0].enumeration_index); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + let len = (metadata >> 3) as usize; + verify_value( + repeated[0].initial_value, + repeated[0].final_value, + operation, + &compressed_state_diffs.to_vec()[..len], + ); + let compressed_state_diffs = &compressed_state_diffs[len..]; + + let (enum_index, compressed_state_diffs) = compressed_state_diffs.split_at(4); + assert!((enum_index[3] as u64) == repeated[1].enumeration_index); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + let len = (metadata >> 3) as usize; + verify_value( + 
repeated[1].initial_value, + repeated[1].final_value, + operation, + &compressed_state_diffs.to_vec()[..len], + ); + let compressed_state_diffs = &compressed_state_diffs[len..]; + + let (enum_index, compressed_state_diffs) = compressed_state_diffs.split_at(4); + assert!((enum_index[3] as u64) == repeated[2].enumeration_index); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + let len = (metadata >> 3) as usize; + verify_value( + repeated[2].initial_value, + repeated[2].final_value, + operation, + &compressed_state_diffs.to_vec()[..len], + ); + let compressed_state_diffs = &compressed_state_diffs[len..]; + + let (enum_index, compressed_state_diffs) = compressed_state_diffs.split_at(4); + assert!((enum_index[3] as u64) == repeated[3].enumeration_index); + let (metadata, compressed_state_diffs) = compressed_state_diffs.split_at(1); + let metadata = metadata[0]; + let operation = metadata & 7; + verify_value( + repeated[3].initial_value, + repeated[3].final_value, + operation, + &compressed_state_diffs.to_vec()[..32], + ); + let compressed_state_diffs = &compressed_state_diffs[32..]; + + assert!(compressed_state_diffs.is_empty()); + } + + #[test] + fn test_encoding() { + let state_diff = StateDiffRecord { + address: Address::from_str("0x09610c49cfe4a0509dbe319886eb0cfc01f2cfd1").unwrap(), + key: U256::from(1u8), + derived_key: [1u8; 32], + enumeration_index: 0u64, + initial_value: U256::default(), + final_value: U256::from(64u8), + }; + + let encoded = state_diff.encode(); + let encoded_state_diff = hex::encode(encoded); + + let expected_encoding = "09610c49cfe4a0509dbe319886eb0cfc01f2cfd1000000000000\ + 000000000000000000000000000000000000000000000000000101010101010101010101010101010\ + 101010101010101010101010101010101010000000000000000000000000000000000000000000000\ + 000000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000040" + 
.to_string(); + + assert_eq!(encoded_state_diff, expected_encoding); + + let encode_padded = state_diff.encode_padded(); + let encoded_padded_state_diff = hex::encode(encode_padded); + + let expected_padded_encoding = "09610c49cfe4a0509dbe319886eb0cfc01f2cfd100000\ + 000000000000000000000000000000000000000000000000000000000010101010101010101010101\ + 010101010101010101010101010101010101010101000000000000000000000000000000000000000\ + 000000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 000000000000000000000040000000000000000000000000000000000000000000000000000000000\ + 000000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 000000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000" + .to_string(); + + assert_eq!(encoded_padded_state_diff, expected_padded_encoding); + } + + fn verify_value( + initial_value: U256, + final_value: U256, + operation: u8, + compressed_value: &[u8], + ) { + if operation == 0 || operation == 3 { + assert!(U256::from(compressed_value) == final_value); + } else if operation == 1 { + assert!(initial_value.add(U256::from(compressed_value)) == final_value); + } else if operation == 2 { + assert!(initial_value.sub(U256::from(compressed_value)) == final_value); + } else { + panic!("invalid operation id"); + } + } +}