From dffc8cf2892947fbf550d08a8d6c2c3eabbee7cd Mon Sep 17 00:00:00 2001 From: Hero Bird Date: Wed, 20 May 2020 13:42:24 +0200 Subject: [PATCH] Implement storage (revision 2) module (#311) * [core] apply rustfmt * [core] fix warnings related to Wasm compilation * [core] add SpreadLayout impl for DynamicAllocator * [core] remove unused method on Bits256RefMut * [core] apply rustfmt * [core] remove some unneeded ToDo comments The ToDo comments have been moved to the associated PR description. * [core] transit to new traits for LazyIndexMap * [core] transit to new traits for storage::Vec * [core] transit to new traits for storage::Stash * [core] transit to new traits for storage::Bitvec * [core] transit to new traits for dynamic storage allocator * [core] transit to new traits for LazyHashMap * [core] transit to new traits for storage::HashMap * [core] apply rustfmt * [core] remove old storage traits for storage::Pack * [core] transit to new storage traits for LazyArray * [core] transit to new storage traits for storage::SmallVec * [core] transit to new storage traits for the rest of the lazy abstractions * [core] transit to new storage traits for storage::Box * [core] fix compile error in Drop impl for storage::Box * [core] remove old storage trait impls for Bits256 * [core] remove old storage trait impls for dynamic storage allocator * [core] apply rustfmt * [core] remove old traits module * [core] replace KeyPtr2 usage with KeyPtr * [core] rename traits2 module to traits * [core] apply rustfmt * [core] add Drop impl to storage::Vec * [core] don't clear storage if key is None for storage::Vec * [core] impl Drop for storage::Stash * [core] simplify trait bounds for LazyHashMap * [core] impl Drop for storage::HashMap * [core] add Drop impl for storage::SmallVec * [core] add bare_trait_objects lint as deny * [core] fix minor formatting issue * [core] add storage2::Memory utility * [core] remove usage of storage::Pack from internals of storage::Bitvec * [core] remove usage of storage::Pack from internals of storage::Stash * [core] remove usage of storage::Pack from internals of storage::HashMap * [core] add better Debug impl for LazyIndexMap The improved impl shows the cached entries which were hidden in the old impl behind the UnsafeCell. * [core] apply rustfmt * [core] improve Debug impl for LazyHashMap It now displays its internal cached entries. * [core] improve Debug impl for lazy::Entry * [core] improve Debug impl for LazyCell * [core] improve Debug impl for LazyArray * [core] apply rustfmt * [core] add REQUIRES_DEEP_CLEAN_UP to SpreadLayout With this we have a way for types to perform an optimized storage clean-up without having to load them in case they do not require a deep clean-up of their state.
* [core] implement REQUIRES_DEEP_CLEAN_UP for all built-in types * [core] add non-storage trait impls for storage::HashMap * [core] simplify trait bounds of SpreadLayout impl for storage::HashMap * [core] fix bug in Wasm compilation * [core] add initial unit tests for storage::HashMap * [core] allow loading without key from LazyHashMap * [core] merge storage::HashMap insert and insert_get and fix bugs with it * [core] add new unit test for storage::HashMap * [core] generally allow lazy loading without keys for lazy abstractions * [core] apply rustfmt * [core] remove outdated docs of storage::HashMap::insert * [core] add unit test for storage::HashMap::contains_key * [core] apply rustfmt to storage::HashMap unit tests * [core] add unit test for storage::HashMap::{get, get_mut} * [core] fix the doc comment of storage::HashMap::remove * [core] add unit test for storage::HashMap::take * [core] add unit test for storage::HashMap::insert * [core] remove storage::HashMap::remove The API cannot be implemented properly at this point. * [core] implement Iterator::count efficiently for storage::HashMap iterators * [core] add prelude trait impls for crypto hashers * [core] add unit test for storage::HashMap::iter * [core] remove outdated doc comment line * [core] add doc comments to forwarding-to-packed utility functions * [core] add some high-level documentation for some root storage2 modules * [core] add some more high-level docs * [core] add return value to storage::Stash::defrag The returned value tells the caller how many storage cells have actually been freed by the routine. * [core] add return value to storage::HashMap::defrag * [core] add unit test for storage::HashMap::{values, values_mut} Also add tests for Iterator::size_hint impls. * [core] add tests for Iterator::size_hint impls of storage::Vec * [core] add unit test for storage::HashMap::keys * [core] add unit test for storage::HashMap::defrag * [core] add unit tests for LazyIndexMap * [core] remove lazy::Entry::take_value * [core] remove LazyIndexMap::take * [core] remove Entry::set_state Uses have been replaced with Entry::replace_state.
* [core] remove Lazy{Array, HashMap}::take method Replace uses with Lazy{Array, HashMap}::put_get(.., None) * [core] add unit test for LazyIndexMap::put * [core] add unit test for LazyIndexMap::swap * [core] apply rustfmt * [core] cover Default impl of LazyIndexMap with unit test * [core] move imports to top for LazyIndexMap .rs file * [core] refactor lazy::Entry internals a bit * [core] add unit tests for Index impl of storage::Vec * [core] add unit tests for Index impl of storage::SmallVec * [core] add tests for Index impl of StorageStash * [core] improve panic message for Index{Mut} impl of storage::Stash * [core] add unit tests for Index{Mut} impl of storage::Stash * [core] extend unit test for storage::Stash::get * [core] disable certain tests in --release mode testing * [core] add unit test for LazyIndexMap::{get, get_mut} * [core] add some unit tests for LazyArray * [core] add some more unit tests for LazyArray * [core] add some more unit tests to LazyArray * [core] apply rustfmt * [core] add unit tests for LazyCell * [core] add unit test for SpreadLayout impl of LazyCell * [core] extend SpreadLayout test for LazyCell * [core] extend SpreadLayout test to also cover the clear_spread impl * [core] rename unit test for LazyCell * [core] fix clippy warning * [core] fix some LazyCell cache entry in lazy form * [core] add new unit test for Debug impl of lazily initialized LazyCell * [core] add more unit tests for lazily initialized LazyCell * [core] implement shallow clean-up of storage via LazyCell * [core] test that a lazily loaded LazyCell preserves its cached value * [core] apply rustfmt * [core] add additional check for LazyCell cache preservation * [core] fix bug in LazyIndexMap::clear_packed_at * [core] add unit test for SpreadLayout impl of LazyIndexMap * [core] fix bug in LazyArray::clear_packed_at * [core] add unit test for SpreadLayout impl of LazyArray * [core] make LazyArray::capacity and SmallVec::capacity more user friendly * [core] remove unnecessary trait bounds * [core] remove more unnecessary trait bounds * [core] add initial unit test for LazyHashMap * [core] add unit test for LazyHashMap::key_at * [core] apply rustfmt * [core] indent a block in test * [core] add unit test for LazyHashMap::put_get * [core] add unit test for LazyHashMap::{get, get_mut} * [core] add unit test for LazyHashMap::put * [core] add unit test for LazyHashMap::swap * [core] make hash builders reset their accumulator upon finalization * [core] add unit test for SpreadLayout impl of LazyHashMap * [core] fix unit test for LazyHashMap::key_at Also add prefix to hash-key calculation.
* [core] add unit tests for SpreadLayout impl of storage::Vec * [core] add unit tests for SpreadLayout impl of storage::SmallVec * [core] add unit tests for SpreadLayout impl of storage::Stash * [core] apply rustfmt * [core] add unit tests for SpreadLayout impl of storage::HashMap * [core] add unit test for DynamicAllocation::key * [core] add unit tests for SpreadLayout impl of storage::Bitvec * [core] fix LazyCell::get unit test * [core] remove unused dependencies from Cargo.toml * [core] add missing docs for storage::{Stash, HashMap} * [core] deny missing docs of public items * [core] add Debug impl to storage::Box * [core] add unit tests for storage::Box * [core] remove internal Pack::{get, get_mut} methods * [core] fix bug in storage::Memory::{get, get_mut} API * [core] add unit tests for storage::Pack * [core] improve storage::Pack unit tests * [core] experimental inline(never) for debug_assertions compilation * [core] apply rustfmt * [core] remove experimental #[inline(never)] * [core] add unit test for Default impl of storage::Pack * [core] add unit tests for storage::Memory * [core] fix a unit test for storage::Box The storage::Box tests did not reset the dynamic storage allocator instance in between their runs which caused them to have side effects on each other if run single-threaded. * [core] fix minor bug in BitRefMut utility of storage::Bitvec * [core] cover storage::Bitvec::get_mut in get_works unit test * [core] add unit tests for BitRefMut utility of storage::Bitvec * [core] apply rustfmt * [core] improve panic message when encountering a double free * [core] adjust double free unit test for storage::Box * [core] improve double free of dynamic storage panic message * [core] apply rustfmt * [core] merge Bits256Ref and Bits256RefMut into ChunkRef * [core] split access.rs into bitref.rs and bitsref.rs * [core] apply rustfmt * [core] replace transmute with pointer cast Thanks clippy! * [core] add comment to explain repr(C) * [core] add PartialEq and Eq impls to BitRefMut * [core] add unit tests for ChunkRef * [core] add failure unit tests for dynamic storage allocator * [core] fix bug in SpreadLayout impl of Option * [core] add unit test for dynamic storage allocator SpreadLayout impl * [core] fix SpreadLayout impl for Result * [core] fix yet another bug in SpreadLayout impl of Result * [core] move forward_supported_array_lens macro to usage site * [core] refactor some code duplication with clear_spread_root_opt * [core] fix doc comment in storage::Pack * [core] remove some unused unsafe blocks They are going to be re-introduced once the unsafe_op_in_unsafe_fn lint has been implemented in the Rust compiler.
* fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * [core] remove usage of storage::Pack in dynamic storage allocator * [core] improve panic message in Lazy::{get, get_mut} * [core] add test for SpreadLayout::clear_spread impl of dynamic storage alloc * [core] remove code dupe * [core] refactor clear_spread_root_opt utility function * [core] implement SpreadLayout::REQUIRES_DEEP_CLEAN_UP for some types * [core] move from bool to u8 for Option and Result SpreadLayout impls * [core] fix bug in SpreadLayout impl for Option * fix typo Co-authored-by: Andrew Jones * [core] update LazyCell SAFETY comment * [core] update Entry docs * [core] remove unneeded code in lazy::Entry::pull_packed_root * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * [core] remove commented out code * [core] add new unit test for dynamic storage allocator * [core] refactor global storage allocator initialization routines * [core] fix Wasm compilation errors * [core] apply rustfmt * [core] suppress bad clippy lint * [core] remove dead code * [core] improve call_setup_works test * [core] fix bug in initialize_for for off-chain env * [core] initial steps to factor out BitStash from DynamicAllocator * [core] apply rustfmt * [core] add Derive impl for BitStash * [core] make use of storage::BitStash from dynamic storage allocator * [core] add unit tests for storage::BitStash * [core] apply rustfmt * [core] remove invalid TODO comment * [core] fix some out of bounds panic messages * [core] remove deliberate memory leak in test suite * [core] fix build failure for Wasm target * [core] add unit tests for SpreadLayout & PackedLayout impls of primitives * [core] add unit tests for packed layout explicitly * Fix some typos * Add simple double ended iter test * typos * comment typos * split hashmap to hash map in comments * fix typo Co-authored-by: Andrew Jones * fix typo in unreachable!
message Co-authored-by: Andrew Jones * fix typo in expects message Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones * [core] add more comments to storage2::HashMap::defrag * [core] make early return for storage2::HashMap::defrag for limit = 0 * [core] improve storage2::HashMap::contains_key implementation * [core] rename new_vec_works test to new_works * [core] apply Andrew's suggestions (and more) * [core] fix typo: increase -> decrease * [core] add panic to Bitvec::push in case it reached its maximum capacity * [core] update comments for storage bit stash * [core] add more explanation comments * [core] some more renamings of test internals * improve reasoning Co-authored-by: Andrew Jones * fix typo Co-authored-by: Andrew Jones Co-authored-by: Andrew Jones --- README.md | 8 +- core/Cargo.toml | 7 +- core/src/env/call/mod.rs | 2 + core/src/env/engine/off_chain/test_api.rs | 6 + core/src/hash/builder.rs | 12 +- core/src/hash/hasher.rs | 5 +- core/src/lib.rs | 3 + core/src/storage2/alloc/allocation.rs | 87 ++ core/src/storage2/alloc/allocator.rs | 83 ++ core/src/storage2/alloc/init.rs | 160 ++++ core/src/storage2/alloc/mod.rs | 101 ++ core/src/storage2/alloc/tests.rs | 206 ++++ .../storage2/collections/bitstash/counts.rs | 104 ++ core/src/storage2/collections/bitstash/mod.rs | 181 ++++ .../storage2/collections/bitstash/storage.rs | 78 ++ .../storage2/collections/bitstash/tests.rs | 167 ++++ .../src/storage2/collections/bitvec/bitref.rs | 196 ++++ .../storage2/collections/bitvec/bits256.rs | 380 ++++++++ .../storage2/collections/bitvec/bitsref.rs | 206 ++++ core/src/storage2/collections/bitvec/impls.rs | 75 ++ core/src/storage2/collections/bitvec/iter.rs | 310 ++++++ core/src/storage2/collections/bitvec/mod.rs | 307 ++++++ .../storage2/collections/bitvec/storage.rs | 75 ++ core/src/storage2/collections/bitvec/tests.rs | 215 +++++ core/src/storage2/collections/boxed/impls.rs | 144 +++ core/src/storage2/collections/boxed/mod.rs | 120 +++ .../src/storage2/collections/boxed/storage.rs | 94 ++ core/src/storage2/collections/boxed/tests.rs | 218 +++++ .../src/storage2/collections/hashmap/impls.rs | 179 ++++ core/src/storage2/collections/hashmap/iter.rs | 419 ++++++++ core/src/storage2/collections/hashmap/mod.rs | 336 +++++++ .../storage2/collections/hashmap/storage.rs | 100 ++ .../src/storage2/collections/hashmap/tests.rs | 328 +++++++ core/src/storage2/collections/mod.rs | 55 ++ .../storage2/collections/smallvec/impls.rs | 139 +++ .../src/storage2/collections/smallvec/iter.rs | 233 +++++ core/src/storage2/collections/smallvec/mod.rs | 325 +++++++ .../storage2/collections/smallvec/storage.rs | 50 + .../storage2/collections/smallvec/tests.rs | 399 ++++++++ core/src/storage2/collections/stash/impls.rs | 164 ++++ core/src/storage2/collections/stash/iter.rs | 325 +++++++ core/src/storage2/collections/stash/mod.rs | 542 +++++++++++ .../src/storage2/collections/stash/storage.rs | 124 +++ core/src/storage2/collections/stash/tests.rs | 669 +++++++++++++ core/src/storage2/collections/vec/impls.rs | 139 +++ core/src/storage2/collections/vec/iter.rs | 215 +++++ core/src/storage2/collections/vec/mod.rs | 306 ++++++ core/src/storage2/collections/vec/storage.rs | 50 + core/src/storage2/collections/vec/tests.rs | 394 ++++++++ core/src/storage2/lazy/entry.rs | 276 ++++++ core/src/storage2/lazy/lazy_array.rs | 893 ++++++++++++++++++ core/src/storage2/lazy/lazy_cell.rs | 403 ++++++++ core/src/storage2/lazy/lazy_hmap.rs | 893 ++++++++++++++++++ 
core/src/storage2/lazy/lazy_imap.rs | 732 ++++++++++++++ core/src/storage2/lazy/mod.rs | 273 ++++++ core/src/storage2/memory.rs | 282 ++++++ core/src/storage2/mod.rs | 33 + core/src/storage2/pack.rs | 413 ++++++++ core/src/storage2/traits/impls/arrays.rs | 90 ++ core/src/storage2/traits/impls/collections.rs | 209 ++++ core/src/storage2/traits/impls/mod.rs | 151 +++ core/src/storage2/traits/impls/prims.rs | 342 +++++++ core/src/storage2/traits/impls/tuples.rs | 102 ++ core/src/storage2/traits/keyptr.rs | 49 + core/src/storage2/traits/mod.rs | 166 ++++ core/src/storage2/traits/optspec.rs | 132 +++ core/src/storage2/traits/packed.rs | 46 + core/src/storage2/traits/spread.rs | 84 ++ primitives/src/key.rs | 6 + 69 files changed, 14600 insertions(+), 16 deletions(-) create mode 100644 core/src/storage2/alloc/allocation.rs create mode 100644 core/src/storage2/alloc/allocator.rs create mode 100644 core/src/storage2/alloc/init.rs create mode 100644 core/src/storage2/alloc/mod.rs create mode 100644 core/src/storage2/alloc/tests.rs create mode 100644 core/src/storage2/collections/bitstash/counts.rs create mode 100644 core/src/storage2/collections/bitstash/mod.rs create mode 100644 core/src/storage2/collections/bitstash/storage.rs create mode 100644 core/src/storage2/collections/bitstash/tests.rs create mode 100644 core/src/storage2/collections/bitvec/bitref.rs create mode 100644 core/src/storage2/collections/bitvec/bits256.rs create mode 100644 core/src/storage2/collections/bitvec/bitsref.rs create mode 100644 core/src/storage2/collections/bitvec/impls.rs create mode 100644 core/src/storage2/collections/bitvec/iter.rs create mode 100644 core/src/storage2/collections/bitvec/mod.rs create mode 100644 core/src/storage2/collections/bitvec/storage.rs create mode 100644 core/src/storage2/collections/bitvec/tests.rs create mode 100644 core/src/storage2/collections/boxed/impls.rs create mode 100644 core/src/storage2/collections/boxed/mod.rs create mode 100644 core/src/storage2/collections/boxed/storage.rs create mode 100644 core/src/storage2/collections/boxed/tests.rs create mode 100644 core/src/storage2/collections/hashmap/impls.rs create mode 100644 core/src/storage2/collections/hashmap/iter.rs create mode 100644 core/src/storage2/collections/hashmap/mod.rs create mode 100644 core/src/storage2/collections/hashmap/storage.rs create mode 100644 core/src/storage2/collections/hashmap/tests.rs create mode 100644 core/src/storage2/collections/mod.rs create mode 100644 core/src/storage2/collections/smallvec/impls.rs create mode 100644 core/src/storage2/collections/smallvec/iter.rs create mode 100644 core/src/storage2/collections/smallvec/mod.rs create mode 100644 core/src/storage2/collections/smallvec/storage.rs create mode 100644 core/src/storage2/collections/smallvec/tests.rs create mode 100644 core/src/storage2/collections/stash/impls.rs create mode 100644 core/src/storage2/collections/stash/iter.rs create mode 100644 core/src/storage2/collections/stash/mod.rs create mode 100644 core/src/storage2/collections/stash/storage.rs create mode 100644 core/src/storage2/collections/stash/tests.rs create mode 100644 core/src/storage2/collections/vec/impls.rs create mode 100644 core/src/storage2/collections/vec/iter.rs create mode 100644 core/src/storage2/collections/vec/mod.rs create mode 100644 core/src/storage2/collections/vec/storage.rs create mode 100644 core/src/storage2/collections/vec/tests.rs create mode 100644 core/src/storage2/lazy/entry.rs create mode 100644 core/src/storage2/lazy/lazy_array.rs create mode 
100644 core/src/storage2/lazy/lazy_cell.rs create mode 100644 core/src/storage2/lazy/lazy_hmap.rs create mode 100644 core/src/storage2/lazy/lazy_imap.rs create mode 100644 core/src/storage2/lazy/mod.rs create mode 100644 core/src/storage2/memory.rs create mode 100644 core/src/storage2/mod.rs create mode 100644 core/src/storage2/pack.rs create mode 100644 core/src/storage2/traits/impls/arrays.rs create mode 100644 core/src/storage2/traits/impls/collections.rs create mode 100644 core/src/storage2/traits/impls/mod.rs create mode 100644 core/src/storage2/traits/impls/prims.rs create mode 100644 core/src/storage2/traits/impls/tuples.rs create mode 100644 core/src/storage2/traits/keyptr.rs create mode 100644 core/src/storage2/traits/mod.rs create mode 100644 core/src/storage2/traits/optspec.rs create mode 100644 core/src/storage2/traits/packed.rs create mode 100644 core/src/storage2/traits/spread.rs diff --git a/README.md b/README.md index 9dfb163271a..361b73f8b44 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,8 @@ [f2]: https://paritytech.github.io/ink/ink_core [h1]: https://img.shields.io/badge/docs-abi-blue.svg [h2]: https://paritytech.github.io/ink/ink_abi +[i1]: https://img.shields.io/badge/docs-prelude-blue.svg +[i2]: https://paritytech.github.io/ink/ink_prelude **IMPORTANT NOTE:** WORK IN PROGRESS! Do not expect this to be working. @@ -25,9 +27,9 @@ For more information please visit [the ink! tutorial](https://substrate.dev/subs ## Developer Documentation -| `ink_abi` | `ink_core` | -| ------------- | ------------- | -| [![][h1]][h2] | [![][f1]][f2] | +| `ink_abi` | `ink_core` | `ink_prelude` | +| ------------- | ------------- | ------------- | +| [![][h1]][h2] | [![][f1]][f2] | [![][i1]][i2] | ### Interaction with Substrate diff --git a/core/Cargo.toml b/core/Cargo.toml index 47d69765fc3..4394a2b3410 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -23,13 +23,13 @@ ink_prelude = { version = "2.1.0", path = "../prelude/", default-features = fals scale = { package = "parity-scale-codec", version = "1.3", default-features = false, features = ["derive", "full"] } derive_more = { version = "0.99", default-features = false, features = ["from", "display"] } -smallvec = { version = "1.2", default-features = false, features = ["union"] } -cfg-if = "0.1" num-traits = { version = "0.2", default-features = false, features = ["i128"] } +cfg-if = "0.1" +array-init = "0.1" +generic-array = "0.14.1" paste = "0.1" # Hashes for the off-chain environment. -byteorder = { version = "1.3", optional = true } blake2-rfc = { version = "0.2", optional = true } sha2 = { version = "0.8", optional = true } tiny-keccak = { version = "2.0", optional = true } @@ -63,7 +63,6 @@ std = [ "rand/std", "num-traits/std", # Enables hashing crates for off-chain environment. - "byteorder", "blake2-rfc", "sha2", "tiny-keccak", diff --git a/core/src/env/call/mod.rs b/core/src/env/call/mod.rs index 484fb99ad20..5fd045dbbd7 100644 --- a/core/src/env/call/mod.rs +++ b/core/src/env/call/mod.rs @@ -18,6 +18,8 @@ mod builder; mod instantiate; mod utils; +/// The compile-time states of builder for calls and instantiations. 
+#[doc(hidden)] pub mod state { pub use crate::env::call::{ instantiate::state::{ diff --git a/core/src/env/engine/off_chain/test_api.rs b/core/src/env/engine/off_chain/test_api.rs index 997c6b34de5..ce41c81cda5 100644 --- a/core/src/env/engine/off_chain/test_api.rs +++ b/core/src/env/engine/off_chain/test_api.rs @@ -257,11 +257,17 @@ pub struct DefaultAccounts where T: EnvTypes, { + /// The predefined `ALICE` account holding substantial amounts of value. pub alice: T::AccountId, + /// The predefined `BOB` account holding some amounts of value. pub bob: T::AccountId, + /// The predefined `CHARLIE` account holding some amounts of value. pub charlie: T::AccountId, + /// The predefined `DJANGO` account holding no value. pub django: T::AccountId, + /// The predefined `EVE` account holding no value. pub eve: T::AccountId, + /// The predefined `FRANK` account holding no value. pub frank: T::AccountId, } diff --git a/core/src/hash/builder.rs b/core/src/hash/builder.rs index 1e902dfc70f..4cf660ff6dd 100644 --- a/core/src/hash/builder.rs +++ b/core/src/hash/builder.rs @@ -111,8 +111,8 @@ pub trait Finalize where H: Hasher, { - fn finalize_using(&self, output: &mut ::Output); - fn finalize(&self) -> ::Output; + fn finalize_using(&mut self, output: &mut ::Output); + fn finalize(&mut self) -> ::Output; } impl Finalize for HashBuilder @@ -120,11 +120,13 @@ where H: Hasher, S: Accumulator, { - fn finalize_using(&self, output: &mut ::Output) { - ::finalize_immediate(self.strategy.as_slice(), output) + fn finalize_using(&mut self, output: &mut ::Output) { + let output = ::finalize_immediate(self.strategy.as_slice(), output); + self.strategy.reset(); + output } - fn finalize(&self) -> ::Output { + fn finalize(&mut self) -> ::Output { let mut output = <::Output as Default>::default(); Self::finalize_using(self, &mut output); output diff --git a/core/src/hash/hasher.rs b/core/src/hash/hasher.rs index 27d9b1f51be..985d1bc02f4 100644 --- a/core/src/hash/hasher.rs +++ b/core/src/hash/hasher.rs @@ -34,6 +34,7 @@ macro_rules! impl_hasher_for { struct $ty_name:ident($fn_name:ident, $output_len:literal); ) => { $( #[$doc] )* + #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum $ty_name {} impl Hasher for $ty_name { @@ -47,21 +48,17 @@ macro_rules! impl_hasher_for { } impl_hasher_for! { /// SHA2 256-bit hasher. - #[derive(Debug)] struct Sha2x256Hasher(sha2_256, 32); } impl_hasher_for! { /// KECCAK 256-bit hasher. - #[derive(Debug)] struct Keccak256Hasher(keccak_256, 32); } impl_hasher_for! { /// BLAKE2 256-bit hasher. - #[derive(Debug)] struct Blake2x256Hasher(blake2_256, 32); } impl_hasher_for! { /// BLAKE2 128-bit hasher. - #[derive(Debug)] struct Blake2x128Hasher(blake2_128, 16); } diff --git a/core/src/lib.rs b/core/src/lib.rs index 9efdc713450..7c168994203 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,7 +22,9 @@ #![cfg_attr(not(feature = "std"), no_std)] #![deny( + missing_docs, bad_style, + bare_trait_objects, const_err, improper_ctypes, non_shorthand_field_patterns, @@ -49,6 +51,7 @@ extern crate ink_alloc; pub mod env; pub mod hash; pub mod storage; +pub mod storage2; // Needed for derive macros of `core/derive` sub crate. pub(crate) use crate as ink_core; diff --git a/core/src/storage2/alloc/allocation.rs b/core/src/storage2/alloc/allocation.rs new file mode 100644 index 00000000000..e702afeddc5 --- /dev/null +++ b/core/src/storage2/alloc/allocation.rs @@ -0,0 +1,87 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::hash::{ + Blake2x256, + Hasher, + Wrap, +}; +use ink_primitives::Key; + +/// A unique dynamic allocation. +/// +/// This can refer to a dynamically allocated storage cell. +/// It has been created by a dynamic storage allocator. +/// The initiator of the allocation has to make sure to deallocate +/// this dynamic allocation again using the same dynamic allocator +/// if it is no longer in use. +/// +/// # Note +/// +/// Normally instances of this type are not used directly; +/// a [`storage::Box`](`crate::storage2::Box`) is used instead. +#[derive( + Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, scale::Encode, scale::Decode, +)] +pub struct DynamicAllocation(pub(super) u32); + +impl DynamicAllocation { + /// Returns the allocation identifier as `u32`. + pub(super) fn get(self) -> u32 { + self.0 + } + + /// Returns the storage key associated with this dynamic allocation. + pub fn key(self) -> Key { + // We create a 25-byte buffer for the hashing. + // This is due to the fact that we prepend the `u32` encoded identifier + // with the `b"DYNAMICALLY ALLOCATED"` byte string which has a length + // of 21 bytes. Since `u32` always has an encoding length of 4 bytes we + // end up requiring 25 bytes in total. + // Optimization Opportunity: + // Since ink! always runs single-threaded we could make this buffer + // static and instead reuse its contents with every invocation of this + // method. However, this would introduce `unsafe` Rust usage. + #[rustfmt::skip] + let mut buffer: [u8; 25] = [ + b'D', b'Y', b'N', b'A', b'M', b'I', b'C', b'A', b'L', b'L', b'Y', + b' ', + b'A', b'L', b'L', b'O', b'C', b'A', b'T', b'E', b'D', + b'_', b'_', b'_', b'_', + ]; + // Encoding the `u32` identifier requires a 4-byte buffer. + let mut hash_buffer = Wrap::from(&mut buffer[21..25]); + <u32 as scale::Encode>::encode_to(&self.0, &mut hash_buffer); + let mut output = [0x00_u8; 32]; + <Blake2x256 as Hasher>::hash_bytes_using(&buffer, &mut output); + Key::from(output) + } +} + +#[test] +fn get_works() { + let expected_keys = [ + b"\ + \x0A\x0F\xF5\x30\xBD\x5A\xB6\x67\ + \x85\xC9\x74\x6D\x01\x33\xD7\xE1\ + \x24\x40\xC4\x67\xA9\xF0\x6D\xCA\ + \xE7\xED\x2E\x78\x32\x77\xE9\x10", + b"\ + \x11\x5A\xC0\xB2\x29\xA5\x34\x10\ + \xB0\xC0\x2D\x47\x49\xDC\x7A\x09\ + \xB9\x6D\xF9\x51\xB6\x1D\x4F\x3B\ + \x4E\x75\xAC\x3B\x14\x57\x47\x96", + ]; + assert_eq!(DynamicAllocation(0).key(), Key(*expected_keys[0])); + assert_eq!(DynamicAllocation(1).key(), Key(*expected_keys[1])); +} diff --git a/core/src/storage2/alloc/allocator.rs new file mode 100644 index 00000000000..dcfee1e3677 --- /dev/null +++ b/core/src/storage2/alloc/allocator.rs @@ -0,0 +1,83 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::DynamicAllocation; +use crate::storage2::{ + collections::BitStash, + traits::{ + KeyPtr, + SpreadLayout, + }, +}; + +/// The dynamic allocator. +/// +/// Manages dynamic storage allocations in a very efficient and economical way. +#[derive(Debug, Default, PartialEq, Eq)] +pub struct DynamicAllocator { + allocations: BitStash, +} + +impl SpreadLayout for DynamicAllocator { + const FOOTPRINT: u64 = <BitStash as SpreadLayout>::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self { + allocations: SpreadLayout::pull_spread(ptr), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::push_spread(&self.allocations, ptr); + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::clear_spread(&self.allocations, ptr); + } +} + +impl DynamicAllocator { + /// Returns a new dynamic storage allocation. + /// + /// # Panics + /// + /// If the dynamic allocator ran out of free dynamic allocations. + pub fn alloc(&mut self) -> DynamicAllocation { + DynamicAllocation(self.allocations.put()) + } + + /// Frees the given dynamic storage allocation. + /// + /// This makes the given dynamic storage allocation available again + /// for new dynamic storage allocations. + /// + /// # Panics + /// + /// Panics if the given dynamic allocation is invalid. + /// A dynamic allocation is invalid if it is not represented as occupied + /// in the `free` list. + pub fn free(&mut self, allocation: DynamicAllocation) { + let index = allocation.get(); + if !self + .allocations + .take(index) + .expect("invalid dynamic storage allocation") + { + panic!( + "encountered double free of dynamic storage: at index {}", + index + ) + } + } +} diff --git a/core/src/storage2/alloc/init.rs new file mode 100644 index 00000000000..28172d0b006 --- /dev/null +++ b/core/src/storage2/alloc/init.rs @@ -0,0 +1,160 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::DynamicAllocator; +use crate::storage2::traits::pull_spread_root; +use cfg_if::cfg_if; +use ink_primitives::Key; + +/// The default dynamic allocator key offset. +/// +/// This is where the dynamic allocator is stored on the contract storage. +const DYNAMIC_ALLOCATOR_KEY_OFFSET: [u8; 32] = [0xFE; 32]; + +/// The phase in which a contract execution can be. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ContractPhase { + /// Initializes the global dynamic storage allocator from scratch. + /// + /// Upon initialization it will be created from scratch as if the + /// contract has been deployed for the first time.
+ Deploy, + /// Initializes the global dynamic storage allocator from storage. + /// + /// Upon initialization the dynamic storage allocator will be pulled + /// from the contract storage with the assumption that a former + /// contract deployment has already taken place in the past. + Call, +} + +/// The state of the dynamic allocator global instance. +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +enum DynamicAllocatorState { + /// The global instance has not yet been initialized. + /// + /// Upon initialization it will be created from scratch as if the + /// contract has been deployed for the first time. + UninitDeploy, + /// The global instance has not yet been initialized. + /// + /// Upon initialization it will be pulled from the contract storage + /// with the assumption that a former contract deployment has already + /// taken place in the past. + UninitCall, + /// The global instance has already been initialized successfully. + Initialized(DynamicAllocator), +} + +impl From<ContractPhase> for DynamicAllocatorState { + fn from(phase: ContractPhase) -> Self { + match phase { + ContractPhase::Deploy => DynamicAllocatorState::UninitDeploy, + ContractPhase::Call => DynamicAllocatorState::UninitCall, + } + } +} + +cfg_if! { + if #[cfg(all(not(feature = "std"), target_arch = "wasm32"))] { + // Procedures for the Wasm compilation: + + /// The global instance for the dynamic storage allocator. + static mut GLOBAL_INSTANCE: DynamicAllocatorState = DynamicAllocatorState::UninitDeploy; + + /// Commands the (re-)initialization of the global instance for the dynamic + /// storage allocator. + pub fn initialize_for(phase: ContractPhase) { + let instance = unsafe { &mut GLOBAL_INSTANCE }; + // We do not allow reinitialization for Wasm targets for performance reasons. + if let DynamicAllocatorState::Initialized(_) = instance { + panic!("cannot reinitialize dynamic storage allocator instance in Wasm"); + } + *instance = phase.into(); + } + + /// Runs the given closure on the global instance for the dynamic storage allocator. + pub fn on_call<F, R>(f: F) -> R + where + F: FnOnce(&mut DynamicAllocator) -> R, + { + let instance = unsafe { &mut GLOBAL_INSTANCE }; + match instance { + DynamicAllocatorState::UninitDeploy => { + let mut allocator = DynamicAllocator::default(); + let result = f(&mut allocator); + *instance = DynamicAllocatorState::Initialized(allocator); + result + } + DynamicAllocatorState::UninitCall => { + let mut allocator = pull_spread_root::<DynamicAllocator>(&Key(DYNAMIC_ALLOCATOR_KEY_OFFSET)); + let result = f(&mut allocator); + *instance = DynamicAllocatorState::Initialized(allocator); + result + } + DynamicAllocatorState::Initialized(ref mut allocator) => { + f(allocator) + } + } + } + + } else if #[cfg(feature = "std")] { + // Procedures for the off-chain environment and testing compilation: + + use ::core::cell::RefCell; + thread_local!( + /// The global instance for the dynamic storage allocator. + static GLOBAL_INSTANCE: RefCell<DynamicAllocatorState> = RefCell::new(DynamicAllocatorState::UninitDeploy); + ); + + /// Commands the (re-)initialization of the global instance for the dynamic + /// storage allocator. + pub fn initialize_for(phase: ContractPhase) { + GLOBAL_INSTANCE.with(|instance| { + instance.replace_with(|_| phase.into()) + }); + } + + /// Runs the given closure on the global instance for the dynamic storage allocator.
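+ ///
+ /// If the global instance has not yet been initialized it is set up
+ /// first according to the recorded contract phase: a fresh default
+ /// allocator for `Deploy`, or one pulled from contract storage for `Call`.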
+ pub fn on_call<F, R>(f: F) -> R + where + F: FnOnce(&mut DynamicAllocator) -> R, + { + GLOBAL_INSTANCE.with(|instance| { + match &mut *instance.borrow_mut() { + instance @ DynamicAllocatorState::UninitDeploy => { + let mut allocator = DynamicAllocator::default(); + let result = f(&mut allocator); + *instance = DynamicAllocatorState::Initialized(allocator); + result + } + instance @ DynamicAllocatorState::UninitCall => { + let mut allocator = pull_spread_root::<DynamicAllocator>(&Key(DYNAMIC_ALLOCATOR_KEY_OFFSET)); + let result = f(&mut allocator); + *instance = DynamicAllocatorState::Initialized(allocator); + result + } + DynamicAllocatorState::Initialized(instance) => { + f(instance) + } + } + }) + } + + } else { + compile_error! { + "ink! only supports compilation as `std` or `no_std` + `wasm32-unknown`" + } + } +} diff --git a/core/src/storage2/alloc/mod.rs new file mode 100644 index 00000000000..7ad3f03d045 --- /dev/null +++ b/core/src/storage2/alloc/mod.rs @@ -0,0 +1,101 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The default dynamic storage allocator. +//! +//! Allows to allocate storage cells in a dynamic fashion. +//! This is important if users want to combine types of varying storage +//! footprints. For example, dynamic allocations are required whenever +//! a user wants to use a storage collection (e.g. `storage::Vec`) in +//! another storage collection: `storage::Vec<storage::Vec<T>>` +//! +//! # Simplification +//! +//! The contracts pallet is using 256 bit keys for identifying storage cells. +//! This implies a storage space of 2^256 cells which is big enough to say that +//! collisions are probably never going to happen anywhere at any time +//! if keys are chosen randomly. Using the built-in crypto hashers on unique +//! input we can be sure that there are never going to be collisions in this +//! space of 2^256 cells. +//! +//! This way we can reduce the problem of finding another region in our storage +//! that fits certain requirements (e.g. a minimum size) to the problem of +//! finding another uniform slot. Since we are on 32-bit WebAssembly we have +//! memory limitations that make it impractical to have more than 2^32 +//! dynamically allocated entities, and so we can introduce another limitation +//! of having a total of at most 2^32 dynamic allocations at any point in time. +//! This enables us to have 32-bit keys instead of 256-bit keys. +//! +//! We can convert such 32-bit keys (represented by e.g. a `u32`) into 256-bit +//! keys by using one of the built-in crypto hashes that has a 256-bit output, +//! e.g. KECCAK, SHA2 or BLAKE2. For technical reasons we should prepend the +//! bytes of the 32-bit key with some unique byte sequence, e.g.: +//! ```no_compile +//! let key256 = blake2x256(b"DYNAMICALLY ALLOCATED", bytes(key32)); +//! ``` +//! +//! # Internals +//! +//! As described in [# Simplification] there are 2^32 possible uniform dynamic +//! allocations available.
For each such slot the dynamic allocator stores, via +//! a single bit in a bitvector, whether that slot is free or occupied. +//! This bitvector is called the `free` list. +//! However, searching in this `free` list for a 0 bit and thus a free slot +//! for a dynamic allocation would require one contract storage lookup for +//! every 256 consecutively occupied dynamic allocations. +//! This might not sound like much, but given that there could be thousands or +//! tens of thousands of dynamic allocations at any given time this does not +//! scale well. +//! To improve scalability we added another vector: the +//! so-called `set_bits` vector. +//! In this vector every `u8` element densely stores the number of set bits +//! (bits that are `1` or `true`) for each 256-bit chunk in the `free` list. +//! (Note that the `free` list is organized in 256-bit chunks of bits.) +//! +//! This way, to search for an unoccupied dynamic allocation we iterate over +//! the set-bits vector which is 32 times more dense than our `free` list. +//! The additional density implies that we can query up to 8192 potential +//! dynamic storage allocations with a single contract storage look-up. + +mod allocation; +mod allocator; +mod init; + +#[cfg(test)] +mod tests; + +pub use self::{ + allocation::DynamicAllocation, + init::{ + initialize_for, + ContractPhase, + }, +}; +use self::{ + allocator::DynamicAllocator, + init::on_call, +}; + +/// Returns a new dynamic storage allocation. +pub fn alloc() -> DynamicAllocation { + on_call(DynamicAllocator::alloc) +} + +/// Frees the given dynamic storage allocation. +/// +/// This makes the given dynamic storage allocation available again +/// for new dynamic storage allocations. +pub fn free(allocation: DynamicAllocation) { + on_call(|allocator| allocator.free(allocation)) +} diff --git a/core/src/storage2/alloc/tests.rs new file mode 100644 index 00000000000..64908d65bf1 --- /dev/null +++ b/core/src/storage2/alloc/tests.rs @@ -0,0 +1,206 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + alloc, + free, + initialize_for, + ContractPhase, + DynamicAllocation, + DynamicAllocator, +}; +use crate::{ + env::{ + test, + DefaultEnvTypes, + }, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, +}; +use ink_primitives::Key; + +fn run_default_test<F>(f: F) +where + F: FnOnce(), +{ + initialize_for(ContractPhase::Deploy); + test::run_test::<DefaultEnvTypes, _>(|_| { + f(); + Ok(()) + }) + .unwrap(); +} + +#[test] +fn alloc_works() { + run_default_test(|| { + assert_eq!(alloc(), DynamicAllocation(0)); + }) } + cfg_if::cfg_if! { + if #[cfg(miri)] { + // We need to lower the test allocations because miri's stacked borrows + // analysis currently is superlinear for some workloads.
+ // Read more here: https://github.com/rust-lang/miri/issues/1367 + const TEST_ALLOCATIONS: u32 = 10; + } else { + const TEST_ALLOCATIONS: u32 = 10_000; + } } + +#[test] +fn many_allocs_works() { + run_default_test(|| { + for i in 0..TEST_ALLOCATIONS { + assert_eq!(alloc(), DynamicAllocation(i)); + } + }) } + +#[test] +fn free_works() { + run_default_test(|| { + // Check that this pattern does not panic. + free(alloc()); + }) } + +#[test] +fn many_alloc_and_free_works() { + run_default_test(|| { + for i in 0..TEST_ALLOCATIONS { + assert_eq!(alloc(), DynamicAllocation(i)); + } + for i in 0..TEST_ALLOCATIONS { + free(DynamicAllocation(i)) + } + assert_eq!(alloc(), DynamicAllocation(0)); + }) } + +#[test] +fn alloc_free_in_the_middle() { + run_default_test(|| { + for i in 0..TEST_ALLOCATIONS { + assert_eq!(alloc(), DynamicAllocation(i)); + } + for i in 0..TEST_ALLOCATIONS { + free(DynamicAllocation(i)); + assert_eq!(alloc(), DynamicAllocation(i)); + } + }) } + +#[test] +#[should_panic(expected = "encountered double free of dynamic storage: at index 0")] +fn double_free_panics() { + run_default_test(|| { + let a0 = alloc(); + let _ = alloc(); + free(a0); + free(a0); + }) } + +#[test] +#[should_panic(expected = "invalid dynamic storage allocation")] +fn free_out_of_bounds() { + run_default_test(|| { + free(DynamicAllocation(0)); + }) } + +fn spread_layout_alloc_setup() -> DynamicAllocator { + let mut alloc = DynamicAllocator::default(); + assert_eq!(alloc.alloc(), DynamicAllocation(0)); + assert_eq!(alloc.alloc(), DynamicAllocation(1)); + assert_eq!(alloc.alloc(), DynamicAllocation(2)); + assert_eq!(alloc.alloc(), DynamicAllocation(3)); + assert_eq!(alloc.alloc(), DynamicAllocation(4)); + alloc.free(DynamicAllocation(3)); + alloc.free(DynamicAllocation(1)); + alloc } + +#[test] +fn spread_pull_push_works() { + run_default_test(|| { + let mut alloc = spread_layout_alloc_setup(); + let root_key = Key([0x77; 32]); + // Push the current state of the dynamic storage allocator to the storage: + SpreadLayout::push_spread(&alloc, &mut KeyPtr::from(root_key)); + // Now check if the new allocations are filling the freed ones: + assert_eq!(alloc.alloc(), DynamicAllocation(1)); + assert_eq!(alloc.alloc(), DynamicAllocation(3)); + // Pull another instance of the storage allocator from storage, + // then check if both allocators are equal after also allocating the same + // allocation slots: + let mut alloc2 = + <DynamicAllocator as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(alloc2.alloc(), DynamicAllocation(1)); + assert_eq!(alloc2.alloc(), DynamicAllocation(3)); + assert_eq!(alloc2, alloc); + }) } + +#[test] +#[should_panic(expected = "encountered empty storage cell")] +fn spread_clear_works() { + run_default_test(|| { + let alloc = spread_layout_alloc_setup(); + let root_key = Key([0x42; 32]); + // Push the current state of the dynamic storage allocator to the storage: + SpreadLayout::push_spread(&alloc, &mut KeyPtr::from(root_key)); + // Pull another instance of the storage allocator from storage, + // then check if both allocators are equal after also allocating the same + // allocation slots: + let alloc2 = + <DynamicAllocator as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(alloc2, alloc); + // Now clear the storage associated with `alloc2` again and test if another + // loaded instance from the same storage region panics upon pulling: + SpreadLayout::clear_spread(&alloc2, &mut KeyPtr::from(root_key)); + // We have to prevent calling `Drop` of `alloc3` since it has been created + // deliberately upon invalid
contract storage. Since interacting with `alloc3` + // panics, which immediately initiates the dropping routines, we have to + // wrap it in `ManuallyDrop` before we interact with it to avoid + // panicking while panicking. + let alloc3 = + <DynamicAllocator as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + let mut alloc3 = core::mem::ManuallyDrop::new(alloc3); + // Now interact with `alloc3` to make it load from the invalid storage: + let _ = alloc3.alloc(); + }) } + +#[test] +fn test_call_setup_works() { + test::run_test::<DefaultEnvTypes, _>(|_| { + let mut allocator = DynamicAllocator::default(); + assert_eq!(allocator.alloc(), DynamicAllocation(0)); + assert_eq!(allocator.alloc(), DynamicAllocation(1)); + let root_key = Key([0xFE; 32]); + SpreadLayout::push_spread(&allocator, &mut KeyPtr::from(root_key)); + initialize_for(ContractPhase::Call); + assert_eq!(alloc(), DynamicAllocation(2)); + assert_eq!(alloc(), DynamicAllocation(3)); + free(DynamicAllocation(0)); + free(DynamicAllocation(2)); + Ok(()) + }) + .unwrap(); +} diff --git a/core/src/storage2/collections/bitstash/counts.rs new file mode 100644 index 00000000000..df2f9255b0c --- /dev/null +++ b/core/src/storage2/collections/bitstash/counts.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use core::ops::{ + Index, + IndexMut, +}; + +/// Stores the number of set bits for each 256-bit block in a compact `u8`. +#[derive(Debug, Default, PartialEq, Eq, scale::Encode, scale::Decode)] +pub struct CountFree { + /// Set bits per 256-bit chunk. + counts: [u8; 32], + /// Since a `u8` can only count up to 255 but we might need to count + /// up to 256 bits for a 256-bit chunk we store one extra + /// bit per counter to determine filled chunks. + full: FullMask, +} + +impl Index<u8> for CountFree { + type Output = u8; + + fn index(&self, index: u8) -> &Self::Output { + &self.counts[index as usize] + } +} + +impl IndexMut<u8> for CountFree { + fn index_mut(&mut self, index: u8) -> &mut Self::Output { + &mut self.counts[index as usize] + } +} + +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, scale::Encode, scale::Decode)] +pub struct FullMask(u32); + +impl FullMask { + /// Returns `true` if the 256-bit chunk at the given index is full. + pub fn is_full(self, index: u8) -> bool { + assert!(index < 32); + (self.0 >> (31 - index as u32)) & 0x01 == 1 + } + + /// Sets the flag for the 256-bit chunk at the given index to `full`. + pub fn set_full(&mut self, index: u8) { + self.0 |= 1_u32 << (31 - index as u32); + } + + /// Resets the flag for the 256-bit chunk at the given index to not `full`. + pub fn reset_full(&mut self, index: u8) { + self.0 &= !(1_u32 << (31 - index as u32)); + } +} + +impl CountFree { + /// Returns the position of the first free `u8` in the free counts. + /// + /// Returns `None` if all counts are `0xFF`.
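+ ///
+ /// # Note
+ ///
+ /// This also increments the counter at the returned position; once a
+ /// counter has reached 255 the chunk is instead marked as full via the
+ /// full mask.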
+ pub fn position_first_zero(&mut self) -> Option<u8> { + for (i, count) in self.counts.iter_mut().enumerate() { + if !self.full.is_full(i as u8) { + if *count == !0 { + self.full.set_full(i as u8); + } else { + *count += 1; + } + return Some(i as u8) + } + } + None + } + + /// Decreases the number of set bits for the given index. + /// + /// Returns the new number of set bits. + /// + /// # Panics + /// + /// - If the given index is out of bounds. + /// - If the decrement would cause an overflow. + pub fn dec(&mut self, index: u8) -> u8 { + assert!(index < 32, "index is out of bounds"); + if self.full.is_full(index) { + self.full.reset_full(index); + } else { + let new_value = self.counts[index as usize] + .checked_sub(1) + .expect("set bits decrement overflowed"); + self.counts[index as usize] = new_value; + } + self.counts[index as usize] + } +} diff --git a/core/src/storage2/collections/bitstash/mod.rs new file mode 100644 index 00000000000..b5ee8a4359f --- /dev/null +++ b/core/src/storage2/collections/bitstash/mod.rs @@ -0,0 +1,181 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage bit stash data structure and utilities. +//! +//! Allows to put and take bits in a compact and very efficient way. + +mod counts; +mod storage; + +#[cfg(test)] +mod tests; + +use self::counts::CountFree; +use crate::storage2::collections::{ + Bitvec as StorageBitvec, + Vec as StorageVec, +}; + +/// The index type used in the storage bit stash. +type Index = u32; + +/// A stash for bits operating on the contract storage. +/// +/// Allows to efficiently put and take bits and +/// stores the underlying bits in an extremely compressed format. +#[derive(Debug, Default, PartialEq, Eq)] +pub struct BitStash { + /// Counter for set bits in a 256-bit chunk of the `free` list. + /// + /// For every 256-bit chunk stored in `free` this stores a `u8` that counts + /// the number of set bits in the 256-bit chunk. This information is used + /// to compact the information in `free` to make a `first fit` linear + /// search for a new free storage slot more scalable. Since the number of + /// set bits in a 256-bit chunk ranges from 0 to 256 inclusive, which is + /// 257 states, a `u8` alone is not enough and we need an extra 9th bit + /// per counter. This 9th bit tells for every 256-bit chunk whether it is full. + /// + /// In theory it is possible to search up to 8192 storage cells for free + /// slots with a single contract storage look-up by iterating over the 32 + /// counters of a single `CountFree` instance. + counts: StorageVec<CountFree>, + /// Stores the underlying bits of the storage bit stash. + free: StorageBitvec, +} + +impl BitStash { + /// Creates a new storage bit stash. + pub fn new() -> Self { + Self { + counts: StorageVec::new(), + free: StorageBitvec::new(), + } + } + + /// Returns the bit position of the first 256-bit chunk with zero bits + /// in the `free` list.
+ /// + /// Returns the bit position of the first bit in the 256-bit chunk and not + /// the chunk position since that's what [`Bitvec::get_chunk`] expects. + /// + /// Also directly increases the count of the first found free bit chunk. + fn position_first_zero(&mut self) -> Option<u64> { + // Iterate over the `counts` list of the bit stash. + // The counts list consists of packs of 32 counts per element. + for (n, counts) in self.counts.iter_mut().enumerate() { + if let Some(i) = counts.position_first_zero() { + let n = n as u64; + let i = i as u64; + return Some(n * (32 * 256) + i * 256) + } + } + None + } + + /// Returns the number of required counts elements. + fn required_counts(&self) -> u32 { + let capacity = self.free.capacity(); + if capacity == 0 { + return 0 + } + 1 + ((capacity - 1) / (32 * 256)) as u32 + } + + /// Returns `true` if the bit at the indexed slot is set (`1`). + /// + /// Returns `None` if the index is out of bounds. + pub fn get(&self, index: Index) -> Option<bool> { + self.free.get(index) + } + + /// Puts another set bit into the storage bit stash. + /// + /// Returns the index of the slot where the set bit has been inserted. + pub fn put(&mut self) -> Index { + if let Some(index) = self.position_first_zero() { + if index == self.free.len() as u64 { + self.free.push(true); + return self.free.len() - 1 + } + let mut bits256 = self + .free + .get_chunk_mut(index as u32) + .expect("must exist if indices have been found"); + if let Some(first_zero) = bits256.position_first_zero() { + bits256 + .get_mut(first_zero) + .expect("first zero is invalid") + .set(); + index as u32 + first_zero as u32 + } else { + // We found a free storage slot but it isn't within the valid + // bounds of the free list but points to its end. So we simply + // append another 1 bit (`true`) to the free list and return + // a new index pointing to it. No need to push to the counts + // list in this case. + self.free.push(true); + self.free.len() - 1 + } + } else { + // We found no free 256-bit slot: + // + // - Check if we already have allocated too many (2^32) bits and + // panic if that's the case. The check is done on the internal + // storage bit vector. + // - Otherwise allocate a new pack of 256-bits in the free list + // and mirror it in the counts list. + self.free.push(true); + if self.counts.len() < self.required_counts() { + // We need to push another counts element. + let mut counter = CountFree::default(); + counter[0_u8] = 1; + self.counts.push(counter); + } + // Return the new slot. + self.free.len() - 1 + } + } + + /// Takes the bit from the given index and returns it. + /// + /// Returns `true` if the indexed bit was set (`1`). + /// Returns `None` if the index is out of bounds. + /// + /// # Note + /// + /// This frees up the indexed slot for putting in another set bit. + pub fn take(&mut self, index: Index) -> Option<bool> { + if index >= self.free.len() { + // Bail out early if index is out of bounds. + return None + } + let mut access = self.free.get_mut(index).expect("index is out of bounds"); + if !access.get() { + return Some(false) + } + // At this point the bit was found to be set (`true`) and we have to + // update the underlying internals in order to reset it so that the index + // becomes free for another bit again. + access.reset(); + // Update the counts list.
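+ // Each `CountFree` element covers 32 chunks of 256 bits each, so the
+ // element holding the counter for `index` is found at `index / (256 * 32)`
+ // and the counter within that element at `(index / 256) % 32`.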
+ let counts_id = index / (256 * 32); + let byte_id = ((index / 256) % 32) as u8; + self.counts + .get_mut(counts_id) + .expect("invalid counts ID") + .dec(byte_id); + Some(true) + } +} diff --git a/core/src/storage2/collections/bitstash/storage.rs b/core/src/storage2/collections/bitstash/storage.rs new file mode 100644 index 00000000000..46b19cbb8e7 --- /dev/null +++ b/core/src/storage2/collections/bitstash/storage.rs @@ -0,0 +1,78 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + BitStash, + CountFree, +}; +use crate::storage2::{ + collections::{ + Bitvec as StorageBitvec, + Vec as StorageVec, + }, + traits::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + KeyPtr, + PackedLayout, + SpreadLayout, + }, +}; +use ink_primitives::Key; + +impl SpreadLayout for CountFree { + const FOOTPRINT: u64 = 1; + const REQUIRES_DEEP_CLEAN_UP: bool = false; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::<Self>(ptr) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::<Self>(self, ptr) + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::<Self>(self, ptr) + } +} + +impl PackedLayout for CountFree { + fn pull_packed(&mut self, _at: &Key) {} + fn push_packed(&self, _at: &Key) {} + fn clear_packed(&self, _at: &Key) {} +} + +impl SpreadLayout for BitStash { + const FOOTPRINT: u64 = <StorageVec<CountFree> as SpreadLayout>::FOOTPRINT + + <StorageBitvec as SpreadLayout>::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self { + counts: SpreadLayout::pull_spread(ptr), + free: SpreadLayout::pull_spread(ptr), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::push_spread(&self.counts, ptr); + SpreadLayout::push_spread(&self.free, ptr); + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::clear_spread(&self.counts, ptr); + SpreadLayout::clear_spread(&self.free, ptr); + } +} diff --git a/core/src/storage2/collections/bitstash/tests.rs b/core/src/storage2/collections/bitstash/tests.rs new file mode 100644 index 00000000000..66b43644c17 --- /dev/null +++ b/core/src/storage2/collections/bitstash/tests.rs @@ -0,0 +1,167 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::BitStash; +use crate::{ + env, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, +}; +use ink_primitives::Key; + +cfg_if::cfg_if!
{ + if #[cfg(miri)] { + // We need to lower the test allocations because miri's stacked borrows + // analysis currently is superlinear for some workloads. + // Read more here: https://github.com/rust-lang/miri/issues/1367 + const TEST_ALLOCATIONS: u32 = 10; + } else { + const TEST_ALLOCATIONS: u32 = 10_000; + } +} + +#[test] +fn default_works() { + let default = BitStash::default(); + assert_eq!(default.get(0), None); +} + +#[test] +fn put_and_take_works() { + let mut default = BitStash::default(); + assert_eq!(default.get(0), None); + assert_eq!(default.put(), 0); + assert_eq!(default.get(0), Some(true)); + assert_eq!(default.take(0), Some(true)); + assert_eq!(default.get(0), Some(false)); +} + +#[test] +fn put_works() { + let mut default = BitStash::default(); + for i in 0..TEST_ALLOCATIONS { + assert_eq!(default.get(i), None); + assert_eq!(default.put(), i); + assert_eq!(default.get(i), Some(true)); + } +} + +fn filled_bitstash() -> BitStash { + let mut default = BitStash::default(); + for i in 0..TEST_ALLOCATIONS { + assert_eq!(default.put(), i); + assert_eq!(default.get(i), Some(true)); + } + default +} + +#[test] +fn get_works() { + let mut default = filled_bitstash(); + // Remove all bits at indices divisible by 3 or by 5. + for i in 0..TEST_ALLOCATIONS { + if i % 3 == 0 || i % 5 == 0 { + default.take(i); + } + } + for i in 0..TEST_ALLOCATIONS { + let expected = !(i % 3 == 0 || i % 5 == 0); + assert_eq!(default.get(i), Some(expected)); + } +} + +#[test] +fn take_in_order_works() { + let mut default = filled_bitstash(); + for i in 0..TEST_ALLOCATIONS { + assert_eq!(default.get(i), Some(true)); + assert_eq!(default.take(i), Some(true)); + assert_eq!(default.get(i), Some(false)); + } +} + +#[test] +fn take_in_rev_order_works() { + let mut default = filled_bitstash(); + for i in (0..TEST_ALLOCATIONS).rev() { + assert_eq!(default.get(i), Some(true)); + assert_eq!(default.take(i), Some(true)); + assert_eq!(default.get(i), Some(false)); + } +} + +#[test] +fn take_refill_works() { + let mut default = filled_bitstash(); + for i in 0..TEST_ALLOCATIONS { + assert_eq!(default.get(i), Some(true)); + assert_eq!(default.take(i), Some(true)); + assert_eq!(default.get(i), Some(false)); + assert_eq!(default.put(), i); + assert_eq!(default.get(i), Some(true)); + } +} + +#[test] +fn take_refill_rev_works() { + let mut default = filled_bitstash(); + for i in (0..TEST_ALLOCATIONS).rev() { + assert_eq!(default.get(i), Some(true)); + assert_eq!(default.take(i), Some(true)); + assert_eq!(default.get(i), Some(false)); + assert_eq!(default.put(), i); + assert_eq!(default.get(i), Some(true)); + } +} + +#[test] +fn spread_layout_push_pull_works() { + env::test::run_test::<env::DefaultEnvTypes, _>(|_| { + let default = filled_bitstash(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&default, &mut KeyPtr::from(root_key)); + let pulled = <BitStash as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(default, pulled); + Ok(()) + }) + .unwrap() +} + +#[test] +#[should_panic(expected = "encountered empty storage cell")] +fn spread_layout_clear_works() { + env::test::run_test::<env::DefaultEnvTypes, _>(|_| { + let default = filled_bitstash(); + // First push the instance to the contract storage. + // Then load a valid instance, check it and clear its associated storage. + // Afterwards load the invalid instance from the same storage region + // and try to interact with it which is expected to fail.
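+ // (Editorial note: the pull below succeeds because the lazy internals
+ // only touch contract storage on first access; the panic is expected
+ // to fire inside `put` at the end of the test.)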
+ let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&default, &mut KeyPtr::from(root_key)); + let pulled = <BitStash as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(default, pulled); + SpreadLayout::clear_spread(&pulled, &mut KeyPtr::from(root_key)); + let invalid = + <BitStash as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + // We have to prevent calling its destructor since that would also panic but + // in an uncontrollable way. + let mut invalid = core::mem::ManuallyDrop::new(invalid); + // Now interact with invalid instance. + let _ = invalid.put(); + Ok(()) + }) + .unwrap() +} diff --git a/core/src/storage2/collections/bitvec/bitref.rs b/core/src/storage2/collections/bitvec/bitref.rs new file mode 100644 index 00000000000..eb7f85cf59f --- /dev/null +++ b/core/src/storage2/collections/bitvec/bitref.rs @@ -0,0 +1,196 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(clippy::len_without_is_empty)] + +use super::{ + Bits256, + Index256, +}; + +/// A mutable bit access for operating on a single bit within a 256-bit pack. +#[derive(Debug)] +pub struct BitRefMut<'a> { + /// The queried pack of 256 bits. + bits: &'a mut Bits256, + /// The bit position within the queried bit pack. + at: u8, +} + +impl<'a> PartialEq for BitRefMut<'a> { + fn eq(&self, other: &Self) -> bool { + self.get() == other.get() + } +} + +impl<'a> Eq for BitRefMut<'a> {} + +impl<'a> BitRefMut<'a> { + /// Creates a new bit access for the indexed bit within the 256-bit pack. + pub(super) fn new(bits: &'a mut Bits256, at: Index256) -> Self { + Self { bits, at } + } + + /// Returns the value of the indexed bit. + /// + /// # Note + /// + /// - If 0: returns `false` + /// - If 1: returns `true` + pub fn get(&self) -> bool { + self.bits.get(self.at) + } + + /// Sets the value of the indexed bit to the given new value. + pub fn set_to(&mut self, new_value: bool) { + self.bits.set_to(self.at, new_value) + } + + /// Sets the indexed bit to `1` (true). + pub fn set(&mut self) { + self.bits.set(self.at) + } + + /// Resets the indexed bit to `0` (false). + pub fn reset(&mut self) { + self.bits.reset(self.at) + } + + /// Flips the indexed bit. + pub fn flip(&mut self) { + self.bits.flip(self.at) + } + + /// Computes bitwise XOR for the indexed bit and `rhs`. + pub fn xor(&mut self, rhs: bool) { + self.bits.xor(self.at, rhs) + } + + /// Computes bitwise AND for the indexed bit and `rhs`. + pub fn and(&mut self, rhs: bool) { + self.bits.and(self.at, rhs) + } + + /// Computes bitwise OR for the indexed bit and `rhs`.
+ pub fn or(&mut self, rhs: bool) { + self.bits.or(self.at, rhs) + } +} + +#[cfg(test)] +mod tests { + use super::BitRefMut; + use crate::storage2::collections::bitvec::Bits256; + + fn is_populated_bit_set(index: u8) -> bool { + (index % 5) == 0 || (index % 13) == 0 + } + + fn populated_bits256() -> Bits256 { + let mut bits256 = Bits256::default(); + for i in 0..256 { + let i = i as u8; + bits256.set_to(i, is_populated_bit_set(i)); + } + bits256 + } + + #[test] + fn get_set_works() { + let mut bits256 = populated_bits256(); + for i in 0..=255 { + let mut bitref = BitRefMut::new(&mut bits256, i); + let expected = is_populated_bit_set(i); + assert_eq!(bitref.get(), expected); + // Set only every second bit to true and check this later: + bitref.set_to(i % 2 == 0); + } + // Check if `set_to` was successful: + for i in 0..=255 { + assert_eq!(bits256.get(i), i % 2 == 0); + } + } + + #[test] + fn flip_works() { + let mut bits256 = populated_bits256(); + for i in 0..=255 { + let mut bitref = BitRefMut::new(&mut bits256, i); + bitref.flip(); + } + // Check if `flip` was successful: + for i in 0..=255 { + assert_eq!(bits256.get(i), !is_populated_bit_set(i)); + } + } + + #[test] + fn set_and_reset_works() { + let mut bits256 = populated_bits256(); + for i in 0..=255 { + let mut bitref = BitRefMut::new(&mut bits256, i); + if i % 2 == 0 { + bitref.set(); + } else { + bitref.reset(); + } + } + // Check if `set` and `reset` was successful: + for i in 0..=255 { + assert_eq!(bits256.get(i), i % 2 == 0); + } + } + + #[test] + fn bitops_works() { + let mut bits256 = populated_bits256(); + for i in 0..=255 { + let mut bitref = BitRefMut::new(&mut bits256, i); + let expected = is_populated_bit_set(i); + fn test_xor(bitref: &mut BitRefMut, expected: bool) { + fn test_xor_for(bitref: &mut BitRefMut, expected: bool, input: bool) { + assert_eq!(bitref.get(), expected); + bitref.xor(input); + assert_eq!(bitref.get(), expected ^ input); + bitref.set_to(expected); + } + test_xor_for(bitref, expected, false); + test_xor_for(bitref, expected, true); + } + test_xor(&mut bitref, expected); + fn test_and(bitref: &mut BitRefMut, expected: bool) { + fn test_and_for(bitref: &mut BitRefMut, expected: bool, input: bool) { + assert_eq!(bitref.get(), expected); + bitref.and(input); + assert_eq!(bitref.get(), expected & input); + bitref.set_to(expected); + } + test_and_for(bitref, expected, false); + test_and_for(bitref, expected, true); + } + test_and(&mut bitref, expected); + fn test_or(bitref: &mut BitRefMut, expected: bool) { + fn test_or_for(bitref: &mut BitRefMut, expected: bool, input: bool) { + assert_eq!(bitref.get(), expected); + bitref.or(input); + assert_eq!(bitref.get(), expected | input); + bitref.set_to(expected); + } + test_or_for(bitref, expected, false); + test_or_for(bitref, expected, true); + } + test_or(&mut bitref, expected); + } + } +} diff --git a/core/src/storage2/collections/bitvec/bits256.rs b/core/src/storage2/collections/bitvec/bits256.rs new file mode 100644 index 00000000000..b5d34d7cfb4 --- /dev/null +++ b/core/src/storage2/collections/bitvec/bits256.rs @@ -0,0 +1,380 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + super::extend_lifetime, + BitRefMut, + Bits64, + Index256, + Index64, +}; + +/// A chunk of 256 bits. +#[derive(Debug, Copy, Clone, PartialEq, Eq, scale::Encode, scale::Decode)] +pub struct Bits256 { + bits: [Bits64; 4], +} + +impl Default for Bits256 { + fn default() -> Self { + Self { + bits: Default::default(), + } + } +} + +/// Iterator over the valid bits of a pack of 256 bits. +#[derive(Debug, Copy, Clone)] +pub struct Iter<'a> { + bits: &'a Bits256, + start: u16, + end: u16, +} + +impl<'a> Iter<'a> { + fn new(bits256: &'a Bits256, len: u16) -> Self { + Self { + bits: bits256, + start: 0, + end: len, + } + } + + fn remaining(&self) -> u16 { + self.end - self.start + } +} + +impl<'a> ExactSizeIterator for Iter<'a> {} + +impl<'a> Iterator for Iter<'a> { + type Item = bool; + + fn next(&mut self) -> Option<Self::Item> { + <Self as Iterator>::nth(self, 0) + } + + fn nth(&mut self, n: usize) -> Option<Self::Item> { + assert!(n < 256); + let n = n as u16; + if self.start + n >= self.end { + return None + } + let start = self.start + n; + self.start += 1 + n; + Some(self.bits.get(start as u8)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let remaining = self.remaining() as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining() as usize + } +} + +impl<'a> DoubleEndedIterator for Iter<'a> { + fn next_back(&mut self) -> Option<Self::Item> { + <Self as DoubleEndedIterator>::nth_back(self, 0) + } + + fn nth_back(&mut self, n: usize) -> Option<Self::Item> { + assert!(n < 256); + let n = n as u16; + if self.start + n >= self.end { + return None + } + self.end -= 1 + n; + Some(self.bits.get(self.end as u8)) + } +} + +/// Iterator over the valid mutable bits of a pack of 256 bits. +#[derive(Debug)] +pub struct IterMut<'a> { + bits: &'a mut Bits256, + start: u16, + end: u16, +} + +impl<'a> IterMut<'a> { + fn new(bits256: &'a mut Bits256, len: u16) -> Self { + Self { + bits: bits256, + start: 0, + end: len, + } + } + + fn remaining(&self) -> u16 { + self.end - self.start + } + + /// Returns a bit access for the given index with extended but valid lifetimes.
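+ ///
+ /// # Note (editorial)
+ ///
+ /// The `unsafe` lifetime extension below relies on the iterator yielding
+ /// each bit position at most once, so no two returned `BitRefMut`s ever
+ /// refer to the same bit.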
+ fn get<'b>(&'b mut self, index: u8) -> BitRefMut<'a> { + unsafe { BitRefMut::new(extend_lifetime(&mut self.bits), index) } + } +} + +impl<'a> ExactSizeIterator for IterMut<'a> {} + +impl<'a> Iterator for IterMut<'a> { + type Item = BitRefMut<'a>; + + fn next(&mut self) -> Option<Self::Item> { + <Self as Iterator>::nth(self, 0) + } + + fn nth(&mut self, n: usize) -> Option<Self::Item> { + assert!(n < 256); + let n = n as u16; + if self.start + n >= self.end { + return None + } + let start = self.start + n; + self.start += 1 + n; + Some(self.get(start as u8)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let remaining = self.remaining() as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining() as usize + } +} + +impl<'a> DoubleEndedIterator for IterMut<'a> { + fn next_back(&mut self) -> Option<Self::Item> { + <Self as DoubleEndedIterator>::nth_back(self, 0) + } + + fn nth_back(&mut self, n: usize) -> Option<Self::Item> { + assert!(n < 256); + let n = n as u16; + if self.start + n >= self.end { + return None + } + self.end -= 1 + n; + Some(self.get(self.end as u8)) + } +} + +impl Bits256 { + fn bits_at(&self, index: Index256) -> (&u64, Index64) { + (&self.bits[(index / 64) as usize], index % 64) + } + + fn bits_at_mut(&mut self, index: Index256) -> (&mut u64, Index64) { + (&mut self.bits[(index / 64) as usize], index % 64) + } + + /// Yields the first `len` bits of the pack of 256 bits. + pub(super) fn iter(&self, len: u16) -> Iter { + Iter::new(self, len) + } + + /// Yields mutable accessors to the first `len` bits of the pack of 256 bits. + pub(super) fn iter_mut(&mut self, len: u16) -> IterMut { + IterMut::new(self, len) + } + + /// Returns the bit value for the bit at the given index. + pub fn get(&self, at: Index256) -> bool { + let (bits64, pos64) = self.bits_at(at); + bits64 & (0x01 << (63 - pos64)) != 0 + } + + /// Sets the bit value for the bit at the given index to the given value. + pub(super) fn set_to(&mut self, at: Index256, new_value: bool) { + if new_value { + self.set(at) + } else { + self.reset(at) + } + } + + /// Flips the bit value for the bit at the given index. + pub(super) fn flip(&mut self, at: Index256) { + self.xor(at, true) + } + + /// Sets the bit value for the bit at the given index to 1 (`true`). + pub(super) fn set(&mut self, at: Index256) { + self.or(at, true) + } + + /// Sets the bit value for the bit at the given index to 0 (`false`). + pub(super) fn reset(&mut self, at: Index256) { + self.and(at, false) + } + + fn op_at_with<F>(&mut self, at: Index256, rhs: bool, op: F) + where + F: FnOnce(&mut Bits64, Bits64), + { + let (bits64, pos64) = self.bits_at_mut(at); + let rhs = (rhs as u64) << (63 - pos64); + op(bits64, rhs); + } + + /// Computes bitwise AND for the bit at the given index and `rhs`. + pub(super) fn and(&mut self, at: Index256, rhs: bool) { + self.op_at_with(at, !rhs, |bits64, rhs| *bits64 &= !rhs) + } + + /// Computes bitwise OR for the bit at the given index and `rhs`. + pub(super) fn or(&mut self, at: Index256, rhs: bool) { + self.op_at_with(at, rhs, |bits64, rhs| *bits64 |= rhs) + } + + /// Computes bitwise XOR for the bit at the given index and `rhs`. + pub(super) fn xor(&mut self, at: Index256, rhs: bool) { + self.op_at_with(at, rhs, |bits64, rhs| *bits64 ^= rhs) + } + + /// Returns the position of the first zero bit if any.
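+ ///
+ /// A zero bit in a limb shows up as a one bit in `!limb`, so its offset
+ /// within the limb equals `(!limb).leading_zeros()` (editorial sketch):
+ ///
+ /// ```
+ /// let limb: u64 = 0b111 << 61; // bits 0..=2 set, bit 3 is the first zero
+ /// assert_eq!((!limb).leading_zeros(), 3);
+ /// ```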
+ pub fn position_first_zero(&self) -> Option<u8> { + let mut offset: u32 = 0; + for bits64 in &self.bits { + if *bits64 != !0 { + return Some(((!bits64).leading_zeros() + offset) as u8) + } + offset += 64; + } + None + } +} + +#[cfg(test)] +mod tests { + use super::Bits256; + + #[test] + fn default_works() { + assert_eq!( + Bits256::default(), + Bits256 { + bits: [0x00, 0x00, 0x00, 0x00], + } + ); + } + + fn populated_bits256() -> Bits256 { + let mut bits256 = Bits256::default(); + for i in 0..256 { + let i = i as u8; + bits256.set_to(i, (i % 5) == 0 || (i % 13) == 0); + } + bits256 + } + + #[test] + fn get_works() { + let bits256 = populated_bits256(); + for i in 0..256 { + let i = i as u8; + assert_eq!(bits256.get(i), (i % 5) == 0 || (i % 13) == 0); + } + } + + #[test] + fn set_works() { + let mut bits256 = populated_bits256(); + for i in 0..256 { + let i = i as u8; + bits256.set(i); + assert_eq!(bits256.get(i), true); + } + } + + #[test] + fn reset_works() { + let mut bits256 = populated_bits256(); + for i in 0..256 { + let i = i as u8; + bits256.reset(i); + assert_eq!(bits256.get(i), false); + } + } + + #[test] + fn and_works() { + let mut bits256 = populated_bits256(); + for i in 0..256 { + let i = i as u8; + bits256.and(i, i % 2 == 0); + assert_eq!( + bits256.get(i), + (i % 2) == 0 && ((i % 5) == 0 || (i % 13) == 0) + ); + } + } + + #[test] + fn or_works() { + let mut bits256 = populated_bits256(); + for i in 0..256 { + let i = i as u8; + bits256.or(i, i % 2 == 0); + assert_eq!( + bits256.get(i), + (i % 2) == 0 || (i % 5) == 0 || (i % 13) == 0 + ); + } + } + + #[test] + fn xor_works() { + let mut bits256 = populated_bits256(); + for i in 0..256 { + let i = i as u8; + bits256.xor(i, i % 2 == 0); + let a = (i % 2) == 0; + let b = (i % 5) == 0 || (i % 13) == 0; + assert_eq!(bits256.get(i), a != b); + } + } + + #[test] + fn position_first_zero_works() { + // Zero bits256: + let empty = Bits256::default(); + assert_eq!(empty.position_first_zero(), Some(0)); + // First bit is set: + let first_bit_is_set = Bits256 { + bits: [0x8000_0000_0000_0000, 0x00, 0x00, 0x00], + }; + assert_eq!(first_bit_is_set.position_first_zero(), Some(1)); + // Last bit is unset: + let last_bit_is_unset = Bits256 { + bits: [!0, !0, !0, !1], + }; + assert_eq!(last_bit_is_unset.position_first_zero(), Some(3 * 64 + 63)); + // Some middle bit is unset: + let middle_bit_is_unset = Bits256 { + bits: [!0, !0, !0xFFFF_FFFF, !1], + }; + assert_eq!(middle_bit_is_unset.position_first_zero(), Some(2 * 64 + 32)); + // All bits set: + let all_bits_set = Bits256 { + bits: [!0, !0, !0, !0], + }; + assert_eq!(all_bits_set.position_first_zero(), None); + } +} diff --git a/core/src/storage2/collections/bitvec/bitsref.rs b/core/src/storage2/collections/bitvec/bitsref.rs new file mode 100644 index 00000000000..f37b5b218c7 --- /dev/null +++ b/core/src/storage2/collections/bitvec/bitsref.rs @@ -0,0 +1,206 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#![allow(clippy::len_without_is_empty)] + +use super::{ + BitRefMut, + Bits256, + Bits256BitsIter, + Bits256BitsIterMut, +}; + +/// A reference to a subslice within a 256-bit chunk. +/// +/// This is a reference wrapper around either a shared 256-bit chunk +/// or an exclusive 256-bit chunk. It also prevents accesses to out-of-bounds +/// bits. +#[derive(Debug, Copy, Clone)] +#[repr(C)] // This is repr(C) to be on the safe side for the Deref impl. +pub struct ChunkRef<T> { + /// The reference to the 256-bits chunk. + bits: T, + /// The length of the accessible chunk area. + len: u32, +} + +impl<T> ChunkRef<T> { + /// Returns the length of the 256-bit chunk. + /// + /// # Note + /// + /// This is the number of valid bits in the chunk of 256 bits. + /// The valid bits are consecutive and always start from index 0. + pub fn len(&self) -> u32 { + self.len + } +} + +impl<'a> ChunkRef<&'a Bits256> { + /// Creates a new shared 256-bit chunk access with the given length. + pub(super) fn shared(bits: &'a Bits256, len: u32) -> Self { + Self { bits, len } + } + + /// Returns the position of the first valid zero bit if any. + pub fn position_first_zero(&self) -> Option<u8> { + let position = self.bits.position_first_zero()?; + if position as u32 >= self.len() { + return None + } + Some(position) + } + + /// Returns the value of the indexed bit. + /// + /// # Note + /// + /// - If 0: returns `false` + /// - If 1: returns `true` + pub fn get(&self, index: u8) -> Option<bool> { + if index as u32 >= self.len { + return None + } + self.bits.get(index).into() + } + + /// Returns an iterator over the valid bits of `self`. + pub(super) fn iter(&self) -> Bits256BitsIter { + self.bits.iter(self.len as u16) + } +} + +impl<'a> ChunkRef<&'a mut Bits256> { + /// Creates a new exclusive 256-bit chunk access with the given length. + pub(super) fn exclusive(bits: &'a mut Bits256, len: u32) -> Self { + Self { bits, len } + } + + /// Returns mutable access to the single bit at the given index. + /// + /// Returns `None` if the index is out of bounds. + pub fn get_mut(&mut self, index: u8) -> Option<BitRefMut> { + if index as u32 >= self.len { + return None + } + BitRefMut::new(self.bits, index).into() + } + + /// Returns an iterator over mutable accessors to the valid bits of `self`. + pub(super) fn iter_mut(&mut self) -> Bits256BitsIterMut { + self.bits.iter_mut(self.len as u16) + } +} + +impl<'a> core::ops::Deref for ChunkRef<&'a mut Bits256> { + type Target = ChunkRef<&'a Bits256>; + + fn deref(&self) -> &Self::Target { + // This implementation allows to mirror the interface on + // `ChunkRef<&'a Bits256>` onto `ChunkRef<&'a mut Bits256>` + // without the need of separate implementations. + // + // SAFETY: The `ChunkRef` struct is `repr(C)` which should guarantee + // that both `ChunkRef<&'a mut Bits256>` as well as + // `ChunkRef<&'a Bits256>` have the same internal layout + // and thus can be transmuted safely.
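+ //
+ // Editorial note: the conversion below goes through a raw pointer
+ // rather than `core::mem::transmute` so that only the pointee type
+ // changes; the address and lifetime of the reference stay untouched.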
+ let ptr: *const Self = self; + unsafe { &*(ptr as *const Self::Target) } + } +} + +#[cfg(test)] +mod tests { + use super::{ + Bits256, + ChunkRef, + }; + + fn is_populated_bit_set(index: u8) -> bool { + (index % 5) == 0 || (index % 13) == 0 + } + + fn populated_bits256() -> Bits256 { + let mut bits256 = Bits256::default(); + for i in 0..256 { + let i = i as u8; + bits256.set_to(i, is_populated_bit_set(i)); + } + bits256 + } + + #[test] + fn shared_works() { + let len: u8 = 100; + let bits = populated_bits256(); + let cref = ChunkRef::shared(&bits, len as u32); + assert_eq!(cref.len(), len as u32); + // Get works: + for i in 0..len { + assert_eq!(cref.get(i), Some(is_populated_bit_set(i))); + } + assert_eq!(cref.get(len), None); + // Iter works: + for (i, val) in cref.iter().enumerate() { + assert_eq!(val, is_populated_bit_set(i as u8)); + } + } + + #[test] + fn exclusive_works() { + let len: u8 = 100; + let mut bits = populated_bits256(); + let mut cref = ChunkRef::exclusive(&mut bits, len as u32); + assert_eq!(cref.len(), len as u32); + // `get` and `get_mut` works: + for i in 0..len { + assert_eq!(cref.get(i), Some(is_populated_bit_set(i))); + assert_eq!( + cref.get_mut(i).map(|br| br.get()), + Some(is_populated_bit_set(i)) + ); + } + assert_eq!(cref.get(len), None); + assert_eq!(cref.get_mut(len), None); + // `iter` works: + for (i, val) in cref.iter().enumerate() { + assert_eq!(val, is_populated_bit_set(i as u8)); + } + } + + #[test] + fn position_first_zero_works() { + let len = 256; + let mut zeros = Default::default(); + let mut cref = ChunkRef::exclusive(&mut zeros, len); + for i in 0..len { + assert_eq!(cref.position_first_zero(), Some(i as u8)); + cref.get_mut(i as u8).unwrap().set(); + } + // Now all bits are set to `1`: + assert_eq!(cref.position_first_zero(), None); + } + + #[test] + fn iter_mut_works() { + let len = 100; + let mut zeros = Default::default(); + let mut cref = ChunkRef::exclusive(&mut zeros, len); + // Initialize all bits with 0 and set them to 1 via `iter_mut`. + // Then check if they are 1: + for mut byte in cref.iter_mut() { + assert!(!byte.get()); + byte.set(); + } + assert!(cref.iter().all(|byte| byte)); + } +} diff --git a/core/src/storage2/collections/bitvec/impls.rs b/core/src/storage2/collections/bitvec/impls.rs new file mode 100644 index 00000000000..a9c03aa345d --- /dev/null +++ b/core/src/storage2/collections/bitvec/impls.rs @@ -0,0 +1,75 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{ + BitsIter, + Bitvec as StorageBitvec, +}; +use core::iter::FromIterator; + +impl Default for StorageBitvec { + fn default() -> Self { + Self::new() + } +} + +impl PartialEq for StorageBitvec { + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false + } + self.bits.eq(&other.bits) + } +} + +impl Eq for StorageBitvec {} + +impl Extend<bool> for StorageBitvec { + fn extend<T: IntoIterator<Item = bool>>(&mut self, iter: T) { + for value in iter { + self.push(value) + } + } +} + +impl<'a> Extend<&'a bool> for StorageBitvec { + fn extend<T: IntoIterator<Item = &'a bool>>(&mut self, iter: T) { + for value in iter { + self.push(*value) + } + } +} + +impl FromIterator<bool> for StorageBitvec { + fn from_iter<T: IntoIterator<Item = bool>>(iter: T) -> Self { + let mut bitvec = Self::default(); + bitvec.extend(iter); + bitvec + } +} + +impl<'a> FromIterator<&'a bool> for StorageBitvec { + fn from_iter<T: IntoIterator<Item = &'a bool>>(iter: T) -> Self { + <Self as FromIterator<bool>>::from_iter(iter.into_iter().copied()) + } +} + +impl<'a> IntoIterator for &'a StorageBitvec { + type Item = bool; + type IntoIter = BitsIter<'a>; + + fn into_iter(self) -> Self::IntoIter { + self.bits() + } +} diff --git a/core/src/storage2/collections/bitvec/iter.rs b/core/src/storage2/collections/bitvec/iter.rs new file mode 100644 index 00000000000..19a57c3375c --- /dev/null +++ b/core/src/storage2/collections/bitvec/iter.rs @@ -0,0 +1,310 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + super::extend_lifetime, + BitRefMut, + Bits256, + Bits256BitsIter, + Bits256BitsIterMut, + Bitvec as StorageBitvec, + ChunkRef, +}; +use crate::storage2::collections::vec::{ + Iter as StorageVecIter, + IterMut as StorageVecIterMut, +}; +use core::cmp::min; + +/// Iterator over the bits of a storage bit vector. +#[derive(Debug, Copy, Clone)] +pub struct BitsIter<'a> { + remaining: u32, + bits256_iter: Bits256Iter<'a>, + front_iter: Option<Bits256BitsIter<'a>>, + back_iter: Option<Bits256BitsIter<'a>>, +} + +impl<'a> BitsIter<'a> { + /// Creates a new iterator yielding the bits of the storage bit vector.
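+ ///
+ /// (Editorial note: `front_iter` and `back_iter` hold the partially
+ /// consumed 256-bit chunks at either end while `bits256_iter` advances
+ /// over whole chunks; `remaining` tracks the exact number of unyielded
+ /// bits so that `size_hint` and `count` stay exact.)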
+ pub(super) fn new(bitvec: &'a StorageBitvec) -> Self { + Self { + remaining: bitvec.len(), + bits256_iter: bitvec.iter_chunks(), + front_iter: None, + back_iter: None, + } + } +} + +impl<'a> ExactSizeIterator for BitsIter<'a> {} + +impl<'a> Iterator for BitsIter<'a> { + type Item = bool; + + fn next(&mut self) -> Option<Self::Item> { + loop { + if let Some(ref mut front_iter) = self.front_iter { + if let front @ Some(_) = front_iter.next() { + self.remaining -= 1; + return front + } + } + match self.bits256_iter.next() { + None => { + if let Some(back) = self.back_iter.as_mut()?.next() { + self.remaining -= 1; + return Some(back) + } + return None + } + Some(ref mut front) => { + self.front_iter = Some(unsafe { extend_lifetime(front) }.iter()); + } + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let remaining = self.remaining as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining as usize + } +} + +impl<'a> DoubleEndedIterator for BitsIter<'a> { + fn next_back(&mut self) -> Option<Self::Item> { + loop { + if let Some(ref mut back_iter) = self.back_iter { + if let back @ Some(_) = back_iter.next_back() { + self.remaining -= 1; + return back + } + } + match self.bits256_iter.next_back() { + None => { + if let Some(front) = self.front_iter.as_mut()?.next_back() { + self.remaining -= 1; + return Some(front) + } + return None + } + Some(ref mut back) => { + self.back_iter = Some(unsafe { extend_lifetime(back) }.iter()); + } + } + } + } +} + +/// Iterator over the bits of a storage bit vector. +#[derive(Debug)] +pub struct BitsIterMut<'a> { + remaining: u32, + bits256_iter: Bits256IterMut<'a>, + front_iter: Option<Bits256BitsIterMut<'a>>, + back_iter: Option<Bits256BitsIterMut<'a>>, +} + +impl<'a> BitsIterMut<'a> { + /// Creates a new iterator yielding the bits of the storage bit vector. + pub(super) fn new(bitvec: &'a mut StorageBitvec) -> Self { + Self { + remaining: bitvec.len(), + bits256_iter: bitvec.iter_chunks_mut(), + front_iter: None, + back_iter: None, + } + } +} + +impl<'a> ExactSizeIterator for BitsIterMut<'a> {} + +impl<'a> Iterator for BitsIterMut<'a> { + type Item = BitRefMut<'a>; + + fn next(&mut self) -> Option<Self::Item> { + loop { + if let Some(ref mut front_iter) = self.front_iter { + if let front @ Some(_) = front_iter.next() { + self.remaining -= 1; + return front + } + } + match self.bits256_iter.next() { + None => { + if let Some(back) = self.back_iter.as_mut()?.next() { + self.remaining -= 1; + return Some(back) + } + return None + } + Some(ref mut front) => { + self.front_iter = Some(unsafe { extend_lifetime(front) }.iter_mut()); + } + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let remaining = self.remaining as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining as usize + } +} + +impl<'a> DoubleEndedIterator for BitsIterMut<'a> { + fn next_back(&mut self) -> Option<Self::Item> { + loop { + if let Some(ref mut back_iter) = self.back_iter { + if let back @ Some(_) = back_iter.next_back() { + self.remaining -= 1; + return back + } + } + match self.bits256_iter.next_back() { + None => { + if let Some(front) = self.front_iter.as_mut()?.next_back() { + self.remaining -= 1; + return Some(front) + } + return None + } + Some(ref mut back) => { + self.back_iter = Some(unsafe { extend_lifetime(back) }.iter_mut()); + } + } + } + } +} + +/// Iterator over the 256-bit chunks of a storage bitvector. +#[derive(Debug, Copy, Clone)] +pub struct Bits256Iter<'a> { + /// The storage vector iterator over the internal 256-bit chunks.
+ iter: StorageVecIter<'a, Bits256>, + /// The remaining bits to be yielded. + remaining: u32, +} + +impl<'a> Bits256Iter<'a> { + /// Creates a new 256-bit chunks iterator over the given storage bitvector. + pub(super) fn new(bitvec: &'a StorageBitvec) -> Self { + Self { + iter: bitvec.bits.iter(), + remaining: bitvec.len(), + } + } +} + +impl<'a> Iterator for Bits256Iter<'a> { + type Item = ChunkRef<&'a Bits256>; + + fn next(&mut self) -> Option<Self::Item> { + if self.remaining == 0 { + return None + } + let len = min(256, self.remaining); + self.remaining = self.remaining.saturating_sub(256); + self.iter + .next() + .map(|bits256| ChunkRef::shared(bits256, len)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } + + fn count(self) -> usize { + self.iter.count() + } +} + +impl<'a> DoubleEndedIterator for Bits256Iter<'a> { + fn next_back(&mut self) -> Option<Self::Item> { + if self.remaining == 0 { + return None + } + let mut len = self.remaining % 256; + if len == 0 { + len = 256; + } + self.remaining = self.remaining.saturating_sub(len); + self.iter + .next_back() + .map(|bits256| ChunkRef::shared(bits256, len)) + } +} + +impl<'a> ExactSizeIterator for Bits256Iter<'a> {} + +/// Iterator over mutable 256-bit chunks of a storage bitvector. +#[derive(Debug)] +pub struct Bits256IterMut<'a> { + /// The storage vector iterator over the internal mutable 256-bit chunks. + iter: StorageVecIterMut<'a, Bits256>, + /// The remaining bits to be yielded. + remaining: u32, +} + +impl<'a> Bits256IterMut<'a> { + /// Creates a new 256-bit chunks iterator over the given storage bitvector. + pub(super) fn new(bitvec: &'a mut StorageBitvec) -> Self { + Self { + remaining: bitvec.len(), + iter: bitvec.bits.iter_mut(), + } + } +} + +impl<'a> Iterator for Bits256IterMut<'a> { + type Item = ChunkRef<&'a mut Bits256>; + + fn next(&mut self) -> Option<Self::Item> { + if self.remaining == 0 { + return None + } + let len = min(256, self.remaining); + self.remaining = self.remaining.saturating_sub(256); + self.iter + .next() + .map(|bits256| ChunkRef::exclusive(bits256, len)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } + + fn count(self) -> usize { + self.iter.count() + } +} + +impl<'a> DoubleEndedIterator for Bits256IterMut<'a> { + fn next_back(&mut self) -> Option<Self::Item> { + if self.remaining == 0 { + return None + } + let mut len = self.remaining % 256; + if len == 0 { + len = 256; + } + self.remaining = self.remaining.saturating_sub(len); + self.iter + .next_back() + .map(|bits256| ChunkRef::exclusive(bits256, len)) + } +} + +impl<'a> ExactSizeIterator for Bits256IterMut<'a> {} diff --git a/core/src/storage2/collections/bitvec/mod.rs b/core/src/storage2/collections/bitvec/mod.rs new file mode 100644 index 00000000000..1f0c9b79ada --- /dev/null +++ b/core/src/storage2/collections/bitvec/mod.rs @@ -0,0 +1,307 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage bit vector data structure and utilities. +//! +//! Allows to compactly and efficiently store and manipulate single bits.
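+//!
+//! # Example
+//!
+//! An editorial usage sketch, assuming `Bitvec` is re-exported from
+//! `storage2::collections` as the sibling modules suggest:
+//!
+//! ```
+//! use ink_core::storage2::collections::Bitvec;
+//!
+//! let mut bv = Bitvec::new();
+//! bv.push(true);
+//! bv.push(false);
+//! assert_eq!(bv.len(), 2);
+//! assert_eq!(bv.get(0), Some(true));
+//! assert_eq!(bv.pop(), Some(false));
+//! assert_eq!(bv.len(), 1);
+//! ```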
+ +mod bitref; +mod bits256; +mod bitsref; +mod impls; +mod iter; +mod storage; + +#[cfg(test)] +mod tests; + +pub use self::{ + bitref::BitRefMut, + bitsref::ChunkRef, + iter::{ + BitsIter, + BitsIterMut, + }, +}; +use self::{ + bits256::{ + Bits256, + Iter as Bits256BitsIter, + IterMut as Bits256BitsIterMut, + }, + iter::{ + Bits256Iter, + Bits256IterMut, + }, +}; +use crate::storage2::{ + Lazy, + Vec as StorageVec, +}; + +/// The index of a bit pack within the bit vector. +type Index = u32; + +/// A bit position within a 256-bit package. +type Index256 = u8; + +/// A bit position within a `u64`. +type Index64 = u8; + +/// A pack of 64 bits. +type Bits64 = u64; + +/// A storage bit vector. +/// +/// # Note +/// +/// Organizes its bits in chunks of 256 bits. +/// Allows to `push`, `pop`, inspect and manipulate the underlying bits. +#[derive(Debug)] +pub struct Bitvec { + /// The length of the bit vector. + len: Lazy<u32>, + /// The bits of the bit vector. + /// + /// Organized in packs of 256 bits. + bits: StorageVec<Bits256>, +} + +impl Bitvec { + /// Creates a new empty bit vector. + pub fn new() -> Self { + Self { + len: Lazy::from(0), + bits: StorageVec::new(), + } + } + + /// Returns the length of the bit vector in bits. + pub fn len(&self) -> u32 { + *self.len + } + + /// Returns `true` if the bit vector is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the capacity of the bit vector in bits. + /// + /// # Note + /// + /// Returns a `u64` since it is always greater than or equal to `self.len()` + /// which itself returns a `u32`. + pub fn capacity(&self) -> u64 { + (self.bits.len() * 256) as u64 + } + + /// Returns an iterator over the bits of the storage bit vector. + pub fn bits(&self) -> BitsIter { + BitsIter::new(self) + } + + /// Returns an iterator over the mutable bits of the storage bit vector. + pub fn bits_mut(&mut self) -> BitsIterMut { + BitsIterMut::new(self) + } + + /// Returns an iterator over the 256-bit chunks of the storage bit vector. + pub(super) fn iter_chunks(&self) -> Bits256Iter { + Bits256Iter::new(self) + } + + /// Returns an iterator over the mutable 256-bit chunks of the storage bit vector. + pub(super) fn iter_chunks_mut(&mut self) -> Bits256IterMut { + Bits256IterMut::new(self) + } + + /// Splits the given index into a 256-bit pack index and bit position index. + fn split_index(&self, at: Index) -> Option<(Index, Index256)> { + if at >= self.len() { + return None + } + Some((at / 256, (at % 256) as u8)) + } + + /// Returns the immutable access pair to the underlying 256-bits pack and bit. + /// + /// Returns `None` if the given index is out of bounds. + fn get_bits256(&self, at: Index) -> Option<(&Bits256, Index256)> { + let (index, pos256) = self.split_index(at)?; + let bits256 = self.bits.get(index).expect("index is out of bounds"); + Some((bits256, pos256)) + } + + /// Returns the mutable access pair to the underlying 256-bits pack and bit. + /// + /// Returns `None` if the given index is out of bounds. + fn get_bits256_mut(&mut self, at: Index) -> Option<(&mut Bits256, Index256)> { + let (index, pos256) = self.split_index(at)?; + let bits256 = self.bits.get_mut(index).expect("index is out of bounds"); + Some((bits256, pos256)) + } + + /// Returns a mutable bit access to the bit at the given index if any. + fn get_access_mut(&mut self, at: Index) -> Option<BitRefMut> { + self.get_bits256_mut(at) + .map(|(bits256, pos256)| BitRefMut::new(bits256, pos256)) + } + + /// Returns the value of the bit at the given index if any.
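+ ///
+ /// # Example (editorial sketch)
+ ///
+ /// ```
+ /// # use ink_core::storage2::collections::Bitvec;
+ /// let mut bv = Bitvec::new();
+ /// bv.push(true);
+ /// assert_eq!(bv.get(0), Some(true));
+ /// assert_eq!(bv.get(1), None); // out of bounds
+ /// ```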
+ pub fn get(&self, at: Index) -> Option<bool> { + self.get_bits256(at) + .map(|(bits256, pos256)| bits256.get(pos256)) + } + + /// Returns a mutable bit access to the bit at the given index if any. + pub fn get_mut(&mut self, at: Index) -> Option<BitRefMut> { + self.get_access_mut(at) + } + + /// Returns a shared reference to the 256-bit chunk for the bit at the given index. + pub fn get_chunk(&self, at: Index) -> Option<ChunkRef<&Bits256>> { + if at >= self.len() { + return None + } + use core::cmp::min; + let chunk_id = at / 256; + let chunk_len = min(256, self.len() - at); + let bits256 = self.bits.get(chunk_id).expect("index is out of bounds"); + Some(ChunkRef::shared(bits256, chunk_len)) + } + + /// Returns an exclusive reference to the 256-bit chunk for the bit at the given index. + pub fn get_chunk_mut(&mut self, at: Index) -> Option<ChunkRef<&mut Bits256>> { + if at >= self.len() { + return None + } + use core::cmp::min; + let chunk_id = at / 256; + let chunk_len = min(256, self.len() - at); + let bits256 = self.bits.get_mut(chunk_id).expect("index is out of bounds"); + Some(ChunkRef::exclusive(bits256, chunk_len)) + } + + /// Returns the first bit of the bit vector. + /// + /// # Note + /// + /// Returns `None` if the bit vector is empty. + pub fn first(&self) -> Option<bool> { + if self.is_empty() { + return None + } + self.get(0) + } + + /// Returns a mutable bit access to the first bit of the bit vector. + /// + /// # Note + /// + /// Returns `None` if the bit vector is empty. + pub fn first_mut(&mut self) -> Option<BitRefMut> { + if self.is_empty() { + return None + } + self.get_access_mut(0) + } + + /// Returns the last bit of the bit vector. + /// + /// # Note + /// + /// Returns `None` if the bit vector is empty. + pub fn last(&self) -> Option<bool> { + if self.is_empty() { + return None + } + self.get(self.len() - 1) + } + + /// Returns a mutable bit access to the last bit of the bit vector. + /// + /// # Note + /// + /// Returns `None` if the bit vector is empty. + pub fn last_mut(&mut self) -> Option<BitRefMut> { + if self.is_empty() { + return None + } + self.get_access_mut(self.len() - 1) + } + + /// The maximum number of bits that can be pushed to a storage bit vector. + fn maximum_capacity(&self) -> u32 { + u32::MAX + } + + /// Pushes the given value onto the bit vector. + /// + /// # Note + /// + /// This increases the length of the bit vector. + /// + /// # Panics + /// + /// If the storage bit vector reached its maximum capacity. + pub fn push(&mut self, value: bool) { + assert!( + self.len() < self.maximum_capacity(), + "reached maximum capacity for storage bit vector" + ); + if self.len() as u64 == self.capacity() { + // Case: All 256-bits packs are full or there are none: + // Need to push another 256-bit pack to the storage vector. + let mut bits256 = Bits256::default(); + if value { + // If `value` is `true` set its first bit to `1`. + bits256.set(0); + debug_assert_eq!(bits256.get(0), true); + }; + self.bits.push(bits256); + *self.len += 1; + } else { + // Case: The last 256-bit pack has unused bits: + // - Set last bit of last 256-bit pack to the given value. + // - Opt.: Since bits are initialized as 0 we only need + // to mutate this value if `value` is `true`. + *self.len += 1; + if value { + self.last_mut() + .expect("must have at least a valid bit in this case") + .set() + } + } + } + + /// Pops the last bit from the bit vector. + /// + /// Returns the popped bit as `bool`. + /// + /// # Note + /// + /// This reduces the length of the bit vector by one.
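+ /// Popping only decrements the length; the underlying 256-bit chunk is
+ /// kept, so `capacity()` does not shrink (editorial note, consistent
+ /// with the `pop_works` test below).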
+ pub fn pop(&mut self) -> Option<bool> { + if self.is_empty() { + // Bail out early if the bit vector is empty. + return None + } + let mut access = self.last_mut().expect("must be some if non-empty"); + let popped = access.get(); + access.reset(); + *self.len -= 1; + Some(popped) + } +} diff --git a/core/src/storage2/collections/bitvec/storage.rs b/core/src/storage2/collections/bitvec/storage.rs new file mode 100644 index 00000000000..0070e835ffd --- /dev/null +++ b/core/src/storage2/collections/bitvec/storage.rs @@ -0,0 +1,75 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + Bits256, + Bitvec as StorageBitvec, +}; +use crate::storage2::{ + traits::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + KeyPtr, + PackedLayout, + SpreadLayout, + }, + Pack, + Vec as StorageVec, +}; +use ink_primitives::Key; + +impl SpreadLayout for Bits256 { + const FOOTPRINT: u64 = 1; + const REQUIRES_DEEP_CLEAN_UP: bool = false; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::<Self>(ptr) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::<Self>(self, ptr) + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::<Self>(self, ptr) + } +} + +impl PackedLayout for Bits256 { + fn pull_packed(&mut self, _at: &Key) {} + fn push_packed(&self, _at: &Key) {} + fn clear_packed(&self, _at: &Key) {} +} + +impl SpreadLayout for StorageBitvec { + const FOOTPRINT: u64 = 1 + <StorageVec<Pack<Bits256>> as SpreadLayout>::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self { + len: SpreadLayout::pull_spread(ptr), + bits: SpreadLayout::pull_spread(ptr), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::push_spread(&self.len, ptr); + SpreadLayout::push_spread(&self.bits, ptr); + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::clear_spread(&self.len, ptr); + SpreadLayout::clear_spread(&self.bits, ptr); + } +} diff --git a/core/src/storage2/collections/bitvec/tests.rs b/core/src/storage2/collections/bitvec/tests.rs new file mode 100644 index 00000000000..9a163487a45 --- /dev/null +++ b/core/src/storage2/collections/bitvec/tests.rs @@ -0,0 +1,215 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
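+// Editorial note: the tests below exercise the `SpreadLayout` impl from
+// `storage.rs` above: `push_spread` writes `len` into the root storage cell
+// and the 256-bit chunks under the following cells, `pull_spread` lazily
+// reads them back, and `clear_spread` wipes both regions.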
+ +use super::Bitvec as StorageBitvec; +use crate::{ + env, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, +}; +use ink_primitives::Key; + +#[test] +fn new_default_works() { + // Check if `Bitvec::new` works: + let mut bitvec = StorageBitvec::new(); + assert_eq!(bitvec.len(), 0); + assert_eq!(bitvec.capacity(), 0); + assert!(bitvec.is_empty()); + assert_eq!(bitvec.bits().next(), None); + assert_eq!(bitvec.get(0), None); + assert!(bitvec.first().is_none()); + assert!(bitvec.first_mut().is_none()); + assert!(bitvec.last().is_none()); + assert!(bitvec.last_mut().is_none()); + // Check if `Bitvec::default` works: + let mut default = StorageBitvec::default(); + assert_eq!(default.len(), 0); + assert_eq!(default.capacity(), 0); + assert!(default.is_empty()); + assert_eq!(default.bits().next(), None); + assert_eq!(default.get(0), None); + assert!(default.first().is_none()); + assert!(default.first_mut().is_none()); + assert!(default.last().is_none()); + assert!(default.last_mut().is_none()); + // Check if both are equal: + assert_eq!(bitvec, default); +} + +/// Creates a storage bitvector where every bit whose index is a multiple of +/// 5 or of 13 is set to `1` (true). The bitvector has a total length of 600 +/// bits which requires it to have 3 chunks of 256-bit giving a capacity of +/// 768 bits. +fn bitvec_600() -> StorageBitvec { + let bitvec = (0..600) + .map(|i| (i % 5) == 0 || (i % 13) == 0) + .collect::<StorageBitvec>(); + assert_eq!(bitvec.len(), 600); + assert_eq!(bitvec.capacity(), 768); + bitvec +} + +#[test] +fn get_works() { + let mut bitvec = bitvec_600(); + for i in 0..bitvec.len() { + assert_eq!(bitvec.get(i), Some((i % 5) == 0 || (i % 13) == 0)); + assert_eq!( + bitvec.get_mut(i).map(|b| b.get()), + Some((i % 5) == 0 || (i % 13) == 0) + ); + } +} + +#[test] +fn iter_next_works() { + let bitvec = bitvec_600(); + // Test iterator over read-only bits. + for (i, bit) in bitvec.bits().enumerate() { + assert_eq!(bit, (i % 5) == 0 || (i % 13) == 0); + } + // Test iterator over mutable accessors to bits. + let mut bitvec = bitvec; + for (i, accessor) in bitvec.bits_mut().enumerate() { + assert_eq!(accessor.get(), (i % 5) == 0 || (i % 13) == 0); + } +} + +#[test] +fn iter_next_back_works() { + let bitvec = bitvec_600(); + // Test iterator over read-only bits. + for (i, bit) in bitvec.bits().enumerate().rev() { + assert_eq!(bit, (i % 5) == 0 || (i % 13) == 0); + } + // Test iterator over mutable accessors to bits.
+ let mut bitvec = bitvec; + for (i, accessor) in bitvec.bits_mut().enumerate().rev() { + assert_eq!(accessor.get(), (i % 5) == 0 || (i % 13) == 0); + } +} + +#[test] +fn double_ended_iter_works() { + let mut bitvec = StorageBitvec::default(); + bitvec.push(true); + bitvec.push(true); + bitvec.push(true); + + let mut iter = bitvec.bits(); + assert_eq!(Some(true), iter.next()); + assert_eq!(Some(true), iter.next_back()); + assert_eq!(Some(true), iter.next()); + assert_eq!(None, iter.next()); + assert_eq!(None, iter.next_back()); +} + +#[test] +fn push_works() { + let mut bitvec = StorageBitvec::new(); + assert_eq!(bitvec.len(), 0); + assert_eq!(bitvec.capacity(), 0); + // Push `1` + bitvec.push(true); + assert_eq!(bitvec.len(), 1); + assert_eq!(bitvec.capacity(), 256); + assert_eq!(bitvec.first(), Some(true)); + assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); + assert_eq!(bitvec.last(), Some(true)); + assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(true)); + // Push `0` + bitvec.push(false); + assert_eq!(bitvec.len(), 2); + assert_eq!(bitvec.capacity(), 256); + assert_eq!(bitvec.first(), Some(true)); + assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); + assert_eq!(bitvec.last(), Some(false)); + assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(false)); + // Push `1` + bitvec.push(true); + assert_eq!(bitvec.len(), 3); + assert_eq!(bitvec.capacity(), 256); + assert_eq!(bitvec.first(), Some(true)); + assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); + assert_eq!(bitvec.last(), Some(true)); + assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(true)); +} + +#[test] +fn pop_works() { + let mut bitvec = [true, false, true].iter().collect::<StorageBitvec>(); + assert_eq!(bitvec.len(), 3); + assert_eq!(bitvec.capacity(), 256); + // Pop `1` (true) + assert_eq!(bitvec.pop(), Some(true)); + assert_eq!(bitvec.len(), 2); + assert_eq!(bitvec.capacity(), 256); + assert_eq!(bitvec.first(), Some(true)); + assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); + assert_eq!(bitvec.last(), Some(false)); + assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(false)); + // Pop `0` (false) + assert_eq!(bitvec.pop(), Some(false)); + assert_eq!(bitvec.len(), 1); + assert_eq!(bitvec.capacity(), 256); + assert_eq!(bitvec.first(), Some(true)); + assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); + assert_eq!(bitvec.last(), Some(true)); + assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(true)); + // Pop `1` (true) + assert_eq!(bitvec.pop(), Some(true)); + assert_eq!(bitvec.len(), 0); + assert_eq!(bitvec.capacity(), 256); + assert!(bitvec.first().is_none()); + assert!(bitvec.first_mut().is_none()); + assert!(bitvec.last().is_none()); + assert!(bitvec.last_mut().is_none()); +} + +#[test] +fn spread_layout_push_pull_works() -> env::Result<()> { + env::test::run_test::<env::DefaultEnvTypes, _>(|_| { + let bv1 = bitvec_600(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&bv1, &mut KeyPtr::from(root_key)); + // Load the pushed storage vector into another instance and check that + // both instances are equal: + let bv2 = + <StorageBitvec as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(bv1, bv2); + Ok(()) + }) +} + +#[test] +#[should_panic(expected = "encountered empty storage cell")] +fn spread_layout_clear_works() { + env::test::run_test::<env::DefaultEnvTypes, _>(|_| { + let bv1 = bitvec_600(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&bv1, &mut KeyPtr::from(root_key)); + // It has already been
asserted that a valid instance can be pulled + // from contract storage after a push to the same storage region. + // + // Now clear the associated storage from `bv1` and check whether + // loading another instance from this storage will panic since the + // vector's length property cannot read a value: + SpreadLayout::clear_spread(&bv1, &mut KeyPtr::from(root_key)); + let _ = <StorageBitvec as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + Ok(()) + }) + .unwrap() +} diff --git a/core/src/storage2/collections/boxed/impls.rs b/core/src/storage2/collections/boxed/impls.rs new file mode 100644 index 00000000000..c9befed3020 --- /dev/null +++ b/core/src/storage2/collections/boxed/impls.rs @@ -0,0 +1,144 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::Box as StorageBox; +use crate::storage2::traits::{ + clear_spread_root, + SpreadLayout, +}; + +impl<T> Drop for StorageBox<T> +where + T: SpreadLayout, +{ + fn drop(&mut self) { + clear_spread_root::<T>(self, &self.allocation.key()); + crate::storage2::alloc::free(self.allocation); + } +} + +impl<T> core::cmp::PartialEq for StorageBox<T> +where + T: PartialEq + SpreadLayout, +{ + fn eq(&self, other: &Self) -> bool { + PartialEq::eq(StorageBox::get(self), StorageBox::get(other)) + } +} + +impl<T> core::cmp::Eq for StorageBox<T> where T: Eq + SpreadLayout {} + +impl<T> core::cmp::PartialOrd for StorageBox<T> +where + T: PartialOrd + SpreadLayout, +{ + fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { + PartialOrd::partial_cmp(StorageBox::get(self), StorageBox::get(other)) + } + fn lt(&self, other: &Self) -> bool { + PartialOrd::lt(StorageBox::get(self), StorageBox::get(other)) + } + fn le(&self, other: &Self) -> bool { + PartialOrd::le(StorageBox::get(self), StorageBox::get(other)) + } + fn ge(&self, other: &Self) -> bool { + PartialOrd::ge(StorageBox::get(self), StorageBox::get(other)) + } + fn gt(&self, other: &Self) -> bool { + PartialOrd::gt(StorageBox::get(self), StorageBox::get(other)) + } +} + +impl<T> core::cmp::Ord for StorageBox<T> +where + T: core::cmp::Ord + SpreadLayout, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + Ord::cmp(StorageBox::get(self), StorageBox::get(other)) + } +} + +impl<T> core::fmt::Display for StorageBox<T> +where + T: core::fmt::Display + SpreadLayout, +{ + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + core::fmt::Display::fmt(StorageBox::get(self), f) + } +} + +impl<T> core::hash::Hash for StorageBox<T> +where + T: core::hash::Hash + SpreadLayout, +{ + fn hash<H: core::hash::Hasher>(&self, state: &mut H) { + StorageBox::get(self).hash(state) + } +} + +impl<T> core::convert::AsRef<T> for StorageBox<T> +where + T: SpreadLayout, +{ + fn as_ref(&self) -> &T { + StorageBox::get(self) + } +} + +impl<T> core::convert::AsMut<T> for StorageBox<T> +where + T: SpreadLayout, +{ + fn as_mut(&mut self) -> &mut T { + StorageBox::get_mut(self) + } +} + +impl<T> ink_prelude::borrow::Borrow<T> for StorageBox<T> +where + T: SpreadLayout, +{ + fn borrow(&self) -> &T { + StorageBox::get(self) + } +} + +impl<T> ink_prelude::borrow::BorrowMut<T>
for StorageBox<T>
+where
+    T: SpreadLayout,
+{
+    fn borrow_mut(&mut self) -> &mut T {
+        StorageBox::get_mut(self)
+    }
+}
+
+impl<T> core::ops::Deref for StorageBox<T>
+where
+    T: SpreadLayout,
+{
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        StorageBox::get(self)
+    }
+}
+
+impl<T> core::ops::DerefMut for StorageBox<T>
+where
+    T: SpreadLayout,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        StorageBox::get_mut(self)
+    }
+}
diff --git a/core/src/storage2/collections/boxed/mod.rs b/core/src/storage2/collections/boxed/mod.rs
new file mode 100644
index 00000000000..1f749a5660a
--- /dev/null
+++ b/core/src/storage2/collections/boxed/mod.rs
@@ -0,0 +1,120 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A dynamically allocated storage entity.
+//!
+//! Users can use this in order to make certain `SpreadLayout` storage entities
+//! usable in contexts that require a `PackedLayout` storage entity by simply
+//! packing the storage entity within a `storage::Box`.
+//!
+//! Dynamic allocations caused by the creation of `storage::Box` instances do
+//! incur some limited overhead:
+//!
+//! - The dynamic allocation itself has to be provided by some dynamic storage
+//!   allocator that needs to be invoked.
+//! - Each dynamic storage allocation implies roughly 1.12 bits of overhead.
+//! - Upon the first dereferencing of a `storage::Box` instance a cryptographic
+//!   hash routine is run in order to compute the underlying storage key.
+//!
+//! Use this abstraction with caution due to the aforementioned performance
+//! implications.
+
+mod impls;
+mod storage;
+
+#[cfg(test)]
+mod tests;
+
+use crate::storage2::{
+    alloc::{
+        alloc,
+        DynamicAllocation,
+    },
+    lazy::Lazy,
+    traits::SpreadLayout,
+};
+use ink_primitives::Key;
+
+/// An indirection to some dynamically allocated storage entity.
+#[derive(Debug)]
+pub struct Box<T>
+where
+    T: SpreadLayout,
+{
+    /// The storage area where the boxed storage entity is stored.
+    allocation: DynamicAllocation,
+    /// The cache for the boxed storage entity.
+    value: Lazy<T>,
+}
+
+impl<T> Box<T>
+where
+    T: SpreadLayout,
+{
+    /// Creates a new boxed entity.
+    pub fn new(value: T) -> Self {
+        Self {
+            allocation: alloc(),
+            value: Lazy::new(value),
+        }
+    }
+
+    /// Creates a new boxed entity that has not yet loaded its value.
+    fn lazy(allocation: DynamicAllocation) -> Self {
+        Self {
+            allocation,
+            value: Lazy::lazy(allocation.key()),
+        }
+    }
+
+    /// Returns the underlying storage key for the dynamically allocated entity.
+    fn key(&self) -> Key {
+        self.allocation.key()
+    }
+}
+
+impl<T> Box<T>
+where
+    T: SpreadLayout,
+{
+    /// Returns a shared reference to the boxed value.
+    ///
+    /// # Note
+    ///
+    /// This loads the value from the pointed-to contract storage
+    /// if this did not happen before.
+    ///
+    /// # Panics
+    ///
+    /// If loading from contract storage failed.
+    #[must_use]
+    pub fn get(boxed: &Self) -> &T {
+        &boxed.value
+    }
+
+    /// Returns an exclusive reference to the boxed value.
+ /// + /// # Note + /// + /// This loads the value from the pointed to contract storage + /// if this did not happen before. + /// + /// # Panics + /// + /// If loading from contract storage failed. + #[must_use] + pub fn get_mut(boxed: &mut Self) -> &mut T { + &mut boxed.value + } +} diff --git a/core/src/storage2/collections/boxed/storage.rs b/core/src/storage2/collections/boxed/storage.rs new file mode 100644 index 00000000000..83e97437311 --- /dev/null +++ b/core/src/storage2/collections/boxed/storage.rs @@ -0,0 +1,94 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::Box as StorageBox; +use crate::storage2::{ + alloc::DynamicAllocation, + traits::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + KeyPtr, + PackedLayout, + SpreadLayout, + }, +}; +use ink_prelude::vec::Vec; +use ink_primitives::Key; + +impl SpreadLayout for StorageBox +where + T: SpreadLayout, +{ + const FOOTPRINT: u64 = 1; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(&self, ptr) + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(&self, ptr) + } +} + +impl scale::Encode for StorageBox +where + T: SpreadLayout, +{ + fn size_hint(&self) -> usize { + ::size_hint(&self.allocation) + } + + fn encode_to(&self, dest: &mut O) { + ::encode_to(&self.allocation, dest) + } + + fn encode(&self) -> Vec { + ::encode(&self.allocation) + } + + fn using_encoded R>(&self, f: F) -> R { + ::using_encoded(&self.allocation, f) + } +} + +impl scale::Decode for StorageBox +where + T: SpreadLayout, +{ + fn decode(value: &mut I) -> Result { + Ok(StorageBox::lazy( + ::decode(value)?, + )) + } +} + +impl PackedLayout for StorageBox +where + T: SpreadLayout, +{ + fn pull_packed(&mut self, _at: &Key) {} + + fn push_packed(&self, _at: &Key) { + ::push_spread(Self::get(self), &mut KeyPtr::from(self.key())) + } + + fn clear_packed(&self, _at: &Key) { + ::clear_spread(Self::get(self), &mut KeyPtr::from(self.key())) + } +} diff --git a/core/src/storage2/collections/boxed/tests.rs b/core/src/storage2/collections/boxed/tests.rs new file mode 100644 index 00000000000..04933cf62b4 --- /dev/null +++ b/core/src/storage2/collections/boxed/tests.rs @@ -0,0 +1,218 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
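
Aside: a minimal usage sketch of the `storage::Box` API defined above. The `ink_core` crate path, the function name, and the surrounding environment are assumptions for illustration and are not part of this patch; like the tests below, the code would require the off-chain test environment to be initialized first.

    // Hypothetical sketch: packing a `SpreadLayout`-only entity such as
    // `storage::Vec` behind a `storage::Box` so the combination can be
    // used where a `PackedLayout` entity is required.
    use ink_core::storage2::collections::{
        Box as StorageBox,
        Vec as StorageVec,
    };

    fn boxed_vec_example() {
        // `Box::new` reserves a dynamic allocation and caches the value.
        let mut boxed: StorageBox<StorageVec<u8>> =
            StorageBox::new(StorageVec::new());
        // `Deref`/`DerefMut` forward to `Box::get`/`Box::get_mut`, lazily
        // loading the boxed value from contract storage on first access.
        boxed.push(1);
        boxed.push(2);
        assert_eq!(boxed.len(), 2);
        assert_eq!(boxed.first(), Some(&1));
    }
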
+ +use super::Box as StorageBox; +use crate::{ + env, + env::test::DefaultAccounts, + storage2::{ + alloc::{ + initialize_for, + ContractPhase, + }, + traits::{ + KeyPtr, + SpreadLayout, + }, + Pack, + }, +}; +use core::{ + cmp::Ordering, + convert::{ + AsMut, + AsRef, + }, + ops::{ + Deref, + DerefMut, + }, +}; +use ink_prelude::borrow::{ + Borrow, + BorrowMut, +}; +use ink_primitives::Key; + +fn run_test(f: F) +where + F: FnOnce(DefaultAccounts), +{ + env::test::run_test::(|default_accounts| { + initialize_for(ContractPhase::Deploy); + f(default_accounts); + Ok(()) + }) + .unwrap() +} + +#[test] +fn new_works() { + run_test(|_| { + let mut expected = 1; + let mut boxed = StorageBox::new(expected); + assert_eq!(StorageBox::get(&boxed), &expected); + assert_eq!(StorageBox::get_mut(&mut boxed), &mut expected); + assert_eq!(Deref::deref(&boxed), &expected); + assert_eq!(DerefMut::deref_mut(&mut boxed), &mut expected); + assert_eq!(AsRef::as_ref(&boxed), &expected); + assert_eq!(AsMut::as_mut(&mut boxed), &mut expected); + assert_eq!(Borrow::::borrow(&boxed), &expected); + assert_eq!(BorrowMut::::borrow_mut(&mut boxed), &mut expected); + }) +} + +#[test] +fn partial_eq_works() { + run_test(|_| { + let b1 = StorageBox::new(b'X'); + let b2 = StorageBox::new(b'Y'); + let b3 = StorageBox::new(b'X'); + assert!( as PartialEq>::ne(&b1, &b2)); + assert!( as PartialEq>::eq(&b1, &b3)); + }) +} + +#[test] +fn partial_ord_works() { + run_test(|_| { + let b1 = StorageBox::new(1); + let b2 = StorageBox::new(2); + let b3 = StorageBox::new(1); + assert_eq!( + as PartialOrd>::partial_cmp(&b1, &b2), + Some(Ordering::Less) + ); + assert_eq!( + as PartialOrd>::partial_cmp(&b2, &b1), + Some(Ordering::Greater) + ); + assert_eq!( + as PartialOrd>::partial_cmp(&b1, &b3), + Some(Ordering::Equal) + ); + }) +} + +#[test] +fn spread_layout_push_pull_works() { + run_test(|_| { + let b1 = StorageBox::new(b'A'); + assert_eq!(*b1, b'A'); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&b1, &mut KeyPtr::from(root_key)); + // Now load another instance of storage box from the same key and check + // if both instances are equal: + let b2 = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(b1, b2); + // We have to forget one of the storage boxes because we otherwise get + // a double free panic since their `Drop` implementations both try to + // free the same dynamic allocation. + core::mem::forget(b2); + }) +} + +#[test] +#[should_panic(expected = "storage entry was empty")] +fn spread_layout_clear_works() { + run_test(|_| { + let b1 = StorageBox::new(b'A'); + assert_eq!(*b1, b'A'); + let root_key = Key([0x42; 32]); + // Manually clear the storage of `b1`. Then another load from the same + // key region should panic since the entry is empty: + SpreadLayout::push_spread(&b1, &mut KeyPtr::from(root_key)); + SpreadLayout::clear_spread(&b1, &mut KeyPtr::from(root_key)); + let b2: StorageBox = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); + // We have to forget one of the storage boxes because we otherwise get + // a double free panic since their `Drop` implementations both try to + // free the same dynamic allocation. 
+        core::mem::forget(b2);
+    })
+}
+
+#[test]
+fn packed_layout_works() {
+    run_test(|_| {
+        let p1 = Pack::new((StorageBox::new(b'A'), StorageBox::new([0x01; 4])));
+        assert_eq!(*p1.0, b'A');
+        assert_eq!(*p1.1, [0x01; 4]);
+        let root_key = Key([0x42; 32]);
+        SpreadLayout::push_spread(&p1, &mut KeyPtr::from(root_key));
+        // Now load another instance of storage box from the same key and check
+        // if both instances are equal:
+        let p2: Pack<(StorageBox<u8>, StorageBox<[i32; 4]>)> =
+            SpreadLayout::pull_spread(&mut KeyPtr::from(root_key));
+        assert_eq!(p1, p2);
+        // We have to forget one of the storage boxes because we otherwise get
+        // a double free panic since their `Drop` implementations both try to
+        // free the same dynamic allocation.
+        core::mem::forget(p2);
+    })
+}
+
+#[test]
+fn recursive_pull_push_works() {
+    run_test(|_| {
+        let rec1 = StorageBox::new(StorageBox::new(b'A'));
+        assert_eq!(**rec1, b'A');
+        let root_key = Key([0x42; 32]);
+        SpreadLayout::push_spread(&rec1, &mut KeyPtr::from(root_key));
+        // Now load another instance of storage box from the same key and check
+        // if both instances are equal:
+        let rec2: StorageBox<StorageBox<u8>> =
+            SpreadLayout::pull_spread(&mut KeyPtr::from(root_key));
+        assert_eq!(rec1, rec2);
+        // We have to forget one of the storage boxes because we otherwise get
+        // a double free panic since their `Drop` implementations both try to
+        // free the same dynamic allocation.
+        core::mem::forget(rec2);
+    })
+}
+
+#[test]
+#[should_panic(expected = "storage entry was empty")]
+fn recursive_clear_works() {
+    run_test(|_| {
+        let rec1 = StorageBox::new(StorageBox::new(b'A'));
+        assert_eq!(**rec1, b'A');
+        let root_key = Key([0x42; 32]);
+        // Manually clear the storage of `rec1`. Then another load from the same
+        // key region should panic since the entry is empty:
+        SpreadLayout::push_spread(&rec1, &mut KeyPtr::from(root_key));
+        SpreadLayout::clear_spread(&rec1, &mut KeyPtr::from(root_key));
+        let rec2: StorageBox<StorageBox<u8>> =
+            SpreadLayout::pull_spread(&mut KeyPtr::from(root_key));
+        // We have to forget one of the storage boxes because we otherwise get
+        // a double free panic since their `Drop` implementations both try to
+        // free the same dynamic allocation.
+        core::mem::forget(rec2);
+    })
+}
+
+#[test]
+#[should_panic(expected = "encountered double free of dynamic storage: at index 0")]
+fn double_free_panics() {
+    run_test(|_| {
+        let b1 = StorageBox::new(b'A');
+        let root_key = Key([0x42; 32]);
+        // Push `b1` and pull a second instance `b2` from the same storage
+        // region so that both point to the same dynamic allocation:
+        SpreadLayout::push_spread(&b1, &mut KeyPtr::from(root_key));
+        let b2: StorageBox<u8> = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key));
+        assert_eq!(b1, b2);
+        // At this point both `b1` and `b2` are getting dropped trying to free
+        // the same dynamic allocation which panics.
+    })
+}
diff --git a/core/src/storage2/collections/hashmap/impls.rs b/core/src/storage2/collections/hashmap/impls.rs
new file mode 100644
index 00000000000..2a92bfcf34f
--- /dev/null
+++ b/core/src/storage2/collections/hashmap/impls.rs
@@ -0,0 +1,179 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + HashMap as StorageHashMap, + Iter, + IterMut, +}; +use crate::{ + hash::hasher::Hasher, + storage2::traits::PackedLayout, +}; +use core::{ + cmp::{ + Eq, + Ord, + PartialEq, + }, + iter::FromIterator, + ops, +}; +use ink_prelude::borrow::{ + Borrow, + ToOwned, +}; +use ink_primitives::Key; + +impl Drop for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn drop(&mut self) { + self.clear_cells(); + } +} + +impl Default for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn default() -> Self { + Self::new() + } +} + +impl<'a, K, V, H, Q> ops::Index<&'a Q> for StorageHashMap +where + Q: Ord + scale::Encode + ToOwned, + K: Borrow + Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + type Output = V; + + fn index(&self, index: &Q) -> &Self::Output { + self.get(index).expect("index out of bounds") + } +} + +impl<'a, K, V, H, Q> ops::IndexMut<&'a Q> for StorageHashMap +where + Q: Ord + scale::Encode + ToOwned, + K: Borrow + Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn index_mut(&mut self, index: &Q) -> &mut Self::Output { + self.get_mut(index).expect("index out of bounds") + } +} + +impl<'a, K: 'a, V: 'a, H> IntoIterator for &'a StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V, H>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K: 'a, V: 'a, H> IntoIterator for &'a mut StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V, H>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl Extend<(K, V)> for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for (key, value) in iter { + self.insert(key, value); + } + } +} + +impl FromIterator<(K, V)> for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let mut vec = StorageHashMap::new(); + vec.extend(iter); + vec + } +} + +impl PartialEq for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PartialEq + PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false + } + self.iter() + .map(|(key, value)| (value, other.get(key))) + .all(|(lhs, maybe_rhs)| maybe_rhs.map(|rhs| rhs == lhs).unwrap_or(false)) + } +} + +impl Eq for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: Eq + PackedLayout, + H: Hasher, + Key: From<::Output>, +{ +} diff --git a/core/src/storage2/collections/hashmap/iter.rs b/core/src/storage2/collections/hashmap/iter.rs new file mode 100644 index 
00000000000..0b98eb3669e --- /dev/null +++ b/core/src/storage2/collections/hashmap/iter.rs @@ -0,0 +1,419 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::ValueEntry; +use crate::{ + hash::hasher::Hasher, + storage2::{ + collections::{ + extend_lifetime, + stash::Iter as StashIter, + HashMap as StorageHashMap, + }, + lazy::LazyHashMap, + traits::PackedLayout, + }, +}; +use ink_primitives::Key; + +/// An iterator over shared references to the elements of a storage hash map. +#[derive(Debug, Copy, Clone)] +pub struct Iter<'a, K, V, H> +where + K: PackedLayout, +{ + /// The iterator over the map's keys. + keys_iter: StashIter<'a, K>, + /// The lazy hash map to query the values. + values: &'a LazyHashMap, H>, +} + +impl<'a, K, V, H> Iter<'a, K, V, H> +where + K: Ord + Clone + PackedLayout, +{ + /// Creates a new iterator for the given storage hash map. + pub(crate) fn new(hash_map: &'a StorageHashMap) -> Self + where + H: Hasher, + V: PackedLayout, + Key: From<::Output>, + { + Self { + keys_iter: hash_map.keys.iter(), + values: &hash_map.values, + } + } +} + +impl<'a, K, V, H> Iter<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + /// Queries the value for the given key and returns the key/value pair. + /// + /// # Panics + /// + /// If the key refers to an invalid element. + fn query_value(&self, key: &'a K) -> ::Item { + let entry = self + .values + .get(key) + .expect("a key must always refer to an existing entry"); + (key, &entry.value) + } +} + +impl<'a, K, V, H> Iterator for Iter<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + type Item = (&'a K, &'a V); + + fn count(self) -> usize { + self.keys_iter.count() + } + + fn next(&mut self) -> Option { + let key = self.keys_iter.next()?; + Some(self.query_value(key)) + } + + fn size_hint(&self) -> (usize, Option) { + self.keys_iter.size_hint() + } +} + +impl<'a, K, V, H> ExactSizeIterator for Iter<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ +} + +impl<'a, K, V, H> DoubleEndedIterator for Iter<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + fn next_back(&mut self) -> Option { + let key = self.keys_iter.next_back()?; + Some(self.query_value(key)) + } +} + +/// An iterator over shared references to the elements of a storage hash map. +#[derive(Debug)] +pub struct IterMut<'a, K, V, H> +where + K: PackedLayout, +{ + /// The iterator over the map's keys. + keys_iter: StashIter<'a, K>, + /// The lazy hash map to query the values. + values: &'a mut LazyHashMap, H>, +} + +impl<'a, K, V, H> IterMut<'a, K, V, H> +where + K: Ord + Clone + PackedLayout, +{ + /// Creates a new iterator for the given storage hash map. 
+ pub(crate) fn new(hash_map: &'a mut StorageHashMap) -> Self + where + H: Hasher, + V: PackedLayout, + Key: From<::Output>, + { + Self { + keys_iter: hash_map.keys.iter(), + values: &mut hash_map.values, + } + } +} + +impl<'a, K, V, H> IterMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + /// Queries the value for the given key and returns the key/value pair. + /// + /// # Panics + /// + /// If the key refers to an invalid element. + fn query_value<'b>(&'b mut self, key: &'a K) -> ::Item { + let entry = self + .values + .get_mut(key) + .expect("a key must always refer to an existing entry"); + (key, unsafe { + extend_lifetime::<'b, 'a, V>(&mut entry.value) + }) + } +} + +impl<'a, K, V, H> Iterator for IterMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + type Item = (&'a K, &'a mut V); + + fn count(self) -> usize { + self.keys_iter.count() + } + + fn next(&mut self) -> Option { + let key = self.keys_iter.next()?; + Some(self.query_value(key)) + } + + fn size_hint(&self) -> (usize, Option) { + self.keys_iter.size_hint() + } +} + +impl<'a, K, V, H> ExactSizeIterator for IterMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ +} + +impl<'a, K, V, H> DoubleEndedIterator for IterMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + fn next_back(&mut self) -> Option { + let key = self.keys_iter.next_back()?; + Some(self.query_value(key)) + } +} + +/// An iterator over shared references to the values of a storage hash map. +#[derive(Debug, Copy, Clone)] +pub struct Values<'a, K, V, H> +where + K: PackedLayout, +{ + /// The key/values pair iterator. + iter: Iter<'a, K, V, H>, +} + +impl<'a, K, V, H> Values<'a, K, V, H> +where + K: Ord + Clone + PackedLayout, +{ + /// Creates a new iterator for the given storage hash map. + pub(crate) fn new(hash_map: &'a StorageHashMap) -> Self + where + H: Hasher, + V: PackedLayout, + Key: From<::Output>, + { + Self { + iter: hash_map.iter(), + } + } +} + +impl<'a, K, V, H> Iterator for Values<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + type Item = &'a V; + + fn count(self) -> usize { + self.iter.count() + } + + fn next(&mut self) -> Option { + self.iter.next().map(|(_key, value)| value) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, K, V, H> ExactSizeIterator for Values<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ +} + +impl<'a, K, V, H> DoubleEndedIterator for Values<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|(_key, value)| value) + } +} + +/// An iterator over exclusive references to the values of a storage hash map. +#[derive(Debug)] +pub struct ValuesMut<'a, K, V, H> +where + K: PackedLayout, +{ + /// The key/values pair iterator. + iter: IterMut<'a, K, V, H>, +} + +impl<'a, K, V, H> ValuesMut<'a, K, V, H> +where + K: Ord + Clone + PackedLayout, +{ + /// Creates a new iterator for the given storage hash map. 
+ pub(crate) fn new(hash_map: &'a mut StorageHashMap) -> Self + where + H: Hasher, + V: PackedLayout, + Key: From<::Output>, + { + Self { + iter: hash_map.iter_mut(), + } + } +} + +impl<'a, K, V, H> Iterator for ValuesMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + type Item = &'a mut V; + + fn count(self) -> usize { + self.iter.count() + } + + fn next(&mut self) -> Option { + self.iter.next().map(|(_key, value)| value) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, K, V, H> ExactSizeIterator for ValuesMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ +} + +impl<'a, K, V, H> DoubleEndedIterator for ValuesMut<'a, K, V, H> +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|(_key, value)| value) + } +} + +/// An iterator over references to the keys of a storage hash map. +#[derive(Debug, Copy, Clone)] +pub struct Keys<'a, K> +where + K: PackedLayout, +{ + /// The key iterator. + iter: StashIter<'a, K>, +} + +impl<'a, K> Keys<'a, K> +where + K: Ord + Clone + PackedLayout, +{ + /// Creates a new iterator for the given storage hash map. + pub(crate) fn new(hash_map: &'a StorageHashMap) -> Self + where + H: Hasher, + V: PackedLayout, + Key: From<::Output>, + { + Self { + iter: hash_map.keys.iter(), + } + } +} + +impl<'a, K> Iterator for Keys<'a, K> +where + K: PackedLayout, +{ + type Item = &'a K; + + fn count(self) -> usize { + self.iter.count() + } + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, K> ExactSizeIterator for Keys<'a, K> where K: PackedLayout {} + +impl<'a, K> DoubleEndedIterator for Keys<'a, K> +where + K: PackedLayout, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } +} diff --git a/core/src/storage2/collections/hashmap/mod.rs b/core/src/storage2/collections/hashmap/mod.rs new file mode 100644 index 00000000000..292da2a2673 --- /dev/null +++ b/core/src/storage2/collections/hashmap/mod.rs @@ -0,0 +1,336 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A storage hash map that allows to associate keys with values. + +mod impls; +mod iter; +mod storage; + +#[cfg(test)] +mod tests; + +pub use self::iter::{ + Iter, + IterMut, + Keys, + Values, + ValuesMut, +}; +use crate::{ + hash::hasher::{ + Blake2x256Hasher, + Hasher, + }, + storage2::{ + collections::Stash, + lazy::LazyHashMap, + traits::PackedLayout, + }, +}; +use core::{ + borrow::Borrow, + cmp::Eq, +}; +use ink_prelude::borrow::ToOwned; +use ink_primitives::Key; + +/// The index type within a hashmap. +/// +/// # Note +/// +/// Used for key indices internal to the hashmap. +type KeyIndex = u32; + +/// A hash map operating on the contract storage. 
+///
+/// Stores a mapping between keys and values.
+///
+/// # Note
+///
+/// Unlike Rust's standard `HashMap`, which uses the [`core::hash::Hash`] trait
+/// in order to hash its keys, the storage hash map uses the [`scale::Encode`]
+/// encoding of its keys together with a built-in cryptographic
+/// hash function provided by the chain runtime.
+///
+/// The main difference between the lower-level `LazyHashMap` and the
+/// `storage::HashMap` is that the latter is aware of its associated keys and
+/// values and operates on those instances directly as opposed to `Option`
+/// instances of them. It also provides a higher-level, more user-focused
+/// API.
+///
+/// Users should generally prefer using this storage hash map over the low-level
+/// `LazyHashMap` for direct usage in their smart contracts.
+#[derive(Debug)]
+pub struct HashMap<K, V, H = Blake2x256Hasher>
+where
+    K: Ord + Clone + PackedLayout,
+    V: PackedLayout,
+    H: Hasher,
+    Key: From<<H as Hasher>::Output>,
+{
+    /// The keys of the storage hash map.
+    keys: Stash<K>,
+    /// The values of the storage hash map.
+    values: LazyHashMap<K, ValueEntry<V>, H>,
+}
+
+/// An entry within the storage hash map.
+///
+/// Stores the value as well as the index to its associated key.
+#[derive(Debug, scale::Encode, scale::Decode)]
+struct ValueEntry<V> {
+    /// The value stored in this entry.
+    value: V,
+    /// The index of the key associated with this value.
+    key_index: KeyIndex,
+}
+
+impl<K, V, H> HashMap<K, V, H>
+where
+    K: Ord + Clone + PackedLayout,
+    V: PackedLayout,
+    H: Hasher,
+    Key: From<<H as Hasher>::Output>,
+{
+    /// Creates a new empty storage hash map.
+    pub fn new() -> Self {
+        Self {
+            keys: Stash::new(),
+            values: LazyHashMap::new(),
+        }
+    }
+
+    /// Returns the number of key/value pairs stored in the hash map.
+    pub fn len(&self) -> u32 {
+        self.keys.len()
+    }
+
+    /// Returns `true` if the hash map is empty.
+    pub fn is_empty(&self) -> bool {
+        self.keys.is_empty()
+    }
+
+    /// Returns an iterator yielding shared references to all key/value pairs
+    /// of the hash map.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage hash maps.
+    /// - Prefer using methods like `Iterator::take` in order to limit the number
+    ///   of yielded elements.
+    pub fn iter(&self) -> Iter<K, V, H> {
+        Iter::new(self)
+    }
+
+    /// Returns an iterator yielding exclusive references to all key/value pairs
+    /// of the hash map.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage hash maps.
+    /// - Prefer using methods like `Iterator::take` in order to limit the number
+    ///   of yielded elements.
+    pub fn iter_mut(&mut self) -> IterMut<K, V, H> {
+        IterMut::new(self)
+    }
+
+    /// Returns an iterator yielding shared references to all values of the hash map.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage hash maps.
+    /// - Prefer using methods like `Iterator::take` in order to limit the number
+    ///   of yielded elements.
+    pub fn values(&self) -> Values<K, V, H> {
+        Values::new(self)
+    }
+
+    /// Returns an iterator yielding exclusive references to all values of the hash map.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage hash maps.
+    /// - Prefer using methods like `Iterator::take` in order to limit the number
+    ///   of yielded elements.
+    pub fn values_mut(&mut self) -> ValuesMut<K, V, H> {
+        ValuesMut::new(self)
+    }
+
+    /// Returns an iterator yielding shared references to all keys of the hash map.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage hash maps.
+ /// - Prefer using methods like `Iterator::take` in order to limit the number + /// of yielded elements. + pub fn keys(&self) -> Keys { + Keys::new(self) + } +} + +impl HashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + fn clear_cells(&self) { + if self.values.key().is_none() { + // We won't clear any storage if we are in lazy state since there + // probably has not been any state written to storage, yet. + return + } + for key in self.keys() { + // It might seem wasteful to clear all entries instead of just + // the occupied ones. However this spares us from having one extra + // read for every element in the storage stash to filter out vacant + // entries. So this is actually a trade-off and at the time of this + // implementation it is unclear which path is more efficient. + // + // The bet is that clearing a storage cell is cheaper than reading one. + self.values.clear_packed_at(key); + } + } +} + +impl HashMap +where + K: Ord + Eq + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From, +{ + /// Inserts a key-value pair into the map. + /// + /// Returns the previous value associated with the same key if any. + /// If the map did not have this key present, `None` is returned. + /// + /// # Note + /// + /// - If the map did have this key present, the value is updated, + /// and the old value is returned. The key is not updated, though; + /// this matters for types that can be `==` without being identical. + pub fn insert(&mut self, key: K, new_value: V) -> Option { + if let Some(occupied) = self.values.get_mut(&key) { + // Update value, don't update key. + let old_value = core::mem::replace(&mut occupied.value, new_value); + return Some(old_value) + } + // At this point we know that `key` does not yet exist in the map. + let key_index = self.keys.put(key.to_owned()); + self.values.put( + key, + Some(ValueEntry { + value: new_value, + key_index, + }), + ); + None + } + + /// Removes the key/value pair from the map associated with the given key. + /// + /// - Returns the removed value if any. + /// + /// # Note + /// + /// The key may be any borrowed form of the map's key type, + /// but `Hash` and `Eq` on the borrowed form must match those for the key type. + pub fn take(&mut self, key: &Q) -> Option + where + K: Borrow, + Q: Ord + scale::Encode + ToOwned, + { + let entry = self.values.put_get(key, None)?; + self.keys + .take(entry.key_index) + .expect("`key_index` must point to a valid key entry"); + Some(entry.value) + } + + /// Returns a shared reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, + /// but `Hash` and `Eq` on the borrowed form must match those for the key type. + pub fn get(&self, key: &Q) -> Option<&V> + where + K: Borrow, + Q: Ord + scale::Encode + ToOwned, + { + self.values.get(key).map(|entry| &entry.value) + } + + /// Returns a mutable reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, + /// but `Hash` and `Eq` on the borrowed form must match those for the key type. + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + K: Borrow, + Q: Ord + scale::Encode + ToOwned, + { + self.values.get_mut(key).map(|entry| &mut entry.value) + } + + /// Returns `true` if there is an entry corresponding to the key in the map. 
+    pub fn contains_key<Q>(&self, key: &Q) -> bool
+    where
+        K: Borrow<Q>,
+        Q: Ord + PartialEq + Eq + scale::Encode + ToOwned<Owned = K>,
+    {
+        // We do not check if the given key is equal to the queried key, which is
+        // what a hash map implementation normally does, because we do not resolve
+        // or prevent collisions in this hash map implementation at any level.
+        // Having a collision is virtually impossible since we
+        // are using a key space of 2^256 bits.
+        self.values.get(key).is_some()
+    }
+
+    /// Defragments storage used by the storage hash map.
+    ///
+    /// Returns the number of storage cells freed this way.
+    ///
+    /// A `max_iterations` parameter of `None` means that there is no limit
+    /// to the number of iterations performed. This is generally not advised.
+    ///
+    /// # Note
+    ///
+    /// This frees storage that the hash map holds but no longer needs.
+    /// The operation might be expensive, especially for big `max_iterations`
+    /// parameters. The `max_iterations` parameter can be used to limit the
+    /// expensiveness of this operation and instead free up storage incrementally.
+    pub fn defrag(&mut self, max_iterations: Option<u32>) -> u32 {
+        // This method just defrags the underlying `storage::Stash` used to
+        // store the keys as it can sometimes take a lot of unused storage
+        // if many keys have been removed at some point. Some hash map
+        // implementations might even prefer to perform this operation with a
+        // limit set to 1 after every successful removal.
+        if let Some(0) = max_iterations {
+            // Bail out early if the iteration limit is set to 0 anyway to
+            // completely avoid doing work in this case.
+            return 0
+        }
+        let len_vacant = self.keys.capacity() - self.keys.len();
+        let max_iterations = max_iterations.unwrap_or(len_vacant);
+        let values = &mut self.values;
+        let callback = |old_index, new_index, key: &K| {
+            let value_entry = values.get_mut(key).expect("key must be valid");
+            debug_assert_eq!(value_entry.key_index, old_index);
+            value_entry.key_index = new_index;
+        };
+        self.keys.defrag(Some(max_iterations), callback)
+    }
+}
diff --git a/core/src/storage2/collections/hashmap/storage.rs b/core/src/storage2/collections/hashmap/storage.rs
new file mode 100644
index 00000000000..951343d4070
--- /dev/null
+++ b/core/src/storage2/collections/hashmap/storage.rs
@@ -0,0 +1,100 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of ink! storage traits.
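
Aside: a minimal sketch showing how the `storage::HashMap` methods implemented above compose; `insert` returns the previous value, `take` removes the key/value pair, and `defrag` reclaims vacant stash capacity. The crate path and the function name are assumptions for illustration, not part of this patch.

    use ink_core::storage2::collections::HashMap as StorageHashMap;

    fn hashmap_example() {
        let mut map = <StorageHashMap<u8, i32>>::new();
        assert_eq!(map.insert(b'A', 1), None); // fresh key: no previous value
        assert_eq!(map.insert(b'A', 2), Some(1)); // update: returns the old value
        assert!(map.contains_key(&b'A'));
        assert_eq!(map.get(&b'A'), Some(&2));
        assert_eq!(map.take(&b'A'), Some(2)); // removes the key/value pair
        assert!(map.is_empty());
        // Reclaim storage cells left vacant by removals:
        let _freed: u32 = map.defrag(None);
    }
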
+ +use super::{ + HashMap as StorageHashMap, + ValueEntry, +}; +use crate::{ + hash::hasher::Hasher, + storage2::{ + collections::Stash as StorageStash, + traits::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + KeyPtr, + PackedLayout, + SpreadLayout, + }, + }, +}; +use ink_primitives::Key; + +impl SpreadLayout for ValueEntry +where + T: PackedLayout, +{ + const FOOTPRINT: u64 = 1; + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(self, ptr) + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(self, ptr) + } +} + +impl PackedLayout for ValueEntry +where + T: PackedLayout, +{ + fn pull_packed(&mut self, at: &Key) { + ::pull_packed(&mut self.value, at) + } + + fn push_packed(&self, at: &Key) { + ::push_packed(&self.value, at) + } + + fn clear_packed(&self, at: &Key) { + ::clear_packed(&self.value, at) + } +} + +impl SpreadLayout for StorageHashMap +where + K: Ord + Clone + PackedLayout, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + const FOOTPRINT: u64 = 1 + as SpreadLayout>::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self { + keys: SpreadLayout::pull_spread(ptr), + values: SpreadLayout::pull_spread(ptr), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::push_spread(&self.keys, ptr); + SpreadLayout::push_spread(&self.values, ptr); + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + self.clear_cells(); + SpreadLayout::clear_spread(&self.keys, ptr); + SpreadLayout::clear_spread(&self.values, ptr); + } +} diff --git a/core/src/storage2/collections/hashmap/tests.rs b/core/src/storage2/collections/hashmap/tests.rs new file mode 100644 index 00000000000..f9d90346328 --- /dev/null +++ b/core/src/storage2/collections/hashmap/tests.rs @@ -0,0 +1,328 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::HashMap as StorageHashMap; +use crate::{ + env, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, +}; +use ink_primitives::Key; + +#[test] +fn new_works() { + // `StorageHashMap::new` + let hmap = >::new(); + assert!(hmap.is_empty()); + assert_eq!(hmap.len(), 0); + assert!(hmap.iter().next().is_none()); + // `StorageHashMap::default` + let default = as Default>::default(); + assert!(default.is_empty()); + assert_eq!(default.len(), 0); + assert!(default.iter().next().is_none()); + // `StorageHashMap::new` and `StorageHashMap::default` should be equal. 
+ assert_eq!(hmap, default); +} + +#[test] +fn from_iterator_works() { + let test_values = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)]; + let hmap = test_values + .iter() + .copied() + .collect::>(); + assert!(!hmap.is_empty()); + assert_eq!(hmap.len(), 4); + assert_eq!(hmap, { + let mut hmap = >::new(); + for (key, value) in &test_values { + assert_eq!(hmap.insert(*key, *value), None); + } + hmap + }); +} + +#[test] +fn from_empty_iterator_works() { + assert_eq!( + [].iter().copied().collect::>(), + >::new(), + ); +} + +#[test] +fn contains_key_works() { + // Empty hash map. + let hmap = >::new(); + assert!(!hmap.contains_key(&b'A')); + assert!(!hmap.contains_key(&b'E')); + // Filled hash map. + let hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + assert!(hmap.contains_key(&b'A')); + assert!(hmap.contains_key(&b'B')); + assert!(hmap.contains_key(&b'C')); + assert!(hmap.contains_key(&b'D')); + assert!(!hmap.contains_key(&b'E')); +} + +#[test] +fn get_works() { + // Empty hash map. + let hmap = >::new(); + assert_eq!(hmap.get(&b'A'), None); + assert_eq!(hmap.get(&b'E'), None); + // Filled hash map: `get` + let hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + assert_eq!(hmap.get(&b'A'), Some(&1)); + assert_eq!(hmap.get(&b'B'), Some(&2)); + assert_eq!(hmap.get(&b'C'), Some(&3)); + assert_eq!(hmap.get(&b'D'), Some(&4)); + assert_eq!(hmap.get(&b'E'), None); + // Filled hash map: `get_mut` + let mut hmap = hmap; + assert_eq!(hmap.get_mut(&b'A'), Some(&mut 1)); + assert_eq!(hmap.get_mut(&b'B'), Some(&mut 2)); + assert_eq!(hmap.get_mut(&b'C'), Some(&mut 3)); + assert_eq!(hmap.get_mut(&b'D'), Some(&mut 4)); + assert_eq!(hmap.get_mut(&b'E'), None); +} + +#[test] +fn insert_works() { + let mut hmap = >::new(); + // Start with an empty hash map. + assert_eq!(hmap.len(), 0); + assert_eq!(hmap.get(&b'A'), None); + // Insert first value. + hmap.insert(b'A', 1); + assert_eq!(hmap.len(), 1); + assert_eq!(hmap.get(&b'A'), Some(&1)); + assert_eq!(hmap.get_mut(&b'A'), Some(&mut 1)); + // Update the inserted value. + hmap.insert(b'A', 2); + assert_eq!(hmap.len(), 1); + assert_eq!(hmap.get(&b'A'), Some(&2)); + assert_eq!(hmap.get_mut(&b'A'), Some(&mut 2)); + // Insert another value. + hmap.insert(b'B', 3); + assert_eq!(hmap.len(), 2); + assert_eq!(hmap.get(&b'B'), Some(&3)); + assert_eq!(hmap.get_mut(&b'B'), Some(&mut 3)); +} + +#[test] +fn take_works() { + // Empty hash map. 
+ let mut hmap = >::new(); + assert_eq!(hmap.take(&b'A'), None); + assert_eq!(hmap.take(&b'E'), None); + // Filled hash map: `get` + let mut hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + assert_eq!(hmap.len(), 4); + assert_eq!(hmap.take(&b'A'), Some(1)); + assert_eq!(hmap.len(), 3); + assert_eq!(hmap.take(&b'A'), None); + assert_eq!(hmap.len(), 3); + assert_eq!(hmap.take(&b'B'), Some(2)); + assert_eq!(hmap.len(), 2); + assert_eq!(hmap.take(&b'C'), Some(3)); + assert_eq!(hmap.len(), 1); + assert_eq!(hmap.take(&b'D'), Some(4)); + assert_eq!(hmap.len(), 0); + assert_eq!(hmap.take(&b'E'), None); + assert_eq!(hmap.len(), 0); +} + +#[test] +fn iter_next_works() { + let hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + // Test iterator over shared references: + let mut iter = hmap.iter(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.next(), Some((&b'A', &1))); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some((&b'B', &2))); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some((&b'C', &3))); + assert_eq!(iter.size_hint(), (1, Some(1))); + assert_eq!(iter.next(), Some((&b'D', &4))); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references: + let mut hmap = hmap; + let mut iter = hmap.iter_mut(); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.next(), Some((&b'A', &mut 1))); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some((&b'B', &mut 2))); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.next(), Some((&b'C', &mut 3))); + assert_eq!(iter.size_hint(), (1, Some(1))); + assert_eq!(iter.next(), Some((&b'D', &mut 4))); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn values_next_works() { + let hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + // Test iterator over shared references: + let mut iter = hmap.values(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.next(), Some(&1)); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some(&2)); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some(&3)); + assert_eq!(iter.size_hint(), (1, Some(1))); + assert_eq!(iter.next(), Some(&4)); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references: + let mut hmap = hmap; + let mut iter = hmap.values_mut(); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.next(), Some(&mut 1)); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some(&mut 2)); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.next(), Some(&mut 3)); + assert_eq!(iter.size_hint(), (1, Some(1))); + assert_eq!(iter.next(), Some(&mut 4)); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn keys_next_works() { + let hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + let mut iter = hmap.keys(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.size_hint(), (4, Some(4))); + 
assert_eq!(iter.next(), Some(&b'A')); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some(&b'C')); + assert_eq!(iter.size_hint(), (1, Some(1))); + assert_eq!(iter.next(), Some(&b'D')); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); +} + +#[test] +fn defrag_works() { + let expected = [(b'A', 1), (b'D', 4)] + .iter() + .copied() + .collect::>(); + // Defrag without limits: + let mut hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + assert_eq!(hmap.defrag(None), 0); + assert_eq!(hmap.take(&b'B'), Some(2)); + assert_eq!(hmap.take(&b'C'), Some(3)); + assert_eq!(hmap.defrag(None), 2); + assert_eq!(hmap.defrag(None), 0); + assert_eq!(hmap, expected); + // Defrag with limits: + let mut hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + assert_eq!(hmap.defrag(None), 0); + assert_eq!(hmap.take(&b'B'), Some(2)); + assert_eq!(hmap.take(&b'C'), Some(3)); + assert_eq!(hmap.defrag(Some(1)), 1); + assert_eq!(hmap.defrag(Some(1)), 1); + assert_eq!(hmap.defrag(Some(1)), 0); + assert_eq!(hmap, expected); +} + +#[test] +fn spread_layout_push_pull_works() -> env::Result<()> { + env::test::run_test::(|_| { + let hmap1 = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&hmap1, &mut KeyPtr::from(root_key)); + // Load the pushed storage vector into another instance and check that + // both instances are equal: + let hmap2 = as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_eq!(hmap1, hmap2); + Ok(()) + }) +} + +#[test] +#[should_panic(expected = "storage entry was empty")] +fn spread_layout_clear_works() { + env::test::run_test::(|_| { + let hmap1 = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] + .iter() + .copied() + .collect::>(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&hmap1, &mut KeyPtr::from(root_key)); + // It has already been asserted that a valid instance can be pulled + // from contract storage after a push to the same storage region. + // + // Now clear the associated storage from `hmap1` and check whether + // loading another instance from this storage will panic since the + // vector's length property cannot read a value: + SpreadLayout::clear_spread(&hmap1, &mut KeyPtr::from(root_key)); + let _ = as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + Ok(()) + }) + .unwrap() +} diff --git a/core/src/storage2/collections/mod.rs b/core/src/storage2/collections/mod.rs new file mode 100644 index 00000000000..86fc3437f70 --- /dev/null +++ b/core/src/storage2/collections/mod.rs @@ -0,0 +1,55 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
High-level collections used to manage storage entities in the persisted
+//! contract storage.
+//!
+//! Users should generally use these collections in their contracts directly
+//! or as building blocks for their own collections and algorithms.
+
+pub mod bitstash;
+pub mod bitvec;
+pub mod boxed;
+pub mod hashmap;
+pub mod smallvec;
+pub mod stash;
+pub mod vec;
+
+#[doc(inline)]
+pub use self::{
+    bitstash::BitStash,
+    bitvec::Bitvec,
+    boxed::Box,
+    hashmap::HashMap,
+    smallvec::SmallVec,
+    stash::Stash,
+    vec::Vec,
+};
+
+/// Extends the lifetime `'a` to the outliving lifetime `'b` for the given reference.
+///
+/// # Note
+///
+/// This interface is a bit more constrained than a simple
+/// [transmute](`core::mem::transmute`) and therefore preferred
+/// for extending lifetimes only.
+///
+/// # Safety
+///
+/// This function is `unsafe` because lifetimes can be extended beyond the
+/// lifetimes of the objects they are referencing and thus potentially create
+/// dangling references if not used carefully.
+pub(crate) unsafe fn extend_lifetime<'a, 'b: 'a, T>(reference: &'a mut T) -> &'b mut T {
+    core::mem::transmute::<&'a mut T, &'b mut T>(reference)
+}
diff --git a/core/src/storage2/collections/smallvec/impls.rs b/core/src/storage2/collections/smallvec/impls.rs
new file mode 100644
index 00000000000..c16e57e74b2
--- /dev/null
+++ b/core/src/storage2/collections/smallvec/impls.rs
@@ -0,0 +1,139 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
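
Aside: `extend_lifetime` above exists because the mutable storage iterators hand out `&'a mut` items while only holding `&mut self` during each `next` call, and their lazily loaded backing store cannot be split apart the way a slice can. For contrast, this is the classic safe version of the same pattern for plain slices (illustrative only, not part of this patch):

    /// A mutable slice iterator written without `unsafe`.
    ///
    /// `core::mem::take` moves the slice out of `self`, so the yielded
    /// `&'a mut T` borrows directly from `'a` instead of being reborrowed
    /// at the shorter lifetime of `&mut self`; the lazy storage iterators
    /// (e.g. the hash map's `IterMut` above) cannot perform this split and
    /// therefore fall back to `extend_lifetime`.
    struct SliceIterMut<'a, T> {
        remaining: &'a mut [T],
    }

    impl<'a, T> Iterator for SliceIterMut<'a, T> {
        type Item = &'a mut T;

        fn next(&mut self) -> Option<&'a mut T> {
            // Take the slice out, split off the first element, put the rest back.
            let slice = core::mem::take(&mut self.remaining);
            let (first, rest) = slice.split_first_mut()?;
            self.remaining = rest;
            Some(first)
        }
    }
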
+ +use super::{ + Iter, + SmallVec, +}; +use crate::storage2::{ + lazy::LazyArrayLength, + traits::PackedLayout, +}; +use core::iter::{ + Extend, + FromIterator, +}; + +impl Drop for SmallVec +where + T: PackedLayout, + N: LazyArrayLength, +{ + fn drop(&mut self) { + self.clear_cells() + } +} + +impl core::ops::Index for SmallVec +where + T: PackedLayout, + N: LazyArrayLength, +{ + type Output = T; + + fn index(&self, index: u32) -> &Self::Output { + match self.get(index) { + Some(value) => value, + None => { + panic!( + "index out of bounds: the len is {} but the index is {}", + self.len(), + index + ) + } + } + } +} + +impl core::ops::IndexMut for SmallVec +where + T: PackedLayout, + N: LazyArrayLength, +{ + fn index_mut(&mut self, index: u32) -> &mut Self::Output { + let len = self.len(); + match self.get_mut(index) { + Some(value) => value, + None => { + panic!( + "index out of bounds: the len is {} but the index is {}", + len, index + ) + } + } + } +} + +impl<'a, T: 'a, N> IntoIterator for &'a SmallVec +where + T: PackedLayout, + N: LazyArrayLength, +{ + type Item = &'a T; + type IntoIter = Iter<'a, T, N>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl Extend for SmallVec +where + T: PackedLayout, + N: LazyArrayLength, +{ + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for item in iter { + self.push(item) + } + } +} + +impl FromIterator for SmallVec +where + T: PackedLayout, + N: LazyArrayLength, +{ + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let mut vec = SmallVec::new(); + vec.extend(iter); + vec + } +} + +impl core::cmp::PartialEq for SmallVec +where + T: PartialEq + PackedLayout, + N: LazyArrayLength, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false + } + self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs) + } +} + +impl core::cmp::Eq for SmallVec +where + T: Eq + PackedLayout, + N: LazyArrayLength, +{ +} diff --git a/core/src/storage2/collections/smallvec/iter.rs b/core/src/storage2/collections/smallvec/iter.rs new file mode 100644 index 00000000000..9323db2d480 --- /dev/null +++ b/core/src/storage2/collections/smallvec/iter.rs @@ -0,0 +1,233 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::SmallVec; +use crate::storage2::{ + collections::extend_lifetime, + lazy::LazyArrayLength, + traits::PackedLayout, +}; + +/// An iterator over shared references to the elements of a small storage vector. +#[derive(Debug, Clone, Copy)] +pub struct Iter<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + /// The storage vector to iterate over. + vec: &'a SmallVec, + /// The current begin of the iteration. + begin: u32, + /// The current end of the iteration. + end: u32, +} + +impl<'a, T, N> Iter<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + /// Creates a new iterator for the given storage vector. 
+ pub(crate) fn new(vec: &'a SmallVec) -> Self { + Self { + vec, + begin: 0, + end: vec.len(), + } + } + + /// Returns the amount of remaining elements to yield by the iterator. + fn remaining(&self) -> u32 { + self.end - self.begin + } +} + +impl<'a, T, N> Iterator for Iter<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + ::nth(self, 0) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.remaining() as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining() as usize + } + + fn nth(&mut self, n: usize) -> Option { + debug_assert!(self.begin <= self.end); + let n = n as u32; + if self.begin + n >= self.end { + return None + } + let cur = self.begin + n; + self.begin += 1 + n; + self.vec.get(cur).expect("access is within bounds").into() + } +} + +impl<'a, T, N> ExactSizeIterator for Iter<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ +} + +impl<'a, T, N> DoubleEndedIterator for Iter<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + fn next_back(&mut self) -> Option { + ::nth_back(self, 0) + } + + fn nth_back(&mut self, n: usize) -> Option { + debug_assert!(self.begin <= self.end); + let n = n as u32; + if self.begin >= self.end.saturating_sub(n) { + return None + } + self.end -= 1 + n; + self.vec + .get(self.end) + .expect("access is within bounds") + .into() + } +} + +/// An iterator over exclusive references to the elements of a small storage vector. +#[derive(Debug)] +pub struct IterMut<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + /// The storage vector to iterate over. + vec: &'a mut SmallVec, + /// The current begin of the iteration. + begin: u32, + /// The current end of the iteration. + end: u32, +} + +impl<'a, T, N> IterMut<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + /// Creates a new iterator for the given storage vector. + pub(crate) fn new(vec: &'a mut SmallVec) -> Self { + let len = vec.len(); + Self { + vec, + begin: 0, + end: len, + } + } + + /// Returns the amount of remaining elements to yield by the iterator. + fn remaining(&self) -> u32 { + self.end - self.begin + } +} + +impl<'a, T, N> IterMut<'a, T, N> +where + T: PackedLayout, + N: LazyArrayLength, +{ + fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> { + self.vec.get_mut(at).map(|value| { + // SAFETY: We extend the lifetime of the reference here. + // + // This is safe because the iterator yields an exclusive + // reference to every element in the iterated vector + // just once and also there can be only one such iterator + // for the same vector at the same time which is + // guaranteed by the constructor of the iterator. 
+
+impl<'a, T, N> IterMut<'a, T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> {
+        self.vec.get_mut(at).map(|value| {
+            // SAFETY: We extend the lifetime of the reference here.
+            //
+            // This is safe because the iterator yields an exclusive
+            // reference to every element in the iterated vector
+            // just once and also there can be only one such iterator
+            // for the same vector at the same time which is
+            // guaranteed by the constructor of the iterator.
+            unsafe { extend_lifetime::<'b, 'a, T>(value) }
+        })
+    }
+}
+
+impl<'a, T, N> Iterator for IterMut<'a, T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    type Item = &'a mut T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        <Self as Iterator>::nth(self, 0)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = self.remaining() as usize;
+        (remaining, Some(remaining))
+    }
+
+    fn count(self) -> usize {
+        self.remaining() as usize
+    }
+
+    fn nth(&mut self, n: usize) -> Option<Self::Item> {
+        debug_assert!(self.begin <= self.end);
+        let n = n as u32;
+        if self.begin + n >= self.end {
+            return None
+        }
+        let cur = self.begin + n;
+        self.begin += 1 + n;
+        self.get_mut(cur).expect("access is within bounds").into()
+    }
+}
+
+impl<'a, T, N> ExactSizeIterator for IterMut<'a, T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+}
+
+impl<'a, T, N> DoubleEndedIterator for IterMut<'a, T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        <Self as DoubleEndedIterator>::nth_back(self, 0)
+    }
+
+    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+        debug_assert!(self.begin <= self.end);
+        let n = n as u32;
+        if self.begin >= self.end.saturating_sub(n) {
+            return None
+        }
+        self.end -= 1 + n;
+        self.get_mut(self.end)
+            .expect("access is within bounds")
+            .into()
+    }
+}
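Both iterators compute `size_hint` and `count` from the cursor distance alone, so counting never decodes lazily loaded elements. A quick sketch of the resulting contract (editor's illustration; `vec` is assumed to be a `SmallVec<u8, U4>` holding three elements):

    fn iter_size_demo(vec: &SmallVec<u8, U4>) {
        let mut iter = vec.iter();
        assert_eq!(iter.size_hint(), (3, Some(3))); // exact bounds
        iter.next();
        assert_eq!(iter.len(), 2);   // via `ExactSizeIterator`
        assert_eq!(iter.count(), 2); // O(1): `end - begin`, no storage reads
    }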
diff --git a/core/src/storage2/collections/smallvec/mod.rs b/core/src/storage2/collections/smallvec/mod.rs
new file mode 100644
index 00000000000..f4136181d73
--- /dev/null
+++ b/core/src/storage2/collections/smallvec/mod.rs
@@ -0,0 +1,325 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A small storage vector that allows storing a limited number of elements.
+//!
+//! Prefer using [`SmallVec`] over [`crate::storage2::Vec`] if you know up front
+//! the maximum number of unique elements that have to be stored in the vector
+//! at the same time, provided this number is fairly low, e.g. not exceeding
+//! several hundred elements.
+
+mod impls;
+mod iter;
+mod storage;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::iter::{
+    Iter,
+    IterMut,
+};
+use crate::storage2::{
+    lazy::{
+        Lazy,
+        LazyArray,
+        LazyArrayLength,
+    },
+    traits::PackedLayout,
+};
+
+/// The used index type.
+type Index = u32;
+
+/// A contiguous growable array type.
+///
+/// # Note
+///
+/// - The `storage::SmallVec` has a very similar API compared to a `storage::Vec`.
+///   The major difference between both data structures is that the `SmallVec`
+///   can only contain up to a fixed number of elements given by `N` whereas the
+///   `Vec` can contain up to 2^32 elements which is the maximum for 32-bit Wasm
+///   targets.
+/// - The performance characteristics may be different from Rust's
+///   `Vec` due to the internal differences.
+/// - Allows storing up to `N` elements.
+#[derive(Debug)]
+pub struct SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// The current length of the small vector.
+    len: Lazy<u32>,
+    /// The entries of the small vector.
+    elems: LazyArray<T, N>,
+}
+
+impl<T, N> Default for SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T, N> SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// Clears the underlying storage cells of the storage vector.
+    ///
+    /// # Note
+    ///
+    /// This completely invalidates the storage vector's invariants about
+    /// the contents of its associated storage region.
+    ///
+    /// This API is used for the `Drop` implementation of [`SmallVec`] as well
+    /// as for the [`SpreadLayout::clear_spread`] trait implementation.
+    fn clear_cells(&self) {
+        if self.elems.key().is_none() {
+            // We won't clear any storage if we are in lazy state since there
+            // probably has not been any state written to storage, yet.
+            return
+        }
+        for index in 0..self.len() {
+            self.elems.clear_packed_at(index);
+        }
+    }
+}
+
+impl<T, N> SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// Creates a new empty vector.
+    pub fn new() -> Self {
+        Self {
+            len: Lazy::new(0),
+            elems: Default::default(),
+        }
+    }
+
+    /// Returns the capacity of the small vector.
+    #[inline]
+    pub fn capacity(&self) -> u32 {
+        self.elems.capacity()
+    }
+
+    /// Returns the number of elements in the vector, also referred to as its 'length'.
+    #[inline]
+    pub fn len(&self) -> u32 {
+        *self.len
+    }
+
+    /// Returns `true` if the vector contains no elements.
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+}
+
+impl<T, N> SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// Returns an iterator yielding shared references to all elements.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage vectors.
+    /// - Prefer using methods like `Iterator::take` in order to limit the number
+    ///   of yielded elements.
+    pub fn iter(&self) -> Iter<T, N> {
+        Iter::new(self)
+    }
+
+    /// Returns an iterator yielding exclusive references to all elements.
+    ///
+    /// # Note
+    ///
+    /// - Avoid unbounded iteration over big storage vectors.
+    /// - Prefer using methods like `Iterator::take` in order to limit the number
+    ///   of yielded elements.
+    pub fn iter_mut(&mut self) -> IterMut<T, N> {
+        IterMut::new(self)
+    }
+
+    /// Returns the index if it is within bounds or `None` otherwise.
+    fn within_bounds(&self, index: Index) -> Option<Index> {
+        if index < self.len() {
+            return Some(index)
+        }
+        None
+    }
+
+    /// Returns a shared reference to the first element if any.
+    pub fn first(&self) -> Option<&T> {
+        if self.is_empty() {
+            return None
+        }
+        self.get(0)
+    }
+
+    /// Returns a shared reference to the last element if any.
+    pub fn last(&self) -> Option<&T> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        self.get(last_index)
+    }
+
+    /// Returns a shared reference to the indexed element.
+    ///
+    /// Returns `None` if `index` is out of bounds.
+    pub fn get(&self, index: u32) -> Option<&T> {
+        self.within_bounds(index)
+            .and_then(|index| self.elems.get(index))
+    }
+}
+
+impl<T, N> SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// Appends an element to the back of the vector.
+    pub fn push(&mut self, value: T) {
+        assert!(
+            self.len() < self.capacity(),
+            "cannot push more elements into the vector"
+        );
+        let last_index = self.len();
+        *self.len += 1;
+        self.elems.put(last_index, Some(value));
+    }
+}
+
+impl<T, N> SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// Pops the last element from the vector and returns it.
+    ///
+    /// Returns `None` if the vector is empty.
+    pub fn pop(&mut self) -> Option<T> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        *self.len = last_index;
+        self.elems.put_get(last_index, None)
+    }
+
+    /// Pops the last element from the vector and immediately drops it.
+    ///
+    /// Returns `Some(())` if an element has been removed and `None` otherwise.
+    ///
+    /// # Note
+    ///
+    /// This operation is a bit more efficient than [`SmallVec::pop`]
+    /// since it avoids reading from contract storage in some use cases.
+    pub fn pop_drop(&mut self) -> Option<()> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        *self.len = last_index;
+        self.elems.put(last_index, None);
+        Some(())
+    }
+
+    /// Returns an exclusive reference to the first element if any.
+    pub fn first_mut(&mut self) -> Option<&mut T> {
+        if self.is_empty() {
+            return None
+        }
+        self.get_mut(0)
+    }
+
+    /// Returns an exclusive reference to the last element if any.
+    pub fn last_mut(&mut self) -> Option<&mut T> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        self.get_mut(last_index)
+    }
+
+    /// Returns an exclusive reference to the indexed element.
+    ///
+    /// Returns `None` if `index` is out of bounds.
+    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+        self.within_bounds(index)
+            .and_then(move |index| self.elems.get_mut(index))
+    }
+
+    /// Swaps the elements at the given indices.
+    ///
+    /// # Panics
+    ///
+    /// If one or both indices are out of bounds.
+    pub fn swap(&mut self, a: u32, b: u32) {
+        assert!(
+            a < self.len() && b < self.len(),
+            "indices are out of bounds"
+        );
+        self.elems.swap(a, b)
+    }
+
+    /// Removes the indexed element from the vector and returns it.
+    ///
+    /// The last element of the vector is put into the indexed slot.
+    /// Returns `None` and does not mutate the vector if the index is out of bounds.
+    ///
+    /// # Note
+    ///
+    /// This operation does not preserve ordering but is constant time.
+    pub fn swap_remove(&mut self, n: u32) -> Option<T> {
+        if self.is_empty() {
+            return None
+        }
+        self.elems.swap(n, self.len() - 1);
+        self.pop()
+    }
+
+    /// Removes the indexed element from the vector.
+    ///
+    /// The last element of the vector is put into the indexed slot.
+    /// Returns `Some(())` if an element has been removed and `None` otherwise.
+    ///
+    /// # Note
+    ///
+    /// This operation should be preferred over [`SmallVec::swap_remove`] if
+    /// there is no need to return the removed element since it avoids a
+    /// contract storage read for some use cases.
+    pub fn swap_remove_drop(&mut self, n: u32) -> Option<()> {
+        if self.is_empty() {
+            return None
+        }
+        self.elems.put(n, None);
+        let last_index = self.len() - 1;
+        let last = self.elems.put_get(last_index, None);
+        self.elems.put(n, last);
+        *self.len = last_index;
+        Some(())
+    }
+}
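The `_drop` variants exist because returning the removed value forces a read from contract storage. A sketch of when each flavour is appropriate (editor's illustration, assuming `U4` from `generic_array::typenum`):

    fn removal_demo(vec: &mut SmallVec<u8, U4>) {
        // `pop` decodes the removed element so it can be returned:
        let _last: Option<u8> = vec.pop();
        // `pop_drop` merely clears the cell; prefer it if the value is unused:
        let _: Option<()> = vec.pop_drop();
        // The same trade-off applies to the two swap-remove flavours:
        let _removed: Option<u8> = vec.swap_remove(0); // reads and returns
        let _: Option<()> = vec.swap_remove_drop(0);   // clears without reading
    }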
diff --git a/core/src/storage2/collections/smallvec/storage.rs b/core/src/storage2/collections/smallvec/storage.rs
new file mode 100644
index 00000000000..611bdfdda6c
--- /dev/null
+++ b/core/src/storage2/collections/smallvec/storage.rs
@@ -0,0 +1,50 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::SmallVec;
+use crate::storage2::{
+    lazy::LazyArrayLength,
+    traits::{
+        KeyPtr,
+        PackedLayout,
+        SpreadLayout,
+    },
+};
+use generic_array::typenum::Unsigned;
+
+impl<T, N> SpreadLayout for SmallVec<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    const FOOTPRINT: u64 = 1 + <N as Unsigned>::U64;
+
+    fn pull_spread(ptr: &mut KeyPtr) -> Self {
+        Self {
+            len: SpreadLayout::pull_spread(ptr),
+            elems: SpreadLayout::pull_spread(ptr),
+        }
+    }
+
+    fn push_spread(&self, ptr: &mut KeyPtr) {
+        SpreadLayout::push_spread(&self.len, ptr);
+        SpreadLayout::push_spread(&self.elems, ptr);
+    }
+
+    fn clear_spread(&self, ptr: &mut KeyPtr) {
+        self.clear_cells();
+        SpreadLayout::clear_spread(&self.len, ptr);
+        SpreadLayout::clear_spread(&self.elems, ptr);
+    }
+}
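The footprint arithmetic is one cell for `len` plus one cell per element slot, so the vector reserves a contiguous region of `1 + N` keys. A compile-time sketch of that relation (editor's illustration, reusing `Unsigned` from `generic_array::typenum`):

    use generic_array::typenum::{Unsigned, U4};

    fn footprint_demo() {
        // For `SmallVec<T, U4>`: one `len` cell plus four element cells.
        assert_eq!(1 + <U4 as Unsigned>::U64, 5);
    }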
diff --git a/core/src/storage2/collections/smallvec/tests.rs b/core/src/storage2/collections/smallvec/tests.rs
new file mode 100644
index 00000000000..015711663a0
--- /dev/null
+++ b/core/src/storage2/collections/smallvec/tests.rs
@@ -0,0 +1,399 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::SmallVec;
+use crate::{
+    env,
+    storage2::traits::{
+        KeyPtr,
+        SpreadLayout,
+    },
+};
+use generic_array::typenum::*;
+use ink_primitives::Key;
+
+#[test]
+fn new_vec_works() {
+    let vec = <SmallVec<u8, U4>>::new();
+    assert!(vec.is_empty());
+    assert_eq!(vec.len(), 0);
+    assert_eq!(vec.get(0), None);
+    assert!(vec.iter().next().is_none());
+    let default = <SmallVec<u8, U4> as Default>::default();
+    assert!(default.is_empty());
+    assert_eq!(default.len(), 0);
+    assert_eq!(default.get(0), None);
+    assert!(default.iter().next().is_none());
+}
+
+#[test]
+fn from_iterator_works() {
+    let some_primes = [b'A', b'B', b'C', b'D'];
+    assert_eq!(some_primes.iter().copied().collect::<SmallVec<u8, U4>>(), {
+        let mut vec = SmallVec::new();
+        for prime in &some_primes {
+            vec.push(*prime)
+        }
+        vec
+    });
+}
+
+#[test]
+#[should_panic]
+fn from_iterator_too_many() {
+    let some_primes = [b'A', b'B', b'C', b'D', b'E'];
+    let _ = some_primes.iter().copied().collect::<SmallVec<u8, U4>>();
+}
+
+#[test]
+fn from_empty_iterator_works() {
+    assert_eq!(
+        [].iter().copied().collect::<SmallVec<u8, U4>>(),
+        SmallVec::new(),
+    );
+}
+
+#[test]
+fn first_last_of_empty() {
+    let mut vec = <SmallVec<u8, U4>>::new();
+    assert_eq!(vec.first(), None);
+    assert_eq!(vec.first_mut(), None);
+    assert_eq!(vec.last(), None);
+    assert_eq!(vec.last_mut(), None);
+}
+
+#[test]
+fn pop_on_empty_works() {
+    let mut vec = <SmallVec<u8, U4>>::new();
+    assert_eq!(vec.pop(), None);
+}
+
+#[test]
+fn push_pop_first_last_works() {
+    /// Asserts conditions are met for the given storage vector.
+    fn assert_vec<F, L>(vec: &SmallVec<u8, U4>, len: u32, first: F, last: L)
+    where
+        F: Into<Option<u8>>,
+        L: Into<Option<u8>>,
+    {
+        assert_eq!(vec.is_empty(), len == 0);
+        assert_eq!(vec.len(), len);
+        assert_eq!(vec.first().copied(), first.into());
+        assert_eq!(vec.last().copied(), last.into());
+    }
+
+    let mut vec = SmallVec::new();
+    assert_vec(&vec, 0, None, None);
+
+    // Sequence of `push`
+    vec.push(b'A');
+    assert_vec(&vec, 1, b'A', b'A');
+    vec.push(b'B');
+    assert_vec(&vec, 2, b'A', b'B');
+    vec.push(b'C');
+    assert_vec(&vec, 3, b'A', b'C');
+    vec.push(b'D');
+    assert_vec(&vec, 4, b'A', b'D');
+
+    // Sequence of `pop`
+    assert_eq!(vec.pop(), Some(b'D'));
+    assert_vec(&vec, 3, b'A', b'C');
+    assert_eq!(vec.pop(), Some(b'C'));
+    assert_vec(&vec, 2, b'A', b'B');
+    assert_eq!(vec.pop(), Some(b'B'));
+    assert_vec(&vec, 1, b'A', b'A');
+    assert_eq!(vec.pop(), Some(b'A'));
+    assert_vec(&vec, 0, None, None);
+
+    // Pop from empty vector.
+    assert_eq!(vec.pop(), None);
+    assert_vec(&vec, 0, None, None);
+}
+
+#[test]
+#[should_panic]
+fn push_beyond_limits_fails() {
+    let mut vec = [b'A', b'B', b'C', b'D']
+        .iter()
+        .copied()
+        .collect::<SmallVec<u8, U4>>();
+    vec.push(b'E');
+}
+
+/// Creates a storage vector from the given slice.
+fn vec_from_slice(slice: &[u8]) -> SmallVec<u8, U4> {
+    slice.iter().copied().collect::<SmallVec<u8, U4>>()
+}
+
+/// Asserts that the given ordered storage vector elements are equal to the
+/// ordered elements of the given slice.
+fn assert_eq_slice(vec: &SmallVec<u8, U4>, slice: &[u8]) {
+    assert_eq!(vec.len() as usize, slice.len());
+    let vec_copy = vec.iter().copied().collect::<Vec<u8>>();
+    assert_eq!(vec_copy.as_slice(), slice);
+}
+
+#[test]
+fn pop_drop_works() {
+    let elems = [b'A', b'B', b'C', b'D'];
+    let mut vec = vec_from_slice(&elems);
+    assert_eq!(vec.pop_drop(), Some(()));
+    assert_eq_slice(&vec, &elems[0..3]);
+    assert_eq!(vec.pop_drop(), Some(()));
+    assert_eq_slice(&vec, &elems[0..2]);
+    assert_eq!(vec.pop_drop(), Some(()));
+    assert_eq_slice(&vec, &elems[0..1]);
+    assert_eq!(vec.pop_drop(), Some(()));
+    assert_eq_slice(&vec, &[]);
+    assert_eq!(vec.pop_drop(), None);
+    assert_eq_slice(&vec, &[]);
+}
+
+#[test]
+fn get_works() {
+    let elems = [b'A', b'B', b'C', b'D'];
+    let mut vec = vec_from_slice(&elems);
+    for (n, mut expected) in elems.iter().copied().enumerate() {
+        let n = n as u32;
+        assert_eq!(vec.get(n), Some(&expected));
+        assert_eq!(vec.get_mut(n), Some(&mut expected));
+        assert_eq!(&vec[n], &expected);
+        assert_eq!(&mut vec[n], &mut expected);
+    }
+    let len = vec.len();
+    assert_eq!(vec.get(len), None);
+    assert_eq!(vec.get_mut(len), None);
+}
+
+#[test]
+#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")]
+fn index_out_of_bounds_works() {
+    let test_values = [b'a', b'b', b'c'];
+    let vec = vec_from_slice(&test_values);
+    let _ = &vec[test_values.len() as u32];
+}
+
+#[test]
+#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")]
+fn index_mut_out_of_bounds_works() {
+    let test_values = [b'a', b'b', b'c'];
+    let mut vec = vec_from_slice(&test_values);
+    let _ = &mut vec[test_values.len() as u32];
+}
+
+#[test]
+fn iter_next_works() {
+    let elems = [b'A', b'B', b'C', b'D'];
+    let vec = vec_from_slice(&elems);
+    // Test iterator over shared references.
+ let mut iter = vec.iter(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.next(), Some(&b'A')); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some(&b'C')); + assert_eq!(iter.next(), Some(&b'D')); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. + let mut vec = vec; + let mut iter = vec.iter_mut(); + assert_eq!(iter.next(), Some(&mut b'A')); + assert_eq!(iter.next(), Some(&mut b'B')); + assert_eq!(iter.next(), Some(&mut b'C')); + assert_eq!(iter.next(), Some(&mut b'D')); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_nth_works() { + let elems = [b'A', b'B', b'C', b'D']; + let vec = vec_from_slice(&elems); + // Test iterator over shared references. + let mut iter = vec.iter(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.nth(1), Some(&b'B')); + assert_eq!(iter.count(), 2); + assert_eq!(iter.nth(1), Some(&b'D')); + assert_eq!(iter.count(), 0); + assert_eq!(iter.nth(1), None); + // Test iterator over exclusive references. + let mut vec = vec; + let mut iter = vec.iter_mut(); + assert_eq!(iter.nth(1), Some(&mut b'B')); + assert_eq!(iter.nth(1), Some(&mut b'D')); + assert_eq!(iter.nth(1), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_next_back_works() { + let elems = [b'A', b'B', b'C', b'D']; + let vec = vec_from_slice(&elems); + // Test iterator over shared references. + let mut iter = vec.iter().rev(); + assert_eq!(iter.clone().count(), 4); + assert_eq!(iter.next(), Some(&b'D')); + assert_eq!(iter.next(), Some(&b'C')); + assert_eq!(iter.clone().count(), 2); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.next(), Some(&b'A')); + assert_eq!(iter.clone().count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. + let mut vec = vec; + let mut iter = vec.iter_mut().rev(); + assert_eq!(iter.next(), Some(&mut b'D')); + assert_eq!(iter.next(), Some(&mut b'C')); + assert_eq!(iter.next(), Some(&mut b'B')); + assert_eq!(iter.next(), Some(&mut b'A')); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_nth_back_works() { + let elems = [b'A', b'B', b'C', b'D']; + let vec = vec_from_slice(&elems); + // Test iterator over shared references. + let mut iter = vec.iter().rev(); + assert_eq!(iter.clone().count(), 4); + assert_eq!(iter.nth(1), Some(&b'C')); + assert_eq!(iter.clone().count(), 2); + assert_eq!(iter.nth(1), Some(&b'A')); + assert_eq!(iter.clone().count(), 0); + assert_eq!(iter.nth(1), None); + // Test iterator over exclusive references. + let mut vec = vec; + let mut iter = vec.iter_mut().rev(); + assert_eq!(iter.nth(1), Some(&mut b'C')); + assert_eq!(iter.nth(1), Some(&mut b'A')); + assert_eq!(iter.nth(1), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn swap_works() { + let elems = [b'A', b'B', b'C', b'D']; + let mut vec = vec_from_slice(&elems); + + // Swap at same position is a no-op. 
+    for index in 0..elems.len() as u32 {
+        vec.swap(index, index);
+        assert_eq_slice(&vec, &elems);
+    }
+
+    // Swap first and second
+    vec.swap(0, 1);
+    assert_eq_slice(&vec, &[b'B', b'A', b'C', b'D']);
+    // Swap third and last
+    vec.swap(2, 3);
+    assert_eq_slice(&vec, &[b'B', b'A', b'D', b'C']);
+    // Swap first and last
+    vec.swap(0, 3);
+    assert_eq_slice(&vec, &[b'C', b'A', b'D', b'B']);
+}
+
+#[test]
+#[should_panic]
+fn swap_one_invalid_index() {
+    let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']);
+    vec.swap(0, vec.len());
+}
+
+#[test]
+#[should_panic]
+fn swap_both_invalid_indices() {
+    let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']);
+    vec.swap(vec.len(), vec.len());
+}
+
+#[test]
+fn swap_remove_works() {
+    let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']);
+
+    // Swap remove first element.
+    assert_eq!(vec.swap_remove(0), Some(b'A'));
+    assert_eq_slice(&vec, &[b'D', b'B', b'C']);
+    // Swap remove middle element.
+    assert_eq!(vec.swap_remove(1), Some(b'B'));
+    assert_eq_slice(&vec, &[b'D', b'C']);
+    // Swap remove last element.
+    assert_eq!(vec.swap_remove(1), Some(b'C'));
+    assert_eq_slice(&vec, &[b'D']);
+    // Swap remove only element.
+    assert_eq!(vec.swap_remove(0), Some(b'D'));
+    assert_eq_slice(&vec, &[]);
+    // Swap remove from empty vector.
+    assert_eq!(vec.swap_remove(0), None);
+    assert_eq_slice(&vec, &[]);
+}
+
+#[test]
+fn swap_remove_drop_works() {
+    let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']);
+
+    // Swap remove first element.
+    assert_eq!(vec.swap_remove_drop(0), Some(()));
+    assert_eq_slice(&vec, &[b'D', b'B', b'C']);
+    // Swap remove middle element.
+    assert_eq!(vec.swap_remove_drop(1), Some(()));
+    assert_eq_slice(&vec, &[b'D', b'C']);
+    // Swap remove last element.
+    assert_eq!(vec.swap_remove_drop(1), Some(()));
+    assert_eq_slice(&vec, &[b'D']);
+    // Swap remove only element.
+    assert_eq!(vec.swap_remove_drop(0), Some(()));
+    assert_eq_slice(&vec, &[]);
+    // Swap remove from empty vector.
+    assert_eq!(vec.swap_remove_drop(0), None);
+    assert_eq_slice(&vec, &[]);
+}
+
+#[test]
+fn spread_layout_push_pull_works() -> env::Result<()> {
+    env::test::run_test::<env::DefaultEnvTypes, _>(|_| {
+        let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']);
+        let root_key = Key([0x42; 32]);
+        SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key));
+        // Load the pushed storage vector into another instance and check that
+        // both instances are equal:
+        let vec2 =
+            <SmallVec<u8, U4> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key));
+        assert_eq!(vec1, vec2);
+        Ok(())
+    })
+}
+
+#[test]
+#[should_panic(expected = "encountered empty storage cell")]
+fn spread_layout_clear_works() {
+    env::test::run_test::<env::DefaultEnvTypes, _>(|_| {
+        let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']);
+        let root_key = Key([0x42; 32]);
+        SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key));
+        // It has already been asserted that a valid instance can be pulled
+        // from contract storage after a push to the same storage region.
+        //
+        // Now clear the associated storage from `vec1` and check whether
+        // loading another instance from this storage will panic since the
+        // vector's length property cannot read a value:
+        SpreadLayout::clear_spread(&vec1, &mut KeyPtr::from(root_key));
+        let _ =
+            <SmallVec<u8, U4> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key));
+        Ok(())
+    })
+    .unwrap()
+}
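The clear test passes because `clear_spread` removes the `len` cell and a subsequent `pull_spread` decodes `len` eagerly, hitting the now-empty cell. A sketch of the full lifecycle these two tests exercise (editor's illustration):

    fn round_trip_demo(vec: &SmallVec<u8, U4>) {
        let root_key = Key([0x00; 32]);
        // Writing stores `len` and all occupied element cells:
        SpreadLayout::push_spread(vec, &mut KeyPtr::from(root_key));
        // Pulling decodes `len` eagerly; elements load lazily on access:
        let pulled =
            <SmallVec<u8, U4> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key));
        // Clearing removes the cells; a further pull would panic with
        // "encountered empty storage cell" while decoding `len`:
        SpreadLayout::clear_spread(&pulled, &mut KeyPtr::from(root_key));
    }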
diff --git a/core/src/storage2/collections/stash/impls.rs b/core/src/storage2/collections/stash/impls.rs
new file mode 100644
index 00000000000..d9dd7f7a476
--- /dev/null
+++ b/core/src/storage2/collections/stash/impls.rs
@@ -0,0 +1,164 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of generic traits that are useful for the storage stash.
+
+use super::{
+    Iter,
+    IterMut,
+    Stash as StorageStash,
+};
+use crate::storage2::traits::PackedLayout;
+use core::iter::{
+    Extend,
+    FromIterator,
+};
+
+impl<T> Drop for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    fn drop(&mut self) {
+        self.clear_cells();
+    }
+}
+
+impl<T> Default for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    fn default() -> Self {
+        StorageStash::new()
+    }
+}
+
+cfg_if::cfg_if! {
+    if #[cfg(debug_assertions)] {
+        impl<T> StorageStash<T>
+        where
+            T: PackedLayout,
+        {
+            fn assert_index_within_bounds(&self, index: u32) {
+                if index >= self.len() {
+                    panic!(
+                        "index out of bounds: the len is {} but the index is {}",
+                        self.len(),
+                        index
+                    )
+                }
+            }
+        }
+    } else {
+        impl<T> StorageStash<T>
+        where
+            T: PackedLayout,
+        {
+            fn assert_index_within_bounds(&self, _index: u32) {}
+        }
+    }
+}
+
+impl<T> core::ops::Index<u32> for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    type Output = T;
+
+    fn index(&self, index: u32) -> &Self::Output {
+        self.assert_index_within_bounds(index);
+        match self.get(index) {
+            Some(value) => value,
+            None => panic!("indexed vacant entry: at index {}", index),
+        }
+    }
+}
+
+impl<T> core::ops::IndexMut<u32> for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    fn index_mut(&mut self, index: u32) -> &mut Self::Output {
+        self.assert_index_within_bounds(index);
+        match self.get_mut(index) {
+            Some(value) => value,
+            None => panic!("indexed vacant entry: at index {}", index),
+        }
+    }
+}
+
+impl<'a, T: 'a> IntoIterator for &'a StorageStash<T>
+where
+    T: PackedLayout,
+{
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, T: 'a> IntoIterator for &'a mut StorageStash<T>
+where
+    T: PackedLayout,
+{
+    type Item = &'a mut T;
+    type IntoIter = IterMut<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+impl<T> Extend<T> for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    fn extend<I>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = T>,
+    {
+        for item in iter {
+            self.put(item);
+        }
+    }
+}
+
+impl<T> FromIterator<T> for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    fn from_iter<I>(iter: I) -> Self
+    where
+        I: IntoIterator<Item = T>,
+    {
+        let mut vec = StorageStash::new();
+        vec.extend(iter);
+        vec
+    }
+}
+
+impl<T> core::cmp::PartialEq for StorageStash<T>
+where
+    T: PartialEq + PackedLayout,
+{
+    fn eq(&self, other: &Self) -> bool {
+        if self.len() != other.len() {
+            return false
+        }
+        self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs)
+    }
+}
+
+impl<T> core::cmp::Eq for StorageStash<T> where T: scale::Decode + Eq + PackedLayout {}
diff --git a/core/src/storage2/collections/stash/iter.rs b/core/src/storage2/collections/stash/iter.rs
new file mode 100644
index 00000000000..422de8a2bb2
--- /dev/null
+++ b/core/src/storage2/collections/stash/iter.rs
@@ -0,0 +1,325 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+use super::Entry;
+use super::Stash;
+use crate::storage2::{
+    collections::extend_lifetime,
+    traits::PackedLayout,
+};
+
+/// An iterator over shared references to the elements of a storage stash.
+#[derive(Debug, Clone, Copy)]
+pub struct Iter<'a, T>
+where
+    T: PackedLayout,
+{
+    /// The storage stash to iterate over.
+    stash: &'a Stash<T>,
+    /// The number of already yielded elements.
+    ///
+    /// # Note
+    ///
+    /// This is important to make this iterator an `ExactSizeIterator`.
+    yielded: u32,
+    /// The current begin of the iteration.
+    begin: u32,
+    /// The current end of the iteration.
+    end: u32,
+}
+
+impl<'a, T> Iter<'a, T>
+where
+    T: PackedLayout,
+{
+    /// Creates a new iterator for the given storage stash.
+    pub(crate) fn new(stash: &'a Stash<T>) -> Self {
+        Self {
+            stash,
+            yielded: 0,
+            begin: 0,
+            end: stash.len_entries(),
+        }
+    }
+
+    /// Returns the number of remaining elements to yield by the iterator.
+    fn remaining(&self) -> u32 {
+        self.stash.len() - self.yielded
+    }
+}
+
+impl<'a, T> Iterator for Iter<'a, T>
+where
+    T: PackedLayout,
+{
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            debug_assert!(self.begin <= self.end);
+            if self.begin == self.end {
+                return None
+            }
+            let cur = self.begin;
+            self.begin += 1;
+            match self.stash.get(cur) {
+                Some(value) => {
+                    self.yielded += 1;
+                    return Some(value)
+                }
+                None => continue,
+            }
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = self.remaining() as usize;
+        (remaining, Some(remaining))
+    }
+
+    fn count(self) -> usize {
+        self.remaining() as usize
+    }
+}
+
+impl<'a, T> ExactSizeIterator for Iter<'a, T> where T: PackedLayout {}
+
+impl<'a, T> DoubleEndedIterator for Iter<'a, T>
+where
+    T: PackedLayout,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        loop {
+            debug_assert!(self.begin <= self.end);
+            if self.begin == self.end {
+                return None
+            }
+            debug_assert_ne!(self.end, 0);
+            self.end -= 1;
+            match self.stash.get(self.end) {
+                Some(value) => {
+                    self.yielded += 1;
+                    return Some(value)
+                }
+                None => continue,
+            }
+        }
+    }
+}
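// Editor's note: unlike the small-vector iterators, the stash iterator has to
// skip vacant entries, so its exact length comes from `stash.len() - yielded`
// rather than from the `begin..end` cursor distance. A behavioural sketch,
// assuming a stash built from [A, B, C] where B has been `take`n:
//
//     let mut iter = stash.iter();
//     assert_eq!(iter.size_hint(), (2, Some(2))); // occupied entries only
//     assert_eq!(iter.next(), Some(&b'A'));       // the hole at index 1 is skipped
//     assert_eq!(iter.count(), 1);                // still exact afterwards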
+
+/// An iterator over exclusive references to the elements of a storage stash.
+#[derive(Debug)]
+pub struct IterMut<'a, T>
+where
+    T: PackedLayout,
+{
+    /// The storage stash to iterate over.
+    stash: &'a mut Stash<T>,
+    /// The number of already yielded elements.
+    ///
+    /// # Note
+    ///
+    /// This is important to make this iterator an `ExactSizeIterator`.
+    yielded: u32,
+    /// The current begin of the iteration.
+    begin: u32,
+    /// The current end of the iteration.
+    end: u32,
+}
+
+impl<'a, T> IterMut<'a, T>
+where
+    T: PackedLayout,
+{
+    /// Creates a new iterator for the given storage stash.
+    pub(crate) fn new(stash: &'a mut Stash<T>) -> Self {
+        let len = stash.len_entries();
+        Self {
+            stash,
+            yielded: 0,
+            begin: 0,
+            end: len,
+        }
+    }
+
+    /// Returns the number of remaining elements to yield by the iterator.
+    fn remaining(&self) -> u32 {
+        self.stash.len() - self.yielded
+    }
+}
+
+impl<'a, T> IterMut<'a, T>
+where
+    T: PackedLayout,
+{
+    fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> {
+        self.stash.get_mut(at).map(|value| {
+            // SAFETY: We extend the lifetime of the reference here.
+            //
+            // This is safe because the iterator yields an exclusive
+            // reference to every element in the iterated vector
+            // just once and also there can be only one such iterator
+            // for the same vector at the same time which is
+            // guaranteed by the constructor of the iterator.
+            unsafe { extend_lifetime::<'b, 'a, T>(value) }
+        })
+    }
+}
+
+impl<'a, T> Iterator for IterMut<'a, T>
+where
+    T: PackedLayout,
+{
+    type Item = &'a mut T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            debug_assert!(self.begin <= self.end);
+            if self.begin == self.end {
+                return None
+            }
+            let cur = self.begin;
+            self.begin += 1;
+            match self.get_mut(cur) {
+                Some(value) => {
+                    self.yielded += 1;
+                    return Some(value)
+                }
+                None => continue,
+            }
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = self.remaining() as usize;
+        (remaining, Some(remaining))
+    }
+
+    fn count(self) -> usize {
+        self.remaining() as usize
+    }
+}
+
+impl<'a, T> ExactSizeIterator for IterMut<'a, T> where T: PackedLayout {}
+
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T>
+where
+    T: PackedLayout,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        loop {
+            debug_assert!(self.begin <= self.end);
+            if self.begin == self.end {
+                return None
+            }
+            debug_assert_ne!(self.end, 0);
+            self.end -= 1;
+            match self.get_mut(self.end) {
+                Some(value) => {
+                    self.yielded += 1;
+                    return Some(value)
+                }
+                None => continue,
+            }
+        }
+    }
+}
+
+/// An iterator over shared references to the entries of a storage stash.
+///
+/// # Note
+///
+/// This is an internal API and mainly used for testing the storage stash.
+#[derive(Debug, Clone, Copy)]
+#[cfg(test)]
+pub struct Entries<'a, T>
+where
+    T: PackedLayout,
+{
+    /// The storage stash to iterate over.
+    stash: &'a Stash<T>,
+    /// The current begin of the iteration.
+    begin: u32,
+    /// The current end of the iteration.
+    end: u32,
+}
+
+#[cfg(test)]
+impl<'a, T> Entries<'a, T>
+where
+    T: PackedLayout,
+{
+    /// Creates a new iterator for the given storage stash.
+    pub(crate) fn new(stash: &'a Stash<T>) -> Self {
+        let len = stash.len_entries();
+        Self {
+            stash,
+            begin: 0,
+            end: len,
+        }
+    }
+}
+
+#[cfg(test)]
+impl<'a, T> Iterator for Entries<'a, T>
+where
+    T: PackedLayout,
+{
+    type Item = &'a Entry<T>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        debug_assert!(self.begin <= self.end);
+        if self.begin == self.end {
+            return None
+        }
+        let cur = self.begin;
+        self.begin += 1;
+        let entry = self
+            .stash
+            .entries
+            .get(cur)
+            .expect("iterator indices are within bounds");
+        Some(entry)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = (self.end - self.begin) as usize;
+        (remaining, Some(remaining))
+    }
+}
+
+#[cfg(test)]
+impl<'a, T> ExactSizeIterator for Entries<'a, T> where T: PackedLayout {}
+
+#[cfg(test)]
+impl<'a, T> DoubleEndedIterator for Entries<'a, T>
+where
+    T: PackedLayout,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        debug_assert!(self.begin <= self.end);
+        if self.begin == self.end {
+            return None
+        }
+        debug_assert_ne!(self.end, 0);
+        self.end -= 1;
+        let entry = self
+            .stash
+            .entries
+            .get(self.end)
+            .expect("iterator indices are within bounds");
+        Some(entry)
+    }
+}
diff --git a/core/src/storage2/collections/stash/mod.rs b/core/src/storage2/collections/stash/mod.rs
new file mode 100644
index 00000000000..883ec008d8d
--- /dev/null
+++ b/core/src/storage2/collections/stash/mod.rs
@@ -0,0 +1,542 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A storage stash that stores indexed elements efficiently.
+
+mod impls;
+mod iter;
+mod storage;
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(test)]
+use self::iter::Entries;
+pub use self::iter::{
+    Iter,
+    IterMut,
+};
+use crate::storage2::{
+    lazy::LazyIndexMap,
+    traits::PackedLayout,
+    Pack,
+};
+use ink_primitives::Key;
+
+/// An index into the stash.
+type Index = u32;
+
+/// A stash data structure operating on contract storage.
+///
+/// It stores elements similar to a vector, but in an unordered fashion,
+/// which enables constant time random deletion of elements. This allows
+/// for efficient attachment of data to numeric indices.
+#[derive(Debug)]
+pub struct Stash<T>
+where
+    T: PackedLayout,
+{
+    /// The combined and commonly used header data.
+    header: Pack<Header>,
+    /// The storage entries of the stash.
+    entries: LazyIndexMap<Entry<T>>,
+}
+
+/// Stores general commonly required information about the storage stash.
+#[derive(Debug, scale::Encode, scale::Decode)]
+struct Header {
+    /// The latest vacant index.
+    ///
+    /// - If all entries are occupied:
+    ///   - Points to the entry at index `self.len`.
+    /// - If some entries are vacant:
+    ///   - Points to the entry that has been vacated most recently.
+    last_vacant: Index,
+    /// The number of items stored in the stash.
+    ///
+    /// # Note
+    ///
+    /// We cannot simply use the underlying number of entries
+    /// since it would include vacant slots as well.
+    len: u32,
+    /// The number of entries currently managed by the stash.
+    len_entries: u32,
+}
+
+/// A vacant entry with previous and next vacant indices.
+#[derive(Debug, Copy, Clone, scale::Encode, scale::Decode)]
+pub struct VacantEntry {
+    /// The next vacant index.
+    next: Index,
+    /// The previous vacant index.
+    prev: Index,
+}
+
+/// An entry within the stash.
+///
+/// The vacant entries within a storage stash form a doubly linked list of
+/// vacant entries that is used to quickly re-use their vacant storage.
+#[derive(Debug, scale::Encode, scale::Decode)]
+pub enum Entry<T> {
+    /// A vacant entry that holds the index to the next and previous vacant entry.
+    Vacant(VacantEntry),
+    /// An occupied entry that holds the value.
+    Occupied(T),
+}
+
+impl<T> Entry<T> {
+    /// Returns `true` if the entry is occupied.
+    pub fn is_occupied(&self) -> bool {
+        if let Entry::Occupied(_) = self {
+            return true
+        }
+        false
+    }
+
+    /// Returns `true` if the entry is vacant.
+    pub fn is_vacant(&self) -> bool {
+        !self.is_occupied()
+    }
+
+    /// Returns the vacant entry if the entry is vacant, otherwise returns `None`.
+    fn try_to_vacant(&self) -> Option<VacantEntry> {
+        match self {
+            Entry::Occupied(_) => None,
+            Entry::Vacant(vacant_entry) => Some(*vacant_entry),
+        }
+    }
+
+    /// Returns the vacant entry if the entry is vacant, otherwise returns `None`.
+    fn try_to_vacant_mut(&mut self) -> Option<&mut VacantEntry> {
+        match self {
+            Entry::Occupied(_) => None,
+            Entry::Vacant(vacant_entry) => Some(vacant_entry),
+        }
+    }
+}
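// Editor's note: the vacant entries above form a cyclic, doubly linked free
// list threaded through the entry array, which is what lets `put` (defined
// below) reuse freed slots in O(1). A behavioural sketch:
//
//     let mut stash = Stash::new();
//     assert_eq!(stash.put(b'A'), 0);
//     assert_eq!(stash.put(b'B'), 1);
//     assert_eq!(stash.put(b'C'), 2);
//     stash.take(1);                  // index 1 joins the free list
//     assert_eq!(stash.put(b'D'), 1); // ... and is reused before growing
//     assert_eq!(stash.len(), 3);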
+
+impl<T> Stash<T>
+where
+    T: PackedLayout,
+{
+    /// Creates a new empty stash.
+    pub fn new() -> Self {
+        Self {
+            header: Pack::new(Header {
+                last_vacant: 0,
+                len: 0,
+                len_entries: 0,
+            }),
+            entries: LazyIndexMap::new(),
+        }
+    }
+
+    /// Returns the number of elements stored in the stash.
+    pub fn len(&self) -> u32 {
+        self.header.len
+    }
+
+    /// Returns `true` if the stash contains no elements.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the number of entries the stash can hold without
+    /// allocating another storage cell.
+    ///
+    /// # Note
+    ///
+    /// This is the total number of occupied and vacant entries of the stash.
+    pub fn capacity(&self) -> u32 {
+        self.len_entries()
+    }
+
+    /// Returns the number of entries currently managed by the storage stash.
+    fn len_entries(&self) -> u32 {
+        self.header.len_entries
+    }
+
+    /// Returns the underlying key to the cells.
+    ///
+    /// # Note
+    ///
+    /// This is a low-level utility getter and should
+    /// normally not be required by users.
+    pub fn entries_key(&self) -> Option<&Key> {
+        self.entries.key()
+    }
+
+    /// Returns an iterator yielding shared references to all elements of the stash.
+    ///
+    /// # Note
+    ///
+    /// Avoid unbounded iteration over big storage stashes.
+    /// Prefer using methods like `Iterator::take` in order to limit the number
+    /// of yielded elements.
+    pub fn iter(&self) -> Iter<T> {
+        Iter::new(self)
+    }
+
+    /// Returns an iterator yielding exclusive references to all elements of the stash.
+    ///
+    /// # Note
+    ///
+    /// Avoid unbounded iteration over big storage stashes.
+    /// Prefer using methods like `Iterator::take` in order to limit the number
+    /// of yielded elements.
+    pub fn iter_mut(&mut self) -> IterMut<T> {
+        IterMut::new(self)
+    }
+
+    /// Returns an iterator yielding shared references to all entries of the stash.
+    ///
+    /// # Note
+    ///
+    /// This is an internal API mainly used for testing the storage stash.
+    #[cfg(test)]
+    fn entries(&self) -> Entries<T> {
+        Entries::new(self)
+    }
+
+    /// Returns `true` if the storage stash has vacant entries.
+    fn has_vacant_entries(&self) -> bool {
+        self.header.len != self.header.len_entries
+    }
+
+    /// Returns the index of the last vacant entry if any.
+    fn last_vacant_index(&self) -> Option<Index> {
+        if self.has_vacant_entries() {
+            Some(self.header.last_vacant)
+        } else {
+            None
+        }
+    }
+}
+
+impl<T> Stash<T>
+where
+    T: PackedLayout,
+{
+    /// Returns a shared reference to the element at the given index.
+    pub fn get(&self, at: Index) -> Option<&T> {
+        if at >= self.len_entries() {
+            // Bail out early if the index is out of bounds.
+            return None
+        }
+        self.entries.get(at).and_then(|entry| {
+            match entry {
+                Entry::Occupied(val) => Some(val),
+                Entry::Vacant { .. } => None,
+            }
+        })
+    }
+
+    /// Returns an exclusive reference to the element at the given index.
+    pub fn get_mut(&mut self, at: Index) -> Option<&mut T> {
+        if at >= self.len_entries() {
+            // Bail out early if the index is out of bounds.
+            return None
+        }
+        self.entries.get_mut(at).and_then(|entry| {
+            match entry {
+                Entry::Occupied(val) => Some(val),
+                Entry::Vacant { .. } => None,
+            }
+        })
+    }
+}
+
+impl<T> Stash<T>
+where
+    T: PackedLayout,
+{
+    /// Clears the underlying storage cells of the storage stash.
+    ///
+    /// # Note
+    ///
+    /// This completely invalidates the storage stash's invariants about
+    /// the contents of its associated storage region.
+    ///
+    /// This API is used for the `Drop` implementation of [`Stash`] as well as
+    /// for the [`SpreadLayout::clear_spread`] trait implementation.
+    fn clear_cells(&self) {
+        if self.entries.key().is_none() {
+            // We won't clear any storage if we are in lazy state since there
+            // probably has not been any state written to storage, yet.
+            return
+        }
+        for index in 0..self.len_entries() {
+            // It might seem wasteful to clear all entries instead of just
+            // the occupied ones. However this spares us from having one extra
+            // read for every element in the storage stash to filter out vacant
+            // entries. So this is actually a trade-off and at the time of this
+            // implementation it is unclear which path is more efficient.
+            //
+            // The bet is that clearing a storage cell is cheaper than reading one.
+            self.entries.clear_packed_at(index);
+        }
+    }
+}
+
+impl<T> Stash<T>
+where
+    T: PackedLayout,
+{
+    /// Rebinds the `prev` and `next` bindings of the neighbours of the vacant entry.
+    ///
+    /// # Note
+    ///
+    /// The `removed_index` points to the index of the removed vacant entry.
+    fn remove_vacant_entry(&mut self, removed_index: Index, vacant_entry: VacantEntry) {
+        let prev_vacant = vacant_entry.prev;
+        let next_vacant = vacant_entry.next;
+        if prev_vacant == removed_index && next_vacant == removed_index {
+            // There is no other vacant entry left in the storage stash so
+            // there is nothing to update. Bail out early.
+            return
+        }
+        if prev_vacant == next_vacant {
+            // There is only one other vacant entry left.
+            // We can update the single vacant entry in a single look-up.
+            let entry = self
+                .entries
+                .get_mut(prev_vacant)
+                .map(Entry::try_to_vacant_mut)
+                .flatten()
+                .expect("`prev` must point to an existing entry at this point");
+            debug_assert_eq!(entry.prev, removed_index);
+            debug_assert_eq!(entry.next, removed_index);
+            entry.prev = prev_vacant;
+            entry.next = prev_vacant;
+        } else {
+            // There are multiple other vacant entries left.
+            let prev = self
+                .entries
+                .get_mut(prev_vacant)
+                .map(Entry::try_to_vacant_mut)
+                .flatten()
+                .expect("`prev` must point to an existing entry at this point");
+            debug_assert_eq!(prev.next, removed_index);
+            prev.next = next_vacant;
+            let next = self
+                .entries
+                .get_mut(next_vacant)
+                .map(Entry::try_to_vacant_mut)
+                .flatten()
+                .expect("`next` must point to an existing entry at this point");
+            debug_assert_eq!(next.prev, removed_index);
+            next.prev = prev_vacant;
+        }
+        // Bind the last vacant pointer to the vacant position with the lower index.
+        // This has the effect that lower indices are refilled more quickly.
+        use core::cmp::min;
+        if removed_index == self.header.last_vacant {
+            self.header.last_vacant = min(prev_vacant, next_vacant);
+        }
+    }
+
+    /// Puts the element into the stash at the next vacant position.
+    ///
+    /// Returns the stash index that the element was put into.
+    pub fn put(&mut self, new_value: T) -> Index {
+        let new_entry = Some(Entry::Occupied(new_value));
+        let new_index = if let Some(index) = self.last_vacant_index() {
+            // Put the new element into the most recently vacated index if
+            // not all entries are occupied.
+            let old_entry = self
+                .entries
+                .put_get(index, new_entry)
+                .expect("a `last_vacant_index()` must point to an occupied cell");
+            let vacant_entry = match old_entry {
+                Entry::Vacant(vacant_entry) => vacant_entry,
+                Entry::Occupied(_) => {
+                    unreachable!("`last_vacant_index()` must point to a vacant entry")
+                }
+            };
+            self.remove_vacant_entry(index, vacant_entry);
+            index
+        } else {
+            // Push the new element to the end if all entries are occupied.
+            let new_index = self.header.len_entries;
+            self.entries.put(new_index, new_entry);
+            self.header.last_vacant += 1;
+            self.header.len_entries += 1;
+            new_index
+        };
+        self.header.len += 1;
+        new_index
+    }
+
+    /// Takes the element stored at the given index if any.
+    pub fn take(&mut self, at: Index) -> Option<T> {
+        // Cases:
+        // - There are vacant entries already.
+        // - There are no vacant entries before.
+        if at >= self.len_entries() {
+            // Early return since `at` index is out of bounds.
+            return None
+        }
+        // Precompute prev and next vacant entries as we might need them later.
+        // Due to borrow checker constraints we cannot have this at a later stage.
+        let (prev, next) = if let Some(index) = self.last_vacant_index() {
+            let root_vacant = self
+                .entries
+                .get(index)
+                .map(|entry| entry.try_to_vacant())
+                .flatten()
+                .expect("last_vacant must point to an existing vacant entry");
+            // Form the linked vacant entries in a way that makes it more likely
+            // for them to refill the stash from low indices.
+            if at < index {
+                // Insert before root if new vacant index is smaller than root.
+                (root_vacant.prev, index)
+            } else if at < root_vacant.next {
+                // Insert between root and its next vacant entry if smaller than
+                // current root's next index.
+                (index, root_vacant.next)
+            } else {
+                // Insert before root entry if index is greater. But we won't
+                // update the new element to be the new root index in this case.
+                (root_vacant.prev, index)
+            }
+        } else {
+            // Default prev and next to the given at index.
+            // So the resulting vacant index is pointing to itself.
+            (at, at)
+        };
+        let entry_mut = self.entries.get_mut(at).expect("index is out of bounds");
+        if entry_mut.is_vacant() {
+            // Early return if the taken entry is already vacant.
+            return None
+        }
+        // At this point we know that the entry is occupied with a value.
+        let new_vacant_entry = Entry::Vacant(VacantEntry { prev, next });
+        let taken_entry = core::mem::replace(entry_mut, new_vacant_entry);
+        // Update links from and to neighbouring vacant entries.
+        if prev == next {
+            // Previous and next are the same so we can update the vacant
+            // neighbour with a single look-up.
+            let entry = self
+                .entries
+                .get_mut(next)
+                .map(Entry::try_to_vacant_mut)
+                .flatten()
+                .expect("`next` must point to an existing vacant entry at this point");
+            entry.prev = at;
+            entry.next = at;
+        } else {
+            // Previous and next vacant entries are different and thus need
+            // different look-ups to update them.
+            self.entries
+                .get_mut(prev)
+                .map(Entry::try_to_vacant_mut)
+                .flatten()
+                .expect("`prev` must point to an existing vacant entry at this point")
+                .next = at;
+            self.entries
+                .get_mut(next)
+                .map(Entry::try_to_vacant_mut)
+                .flatten()
+                .expect("`next` must point to an existing vacant entry at this point")
+                .prev = at;
+        }
+        // Take the value out of the taken occupied entry and return it.
+        match taken_entry {
+            Entry::Occupied(value) => {
+                use core::cmp::min;
+                self.header.last_vacant =
+                    min(self.header.last_vacant, min(at, min(prev, next)));
+                self.header.len -= 1;
+                Some(value)
+            }
+            Entry::Vacant { .. } => {
+                unreachable!("the taken entry is known to be occupied")
+            }
+        }
+    }
+
+    /// Defragments the underlying storage to minimize footprint.
+    ///
+    /// Returns the number of storage cells freed this way.
+    ///
+    /// This might invalidate indices stored outside of the stash.
+    ///
+    /// # Callback
+    ///
+    /// In order to keep those indices up-to-date the caller can provide
+    /// a callback function that is called for every moved entry
+    /// with a shared reference to the entry's value and the old as well
+    /// as the new index.
+    ///
+    /// # Note
+    ///
+    /// - If `max_iterations` is `Some(value)` it bounds the number of
+    ///   defragmentation iterations, so the stash might not be fully
+    ///   compacted afterwards.
+    /// - Users are advised to call this method with a `Some` value in
+    ///   order to keep gas costs within certain bounds.
+    /// - The call to the given callback takes place before the reinsertion
+    ///   of the shifted occupied entry.
+    pub fn defrag<C>(&mut self, max_iterations: Option<u32>, mut callback: C) -> u32
+    where
+        C: FnMut(Index, Index, &T),
+    {
+        let len_entries = self.len_entries();
+        let mut freed_cells = 0;
+        for index in (0..len_entries)
+            .rev()
+            .take(max_iterations.unwrap_or(len_entries) as usize)
+        {
+            if !self.has_vacant_entries() {
+                // Bail out as soon as there are no more vacant entries left.
+                return freed_cells
+            }
+            // In any case we are going to free yet another storage cell.
+            freed_cells += 1;
+            match self
+                .entries
+                .put_get(index, None)
+                .expect("index is out of bounds")
+            {
+                Entry::Vacant(vacant_entry) => {
+                    // Remove the vacant entry and rebind its neighbours.
+ self.remove_vacant_entry(index, vacant_entry); + } + Entry::Occupied(value) => { + // Move the occupied entry into one of the remaining vacant + // entries. We do not re-use the `put` method to not update + // the length and other header information. + let vacant_index = self + .last_vacant_index() + .expect("it has been asserted that there are vacant entries"); + callback(index, vacant_index, &value); + let new_entry = Some(Entry::Occupied(value)); + let old_entry = self.entries.put_get(vacant_index, new_entry).expect( + "`last_vacant_index` index must point to an occupied cell", + ); + let vacant_entry = match old_entry { + Entry::Vacant(vacant_entry) => vacant_entry, + Entry::Occupied(_) => { + unreachable!( + "`last_vacant_index` must point to a vacant entry" + ) + } + }; + self.remove_vacant_entry(vacant_index, vacant_entry); + } + } + self.header.len_entries -= 1; + } + freed_cells + } +} diff --git a/core/src/storage2/collections/stash/storage.rs b/core/src/storage2/collections/stash/storage.rs new file mode 100644 index 00000000000..ac0ae659448 --- /dev/null +++ b/core/src/storage2/collections/stash/storage.rs @@ -0,0 +1,124 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of ink! storage traits. 
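// Editor's note on the `defrag` API defined in `mod.rs` above: since
// defragmentation moves occupied entries to lower indices, any indices held
// outside of the stash have to be fixed up in the callback. An illustrative
// usage sketch (the `external` map of out-of-band indices is hypothetical):
//
//     let freed = stash.defrag(Some(16), |from, to, _value| {
//         for index in external.values_mut() {
//             if *index == from {
//                 *index = to;
//             }
//         }
//     });
//     // At most 16 entries were visited and `freed` storage cells released.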
+
+use super::{
+    Entry,
+    Header,
+    Stash as StorageStash,
+};
+use crate::storage2::{
+    lazy::LazyIndexMap,
+    traits::{
+        forward_clear_packed,
+        forward_pull_packed,
+        forward_push_packed,
+        KeyPtr,
+        PackedLayout,
+        SpreadLayout,
+    },
+};
+use ink_primitives::Key;
+
+impl SpreadLayout for Header {
+    const FOOTPRINT: u64 = 1;
+    const REQUIRES_DEEP_CLEAN_UP: bool = false;
+
+    fn pull_spread(ptr: &mut KeyPtr) -> Self {
+        forward_pull_packed::<Self>(ptr)
+    }
+
+    fn push_spread(&self, ptr: &mut KeyPtr) {
+        forward_push_packed::<Self>(self, ptr)
+    }
+
+    fn clear_spread(&self, ptr: &mut KeyPtr) {
+        forward_clear_packed::<Self>(self, ptr)
+    }
+}
+
+impl PackedLayout for Header {
+    fn pull_packed(&mut self, _at: &Key) {}
+    fn push_packed(&self, _at: &Key) {}
+    fn clear_packed(&self, _at: &Key) {}
+}
+
+impl<T> SpreadLayout for Entry<T>
+where
+    T: PackedLayout,
+{
+    const FOOTPRINT: u64 = 1;
+    const REQUIRES_DEEP_CLEAN_UP: bool = <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP;
+
+    fn pull_spread(ptr: &mut KeyPtr) -> Self {
+        forward_pull_packed::<Self>(ptr)
+    }
+
+    fn push_spread(&self, ptr: &mut KeyPtr) {
+        forward_push_packed::<Self>(self, ptr)
+    }
+
+    fn clear_spread(&self, ptr: &mut KeyPtr) {
+        forward_clear_packed::<Self>(self, ptr)
+    }
+}
+
+impl<T> PackedLayout for Entry<T>
+where
+    T: PackedLayout,
+{
+    fn pull_packed(&mut self, at: &Key) {
+        if let Entry::Occupied(value) = self {
+            <T as PackedLayout>::pull_packed(value, at)
+        }
+    }
+
+    fn push_packed(&self, at: &Key) {
+        if let Entry::Occupied(value) = self {
+            <T as PackedLayout>::push_packed(value, at)
+        }
+    }
+
+    fn clear_packed(&self, at: &Key) {
+        if let Entry::Occupied(value) = self {
+            <T as PackedLayout>::clear_packed(value, at)
+        }
+    }
+}
+
+impl<T> SpreadLayout for StorageStash<T>
+where
+    T: PackedLayout,
+{
+    const FOOTPRINT: u64 = 1 + <LazyIndexMap<Entry<T>> as SpreadLayout>::FOOTPRINT;
+
+    fn pull_spread(ptr: &mut KeyPtr) -> Self {
+        Self {
+            header: SpreadLayout::pull_spread(ptr),
+            entries: SpreadLayout::pull_spread(ptr),
+        }
+    }
+
+    fn push_spread(&self, ptr: &mut KeyPtr) {
+        SpreadLayout::push_spread(&self.header, ptr);
+        SpreadLayout::push_spread(&self.entries, ptr);
+    }
+
+    fn clear_spread(&self, ptr: &mut KeyPtr) {
+        self.clear_cells();
+        SpreadLayout::clear_spread(&self.header, ptr);
+        SpreadLayout::clear_spread(&self.entries, ptr);
+    }
+}
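`Header` and `Entry<T>` are stored packed: the whole value is SCALE-encoded into the single cell at the current key, which is what the `forward_*_packed` helpers express. A minimal sketch of the same idiom applied to a hypothetical user type `MyHeader` (editor's illustration, not part of the patch):

    // A single-cell value type; `PackedLayout` requires the SCALE codecs.
    #[derive(scale::Encode, scale::Decode)]
    struct MyHeader {
        last: u32,
        len: u32,
    }

    impl PackedLayout for MyHeader {
        // Plain integers need no storage clean-up of their own:
        fn pull_packed(&mut self, _at: &Key) {}
        fn push_packed(&self, _at: &Key) {}
        fn clear_packed(&self, _at: &Key) {}
    }

    impl SpreadLayout for MyHeader {
        const FOOTPRINT: u64 = 1; // exactly one storage cell
        const REQUIRES_DEEP_CLEAN_UP: bool = false; // clearable without loading

        fn pull_spread(ptr: &mut KeyPtr) -> Self {
            forward_pull_packed::<Self>(ptr)
        }
        fn push_spread(&self, ptr: &mut KeyPtr) {
            forward_push_packed::<Self>(self, ptr)
        }
        fn clear_spread(&self, ptr: &mut KeyPtr) {
            forward_clear_packed::<Self>(self, ptr)
        }
    }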
diff --git a/core/src/storage2/collections/stash/tests.rs b/core/src/storage2/collections/stash/tests.rs
new file mode 100644
index 00000000000..bae3cc53f77
--- /dev/null
+++ b/core/src/storage2/collections/stash/tests.rs
@@ -0,0 +1,669 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::Stash as StorageStash;
+use crate::{
+    env,
+    storage2::traits::{
+        KeyPtr,
+        SpreadLayout,
+    },
+};
+use ink_primitives::Key;
+
+#[test]
+fn new_works() {
+    // `StorageStash::new`
+    let stash = <StorageStash<u8>>::new();
+    assert!(stash.is_empty());
+    assert_eq!(stash.len(), 0);
+    assert_eq!(stash.get(0), None);
+    assert!(stash.iter().next().is_none());
+    // `StorageStash::default`
+    let default = <StorageStash<u8> as Default>::default();
+    assert!(default.is_empty());
+    assert_eq!(default.len(), 0);
+    assert_eq!(default.get(0), None);
+    assert!(default.iter().next().is_none());
+    // `StorageStash::new` and `StorageStash::default` should be equal.
+    assert_eq!(stash, default);
+}
+
+#[test]
+fn from_iterator_works() {
+    let test_values = [b'A', b'B', b'C', b'D', b'E', b'F'];
+    let stash = test_values.iter().copied().collect::<StorageStash<u8>>();
+    assert_eq!(stash, {
+        let mut stash = StorageStash::new();
+        for (index, value) in test_values.iter().enumerate() {
+            assert_eq!(index as u32, stash.put(*value));
+        }
+        stash
+    });
+    assert_eq!(stash.len(), test_values.len() as u32);
+    assert_eq!(stash.is_empty(), false);
+}
+
+#[test]
+fn from_empty_iterator_works() {
+    assert_eq!(
+        [].iter().copied().collect::<StorageStash<u8>>(),
+        StorageStash::new(),
+    );
+}
+
+#[test]
+fn take_from_filled_works() {
+    let test_values = [b'A', b'B', b'C', b'D', b'E', b'F'];
+    let mut stash = test_values.iter().copied().collect::<StorageStash<u8>>();
+    for (index, expected_value) in test_values.iter().enumerate() {
+        assert_eq!(stash.take(index as u32), Some(*expected_value));
+    }
+}
+
+#[test]
+fn take_from_empty_works() {
+    let mut stash = <StorageStash<u8>>::new();
+    assert_eq!(stash.take(0), None);
+}
+
+#[test]
+fn take_out_of_bounds_works() {
+    let mut stash = [b'A', b'B', b'C']
+        .iter()
+        .copied()
+        .collect::<StorageStash<u8>>();
+    assert_eq!(stash.take(3), None);
+}
+
+#[test]
+fn get_works() {
+    let test_values = [b'A', b'B', b'C', b'D', b'E', b'F'];
+    let mut stash = test_values.iter().copied().collect::<StorageStash<u8>>();
+    for (index, &expected_value) in test_values.iter().enumerate() {
+        let mut expected_value = expected_value;
+        let index = index as u32;
+        assert_eq!(stash.get(index), Some(&expected_value));
+        assert_eq!(stash.get_mut(index), Some(&mut expected_value));
+        assert_eq!(&stash[index], &expected_value);
+        assert_eq!(&mut stash[index], &mut expected_value);
+    }
+    // Get out of bounds works:
+    let len = stash.len();
+    assert_eq!(stash.get(len), None);
+    assert_eq!(stash.get_mut(len), None);
+    // Get vacant entry works:
+    assert_eq!(stash.get(1), Some(&b'B'));
+    assert_eq!(stash.get_mut(1), Some(&mut b'B'));
+    assert_eq!(stash.take(1), Some(b'B'));
+    assert_eq!(stash.get(1), None);
+    assert_eq!(stash.get_mut(1), None);
+}
+
+#[cfg(debug_assertions)]
+#[test]
+#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")]
+fn index_out_of_bounds_works() {
+    let test_values = [b'a', b'b', b'c'];
+    let stash = test_values.iter().copied().collect::<StorageStash<u8>>();
+    let _ = &stash[test_values.len() as u32];
+}
+
+#[cfg(debug_assertions)]
+#[test]
+#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")]
+fn index_mut_out_of_bounds_works() {
+    let test_values = [b'a', b'b', b'c'];
+    let mut stash = test_values.iter().copied().collect::<StorageStash<u8>>();
+    let _ = &mut stash[test_values.len() as u32];
+}
+
+#[test]
+#[should_panic(expected = "indexed vacant entry: at index 1")]
+fn index_vacant_works() {
+    let test_values = [b'a', b'b', b'c'];
+    let mut stash = test_values.iter().copied().collect::<StorageStash<u8>>();
+    assert_eq!(stash.take(1), Some(b'b'));
+    let _ = &stash[1];
+}
+#[should_panic(expected = "indexed vacant entry: at index 1")] +fn index_mut_vacant_works() { + let test_values = [b'a', b'b', b'c']; + let mut stash = test_values.iter().copied().collect::>(); + assert_eq!(stash.take(1), Some(b'b')); + let _ = &mut stash[1]; +} + +#[test] +fn len_is_empty_works() { + let mut stash = StorageStash::new(); + assert_eq!(stash.len(), 0); + assert!(stash.is_empty()); + stash.put(b'A'); + assert_eq!(stash.len(), 1); + assert!(!stash.is_empty()); + stash.take(0); + assert_eq!(stash.len(), 0); + assert!(stash.is_empty()); +} + +#[test] +fn iter_works() { + let stash = [b'A', b'B', b'C'] + .iter() + .copied() + .collect::>(); + // Test iterator over shared references. + let mut iter = stash.iter(); + assert_eq!(iter.count(), 3); + assert_eq!(iter.next(), Some(&b'A')); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.count(), 1); + assert_eq!(iter.next(), Some(&b'C')); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. + let mut stash = stash; + let mut iter = stash.iter_mut(); + assert_eq!(iter.next(), Some(&mut b'A')); + assert_eq!(iter.next(), Some(&mut b'B')); + assert_eq!(iter.next(), Some(&mut b'C')); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +/// Create a stash that only has vacant entries. +fn create_vacant_stash() -> StorageStash { + let mut stash = [b'A', b'B', b'C'] + .iter() + .copied() + .collect::>(); + for i in 0..stash.len() { + stash.take(i); + } + assert_eq!(stash.len(), 0); + assert!(stash.is_empty()); + assert_eq!(stash.len_entries(), 3); + stash +} + +/// Create a stash where every second entry is vacant. +fn create_holey_stash() -> StorageStash { + let elements = [b'A', b'B', b'C', b'D', b'E', b'F']; + let mut stash = elements.iter().copied().collect::>(); + for i in 0..stash.len() { + stash.take(i * 2); + } + assert_eq!(stash.len() as usize, elements.len() / 2); + assert!(!stash.is_empty()); + assert_eq!(stash.len_entries() as usize, elements.len()); + stash +} + +#[test] +fn iter_over_vacant_works() { + let stash = create_vacant_stash(); + // Test iterator over shared references. + let mut iter = stash.iter(); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. + let mut stash = stash; + let mut iter = stash.iter_mut(); + assert_eq!(iter.next(), None); + // Test reverse iterator over shared references. + let mut iter = stash.iter().rev(); + assert_eq!(iter.clone().count(), 0); + assert_eq!(iter.next(), None); + // Test reverse iterator over exclusive references. + let mut stash = stash; + let mut iter = stash.iter_mut().rev(); + assert_eq!(iter.next(), None); +} + +#[test] +fn iter_over_holey_works() { + let stash = create_holey_stash(); + // Test iterator over shared references. + let mut iter = stash.iter(); + assert_eq!(iter.count(), 3); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some(&b'D')); + assert_eq!(iter.count(), 1); + assert_eq!(iter.next(), Some(&b'F')); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. 
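Note how the counts in the holey test above stay exact even though half of the entries are vacant: the stash iterators skip vacant slots while tracking the number of remaining occupied elements separately. A reduced model of that bookkeeping over a plain slice of options (not the actual iterator implementation):

    /// Skips vacant slots but keeps an exact count of remaining occupied
    /// elements, so `count` needs no scan over the entries.
    struct HoleyIter<'a, T> {
        entries: core::slice::Iter<'a, Option<T>>,
        remaining: usize,
    }

    impl<'a, T> Iterator for HoleyIter<'a, T> {
        type Item = &'a T;

        fn next(&mut self) -> Option<&'a T> {
            loop {
                match self.entries.next()? {
                    Some(value) => {
                        self.remaining -= 1;
                        return Some(value)
                    }
                    None => continue,
                }
            }
        }

        fn count(self) -> usize
        where
            Self: Sized,
        {
            self.remaining
        }
    }

    fn main() {
        // Mirrors `create_holey_stash`: every second entry has been taken.
        let entries = [None, Some(b'B'), None, Some(b'D'), None, Some(b'F')];
        let mut iter = HoleyIter { entries: entries.iter(), remaining: 3 };
        assert_eq!(iter.next(), Some(&b'B'));
        assert_eq!(iter.count(), 2);
    }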
+ let mut stash = stash; + let mut iter = stash.iter_mut(); + assert_eq!(iter.next(), Some(&mut b'B')); + assert_eq!(iter.next(), Some(&mut b'D')); + assert_eq!(iter.next(), Some(&mut b'F')); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_rev_over_holey_works() { + let stash = create_holey_stash(); + // Test iterator over shared references. + let mut iter = stash.iter().rev(); + assert_eq!(iter.clone().count(), 3); + assert_eq!(iter.next(), Some(&b'F')); + assert_eq!(iter.clone().count(), 2); + assert_eq!(iter.next(), Some(&b'D')); + assert_eq!(iter.clone().count(), 1); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.clone().count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. + let mut stash = stash; + let mut iter = stash.iter_mut().rev(); + assert_eq!(iter.next(), Some(&mut b'F')); + assert_eq!(iter.next(), Some(&mut b'D')); + assert_eq!(iter.next(), Some(&mut b'B')); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_rev_works() { + let stash = [b'A', b'B', b'C'] + .iter() + .copied() + .collect::>(); + // Test iterator over shared references. + let mut iter = stash.iter().rev(); + assert_eq!(iter.next(), Some(&b'C')); + assert_eq!(iter.next(), Some(&b'B')); + assert_eq!(iter.next(), Some(&b'A')); + assert_eq!(iter.next(), None); + // Test iterator over exclusive references. + let mut stash = stash; + let mut iter = stash.iter_mut().rev(); + assert_eq!(iter.next(), Some(&mut b'C')); + assert_eq!(iter.next(), Some(&mut b'B')); + assert_eq!(iter.next(), Some(&mut b'A')); + assert_eq!(iter.next(), None); +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +struct EntryMove { + from: u32, + to: u32, + value: u8, +} + +#[test] +fn simple_defrag_works() { + let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F'] + .iter() + .copied() + .collect::>(); + assert_eq!(stash.len(), 6); + assert_eq!(stash.len_entries(), 6); + assert_eq!(stash.take(3), Some(b'D')); + assert_eq!(stash.take(1), Some(b'B')); + assert_eq!(stash.take(5), Some(b'F')); + assert_eq!(stash.take(4), Some(b'E')); + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 6); + // Now stash looks like this: + // + // i | 0 | 1 | 2 | 3 | 4 | 5 | + // next | | | | | | | + // prev | | | | | | | + // val | A | | C | | | | + // + // After defrag the stash should look like this: + // + // i | 0 | 1 | + // next | | | + // prev | | | + // val | A | C | + let mut entry_moves = Vec::new(); + let callback = |from, to, value: &u8| { + entry_moves.push(EntryMove { + from, + to, + value: *value, + }); + }; + assert_eq!(stash.defrag(None, callback), 4); + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 2); + assert_eq!(stash.get(0), Some(&b'A')); + assert_eq!(stash.get(1), Some(&b'C')); + assert_eq!( + &entry_moves, + &[EntryMove { + from: 2, + to: 1, + value: 67 + }] + ); +} + +/// Returns a storage stash that looks internally like this: +/// +/// i | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | +/// next | | | | | | | | | +/// prev | | | | | | | | | +/// val | | | | | E | | | H | +fn complex_defrag_setup() -> StorageStash { + let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F', b'G', b'H'] + .iter() + .copied() + .collect::>(); + assert_eq!(stash.len(), 8); + assert_eq!(stash.len_entries(), 8); + // Remove some of the entries in specific order. 
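The defragmentation that the diagrams above describe can be reproduced on a plain `Vec<Option<u8>>`: trailing vacant entries are dropped outright, and the last occupied entry is moved into the lowest vacant slot, with every move reported through the callback. This sketch only models the observable behavior these tests assert (the moves and the number of freed cells, ignoring the optional entry limit), not the stash's actual vacant-list bookkeeping:

    /// Model of `Stash::defrag` over a plain vector of entries. Returns how
    /// many entries (storage cells in the real stash) were freed.
    fn defrag(entries: &mut Vec<Option<u8>>, mut callback: impl FnMut(u32, u32, &u8)) -> u32 {
        let len_before = entries.len();
        loop {
            let last_is_vacant = match entries.last() {
                None => break,
                Some(entry) => entry.is_none(),
            };
            if last_is_vacant {
                // Trailing vacant entries are simply dropped.
                entries.pop();
                continue
            }
            match entries.iter().position(Option::is_none) {
                Some(to) if to + 1 < entries.len() => {
                    // Move the last occupied entry into the lowest vacant slot.
                    let from = entries.len() - 1;
                    let value = entries.pop().unwrap().unwrap();
                    callback(from as u32, to as u32, &value);
                    entries[to] = Some(value);
                }
                _ => break,
            }
        }
        (len_before - entries.len()) as u32
    }

    fn main() {
        // The state from `simple_defrag_works` after the four `take` calls:
        let mut entries = vec![Some(b'A'), None, Some(b'C'), None, None, None];
        let mut moves = Vec::new();
        let freed = defrag(&mut entries, |from, to, value| moves.push((from, to, *value)));
        assert_eq!(freed, 4);
        assert_eq!(entries, vec![Some(b'A'), Some(b'C')]);
        assert_eq!(moves, vec![(2, 1, b'C')]);
    }

Running `main` reproduces both the return value of `4` and the single `EntryMove { from: 2, to: 1, value: 67 }` that `simple_defrag_works` asserts; the same model also yields the two moves expected by the complex setup.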
+ assert_eq!(stash.take(0), Some(b'A')); + assert_eq!(stash.take(6), Some(b'G')); + assert_eq!(stash.take(1), Some(b'B')); + assert_eq!(stash.take(5), Some(b'F')); + assert_eq!(stash.take(2), Some(b'C')); + assert_eq!(stash.take(3), Some(b'D')); + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 8); + stash +} + +/// Returns the expected entry move set for the complex defragmentation test. +fn complex_defrag_expected_moves() -> &'static [EntryMove] { + &[ + EntryMove { + from: 7, + to: 0, + value: 72, + }, + EntryMove { + from: 4, + to: 1, + value: 69, + }, + ] +} + +#[test] +fn complex_defrag_works() { + let mut stash = complex_defrag_setup(); + let mut entry_moves = Vec::new(); + let callback = |from, to, value: &u8| { + entry_moves.push(EntryMove { + from, + to, + value: *value, + }); + }; + assert_eq!(stash.defrag(None, callback), 6); + // After defrag the stash should look like this: + // + // i | 0 | 1 | + // next | | | + // prev | | | + // val | H | E | + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 2); + assert_eq!(stash.get(0), Some(&b'H')); + assert_eq!(stash.get(1), Some(&b'E')); + assert_eq!(entry_moves.as_slice(), complex_defrag_expected_moves()); +} + +#[test] +fn incremental_defrag_works() { + // This tests asserts that incremental defragmentation of storage stashes + // yields the same result as immediate defragmentation of the same stash. + let mut stash = complex_defrag_setup(); + let mut entry_moves = Vec::new(); + let mut callback = |from, to, value: &u8| { + entry_moves.push(EntryMove { + from, + to, + value: *value, + }); + }; + let len_entries_before = stash.len_entries(); + for i in 0..stash.len_entries() { + stash.defrag(Some(1), &mut callback); + assert_eq!( + stash.len_entries(), + core::cmp::max(2, len_entries_before - i - 1) + ); + } + // After defrag the stash should look like this: + // + // i | 0 | 1 | + // next | | | + // prev | | | + // val | H | E | + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 2); + assert_eq!(stash.get(0), Some(&b'H')); + assert_eq!(stash.get(1), Some(&b'E')); + assert_eq!(entry_moves.as_slice(), complex_defrag_expected_moves()); +} + +#[derive(Debug, PartialEq, Eq)] +enum Entry { + /// Vacant entry with `prev` and `next` links. + Vacant(u32, u32), + /// Occupied entry with value. + Occupied(u8), +} + +fn entries_of_stash(stash: &StorageStash) -> Vec { + stash + .entries() + .map(|entry| { + use super::Entry as StashEntry; + match entry { + StashEntry::Vacant(entry) => Entry::Vacant(entry.prev, entry.next), + StashEntry::Occupied(value) => Entry::Occupied(*value), + } + }) + .collect::>() +} + +#[test] +fn take_in_order_works() { + let mut stash = [b'A', b'B', b'C', b'D'] + .iter() + .copied() + .collect::>(); + assert_eq!(stash.len(), 4); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), None); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Occupied(b'A'), + Entry::Occupied(b'B'), + Entry::Occupied(b'C'), + Entry::Occupied(b'D') + ] + ); + // Take first. + assert_eq!(stash.take(0), Some(b'A')); + assert_eq!(stash.len(), 3); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(0)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Vacant(0, 0), + Entry::Occupied(b'B'), + Entry::Occupied(b'C'), + Entry::Occupied(b'D') + ] + ); + // Take second. 
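The `Entry::Vacant(prev, next)` pairs asserted in these tests form a doubly-linked ring through all vacant slots, with `last_vacant_index` pointing at the slot that the next `put` will reuse. A small hypothetical helper (not part of the API) makes the expected vectors easy to sanity-check:

    use std::collections::HashMap;

    /// Follows the `next` links of a vacant ring starting at `head` and
    /// returns the indices in visiting order; `links[&i] = (prev, next)`
    /// mirrors the `Entry::Vacant(prev, next)` pairs in the assertions.
    fn walk_vacant_ring(head: u32, links: &HashMap<u32, (u32, u32)>) -> Vec<u32> {
        let mut order = vec![head];
        let mut at = links[&head].1;
        while at != head {
            order.push(at);
            at = links[&at].1;
        }
        order
    }

    fn main() {
        // Final state of `take_in_order_works`: all four entries are vacant.
        let links: HashMap<u32, (u32, u32)> =
            [(0, (3, 1)), (1, (0, 2)), (2, (1, 3)), (3, (2, 0))]
                .iter()
                .copied()
                .collect();
        assert_eq!(walk_vacant_ring(0, &links), vec![0, 1, 2, 3]);
    }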
+ assert_eq!(stash.take(1), Some(b'B')); + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(0)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Vacant(1, 1), + Entry::Vacant(0, 0), + Entry::Occupied(b'C'), + Entry::Occupied(b'D') + ] + ); + // Take third. + assert_eq!(stash.take(2), Some(b'C')); + assert_eq!(stash.len(), 1); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(0)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Vacant(2, 1), + Entry::Vacant(0, 2), + Entry::Vacant(1, 0), + Entry::Occupied(b'D') + ] + ); + // Take last. + assert_eq!(stash.take(3), Some(b'D')); + assert_eq!(stash.len(), 0); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(0)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Vacant(3, 1), + Entry::Vacant(0, 2), + Entry::Vacant(1, 3), + Entry::Vacant(2, 0), + ] + ); +} + +#[test] +fn take_rev_order_works() { + let mut stash = [b'A', b'B', b'C', b'D'] + .iter() + .copied() + .collect::>(); + assert_eq!(stash.len(), 4); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), None); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Occupied(b'A'), + Entry::Occupied(b'B'), + Entry::Occupied(b'C'), + Entry::Occupied(b'D') + ] + ); + // Take last. + assert_eq!(stash.take(3), Some(b'D')); + assert_eq!(stash.len(), 3); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(3)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Occupied(b'A'), + Entry::Occupied(b'B'), + Entry::Occupied(b'C'), + Entry::Vacant(3, 3) + ] + ); + // Take third. + assert_eq!(stash.take(2), Some(b'C')); + assert_eq!(stash.len(), 2); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(2)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Occupied(b'A'), + Entry::Occupied(b'B'), + Entry::Vacant(3, 3), + Entry::Vacant(2, 2) + ] + ); + // Take second. + assert_eq!(stash.take(1), Some(b'B')); + assert_eq!(stash.len(), 1); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(1)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Occupied(b'A'), + Entry::Vacant(3, 2), + Entry::Vacant(1, 3), + Entry::Vacant(2, 1) + ] + ); + // Take first. + assert_eq!(stash.take(0), Some(b'A')); + assert_eq!(stash.len(), 0); + assert_eq!(stash.len_entries(), 4); + assert_eq!(stash.last_vacant_index(), Some(0)); + assert_eq!( + entries_of_stash(&stash), + vec![ + Entry::Vacant(3, 1), + Entry::Vacant(0, 2), + Entry::Vacant(1, 3), + Entry::Vacant(2, 0) + ] + ); +} + +#[test] +fn spread_layout_push_pull_works() -> env::Result<()> { + env::test::run_test::(|_| { + let stash1 = create_holey_stash(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&stash1, &mut KeyPtr::from(root_key)); + // Load the pushed storage vector into another instance and check that + // both instances are equal: + let stash2 = + as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(stash1, stash2); + Ok(()) + }) +} + +#[test] +#[should_panic(expected = "storage entry was empty")] +fn spread_layout_clear_works() { + env::test::run_test::(|_| { + let stash1 = create_holey_stash(); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&stash1, &mut KeyPtr::from(root_key)); + // It has already been asserted that a valid instance can be pulled + // from contract storage after a push to the same storage region. 
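The `should_panic` expectation declared above boils down to pulling a storage cell that `clear_spread` already emptied: the stash's header can no longer be decoded. In a reduced model of the storage backend, the failure is simply a missing map entry; the helper name here is illustrative:

    use std::collections::HashMap;

    /// Reduced model of pulling a typed value out of cleared storage: once
    /// the cell is gone there is nothing to decode, which surfaces as the
    /// "storage entry was empty" panic asserted above.
    fn pull_cell(storage: &HashMap<u64, Vec<u8>>, key: u64) -> Vec<u8> {
        storage
            .get(&key)
            .cloned()
            .expect("storage entry was empty")
    }

    fn main() {
        let mut storage = HashMap::new();
        storage.insert(0x42u64, vec![1u8]);
        assert_eq!(pull_cell(&storage, 0x42), vec![1u8]);
        storage.remove(&0x42); // the model's `clear_spread`
        // Pulling again would now panic with "storage entry was empty":
        // pull_cell(&storage, 0x42);
    }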
+ // + // Now clear the associated storage from `stash1` and check whether + // loading another instance from this storage will panic since the + // vector's length property cannot read a value: + SpreadLayout::clear_spread(&stash1, &mut KeyPtr::from(root_key)); + let _ = + as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + Ok(()) + }) + .unwrap() +} diff --git a/core/src/storage2/collections/vec/impls.rs b/core/src/storage2/collections/vec/impls.rs new file mode 100644 index 00000000000..a31eaa5b9a7 --- /dev/null +++ b/core/src/storage2/collections/vec/impls.rs @@ -0,0 +1,139 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of generic traits that are useful for the storage vector. + +use super::{ + Iter, + IterMut, + Vec as StorageVec, +}; +use crate::storage2::traits::PackedLayout; +use core::iter::{ + Extend, + FromIterator, +}; + +impl Drop for StorageVec +where + T: PackedLayout, +{ + fn drop(&mut self) { + self.clear_cells(); + } +} + +impl core::ops::Index for StorageVec +where + T: PackedLayout, +{ + type Output = T; + + fn index(&self, index: u32) -> &Self::Output { + match self.get(index) { + Some(value) => value, + None => { + panic!( + "index out of bounds: the len is {} but the index is {}", + self.len(), + index + ) + } + } + } +} + +impl core::ops::IndexMut for StorageVec +where + T: PackedLayout, +{ + fn index_mut(&mut self, index: u32) -> &mut Self::Output { + let len = self.len(); + match self.get_mut(index) { + Some(value) => value, + None => { + panic!( + "index out of bounds: the len is {} but the index is {}", + len, index + ) + } + } + } +} + +impl<'a, T: 'a> IntoIterator for &'a StorageVec +where + T: PackedLayout, +{ + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, T: 'a> IntoIterator for &'a mut StorageVec +where + T: PackedLayout, +{ + type Item = &'a mut T; + type IntoIter = IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl Extend for StorageVec +where + T: PackedLayout, +{ + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for item in iter { + self.push(item) + } + } +} + +impl FromIterator for StorageVec +where + T: PackedLayout, +{ + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let mut vec = StorageVec::new(); + vec.extend(iter); + vec + } +} + +impl core::cmp::PartialEq for StorageVec +where + T: PartialEq + PackedLayout, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false + } + self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs) + } +} + +impl core::cmp::Eq for StorageVec where T: Eq + PackedLayout {} diff --git a/core/src/storage2/collections/vec/iter.rs b/core/src/storage2/collections/vec/iter.rs new file mode 100644 index 00000000000..8eea028e216 --- /dev/null +++ b/core/src/storage2/collections/vec/iter.rs @@ -0,0 +1,215 @@ +// Copyright 2019-2020 Parity 
Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + storage2 as storage, + storage2::{ + collections::extend_lifetime, + traits::PackedLayout, + }, +}; + +/// An iterator over shared references to the elements of a storage vector. +#[derive(Debug, Clone, Copy)] +pub struct Iter<'a, T> +where + T: PackedLayout, +{ + /// The storage vector to iterate over. + vec: &'a storage::Vec, + /// The current begin of the iteration. + begin: u32, + /// The current end of the iteration. + end: u32, +} + +impl<'a, T> Iter<'a, T> +where + T: PackedLayout, +{ + /// Creates a new iterator for the given storage vector. + pub(crate) fn new(vec: &'a storage::Vec) -> Self { + Self { + vec, + begin: 0, + end: vec.len(), + } + } + + /// Returns the amount of remaining elements to yield by the iterator. + fn remaining(&self) -> u32 { + self.end - self.begin + } +} + +impl<'a, T> Iterator for Iter<'a, T> +where + T: PackedLayout, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + ::nth(self, 0) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.remaining() as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining() as usize + } + + fn nth(&mut self, n: usize) -> Option { + debug_assert!(self.begin <= self.end); + let n = n as u32; + if self.begin + n >= self.end { + return None + } + let cur = self.begin + n; + self.begin += 1 + n; + self.vec.get(cur).expect("access is within bounds").into() + } +} + +impl<'a, T> ExactSizeIterator for Iter<'a, T> where T: PackedLayout {} + +impl<'a, T> DoubleEndedIterator for Iter<'a, T> +where + T: PackedLayout, +{ + fn next_back(&mut self) -> Option { + ::nth_back(self, 0) + } + + fn nth_back(&mut self, n: usize) -> Option { + debug_assert!(self.begin <= self.end); + let n = n as u32; + if self.begin >= self.end.saturating_sub(n) { + return None + } + self.end -= 1 + n; + self.vec + .get(self.end) + .expect("access is within bounds") + .into() + } +} + +/// An iterator over exclusive references to the elements of a storage vector. +#[derive(Debug)] +pub struct IterMut<'a, T> +where + T: PackedLayout, +{ + /// The storage vector to iterate over. + vec: &'a mut storage::Vec, + /// The current begin of the iteration. + begin: u32, + /// The current end of the iteration. + end: u32, +} + +impl<'a, T> IterMut<'a, T> +where + T: PackedLayout, +{ + /// Creates a new iterator for the given storage vector. + pub(crate) fn new(vec: &'a mut storage::Vec) -> Self { + let len = vec.len(); + Self { + vec, + begin: 0, + end: len, + } + } + + /// Returns the amount of remaining elements to yield by the iterator. + fn remaining(&self) -> u32 { + self.end - self.begin + } +} + +impl<'a, T> IterMut<'a, T> +where + T: PackedLayout, +{ + fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> { + self.vec.get_mut(at).map(|value| { + // SAFETY: We extend the lifetime of the reference here. 
+ // + // This is safe because the iterator yields an exclusive + // reference to every element in the iterated vector + // just once and also there can be only one such iterator + // for the same vector at the same time which is + // guaranteed by the constructor of the iterator. + unsafe { extend_lifetime::<'b, 'a, T>(value) } + }) + } +} + +impl<'a, T> Iterator for IterMut<'a, T> +where + T: PackedLayout, +{ + type Item = &'a mut T; + + fn next(&mut self) -> Option { + ::nth(self, 0) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.remaining() as usize; + (remaining, Some(remaining)) + } + + fn count(self) -> usize { + self.remaining() as usize + } + + fn nth(&mut self, n: usize) -> Option { + debug_assert!(self.begin <= self.end); + let n = n as u32; + if self.begin + n >= self.end { + return None + } + let cur = self.begin + n; + self.begin += 1 + n; + self.get_mut(cur).expect("access is within bounds").into() + } +} + +impl<'a, T> ExactSizeIterator for IterMut<'a, T> where T: PackedLayout {} + +impl<'a, T> DoubleEndedIterator for IterMut<'a, T> +where + T: PackedLayout, +{ + fn next_back(&mut self) -> Option { + ::nth_back(self, 0) + } + + fn nth_back(&mut self, n: usize) -> Option { + debug_assert!(self.begin <= self.end); + let n = n as u32; + if self.begin >= self.end.saturating_sub(n) { + return None + } + self.end -= 1 + n; + self.get_mut(self.end) + .expect("access is within bounds") + .into() + } +} diff --git a/core/src/storage2/collections/vec/mod.rs b/core/src/storage2/collections/vec/mod.rs new file mode 100644 index 00000000000..45718c0be80 --- /dev/null +++ b/core/src/storage2/collections/vec/mod.rs @@ -0,0 +1,306 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A storage vector used to store elements in a contiguous sequenced order. +//! +//! This is by default the go-to collection for most smart contracts if there +//! are not special requirements to the storage data structure. + +mod impls; +mod iter; +mod storage; + +#[cfg(test)] +mod tests; + +pub use self::iter::{ + Iter, + IterMut, +}; +use crate::storage2::{ + lazy::{ + Lazy, + LazyIndexMap, + }, + traits::PackedLayout, +}; + +/// A contiguous growable array type, written `Vec` but pronounced 'vector'. +/// +/// # Note +/// +/// Despite the similarity to Rust's `Vec` type this storage `Vec` has many +/// differences in its internal data layout. While it stores its data in contiguous +/// storage slots this does not mean that the data is actually densely stored +/// in memory. +/// +/// Also its technical performance characteristics may be different from Rust's +/// `Vec` due to the differences stated above. +/// +/// Allows to store up to `2^32` elements and is guaranteed to not reallocate +/// upon pushing new elements to it. +#[derive(Debug)] +pub struct Vec +where + T: PackedLayout, +{ + /// The length of the vector. + len: Lazy, + /// The synchronized cells to operate on the contract storage. 
+    elems: LazyIndexMap<T>,
+}
+
+impl<T> Default for Vec<T>
+where
+    T: PackedLayout,
+{
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T> Vec<T>
+where
+    T: PackedLayout,
+{
+    /// Creates a new empty storage vector.
+    pub fn new() -> Self {
+        Self {
+            len: Lazy::new(0),
+            elems: LazyIndexMap::new(),
+        }
+    }
+
+    /// Returns the number of elements in the vector, also referred to as its 'length'.
+    pub fn len(&self) -> u32 {
+        *self.len
+    }
+
+    /// Returns `true` if the vector contains no elements.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+}
+
+impl<T> Vec<T>
+where
+    T: PackedLayout,
+{
+    /// Clears the underlying storage cells of the storage vector.
+    ///
+    /// # Note
+    ///
+    /// This completely invalidates the storage vector's invariants about
+    /// the contents of its associated storage region.
+    ///
+    /// This API is used for the `Drop` implementation of [`Vec`] as well as
+    /// for the [`SpreadLayout::clear_spread`] trait implementation.
+    fn clear_cells(&self) {
+        if self.elems.key().is_none() {
+            // We won't clear any storage if we are in lazy state since there
+            // probably has not been any state written to storage, yet.
+            return
+        }
+        for index in 0..self.len() {
+            self.elems.clear_packed_at(index);
+        }
+    }
+}
+
+impl<T> Vec<T>
+where
+    T: PackedLayout,
+{
+    /// Returns an iterator yielding shared references to all elements of the vector.
+    ///
+    /// # Note
+    ///
+    /// Avoid unbounded iteration over big storage vectors.
+    /// Prefer using methods like `Iterator::take` in order to limit the number
+    /// of yielded elements.
+    pub fn iter(&self) -> Iter<T> {
+        Iter::new(self)
+    }
+
+    /// Returns an iterator yielding exclusive references to all elements of the vector.
+    ///
+    /// # Note
+    ///
+    /// Avoid unbounded iteration over big storage vectors.
+    /// Prefer using methods like `Iterator::take` in order to limit the number
+    /// of yielded elements.
+    pub fn iter_mut(&mut self) -> IterMut<T> {
+        IterMut::new(self)
+    }
+
+    /// Returns the index if it is within bounds or `None` otherwise.
+    fn within_bounds(&self, index: u32) -> Option<u32> {
+        if index < self.len() {
+            return Some(index)
+        }
+        None
+    }
+
+    /// Returns a shared reference to the first element if any.
+    pub fn first(&self) -> Option<&T> {
+        if self.is_empty() {
+            return None
+        }
+        self.get(0)
+    }
+
+    /// Returns a shared reference to the last element if any.
+    pub fn last(&self) -> Option<&T> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        self.get(last_index)
+    }
+
+    /// Returns a shared reference to the indexed element.
+    ///
+    /// Returns `None` if `index` is out of bounds.
+    pub fn get(&self, index: u32) -> Option<&T> {
+        self.within_bounds(index)
+            .and_then(|index| self.elems.get(index))
+    }
+}
+
+impl<T> Vec<T>
+where
+    T: PackedLayout,
+{
+    /// Appends an element to the back of the vector.
+    pub fn push(&mut self, value: T) {
+        assert!(
+            self.len() < core::u32::MAX,
+            "cannot push more elements into the storage vector"
+        );
+        let last_index = self.len();
+        *self.len += 1;
+        self.elems.put(last_index, Some(value));
+    }
+}
+
+impl<T> Vec<T>
+where
+    T: PackedLayout,
+{
+    /// Pops the last element from the vector and returns it.
+    ///
+    /// Returns `None` if the vector is empty.
+    pub fn pop(&mut self) -> Option<T> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        *self.len = last_index;
+        self.elems.put_get(last_index, None)
+    }
+
+    /// Pops the last element from the vector and immediately drops it.
+    ///
+    /// Returns `Some(())` if an element has been removed and `None` otherwise.
+    ///
+    /// # Note
+    ///
+    /// This operation is a bit more efficient than [`Vec::pop`]
+    /// since it avoids reading from contract storage in some use cases.
+    pub fn pop_drop(&mut self) -> Option<()> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        *self.len = last_index;
+        self.elems.put(last_index, None);
+        Some(())
+    }
+
+    /// Returns an exclusive reference to the first element if any.
+    pub fn first_mut(&mut self) -> Option<&mut T> {
+        if self.is_empty() {
+            return None
+        }
+        self.get_mut(0)
+    }
+
+    /// Returns an exclusive reference to the last element if any.
+    pub fn last_mut(&mut self) -> Option<&mut T> {
+        if self.is_empty() {
+            return None
+        }
+        let last_index = self.len() - 1;
+        self.get_mut(last_index)
+    }
+
+    /// Returns an exclusive reference to the indexed element.
+    ///
+    /// Returns `None` if `index` is out of bounds.
+    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+        self.within_bounds(index)
+            .and_then(move |index| self.elems.get_mut(index))
+    }
+
+    /// Swaps the elements at the given indices.
+    ///
+    /// # Panics
+    ///
+    /// If one or both indices are out of bounds.
+    pub fn swap(&mut self, a: u32, b: u32) {
+        assert!(
+            a < self.len() && b < self.len(),
+            "indices are out of bounds"
+        );
+        self.elems.swap(a, b)
+    }
+
+    /// Removes the indexed element from the vector and returns it.
+    ///
+    /// The last element of the vector is put into the indexed slot.
+    /// Returns `None` and does not mutate the vector if the index is out of bounds.
+    ///
+    /// # Note
+    ///
+    /// This operation does not preserve ordering but is constant time.
+    pub fn swap_remove(&mut self, n: u32) -> Option<T> {
+        if self.is_empty() {
+            return None
+        }
+        self.elems.swap(n, self.len() - 1);
+        self.pop()
+    }
+
+    /// Removes the indexed element from the vector.
+    ///
+    /// The last element of the vector is put into the indexed slot.
+    /// Returns `Some(())` if an element has been removed and `None` otherwise.
+    ///
+    /// # Note
+    ///
+    /// This operation should be preferred over [`Vec::swap_remove`] if there is
+    /// no need to return the removed element since it avoids a contract storage
+    /// read for some use cases.
+    pub fn swap_remove_drop(&mut self, n: u32) -> Option<()> {
+        if self.is_empty() {
+            return None
+        }
+        self.elems.put(n, None);
+        let last_index = self.len() - 1;
+        let last = self.elems.put_get(last_index, None);
+        self.elems.put(n, last);
+        *self.len = last_index;
+        Some(())
+    }
+}
diff --git a/core/src/storage2/collections/vec/storage.rs b/core/src/storage2/collections/vec/storage.rs
new file mode 100644
index 00000000000..428a8efcf99
--- /dev/null
+++ b/core/src/storage2/collections/vec/storage.rs
@@ -0,0 +1,50 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of ink! storage traits.
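Before the `SpreadLayout` implementation that follows, it is worth spelling out the key arithmetic it depends on: `KeyPtr` assigns keys purely positionally, advancing by each field's `FOOTPRINT`, so `pull_spread` must read the fields in exactly the order `push_spread` wrote them. A minimal model of that invariant (not the ink! API; the footprints below are placeholders):

    /// Stand-in key and pointer types demonstrating positional key assignment.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Key(u64);

    struct KeyPtr {
        next: u64,
    }

    impl KeyPtr {
        fn from(root: Key) -> Self {
            Self { next: root.0 }
        }
        fn next_for(&mut self, footprint: u64) -> Key {
            let key = Key(self.next);
            self.next += footprint;
            key
        }
    }

    fn main() {
        let root = Key(0x42);
        // Pushing a `storage::Vec`: `len` takes the first cell, `elems` the
        // region behind it.
        let mut push_ptr = KeyPtr::from(root);
        let len_key = push_ptr.next_for(1);
        let elems_key = push_ptr.next_for(1);
        // A fresh pointer over the same root resolves to the same keys.
        let mut pull_ptr = KeyPtr::from(root);
        assert_eq!(pull_ptr.next_for(1), len_key);
        assert_eq!(pull_ptr.next_for(1), elems_key);
    }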
+ +use super::Vec as StorageVec; +use crate::storage2::{ + lazy::LazyIndexMap, + traits::{ + KeyPtr, + PackedLayout, + SpreadLayout, + }, +}; + +impl SpreadLayout for StorageVec +where + T: PackedLayout, +{ + const FOOTPRINT: u64 = 1 + as SpreadLayout>::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self { + len: SpreadLayout::pull_spread(ptr), + elems: SpreadLayout::pull_spread(ptr), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::push_spread(&self.len, ptr); + SpreadLayout::push_spread(&self.elems, ptr); + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + self.clear_cells(); + SpreadLayout::clear_spread(&self.len, ptr); + SpreadLayout::clear_spread(&self.elems, ptr); + } +} diff --git a/core/src/storage2/collections/vec/tests.rs b/core/src/storage2/collections/vec/tests.rs new file mode 100644 index 00000000000..f964e9589ac --- /dev/null +++ b/core/src/storage2/collections/vec/tests.rs @@ -0,0 +1,394 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::Vec as StorageVec; +use crate::{ + env, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, +}; +use ink_primitives::Key; + +#[test] +fn new_vec_works() { + // `StorageVec::new` + let vec = >::new(); + assert!(vec.is_empty()); + assert_eq!(vec.len(), 0); + assert_eq!(vec.get(0), None); + assert!(vec.iter().next().is_none()); + // `StorageVec::default` + let default = as Default>::default(); + assert!(default.is_empty()); + assert_eq!(default.len(), 0); + assert_eq!(vec.get(0), None); + assert!(default.iter().next().is_none()); + // `StorageVec::new` and `StorageVec::default` should be equal. + assert_eq!(vec, default); +} + +#[test] +fn from_iterator_works() { + let some_primes = [1, 2, 3, 5, 7, 11, 13]; + assert_eq!(some_primes.iter().copied().collect::>(), { + let mut vec = StorageVec::new(); + for prime in &some_primes { + vec.push(*prime) + } + vec + }); +} + +#[test] +fn from_empty_iterator_works() { + assert_eq!( + [].iter().copied().collect::>(), + StorageVec::new(), + ); +} + +#[test] +fn first_last_of_empty() { + let mut vec = >::new(); + assert_eq!(vec.first(), None); + assert_eq!(vec.first_mut(), None); + assert_eq!(vec.last(), None); + assert_eq!(vec.last_mut(), None); +} + +#[test] +fn push_pop_first_last_works() { + /// Asserts conditions are met for the given storage vector. 
+ fn assert_vec(vec: &StorageVec, len: u32, first: F, last: L) + where + F: Into>, + L: Into>, + { + assert_eq!(vec.is_empty(), len == 0); + assert_eq!(vec.len(), len); + assert_eq!(vec.first().copied(), first.into()); + assert_eq!(vec.last().copied(), last.into()); + } + + let mut vec = StorageVec::new(); + assert_vec(&vec, 0, None, None); + + // Sequence of `push` + vec.push(b'a'); + assert_vec(&vec, 1, b'a', b'a'); + vec.push(b'b'); + assert_vec(&vec, 2, b'a', b'b'); + vec.push(b'c'); + assert_vec(&vec, 3, b'a', b'c'); + vec.push(b'd'); + assert_vec(&vec, 4, b'a', b'd'); + + // Sequence of `pop` + assert_eq!(vec.pop(), Some(b'd')); + assert_vec(&vec, 3, b'a', b'c'); + assert_eq!(vec.pop(), Some(b'c')); + assert_vec(&vec, 2, b'a', b'b'); + assert_eq!(vec.pop(), Some(b'b')); + assert_vec(&vec, 1, b'a', b'a'); + assert_eq!(vec.pop(), Some(b'a')); + assert_vec(&vec, 0, None, None); + + // Pop from empty vector. + assert_eq!(vec.pop(), None); + assert_vec(&vec, 0, None, None); +} + +#[test] +fn pop_drop_works() { + let elems = [b'a', b'b', b'c', b'd']; + let mut vec = vec_from_slice(&elems); + assert_eq!(vec.pop_drop(), Some(())); + assert_eq_slice(&vec, &elems[0..3]); + assert_eq!(vec.pop_drop(), Some(())); + assert_eq_slice(&vec, &elems[0..2]); + assert_eq!(vec.pop_drop(), Some(())); + assert_eq_slice(&vec, &elems[0..1]); + assert_eq!(vec.pop_drop(), Some(())); + assert_eq_slice(&vec, &[]); + assert_eq!(vec.pop_drop(), None); + assert_eq_slice(&vec, &[]); +} + +#[test] +fn get_works() { + let elems = [b'a', b'b', b'c', b'd']; + let mut vec = vec_from_slice(&elems); + for (n, mut expected) in elems.iter().copied().enumerate() { + let n = n as u32; + assert_eq!(vec.get(n), Some(&expected)); + assert_eq!(vec.get_mut(n), Some(&mut expected)); + assert_eq!(&vec[n], &expected); + assert_eq!(&mut vec[n], &mut expected); + } + let len = vec.len(); + assert_eq!(vec.get(len), None); + assert_eq!(vec.get_mut(len), None); +} + +#[test] +#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] +fn index_out_of_bounds_works() { + let test_values = [b'a', b'b', b'c']; + let vec = vec_from_slice(&test_values); + let _ = &vec[test_values.len() as u32]; +} + +#[test] +#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] +fn index_mut_out_of_bounds_works() { + let test_values = [b'a', b'b', b'c']; + let mut vec = vec_from_slice(&test_values); + let _ = &mut vec[test_values.len() as u32]; +} + +#[test] +fn iter_next_works() { + let elems = [b'a', b'b', b'c', b'd']; + let vec = vec_from_slice(&elems); + // Test iterator over `&T`: + let mut iter = vec.iter(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.next(), Some(&b'a')); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some(&b'b')); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.count(), 2); + assert_eq!(iter.next(), Some(&b'c')); + assert_eq!(iter.size_hint(), (1, Some(1))); + assert_eq!(iter.next(), Some(&b'd')); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over `&mut T`: + let mut vec = vec; + let mut iter = vec.iter_mut(); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.next(), Some(&mut b'a')); + assert_eq!(iter.size_hint(), (3, Some(3))); + assert_eq!(iter.next(), Some(&mut b'b')); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.next(), Some(&mut b'c')); + assert_eq!(iter.size_hint(), (1, 
Some(1))); + assert_eq!(iter.next(), Some(&mut b'd')); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_nth_works() { + let elems = [b'a', b'b', b'c', b'd']; + let vec = vec_from_slice(&elems); + // Test iterator over `&T`: + let mut iter = vec.iter(); + assert_eq!(iter.count(), 4); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.nth(1), Some(&b'b')); + assert_eq!(iter.count(), 2); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.nth(1), Some(&b'd')); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.count(), 0); + assert_eq!(iter.nth(1), None); + // Test iterator over `&mut T`: + let mut vec = vec; + let mut iter = vec.iter_mut(); + assert_eq!(iter.size_hint(), (4, Some(4))); + assert_eq!(iter.nth(1), Some(&mut b'b')); + assert_eq!(iter.size_hint(), (2, Some(2))); + assert_eq!(iter.nth(1), Some(&mut b'd')); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.nth(1), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_next_back_works() { + let elems = [b'a', b'b', b'c', b'd']; + let vec = vec_from_slice(&elems); + // Test iterator over `&T`: + let mut iter = vec.iter().rev(); + assert_eq!(iter.clone().count(), 4); + assert_eq!(iter.next(), Some(&b'd')); + assert_eq!(iter.next(), Some(&b'c')); + assert_eq!(iter.clone().count(), 2); + assert_eq!(iter.next(), Some(&b'b')); + assert_eq!(iter.next(), Some(&b'a')); + assert_eq!(iter.clone().count(), 0); + assert_eq!(iter.next(), None); + // Test iterator over `&mut T`: + let mut vec = vec; + let mut iter = vec.iter_mut().rev(); + assert_eq!(iter.next(), Some(&mut b'd')); + assert_eq!(iter.next(), Some(&mut b'c')); + assert_eq!(iter.next(), Some(&mut b'b')); + assert_eq!(iter.next(), Some(&mut b'a')); + assert_eq!(iter.next(), None); + assert_eq!(iter.count(), 0); +} + +#[test] +fn iter_nth_back_works() { + let elems = [b'a', b'b', b'c', b'd']; + let vec = vec_from_slice(&elems); + // Test iterator over `&T`: + let mut iter = vec.iter().rev(); + assert_eq!(iter.clone().count(), 4); + assert_eq!(iter.nth(1), Some(&b'c')); + assert_eq!(iter.clone().count(), 2); + assert_eq!(iter.nth(1), Some(&b'a')); + assert_eq!(iter.clone().count(), 0); + assert_eq!(iter.nth(1), None); + // Test iterator over `&mut T`: + let mut vec = vec; + let mut iter = vec.iter_mut().rev(); + assert_eq!(iter.nth(1), Some(&mut b'c')); + assert_eq!(iter.nth(1), Some(&mut b'a')); + assert_eq!(iter.nth(1), None); + assert_eq!(iter.count(), 0); +} + +/// Asserts that the the given ordered storage vector elements are equal to the +/// ordered elements of the given slice. +fn assert_eq_slice(vec: &StorageVec, slice: &[u8]) { + assert_eq!(vec.len() as usize, slice.len()); + assert!(vec.iter().zip(slice.iter()).all(|(lhs, rhs)| *lhs == *rhs)) +} + +/// Creates a storage vector from the given slice. +fn vec_from_slice(slice: &[u8]) -> StorageVec { + slice.iter().copied().collect::>() +} + +#[test] +fn swap_works() { + let elems = [b'a', b'b', b'c', b'd']; + let mut vec = vec_from_slice(&elems); + + // Swap at same position is a no-op. 
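The swap tests here, and the `swap_remove` tests further down, assert the same observable behavior as `std::vec::Vec::swap_remove`: the last element fills the hole, so ordering is not preserved but only two slots are touched. One difference worth noting is that the storage version returns an `Option` and yields `None` on an empty vector, whereas std's method panics on out-of-bounds indices:

    fn main() {
        // `std::vec::Vec::swap_remove` shows the same element movement the
        // storage tests assert: the last element is moved into the hole.
        let mut v = vec![b'a', b'b', b'c', b'd'];
        assert_eq!(v.swap_remove(0), b'a');
        assert_eq!(v, [b'd', b'b', b'c']);
        assert_eq!(v.swap_remove(1), b'b');
        assert_eq!(v, [b'd', b'c']);
    }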
+ for index in 0..elems.len() as u32 { + vec.swap(index, index); + assert_eq_slice(&vec, &elems); + } + + // Swap first and second + vec.swap(0, 1); + assert_eq_slice(&vec, &[b'b', b'a', b'c', b'd']); + // Swap third and last + vec.swap(2, 3); + assert_eq_slice(&vec, &[b'b', b'a', b'd', b'c']); + // Swap first and last + vec.swap(0, 3); + assert_eq_slice(&vec, &[b'c', b'a', b'd', b'b']); +} + +#[test] +#[should_panic] +fn swap_one_invalid_index() { + let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); + vec.swap(0, vec.len()); +} + +#[test] +#[should_panic] +fn swap_both_invalid_indices() { + let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); + vec.swap(vec.len(), vec.len()); +} + +#[test] +fn swap_remove_works() { + let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); + + // Swap remove first element. + assert_eq!(vec.swap_remove(0), Some(b'a')); + assert_eq_slice(&vec, &[b'd', b'b', b'c']); + // Swap remove middle element. + assert_eq!(vec.swap_remove(1), Some(b'b')); + assert_eq_slice(&vec, &[b'd', b'c']); + // Swap remove last element. + assert_eq!(vec.swap_remove(1), Some(b'c')); + assert_eq_slice(&vec, &[b'd']); + // Swap remove only element. + assert_eq!(vec.swap_remove(0), Some(b'd')); + assert_eq_slice(&vec, &[]); + // Swap remove from empty vector. + assert_eq!(vec.swap_remove(0), None); + assert_eq_slice(&vec, &[]); +} + +#[test] +fn swap_remove_drop_works() { + let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); + + // Swap remove first element. + assert_eq!(vec.swap_remove_drop(0), Some(())); + assert_eq_slice(&vec, &[b'd', b'b', b'c']); + // Swap remove middle element. + assert_eq!(vec.swap_remove_drop(1), Some(())); + assert_eq_slice(&vec, &[b'd', b'c']); + // Swap remove last element. + assert_eq!(vec.swap_remove_drop(1), Some(())); + assert_eq_slice(&vec, &[b'd']); + // Swap remove only element. + assert_eq!(vec.swap_remove_drop(0), Some(())); + assert_eq_slice(&vec, &[]); + // Swap remove from empty vector. + assert_eq!(vec.swap_remove_drop(0), None); + assert_eq_slice(&vec, &[]); +} + +#[test] +fn spread_layout_push_pull_works() -> env::Result<()> { + env::test::run_test::(|_| { + let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key)); + // Load the pushed storage vector into another instance and check that + // both instances are equal: + let vec2 = + as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(vec1, vec2); + Ok(()) + }) +} + +#[test] +#[should_panic(expected = "encountered empty storage cell")] +fn spread_layout_clear_works() { + env::test::run_test::(|_| { + let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key)); + // It has already been asserted that a valid instance can be pulled + // from contract storage after a push to the same storage region. 
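This clear test also encodes an ordering constraint: `clear_spread` must clear the element cells via `clear_cells` while the length cell is still readable, because once the length is gone the number of cells to clear is unknown. A minimal model of that ordering, assuming a toy layout where the length sits at the root key and the elements occupy the cells directly behind it (the real key layout differs):

    use std::collections::HashMap;
    use std::convert::TryInto;

    /// Model of why element cells are cleared before the length cell.
    fn clear_vec_storage(storage: &mut HashMap<u64, Vec<u8>>, root: u64) {
        let len = u32::from_le_bytes(storage[&root].clone().try_into().unwrap());
        // Clear the element cells first, while `len` is still available ...
        for i in 0..len as u64 {
            storage.remove(&(root + 1 + i));
        }
        // ... then the length cell itself.
        storage.remove(&root);
    }

    fn main() {
        let mut storage = HashMap::new();
        storage.insert(0x42u64, 2u32.to_le_bytes().to_vec());
        storage.insert(0x43u64, vec![b'a']);
        storage.insert(0x44u64, vec![b'b']);
        clear_vec_storage(&mut storage, 0x42);
        assert!(storage.is_empty());
    }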
+ // + // Now clear the associated storage from `vec1` and check whether + // loading another instance from this storage will panic since the + // vector's length property cannot read a value: + SpreadLayout::clear_spread(&vec1, &mut KeyPtr::from(root_key)); + let _ = + as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + Ok(()) + }) + .unwrap() +} diff --git a/core/src/storage2/lazy/entry.rs b/core/src/storage2/lazy/entry.rs new file mode 100644 index 00000000000..fc4da0675f4 --- /dev/null +++ b/core/src/storage2/lazy/entry.rs @@ -0,0 +1,276 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(doc)] +use crate::storage2::lazy::{ + LazyArray, + LazyIndexMap, +}; +use crate::storage2::traits::{ + clear_packed_root, + clear_spread_root_opt, + pull_packed_root_opt, + pull_spread_root_opt, + push_packed_root_opt, + push_spread_root_opt, + KeyPtr, + PackedLayout, + SpreadLayout, +}; +use core::{ + cell::Cell, + fmt, + fmt::Debug, +}; +use ink_prelude::vec::Vec; +use ink_primitives::Key; + +/// The entry of a single cached value of a lazy storage data structure. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct Entry { + /// The value or `None` if the value has been removed. + value: Option, + /// This is [`EntryState::Mutated`] if the value has been mutated and is in + /// need to be synchronized with the contract storage. If it is + /// [`EntryState::Preserved`] the value from the contract storage has been + /// preserved and does not need to be synchronized. + state: Cell, +} + +impl Debug for Entry +where + T: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Entry") + .field("value", &self.value) + .field("state", &self.state.get()) + .finish() + } +} + +#[test] +fn debug_impl_works() { + let e1 = >::new(None, EntryState::Preserved); + assert_eq!( + format!("{:?}", &e1), + "Entry { value: None, state: Preserved }", + ); + let e2 = Entry::new(Some(42), EntryState::Mutated); + assert_eq!( + format!("{:?}", &e2), + "Entry { value: Some(42), state: Mutated }", + ); +} + +/// The state of the entry. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum EntryState { + /// The entry's value must be synchronized with the contract storage. + Mutated, + /// The entry's value preserved the value from the contract storage. + Preserved, +} + +impl EntryState { + /// Returns `true` if the entry state is mutated. + pub fn is_mutated(self) -> bool { + match self { + EntryState::Mutated => true, + EntryState::Preserved => false, + } + } + + /// Returns `true` if the entry state is preserved. 
+ pub fn is_preserved(self) -> bool { + !self.is_mutated() + } +} + +impl SpreadLayout for Entry +where + T: SpreadLayout, +{ + const FOOTPRINT: u64 = ::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + let root_key = ptr.next_for::(); + Self::new(pull_spread_root_opt::(&root_key), EntryState::Preserved) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + let old_state = self.replace_state(EntryState::Preserved); + if old_state.is_mutated() { + let root_key = ptr.next_for::(); + push_spread_root_opt::(self.value().into(), &root_key); + } + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + let root_key = ptr.next_for::(); + clear_spread_root_opt::(&root_key, || self.value().into()); + } +} + +impl scale::Encode for Entry +where + T: scale::Encode, +{ + #[inline] + fn size_hint(&self) -> usize { + as scale::Encode>::size_hint(&self.value) + } + + #[inline] + fn encode_to(&self, dest: &mut O) { + as scale::Encode>::encode_to(&self.value, dest) + } + + #[inline] + fn encode(&self) -> Vec { + as scale::Encode>::encode(&self.value) + } + + #[inline] + fn using_encoded R>(&self, f: F) -> R { + as scale::Encode>::using_encoded(&self.value, f) + } +} + +impl scale::Decode for Entry +where + T: scale::Decode, +{ + fn decode(input: &mut I) -> Result { + Ok(Self::new( + as scale::Decode>::decode(input)?, + EntryState::Preserved, + )) + } +} + +impl PackedLayout for Entry +where + T: PackedLayout, +{ + #[inline] + fn pull_packed(&mut self, at: &Key) { + PackedLayout::pull_packed(&mut self.value, at) + } + + #[inline] + fn push_packed(&self, at: &Key) { + PackedLayout::push_packed(&self.value, at) + } + + #[inline] + fn clear_packed(&self, at: &Key) { + PackedLayout::clear_packed(&self.value, at) + } +} + +impl Entry +where + T: PackedLayout, +{ + /// Pulls the entity from the underlying associated storage as packed representation. + /// + /// # Note + /// + /// Mainly used by lazy storage abstractions that only allow operating on + /// packed storage entities such as [`LazyIndexMap`] or [`LazyArray`]. + pub fn pull_packed_root(root_key: &Key) -> Self { + Self::new(pull_packed_root_opt::(root_key), EntryState::Preserved) + } + + /// Pushes the underlying associated storage as packed representation. + /// + /// # Note + /// + /// Mainly used by lazy storage abstractions that only allow operating on + /// packed storage entities such as [`LazyIndexMap`] or [`LazyArray`]. + pub fn push_packed_root(&self, root_key: &Key) { + let old_state = self.replace_state(EntryState::Preserved); + if old_state.is_mutated() { + self.replace_state(EntryState::Preserved); + push_packed_root_opt::(self.value().into(), &root_key); + } + } + + /// Clears the underlying associated storage as packed representation. + /// + /// # Note + /// + /// Mainly used by lazy storage abstractions that only allow operating on + /// packed storage entities such as [`LazyIndexMap`] or [`LazyArray`]. + pub fn clear_packed_root(&self, root_key: &Key) { + clear_packed_root::>(self.value(), &root_key); + } +} + +impl Entry { + /// Creates a new entry with the value and state. + pub fn new(value: Option, state: EntryState) -> Self { + Self { + value, + state: Cell::new(state), + } + } + + /// Replaces the current entry state with the new state and returns it. + pub fn replace_state(&self, new_state: EntryState) -> EntryState { + // The implementation of `Cell::set` uses `Cell::replace` so instead + // of offering both APIs we simply opted to offer just the more general + // replace API for `Entry`. 
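The `Cell`-based state tracking above is what lets `push_spread` and `push_packed_root` take `&self` and still reset the dirty flag: flushing to storage is logically a read of the value plus a state reset, which is why only the general `replace_state` is offered. As a review note, the second `replace_state` call inside `push_packed_root`'s mutated branch is redundant, since the state is already `Preserved` at that point. A reduced model of the flag's life cycle:

    use std::cell::Cell;
    use std::mem;

    /// Writes flip the state to `Mutated`; flushing takes `&self` and
    /// resets the flag through the `Cell`, reporting the old state so the
    /// caller can skip the storage write for preserved entries.
    #[derive(Copy, Clone, PartialEq, Debug)]
    enum EntryState {
        Mutated,
        Preserved,
    }

    struct Entry<T> {
        value: Option<T>,
        state: Cell<EntryState>,
    }

    impl<T> Entry<T> {
        fn put(&mut self, new_value: Option<T>) -> Option<T> {
            let new_is_some = new_value.is_some();
            let old_value = mem::replace(&mut self.value, new_value);
            if old_value.is_some() || new_is_some {
                self.state.set(EntryState::Mutated);
            }
            old_value
        }

        /// Called when syncing to storage: reset the flag, report the old state.
        fn replace_state(&self, new_state: EntryState) -> EntryState {
            self.state.replace(new_state)
        }
    }

    fn main() {
        let mut entry = Entry { value: None, state: Cell::new(EntryState::Preserved) };
        entry.put(Some(42));
        // Only mutated entries need to be written back:
        assert_eq!(entry.replace_state(EntryState::Preserved), EntryState::Mutated);
        assert_eq!(entry.state.get(), EntryState::Preserved);
    }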
+ self.state.replace(new_state) + } + + /// Returns a shared reference to the value of the entry. + pub fn value(&self) -> &Option { + &self.value + } + + /// Returns an exclusive reference to the entry value. + /// + /// # Note + /// + /// This changes the `mutate` state of the entry if the entry was occupied + /// since the caller could potentially change the returned value. + pub fn value_mut(&mut self) -> &mut Option { + if self.value.is_some() { + self.state.set(EntryState::Mutated); + } + &mut self.value + } + + /// Converts the entry into its value. + pub fn into_value(self) -> Option { + self.value + } + + /// Puts the new value into the entry and returns the old value. + /// + /// # Note + /// + /// This changes the `mutate` state of the entry to `true` as long as at + /// least one of `old_value` and `new_value` is `Some`. + pub fn put(&mut self, new_value: Option) -> Option { + let new_value_is_some = new_value.is_some(); + let old_value = core::mem::replace(&mut self.value, new_value); + if old_value.is_some() || new_value_is_some { + self.state.set(EntryState::Mutated); + } + old_value + } +} diff --git a/core/src/storage2/lazy/lazy_array.rs b/core/src/storage2/lazy/lazy_array.rs new file mode 100644 index 00000000000..8ca334dbb3b --- /dev/null +++ b/core/src/storage2/lazy/lazy_array.rs @@ -0,0 +1,893 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + Entry, + EntryState, +}; +use crate::storage2::traits::{ + clear_packed_root, + pull_packed_root_opt, + KeyPtr, + PackedLayout, + SpreadLayout, +}; +use core::{ + cell::UnsafeCell, + fmt, + fmt::Debug, + mem, + ptr::NonNull, +}; +use generic_array::{ + typenum::{ + UInt, + UTerm, + Unsigned, + B0, + B1, + }, + ArrayLength, + GenericArray, +}; +use ink_primitives::Key; + +/// The index type used in the lazy storage chunk. +pub type Index = u32; + +/// Utility trait for helping with lazy array construction. +pub trait LazyArrayLength: + ArrayLength>>> + Unsigned +{ +} +impl LazyArrayLength for UTerm {} +impl>>>> LazyArrayLength for UInt {} +impl>>>> LazyArrayLength for UInt {} + +/// A lazy storage array that spans over N storage cells. +/// +/// Storage data structure to emulate storage arrays: `[T; N]`. +/// +/// # Note +/// +/// Computes operations on the underlying N storage cells in a lazy fashion. +/// Due to the size constraints the `LazyArray` is generally more efficient +/// than the [`LazyMap`](`super::LazyIndexMap`) for most use cases with limited elements. +/// +/// This is mainly used as low-level storage primitives by other high-level +/// storage primitives in order to manage the contract storage for a whole +/// chunk of storage cells. +pub struct LazyArray +where + N: LazyArrayLength, +{ + /// The offset key for the N cells. 
+ /// + /// If the lazy chunk has been initialized during contract initialization + /// the key will be `None` since there won't be a storage region associated + /// to the lazy chunk which prevents it from lazily loading elements. This, + /// however, is only checked at contract runtime. We might incorporate + /// compile-time checks for this particular use case later on. + key: Option, + /// The subset of currently cached entries of the lazy storage chunk. + /// + /// An entry is cached as soon as it is loaded or written. + cached_entries: EntryArray, +} + +struct DebugEntryArray<'a, T, N>(&'a EntryArray) +where + T: Debug, + N: LazyArrayLength; + +impl<'a, T, N> Debug for DebugEntryArray<'a, T, N> +where + T: Debug, + N: LazyArrayLength, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_map() + .entries(self.0.iter().enumerate().filter_map(|(key, entry)| { + match entry { + Some(entry) => Some((key, entry)), + None => None, + } + })) + .finish() + } +} + +impl Debug for LazyArray +where + T: Debug, + N: LazyArrayLength, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LazyArray") + .field("key", &self.key) + .field("cached_entries", &DebugEntryArray(&self.cached_entries)) + .finish() + } +} + +#[test] +fn debug_impl_works() { + use generic_array::typenum::U4; + let mut larray = >::new(); + // Empty imap. + assert_eq!( + format!("{:?}", &larray), + "LazyArray { key: None, cached_entries: {} }", + ); + // Filled imap. + larray.put(0, Some(1)); + larray.put(2, Some(2)); + larray.put(3, None); + assert_eq!( + format!("{:?}", &larray), + "LazyArray { \ + key: None, \ + cached_entries: {\ + 0: Entry { \ + value: Some(1), \ + state: Mutated \ + }, \ + 2: Entry { \ + value: Some(2), \ + state: Mutated \ + }, \ + 3: Entry { \ + value: None, \ + state: Mutated \ + }\ + } \ + }", + ); +} + +/// Returns the capacity for an array with the given array length. +fn array_capacity() -> u32 +where + N: LazyArrayLength, +{ + ::U32 +} + +/// The underlying array cache for the [`LazyArray`]. +#[derive(Debug)] +pub struct EntryArray +where + N: LazyArrayLength, +{ + /// The cache entries of the entry array. + entries: GenericArray>>, N>, +} + +#[derive(Debug)] +pub struct EntriesIter<'a, T> { + iter: core::slice::Iter<'a, UnsafeCell>>>, +} + +impl<'a, T> EntriesIter<'a, T> { + pub fn new(entry_array: &'a EntryArray) -> Self + where + N: LazyArrayLength, + { + Self { + iter: entry_array.entries.iter(), + } + } +} + +impl<'a, T> Iterator for EntriesIter<'a, T> { + type Item = &'a Option>; + + fn next(&mut self) -> Option { + self.iter.next().map(|cell| unsafe { &*cell.get() }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn count(self) -> usize + where + Self: Sized, + { + self.iter.count() + } +} + +impl<'a, T> DoubleEndedIterator for EntriesIter<'a, T> { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|cell| unsafe { &*cell.get() }) + } +} + +impl<'a, T> ExactSizeIterator for EntriesIter<'a, T> {} + +impl EntryArray +where + N: LazyArrayLength, +{ + /// Creates a new entry array cache. + pub fn new() -> Self { + Self { + entries: Default::default(), + } + } +} + +impl Default for EntryArray +where + N: LazyArrayLength, +{ + fn default() -> Self { + Self::new() + } +} + +impl EntryArray +where + N: LazyArrayLength, +{ + /// Returns the constant capacity of the lazy array. 
+    #[inline]
+    pub fn capacity() -> u32 {
+        array_capacity::<N>()
+    }
+
+    /// Puts the new value into the indexed slot and
+    /// returns the old value if any.
+    fn put(&self, at: Index, new_value: Option<T>) -> Option<T> {
+        mem::replace(
+            unsafe { &mut *self.entries.as_slice()[at as usize].get() },
+            Some(Entry::new(new_value, EntryState::Mutated)),
+        )
+        .map(Entry::into_value)
+        .flatten()
+    }
+
+    /// Inserts a new entry into the cache and returns an exclusive reference to it.
+    unsafe fn insert_entry(&self, at: Index, new_entry: Entry<T>) -> NonNull<Entry<T>> {
+        let entry: &mut Option<Entry<T>> =
+            &mut *UnsafeCell::get(&self.entries[at as usize]);
+        *entry = Some(new_entry);
+        entry
+            .as_mut()
+            .map(NonNull::from)
+            .expect("just inserted the entry")
+    }
+
+    /// Returns an exclusive reference to the entry at the given index if any.
+    unsafe fn get_entry_mut(&self, at: Index) -> Option<&mut Entry<T>> {
+        if at >= Self::capacity() {
+            return None
+        }
+        (&mut *UnsafeCell::get(&self.entries[at as usize])).as_mut()
+    }
+
+    /// Returns an iterator that yields shared references to all cached entries.
+    pub fn iter(&self) -> EntriesIter<T> {
+        EntriesIter::new(self)
+    }
+}
+
+impl<T, N> LazyArray<T, N>
+where
+    T: PackedLayout,
+    N: LazyArrayLength<T>,
+{
+    /// Clears the underlying storage of the entry at the given index.
+    ///
+    /// # Safety
+    ///
+    /// For performance reasons this does not synchronize the lazy array's
+    /// memory-side cache which invalidates future accesses to the cleared entry.
+    /// Care should be taken when using this API.
+    ///
+    /// The general use of this API is to streamline `Drop` implementations of
+    /// high-level abstractions that build upon this low-level data structure.
+    pub fn clear_packed_at(&self, index: Index) {
+        let root_key = self.key_at(index).expect("cannot clear in lazy state");
+        if <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
+            // We need to load the entity before we remove its associated contract storage
+            // because it requires a deep clean-up which propagates clearing to its fields,
+            // for example in the case of `T` being a `storage::Box`.
+            let entity = self.get(index).expect("cannot clear a non-existing entity");
+            clear_packed_root::<T>(&entity, &root_key);
+        } else {
+            // The type does not require deep clean-up so we can simply clean up
+            // its associated storage cell and be done without having to load it first.
+            crate::env::clear_contract_storage(root_key);
+        }
+    }
+}
+
+impl<T, N> Default for LazyArray<T, N>
+where
+    N: LazyArrayLength<T>,
+{
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T, N> LazyArray<T, N>
+where
+    N: LazyArrayLength<T>,
+{
+    /// Creates a new empty lazy array.
+    ///
+    /// # Note
+    ///
+    /// A lazy array created this way cannot be used to load from the contract storage.
+    /// All operations that directly or indirectly load from storage will panic.
+    pub fn new() -> Self {
+        Self {
+            key: None,
+            cached_entries: Default::default(),
+        }
+    }
+
+    /// Creates a new empty lazy array positioned at the given key.
+    ///
+    /// # Note
+    ///
+    /// This constructor is private and should never need to be called from
+    /// outside this module. It is used to construct a lazy array from a
+    /// key that is only useful upon a contract call.
+    /// Use [`LazyArray::new`] for construction during contract initialization.
+    fn lazy(key: Key) -> Self {
+        Self {
+            key: Some(key),
+            cached_entries: Default::default(),
+        }
+    }
+
+    /// Returns the constant capacity of the lazy array.
+    #[inline]
+    pub fn capacity(&self) -> u32 {
+        array_capacity::<N>()
+    }
+
+    /// Returns the offset key of the lazy array if any.
+ pub fn key(&self) -> Option<&Key> { + self.key.as_ref() + } + + /// Returns a shared reference to the underlying cached entries. + /// + /// # Safety + /// + /// This operation is safe since it returns a shared reference from + /// a `&self` which is viable in safe Rust. + fn cached_entries(&self) -> &EntryArray { + &self.cached_entries + } + + /// Puts a new value into the given indexed slot. + /// + /// # Note + /// + /// Use [`LazyArray::put_get`]`(None)` to remove an element. + pub fn put(&mut self, at: Index, new_value: Option) { + self.cached_entries().put(at, new_value); + } +} + +impl SpreadLayout for LazyArray +where + T: PackedLayout, + N: LazyArrayLength, +{ + const FOOTPRINT: u64 = ::U64; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self::lazy(ptr.next_for::()) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + let offset_key = ptr.next_for::(); + for (index, entry) in self.cached_entries().iter().enumerate() { + if let Some(entry) = entry { + let root_key = offset_key + index as u64; + entry.push_packed_root(&root_key); + } + } + } + + #[inline] + fn clear_spread(&self, _ptr: &mut KeyPtr) { + // Low-level lazy abstractions won't perform automated clean-up since + // they generally are not aware of their entire set of associated + // elements. The high-level abstractions that build upon them are + // responsible for cleaning up. + } +} + +impl LazyArray +where + N: LazyArrayLength, +{ + /// Returns the offset key for the given index if not out of bounds. + pub fn key_at(&self, at: Index) -> Option { + if at >= self.capacity() { + return None + } + self.key.map(|key| key + at as u64) + } +} + +impl LazyArray +where + T: PackedLayout, + N: LazyArrayLength, +{ + /// Loads the entry at the given index. + /// + /// Tries to load the entry from cache and falls back to lazily load the + /// entry from the contract storage. + fn load_through_cache(&self, at: Index) -> NonNull> { + assert!(at < self.capacity(), "index is out of bounds"); + match unsafe { self.cached_entries.get_entry_mut(at) } { + Some(entry) => { + // Load value from cache. + NonNull::from(entry) + } + None => { + // Load value from storage and put into cache. + // Then load value from cache. + let value = self + .key_at(at) + .map(|key| pull_packed_root_opt::(&key)) + .unwrap_or(None); + let entry = Entry::new(value, EntryState::Preserved); + unsafe { self.cached_entries.insert_entry(at, entry) } + } + } + } + + /// Loads the entry at the given index. + /// + /// Tries to load the entry from cache and falls back to lazily load the + /// entry from the contract storage. + /// + /// # Panics + /// + /// - If the lazy array is in a state that forbids lazy loading. + /// - If the given index is out of bounds. + fn load_through_cache_mut(&mut self, index: Index) -> &mut Entry { + // SAFETY: + // Returning a `&mut Entry` from within a `&mut self` function + // won't allow creating aliasing between exclusive references. + unsafe { &mut *self.load_through_cache(index).as_ptr() } + } + + /// Returns a shared reference to the element at the given index if any. + /// + /// # Note + /// + /// This operation eventually loads from contract storage. + /// + /// # Panics + /// + /// If the given index is out of bounds. + pub fn get(&self, at: Index) -> Option<&T> { + unsafe { &*self.load_through_cache(at).as_ptr() } + .value() + .into() + } + + /// Returns an exclusive reference to the element at the given index if any. + /// + /// # Note + /// + /// This operation eventually loads from contract storage. 
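+    ///
+    /// # Example
+    ///
+    /// Editor's sketch (not part of the original patch), mirroring the unit
+    /// tests below; assumes a `LazyArray<u8, U4>` created via `new`, i.e.
+    /// without an associated storage key:
+    ///
+    /// ```no_run
+    /// use generic_array::typenum::U4;
+    /// let mut larray = <LazyArray<u8, U4>>::new();
+    /// larray.put(0, Some(b'A'));
+    /// if let Some(value) = larray.get_mut(0) {
+    ///     *value = b'B'; // flags the cached entry as `Mutated`
+    /// }
+    /// assert_eq!(larray.get(0), Some(&b'B'));
+    /// ```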
+ /// + /// # Panics + /// + /// If the given index is out of bounds. + pub fn get_mut(&mut self, at: Index) -> Option<&mut T> { + self.load_through_cache_mut(at).value_mut().into() + } + + /// Puts the new value into the indexed slot and returns the old value if any. + /// + /// # Note + /// + /// - This operation eventually loads from contract storage. + /// - Prefer [`LazyArray::put`] if you are not interested in the old value. + /// - Use [`LazyArray::put_get`]`(None)` to remove an element. + /// + /// # Panics + /// + /// If the given index is out of bounds. + pub fn put_get(&mut self, at: Index, new_value: Option) -> Option { + self.load_through_cache_mut(at).put(new_value) + } + + /// Swaps the values at indices x and y. + /// + /// # Note + /// + /// This operation eventually loads from contract storage. + /// + /// # Panics + /// + /// If any of the given indices is out of bounds. + pub fn swap(&mut self, a: Index, b: Index) { + assert!(a < self.capacity(), "a is out of bounds"); + assert!(b < self.capacity(), "b is out of bounds"); + if a == b { + // Bail out early if both indices are the same. + return + } + let (loaded_a, loaded_b) = + // SAFETY: The loaded `x` and `y` entries are distinct from each + // other guaranteed by the previous checks so they cannot + // alias. + unsafe { ( + &mut *self.load_through_cache(a).as_ptr(), + &mut *self.load_through_cache(b).as_ptr(), + ) }; + if loaded_a.value().is_none() && loaded_b.value().is_none() { + // Bail out since nothing has to be swapped if both values are `None`. + return + } + // At this point at least one of the values is `Some` so we have to + // perform the swap and set both entry states to mutated. + loaded_a.replace_state(EntryState::Mutated); + loaded_b.replace_state(EntryState::Mutated); + core::mem::swap(loaded_a.value_mut(), loaded_b.value_mut()); + } +} + +#[cfg(test)] +mod tests { + use super::{ + super::{ + Entry, + EntryState, + }, + Index, + LazyArray, + LazyArrayLength, + }; + use crate::{ + env, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, + }; + use generic_array::typenum::U4; + use ink_primitives::Key; + + /// Asserts that the cached entries of the given `imap` is equal to the `expected` slice. + fn assert_cached_entries( + larray: &LazyArray, + expected: &[(Index, Entry)], + ) where + N: LazyArrayLength, + { + let mut len = 0; + for (given, expected) in larray + .cached_entries() + .iter() + .enumerate() + .filter_map(|(index, entry)| { + match entry { + Some(entry) => Some((index as u32, entry)), + None => None, + } + }) + .zip(expected.iter().map(|(index, entry)| (*index, entry))) + { + assert_eq!(given, expected); + len += 1; + } + assert_eq!(len, expected.len()); + } + + #[test] + fn new_works() { + let larray = >::new(); + // Key must be none. + assert_eq!(larray.key(), None); + assert_eq!(larray.key_at(0), None); + assert_eq!(larray.capacity(), 4); + // Cached elements must be empty. + assert_cached_entries(&larray, &[]); + // Same as default: + let default_larray = >::default(); + assert_eq!(default_larray.key(), larray.key()); + assert_eq!(default_larray.key_at(0), larray.key_at(0)); + assert_eq!(larray.capacity(), 4); + assert_cached_entries(&default_larray, &[]); + } + + #[test] + fn lazy_works() { + let key = Key([0x42; 32]); + let larray = >::lazy(key); + // Key must be Some. + assert_eq!(larray.key(), Some(&key)); + assert_eq!(larray.key_at(0), Some(key)); + assert_eq!(larray.key_at(1), Some(key + 1u64)); + assert_eq!(larray.capacity(), 4); + // Cached elements must be empty. 
+ assert_cached_entries(&larray, &[]); + } + + #[test] + fn get_works() { + let mut larray = >::new(); + let nothing_changed = &[ + (0, Entry::new(None, EntryState::Preserved)), + (1, Entry::new(Some(b'B'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(Some(b'D'), EntryState::Mutated)), + ]; + // Put some values. + assert_eq!(larray.put_get(0, None), None); + assert_eq!(larray.put_get(1, Some(b'B')), None); + assert_eq!(larray.put_get(2, None), None); + assert_eq!(larray.put_get(3, Some(b'D')), None); + assert_cached_entries(&larray, nothing_changed); + // `get` works: + assert_eq!(larray.get(0), None); + assert_eq!(larray.get(1), Some(&b'B')); + assert_eq!(larray.get(2), None); + assert_eq!(larray.get(3), Some(&b'D')); + assert_cached_entries(&larray, nothing_changed); + // `get_mut` works: + assert_eq!(larray.get_mut(0), None); + assert_eq!(larray.get_mut(1), Some(&mut b'B')); + assert_eq!(larray.get_mut(2), None); + assert_eq!(larray.get_mut(3), Some(&mut b'D')); + assert_cached_entries(&larray, nothing_changed); + } + + #[test] + #[should_panic(expected = "index is out of bounds")] + fn get_out_of_bounds_works() { + let larray = >::new(); + let _ = larray.get(4); + } + + #[test] + fn put_get_works() { + let mut larray = >::new(); + // Assert that the array cache is empty at first. + assert_cached_entries(&larray, &[]); + // Put none values. + assert_eq!(larray.put_get(0, None), None); + assert_eq!(larray.put_get(1, None), None); + assert_eq!(larray.put_get(3, None), None); + assert_cached_entries( + &larray, + &[ + (0, Entry::new(None, EntryState::Preserved)), + (1, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + // Override with some values. + assert_eq!(larray.put_get(0, Some(b'A')), None); + assert_eq!(larray.put_get(1, Some(b'B')), None); + assert_eq!(larray.put_get(3, None), None); + assert_cached_entries( + &larray, + &[ + (0, Entry::new(Some(b'A'), EntryState::Mutated)), + (1, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + // Override some values with none. + assert_eq!(larray.put_get(1, None), Some(b'B')); + assert_eq!(larray.put_get(3, None), None); + assert_cached_entries( + &larray, + &[ + (0, Entry::new(Some(b'A'), EntryState::Mutated)), + (1, Entry::new(None, EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + } + + #[test] + #[should_panic(expected = "index is out of bounds")] + fn put_get_out_of_bounds_works() { + let mut larray = >::new(); + let _ = larray.put_get(4, Some(b'A')); + } + + #[test] + fn put_works() { + let mut larray = >::new(); + // Put some values. + larray.put(0, None); + larray.put(1, Some(b'B')); + larray.put(3, None); + // The main difference between `put` and `put_get` is that `put` never + // loads from storage which also has one drawback: Putting a `None` + // value always ends-up in `Mutated` state for the entry even if the + // entry is already `None`. 
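+        // Editor's note (sketch, not part of the original patch): on a fresh
+        // array the two methods therefore cache different entry states for
+        // the same logical no-op:
+        //
+        //     larray.put(0, None);     // caches `(None, Mutated)`
+        //     larray.put_get(0, None); // caches `(None, Preserved)`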
+ assert_cached_entries( + &larray, + &[ + (0, Entry::new(None, EntryState::Mutated)), + (1, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Mutated)), + ], + ); + // Overwrite entries: + larray.put(0, Some(b'A')); + larray.put(1, None); + larray.put(2, Some(b'C')); + larray.put(3, None); + assert_cached_entries( + &larray, + &[ + (0, Entry::new(Some(b'A'), EntryState::Mutated)), + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'C'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Mutated)), + ], + ); + } + + #[test] + #[should_panic(expected = "index out of bounds: the len is 4 but the index is 4")] + fn put_out_of_bounds_works() { + let mut larray = >::new(); + larray.put(4, Some(b'A')); + } + + #[test] + fn swap_works() { + let mut larray = >::new(); + let nothing_changed = &[ + (0, Entry::new(Some(b'A'), EntryState::Mutated)), + (1, Entry::new(Some(b'B'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + ]; + // Put some values. + assert_eq!(larray.put_get(0, Some(b'A')), None); + assert_eq!(larray.put_get(1, Some(b'B')), None); + assert_eq!(larray.put_get(2, None), None); + assert_eq!(larray.put_get(3, None), None); + assert_cached_entries(&larray, nothing_changed); + // Swap same indices: Check that nothing has changed. + for i in 0..4 { + larray.swap(i, i); + } + assert_cached_entries(&larray, nothing_changed); + // Swap `None` values: Check that nothing has changed. + larray.swap(2, 3); + larray.swap(3, 2); + assert_cached_entries(&larray, nothing_changed); + // Swap `Some` and `None`: + larray.swap(0, 2); + assert_cached_entries( + &larray, + &[ + (0, Entry::new(None, EntryState::Mutated)), + (1, Entry::new(Some(b'B'), EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap `Some` and `Some`: + larray.swap(1, 2); + assert_cached_entries( + &larray, + &[ + (0, Entry::new(None, EntryState::Mutated)), + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + } + + #[test] + #[should_panic(expected = "b is out of bounds")] + fn swap_rhs_out_of_bounds() { + let mut larray = >::new(); + larray.swap(0, 4); + } + + #[test] + #[should_panic(expected = "a is out of bounds")] + fn swap_both_out_of_bounds() { + let mut larray = >::new(); + larray.swap(4, 4); + } + + #[test] + fn spread_layout_works() -> env::Result<()> { + env::test::run_test::(|_| { + let mut larray = >::new(); + let nothing_changed = &[ + (0, Entry::new(Some(b'A'), EntryState::Mutated)), + (1, Entry::new(Some(b'B'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + ]; + // Put some values. + assert_eq!(larray.put_get(0, Some(b'A')), None); + assert_eq!(larray.put_get(1, Some(b'B')), None); + assert_eq!(larray.put_get(2, None), None); + assert_eq!(larray.put_get(3, None), None); + assert_cached_entries(&larray, nothing_changed); + // Push the lazy index map onto the contract storage and then load + // another instance of it from the contract stoarge. + // Then: Compare both instances to be equal. 
+ let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&larray, &mut KeyPtr::from(root_key)); + let larray2 = as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_cached_entries(&larray2, &[]); + assert_eq!(larray2.get(0), Some(&b'A')); + assert_eq!(larray2.get(1), Some(&b'B')); + assert_eq!(larray2.get(2), None); + assert_eq!(larray2.get(3), None); + assert_cached_entries( + &larray2, + &[ + (0, Entry::new(Some(b'A'), EntryState::Preserved)), + (1, Entry::new(Some(b'B'), EntryState::Preserved)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + // Clear the first lazy index map instance and reload another instance + // to check whether the associated storage has actually been freed + // again: + SpreadLayout::clear_spread(&larray2, &mut KeyPtr::from(root_key)); + // The above `clear_spread` call is a no-op since lazy index map is + // generally not aware of its associated elements. So we have to + // manually clear them from the contract storage which is what the + // high-level data structures like `storage::Vec` would command: + larray2.clear_packed_at(0); + larray2.clear_packed_at(1); + larray2.clear_packed_at(2); // Not really needed here. + larray2.clear_packed_at(3); // Not really needed here. + let larray3 = as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_cached_entries(&larray3, &[]); + assert_eq!(larray3.get(0), None); + assert_eq!(larray3.get(1), None); + assert_eq!(larray3.get(2), None); + assert_eq!(larray3.get(3), None); + assert_cached_entries( + &larray3, + &[ + (0, Entry::new(None, EntryState::Preserved)), + (1, Entry::new(None, EntryState::Preserved)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + ], + ); + Ok(()) + }) + } +} diff --git a/core/src/storage2/lazy/lazy_cell.rs b/core/src/storage2/lazy/lazy_cell.rs new file mode 100644 index 00000000000..4e956154408 --- /dev/null +++ b/core/src/storage2/lazy/lazy_cell.rs @@ -0,0 +1,403 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + Entry, + EntryState, +}; +use crate::storage2::traits::{ + clear_spread_root_opt, + pull_spread_root_opt, + KeyPtr, + SpreadLayout, +}; +use core::{ + cell::UnsafeCell, + fmt, + fmt::Debug, + ptr::NonNull, +}; +use ink_primitives::Key; + +/// A lazy storage entity. +/// +/// This loads its value from storage upon first use. +/// +/// # Note +/// +/// Use this if the storage field doesn't need to be loaded in some or most cases. +pub struct LazyCell +where + T: SpreadLayout, +{ + /// The key to lazily load the value from. + /// + /// # Note + /// + /// This can be `None` on contract initialization where a `LazyCell` is + /// normally initialized given a concrete value. + key: Option, + /// The low-level cache for the lazily loaded storage value. 
+ /// + /// # Safety (Dev) + /// + /// We use `UnsafeCell` instead of `RefCell` because + /// the intended use-case is to hand out references (`&` and `&mut`) + /// to the callers of `Lazy`. This cannot be done without `unsafe` + /// code even with `RefCell`. Also `RefCell` has a larger memory footprint + /// and has additional overhead that we can avoid by the interface + /// and the fact that ink! code is always run single-threaded. + /// Being efficient is important here because this is intended to be + /// a low-level primitive with lots of dependencies. + cache: UnsafeCell>>, +} + +impl Debug for LazyCell +where + T: Debug + SpreadLayout, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LazyCell") + .field("key", &self.key) + .field("cache", unsafe { &*self.cache.get() }) + .finish() + } +} + +#[test] +fn debug_impl_works() { + let c1 = >::new(None); + assert_eq!( + format!("{:?}", &c1), + "LazyCell { key: None, cache: Some(Entry { value: None, state: Mutated }) }", + ); + let c2 = >::new(Some(42)); + assert_eq!( + format!("{:?}", &c2), + "LazyCell { key: None, cache: Some(Entry { value: Some(42), state: Mutated }) }", + ); + let c3 = >::lazy(Key([0x00; 32])); + assert_eq!( + format!("{:?}", &c3), + "LazyCell { \ + key: Some(Key(\ + 0x00_\ + 00000000_00000000_\ + 00000000_00000000_\ + 00000000_00000000_\ + 00000000_000000)), \ + cache: None \ + }", + ); +} + +impl Drop for LazyCell +where + T: SpreadLayout, +{ + fn drop(&mut self) { + if let Some(key) = self.key() { + if let Some(entry) = self.entry() { + clear_spread_root_opt::(key, || entry.value().into()) + } + } + } +} + +impl SpreadLayout for LazyCell +where + T: SpreadLayout, +{ + const FOOTPRINT: u64 = ::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self::lazy(ptr.next_for::()) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + if let Some(entry) = self.entry() { + SpreadLayout::push_spread(entry, ptr) + } + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + if let Some(entry) = self.entry() { + SpreadLayout::clear_spread(entry, ptr) + } + } +} + +// # Developer Note +// +// Implementing PackedLayout for LazyCell is not useful since that would +// potentially allow overlapping distinct LazyCell instances by pulling +// from the same underlying storage cell. +// +// If a user wants a packed LazyCell they can instead pack its inner type. + +impl From for LazyCell +where + T: SpreadLayout, +{ + fn from(value: T) -> Self { + Self::new(Some(value)) + } +} + +impl Default for LazyCell +where + T: Default + SpreadLayout, +{ + fn default() -> Self { + Self::new(Some(Default::default())) + } +} + +impl LazyCell +where + T: SpreadLayout, +{ + /// Creates an already populated lazy storage cell. + /// + /// # Note + /// + /// Since this already has a value it will never actually load from + /// the contract storage. + #[must_use] + pub fn new(value: Option) -> Self { + Self { + key: None, + cache: UnsafeCell::new(Some(Entry::new(value, EntryState::Mutated))), + } + } + + /// Creates a lazy storage cell for the given key. + /// + /// # Note + /// + /// This will actually lazily load from the associated storage cell + /// upon access. + #[must_use] + pub fn lazy(key: Key) -> Self { + Self { + key: Some(key), + cache: UnsafeCell::new(None), + } + } + + /// Returns the lazy key if any. + /// + /// # Note + /// + /// The key is `None` if the `LazyCell` has been initialized as a value. + /// This generally only happens in ink! constructors. 
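+    ///
+    /// # Example
+    ///
+    /// Editor's sketch (not part of the original patch): a value-initialized
+    /// cell carries no key and thus never reads from contract storage.
+    ///
+    /// ```no_run
+    /// let cell = <LazyCell<u8>>::new(Some(1));
+    /// assert_eq!(cell.get(), Some(&1)); // served from the cache, no storage access
+    /// ```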
+    fn key(&self) -> Option<&Key> {
+        self.key.as_ref()
+    }
+
+    /// Returns the cached entry.
+    fn entry(&self) -> Option<&Entry<T>> {
+        unsafe { &*self.cache.get() }.as_ref()
+    }
+}
+
+impl<T> LazyCell<T>
+where
+    T: SpreadLayout,
+{
+    /// Loads the storage entry.
+    ///
+    /// Tries to load the entry from cache and falls back to lazily load the
+    /// entry from the contract storage.
+    unsafe fn load_through_cache(&self) -> NonNull<Entry<T>> {
+        // SAFETY: This is critical because we mutably access the entry.
+        //         However, we mutate the entry only if it is vacant.
+        //         If the entry is occupied by a value we return early.
+        //         This way we do not invalidate pointers to this value.
+        let cache = &mut *self.cache.get();
+        if cache.is_none() {
+            // Load value from storage and then return the cached entry.
+            let value = self
+                .key
+                .map(|key| pull_spread_root_opt::<T>(&key))
+                .unwrap_or(None);
+            *cache = Some(Entry::new(value, EntryState::Preserved));
+        }
+        debug_assert!(cache.is_some());
+        NonNull::from(cache.as_mut().expect("unpopulated cache entry"))
+    }
+
+    /// Returns a shared reference to the entry.
+    fn load_entry(&self) -> &Entry<T> {
+        // SAFETY: We load the entry either from cache or from contract storage.
+        //
+        //         This is safe because we are just returning a shared reference
+        //         from within a `&self` method. This also cannot change the
+        //         loaded value and thus cannot change the `mutate` flag of the
+        //         entry. Aliases using this method are safe since ink! is
+        //         single-threaded.
+        unsafe { &*self.load_through_cache().as_ptr() }
+    }
+
+    /// Returns an exclusive reference to the entry.
+    fn load_entry_mut(&mut self) -> &mut Entry<T> {
+        // SAFETY: We load the entry either from cache or from contract storage.
+        //
+        //         This is safe because we are just returning an exclusive reference
+        //         from within a `&mut self` method. This may change the
+        //         loaded value and thus the `mutate` flag of the entry is set.
+        //         Aliases cannot happen through this method since ink! is
+        //         single-threaded.
+        let entry = unsafe { &mut *self.load_through_cache().as_ptr() };
+        entry.replace_state(EntryState::Mutated);
+        entry
+    }
+
+    /// Returns a shared reference to the value.
+    ///
+    /// # Note
+    ///
+    /// This eventually lazily loads the value from the contract storage.
+    ///
+    /// # Panics
+    ///
+    /// If decoding the loaded value to `T` failed.
+    #[must_use]
+    pub fn get(&self) -> Option<&T> {
+        self.load_entry().value().into()
+    }
+
+    /// Returns an exclusive reference to the value.
+    ///
+    /// # Note
+    ///
+    /// This eventually lazily loads the value from the contract storage.
+    ///
+    /// # Panics
+    ///
+    /// If decoding the loaded value to `T` failed.
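+    ///
+    /// # Example
+    ///
+    /// Editor's sketch (not part of the original patch), mirroring the
+    /// `get_mut_works` unit test below:
+    ///
+    /// ```no_run
+    /// let mut cell = <LazyCell<u8>>::new(Some(1));
+    /// if let Some(value) = cell.get_mut() {
+    ///     *value += 1; // flags the entry as `Mutated`
+    /// }
+    /// assert_eq!(cell.get(), Some(&2));
+    /// ```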
+ #[must_use] + pub fn get_mut(&mut self) -> Option<&mut T> { + self.load_entry_mut().value_mut().into() + } +} + +#[cfg(test)] +mod tests { + use super::{ + Entry, + EntryState, + LazyCell, + }; + use crate::{ + env, + env::test::run_test, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, + }; + use ink_primitives::Key; + + #[test] + fn new_works() { + // Initialized via some value: + let mut a = >::new(Some(b'A')); + assert_eq!(a.key(), None); + assert_eq!( + a.entry(), + Some(&Entry::new(Some(b'A'), EntryState::Mutated)) + ); + assert_eq!(a.get(), Some(&b'A')); + assert_eq!(a.get_mut(), Some(&mut b'A')); + // Initialized as none: + let mut b = >::new(None); + assert_eq!(b.key(), None); + assert_eq!(b.entry(), Some(&Entry::new(None, EntryState::Mutated))); + assert_eq!(b.get(), None); + assert_eq!(b.get_mut(), None); + // Same as default or from: + let default_lc = >::default(); + let from_lc = LazyCell::from(u8::default()); + let new_lc = LazyCell::new(Some(u8::default())); + assert_eq!(default_lc.get(), from_lc.get()); + assert_eq!(from_lc.get(), new_lc.get()); + assert_eq!(new_lc.get(), Some(&u8::default())); + } + + #[test] + fn lazy_works() { + let root_key = Key([0x42; 32]); + let cell = >::lazy(root_key); + assert_eq!(cell.key(), Some(&root_key)); + } + + #[test] + fn lazy_get_works() -> env::Result<()> { + run_test::(|_| { + let cell = >::lazy(Key([0x42; 32])); + let value = cell.get(); + // We do the normally unreachable check in order to have an easier + // time finding the issue if the above execution did not panic. + assert_eq!(value, None); + Ok(()) + }) + } + + #[test] + fn get_mut_works() { + let mut cell = >::new(Some(1)); + assert_eq!(cell.get(), Some(&1)); + *cell.get_mut().unwrap() += 1; + assert_eq!(cell.get(), Some(&2)); + } + + #[test] + fn spread_layout_works() -> env::Result<()> { + run_test::(|_| { + let cell_a0 = >::new(Some(b'A')); + assert_eq!(cell_a0.get(), Some(&b'A')); + // Push `cell_a0` to the contract storage. + // Then, pull `cell_a1` from the contract storage and check if it is + // equal to `cell_a0`. + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&cell_a0, &mut KeyPtr::from(root_key)); + let cell_a1 = + as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(cell_a1.get(), cell_a0.get()); + assert_eq!(cell_a1.get(), Some(&b'A')); + assert_eq!( + cell_a1.entry(), + Some(&Entry::new(Some(b'A'), EntryState::Preserved)) + ); + // Also test if a lazily instantiated cell works: + let cell_a2 = >::lazy(root_key); + assert_eq!(cell_a2.get(), cell_a0.get()); + assert_eq!(cell_a2.get(), Some(&b'A')); + assert_eq!( + cell_a2.entry(), + Some(&Entry::new(Some(b'A'), EntryState::Preserved)) + ); + // Test if clearing works: + SpreadLayout::clear_spread(&cell_a1, &mut KeyPtr::from(root_key)); + let cell_a3 = >::lazy(root_key); + assert_eq!(cell_a3.get(), None); + assert_eq!( + cell_a3.entry(), + Some(&Entry::new(None, EntryState::Preserved)) + ); + Ok(()) + }) + } +} diff --git a/core/src/storage2/lazy/lazy_hmap.rs b/core/src/storage2/lazy/lazy_hmap.rs new file mode 100644 index 00000000000..fc14f35ba2f --- /dev/null +++ b/core/src/storage2/lazy/lazy_hmap.rs @@ -0,0 +1,893 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{
+    Entry,
+    EntryState,
+};
+use crate::{
+    hash::{
+        hasher::Hasher,
+        HashBuilder,
+    },
+    storage2::traits::{
+        clear_packed_root,
+        pull_packed_root_opt,
+        KeyPtr,
+        PackedLayout,
+        SpreadLayout,
+    },
+};
+use core::{
+    borrow::Borrow,
+    cell::{
+        RefCell,
+        UnsafeCell,
+    },
+    cmp::{
+        Eq,
+        Ord,
+    },
+    fmt,
+    fmt::Debug,
+    ptr::NonNull,
+};
+use ink_prelude::{
+    borrow::ToOwned,
+    boxed::Box,
+    collections::BTreeMap,
+    vec::Vec,
+};
+use ink_primitives::Key;
+
+/// The map for the contract storage entries.
+///
+/// # Note
+///
+/// We keep the whole entry in a `Box` in order to prevent pointer
+/// invalidation upon updating the cache through `&self` methods as in
+/// [`LazyHashMap::get`].
+pub type EntryMap<K, V> = BTreeMap<K, Box<Entry<V>>>;
+
+/// A lazy storage mapping that stores entries under their SCALE encoded key hashes.
+///
+/// # Note
+///
+/// This is mainly used as a low-level storage primitive by other high-level
+/// storage primitives in order to manage the contract storage for a whole
+/// mapping of storage cells.
+///
+/// This storage data structure might store its entries anywhere in the contract
+/// storage. It is the user's responsibility to keep track of the entries if it
+/// is necessary to do so.
+pub struct LazyHashMap<K, V, H> {
+    /// The offset key for the storage mapping.
+    ///
+    /// This offsets the mapping for the entries stored in the contract storage
+    /// so that all lazy hash map instances store equal entries at different
+    /// locations of the contract storage and avoid collisions.
+    key: Option<Key>,
+    /// The currently cached entries of the lazy storage mapping.
+    ///
+    /// This normally only represents a subset of the total set of elements.
+    /// An entry is cached as soon as it is loaded or written.
+    cached_entries: UnsafeCell<EntryMap<K, V>>,
+    /// The used hash builder.
+    hash_builder: RefCell<HashBuilder<H, Vec<u8>>>,
+}
+
+struct DebugEntryMap<'a, K, V>(&'a UnsafeCell<EntryMap<K, V>>);
+
+impl<'a, K, V> Debug for DebugEntryMap<'a, K, V>
+where
+    K: Debug,
+    V: Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_map()
+            .entries(unsafe { &*self.0.get() }.iter())
+            .finish()
+    }
+}
+
+impl<K, V, H> Debug for LazyHashMap<K, V, H>
+where
+    K: Debug,
+    V: Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // The `hash_builder` field is not really required or needed for debugging purposes.
+        f.debug_struct("LazyHashMap")
+            .field("key", &self.key)
+            .field("cached_entries", &DebugEntryMap(&self.cached_entries))
+            .finish()
+    }
+}
+
+#[test]
+fn debug_impl_works() {
+    use crate::hash::hasher::Blake2x256Hasher;
+    let mut hmap = <LazyHashMap<char, i32, Blake2x256Hasher>>::new();
+    // Empty hmap.
+    assert_eq!(
+        format!("{:?}", &hmap),
+        "LazyHashMap { key: None, cached_entries: {} }",
+    );
+    // Filled hmap.
+    hmap.put('A', Some(1));
+    hmap.put('B', Some(2));
+    hmap.put('C', None);
+    assert_eq!(
+        format!("{:?}", &hmap),
+        "LazyHashMap { \
+            key: None, \
+            cached_entries: {\
+                'A': Entry { \
+                    value: Some(1), \
+                    state: Mutated \
+                }, \
+                'B': Entry { \
+                    value: Some(2), \
+                    state: Mutated \
+                }, \
+                'C': Entry { \
+                    value: None, \
+                    state: Mutated \
+                }\
+            } \
+        }",
+    );
+}
+
+impl<K, V, H> SpreadLayout for LazyHashMap<K, V, H>
+where
+    K: Ord + scale::Encode,
+    V: PackedLayout,
+    H: Hasher,
+    Key: From<<H as Hasher>::Output>,
+{
+    const FOOTPRINT: u64 = 1;
+
+    fn pull_spread(ptr: &mut KeyPtr) -> Self {
+        Self::lazy(ptr.next_for::<Self>())
+    }
+
+    fn push_spread(&self, ptr: &mut KeyPtr) {
+        let offset_key = ptr.next_for::<Self>();
+        for (index, entry) in self.entries().iter() {
+            let root_key = self.to_offset_key(&offset_key, index);
+            entry.push_packed_root(&root_key);
+        }
+    }
+
+    #[inline]
+    fn clear_spread(&self, _ptr: &mut KeyPtr) {
+        // Low-level lazy abstractions won't perform automated clean-up since
+        // they generally are not aware of their entire set of associated
+        // elements. The high-level abstractions that build upon them are
+        // responsible for cleaning up.
+    }
+}
+
+// # Developer Note
+//
+// Even though `LazyHashMap` would require storing just a single key and thus
+// be a packable storage entity, we cannot really make it one since this could
+// allow for overlapping lazy hash map instances.
+// An example for this would be a `Pack<(LazyHashMap, LazyHashMap)>` where
+// both lazy hash maps would use the same underlying key and thus would apply
+// the same underlying key mapping.
+
+impl<K, V, H> Default for LazyHashMap<K, V, H>
+where
+    K: Ord,
+{
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<K, V, H> LazyHashMap<K, V, H>
+where
+    K: Ord,
+{
+    /// Creates a new empty lazy hash map.
+    ///
+    /// # Note
+    ///
+    /// A lazy map created this way cannot be used to load from the contract storage.
+    /// All operations that directly or indirectly load from storage will panic.
+    pub fn new() -> Self {
+        Self {
+            key: None,
+            cached_entries: UnsafeCell::new(EntryMap::new()),
+            hash_builder: RefCell::new(HashBuilder::from(Vec::new())),
+        }
+    }
+
+    /// Creates a new empty lazy hash map positioned at the given key.
+    ///
+    /// # Note
+    ///
+    /// This constructor is private and should never need to be called from
+    /// outside this module. It is used to construct a lazy hash map from a
+    /// key that is only useful upon a contract call. Use [`LazyHashMap::new`]
+    /// for construction during contract initialization.
+    fn lazy(key: Key) -> Self {
+        Self {
+            key: Some(key),
+            cached_entries: UnsafeCell::new(EntryMap::new()),
+            hash_builder: RefCell::new(HashBuilder::from(Vec::new())),
+        }
+    }
+
+    /// Returns the offset key of the lazy map if any.
+    pub fn key(&self) -> Option<&Key> {
+        self.key.as_ref()
+    }
+
+    /// Returns a shared reference to the underlying entries.
+    fn entries(&self) -> &EntryMap<K, V> {
+        // SAFETY: It is safe to return a `&` reference from a `&self` receiver.
+        unsafe { &*self.cached_entries.get() }
+    }
+
+    /// Returns an exclusive reference to the underlying entries.
+    fn entries_mut(&mut self) -> &mut EntryMap<K, V> {
+        // SAFETY: It is safe to return a `&mut` reference from a `&mut self` receiver.
+        unsafe { &mut *self.cached_entries.get() }
+    }
+
+    /// Puts the new value under the given key.
+    ///
+    /// # Note
+    ///
+    /// - Use [`LazyHashMap::put`]`(None)` in order to remove an element.
+    /// - Prefer this method over [`LazyHashMap::put_get`] if you are not interested
+    ///   in the old value of the same cell index.
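+    ///
+    /// # Example
+    ///
+    /// Editor's sketch (not part of the original patch); `Blake2x256Hasher`
+    /// is the hasher also used by the unit tests below:
+    ///
+    /// ```no_run
+    /// let mut hmap = <LazyHashMap<i32, u8, Blake2x256Hasher>>::new();
+    /// hmap.put(1, Some(b'A')); // insert without reading the old value
+    /// hmap.put(1, None);       // remove without reading the old value
+    /// ```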
+ /// + /// # Panics + /// + /// - If the lazy hash map is in an invalid state that forbids interaction + /// with the underlying contract storage. + /// - If the decoding of the old element at the given index failed. + pub fn put(&mut self, key: K, new_value: Option) { + self.entries_mut() + .insert(key, Box::new(Entry::new(new_value, EntryState::Mutated))); + } +} + +impl LazyHashMap +where + K: Ord + scale::Encode, + H: Hasher, + Key: From<::Output>, +{ + /// Returns an offset key for the given key pair. + fn to_offset_key(&self, storage_key: &Key, key: &Q) -> Key + where + K: Borrow, + Q: scale::Encode, + { + #[derive(scale::Encode)] + struct KeyPair<'a, Q> { + prefix: [u8; 11], + storage_key: &'a Key, + value_key: &'a Q, + } + let key_pair = KeyPair { + prefix: [ + b'i', b'n', b'k', b' ', b'h', b'a', b's', b'h', b'm', b'a', b'p', + ], + storage_key, + value_key: key, + }; + self.hash_builder + .borrow_mut() + .hash_encoded(&key_pair) + .into() + } + + /// Returns an offset key for the given key. + fn key_at(&self, key: &Q) -> Option + where + K: Borrow, + Q: scale::Encode, + { + self.key + .map(|storage_key| self.to_offset_key(&storage_key, key)) + } +} + +impl LazyHashMap +where + K: Ord + Eq + scale::Encode, + V: PackedLayout, + H: Hasher, + Key: From<::Output>, +{ + /// Lazily loads the value at the given index. + /// + /// # Note + /// + /// Only loads a value if `key` is set and if the value has not been loaded yet. + /// Returns the freshly loaded or already loaded entry of the value. + /// + /// # Safety + /// + /// This function has a `&self` receiver while returning an `Option<*mut T>` + /// which is unsafe in isolation. The caller has to determine how to forward + /// the returned `*mut T`. + /// + /// # Safety + /// + /// This is an `unsafe` operation because it has a `&self` receiver but returns + /// a `*mut Entry` pointer that allows for exclusive access. This is safe + /// within internal use only and should never be given outside of the lazy + /// entity for public `&self` methods. + unsafe fn lazily_load(&self, key: &Q) -> NonNull> + where + K: Borrow, + Q: Ord + scale::Encode + ToOwned, + { + // SAFETY: We have put the whole `cached_entries` mapping into an + // `UnsafeCell` because of this caching functionality. The + // trick here is that due to using `Box` internally + // we are able to return references to the cached entries + // while maintaining the invariant that mutating the caching + // `BTreeMap` will never invalidate those references. + // By returning a raw pointer we enforce an `unsafe` block at + // the caller site to underline that guarantees are given by the + // caller. + let cached_entries = &mut *self.cached_entries.get(); + use ink_prelude::collections::btree_map::Entry as BTreeMapEntry; + // We have to clone the key here because we do not have access to the unsafe + // raw entry API for Rust hash maps, yet since it is unstable. We can remove + // the contraints on `K: Clone` once we have access to this API. + // Read more about the issue here: https://github.com/rust-lang/rust/issues/56167 + match cached_entries.entry(key.to_owned()) { + BTreeMapEntry::Occupied(occupied) => { + NonNull::from(&mut **occupied.into_mut()) + } + BTreeMapEntry::Vacant(vacant) => { + let value = self + .key_at(key) + .map(|key| pull_packed_root_opt::(&key)) + .unwrap_or(None); + NonNull::from( + &mut **vacant + .insert(Box::new(Entry::new(value, EntryState::Preserved))), + ) + } + } + } + + /// Lazily loads the value associated with the given key. 
+    ///
+    /// # Note
+    ///
+    /// Only loads a value if `key` is set and if the value has not been loaded yet.
+    /// Returns a pointer to the freshly loaded or already loaded entry of the value.
+    ///
+    /// # Panics
+    ///
+    /// - If the lazy chunk is in an invalid state that forbids interaction.
+    /// - If the lazy chunk is not in a state that allows lazy loading.
+    fn lazily_load_mut<Q>(&mut self, index: &Q) -> &mut Entry<V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + scale::Encode + ToOwned<Owned = K>,
+    {
+        // SAFETY:
+        // - Returning a `&mut Entry<T>` is safe because entities inside the
+        //   cache are stored within a `Box` to not invalidate references into
+        //   them upon operating on the outer cache.
+        unsafe { &mut *self.lazily_load(index).as_ptr() }
+    }
+
+    /// Clears the underlying storage of the entry at the given index.
+    ///
+    /// # Safety
+    ///
+    /// For performance reasons this does not synchronize the lazy hash map's
+    /// memory-side cache which invalidates future accesses to the cleared entry.
+    /// Care should be taken when using this API.
+    ///
+    /// The general use of this API is to streamline `Drop` implementations of
+    /// high-level abstractions that build upon this low-level data structure.
+    pub fn clear_packed_at<Q>(&self, index: &Q)
+    where
+        K: Borrow<Q>,
+        V: PackedLayout,
+        Q: Ord + scale::Encode + ToOwned<Owned = K>,
+    {
+        let root_key = self.key_at(index).expect("cannot clear in lazy state");
+        if <V as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
+            // We need to load the entity before we remove its associated contract storage
+            // because it requires a deep clean-up which propagates clearing to its fields,
+            // for example in the case of `T` being a `storage::Box`.
+            let entity = self.get(index).expect("cannot clear a non-existing entity");
+            clear_packed_root::<V>(&entity, &root_key);
+        } else {
+            // The type does not require deep clean-up so we can simply clean up
+            // its associated storage cell and be done without having to load it first.
+            crate::env::clear_contract_storage(root_key);
+        }
+    }
+
+    /// Returns a shared reference to the value associated with the given key if any.
+    ///
+    /// # Panics
+    ///
+    /// - If the lazy chunk is in an invalid state that forbids interaction.
+    /// - If the decoding of the element at the given index failed.
+    pub fn get<Q>(&self, index: &Q) -> Option<&V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + scale::Encode + ToOwned<Owned = K>,
+    {
+        // SAFETY: Dereferencing the `*mut T` pointer into a `&T` is safe
+        //         since this method's receiver is `&self` so we do not
+        //         leak non-shared references to the outside.
+        unsafe { &*self.lazily_load(index).as_ptr() }.value().into()
+    }
+
+    /// Returns an exclusive reference to the value associated with the given key if any.
+    ///
+    /// # Panics
+    ///
+    /// - If the lazy chunk is in an invalid state that forbids interaction.
+    /// - If the decoding of the element at the given index failed.
+    pub fn get_mut<Q>(&mut self, index: &Q) -> Option<&mut V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + scale::Encode + ToOwned<Owned = K>,
+    {
+        self.lazily_load_mut(index).value_mut().into()
+    }
+
+    /// Puts the new value under the given key and returns the old value if any.
+    ///
+    /// # Note
+    ///
+    /// - Use [`LazyHashMap::put_get`]`(None)` in order to remove an element
+    ///   and retrieve the old element back.
+    ///
+    /// # Panics
+    ///
+    /// - If the lazy hash map is in an invalid state that forbids interaction.
+    /// - If the decoding of the old element at the given index failed.
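+    ///
+    /// # Example
+    ///
+    /// Editor's sketch (not part of the original patch), mirroring the
+    /// `put_get_works` unit test below:
+    ///
+    /// ```no_run
+    /// let mut hmap = <LazyHashMap<i32, u8, Blake2x256Hasher>>::new();
+    /// assert_eq!(hmap.put_get(&1, Some(b'A')), None);       // fresh insert
+    /// assert_eq!(hmap.put_get(&1, Some(b'B')), Some(b'A')); // replace
+    /// assert_eq!(hmap.put_get(&1, None), Some(b'B'));       // remove
+    /// ```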
+ pub fn put_get(&mut self, key: &Q, new_value: Option) -> Option + where + K: Borrow, + Q: Ord + scale::Encode + ToOwned, + { + self.lazily_load_mut(key).put(new_value) + } + + /// Swaps the values at entries with associated keys `x` and `y`. + /// + /// This operation tries to be as efficient as possible and reuse allocations. + /// + /// # Panics + /// + /// - If the lazy hashmap is in an invalid state that forbids interaction. + /// - If the decoding of one of the elements failed. + pub fn swap(&mut self, x: &Q1, y: &Q2) + where + K: Borrow + Borrow, + Q1: Ord + PartialEq + scale::Encode + ToOwned, + Q2: Ord + PartialEq + scale::Encode + ToOwned, + { + if x == y { + // Bail out early if both indices are the same. + return + } + let (loaded_x, loaded_y) = + // SAFETY: The loaded `x` and `y` entries are distinct from each + // other guaranteed by the previous check. Also `lazily_load` + // guarantees to return a pointer to a pinned entity + // so that the returned references do not conflict with + // each other. + unsafe { ( + &mut *self.lazily_load(x).as_ptr(), + &mut *self.lazily_load(y).as_ptr(), + ) }; + if loaded_x.value().is_none() && loaded_y.value().is_none() { + // Bail out since nothing has to be swapped if both values are `None`. + return + } + // Set the `mutate` flag since at this point at least one of the loaded + // values is guaranteed to be `Some`. + loaded_x.replace_state(EntryState::Mutated); + loaded_y.replace_state(EntryState::Mutated); + core::mem::swap(loaded_x.value_mut(), loaded_y.value_mut()); + } +} + +#[cfg(test)] +mod tests { + use super::{ + Entry, + EntryState, + LazyHashMap, + }; + use crate::{ + env, + hash::hasher::{ + Blake2x256Hasher, + Sha2x256Hasher, + }, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, + }; + use ink_primitives::Key; + + /// Asserts that the cached entries of the given `imap` is equal to the `expected` slice. + fn assert_cached_entries( + hmap: &LazyHashMap, + expected: &[(i32, Entry)], + ) { + assert_eq!(hmap.entries().len(), expected.len()); + for (given, expected) in hmap + .entries() + .iter() + .map(|(index, boxed_entry)| (*index, &**boxed_entry)) + .zip(expected.iter().map(|(index, entry)| (*index, entry))) + { + assert_eq!(given, expected); + } + } + + fn new_hmap() -> LazyHashMap { + >::new() + } + + #[test] + fn new_works() { + let hmap = new_hmap(); + // Key must be none. + assert_eq!(hmap.key(), None); + assert_eq!(hmap.key_at(&0), None); + // Cached elements must be empty. + assert_cached_entries(&hmap, &[]); + // Same as default: + let default_hmap = >::default(); + assert_eq!(hmap.key(), default_hmap.key()); + assert_eq!(hmap.entries(), default_hmap.entries()); + } + + #[test] + fn key_at_works() { + let key = Key([0x42; 32]); + + // BLAKE2 256-bit hasher: + let hmap1 = >::lazy(key); + // Key must be some. + assert_eq!(hmap1.key(), Some(&key)); + // Cached elements must be empty. + assert_cached_entries(&hmap1, &[]); + let hmap1_at_0 = b"\ + \x67\x7E\xD3\xA4\x72\x2A\x83\x60\ + \x96\x65\x0E\xCD\x1F\x2C\xE8\x5D\ + \xBF\x7E\xC0\xFF\x16\x40\x8A\xD8\ + \x75\x88\xDE\x52\xF5\x8B\x99\xAF"; + assert_eq!(hmap1.key_at(&0), Some(Key(*hmap1_at_0))); + // Same parameters must yield the same key: + // + // This tests an actual regression that happened because the + // hash accumulator was not reset after a hash finalization. 
+ assert_cached_entries(&hmap1, &[]); + assert_eq!(hmap1.key_at(&0), Some(Key(*hmap1_at_0))); + assert_eq!( + hmap1.key_at(&1), + Some(Key(*b"\ + \x9A\x46\x1F\xB3\xA1\xC4\x20\xF8\ + \xA0\xD9\xA7\x79\x2F\x07\xFB\x7D\ + \x49\xDD\xAB\x08\x67\x90\x96\x15\ + \xFB\x85\x36\x3B\x82\x94\x85\x3F")) + ); + // SHA2 256-bit hasher: + let hmap2 = >::lazy(key); + // Key must be some. + assert_eq!(hmap2.key(), Some(&key)); + // Cached elements must be empty. + assert_cached_entries(&hmap2, &[]); + assert_eq!( + hmap1.key_at(&0), + Some(Key(*b"\ + \x67\x7E\xD3\xA4\x72\x2A\x83\x60\ + \x96\x65\x0E\xCD\x1F\x2C\xE8\x5D\ + \xBF\x7E\xC0\xFF\x16\x40\x8A\xD8\ + \x75\x88\xDE\x52\xF5\x8B\x99\xAF")) + ); + assert_eq!( + hmap1.key_at(&1), + Some(Key(*b"\ + \x9A\x46\x1F\xB3\xA1\xC4\x20\xF8\ + \xA0\xD9\xA7\x79\x2F\x07\xFB\x7D\ + \x49\xDD\xAB\x08\x67\x90\x96\x15\ + \xFB\x85\x36\x3B\x82\x94\x85\x3F")) + ); + } + + #[test] + fn put_get_works() { + let mut hmap = new_hmap(); + // Put some values. + assert_eq!(hmap.put_get(&1, Some(b'A')), None); + assert_eq!(hmap.put_get(&2, Some(b'B')), None); + assert_eq!(hmap.put_get(&4, Some(b'C')), None); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(Some(b'C'), EntryState::Mutated)), + ], + ); + // Put none values. + assert_eq!(hmap.put_get(&3, None), None); + assert_eq!(hmap.put_get(&5, None), None); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(Some(b'C'), EntryState::Mutated)), + (5, Entry::new(None, EntryState::Preserved)), + ], + ); + // Override some values with none. + assert_eq!(hmap.put_get(&2, None), Some(b'B')); + assert_eq!(hmap.put_get(&4, None), Some(b'C')); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Mutated)), + (5, Entry::new(None, EntryState::Preserved)), + ], + ); + // Override none values with some. + assert_eq!(hmap.put_get(&3, Some(b'X')), None); + assert_eq!(hmap.put_get(&5, Some(b'Y')), None); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Mutated)), + (3, Entry::new(Some(b'X'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Mutated)), + (5, Entry::new(Some(b'Y'), EntryState::Mutated)), + ], + ); + } + + #[test] + fn get_works() { + let mut hmap = new_hmap(); + let nothing_changed = &[ + (1, Entry::new(None, EntryState::Preserved)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(Some(b'D'), EntryState::Mutated)), + ]; + // Put some values. 
+ assert_eq!(hmap.put_get(&1, None), None); + assert_eq!(hmap.put_get(&2, Some(b'B')), None); + assert_eq!(hmap.put_get(&3, None), None); + assert_eq!(hmap.put_get(&4, Some(b'D')), None); + assert_cached_entries(&hmap, nothing_changed); + // `get` works: + assert_eq!(hmap.get(&1), None); + assert_eq!(hmap.get(&2), Some(&b'B')); + assert_eq!(hmap.get(&3), None); + assert_eq!(hmap.get(&4), Some(&b'D')); + assert_cached_entries(&hmap, nothing_changed); + // `get_mut` works: + assert_eq!(hmap.get_mut(&1), None); + assert_eq!(hmap.get_mut(&2), Some(&mut b'B')); + assert_eq!(hmap.get_mut(&3), None); + assert_eq!(hmap.get_mut(&4), Some(&mut b'D')); + assert_cached_entries(&hmap, nothing_changed); + // `get` or `get_mut` without cache: + assert_eq!(hmap.get(&5), None); + assert_eq!(hmap.get_mut(&5), None); + } + + #[test] + fn put_works() { + let mut hmap = new_hmap(); + // Put some values. + hmap.put(1, None); + hmap.put(2, Some(b'B')); + hmap.put(4, None); + // The main difference between `put` and `put_get` is that `put` never + // loads from storage which also has one drawback: Putting a `None` + // value always ends-up in `Mutated` state for the entry even if the + // entry is already `None`. + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Mutated)), + ], + ); + // Overwrite entries: + hmap.put(1, Some(b'A')); + hmap.put(2, None); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Mutated)), + (4, Entry::new(None, EntryState::Mutated)), + ], + ); + } + + #[test] + fn swap_works() { + let mut hmap = new_hmap(); + let nothing_changed = &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ]; + // Put some values. + assert_eq!(hmap.put_get(&1, Some(b'A')), None); + assert_eq!(hmap.put_get(&2, Some(b'B')), None); + assert_eq!(hmap.put_get(&3, None), None); + assert_eq!(hmap.put_get(&4, None), None); + assert_cached_entries(&hmap, nothing_changed); + // Swap same indices: Check that nothing has changed. + for i in 0..4 { + hmap.swap(&i, &i); + } + assert_cached_entries(&hmap, nothing_changed); + // Swap `None` values: Check that nothing has changed. 
+ hmap.swap(&3, &4); + hmap.swap(&4, &3); + assert_cached_entries(&hmap, nothing_changed); + // Swap `Some` and `None`: + hmap.swap(&1, &3); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(Some(b'A'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap `Some` and `Some`: + hmap.swap(&2, &3); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap out of bounds: `None` and `None` + hmap.swap(&4, &5); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + (5, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap out of bounds: `Some` and `None` + hmap.swap(&3, &6); + assert_cached_entries( + &hmap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + (5, Entry::new(None, EntryState::Preserved)), + (6, Entry::new(Some(b'B'), EntryState::Mutated)), + ], + ); + } + + #[test] + fn spread_layout_works() -> env::Result<()> { + env::test::run_test::(|_| { + let mut hmap = new_hmap(); + let nothing_changed = &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ]; + // Put some values. + assert_eq!(hmap.put_get(&1, Some(b'A')), None); + assert_eq!(hmap.put_get(&2, Some(b'B')), None); + assert_eq!(hmap.put_get(&3, None), None); + assert_eq!(hmap.put_get(&4, None), None); + assert_cached_entries(&hmap, nothing_changed); + // Push the lazy index map onto the contract storage and then load + // another instance of it from the contract stoarge. + // Then: Compare both instances to be equal. + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&hmap, &mut KeyPtr::from(root_key)); + let hmap2 = + as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_cached_entries(&hmap2, &[]); + assert_eq!(hmap2.key(), Some(&Key([0x42; 32]))); + assert_eq!(hmap2.get(&1), Some(&b'A')); + assert_eq!(hmap2.get(&2), Some(&b'B')); + assert_eq!(hmap2.get(&3), None); + assert_eq!(hmap2.get(&4), None); + assert_cached_entries( + &hmap2, + &[ + (1, Entry::new(Some(b'A'), EntryState::Preserved)), + (2, Entry::new(Some(b'B'), EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + // Clear the first lazy index map instance and reload another instance + // to check whether the associated storage has actually been freed + // again: + SpreadLayout::clear_spread(&hmap2, &mut KeyPtr::from(root_key)); + // The above `clear_spread` call is a no-op since lazy index map is + // generally not aware of its associated elements. So we have to + // manually clear them from the contract storage which is what the + // high-level data structures like `storage::Vec` would command: + hmap2.clear_packed_at(&1); + hmap2.clear_packed_at(&2); + hmap2.clear_packed_at(&3); // Not really needed here. 
+ hmap2.clear_packed_at(&4); // Not really needed here. + let hmap3 = + as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_cached_entries(&hmap3, &[]); + assert_eq!(hmap3.get(&1), None); + assert_eq!(hmap3.get(&2), None); + assert_eq!(hmap3.get(&3), None); + assert_eq!(hmap3.get(&4), None); + assert_cached_entries( + &hmap3, + &[ + (1, Entry::new(None, EntryState::Preserved)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + Ok(()) + }) + } +} diff --git a/core/src/storage2/lazy/lazy_imap.rs b/core/src/storage2/lazy/lazy_imap.rs new file mode 100644 index 00000000000..a406d788aea --- /dev/null +++ b/core/src/storage2/lazy/lazy_imap.rs @@ -0,0 +1,732 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + Entry, + EntryState, +}; +use crate::storage2::traits::{ + clear_packed_root, + pull_packed_root_opt, + KeyPtr, + PackedLayout, + SpreadLayout, +}; +use core::{ + cell::UnsafeCell, + fmt, + fmt::Debug, + ptr::NonNull, +}; +use ink_prelude::{ + boxed::Box, + collections::BTreeMap, +}; +use ink_primitives::Key; + +/// The index type used in the lazy storage chunk. +pub type Index = u32; + +/// A lazy storage chunk that spans over a whole chunk of storage cells. +/// +/// # Note +/// +/// This is mainly used as low-level storage primitives by other high-level +/// storage primitives in order to manage the contract storage for a whole +/// chunk of storage cells. +/// +/// A chunk of storage cells is a contiguous range of 2^32 storage cells. +pub struct LazyIndexMap { + /// The offset key for the chunk of cells. + /// + /// If the lazy chunk has been initialized during contract initialization + /// the key will be `None` since there won't be a storage region associated + /// to the lazy chunk which prevents it from lazily loading elements. This, + /// however, is only checked at contract runtime. We might incorporate + /// compile-time checks for this particular use case later on. + key: Option, + /// The subset of currently cached entries of the lazy storage chunk. + /// + /// An entry is cached as soon as it is loaded or written. + cached_entries: UnsafeCell>, +} + +struct DebugEntryMap<'a, V>(&'a UnsafeCell>); + +impl<'a, V> Debug for DebugEntryMap<'a, V> +where + V: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_map() + .entries(unsafe { &*self.0.get() }.iter()) + .finish() + } +} + +impl Debug for LazyIndexMap +where + V: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LazyIndexMap") + .field("key", &self.key) + .field("cached_entries", &DebugEntryMap(&self.cached_entries)) + .finish() + } +} + +#[test] +fn debug_impl_works() { + let mut imap = >::new(); + // Empty imap. + assert_eq!( + format!("{:?}", &imap), + "LazyIndexMap { key: None, cached_entries: {} }", + ); + // Filled imap. 
+ imap.put(0, Some(1)); + imap.put(42, Some(2)); + imap.put(999, None); + assert_eq!( + format!("{:?}", &imap), + "LazyIndexMap { \ + key: None, \ + cached_entries: {\ + 0: Entry { \ + value: Some(1), \ + state: Mutated \ + }, \ + 42: Entry { \ + value: Some(2), \ + state: Mutated \ + }, \ + 999: Entry { \ + value: None, \ + state: Mutated \ + }\ + } \ + }", + ); +} + +impl Default for LazyIndexMap { + fn default() -> Self { + Self::new() + } +} + +/// The map for the contract storage entries. +/// +/// # Note +/// +/// We keep the whole entry in a `Box` in order to prevent pointer +/// invalidation upon updating the cache through `&self` methods as in +/// [`LazyIndexMap::get`]. +pub type EntryMap = BTreeMap>>; + +impl LazyIndexMap { + /// Creates a new empty lazy map. + /// + /// # Note + /// + /// A lazy map created this way cannot be used to load from the contract storage. + /// All operations that directly or indirectly load from storage will panic. + pub fn new() -> Self { + Self { + key: None, + cached_entries: UnsafeCell::new(EntryMap::new()), + } + } + + /// Creates a new empty lazy map positioned at the given key. + /// + /// # Note + /// + /// This constructor is private and should never need to be called from + /// outside this module. It is used to construct a lazy index map from a + /// key that is only useful upon a contract call. Use [`LazyIndexMap::new`] + /// for construction during contract initialization. + fn lazy(key: Key) -> Self { + Self { + key: Some(key), + cached_entries: UnsafeCell::new(EntryMap::new()), + } + } + + /// Returns the offset key of the lazy map if any. + pub fn key(&self) -> Option<&Key> { + self.key.as_ref() + } + + /// Returns a shared reference to the underlying entries. + fn entries(&self) -> &EntryMap { + // SAFETY: It is safe to return a `&` reference from a `&self` receiver. + unsafe { &*self.cached_entries.get() } + } + + /// Returns an exclusive reference to the underlying entries. + fn entries_mut(&mut self) -> &mut EntryMap { + // SAFETY: It is safe to return a `&mut` reference from a `&mut self` receiver. + unsafe { &mut *self.cached_entries.get() } + } + + /// Puts the new value at the given index. + /// + /// # Note + /// + /// - Use [`LazyIndexMap::put`]`(None)` in order to remove an element. + /// - Prefer this method over [`LazyIndexMap::put_get`] if you are not interested + /// in the old value of the same cell index. + /// + /// # Panics + /// + /// - If the lazy chunk is in an invalid state that forbids interaction. + /// - If the decoding of the old element at the given index failed. + pub fn put(&mut self, index: Index, new_value: Option) { + self.entries_mut() + .insert(index, Box::new(Entry::new(new_value, EntryState::Mutated))); + } +} + +impl SpreadLayout for LazyIndexMap +where + V: PackedLayout, +{ + const FOOTPRINT: u64 = 1_u64 << 32; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self::lazy(ptr.next_for::()) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + let offset_key = ptr.next_for::(); + for (&index, entry) in self.entries().iter() { + let root_key = offset_key + index; + entry.push_packed_root(&root_key); + } + } + + #[inline] + fn clear_spread(&self, _ptr: &mut KeyPtr) { + // Low-level lazy abstractions won't perform automated clean-up since + // they generally are not aware of their entire set of associated + // elements. The high-level abstractions that build upon them are + // responsible for cleaning up. 
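`push_spread` above writes every cached entry to `offset_key + index`: the map claims a contiguous range of 2^32 cells (its `FOOTPRINT`) and addresses them by plain offset arithmetic, exactly as `key_at` does further below. A sketch of that addressing, using the `Key + u64` arithmetic already used throughout this patch:

    use ink_primitives::Key;

    /// Returns the storage cell that entry `index` of a lazy index map
    /// rooted at `offset_key` occupies.
    fn cell_key_of(offset_key: Key, index: u32) -> Key {
        // Same arithmetic as `LazyIndexMap::key_at`: entries 0 and 1
        // occupy adjacent cells.
        offset_key + index as u64
    }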
+    }
+}
+
+impl<V> LazyIndexMap<V>
+where
+    V: PackedLayout,
+{
+    /// Clears the underlying storage of the entry at the given index.
+    ///
+    /// # Safety
+    ///
+    /// For performance reasons this does not synchronize the lazy index map's
+    /// memory-side cache, which invalidates future accesses to the cleared
+    /// entry. Care should be taken when using this API.
+    ///
+    /// The general use of this API is to streamline `Drop` implementations of
+    /// high-level abstractions that build upon this low-level data structure.
+    pub fn clear_packed_at(&self, index: Index) {
+        let root_key = self.key_at(index).expect("cannot clear in lazy state");
+        if <V as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
+            // We need to load the entity before we remove its associated contract storage
+            // because it requires a deep clean-up which propagates clearing to its fields,
+            // for example in the case of `T` being a `storage::Box`.
+            let entity = self.get(index).expect("cannot clear a non-existing entity");
+            clear_packed_root::<V>(&entity, &root_key);
+        } else {
+            // The type does not require deep clean-up so we can simply clean up
+            // its associated storage cell and be done without having to load it first.
+            crate::env::clear_contract_storage(root_key);
+        }
+    }
+}
+
+impl<V> LazyIndexMap<V>
+where
+    V: PackedLayout,
+{
+    /// Returns an offset key for the given index.
+    pub fn key_at(&self, index: Index) -> Option<Key> {
+        let key = self.key?;
+        let offset_key = key + index as u64;
+        Some(offset_key)
+    }
+
+    /// Lazily loads the value at the given index.
+    ///
+    /// # Note
+    ///
+    /// Only loads a value if `key` is set and if the value has not been loaded yet.
+    /// Returns the freshly loaded or already loaded entry of the value.
+    ///
+    /// # Safety
+    ///
+    /// This is an `unsafe` operation because it has a `&self` receiver but
+    /// returns a `*mut Entry<V>` pointer that allows for exclusive access.
+    /// The caller has to determine how to forward the returned pointer: it is
+    /// only safe for internal use and must never be handed out of the lazy
+    /// entity through public `&self` methods.
+    unsafe fn lazily_load(&self, index: Index) -> NonNull<Entry<V>> {
+        // SAFETY: We have put the whole `cached_entries` mapping into an
+        //         `UnsafeCell` because of this caching functionality. The
+        //         trick here is that due to using `Box` internally
+        //         we are able to return references to the cached entries
+        //         while maintaining the invariant that mutating the caching
+        //         `BTreeMap` will never invalidate those references.
+        //         By returning a raw pointer we enforce an `unsafe` block at
+        //         the caller site to underline that guarantees are given by
+        //         the caller.
+        let cached_entries = &mut *self.cached_entries.get();
+        use ink_prelude::collections::btree_map::Entry as BTreeMapEntry;
+        match cached_entries.entry(index) {
+            BTreeMapEntry::Occupied(occupied) => {
+                NonNull::from(&mut **occupied.into_mut())
+            }
+            BTreeMapEntry::Vacant(vacant) => {
+                let value = self
+                    .key_at(index)
+                    .map(|key| pull_packed_root_opt::<V>(&key))
+                    .unwrap_or(None);
+                NonNull::from(
+                    &mut **vacant
+                        .insert(Box::new(Entry::new(value, EntryState::Preserved))),
+                )
+            }
+        }
+    }
+
+    /// Lazily loads the value at the given index.
+    ///
+    /// # Note
+    ///
+    /// Only loads a value if `key` is set and if the value has not been loaded yet.
+    /// Returns the freshly loaded or already loaded entry of the value.
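The `REQUIRES_DEEP_CLEAN_UP` distinction above is the reason that constant exists at all: values that own no further storage can be wiped without decoding, while values that do own more (a `storage::Box`, for instance) must be loaded so clearing can recurse into their fields. The shape of that decision, extracted as a sketch over any packed element type `T` (the helper itself is illustrative, not part of the patch):

    use crate::storage2::traits::{clear_packed_root, PackedLayout, SpreadLayout};
    use ink_primitives::Key;

    // The same branch that `clear_packed_at` takes, with the cache
    // lookup abstracted away into a `load` closure.
    fn clear_cell<T: PackedLayout>(load: impl FnOnce() -> T, root_key: Key) {
        if <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
            // Deep: decode the value so clearing can recurse into the
            // storage it owns.
            let entity = load();
            clear_packed_root::<T>(&entity, &root_key);
        } else {
            // Shallow: wipe the single cell without ever decoding it.
            crate::env::clear_contract_storage(root_key);
        }
    }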
+ /// + /// # Panics + /// + /// - If the lazy chunk is in an invalid state that forbids interaction. + /// - If the lazy chunk is not in a state that allows lazy loading. + fn lazily_load_mut(&mut self, index: Index) -> &mut Entry { + // SAFETY: + // - Returning a `&mut Entry` is safe because entities inside the + // cache are stored within a `Box` to not invalidate references into + // them upon operating on the outer cache. + unsafe { &mut *self.lazily_load(index).as_ptr() } + } + + /// Returns a shared reference to the element at the given index if any. + /// + /// # Panics + /// + /// - If the lazy chunk is in an invalid state that forbids interaction. + /// - If the decoding of the element at the given index failed. + pub fn get(&self, index: Index) -> Option<&V> { + // SAFETY: Dereferencing the `*mut T` pointer into a `&T` is safe + // since this method's receiver is `&self` so we do not + // leak non-shared references to the outside. + unsafe { &*self.lazily_load(index).as_ptr() }.value().into() + } + + /// Returns an exclusive reference to the element at the given index if any. + /// + /// # Panics + /// + /// - If the lazy chunk is in an invalid state that forbids interaction. + /// - If the decoding of the element at the given index failed. + pub fn get_mut(&mut self, index: Index) -> Option<&mut V> { + self.lazily_load_mut(index).value_mut().into() + } + + /// Puts the new value at the given index and returns the old value if any. + /// + /// # Note + /// + /// - Use [`LazyIndexMap::put_get`]`(None)` in order to remove an element + /// and retrieve the old element back. + /// + /// # Panics + /// + /// - If the lazy chunk is in an invalid state that forbids interaction. + /// - If the decoding of the old element at the given index failed. + pub fn put_get(&mut self, index: Index, new_value: Option) -> Option { + self.lazily_load_mut(index).put(new_value) + } + + /// Swaps the values at indices `x` and `y`. + /// + /// This operation tries to be as efficient as possible and reuse allocations. + /// + /// # Panics + /// + /// - If the lazy chunk is in an invalid state that forbids interaction. + /// - If the decoding of one of the elements failed. + pub fn swap(&mut self, x: Index, y: Index) { + if x == y { + // Bail out early if both indices are the same. + return + } + let (loaded_x, loaded_y) = + // SAFETY: The loaded `x` and `y` entries are distinct from each + // other guaranteed by the previous check. Also `lazily_load` + // guarantees to return a pointer to a pinned entity + // so that the returned references do not conflict with + // each other. + unsafe { ( + &mut *self.lazily_load(x).as_ptr(), + &mut *self.lazily_load(y).as_ptr(), + ) }; + if loaded_x.value().is_none() && loaded_y.value().is_none() { + // Bail out since nothing has to be swapped if both values are `None`. + return + } + // Set the `mutate` flag since at this point at least one of the loaded + // values is guaranteed to be `Some`. + loaded_x.replace_state(EntryState::Mutated); + loaded_y.replace_state(EntryState::Mutated); + core::mem::swap(loaded_x.value_mut(), loaded_y.value_mut()); + } +} + +#[cfg(test)] +mod tests { + use super::{ + super::{ + Entry, + EntryState, + }, + Index, + LazyIndexMap, + }; + use crate::{ + env, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, + }; + use ink_primitives::Key; + + /// Asserts that the cached entries of the given `imap` is equal to the `expected` slice. 
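The safety comments above lean on one heap fact: a `BTreeMap` may move its nodes when it rebalances, but the allocation behind each boxed entry never moves, so a pointer taken through `&mut **boxed` stays valid across later cache insertions. That is precisely why `EntryMap` stores boxed entries rather than bare ones. A standalone demonstration in plain `std`:

    use std::collections::BTreeMap;

    fn main() {
        let mut cache: BTreeMap<u32, Box<i32>> = BTreeMap::new();
        cache.insert(1, Box::new(10));
        // Address of the heap value, not of the tree node holding the box.
        let before: *const i32 = &*cache[&1];
        // Force plenty of node splits and moves inside the tree.
        for i in 2..1_000 {
            cache.insert(i, Box::new(0));
        }
        let after: *const i32 = &*cache[&1];
        // The `Box` pointer moved between nodes; its allocation did not.
        assert_eq!(before, after);
    }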
+    fn assert_cached_entries(imap: &LazyIndexMap<u8>, expected: &[(Index, Entry<u8>)]) {
+        assert_eq!(imap.entries().len(), expected.len());
+        for (given, expected) in imap
+            .entries()
+            .iter()
+            .map(|(index, boxed_entry)| (*index, &**boxed_entry))
+            .zip(expected.iter().map(|(index, entry)| (*index, entry)))
+        {
+            assert_eq!(given, expected);
+        }
+    }
+
+    #[test]
+    fn new_works() {
+        let imap = <LazyIndexMap<u8>>::new();
+        // Key must be none.
+        assert_eq!(imap.key(), None);
+        assert_eq!(imap.key_at(0), None);
+        // Cached elements must be empty.
+        assert_cached_entries(&imap, &[]);
+        // Same as default:
+        let default_imap = <LazyIndexMap<u8> as Default>::default();
+        assert_eq!(imap.key(), default_imap.key());
+        assert_eq!(imap.entries(), default_imap.entries());
+    }
+
+    #[test]
+    fn lazy_works() {
+        let key = Key([0x42; 32]);
+        let imap = <LazyIndexMap<u8>>::lazy(key);
+        // Key must be set.
+        assert_eq!(imap.key(), Some(&key));
+        assert_eq!(imap.key_at(0), Some(key));
+        assert_eq!(imap.key_at(1), Some(key + 1u64));
+        // Cached elements must be empty.
+        assert_cached_entries(&imap, &[]);
+    }
+
+    #[test]
+    fn put_get_works() {
+        let mut imap = <LazyIndexMap<u8>>::new();
+        // Put some values.
+        assert_eq!(imap.put_get(1, Some(b'A')), None);
+        assert_eq!(imap.put_get(2, Some(b'B')), None);
+        assert_eq!(imap.put_get(4, Some(b'C')), None);
+        assert_cached_entries(
+            &imap,
+            &[
+                (1, Entry::new(Some(b'A'), EntryState::Mutated)),
+                (2, Entry::new(Some(b'B'), EntryState::Mutated)),
+                (4, Entry::new(Some(b'C'), EntryState::Mutated)),
+            ],
+        );
+        // Put none values.
+        assert_eq!(imap.put_get(3, None), None);
+        assert_eq!(imap.put_get(5, None), None);
+        assert_cached_entries(
+            &imap,
+            &[
+                (1, Entry::new(Some(b'A'), EntryState::Mutated)),
+                (2, Entry::new(Some(b'B'), EntryState::Mutated)),
+                (3, Entry::new(None, EntryState::Preserved)),
+                (4, Entry::new(Some(b'C'), EntryState::Mutated)),
+                (5, Entry::new(None, EntryState::Preserved)),
+            ],
+        );
+        // Override some values with none.
+        assert_eq!(imap.put_get(2, None), Some(b'B'));
+        assert_eq!(imap.put_get(4, None), Some(b'C'));
+        assert_cached_entries(
+            &imap,
+            &[
+                (1, Entry::new(Some(b'A'), EntryState::Mutated)),
+                (2, Entry::new(None, EntryState::Mutated)),
+                (3, Entry::new(None, EntryState::Preserved)),
+                (4, Entry::new(None, EntryState::Mutated)),
+                (5, Entry::new(None, EntryState::Preserved)),
+            ],
+        );
+        // Override none values with some.
+        assert_eq!(imap.put_get(3, Some(b'X')), None);
+        assert_eq!(imap.put_get(5, Some(b'Y')), None);
+        assert_cached_entries(
+            &imap,
+            &[
+                (1, Entry::new(Some(b'A'), EntryState::Mutated)),
+                (2, Entry::new(None, EntryState::Mutated)),
+                (3, Entry::new(Some(b'X'), EntryState::Mutated)),
+                (4, Entry::new(None, EntryState::Mutated)),
+                (5, Entry::new(Some(b'Y'), EntryState::Mutated)),
+            ],
+        );
+    }
+
+    #[test]
+    fn get_works() {
+        let mut imap = <LazyIndexMap<u8>>::new();
+        let nothing_changed = &[
+            (1, Entry::new(None, EntryState::Preserved)),
+            (2, Entry::new(Some(b'B'), EntryState::Mutated)),
+            (3, Entry::new(None, EntryState::Preserved)),
+            (4, Entry::new(Some(b'D'), EntryState::Mutated)),
+        ];
+        // Put some values.
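The constructors compared in `new_works` and `lazy_works` behave very differently at runtime: `new` yields a detached map (`key == None`) that can only serve what it has cached itself, while `lazy` attaches the map to a storage region so that misses can be faulted in. A short sketch (module-internal, since `lazy` is deliberately private):

    let mut detached = <LazyIndexMap<u8>>::new();
    detached.put(0, Some(b'A'));      // lives in the cache only
    assert_eq!(detached.key(), None); // no storage region to read through

    let attached = <LazyIndexMap<u8>>::lazy(Key([0x42; 32]));
    // Every index maps onto `key + index`, so reads may hit storage.
    assert_eq!(attached.key_at(7), Some(Key([0x42; 32]) + 7u64));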
+ assert_eq!(imap.put_get(1, None), None); + assert_eq!(imap.put_get(2, Some(b'B')), None); + assert_eq!(imap.put_get(3, None), None); + assert_eq!(imap.put_get(4, Some(b'D')), None); + assert_cached_entries(&imap, nothing_changed); + // `get` works: + assert_eq!(imap.get(1), None); + assert_eq!(imap.get(2), Some(&b'B')); + assert_eq!(imap.get(3), None); + assert_eq!(imap.get(4), Some(&b'D')); + assert_cached_entries(&imap, nothing_changed); + // `get_mut` works: + assert_eq!(imap.get_mut(1), None); + assert_eq!(imap.get_mut(2), Some(&mut b'B')); + assert_eq!(imap.get_mut(3), None); + assert_eq!(imap.get_mut(4), Some(&mut b'D')); + assert_cached_entries(&imap, nothing_changed); + // `get` or `get_mut` without cache: + assert_eq!(imap.get(5), None); + assert_eq!(imap.get_mut(5), None); + } + + #[test] + fn put_works() { + let mut imap = >::new(); + // Put some values. + imap.put(1, None); + imap.put(2, Some(b'B')); + imap.put(4, None); + // The main difference between `put` and `put_get` is that `put` never + // loads from storage which also has one drawback: Putting a `None` + // value always ends-up in `Mutated` state for the entry even if the + // entry is already `None`. + assert_cached_entries( + &imap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Mutated)), + ], + ); + // Overwrite entries: + imap.put(1, Some(b'A')); + imap.put(2, None); + assert_cached_entries( + &imap, + &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(None, EntryState::Mutated)), + (4, Entry::new(None, EntryState::Mutated)), + ], + ); + } + + #[test] + fn swap_works() { + let mut imap = >::new(); + let nothing_changed = &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ]; + // Put some values. + assert_eq!(imap.put_get(1, Some(b'A')), None); + assert_eq!(imap.put_get(2, Some(b'B')), None); + assert_eq!(imap.put_get(3, None), None); + assert_eq!(imap.put_get(4, None), None); + assert_cached_entries(&imap, nothing_changed); + // Swap same indices: Check that nothing has changed. + for i in 0..4 { + imap.swap(i, i); + } + assert_cached_entries(&imap, nothing_changed); + // Swap `None` values: Check that nothing has changed. 
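The trade-off spelled out in `put_works` above deserves emphasis: `put` is write-only and never reads storage, so it must pessimistically mark every entry `Mutated`; `put_get` pays for a (possibly lazy) read of the old value and can therefore both report it and keep `None`-over-`None` writes in the `Preserved` state. A minimal sketch of choosing between the two:

    let mut imap = <LazyIndexMap<u8>>::new();

    // Write-only: no read, no old value, unconditionally `Mutated`.
    imap.put(1, Some(b'A'));

    // Read-modify-write: loads the previous value and returns it.
    let old = imap.put_get(1, Some(b'B'));
    assert_eq!(old, Some(b'A'));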
+ imap.swap(3, 4); + imap.swap(4, 3); + assert_cached_entries(&imap, nothing_changed); + // Swap `Some` and `None`: + imap.swap(1, 3); + assert_cached_entries( + &imap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(Some(b'A'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap `Some` and `Some`: + imap.swap(2, 3); + assert_cached_entries( + &imap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap out of bounds: `None` and `None` + imap.swap(4, 5); + assert_cached_entries( + &imap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(Some(b'B'), EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + (5, Entry::new(None, EntryState::Preserved)), + ], + ); + // Swap out of bounds: `Some` and `None` + imap.swap(3, 6); + assert_cached_entries( + &imap, + &[ + (1, Entry::new(None, EntryState::Mutated)), + (2, Entry::new(Some(b'A'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Mutated)), + (4, Entry::new(None, EntryState::Preserved)), + (5, Entry::new(None, EntryState::Preserved)), + (6, Entry::new(Some(b'B'), EntryState::Mutated)), + ], + ); + } + + #[test] + fn spread_layout_works() -> env::Result<()> { + env::test::run_test::(|_| { + let mut imap = >::new(); + let nothing_changed = &[ + (1, Entry::new(Some(b'A'), EntryState::Mutated)), + (2, Entry::new(Some(b'B'), EntryState::Mutated)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ]; + // Put some values. + assert_eq!(imap.put_get(1, Some(b'A')), None); + assert_eq!(imap.put_get(2, Some(b'B')), None); + assert_eq!(imap.put_get(3, None), None); + assert_eq!(imap.put_get(4, None), None); + assert_cached_entries(&imap, nothing_changed); + // Push the lazy index map onto the contract storage and then load + // another instance of it from the contract stoarge. + // Then: Compare both instances to be equal. + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&imap, &mut KeyPtr::from(root_key)); + let imap2 = as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_cached_entries(&imap2, &[]); + assert_eq!(imap2.get(1), Some(&b'A')); + assert_eq!(imap2.get(2), Some(&b'B')); + assert_eq!(imap2.get(3), None); + assert_eq!(imap2.get(4), None); + assert_cached_entries( + &imap2, + &[ + (1, Entry::new(Some(b'A'), EntryState::Preserved)), + (2, Entry::new(Some(b'B'), EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + // Clear the first lazy index map instance and reload another instance + // to check whether the associated storage has actually been freed + // again: + SpreadLayout::clear_spread(&imap2, &mut KeyPtr::from(root_key)); + // The above `clear_spread` call is a no-op since lazy index map is + // generally not aware of its associated elements. So we have to + // manually clear them from the contract storage which is what the + // high-level data structures like `storage::Vec` would command: + imap2.clear_packed_at(1); + imap2.clear_packed_at(2); + imap2.clear_packed_at(3); // Not really needed here. + imap2.clear_packed_at(4); // Not really needed here. 
+ let imap3 = as SpreadLayout>::pull_spread( + &mut KeyPtr::from(root_key), + ); + assert_cached_entries(&imap3, &[]); + assert_eq!(imap3.get(1), None); + assert_eq!(imap3.get(2), None); + assert_eq!(imap3.get(3), None); + assert_eq!(imap3.get(4), None); + assert_cached_entries( + &imap3, + &[ + (1, Entry::new(None, EntryState::Preserved)), + (2, Entry::new(None, EntryState::Preserved)), + (3, Entry::new(None, EntryState::Preserved)), + (4, Entry::new(None, EntryState::Preserved)), + ], + ); + Ok(()) + }) + } +} diff --git a/core/src/storage2/lazy/mod.rs b/core/src/storage2/lazy/mod.rs new file mode 100644 index 00000000000..c68c70e1f5c --- /dev/null +++ b/core/src/storage2/lazy/mod.rs @@ -0,0 +1,273 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Low-level collections and data structures to manage storage entities in the +//! persisted contract storage. +//! +//! Users should generally avoid using these collections directly in their +//! contracts and should instead adhere to the high-level collections found +//! in [`crate::storage2::collections`]. +//! The low-level collections are mainly used as building blocks for internals +//! of other higher-level storage collections. +//! +//! These low-level collections are not aware of the elements they manage thus +//! extra care has to be taken when operating directly on them. + +mod entry; +mod lazy_array; +mod lazy_cell; +mod lazy_hmap; +mod lazy_imap; + +use self::entry::{ + Entry, + EntryState, +}; +pub use self::{ + lazy_array::{ + LazyArray, + LazyArrayLength, + }, + lazy_cell::LazyCell, + lazy_hmap::LazyHashMap, + lazy_imap::LazyIndexMap, +}; +use crate::storage2::traits::{ + KeyPtr, + SpreadLayout, +}; +use ink_primitives::Key; + +/// A lazy storage entity. +/// +/// This loads its value from storage upon first use. +/// +/// # Note +/// +/// Use this if the storage field doesn't need to be loaded in some or most cases. +#[derive(Debug)] +pub struct Lazy +where + T: SpreadLayout, +{ + cell: LazyCell, +} + +impl SpreadLayout for Lazy +where + T: SpreadLayout, +{ + const FOOTPRINT: u64 = ::FOOTPRINT; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Self { + cell: as SpreadLayout>::pull_spread(ptr), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::push_spread(&self.cell, ptr) + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + SpreadLayout::clear_spread(&self.cell, ptr) + } +} + +impl Lazy +where + T: SpreadLayout, +{ + /// Creates an eagerly populated lazy storage value. + #[must_use] + pub fn new(value: T) -> Self { + Self { + cell: LazyCell::new(Some(value)), + } + } + + /// Creates a true lazy storage value for the given key. + #[must_use] + pub fn lazy(key: Key) -> Self { + Self { + cell: LazyCell::lazy(key), + } + } +} + +impl Lazy +where + T: SpreadLayout, +{ + /// Returns a shared reference to the lazily loaded value. + /// + /// # Note + /// + /// This loads the value from the contract storage if this did not happen before. 
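In contract code the point of `Lazy<T>` is exactly the deferral described above: a field wrapped in it costs nothing on messages that never touch it. A hedged sketch of the intended pattern; the struct and functions are invented for illustration and are not part of this patch:

    // `config` is rarely read, so wrap it in `Lazy` to skip its storage
    // load on messages that never touch it.
    struct Flipper {
        value: bool,            // loaded whenever the contract is
        config: Lazy<[u8; 32]>, // loaded only on first access
    }

    fn flip(contract: &mut Flipper) {
        // `config` is never dereferenced here, so its cells stay unread.
        contract.value = !contract.value;
    }

    fn config_byte(contract: &Flipper) -> u8 {
        // First access pulls the value from contract storage.
        Lazy::get(&contract.config)[0]
    }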
+ /// + /// # Panics + /// + /// If loading from contract storage failed. + #[must_use] + pub fn get(lazy: &Self) -> &T { + lazy.cell.get().expect("encountered empty storage cell") + } + + /// Returns an exclusive reference to the lazily loaded value. + /// + /// # Note + /// + /// This loads the value from the contract storage if this did not happed before. + /// + /// # Panics + /// + /// If loading from contract storage failed. + #[must_use] + pub fn get_mut(lazy: &mut Self) -> &mut T { + lazy.cell.get_mut().expect("encountered empty storage cell") + } +} + +impl From for Lazy +where + T: SpreadLayout, +{ + fn from(value: T) -> Self { + Self::new(value) + } +} + +impl Default for Lazy +where + T: Default + SpreadLayout, +{ + fn default() -> Self { + Self::new(Default::default()) + } +} + +impl core::cmp::PartialEq for Lazy +where + T: PartialEq + SpreadLayout, +{ + fn eq(&self, other: &Self) -> bool { + PartialEq::eq(Lazy::get(self), Lazy::get(other)) + } +} + +impl core::cmp::Eq for Lazy where T: Eq + SpreadLayout {} + +impl core::cmp::PartialOrd for Lazy +where + T: PartialOrd + SpreadLayout, +{ + fn partial_cmp(&self, other: &Self) -> Option { + PartialOrd::partial_cmp(Lazy::get(self), Lazy::get(other)) + } + fn lt(&self, other: &Self) -> bool { + PartialOrd::lt(Lazy::get(self), Lazy::get(other)) + } + fn le(&self, other: &Self) -> bool { + PartialOrd::le(Lazy::get(self), Lazy::get(other)) + } + fn ge(&self, other: &Self) -> bool { + PartialOrd::ge(Lazy::get(self), Lazy::get(other)) + } + fn gt(&self, other: &Self) -> bool { + PartialOrd::gt(Lazy::get(self), Lazy::get(other)) + } +} + +impl core::cmp::Ord for Lazy +where + T: core::cmp::Ord + SpreadLayout, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + Ord::cmp(Lazy::get(self), Lazy::get(other)) + } +} + +impl core::fmt::Display for Lazy +where + T: core::fmt::Display + SpreadLayout, +{ + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + core::fmt::Display::fmt(Lazy::get(self), f) + } +} + +impl core::hash::Hash for Lazy +where + T: core::hash::Hash + SpreadLayout, +{ + fn hash(&self, state: &mut H) { + Lazy::get(self).hash(state); + } +} + +impl core::convert::AsRef for Lazy +where + T: SpreadLayout, +{ + fn as_ref(&self) -> &T { + Lazy::get(self) + } +} + +impl core::convert::AsMut for Lazy +where + T: SpreadLayout, +{ + fn as_mut(&mut self) -> &mut T { + Lazy::get_mut(self) + } +} + +impl ink_prelude::borrow::Borrow for Lazy +where + T: SpreadLayout, +{ + fn borrow(&self) -> &T { + Lazy::get(self) + } +} + +impl ink_prelude::borrow::BorrowMut for Lazy +where + T: SpreadLayout, +{ + fn borrow_mut(&mut self) -> &mut T { + Lazy::get_mut(self) + } +} + +impl core::ops::Deref for Lazy +where + T: SpreadLayout, +{ + type Target = T; + + fn deref(&self) -> &Self::Target { + Lazy::get(self) + } +} + +impl core::ops::DerefMut for Lazy +where + T: SpreadLayout, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + Lazy::get_mut(self) + } +} diff --git a/core/src/storage2/memory.rs b/core/src/storage2/memory.rs new file mode 100644 index 00000000000..723b857293a --- /dev/null +++ b/core/src/storage2/memory.rs @@ -0,0 +1,282 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::storage2::traits::{ + KeyPtr, + SpreadLayout, +}; +use core::{ + convert::{ + self, + AsRef, + }, + fmt, + fmt::Display, + ops::{ + Deref, + DerefMut, + }, +}; +use ink_prelude::borrow::{ + Borrow, + BorrowMut, +}; + +/// An instance that is solely stored within the contract's memory. +/// +/// This will never be stored to or loaded from contract storage. +/// +/// # Note +/// +/// Use instances of this type in order to have some shared state between +/// contract messages and functions. +/// Its usage is comparable to the Solidity's `memory` instances. +/// Pulling an instance of this type from the contract storage will always +/// yield a default constructed value. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Memory { + /// The inner value that will always be stored within contract memory. + inner: T, +} + +impl SpreadLayout for Memory +where + T: Default, +{ + const FOOTPRINT: u64 = 0; + + fn pull_spread(_ptr: &mut KeyPtr) -> Self { + Default::default() + } + + fn push_spread(&self, _ptr: &mut KeyPtr) {} + fn clear_spread(&self, _ptr: &mut KeyPtr) {} +} + +impl Memory { + /// Creates a new memory instance. + pub fn new(inner: T) -> Self { + Self { inner } + } + + /// Returns a shared reference to the inner `T`. + pub fn get(memory: &Self) -> &T { + &memory.inner + } + + /// Returns an exclusive reference to the inner `T`. 
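Since `Memory<T>` declares `FOOTPRINT = 0` and empty `push_spread`/`clear_spread` bodies, it reserves no storage cells and never persists anything; pulling it back, as documented above, just default-constructs the value. A sketch of the consequence, assuming the off-chain test environment used elsewhere in this file:

    let scratch = Memory::new(77_i32);
    let root_key = Key([0x42; 32]);
    // Writes nothing: `push_spread` is a no-op for `Memory`.
    SpreadLayout::push_spread(&scratch, &mut KeyPtr::from(root_key));
    // Pulls `i32::default()`, not the 77 that was "pushed".
    let pulled: Memory<i32> = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key));
    assert_eq!(*pulled, 0);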
+ pub fn get_mut(memory: &mut Self) -> &mut T { + &mut memory.inner + } +} + +impl From for Memory { + fn from(inner: T) -> Self { + Self::new(inner) + } +} + +impl Default for Memory +where + T: Default, +{ + fn default() -> Self { + Self::new(::default()) + } +} + +impl Display for Memory +where + T: Display, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + core::fmt::Display::fmt(Self::get(self), f) + } +} + +impl Deref for Memory { + type Target = T; + + fn deref(&self) -> &Self::Target { + Self::get(self) + } +} + +impl DerefMut for Memory { + fn deref_mut(&mut self) -> &mut Self::Target { + Self::get_mut(self) + } +} + +impl AsRef for Memory +where + T: SpreadLayout, +{ + fn as_ref(&self) -> &T { + Self::get(self) + } +} + +impl convert::AsMut for Memory +where + T: SpreadLayout, +{ + fn as_mut(&mut self) -> &mut T { + Self::get_mut(self) + } +} + +impl Borrow for Memory +where + T: SpreadLayout, +{ + fn borrow(&self) -> &T { + Self::get(self) + } +} + +impl BorrowMut for Memory +where + T: SpreadLayout, +{ + fn borrow_mut(&mut self) -> &mut T { + Self::get_mut(self) + } +} + +#[cfg(test)] +mod tests { + use super::Memory; + use crate::{ + env, + env::test::DefaultAccounts, + storage2::traits::{ + KeyPtr, + SpreadLayout, + }, + }; + use core::{ + convert::{ + AsMut, + AsRef, + }, + ops::{ + Deref, + DerefMut, + }, + }; + use ink_prelude::borrow::{ + Borrow, + BorrowMut, + }; + use ink_primitives::Key; + + type ComplexTuple = (u8, [i32; 4], (bool, i32)); + + fn complex_value() -> ComplexTuple { + (b'A', [0x00; 4], (true, 42)) + } + + #[test] + fn new_works() { + let mut expected = complex_value(); + let mut mem = Memory::new(expected); + assert_eq!( as Deref>::deref(&mem), &expected); + assert_eq!( as DerefMut>::deref_mut(&mut mem), &mut expected); + assert_eq!( as AsRef<_>>::as_ref(&mem), &expected); + assert_eq!( as AsMut<_>>::as_mut(&mut mem), &mut expected); + assert_eq!(Borrow::::borrow(&mem), &expected); + assert_eq!( + BorrowMut::::borrow_mut(&mut mem), + &mut expected + ); + assert_eq!(Memory::get(&mem), &expected); + assert_eq!(Memory::get_mut(&mut mem), &mut expected); + } + + #[test] + fn from_works() { + let mut expected = complex_value(); + let mut from = Memory::from(expected); + assert_eq!(from, Memory::new(expected)); + assert_eq!(Memory::get(&from), &expected); + assert_eq!(Memory::get_mut(&mut from), &mut expected); + } + + #[test] + fn default_works() { + use core::fmt::Debug; + fn assert_default() + where + T: Debug + Default + PartialEq, + { + let mut memory_default = as Default>::default(); + let mut default = ::default(); + assert_eq!(>::get(&memory_default), &default); + assert_eq!(>::get_mut(&mut memory_default), &mut default); + } + assert_default::(); + assert_default::(); + assert_default::>(); + assert_default::>(); + } + + #[test] + fn spread_layout_push_pull_works() { + let p1 = Memory::new((b'A', [0x00; 4], (true, 42))); + assert_eq!(*p1, (b'A', [0x00; 4], (true, 42))); + assert_ne!(p1, Default::default()); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&p1, &mut KeyPtr::from(root_key)); + // Now load another instance of a pack from the same key and check + // if both instances are equal: + let p2 = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); + assert_ne!(p1, p2); + assert_eq!(p2, Default::default()); + } + + fn run_test(f: F) + where + F: FnOnce(DefaultAccounts), + { + env::test::run_test::(|default_accounts| { + f(default_accounts); + Ok(()) + }) + .unwrap() + } + + #[test] + fn spread_layout_clear_works() { + 
run_test(|_| { + // Clearing a memory instance should have no effect on the underlying + // contract storage. We can test this by pushing and pulling a storage + // affecting entity in between on the same storage region: + let root_key = Key([0x42; 32]); + ::push_spread(&42, &mut KeyPtr::from(root_key)); + let loaded1 = ::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(loaded1, 42); + let mem = Memory::new(77); + SpreadLayout::push_spread(&mem, &mut KeyPtr::from(root_key)); + let loaded2 = ::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(loaded2, 42); + // Now we clear the `i32` from storage and check whether that works. + // We load as `Option` in order to expect `None`: + ::clear_spread(&loaded2, &mut KeyPtr::from(root_key)); + use crate::storage2::traits::pull_packed_root_opt; + let loaded3 = pull_packed_root_opt::>(&root_key); + assert_eq!(loaded3, None); + }) + } +} diff --git a/core/src/storage2/mod.rs b/core/src/storage2/mod.rs new file mode 100644 index 00000000000..a27f67c77d3 --- /dev/null +++ b/core/src/storage2/mod.rs @@ -0,0 +1,33 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Core abstractions for storage manipulation. (revision 2) + +pub mod alloc; +pub mod collections; +pub mod lazy; +mod memory; +mod pack; +pub mod traits; + +#[doc(inline)] +pub use self::{ + collections::{ + Box, + Vec, + }, + lazy::Lazy, + memory::Memory, + pack::Pack, +}; diff --git a/core/src/storage2/pack.rs b/core/src/storage2/pack.rs new file mode 100644 index 00000000000..0a1beff238d --- /dev/null +++ b/core/src/storage2/pack.rs @@ -0,0 +1,413 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::storage2::traits::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + KeyPtr, + PackedLayout, + SpreadLayout, +}; +use ink_primitives::Key; + +/// Packs the inner `T` so that it only occupies a single contract storage cell. +/// +/// # Note +/// +/// This is an important modular building stone in order to manage contract +/// storage occupation. By default types try to distribute themselves onto +/// their respective contract storage area. However, upon packing them into +/// `Pack` they will be compressed to only ever make use of a single +/// contract storage cell. Sometimes this can be advantageous for performance +/// reasons. +/// +/// # Usage +/// +/// - A `Pack` is equivalent to `i32` in its storage occupation. 
+/// - A `Pack<(i32, i32)>` will occupy a single cell compared to `(i32, i32)` +/// which occupies a cell per `i32`. +/// - A `Lazy>` lazily loads a `Pack<[u8; 8]>` which occupies +/// a single cell whereas a `[u8; 8]` would occupy 8 cells in total - one for +/// each `u8`. +/// - Rust collections will never use more than a single cell. So +/// `Pack>` and `LinkedList` will occupy the same amount of +/// cells, namely 1. +/// - Packs can be packed. So for example a +/// `Pack<(Pack<(i32, i32)>, Pack<[u8; 8]>)` uses just one cell instead of +/// two cells which is the case for `(Pack<(i32, i32)>, Pack<[u8; 8]>)`. +/// - Not all `storage` types can be packed. Only those that are implementing +/// the `PackedLayout` trait. For example `storage::Vec` does not implement +/// this trait and thus cannot be packed. +/// +/// As a general advice pack values together that are frequently used together. +/// Also pack many very small elements (e.g. `u8`, `bool`, `u16`) together. +#[derive(Debug, Copy, Clone, scale::Encode, scale::Decode)] +pub struct Pack { + /// The packed `T` value. + inner: T, +} + +impl Pack { + /// Creates a new packed value. + pub fn new(value: T) -> Self { + Self { inner: value } + } + + /// Returns the packed value. + pub fn into_inner(pack: Self) -> T { + pack.inner + } + + /// Returns a shared reference to the packed value. + pub fn as_inner(pack: &Pack) -> &T { + &pack.inner + } + + /// Returns an exclusive reference to the packed value. + pub fn as_inner_mut(pack: &mut Pack) -> &mut T { + &mut pack.inner + } +} + +impl SpreadLayout for Pack +where + T: PackedLayout, +{ + const FOOTPRINT: u64 = 1; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + Pack::from(forward_pull_packed::(ptr)) + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(Self::as_inner(self), ptr) + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(Self::as_inner(self), ptr) + } +} + +impl PackedLayout for Pack +where + T: PackedLayout, +{ + fn pull_packed(&mut self, at: &Key) { + ::pull_packed(Self::as_inner_mut(self), at) + } + fn push_packed(&self, at: &Key) { + ::push_packed(Self::as_inner(self), at) + } + fn clear_packed(&self, at: &Key) { + ::clear_packed(Self::as_inner(self), at) + } +} + +impl From for Pack { + fn from(value: T) -> Self { + Self::new(value) + } +} + +impl Default for Pack +where + T: Default, +{ + fn default() -> Self { + Self::new(Default::default()) + } +} + +impl core::ops::Deref for Pack { + type Target = T; + + fn deref(&self) -> &Self::Target { + Self::as_inner(self) + } +} + +impl core::ops::DerefMut for Pack { + fn deref_mut(&mut self) -> &mut Self::Target { + Self::as_inner_mut(self) + } +} + +impl core::cmp::PartialEq for Pack +where + T: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + PartialEq::eq(Self::as_inner(self), Self::as_inner(other)) + } +} + +impl core::cmp::Eq for Pack where T: Eq {} + +impl core::cmp::PartialOrd for Pack +where + T: PartialOrd, +{ + fn partial_cmp(&self, other: &Self) -> Option { + PartialOrd::partial_cmp(Self::as_inner(self), Self::as_inner(other)) + } + fn lt(&self, other: &Self) -> bool { + PartialOrd::lt(Self::as_inner(self), Self::as_inner(other)) + } + fn le(&self, other: &Self) -> bool { + PartialOrd::le(Self::as_inner(self), Self::as_inner(other)) + } + fn ge(&self, other: &Self) -> bool { + PartialOrd::ge(Self::as_inner(self), Self::as_inner(other)) + } + fn gt(&self, other: &Self) -> bool { + PartialOrd::gt(Self::as_inner(self), Self::as_inner(other)) + } +} + +impl core::cmp::Ord for 
Pack +where + T: core::cmp::Ord, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + Ord::cmp(Self::as_inner(self), Self::as_inner(other)) + } +} + +impl core::fmt::Display for Pack +where + T: core::fmt::Display, +{ + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + core::fmt::Display::fmt(Self::as_inner(self), f) + } +} + +impl core::hash::Hash for Pack +where + T: core::hash::Hash, +{ + fn hash(&self, state: &mut H) { + Self::as_inner(self).hash(state); + } +} + +impl core::convert::AsRef for Pack { + fn as_ref(&self) -> &T { + Self::as_inner(self) + } +} + +impl core::convert::AsMut for Pack { + fn as_mut(&mut self) -> &mut T { + Self::as_inner_mut(self) + } +} + +impl ink_prelude::borrow::Borrow for Pack { + fn borrow(&self) -> &T { + Self::as_inner(self) + } +} + +impl ink_prelude::borrow::BorrowMut for Pack { + fn borrow_mut(&mut self) -> &mut T { + Self::as_inner_mut(self) + } +} + +#[cfg(test)] +mod tests { + use super::Pack; + use crate::{ + env, + env::test::DefaultAccounts, + storage2::traits::{ + pull_packed_root, + push_packed_root, + KeyPtr, + SpreadLayout, + }, + }; + use core::{ + cmp::Ordering, + convert::{ + AsMut, + AsRef, + }, + ops::{ + Deref, + DerefMut, + }, + }; + use ink_prelude::borrow::{ + Borrow, + BorrowMut, + }; + use ink_primitives::Key; + + type ComplexTuple = (u8, [i32; 4], (bool, i32)); + + fn complex_value() -> ComplexTuple { + (b'A', [0x00; 4], (true, 42)) + } + + #[test] + fn new_works() { + let mut expected = complex_value(); + let mut pack = Pack::new(expected); + assert_eq!( as Deref>::deref(&pack), &expected); + assert_eq!( as DerefMut>::deref_mut(&mut pack), &mut expected); + assert_eq!( as AsRef<_>>::as_ref(&pack), &expected); + assert_eq!( as AsMut<_>>::as_mut(&mut pack), &mut expected); + assert_eq!(Borrow::::borrow(&pack), &expected); + assert_eq!( + BorrowMut::::borrow_mut(&mut pack), + &mut expected + ); + assert_eq!(Pack::as_inner(&pack), &expected); + assert_eq!(Pack::as_inner_mut(&mut pack), &mut expected); + assert_eq!(Pack::into_inner(pack), expected); + } + + #[test] + fn from_works() { + let mut expected = complex_value(); + let mut from = Pack::from(expected); + assert_eq!(from, Pack::new(expected)); + assert_eq!(Pack::as_inner(&from), &expected); + assert_eq!(Pack::as_inner_mut(&mut from), &mut expected); + assert_eq!(Pack::into_inner(from), expected); + } + + #[test] + fn default_works() { + use core::fmt::Debug; + fn assert_default() + where + T: Debug + Default + PartialEq, + { + let pack_default = as Default>::default(); + assert_eq!( + >::into_inner(pack_default), + ::default() + ); + } + assert_default::(); + assert_default::(); + assert_default::>(); + assert_default::>(); + } + + #[test] + fn partial_eq_works() { + let b1 = Pack::new(b'X'); + let b2 = Pack::new(b'Y'); + let b3 = Pack::new(b'X'); + assert!( as PartialEq>::ne(&b1, &b2)); + assert!( as PartialEq>::eq(&b1, &b3)); + } + + #[test] + fn partial_ord_works() { + let b1 = Pack::new(1); + let b2 = Pack::new(2); + let b3 = Pack::new(1); + assert_eq!( + as PartialOrd>::partial_cmp(&b1, &b2), + Some(Ordering::Less) + ); + assert_eq!( + as PartialOrd>::partial_cmp(&b2, &b1), + Some(Ordering::Greater) + ); + assert_eq!( + as PartialOrd>::partial_cmp(&b1, &b3), + Some(Ordering::Equal) + ); + // Less-than + assert!( as PartialOrd>::lt(&b1, &b2)); + // Less-than-or-equals + assert!( as PartialOrd>::le(&b1, &b2)); + assert!( as PartialOrd>::le(&b1, &b3)); + // Greater-than + assert!( as PartialOrd>::gt(&b2, &b1)); + // Greater-than-or-equals + 
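To make the cell accounting from the `Pack` docs concrete: under spread layout every field gets its own cell, while `Pack` SCALE-encodes the whole value into one. A sketch of the footprints, assuming the tuple impls at the end of this patch sum their fields' footprints the way the array impls multiply theirs:

    // Three cells spread out (one per primitive field)...
    assert_eq!(<(u8, u16, bool) as SpreadLayout>::FOOTPRINT, 3);
    // ...but a single cell once packed.
    assert_eq!(<Pack<(u8, u16, bool)> as SpreadLayout>::FOOTPRINT, 1);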
assert!( as PartialOrd>::ge(&b2, &b1)); + assert!( as PartialOrd>::ge(&b3, &b1)); + } + + fn run_test(f: F) + where + F: FnOnce(DefaultAccounts), + { + env::test::run_test::(|default_accounts| { + f(default_accounts); + Ok(()) + }) + .unwrap() + } + + #[test] + fn spread_layout_push_pull_works() { + run_test(|_| { + let p1 = Pack::new((b'A', [0x00; 4], (true, 42))); + assert_eq!(*p1, (b'A', [0x00; 4], (true, 42))); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&p1, &mut KeyPtr::from(root_key)); + // Now load another instance of a pack from the same key and check + // if both instances are equal: + let p2 = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(p1, p2); + }) + } + + #[test] + #[should_panic(expected = "storage entry was empty")] + fn spread_layout_clear_works() { + run_test(|_| { + let p1 = Pack::new((b'A', [0x00; 4], (true, 42))); + assert_eq!(*p1, (b'A', [0x00; 4], (true, 42))); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&p1, &mut KeyPtr::from(root_key)); + // Now load another instance of a pack from the same key and check + // if both instances are equal: + let p2 = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(p1, p2); + // Clearing the underlying storage of p2 immediately so that + // loading another instance of pack again should panic. + SpreadLayout::clear_spread(&p2, &mut KeyPtr::from(root_key)); + let p3 = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); + assert_eq!(p1, p3); + }) + } + + #[test] + fn spread_and_packed_layout_are_equal() { + run_test(|_| { + // Push as spread, pull as packed: + let p1 = Pack::new((b'A', [0x00; 4], (true, 42))); + assert_eq!(*p1, (b'A', [0x00; 4], (true, 42))); + let root_key = Key([0x42; 32]); + SpreadLayout::push_spread(&p1, &mut KeyPtr::from(root_key)); + let p2 = pull_packed_root::>(&root_key); + assert_eq!(p1, p2); + // Push as packed, pull as spread: + let root_key2 = Key([0x43; 32]); + push_packed_root(&p2, &root_key2); + let p3 = SpreadLayout::pull_spread(&mut KeyPtr::from(root_key2)); + assert_eq!(p2, p3); + }) + } +} diff --git a/core/src/storage2/traits/impls/arrays.rs b/core/src/storage2/traits/impls/arrays.rs new file mode 100644 index 00000000000..d54ab6b3b3c --- /dev/null +++ b/core/src/storage2/traits/impls/arrays.rs @@ -0,0 +1,90 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::storage2::traits::{ + KeyPtr, + PackedLayout, + SpreadLayout, +}; +use array_init::array_init; +use ink_primitives::Key; + +#[rustfmt::skip] +macro_rules! forward_supported_array_lens { + ( $mac:ident ) => { + $mac! { + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, + } + }; +} + +macro_rules! impl_layout_for_array { + ( $($len:literal),* $(,)? 
) => { + $( + impl SpreadLayout for [T; $len] + where + T: SpreadLayout, + { + const FOOTPRINT: u64 = $len * ::FOOTPRINT; + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; + + fn push_spread(&self, ptr: &mut KeyPtr) { + for elem in self { + ::push_spread(elem, ptr) + } + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + for elem in self { + ::clear_spread(elem, ptr) + } + } + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + array_init::(|_| ::pull_spread(ptr)) + } + } + + impl PackedLayout for [T; $len] + where + T: PackedLayout, + { + #[inline] + fn push_packed(&self, at: &Key) { + for elem in self { + ::push_packed(elem, at) + } + } + + #[inline] + fn clear_packed(&self, at: &Key) { + for elem in self { + ::clear_packed(elem, at) + } + } + + #[inline] + fn pull_packed(&mut self, at: &Key) { + for elem in self { + ::pull_packed(elem, at) + } + } + } + )* + } +} +forward_supported_array_lens!(impl_layout_for_array); diff --git a/core/src/storage2/traits/impls/collections.rs b/core/src/storage2/traits/impls/collections.rs new file mode 100644 index 00000000000..d4edb13a53a --- /dev/null +++ b/core/src/storage2/traits/impls/collections.rs @@ -0,0 +1,209 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::storage2::traits::{ + impls::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + }, + KeyPtr, + PackedLayout, + SpreadLayout, +}; +use ink_prelude::{ + collections::{ + BTreeMap as StdBTreeMap, + BTreeSet as StdBTreeSet, + BinaryHeap as StdBinaryHeap, + LinkedList as StdLinkedList, + VecDeque as StdVecDeque, + }, + vec::Vec, +}; +use ink_primitives::Key; + +impl SpreadLayout for StdBTreeMap +where + K: PackedLayout + Ord, + V: PackedLayout, +{ + const FOOTPRINT: u64 = 1; + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; + + #[inline] + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + #[inline] + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(self, ptr) + } + + #[inline] + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(self, ptr) + } +} + +impl PackedLayout for StdBTreeMap +where + K: PackedLayout + Ord, + V: PackedLayout, +{ + fn push_packed(&self, at: &Key) { + for (key, val) in self { + ::push_packed(key, at); + ::push_packed(val, at); + } + } + + fn clear_packed(&self, at: &Key) { + for (key, val) in self { + ::clear_packed(key, at); + ::clear_packed(val, at); + } + } + + fn pull_packed(&mut self, at: &Key) { + // We cannot mutate keys in a map so we can forward pull signals + // only to the values of a map. 
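Given the array impls above, footprints multiply: `[T; N]` reserves `N * T::FOOTPRINT` consecutive cells under spread layout, which is exactly why the `Pack` docs earlier warn that an unpacked `[u8; 8]` costs eight cells. A quick sketch:

    // `u8` occupies one cell, so the array reserves eight keys...
    assert_eq!(<[u8; 8] as SpreadLayout>::FOOTPRINT, 8);
    // ...and nesting multiplies: four arrays of eight cells each.
    assert_eq!(<[[u8; 8]; 4] as SpreadLayout>::FOOTPRINT, 32);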
+ for val in self.values_mut() { + ::pull_packed(val, at); + } + } +} + +impl SpreadLayout for StdBTreeSet +where + T: PackedLayout + Ord, +{ + const FOOTPRINT: u64 = 1; + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; + + #[inline] + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + #[inline] + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(self, ptr) + } + + #[inline] + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(self, ptr) + } +} + +impl PackedLayout for StdBTreeSet +where + T: PackedLayout + Ord, +{ + fn push_packed(&self, at: &Key) { + for key in self { + ::push_packed(key, at); + } + } + + fn clear_packed(&self, at: &Key) { + for key in self { + ::clear_packed(key, at); + } + } + + #[inline(always)] + fn pull_packed(&mut self, _at: &Key) { + // We cannot mutate keys in a set so we cannot forward pull signals. + } +} + +impl SpreadLayout for StdBinaryHeap +where + T: PackedLayout + Ord, +{ + const FOOTPRINT: u64 = 1; + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; + + #[inline] + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + #[inline] + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(self, ptr) + } + + #[inline] + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(self, ptr) + } +} + +impl PackedLayout for StdBinaryHeap +where + T: PackedLayout + Ord, +{ + fn push_packed(&self, at: &Key) { + for value in self { + ::push_packed(value, at); + } + } + + fn clear_packed(&self, at: &Key) { + for value in self { + ::clear_packed(value, at); + } + } + + #[inline(always)] + fn pull_packed(&mut self, _at: &Key) { + // We cannot mutate keys in a heap so we cannot forward pull signals. + } +} + +macro_rules! impl_push_at_for_collection { + ( $($collection:ident),* $(,)? ) => { + $( + impl_always_packed_layout!($collection, deep: ::REQUIRES_DEEP_CLEAN_UP); + + impl PackedLayout for $collection + where + T: PackedLayout, + { + fn push_packed(&self, at: &Key) { + for elem in self { + ::push_packed(elem, at) + } + } + + fn clear_packed(&self, at: &Key) { + for elem in self { + ::clear_packed(elem, at) + } + } + + fn pull_packed(&mut self, at: &Key) { + for elem in self { + ::pull_packed(elem, at) + } + } + } + )* + }; +} +impl_push_at_for_collection!(Vec, StdLinkedList, StdVecDeque,); diff --git a/core/src/storage2/traits/impls/mod.rs b/core/src/storage2/traits/impls/mod.rs new file mode 100644 index 00000000000..ba0327176ec --- /dev/null +++ b/core/src/storage2/traits/impls/mod.rs @@ -0,0 +1,151 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +macro_rules! 
impl_always_packed_layout { + ( $name:ident < $($frag:ident),+ >, deep: $deep:expr ) => { + const _: () = { + use crate::storage2::traits::impls::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + }; + impl<$($frag),+> SpreadLayout for $name < $($frag),+ > + where + $( + $frag: PackedLayout, + )+ + { + const FOOTPRINT: u64 = 1; + + const REQUIRES_DEEP_CLEAN_UP: bool = $deep; + + #[inline] + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + #[inline] + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(self, ptr) + } + + #[inline] + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(self, ptr) + } + } + }; + }; + ( $name:ty, deep: $deep:expr ) => { + const _: () = { + use crate::storage2::traits::impls::{ + forward_clear_packed, + forward_pull_packed, + forward_push_packed, + }; + impl SpreadLayout for $name + where + Self: PackedLayout, + { + const FOOTPRINT: u64 = 1; + + const REQUIRES_DEEP_CLEAN_UP: bool = $deep; + + #[inline] + fn pull_spread(ptr: &mut KeyPtr) -> Self { + forward_pull_packed::(ptr) + } + + #[inline] + fn push_spread(&self, ptr: &mut KeyPtr) { + forward_push_packed::(self, ptr) + } + + #[inline] + fn clear_spread(&self, ptr: &mut KeyPtr) { + forward_clear_packed::(self, ptr) + } + } + }; + }; +} + +mod arrays; +mod collections; +mod prims; +mod tuples; + +use super::{ + clear_packed_root, + pull_packed_root, + push_packed_root, + PackedLayout, +}; +use crate::storage2::traits::KeyPtr; + +/// Returns the greater of both values. +const fn max(a: u64, b: u64) -> u64 { + [a, b][(a > b) as usize] +} + +/// Pulls an instance of type `T` in packed fashion from the contract storage. +/// +/// Loads the instance from the storage location identified by `ptr`. +/// The storage entity is expected to be decodable in its packed form. +/// +/// # Note +/// +/// Use this utility function to use a packed pull operation for the type +/// instead of a spreaded pull operation. +#[inline] +pub fn forward_pull_packed(ptr: &mut KeyPtr) -> T +where + T: PackedLayout, +{ + pull_packed_root::(&ptr.next_for::()) +} + +/// Pushes an instance of type `T` in packed fashion to the contract storage. +/// +/// Stores the instance to the storage location identified by `ptr`. +/// The storage entity is expected to be encodable in its packed form. +/// +/// # Note +/// +/// Use this utility function to use a packed push operation for the type +/// instead of a spreaded push operation. +#[inline] +pub fn forward_push_packed(entity: &T, ptr: &mut KeyPtr) +where + T: PackedLayout, +{ + push_packed_root::(entity, &ptr.next_for::()) +} + +/// Clears an instance of type `T` in packed fashion from the contract storage. +/// +/// Clears the instance from the storage location identified by `ptr`. +/// The cleared storage entity is expected to be encoded in its packed form. +/// +/// # Note +/// +/// Use this utility function to use a packed clear operation for the type +/// instead of a spreaded clear operation. +#[inline] +pub fn forward_clear_packed(entity: &T, ptr: &mut KeyPtr) +where + T: PackedLayout, +{ + clear_packed_root::(entity, &ptr.next_for::()) +} diff --git a/core/src/storage2/traits/impls/prims.rs b/core/src/storage2/traits/impls/prims.rs new file mode 100644 index 00000000000..c6309c01f81 --- /dev/null +++ b/core/src/storage2/traits/impls/prims.rs @@ -0,0 +1,342 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::max; +use crate::{ + env::{ + AccountId, + Hash, + }, + storage2::traits::{ + KeyPtr, + PackedLayout, + SpreadLayout, + }, +}; +use ink_prelude::{ + boxed::Box, + string::String, +}; +use ink_primitives::Key; + +macro_rules! impl_layout_for_primitive { + ( $($ty:ty),* $(,)? ) => { + $( + impl_always_packed_layout!($ty, deep: false); + impl PackedLayout for $ty { + #[inline(always)] + fn pull_packed(&mut self, _at: &Key) {} + #[inline(always)] + fn push_packed(&self, _at: &Key) {} + #[inline(always)] + fn clear_packed(&self, _at: &Key) {} + } + )* + }; +} +#[rustfmt::skip] +impl_layout_for_primitive!( + // We do not include `f32` and `f64` since Wasm contracts currently + // do not support them since they are non deterministic. We might add them + // to this list once we add deterministic support for those primitives. + Key, Hash, AccountId, + String, + bool, + u8, u16, u32, u64, u128, + i8, i16, i32, i64, i128, +); + +impl SpreadLayout for Option +where + T: SpreadLayout, +{ + const FOOTPRINT: u64 = 1 + ::FOOTPRINT; + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; + + fn push_spread(&self, ptr: &mut KeyPtr) { + ::push_spread(&(self.is_some() as u8), ptr); + if let Some(value) = self { + ::push_spread(value, ptr); + } + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + // We do not really need the reference to 0 (zero) + // in order to clean-up the `bool` value from the storage. + // However the API is demanding a reference so we give it one. + ::clear_spread(&0, ptr); + if let Some(value) = self { + ::clear_spread(value, ptr) + } + } + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + match ::pull_spread(ptr) { + 0u8 => None, + 1u8 => Some(::pull_spread(ptr)), + _ => unreachable!("invalid Option discriminant"), + } + } +} + +impl PackedLayout for Option +where + T: PackedLayout, +{ + #[inline] + fn push_packed(&self, at: &Key) { + if let Some(value) = self { + ::push_packed(value, at) + } + } + + #[inline] + fn clear_packed(&self, at: &Key) { + if let Some(value) = self { + ::clear_packed(value, at) + } + } + + #[inline] + fn pull_packed(&mut self, at: &Key) { + if let Some(value) = self { + ::pull_packed(value, at) + } + } +} + +impl SpreadLayout for Result +where + T: SpreadLayout, + E: SpreadLayout, +{ + const FOOTPRINT: u64 = 1 + max( + ::FOOTPRINT, + ::FOOTPRINT, + ); + const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP + || ::REQUIRES_DEEP_CLEAN_UP; + + fn pull_spread(ptr: &mut KeyPtr) -> Self { + match ::pull_spread(ptr) { + 0 => Ok(::pull_spread(ptr)), + 1 => Err(::pull_spread(ptr)), + _ => unreachable!("invalid Result discriminant"), + } + } + + fn push_spread(&self, ptr: &mut KeyPtr) { + match self { + Ok(value) => { + ::push_spread(&0, ptr); + ::push_spread(value, ptr); + } + Err(error) => { + ::push_spread(&1, ptr); + ::push_spread(error, ptr); + } + } + } + + fn clear_spread(&self, ptr: &mut KeyPtr) { + // Clear the discriminant, same for all variants. 
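So an `Option<T>` under spread layout is a one-byte discriminant cell followed by `T`'s own cells, and `FOOTPRINT` is `1 + T::FOOTPRINT` for both variants so that the layout stays static; `Result<T, E>` works the same way with `1 + max(T, E)`. A sketch, assuming the off-chain test environment:

    let root_key = Key([0x42; 32]);
    // Writes `1u8` at `root_key` and `7u32` in the following cell; a
    // `None` would write only the `0u8` discriminant but still reserve
    // both cells.
    SpreadLayout::push_spread(&Some(7_u32), &mut KeyPtr::from(root_key));
    assert_eq!(<Option<u32> as SpreadLayout>::FOOTPRINT, 2);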
+        <u8 as SpreadLayout>::clear_spread(&0, ptr);
+        match self {
+            Ok(value) => {
+                <T as SpreadLayout>::clear_spread(value, ptr);
+            }
+            Err(error) => {
+                <E as SpreadLayout>::clear_spread(error, ptr);
+            }
+        }
+    }
+}
+
+impl<T, E> PackedLayout for Result<T, E>
+where
+    T: PackedLayout,
+    E: PackedLayout,
+{
+    #[inline]
+    fn push_packed(&self, at: &Key) {
+        match self {
+            Ok(value) => <T as PackedLayout>::push_packed(value, at),
+            Err(error) => <E as PackedLayout>::push_packed(error, at),
+        }
+    }
+
+    #[inline]
+    fn clear_packed(&self, at: &Key) {
+        match self {
+            Ok(value) => <T as PackedLayout>::clear_packed(value, at),
+            Err(error) => <E as PackedLayout>::clear_packed(error, at),
+        }
+    }
+
+    #[inline]
+    fn pull_packed(&mut self, at: &Key) {
+        match self {
+            Ok(value) => <T as PackedLayout>::pull_packed(value, at),
+            Err(error) => <E as PackedLayout>::pull_packed(error, at),
+        }
+    }
+}
+
+impl<T> SpreadLayout for Box<T>
+where
+    T: SpreadLayout,
+{
+    const FOOTPRINT: u64 = <T as SpreadLayout>::FOOTPRINT;
+    const REQUIRES_DEEP_CLEAN_UP: bool = <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP;
+
+    fn pull_spread(ptr: &mut KeyPtr) -> Self {
+        Box::new(<T as SpreadLayout>::pull_spread(ptr))
+    }
+
+    fn push_spread(&self, ptr: &mut KeyPtr) {
+        <T as SpreadLayout>::push_spread(&*self, ptr)
+    }
+
+    fn clear_spread(&self, ptr: &mut KeyPtr) {
+        <T as SpreadLayout>::clear_spread(&*self, ptr)
+    }
+}
+
+impl<T> PackedLayout for Box<T>
+where
+    T: PackedLayout,
+{
+    #[inline]
+    fn push_packed(&self, at: &Key) {
+        <T as PackedLayout>::push_packed(&*self, at)
+    }
+
+    #[inline]
+    fn clear_packed(&self, at: &Key) {
+        <T as PackedLayout>::clear_packed(&*self, at)
+    }
+
+    #[inline]
+    fn pull_packed(&mut self, at: &Key) {
+        <T as PackedLayout>::pull_packed(&mut *self, at)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{
+        env,
+        env::AccountId,
+        storage2::traits::{
+            clear_spread_root,
+            pull_packed_root,
+            pull_spread_root,
+            push_packed_root,
+            push_spread_root,
+        },
+    };
+    use ink_primitives::Key;
+
+    /// Runs `f` using the off-chain testing environment.
+    fn run_test<F>(f: F)
+    where
+        F: FnOnce(),
+    {
+        env::test::run_test::<env::DefaultEnvTypes, _>(|_| {
+            f();
+            Ok(())
+        })
+        .unwrap()
+    }
+
+    macro_rules! push_pull_works_for_primitive {
+        ( $name:ty, [$($value:expr),*] ) => {
+            paste::item! {
+                #[test]
+                #[allow(non_snake_case)]
+                fn [<$name _pull_push_works>] () {
+                    run_test(|| {
+                        $({
+                            let x: $name = $value;
+                            let key = Key([0x42; 32]);
+                            let key2 = Key([0x77; 32]);
+                            push_spread_root(&x, &key);
+                            let y: $name = pull_spread_root(&key);
+                            assert_eq!(x, y);
+                            push_packed_root(&x, &key2);
+                            let z: $name = pull_packed_root(&key2);
+                            assert_eq!(x, z);
+                        })*
+                    })
+                }
+
+                #[test]
+                #[should_panic(expected = "storage entry was empty")]
+                #[allow(non_snake_case)]
+                fn [<$name _clean_works>]() {
+                    run_test(|| {
+                        $({
+                            let x: $name = $value;
+                            let key = Key([0x42; 32]);
+                            push_spread_root(&x, &key);
+                            // Works since we just populated the storage.
+                            let y: $name = pull_spread_root(&key);
+                            assert_eq!(x, y);
+                            clear_spread_root(&x, &key);
+                            // Panics since it loads eagerly from cleared storage.
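+                            // (`pull_spread_root` treats a missing root cell
+                            // as a logic error rather than returning `None`.)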
+                            let _: $name = pull_spread_root(&key);
+                        })*
+                    })
+                }
+            }
+        };
+    }
+    push_pull_works_for_primitive!(bool, [false, true]);
+    push_pull_works_for_primitive!(
+        String,
+        [Default::default(), String::from("Hello, World!")]
+    );
+    push_pull_works_for_primitive!(
+        Key,
+        [
+            Key::from([0x00; 32]),
+            Key::from([0x42; 32]),
+            Key::from([0xFF; 32])
+        ]
+    );
+    push_pull_works_for_primitive!(
+        AccountId,
+        [
+            AccountId::from([0x00; 32]),
+            AccountId::from([0x42; 32]),
+            AccountId::from([0xFF; 32])
+        ]
+    );
+    push_pull_works_for_primitive!(i8, [0, Default::default(), 1, i8::MIN, i8::MAX]);
+    push_pull_works_for_primitive!(i16, [0, Default::default(), 2, i16::MIN, i16::MAX]);
+    push_pull_works_for_primitive!(i32, [0, Default::default(), 3, i32::MIN, i32::MAX]);
+    push_pull_works_for_primitive!(i64, [0, Default::default(), 4, i64::MIN, i64::MAX]);
+    push_pull_works_for_primitive!(
+        i128,
+        [0, Default::default(), 5, i128::MIN, i128::MAX]
+    );
+    push_pull_works_for_primitive!(u8, [0, Default::default(), 10, u8::MIN, u8::MAX]);
+    push_pull_works_for_primitive!(u16, [0, Default::default(), 20, u16::MIN, u16::MAX]);
+    push_pull_works_for_primitive!(u32, [0, Default::default(), 30, u32::MIN, u32::MAX]);
+    push_pull_works_for_primitive!(u64, [0, Default::default(), 40, u64::MIN, u64::MAX]);
+    push_pull_works_for_primitive!(
+        u128,
+        [0, Default::default(), 50, u128::MIN, u128::MAX]
+    );
+}
diff --git a/core/src/storage2/traits/impls/tuples.rs b/core/src/storage2/traits/impls/tuples.rs
new file mode 100644
index 00000000000..99510f8af87
--- /dev/null
+++ b/core/src/storage2/traits/impls/tuples.rs
@@ -0,0 +1,102 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::storage2::traits::{
+    KeyPtr,
+    PackedLayout,
+    SpreadLayout,
+};
+use ink_primitives::Key;
+
+macro_rules! impl_layout_for_tuple {
+    ( $($frag:ident),* $(,)?
) => {
+        impl<$($frag),*> SpreadLayout for ($($frag),* ,)
+        where
+            $(
+                $frag: SpreadLayout,
+            )*
+        {
+            const FOOTPRINT: u64 = 0 $(+ <$frag as SpreadLayout>::FOOTPRINT)*;
+            const REQUIRES_DEEP_CLEAN_UP: bool = false $(|| <$frag as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP)*;
+
+            fn push_spread(&self, ptr: &mut KeyPtr) {
+                #[allow(non_snake_case)]
+                let ($($frag),*,) = self;
+                $(
+                    <$frag as SpreadLayout>::push_spread($frag, ptr);
+                )*
+            }
+
+            fn clear_spread(&self, ptr: &mut KeyPtr) {
+                #[allow(non_snake_case)]
+                let ($($frag),*,) = self;
+                $(
+                    <$frag as SpreadLayout>::clear_spread($frag, ptr);
+                )*
+            }
+
+            fn pull_spread(ptr: &mut KeyPtr) -> Self {
+                (
+                    $(
+                        <$frag as SpreadLayout>::pull_spread(ptr),
+                    )*
+                )
+            }
+        }
+
+        impl<$($frag),*> PackedLayout for ($($frag),* ,)
+        where
+            $(
+                $frag: PackedLayout,
+            )*
+        {
+            #[inline]
+            fn push_packed(&self, at: &Key) {
+                #[allow(non_snake_case)]
+                let ($($frag),*,) = self;
+                $(
+                    <$frag as PackedLayout>::push_packed($frag, at);
+                )*
+            }
+
+            #[inline]
+            fn clear_packed(&self, at: &Key) {
+                #[allow(non_snake_case)]
+                let ($($frag),*,) = self;
+                $(
+                    <$frag as PackedLayout>::clear_packed($frag, at);
+                )*
+            }
+
+            #[inline]
+            fn pull_packed(&mut self, at: &Key) {
+                #[allow(non_snake_case)]
+                let ($($frag),*,) = self;
+                $(
+                    <$frag as PackedLayout>::pull_packed($frag, at);
+                )*
+            }
+        }
+    }
+}
+impl_layout_for_tuple!(A);
+impl_layout_for_tuple!(A, B);
+impl_layout_for_tuple!(A, B, C);
+impl_layout_for_tuple!(A, B, C, D);
+impl_layout_for_tuple!(A, B, C, D, E);
+impl_layout_for_tuple!(A, B, C, D, E, F);
+impl_layout_for_tuple!(A, B, C, D, E, F, G);
+impl_layout_for_tuple!(A, B, C, D, E, F, G, H);
+impl_layout_for_tuple!(A, B, C, D, E, F, G, H, I);
+impl_layout_for_tuple!(A, B, C, D, E, F, G, H, I, J);
diff --git a/core/src/storage2/traits/keyptr.rs b/core/src/storage2/traits/keyptr.rs
new file mode 100644
index 00000000000..07a391ae695
--- /dev/null
+++ b/core/src/storage2/traits/keyptr.rs
@@ -0,0 +1,49 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::SpreadLayout;
+use ink_primitives::Key;
+
+/// A key pointer.
+///
+/// Mainly used by the [`SpreadLayout`] trait in order to provide
+/// a streamlined and efficient interface for accessing the underlying [`Key`].
+pub struct KeyPtr {
+    /// The underlying key.
+    key: Key,
+}
+
+impl From<Key> for KeyPtr {
+    fn from(key: Key) -> Self {
+        Self { key }
+    }
+}
+
+impl KeyPtr {
+    /// Advances the key pointer by the footprint of the generic type
+    /// parameter `T` and returns the old value.
+    pub fn next_for<T>(&mut self) -> Key
+    where
+        T: SpreadLayout,
+    {
+        self.advance_by(<T as SpreadLayout>::FOOTPRINT)
+    }
+
+    /// Advances the key pointer by the given amount and returns the old value.
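+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of the intended semantics (hypothetical values):
+    ///
+    /// ```ignore
+    /// let mut ptr = KeyPtr::from(Key([0x00; 32]));
+    /// let start = ptr.advance_by(2);
+    /// // `start` is the original key; `ptr` now points two cells further.
+    /// assert_eq!(start, Key([0x00; 32]));
+    /// ```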
+    pub fn advance_by(&mut self, amount: u64) -> Key {
+        let copy = self.key;
+        self.key += amount;
+        copy
+    }
+}
diff --git a/core/src/storage2/traits/mod.rs b/core/src/storage2/traits/mod.rs
new file mode 100644
index 00000000000..ebbb0fe1a37
--- /dev/null
+++ b/core/src/storage2/traits/mod.rs
@@ -0,0 +1,166 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and interfaces to operate with storage entities.
+//!
+//! Generally, a type is said to be a storage entity if it implements the
+//! `SpreadLayout` trait. This trait defines certain constants and routines
+//! that tell a smart contract how to load and store instances of the type
+//! from and to the contract's storage.
+//!
+//! The `PackedLayout` trait can then be implemented on top of `SpreadLayout`
+//! for types that can additionally be stored in a single storage cell in a
+//! more compact, packed form.
+
+mod impls;
+mod keyptr;
+mod optspec;
+mod packed;
+mod spread;
+
+pub(crate) use self::optspec::{
+    clear_spread_root_opt,
+    pull_packed_root_opt,
+    pull_spread_root_opt,
+    push_packed_root_opt,
+    push_spread_root_opt,
+};
+pub use self::{
+    impls::{
+        forward_clear_packed,
+        forward_pull_packed,
+        forward_push_packed,
+    },
+    keyptr::KeyPtr,
+    packed::PackedLayout,
+    spread::SpreadLayout,
+};
+use ink_primitives::Key;
+
+/// Pulls an instance of type `T` from the contract storage using spread layout.
+///
+/// The root key denotes the offset into the contract storage where the
+/// instance of type `T` is being pulled from.
+///
+/// # Note
+///
+/// - The routine assumes that the instance has previously been stored to
+///   the contract storage using spread layout.
+/// - Users should prefer using this function directly instead of using the
+///   trait methods on [`SpreadLayout`].
+pub fn pull_spread_root<T>(root_key: &Key) -> T
+where
+    T: SpreadLayout,
+{
+    let mut ptr = KeyPtr::from(*root_key);
+    <T as SpreadLayout>::pull_spread(&mut ptr)
+}
+
+/// Clears the entity from the contract storage using spread layout.
+///
+/// The root key denotes the offset into the contract storage where the
+/// instance of type `T` is being cleared from.
+///
+/// # Note
+///
+/// - The routine assumes that the instance has previously been stored to
+///   the contract storage using spread layout.
+/// - Users should prefer using this function directly instead of using the
+///   trait methods on [`SpreadLayout`].
+pub fn clear_spread_root<T>(entity: &T, root_key: &Key)
+where
+    T: SpreadLayout,
+{
+    let mut ptr = KeyPtr::from(*root_key);
+    <T as SpreadLayout>::clear_spread(entity, &mut ptr);
+}
+
+/// Pushes the entity to the contract storage using spread layout.
+///
+/// The root key denotes the offset into the contract storage where the
+/// instance of type `T` is being pushed to.
+///
+/// # Note
+///
+/// - The routine will push the given entity to the contract storage using
+///   spread layout.
+/// - Users should prefer using this function directly instead of using the
+///   trait methods on [`SpreadLayout`].
+pub fn push_spread_root<T>(entity: &T, root_key: &Key)
+where
+    T: SpreadLayout,
+{
+    let mut ptr = KeyPtr::from(*root_key);
+    <T as SpreadLayout>::push_spread(entity, &mut ptr);
+}
+
+/// Pulls an instance of type `T` from the contract storage using packed layout.
+///
+/// The root key denotes the offset into the contract storage where the
+/// instance of type `T` is being pulled from.
+///
+/// # Note
+///
+/// - The routine assumes that the instance has previously been stored to
+///   the contract storage using packed layout.
+/// - Users should prefer using this function directly instead of using the
+///   trait methods on [`PackedLayout`].
+pub fn pull_packed_root<T>(root_key: &Key) -> T
+where
+    T: PackedLayout,
+{
+    let mut entity = crate::env::get_contract_storage::<T>(*root_key)
+        .expect("storage entry was empty")
+        .expect("could not properly decode storage entry");
+    <T as PackedLayout>::pull_packed(&mut entity, root_key);
+    entity
+}
+
+/// Pushes the entity to the contract storage using packed layout.
+///
+/// The root key denotes the offset into the contract storage where the
+/// instance of type `T` is being pushed to.
+///
+/// # Note
+///
+/// - The routine will push the given entity to the contract storage using
+///   packed layout.
+/// - Users should prefer using this function directly instead of using the
+///   trait methods on [`PackedLayout`].
+pub fn push_packed_root<T>(entity: &T, root_key: &Key)
+where
+    T: PackedLayout,
+{
+    <T as PackedLayout>::push_packed(entity, root_key);
+    crate::env::set_contract_storage(*root_key, entity);
+}
+
+/// Clears the entity from the contract storage using packed layout.
+///
+/// The root key denotes the offset into the contract storage where the
+/// instance of type `T` is being cleared from.
+///
+/// # Note
+///
+/// - The routine assumes that the instance has previously been stored to
+///   the contract storage using packed layout.
+/// - Users should prefer using this function directly instead of using the
+///   trait methods on [`PackedLayout`].
+pub fn clear_packed_root<T>(entity: &T, root_key: &Key)
+where
+    T: PackedLayout,
+{
+    <T as PackedLayout>::clear_packed(entity, root_key);
+    crate::env::clear_contract_storage(*root_key);
+}
diff --git a/core/src/storage2/traits/optspec.rs b/core/src/storage2/traits/optspec.rs
new file mode 100644
index 00000000000..066ea084515
--- /dev/null
+++ b/core/src/storage2/traits/optspec.rs
@@ -0,0 +1,132 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implements specialized routines for managing `Option<T>` storage entities.
+//!
+//! These are mere optimizations compared to the non-specialized root functions.
+//! The specializations make use of the storage entry state (occupied or vacant)
+//! in order to encode the option's state, thus using less storage in total.
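+//!
+//! For example (a sketch of the idea, not additional API): pushing
+//! `Some(value)` stores `value` at the root key as if it were a bare `T`,
+//! while pushing `None` simply clears the root cell, so the occupancy of
+//! the cell itself acts as the discriminant and no extra cell is written.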
+
+use super::{
+    KeyPtr,
+    PackedLayout,
+    SpreadLayout,
+};
+use crate::env;
+use ink_primitives::Key;
+
+pub fn pull_spread_root_opt<T>(root_key: &Key) -> Option<T>
+where
+    T: SpreadLayout,
+{
+    // In case the contract storage is occupied we handle
+    // the `Option<T>` as if it were a bare `T`.
+    env::get_contract_storage::<()>(*root_key)
+        .map(|_| super::pull_spread_root::<T>(root_key))
+}
+
+pub fn push_spread_root_opt<T>(entity: Option<&T>, root_key: &Key)
+where
+    T: SpreadLayout,
+{
+    match entity {
+        Some(value) => {
+            // Handle the `Option<T>` as if it were a bare `T`.
+            //
+            // Sadly this doesn't work well with `Option<Option<T>>`.
+            // For this we'd need specialization in Rust or similar.
+            super::push_spread_root(value, root_key)
+        }
+        None => clear_spread_root_opt::<T, _>(root_key, || entity),
+    }
+}
+
+pub fn clear_spread_root_opt<'a, T: 'a, F>(root_key: &Key, f: F)
+where
+    T: SpreadLayout,
+    F: FnOnce() -> Option<&'a T>,
+{
+    // We can clean up a storage entity using either its
+    // `SpreadLayout::clear_spread` implementation or its defined storage
+    // footprint.
+    //
+    // While using its `SpreadLayout::clear_spread` implementation is more
+    // precise and will only clean up what is necessary, it requires an actual
+    // instance. Loading such an instance, if it is not already in the memory
+    // cache of some lazy abstraction, incurs significant overhead. Using the
+    // defined storage footprint, this procedure can eagerly clean up the
+    // associated contract storage region; however, it might clean up more
+    // cells than needed.
+    //
+    // There are types that need a so-called "deep" clean-up. An example is
+    // `storage::Box<storage::Box<T>>`, where the outer storage box definitely
+    // needs to propagate clearing signals onto its inner `storage::Box` in
+    // order to properly clean up the whole associated contract storage region.
+    // In this case we cannot avoid loading the entity for the clean-up
+    // procedure.
+    //
+    // If the entity that shall be cleaned up does not require a deep clean-up,
+    // we check whether its storage footprint exceeds a certain threshold and
+    // only then load it first, in order not to clear too many storage cells
+    // unnecessarily.
+    let footprint = <T as SpreadLayout>::FOOTPRINT;
+    let threshold = 16; // Arbitrarily chosen. Might need adjustments later.
+    if footprint >= threshold || <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP {
+        // We need to load the entity before we remove its associated contract
+        // storage because it requires a deep clean-up which propagates
+        // clearing to its fields, for example in the case of `T` being a
+        // `storage::Box`.
+        if let Some(value) = f() {
+            super::clear_spread_root(value, root_key);
+            return
+        }
+    }
+    // Clean up eagerly without potentially loading the entity from storage:
+    let mut ptr = KeyPtr::from(*root_key);
+    for _ in 0..footprint {
+        env::clear_contract_storage(ptr.advance_by(1));
+    }
+}
+
+pub fn pull_packed_root_opt<T>(root_key: &Key) -> Option<T>
+where
+    T: PackedLayout,
+{
+    match env::get_contract_storage::<T>(*root_key) {
+        Some(value) => {
+            // In case the contract storage is occupied we handle
+            // the `Option<T>` as if it were a bare `T`.
+            let mut value = value.expect("decoding does not match expected type");
+            <T as PackedLayout>::pull_packed(&mut value, root_key);
+            Some(value)
+        }
+        None => None,
+    }
+}
+
+pub fn push_packed_root_opt<T>(entity: Option<&T>, root_key: &Key)
+where
+    T: PackedLayout,
+{
+    match entity {
+        Some(value) => {
+            // Handle the `Option<T>` as if it were a bare `T`.
+            //
+            // Sadly this doesn't work well with `Option<Option<T>>`.
+            // For this we'd need specialization in Rust or similar.
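+            // (With a nested `Option`, the inner `None` would still need its
+            // own discriminant encoding, so the occupancy optimization cannot
+            // be applied recursively without specialization.)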
+            super::push_packed_root(value, root_key)
+        }
+        None => {
+            // Clear the associated storage cell.
+            env::clear_contract_storage(*root_key);
+        }
+    }
+}
diff --git a/core/src/storage2/traits/packed.rs b/core/src/storage2/traits/packed.rs
new file mode 100644
index 00000000000..2ef5f2ccd0e
--- /dev/null
+++ b/core/src/storage2/traits/packed.rs
@@ -0,0 +1,46 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::SpreadLayout;
+use ink_primitives::Key;
+
+/// Types that can be stored to and loaded from a single contract storage cell.
+pub trait PackedLayout: SpreadLayout + scale::Encode + scale::Decode {
+    /// Indicates to `self` that it has just been pulled from the storage.
+    ///
+    /// # Note
+    ///
+    /// Most types will have to implement a simple forwarding to their fields.
+    /// However, some types such as [`storage::Box`](`crate::storage2::Box`)
+    /// are required to perform some special handling upon receiving this signal.
+    fn pull_packed(&mut self, at: &Key);
+
+    /// Indicates to `self` that it is about to be pushed to contract storage.
+    ///
+    /// # Note
+    ///
+    /// Most types will have to implement a simple forwarding to their fields.
+    /// However, some types such as [`storage::Box`](`crate::storage2::Box`)
+    /// are required to perform some special handling upon receiving this signal.
+    fn push_packed(&self, at: &Key);
+
+    /// Indicates to `self` that it is about to be cleared from contract storage.
+    ///
+    /// # Note
+    ///
+    /// Most types will have to implement a simple forwarding to their fields.
+    /// However, some types such as [`storage::Box`](`crate::storage2::Box`)
+    /// are required to perform some special handling upon receiving this signal.
+    fn clear_packed(&self, at: &Key);
+}
diff --git a/core/src/storage2/traits/spread.rs b/core/src/storage2/traits/spread.rs
new file mode 100644
index 00000000000..a6e14b41f28
--- /dev/null
+++ b/core/src/storage2/traits/spread.rs
@@ -0,0 +1,84 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::KeyPtr;
+
+/// Types that can be stored to and loaded from the contract storage.
+pub trait SpreadLayout {
+    /// The footprint of the type.
+    ///
+    /// This is the number of contiguous storage cells the type requires in
+    /// order to be stored in the contract storage with spread layout.
+    ///
+    /// # Examples
+    ///
+    /// An instance of type `i32` requires one storage cell, so its footprint
+    /// is 1.
+    /// An instance of type `(i32, i32)` requires 2 storage cells since a
+    /// tuple or any other combined data structure always associates disjoint
+    /// cells with its sub-types. The same applies to arrays; e.g. `[i32; 5]`
+    /// has a footprint of 5.
+    const FOOTPRINT: u64;
+
+    /// Indicates whether a type requires a deep clean-up of its state, meaning
+    /// that a clean-up routine has to decode an entity into an instance in
+    /// order to eventually recurse upon its tear-down.
+    /// This is not required for the majority of primitive data types such as
+    /// `i32`; however, types such as `storage::Box` that might want to forward
+    /// the clean-up procedure to their inner `T` require a deep clean-up.
+    ///
+    /// # Note
+    ///
+    /// The default is set to `true` in order to have correctness by default,
+    /// since no type invariants break if a deep clean-up is performed on a
+    /// type that does not need it, whereas performing only a shallow clean-up
+    /// on a type that requires a deep clean-up would break invariants.
+    /// This is solely a setting to improve the performance of clean-up for
+    /// some types.
+    const REQUIRES_DEEP_CLEAN_UP: bool = true;
+
+    /// Pulls an instance of `Self` from the contract storage.
+    ///
+    /// The key pointer denotes the position where the instance is being pulled
+    /// from within the contract storage.
+    ///
+    /// # Note
+    ///
+    /// This method of pulling is depth-first: sub-types are pulled first and
+    /// the super-type is constructed from them.
+    fn pull_spread(ptr: &mut KeyPtr) -> Self;
+
+    /// Pushes an instance of `Self` to the contract storage.
+    ///
+    /// - Tries to spread `Self` to as many storage cells as possible.
+    /// - The key pointer denotes the position where the instance is being
+    ///   pushed to the contract storage.
+    ///
+    /// # Note
+    ///
+    /// This method of pushing is depth-first: sub-types are pushed before
+    /// their parent or super type.
+    fn push_spread(&self, ptr: &mut KeyPtr);
+
+    /// Clears an instance of `Self` from the contract storage.
+    ///
+    /// - Tries to clean `Self` from the contract storage as if `self` was
+    ///   stored in it using spread layout.
+    /// - The key pointer denotes the position where the instance is being
+    ///   cleared from the contract storage.
+    ///
+    /// # Note
+    ///
+    /// This method of clearing is depth-first: sub-types are cleared before
+    /// their parent or super type.
+    fn clear_spread(&self, ptr: &mut KeyPtr);
+}
diff --git a/primitives/src/key.rs b/primitives/src/key.rs
index e8902c01dfa..dd62717a443 100644
--- a/primitives/src/key.rs
+++ b/primitives/src/key.rs
@@ -37,6 +37,12 @@ use scale::{
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)]
 pub struct Key(pub [u8; 32]);
 
+impl From<[u8; 32]> for Key {
+    fn from(bytes: [u8; 32]) -> Self {
+        Self(bytes)
+    }
+}
+
 #[cfg(feature = "std")]
 impl type_metadata::HasTypeId for Key {
     fn type_id() -> type_metadata::TypeId {