diff --git a/Cargo.lock b/Cargo.lock index 72653fefa59e3..1eab8009dd2f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1081,6 +1081,16 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +dependencies = [ + "cfg-if", + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.3.4" @@ -4686,6 +4696,19 @@ dependencies = [ "sp-storage", ] +[[package]] +name = "parity-db" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4174d70be686b0d7cdee964b2d723e1461e28390c8804f01efc907d81043be9" +dependencies = [ + "blake2-rfc", + "libc", + "log", + "memmap", + "parking_lot 0.10.2", +] + [[package]] name = "parity-multiaddr" version = "0.7.3" @@ -5034,6 +5057,16 @@ dependencies = [ "output_vt100", ] +[[package]] +name = "pretty_env_logger" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" +dependencies = [ + "env_logger 0.7.1", + "log", +] + [[package]] name = "primitive-types" version = "0.7.0" @@ -5900,6 +5933,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-database 2.0.0-dev", "sp-externalities", "sp-inherents", "sp-keyring", @@ -5954,6 +5988,7 @@ dependencies = [ name = "sc-client-db" version = "0.8.0-dev" dependencies = [ + "blake2-rfc", "env_logger 0.7.1", "hash-db", "kvdb", @@ -5961,6 +5996,7 @@ dependencies = [ "kvdb-rocksdb", "linked-hash-map", "log", + "parity-db", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.2", @@ -5972,10 +6008,12 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-database 2.0.0-dev", "sp-keyring", "sp-runtime", "sp-state-machine", "sp-trie", + "subdb", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", @@ -6995,6 +7033,17 @@ dependencies = [ "libc", ] +[[package]] +name = "simplelog" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcacac97349a890d437921dfb23cbec52ab5b4752551cb637df2721371acd467" +dependencies = [ + "chrono", + "log", + "term", +] + [[package]] name = "slab" version = "0.4.2" @@ -7375,6 +7424,23 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-database" +version = "2.0.0-alpha.5" +source = "git+https://github.com/paritytech/substrate?branch=gav-db-trait#9404815700a840586fc8760a60180f4e1bf97ce4" +dependencies = [ + "kvdb", + "parking_lot 0.10.2", +] + +[[package]] +name = "sp-database" +version = "2.0.0-dev" +dependencies = [ + "kvdb", + "parking_lot 0.10.2", +] + [[package]] name = "sp-debug-derive" version = "2.0.0-dev" @@ -7855,6 +7921,26 @@ dependencies = [ "syn 1.0.17", ] +[[package]] +name = "subdb" +version = "0.1.0" +source = "git+https://github.com/paritytech/subdb#353bd49a95e618641b552fe890b272f0feb6d752" +dependencies = [ + "blake2-rfc", + "derive_more", + "hash-db", + "hex", + "log", + "memmap", + "parity-scale-codec", + "parking_lot 0.10.2", + "pretty_env_logger", + "simplelog", + "smallvec 1.3.0", + "sp-database 2.0.0-alpha.5", + "twox-hash", +] + [[package]] name = "subkey" version = "2.0.0-dev" @@ -7917,6 +8003,7 @@ dependencies = [ "sc-informant", "sc-network", "sc-service", + "sp-database 2.0.0-dev", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -8272,6 +8359,16 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "term" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" +dependencies = [ + "dirs", + "winapi 0.3.8", +] + [[package]] name = "termcolor" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index ab19142da6ee6..ad05847f61f49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,6 +123,7 @@ members = [ "primitives/consensus/vrf", "primitives/core", "primitives/chain-spec", + "primitives/database", "primitives/debug-derive", "primitives/storage", "primitives/externalities", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 6d7beaa5eef3f..1f22e85ab6e63 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -148,7 +148,7 @@ cli = [ "node-transaction-factory", "sc-cli", "frame-benchmarking-cli", - "sc-service/rocksdb", + "sc-service/db", "structopt", "substrate-build-script-utils", ] diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index ee31b88a66fa2..8ca5132eb9e64 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] pallet-balances = { version = "2.0.0-dev", path = "../../../frame/balances" } sc-client = { version = "0.8.0-dev", path = "../../../client/" } -sc-client-db = { version = "0.8.0-dev", path = "../../../client/db/", features = ["kvdb-rocksdb"] } +sc-client-db = { version = "0.8.0-dev", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "2.0.0-dev", path = "../../../client/api/" } codec = { package = "parity-scale-codec", version = "1.3.0" } pallet-contracts = { version = "2.0.0-dev", path = "../../../frame/contracts" } @@ -54,4 +54,4 @@ fs_extra = "1" [dev-dependencies] criterion = "0.3.0" sc-cli = { version = "0.8.0-dev", path = "../../../client/cli" } -sc-service = { version = "0.8.0-dev", path = "../../../client/service", features = ["rocksdb"] } +sc-service = { version = "0.8.0-dev", path = "../../../client/service", features = ["db"] } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 2ca6428bedc30..9f7ab57895750 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -184,7 +184,7 @@ impl BenchDb { state_cache_size: 16*1024*1024, state_cache_child_ratio: Some((0, 100)), pruning: PruningMode::ArchiveAll, - source: sc_client_db::DatabaseSettingsSrc::Path { + source: sc_client_db::DatabaseSettingsSrc::RocksDb { path: dir.into(), cache_size: 512, }, diff --git a/client/Cargo.toml b/client/Cargo.toml index 2711a6847fe34..bf93cdfde88a1 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -38,6 +38,7 @@ sp-blockchain = { version = "2.0.0-dev", path = "../primitives/blockchain" } sp-state-machine = { version = "0.8.0-dev", path = "../primitives/state-machine" } sc-telemetry = { version = "2.0.0-dev", path = "telemetry" } sp-trie = { version = "2.0.0-dev", path = "../primitives/trie" } +sp-database = { version = "2.0.0-dev", path = "../primitives/database" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-dev", path = "../utils/prometheus" } tracing = "0.1.10" diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index f09a8d8d47392..2adccdd879ed3 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -122,6 +122,20 @@ impl ExecutionStrategy { } } +arg_enum! 
{ + /// Database backend + #[allow(missing_docs)] + #[derive(Debug, Clone, Copy)] + pub enum Database { + // Facebook's RocksDB + RocksDb, + // Subdb. https://github.com/paritytech/subdb/ + SubDb, + // ParityDb. https://github.com/paritytech/parity-db/ + ParityDb, + } +} + /// Default value for the `--execution-syncing` parameter. pub const DEFAULT_EXECUTION_SYNCING: ExecutionStrategy = ExecutionStrategy::NativeElseWasm; /// Default value for the `--execution-import-block` parameter. diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 48abd409d6833..297d83506be37 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -74,7 +74,7 @@ impl ExportBlocksCmd { <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, ::Hash: std::str::FromStr, { - if let DatabaseConfig::Path { ref path, .. } = &config.database { + if let DatabaseConfig::RocksDb { ref path, .. } = &config.database { info!("DB path: {}", path.display()); } diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index d05a5464b215d..68c22c4868565 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -204,9 +204,16 @@ macro_rules! substrate_cli_subcommands { &self, base_path: &::std::path::PathBuf, cache_size: usize, + database: $crate::Database, ) -> $crate::Result<::sc_service::config::DatabaseConfig> { match self { - $($enum::$variant(cmd) => cmd.database_config(base_path, cache_size)),* + $($enum::$variant(cmd) => cmd.database_config(base_path, cache_size, database)),* + } + } + + fn database(&self) -> $crate::Result<::std::option::Option<$crate::Database>> { + match self { + $($enum::$variant(cmd) => cmd.database()),* } } diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 845423695eae8..3be2883bd5093 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -39,7 +39,7 @@ impl PurgeChainCmd { /// Run the purge command pub fn run(&self, config: Configuration) -> error::Result<()> { let db_path = match &config.database { - DatabaseConfig::Path { path, .. } => path, + DatabaseConfig::RocksDb { path, .. } => path, _ => { eprintln!("Cannot purge custom database implementation"); return Ok(()); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 6c4cc0e7103f9..8c8490ff1a2b2 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -21,6 +21,7 @@ use crate::{ init_logger, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, PruningParams, SharedParams, SubstrateCli, }; +use crate::arg_enums::Database; use app_dirs::{AppDataType, AppInfo}; use names::{Generator, Name}; use sc_service::config::{ @@ -152,11 +153,26 @@ pub trait CliConfiguration: Sized { .unwrap_or(Default::default())) } + /// Get the database backend variant. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise it's `None`. + fn database(&self) -> Result<Option<Database>> { + Ok(self.import_params().map(|x| x.database())) + } + /// Get the database configuration. 
/// /// By default this is retrieved from `SharedParams` - fn database_config(&self, base_path: &PathBuf, cache_size: usize) -> Result<DatabaseConfig> { - Ok(self.shared_params().database_config(base_path, cache_size)) + fn database_config(&self, + base_path: &PathBuf, + cache_size: usize, + database: Database, + ) -> Result<DatabaseConfig> { + Ok(self.shared_params().database_config( + base_path, + cache_size, + database, + )) } /// Get the state cache size. @@ -376,6 +392,7 @@ pub trait CliConfiguration: Sized { let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); let client_id = C::client_id(); let database_cache_size = self.database_cache_size()?.unwrap_or(128); + let database = self.database()?.unwrap_or(Database::RocksDb); let node_key = self.node_key(&net_config_dir)?; let role = self.role(is_dev)?; let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); @@ -394,7 +411,7 @@ pub trait CliConfiguration: Sized { node_key, )?, keystore: self.keystore_config(&config_dir)?, - database: self.database_config(&config_dir, database_cache_size)?, + database: self.database_config(&config_dir, database_cache_size, database)?, state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, pruning: self.pruning(is_dev, &role)?, diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 08ca1c8f8ffe0..95b04b039a48e 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -17,7 +17,7 @@ use crate::arg_enums::{ ExecutionStrategy, TracingReceiver, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, - DEFAULT_EXECUTION_SYNCING, + DEFAULT_EXECUTION_SYNCING, Database, }; use crate::params::PruningParams; use crate::Result; @@ -54,6 +54,16 @@ pub struct ImportParams { #[structopt(flatten)] pub execution_strategies: ExecutionStrategiesParams, + /// Select database backend to use. + #[structopt( + long = "database", + alias = "db", + value_name = "DB", + case_insensitive = true, + default_value = "RocksDb" + )] + pub database: Database, + /// Limit the memory the database cache can use. #[structopt(long = "db-cache", value_name = "MiB")] pub database_cache_size: Option<usize>, @@ -132,6 +142,11 @@ impl ImportParams { pub fn database_cache_size(&self) -> Option<usize> { self.database_cache_size } + + /// Get the database backend to use. + pub fn database(&self) -> Database { + self.database + } } /// Execution strategies parameters. diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index f7f9db102c714..d6dd1bd9c16e1 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -17,9 +17,7 @@ use sc_service::config::DatabaseConfig; use std::path::PathBuf; use structopt::StructOpt; - -/// default sub directory to store database -const DEFAULT_DB_CONFIG_PATH: &'static str = "db"; +use crate::arg_enums::Database; /// Shared parameters used by all `CoreParams`. 
#[derive(Debug, StructOpt, Clone)] @@ -79,10 +77,19 @@ impl SharedParams { &self, base_path: &PathBuf, cache_size: usize, + database: Database, ) -> DatabaseConfig { - DatabaseConfig::Path { - path: base_path.join(DEFAULT_DB_CONFIG_PATH), - cache_size, + match database { + Database::RocksDb => DatabaseConfig::RocksDb { + path: base_path.join("db"), + cache_size, + }, + Database::SubDb => DatabaseConfig::SubDb { + path: base_path.join("subdb"), + }, + Database::ParityDb => DatabaseConfig::ParityDb { + path: base_path.join("paritydb"), + }, } } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 5952856bdafd9..2a0739a831beb 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -466,7 +466,7 @@ impl SlotDuration { cb(client.runtime_api(), &BlockId::number(Zero::zero()))?; info!( - "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", + "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", genesis_slot_duration ); diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 70f1c091a250e..307cacbfb2d5b 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -21,6 +21,7 @@ linked-hash-map = "0.5.2" hash-db = "0.15.2" parity-util-mem = { version = "0.6.0", default-features = false, features = ["std"] } codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +blake2-rfc = "0.2.18" sc-client-api = { version = "2.0.0-dev", path = "../api" } sp-core = { version = "2.0.0-dev", path = "../../primitives/core" } @@ -32,6 +33,9 @@ sc-state-db = { version = "0.8.0-dev", path = "../state-db" } sp-trie = { version = "2.0.0-dev", path = "../../primitives/trie" } sp-consensus = { version = "0.8.0-dev", path = "../../primitives/consensus/common" } sp-blockchain = { version = "2.0.0-dev", path = "../../primitives/blockchain" } +sp-database = { version = "2.0.0-dev", path = "../../primitives/database" } +parity-db = { version = "0.1", optional = true } +subdb = { git = "https://github.com/paritytech/subdb", optional = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-dev", path = "../../utils/prometheus" } [dev-dependencies] diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs index 606090ee1401d..07cd9fb866359 100644 --- a/client/db/src/cache/list_storage.rs +++ b/client/db/src/cache/list_storage.rs @@ -18,17 +18,17 @@ use std::sync::Arc; -use kvdb::{KeyValueDB, DBTransaction}; - use sp_blockchain::{Error as ClientError, Result as ClientResult}; use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use crate::utils::{self, db_err, meta_keys}; +use sp_database::{Database, Transaction}; +use crate::utils::{self, meta_keys}; use crate::cache::{CacheItemT, ComplexBlockId}; use crate::cache::list_cache::{CommitOperation, Fork}; use crate::cache::list_entry::{Entry, StorageEntry}; +use crate::DbHash; /// Single list-cache metadata. #[derive(Debug)] @@ -97,19 +97,19 @@ pub struct DbColumns { pub struct DbStorage { name: Vec, meta_key: Vec, - db: Arc, + db: Arc>, columns: DbColumns, } impl DbStorage { /// Create new database-backed list cache storage. - pub fn new(name: Vec, db: Arc, columns: DbColumns) -> Self { + pub fn new(name: Vec, db: Arc>, columns: DbColumns) -> Self { let meta_key = meta::key(&name); DbStorage { name, meta_key, db, columns } } /// Get reference to the database. 
- pub fn db(&self) -> &Arc { &self.db } + pub fn db(&self) -> &Arc> { &self.db } /// Get reference to the database columns. pub fn columns(&self) -> &DbColumns { &self.columns } @@ -135,49 +135,45 @@ impl Storage for DbStorage { } fn read_meta(&self) -> ClientResult> { - self.db.get(self.columns.meta, &self.meta_key) - .map_err(db_err) - .and_then(|meta| match meta { - Some(meta) => meta::decode(&*meta), - None => Ok(Metadata { - finalized: None, - unfinalized: Vec::new(), - }), + match self.db.get(self.columns.meta, &self.meta_key) { + Some(meta) => meta::decode(&*meta), + None => Ok(Metadata { + finalized: None, + unfinalized: Vec::new(), }) + } } fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { - self.db.get(self.columns.cache, &self.encode_block_id(at)) - .map_err(db_err) - .and_then(|entry| match entry { - Some(entry) => StorageEntry::::decode(&mut &entry[..]) - .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) - .map(Some), - None => Ok(None), - }) + match self.db.get(self.columns.cache, &self.encode_block_id(at)) { + Some(entry) => StorageEntry::::decode(&mut &entry[..]) + .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) + .map(Some), + None => Ok(None), + } } } /// Database-backed list cache storage transaction. pub struct DbStorageTransaction<'a> { storage: &'a DbStorage, - tx: &'a mut DBTransaction, + tx: &'a mut Transaction, } impl<'a> DbStorageTransaction<'a> { /// Create new database transaction. - pub fn new(storage: &'a DbStorage, tx: &'a mut DBTransaction) -> Self { + pub fn new(storage: &'a DbStorage, tx: &'a mut Transaction) -> Self { DbStorageTransaction { storage, tx } } } impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> { fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { - self.tx.put(self.storage.columns.cache, &self.storage.encode_block_id(at), &entry.encode()); + self.tx.set_from_vec(self.storage.columns.cache, &self.storage.encode_block_id(at), entry.encode()); } fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.tx.delete(self.storage.columns.cache, &self.storage.encode_block_id(at)); + self.tx.remove(self.storage.columns.cache, &self.storage.encode_block_id(at)); } fn update_meta( @@ -186,10 +182,10 @@ impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorag unfinalized: &[Fork], operation: &CommitOperation, ) { - self.tx.put( + self.tx.set_from_vec( self.storage.columns.meta, &self.storage.meta_key, - &meta::encode(best_finalized_entry, unfinalized, operation)); + meta::encode(best_finalized_entry, unfinalized, operation)); } } diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 8fd1adc094ae4..8d3e1f358b322 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -19,14 +19,14 @@ use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; use parking_lot::RwLock; -use kvdb::{KeyValueDB, DBTransaction}; - use sc_client_api::blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, Cache as BlockchainCache}; use sp_blockchain::Result as ClientResult; +use sp_database::{Database, Transaction}; use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use crate::utils::{self, COLUMN_META, db_err}; +use crate::utils::{self, COLUMN_META}; +use crate::DbHash; use self::list_cache::{ListCache, PruningStrategy}; @@ -78,7 +78,7 @@ impl CacheItemT for T where T: Clone + Decode + Encode 
+ PartialEq {} /// Database-backed blockchain data cache. pub struct DbCache { cache_at: HashMap, self::list_storage::DbStorage>>, - db: Arc, + db: Arc>, key_lookup_column: u32, header_column: u32, cache_column: u32, @@ -89,7 +89,7 @@ pub struct DbCache { impl DbCache { /// Create new cache. pub fn new( - db: Arc, + db: Arc>, key_lookup_column: u32, header_column: u32, cache_column: u32, @@ -113,7 +113,7 @@ impl DbCache { } /// Begin cache transaction. - pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> { + pub fn transaction<'a>(&'a mut self, tx: &'a mut Transaction) -> DbCacheTransaction<'a, Block> { DbCacheTransaction { cache: self, tx, @@ -125,7 +125,7 @@ impl DbCache { /// Begin cache transaction with given ops. pub fn transaction_with_ops<'a>( &'a mut self, - tx: &'a mut DBTransaction, + tx: &'a mut Transaction, ops: DbCacheTransactionOps, ) -> DbCacheTransaction<'a, Block> { DbCacheTransaction { @@ -169,7 +169,7 @@ impl DbCache { fn get_cache_helper<'a, Block: BlockT>( cache_at: &'a mut HashMap, self::list_storage::DbStorage>>, name: CacheKeyId, - db: &Arc, + db: &Arc>, key_lookup: u32, header: u32, cache: u32, @@ -215,7 +215,7 @@ impl DbCacheTransactionOps { /// Database-backed blockchain data cache transaction valid for single block import. pub struct DbCacheTransaction<'a, Block: BlockT> { cache: &'a mut DbCache, - tx: &'a mut DBTransaction, + tx: &'a mut Transaction, cache_at_ops: HashMap>>, best_finalized_block: Option>, } @@ -328,7 +328,7 @@ impl BlockchainCache for DbCacheSync { let genesis_hash = cache.genesis_hash; let cache_contents = vec![(*key, data)].into_iter().collect(); let db = cache.db.clone(); - let mut dbtx = DBTransaction::new(); + let mut dbtx = Transaction::new(); let tx = cache.transaction(&mut dbtx); let tx = tx.on_block_insert( ComplexBlockId::new(Default::default(), Zero::zero()), @@ -337,7 +337,7 @@ impl BlockchainCache for DbCacheSync { EntryType::Genesis, )?; let tx_ops = tx.into_ops(); - db.write(dbtx).map_err(db_err)?; + db.commit(dbtx); cache.commit(tx_ops)?; Ok(()) } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index a28cd604fe363..5447e8b72567f 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -19,7 +19,6 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use hash_db::Prefix; -use kvdb::{KeyValueDB, DBTransaction}; use codec::{Decode, Encode}; use parking_lot::RwLock; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -27,12 +26,14 @@ use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; +use sp_database::Transaction; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, }; use sp_runtime::generic::{BlockId, DigestItem, ChangesTrieSignal}; -use sp_state_machine::{DBValue, ChangesTrieBuildCache, ChangesTrieCacheAction}; -use crate::utils::{self, Meta, meta_keys, db_err}; +use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction}; +use crate::{Database, DbHash}; +use crate::utils::{self, Meta, meta_keys}; use crate::cache::{ DbCacheSync, DbCache, DbCacheTransactionOps, ComplexBlockId, EntryType as CacheEntryType, @@ -76,7 +77,7 @@ impl From> for DbChangesTrieStorageT /// Stores all tries in separate DB column. 
/// Lock order: meta, tries_meta, cache, build_cache. pub struct DbChangesTrieStorage { - db: Arc, + db: Arc>, meta_column: u32, changes_tries_column: u32, key_lookup_column: u32, @@ -111,7 +112,7 @@ struct ChangesTriesMeta { impl DbChangesTrieStorage { /// Create new changes trie storage. pub fn new( - db: Arc, + db: Arc>, meta_column: u32, changes_tries_column: u32, key_lookup_column: u32, @@ -149,7 +150,7 @@ impl DbChangesTrieStorage { /// Commit new changes trie. pub fn commit( &self, - tx: &mut DBTransaction, + tx: &mut Transaction, mut changes_trie: MemoryDB>, parent_block: ComplexBlockId, block: ComplexBlockId, @@ -160,7 +161,7 @@ impl DbChangesTrieStorage { ) -> ClientResult> { // insert changes trie, associated with block, into DB for (key, (val, _)) in changes_trie.drain() { - tx.put(self.changes_tries_column, key.as_ref(), &val); + tx.set(self.changes_tries_column, key.as_ref(), &val); } // if configuration has not been changed AND block is not finalized => nothing to do here @@ -205,7 +206,7 @@ impl DbChangesTrieStorage { /// Called when block is finalized. pub fn finalize( &self, - tx: &mut DBTransaction, + tx: &mut Transaction, parent_block_hash: Block::Hash, block_hash: Block::Hash, block_num: NumberFor, @@ -254,7 +255,7 @@ impl DbChangesTrieStorage { /// When block is reverted. pub fn revert( &self, - tx: &mut DBTransaction, + tx: &mut Transaction, block: &ComplexBlockId, ) -> ClientResult> { Ok(self.cache.0.write().transaction(tx) @@ -280,7 +281,7 @@ impl DbChangesTrieStorage { /// Prune obsolete changes tries. fn prune( &self, - tx: &mut DBTransaction, + tx: &mut Transaction, block_hash: Block::Hash, block_num: NumberFor, new_header: Option<&Block::Header>, @@ -313,7 +314,7 @@ impl DbChangesTrieStorage { hash: convert_hash(&block_hash), number: block_num, }, - |node| tx.delete(self.changes_tries_column, node.as_ref()), + |node| tx.remove(self.changes_tries_column, node.as_ref()), ); next_digest_range_start = end + One::one(); @@ -486,18 +487,17 @@ where self.build_cache.read().with_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result, String> { - self.db.get(self.changes_tries_column, key.as_ref()) - .map_err(|err| format!("{}", err)) + fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result>, String> { + Ok(self.db.get(self.changes_tries_column, key.as_ref())) } } /// Read changes tries metadata from database. fn read_tries_meta( - db: &dyn KeyValueDB, + db: &dyn Database, meta_column: u32, ) -> ClientResult> { - match db.get(meta_column, meta_keys::CHANGES_TRIES_META).map_err(db_err)? { + match db.get(meta_column, meta_keys::CHANGES_TRIES_META) { Some(h) => match Decode::decode(&mut &h[..]) { Ok(h) => Ok(h), Err(err) => Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), @@ -511,11 +511,11 @@ fn read_tries_meta( /// Write changes tries metadata from database. 
fn write_tries_meta( - tx: &mut DBTransaction, + tx: &mut Transaction, meta_column: u32, meta: &ChangesTriesMeta, ) { - tx.put(meta_column, meta_keys::CHANGES_TRIES_META, &meta.encode()); + tx.set_from_vec(meta_column, meta_keys::CHANGES_TRIES_META, meta.encode()); } #[cfg(test)] @@ -707,7 +707,7 @@ mod tests { let finalize_block = |number| { let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); - let mut tx = DBTransaction::new(); + let mut tx = Transaction::new(); let cache_ops = backend.changes_tries_storage.finalize( &mut tx, *header.parent_hash(), @@ -716,7 +716,7 @@ mod tests { None, None, ).unwrap(); - backend.storage.db.write(tx).unwrap(); + backend.storage.db.commit(tx); backend.changes_tries_storage.post_commit(Some(cache_ops)); }; diff --git a/client/db/src/children.rs b/client/db/src/children.rs index 2ef67de6a83e1..3916321f17286 100644 --- a/client/db/src/children.rs +++ b/client/db/src/children.rs @@ -16,23 +16,21 @@ //! Functionality for reading and storing children hashes from db. -use kvdb::{KeyValueDB, DBTransaction}; use codec::{Encode, Decode}; use sp_blockchain; use std::hash::Hash; +use sp_database::{Database, Transaction}; +use crate::DbHash; /// Returns the hashes of the children blocks of the block with `parent_hash`. pub fn read_children< K: Eq + Hash + Clone + Encode + Decode, V: Eq + Hash + Clone + Encode + Decode, ->(db: &dyn KeyValueDB, column: u32, prefix: &[u8], parent_hash: K) -> sp_blockchain::Result> { +>(db: &dyn Database, column: u32, prefix: &[u8], parent_hash: K) -> sp_blockchain::Result> { let mut buf = prefix.to_vec(); parent_hash.using_encoded(|s| buf.extend(s)); - let raw_val_opt = match db.get(column, &buf[..]) { - Ok(raw_val_opt) => raw_val_opt, - Err(_) => return Err(sp_blockchain::Error::Backend("Error reading value from database".into())), - }; + let raw_val_opt = db.get(column, &buf[..]); let raw_val = match raw_val_opt { Some(val) => val, @@ -53,7 +51,7 @@ pub fn write_children< K: Eq + Hash + Clone + Encode + Decode, V: Eq + Hash + Clone + Encode + Decode, >( - tx: &mut DBTransaction, + tx: &mut Transaction, column: u32, prefix: &[u8], parent_hash: K, @@ -61,34 +59,35 @@ pub fn write_children< ) { let mut key = prefix.to_vec(); parent_hash.using_encoded(|s| key.extend(s)); - tx.put_vec(column, &key[..], children_hashes.encode()); + tx.set_from_vec(column, &key[..], children_hashes.encode()); } /// Prepare transaction to remove the children of `parent_hash`. 
pub fn remove_children< K: Eq + Hash + Clone + Encode + Decode, >( - tx: &mut DBTransaction, + tx: &mut Transaction, column: u32, prefix: &[u8], parent_hash: K, ) { let mut key = prefix.to_vec(); parent_hash.using_encoded(|s| key.extend(s)); - tx.delete(column, &key[..]); + tx.remove(column, &key); } #[cfg(test)] mod tests { use super::*; + use std::sync::Arc; #[test] fn children_write_read_remove() { const PREFIX: &[u8] = b"children"; - let db = ::kvdb_memorydb::create(1); + let db = Arc::new(sp_database::MemDb::default()); - let mut tx = DBTransaction::new(); + let mut tx = Transaction::new(); let mut children1 = Vec::new(); children1.push(1_3); @@ -100,19 +99,19 @@ mod tests { children2.push(1_6); write_children(&mut tx, 0, PREFIX, 1_2, children2); - db.write(tx.clone()).expect("(2) Committing transaction failed"); + db.commit(tx.clone()); - let r1: Vec = read_children(&db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); - let r2: Vec = read_children(&db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); + let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); + let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); assert_eq!(r1, vec![1_3, 1_5]); assert_eq!(r2, vec![1_4, 1_6]); remove_children(&mut tx, 0, PREFIX, 1_2); - db.write(tx).expect("(2) Committing transaction failed"); + db.commit(tx); - let r1: Vec = read_children(&db, 0, PREFIX, 1_1).expect("(2) Getting r1 failed"); - let r2: Vec = read_children(&db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed"); + let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(2) Getting r1 failed"); + let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed"); assert_eq!(r1, vec![1_3, 1_5]); assert_eq!(r2.len(), 0); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 70f666aebf0e5..782e0f6db2a62 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Client backend that uses RocksDB database as storage. +//! Client backend that is backed by a database. //! //! # Canonicality vs. Finality //! 
@@ -40,9 +40,13 @@ mod storage_cache; mod upgrade; mod utils; mod stats; +#[cfg(feature = "parity-db")] +mod parity_db; +#[cfg(feature = "subdb")] +mod subdb; use std::sync::Arc; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::io; use std::collections::HashMap; @@ -57,8 +61,8 @@ use sp_blockchain::{ }; use codec::{Decode, Encode}; use hash_db::Prefix; -use kvdb::{KeyValueDB, DBTransaction}; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; +use sp_database::Transaction; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; use sp_core::storage::{well_known_keys, ChildInfo}; @@ -75,7 +79,7 @@ use sp_state_machine::{ StorageCollection, ChildStorageCollection, backend::Backend as StateBackend, StateMachineStats, }; -use crate::utils::{DatabaseType, Meta, db_err, meta_keys, read_db, read_meta}; +use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; use sc_client::leaves::{LeafSet, FinalizationDisplaced}; use sc_state_db::StateDb; @@ -83,15 +87,15 @@ use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; -pub use sc_state_db::PruningMode; use prometheus_endpoint::Registry; +// Re-export the Database trait so that one can pass an implementation of it. +pub use sp_database::Database; +pub use sc_state_db::PruningMode; + #[cfg(any(feature = "kvdb-rocksdb", test))] pub use bench::BenchmarkingState; -#[cfg(feature = "test-helpers")] -use sc_client::in_mem::Backend as InMemoryBackend; - const CANONICALIZATION_DELAY: u64 = 4096; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; @@ -103,8 +107,8 @@ pub type DbState<B> = sp_state_machine::TrieBackend< Arc<dyn sp_state_machine::Storage<HashFor<B>>>, HashFor<B> >; -/// Re-export the KVDB trait so that one can pass an implementation of it. -pub use kvdb; +/// Hash type that this backend uses for the database. +pub type DbHash = [u8; 32]; /// A reference tracking state. /// @@ -279,17 +283,42 @@ pub struct DatabaseSettings { } /// Where to find the database.. +#[derive(Clone)] pub enum DatabaseSettingsSrc { - /// Load a database from a given path. Recommended for most uses. - Path { + /// Load a RocksDB database from a given path. Recommended for most uses. + RocksDb { /// Path to the database. path: PathBuf, /// Cache size in MiB. cache_size: usize, }, + /// Load a ParityDb database from a given path. + ParityDb { + /// Path to the database. + path: PathBuf, + }, + + /// Load a Subdb database from a given path. + SubDb { + /// Path to the database. + path: PathBuf, + }, + /// Use a custom already-open database. - Custom(Arc<dyn KeyValueDB>), + Custom(Arc<dyn Database<DbHash>>), +} + +impl DatabaseSettingsSrc { + /// Return the database path for databases that are on disk. + pub fn path(&self) -> Option<&Path> { + match self { + DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()), + DatabaseSettingsSrc::ParityDb { path, .. } => Some(path.as_path()), + DatabaseSettingsSrc::SubDb { path, .. } => Some(path.as_path()), + DatabaseSettingsSrc::Custom(_) => None, + } + } } /// Create an instance of db-backed client. 
@@ -357,26 +386,26 @@ struct PendingBlock { } // wrapper that implements trait required for state_db -struct StateMetaDb<'a>(&'a dyn KeyValueDB); +struct StateMetaDb<'a>(&'a dyn Database); impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { type Error = io::Error; fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.get(columns::STATE_META, key).map(|r| r.map(|v| v.to_vec())) + Ok(self.0.get(columns::STATE_META, key)) } } /// Block database pub struct BlockchainDb { - db: Arc, + db: Arc>, meta: Arc, Block::Hash>>>, leaves: RwLock>>, header_metadata_cache: HeaderMetadataCache, } impl BlockchainDb { - fn new(db: Arc) -> ClientResult { + fn new(db: Arc>) -> ClientResult { let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; Ok(BlockchainDb { @@ -547,11 +576,11 @@ pub struct BlockImportOperation { } impl BlockImportOperation { - fn apply_aux(&mut self, transaction: &mut DBTransaction) { + fn apply_aux(&mut self, transaction: &mut Transaction) { for (key, maybe_val) in self.aux_ops.drain(..) { match maybe_val { - Some(val) => transaction.put_vec(columns::AUX, &key, val), - None => transaction.delete(columns::AUX, &key), + Some(val) => transaction.set_from_vec(columns::AUX, &key, val), + None => transaction.remove(columns::AUX, &key), } } } @@ -676,7 +705,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } struct StorageDb { - pub db: Arc, + pub db: Arc>, pub state_db: StateDb>, } @@ -693,7 +722,7 @@ impl sc_state_db::NodeDb for StorageDb { type Key = [u8]; fn get(&self, key: &[u8]) -> Result>, Self::Error> { - self.db.get(columns::STATE, key).map(|r| r.map(|v| v.to_vec())) + Ok(self.db.get(columns::STATE, key)) } } @@ -776,13 +805,14 @@ impl Backend { /// The pruning window is how old a block must be before the state is pruned. pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { let db = crate::utils::open_database::(&config, DatabaseType::Full)?; - Self::from_kvdb(db as Arc<_>, canonicalization_delay, &config) + Self::from_database(db as Arc<_>, canonicalization_delay, &config) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { - let db = Arc::new(kvdb_memorydb::create(crate::utils::NUM_COLUMNS)); + let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); + let db = sp_database::as_database(db); let db_setting = DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), @@ -793,8 +823,8 @@ impl Backend { Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") } - fn from_kvdb( - db: Arc, + fn from_database( + db: Arc>, canonicalization_delay: u64, config: &DatabaseSettings, ) -> ClientResult { @@ -843,61 +873,6 @@ impl Backend { }) } - /// Returns in-memory blockchain that contains the same set of blocks as self. 
- #[cfg(feature = "test-helpers")] - pub fn as_in_memory(&self) -> InMemoryBackend { - use sc_client_api::backend::{Backend as ClientBackend, BlockImportOperation}; - use sc_client::blockchain::Backend as BlockchainBackend; - - let inmem = InMemoryBackend::::new(); - - // get all headers hashes && sort them by number (could be duplicate) - let mut headers: Vec<(NumberFor, Block::Hash, Block::Header)> = Vec::new(); - for (_, header) in self.blockchain.db.iter(columns::HEADER) { - let header = Block::Header::decode(&mut &header[..]).unwrap(); - let hash = header.hash(); - let number = *header.number(); - let pos = headers.binary_search_by(|item| item.0.cmp(&number)); - match pos { - Ok(pos) => headers.insert(pos, (number, hash, header)), - Err(pos) => headers.insert(pos, (number, hash, header)), - } - } - - // insert all other headers + bodies + justifications - let info = self.blockchain.info(); - for (number, hash, header) in headers { - let id = BlockId::Hash(hash); - let justification = self.blockchain.justification(id).unwrap(); - let body = self.blockchain.body(id).unwrap(); - let state = self.state_at(id).unwrap().pairs(); - - let new_block_state = if number.is_zero() { - NewBlockState::Final - } else if hash == info.best_hash { - NewBlockState::Best - } else { - NewBlockState::Normal - }; - let mut op = inmem.begin_operation().unwrap(); - op.set_block_data(header, body, justification, new_block_state).unwrap(); - op.update_db_storage(vec![(None, state.into_iter().map(|(k, v)| (k, Some(v))).collect())]) - .unwrap(); - inmem.commit_operation(op).unwrap(); - } - - // and now finalize the best block we have - inmem.finalize_block(BlockId::Hash(info.finalized_hash), None).unwrap(); - - inmem - } - - /// Returns total number of blocks (headers) in the block DB. - #[cfg(feature = "test-helpers")] - pub fn blocks_count(&self) -> u64 { - self.blockchain.db.iter(columns::HEADER).count() as u64 - } - /// Handle setting head within a transaction. `route_to` should be the last /// block that existed in the database. `best_to` should be the best block /// to be set. @@ -907,7 +882,7 @@ impl Backend { /// to be best, `route_to` should equal to `best_to`. fn set_head_with_transaction( &self, - transaction: &mut DBTransaction, + transaction: &mut Transaction, route_to: Block::Hash, best_to: (NumberFor, Block::Hash), ) -> ClientResult<(Vec, Vec)> { @@ -957,7 +932,7 @@ impl Backend { } let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; - transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key); + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); utils::insert_number_to_key_mapping( transaction, columns::KEY_LOOKUP, @@ -984,7 +959,7 @@ impl Backend { fn finalize_block_with_transaction( &self, - transaction: &mut DBTransaction, + transaction: &mut Transaction, hash: &Block::Hash, header: &Block::Header, last_finalized: Option, @@ -1005,10 +980,10 @@ impl Backend { )?; if let Some(justification) = justification { - transaction.put( + transaction.set_from_vec( columns::JUSTIFICATION, &utils::number_and_hash_to_lookup_key(number, hash)?, - &justification.encode(), + justification.encode(), ); } Ok((*hash, number, false, true)) @@ -1017,7 +992,7 @@ impl Backend { // performs forced canonicalization with a delay after importing a non-finalized block. 
fn force_delayed_canonicalize( &self, - transaction: &mut DBTransaction, + transaction: &mut Transaction, hash: Block::Hash, number: NumberFor, ) @@ -1051,7 +1026,7 @@ impl Backend { fn try_commit_operation(&self, mut operation: BlockImportOperation) -> ClientResult<()> { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); let mut finalization_displaced_leaves = None; operation.apply_aux(&mut transaction); @@ -1103,17 +1078,17 @@ impl Backend { header_metadata, ); - transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode()); + transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); if let Some(body) = &pending_block.body { - transaction.put(columns::BODY, &lookup_key, &body.encode()); + transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); } if let Some(justification) = pending_block.justification { - transaction.put(columns::JUSTIFICATION, &lookup_key, &justification.encode()); + transaction.set_from_vec(columns::JUSTIFICATION, &lookup_key, justification.encode()); } if number.is_zero() { - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); - transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); // for tests, because config is set from within the reset_storage if operation.changes_trie_config_update.is_none() { @@ -1260,31 +1235,17 @@ impl Backend { None }; - let write_result = self.storage.db.write(transaction).map_err(db_err); + self.storage.db.commit(transaction); if let Some(( number, hash, enacted, retracted, - displaced_leaf, + _displaced_leaf, is_best, mut cache, )) = imported { - if let Err(e) = write_result { - let mut leaves = self.blockchain.leaves.write(); - let mut undo = leaves.undo(); - if let Some(displaced_leaf) = displaced_leaf { - undo.undo_import(displaced_leaf); - } - - if let Some(finalization_displaced) = finalization_displaced_leaves { - undo.undo_finalization(finalization_displaced); - } - - return Err(e) - } - cache.sync_cache( &enacted, &retracted, @@ -1317,7 +1278,7 @@ impl Backend { // was not a child of the last finalized block. 
fn note_finalized( &self, - transaction: &mut DBTransaction, + transaction: &mut Transaction, is_inserted: bool, f_header: &Block::Header, f_hash: Block::Hash, @@ -1328,7 +1289,7 @@ impl Backend { if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); let commit = self.storage.state_db.canonicalize_block(&f_hash) .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; @@ -1357,18 +1318,18 @@ impl Backend { } } -fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { +fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db::CommitSet>) { for (key, val) in commit.data.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); + transaction.set_from_vec(columns::STATE, &key[..], val); } for key in commit.data.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); + transaction.remove(columns::STATE, &key[..]); } for (key, val) in commit.meta.inserted.into_iter() { - transaction.put(columns::STATE_META, &key[..], &val); + transaction.set_from_vec(columns::STATE_META, &key[..], val); } for key in commit.meta.deleted.into_iter() { - transaction.delete(columns::STATE_META, &key[..]); + transaction.remove(columns::STATE_META, &key[..]); } } @@ -1380,19 +1341,19 @@ impl sc_client_api::backend::AuxStore for Backend where Block: Blo I: IntoIterator, D: IntoIterator, >(&self, insert: I, delete: D) -> ClientResult<()> { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); for (k, v) in insert { - transaction.put(columns::AUX, k, v); + transaction.set(columns::AUX, k, v); } for k in delete { - transaction.delete(columns::AUX, k); + transaction.remove(columns::AUX, k); } - self.storage.db.write(transaction).map_err(db_err)?; + self.storage.db.commit(transaction); Ok(()) } fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.storage.db.get(columns::AUX, key).map(|r| r.map(|v| v.to_vec())).map_err(db_err)?) 
+ Ok(self.storage.db.get(columns::AUX, key)) } } @@ -1453,36 +1414,24 @@ impl sc_client_api::backend::Backend for Backend { fn finalize_block(&self, block: BlockId, justification: Option) -> ClientResult<()> { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); let hash = self.blockchain.expect_block_hash_from_id(&block)?; let header = self.blockchain.expect_header(block)?; let mut displaced = None; - let commit = |displaced| { - let mut changes_trie_cache_ops = None; - let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( - &mut transaction, - &hash, - &header, - None, - justification, - &mut changes_trie_cache_ops, - displaced, - )?; - self.storage.db.write(transaction).map_err(db_err)?; - self.blockchain.update_meta(hash, number, is_best, is_finalized); - self.changes_tries_storage.post_commit(changes_trie_cache_ops); - Ok(()) - }; - match commit(&mut displaced) { - Ok(()) => self.storage.state_db.apply_pending(), - e @ Err(_) => { - self.storage.state_db.revert_pending(); - if let Some(displaced) = displaced { - self.blockchain.leaves.write().undo().undo_finalization(displaced); - } - return e; - } - } + + let mut changes_trie_cache_ops = None; + let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( + &mut transaction, + &hash, + &header, + None, + justification, + &mut changes_trie_cache_ops, + &mut displaced, + )?; + self.storage.db.commit(transaction); + self.blockchain.update_meta(hash, number, is_best, is_finalized); + self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) } @@ -1497,11 +1446,12 @@ impl sc_client_api::backend::Backend for Backend { fn usage_info(&self) -> Option { let (io_stats, state_stats) = self.io_stats.take_or_else(|| ( - self.storage.db.io_stats(kvdb::IoStatsKind::SincePrevious), + // TODO: implement DB stats and cache size retrieval + kvdb::IoStats::empty(), self.state_usage.take(), ) ); - let database_cache = MemorySize::from_bytes(parity_util_mem::malloc_size(&*self.storage.db)); + let database_cache = MemorySize::from_bytes(0); let state_cache = MemorySize::from_bytes( (*&self.shared_cache).lock().used_storage_cache_size(), ); @@ -1547,7 +1497,7 @@ impl sc_client_api::backend::Backend for Backend { if best_number.is_zero() { return Ok(c.saturated_into::>()) } - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); match self.storage.state_db.revert_one() { Some(commit) => { apply_state_commit(&mut transaction, commit); @@ -1571,13 +1521,13 @@ impl sc_client_api::backend::Backend for Backend { removed_number, ), )?; - transaction.put(columns::META, meta_keys::BEST_BLOCK, &key); if update_finalized { - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &key); + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, key.clone()); } - transaction.delete(columns::KEY_LOOKUP, removed.hash().as_ref()); + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); + transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); - self.storage.db.write(transaction).map_err(db_err)?; + self.storage.db.commit(transaction); self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(best_hash, best_number, true, update_finalized); } @@ -1591,12 +1541,12 @@ impl sc_client_api::backend::Backend for Backend { let reverted = revert_blocks()?; let revert_leaves = 
|| -> ClientResult<()> { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); let mut leaves = self.blockchain.leaves.write(); leaves.revert(best_hash, best_number); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - self.storage.db.write(transaction).map_err(db_err)?; + self.storage.db.commit(transaction); Ok(()) }; @@ -1959,7 +1909,7 @@ pub(crate) mod tests { assert_eq!(backend.storage.db.get( columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap().unwrap(), &b"hello"[..]); + ).unwrap(), &b"hello"[..]); hash }; @@ -1996,7 +1946,7 @@ pub(crate) mod tests { assert_eq!(backend.storage.db.get( columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap().unwrap(), &b"hello"[..]); + ).unwrap(), &b"hello"[..]); hash }; @@ -2034,7 +1984,7 @@ pub(crate) mod tests { assert!(backend.storage.db.get( columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap().is_some()); + ).is_some()); hash }; @@ -2068,7 +2018,7 @@ pub(crate) mod tests { assert!(backend.storage.db.get( columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap().is_none()); + ).is_none()); } backend.finalize_block(BlockId::Number(1), None).unwrap(); @@ -2077,7 +2027,7 @@ pub(crate) mod tests { assert!(backend.storage.db.get( columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap().is_none()); + ).is_none()); } #[test] diff --git a/client/db/src/light.rs b/client/db/src/light.rs index e3dcdedd5096d..c87388a9546fc 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -20,8 +20,6 @@ use std::{sync::Arc, collections::HashMap}; use std::convert::TryInto; use parking_lot::RwLock; -use kvdb::{KeyValueDB, DBTransaction}; - use sc_client_api::{backend::{AuxStore, NewBlockState}, UsageInfo}; use sc_client::blockchain::{ BlockStatus, Cache as BlockchainCache,Info as BlockchainInfo, @@ -33,13 +31,14 @@ use sp_blockchain::{ HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys, }; +use sp_database::{Database, Transaction}; use sc_client::light::blockchain::Storage as LightBlockchainStorage; use codec::{Decode, Encode}; use sp_runtime::generic::{DigestItem, BlockId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HashFor}; use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; -use crate::utils::{self, meta_keys, DatabaseType, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; -use crate::{DatabaseSettings, FrozenForDuration}; +use crate::utils::{self, meta_keys, DatabaseType, Meta, read_db, block_id_to_lookup_key, read_meta}; +use crate::{DatabaseSettings, FrozenForDuration, DbHash}; use log::{trace, warn, debug}; pub(crate) mod columns { @@ -59,7 +58,7 @@ const CHANGES_TRIE_CHT_PREFIX: u8 = 1; /// Light blockchain storage. Stores most recent headers + CHTs for older headers. /// Locks order: meta, cache. pub struct LightStorage { - db: Arc, + db: Arc>, meta: RwLock, Block::Hash>>, cache: Arc>, header_metadata_cache: HeaderMetadataCache, @@ -78,14 +77,11 @@ impl LightStorage { /// Create new memory-backed `LightStorage` for tests. 
#[cfg(any(test, feature = "test-helpers"))] pub fn new_test() -> Self { - use utils::NUM_COLUMNS; - - let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS)); - + let db = Arc::new(sp_database::MemDb::default()); Self::from_kvdb(db as Arc<_>).expect("failed to create test-db") } - fn from_kvdb(db: Arc) -> ClientResult { + fn from_kvdb(db: Arc>) -> ClientResult { let meta = read_meta::(&*db, columns::HEADER)?; let cache = DbCache::new( db.clone(), @@ -230,7 +226,7 @@ impl LightStorage { /// to be best, `route_to` should equal to `best_to`. fn set_head_with_transaction( &self, - transaction: &mut DBTransaction, + transaction: &mut Transaction, route_to: Block::Hash, best_to: (NumberFor, Block::Hash), ) -> ClientResult<()> { @@ -266,7 +262,7 @@ impl LightStorage { } } - transaction.put(columns::META, meta_keys::BEST_BLOCK, &lookup_key); + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); utils::insert_number_to_key_mapping( transaction, columns::KEY_LOOKUP, @@ -280,7 +276,7 @@ impl LightStorage { // Note that a block is finalized. Only call with child of last finalized block. fn note_finalized( &self, - transaction: &mut DBTransaction, + transaction: &mut Transaction, header: &Block::Header, hash: Block::Hash, ) -> ClientResult<()> { @@ -293,7 +289,7 @@ impl LightStorage { } let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); // build new CHT(s) if required if let Some(new_cht_number) = cht::is_build_required(cht::size(), *header.number()) { @@ -309,7 +305,7 @@ impl LightStorage { let new_header_cht_root = cht::compute_root::, _>( cht::size(), new_cht_number, cht_range.map(|num| self.hash(num)) )?; - transaction.put( + transaction.set( columns::CHT, &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, new_header_cht_root.as_ref() @@ -327,7 +323,7 @@ impl LightStorage { cht::size(), new_cht_number, cht_range .map(|num| self.changes_trie_root(BlockId::Number(num))) )?; - transaction.put( + transaction.set( columns::CHT, &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, new_changes_trie_cht_root.as_ref() @@ -350,7 +346,7 @@ impl LightStorage { prune_block, hash )?; - transaction.delete(columns::HEADER, &lookup_key); + transaction.remove(columns::HEADER, &lookup_key); } prune_block += One::one(); } @@ -377,7 +373,7 @@ impl LightStorage { } let cht_start = cht::start_number(cht_size, cht_number); - self.db.get(columns::CHT, &cht_key(cht_type, cht_start)?).map_err(db_err)? + self.db.get(columns::CHT, &cht_key(cht_type, cht_start)?) 
.ok_or_else(no_cht_for_block) .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) .map(Some) @@ -394,18 +390,19 @@ impl AuxStore for LightStorage I: IntoIterator, D: IntoIterator, >(&self, insert: I, delete: D) -> ClientResult<()> { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); for (k, v) in insert { - transaction.put(columns::AUX, k, v); + transaction.set(columns::AUX, k, v); } for k in delete { - transaction.delete(columns::AUX, k); + transaction.remove(columns::AUX, k); } - self.db.write(transaction).map_err(db_err) + self.db.commit(transaction); + Ok(()) } fn get_aux(&self, key: &[u8]) -> ClientResult>> { - self.db.get(columns::AUX, key).map(|r| r.map(|v| v.to_vec())).map_err(db_err) + Ok(self.db.get(columns::AUX, key)) } } @@ -419,7 +416,7 @@ impl LightBlockchainStorage for LightStorage leaf_state: NewBlockState, aux_ops: Vec<(Vec, Option>)>, ) -> ClientResult<()> { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); let hash = header.hash(); let number = *header.number(); @@ -427,8 +424,8 @@ impl LightBlockchainStorage for LightStorage for (key, maybe_val) in aux_ops { match maybe_val { - Some(val) => transaction.put_vec(columns::AUX, &key, val), - None => transaction.delete(columns::AUX, &key), + Some(val) => transaction.set_from_vec(columns::AUX, &key, val), + None => transaction.remove(columns::AUX, &key), } } @@ -445,7 +442,7 @@ impl LightBlockchainStorage for LightStorage number, hash, )?; - transaction.put(columns::HEADER, &lookup_key, &header.encode()); + transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); let header_metadata = CachedHeaderMetadata::from(&header); self.header_metadata_cache.insert_header_metadata( @@ -456,7 +453,7 @@ impl LightBlockchainStorage for LightStorage let is_genesis = number.is_zero(); if is_genesis { self.cache.0.write().set_genesis_hash(hash); - transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); } let finalized = match leaf_state { @@ -493,7 +490,7 @@ impl LightBlockchainStorage for LightStorage debug!("Light DB Commit {:?} ({})", hash, number); - self.db.write(transaction).map_err(db_err)?; + self.db.commit(transaction); cache.commit(cache_ops) .expect("only fails if cache with given name isn't loaded yet;\ cache is already loaded because there are cache_ops; qed"); @@ -509,9 +506,9 @@ impl LightBlockchainStorage for LightStorage let hash = header.hash(); let number = header.number(); - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?; - self.db.write(transaction).map_err(db_err)?; + self.db.commit(transaction); self.update_meta(hash, header.number().clone(), true, false); Ok(()) } else { @@ -537,7 +534,7 @@ impl LightBlockchainStorage for LightStorage fn finalize_header(&self, id: BlockId) -> ClientResult<()> { if let Some(header) = self.header(id)? { - let mut transaction = DBTransaction::new(); + let mut transaction = Transaction::new(); let hash = header.hash(); let number = *header.number(); self.note_finalized(&mut transaction, &header, hash.clone())?; @@ -550,7 +547,7 @@ impl LightBlockchainStorage for LightStorage )? 
.into_ops(); - self.db.write(transaction).map_err(db_err)?; + self.db.commit(transaction); cache.commit(cache_ops) .expect("only fails if cache with given name isn't loaded yet;\ cache is already loaded because there are cache_ops; qed"); @@ -575,8 +572,9 @@ impl LightBlockchainStorage for LightStorage fn usage_info(&self) -> Option { use sc_client_api::{MemoryInfo, IoInfo, MemorySize}; - let database_cache = MemorySize::from_bytes(parity_util_mem::malloc_size(&*self.db)); - let io_stats = self.io_stats.take_or_else(|| self.db.io_stats(kvdb::IoStatsKind::SincePrevious)); + // TODO: reimplement IO stats + let database_cache = MemorySize::from_bytes(0); + let io_stats = self.io_stats.take_or_else(|| kvdb::IoStats::empty()); Some(UsageInfo { memory: MemoryInfo { @@ -732,21 +730,25 @@ pub(crate) mod tests { #[test] fn import_header_works() { - let db = LightStorage::new_test(); + let raw_db = Arc::new(sp_database::MemDb::default()); + let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.db.iter(columns::HEADER).count(), 1); - assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), 2); + assert_eq!(raw_db.count(columns::HEADER), 1); + assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - assert_eq!(db.db.iter(columns::HEADER).count(), 2); - assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), 4); + assert_eq!(raw_db.count(columns::HEADER), 2); + assert_eq!(raw_db.count(columns::KEY_LOOKUP), 4); } #[test] fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>(header_producer: F) -> LightStorage { - let db = LightStorage::new_test(); + fn insert_headers Header>(header_producer: F) -> + (Arc>, LightStorage) + { + let raw_db = Arc::new(sp_database::MemDb::default()); + let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); let cht_size: u64 = cht::size(); let ucht_size: usize = cht_size as _; @@ -758,8 +760,8 @@ pub(crate) mod tests { for number in 0..cht::size() { prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); } - assert_eq!(db.db.iter(columns::HEADER).count(), 1 + ucht_size); - assert_eq!(db.db.iter(columns::CHT).count(), 0); + assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); + assert_eq!(raw_db.count(columns::CHT), 0); // insert next SIZE blocks && ensure that nothing is pruned for number in 0..(cht_size as _) { @@ -769,8 +771,8 @@ pub(crate) mod tests { || header_producer(&prev_hash, 1 + cht_size + number), ); } - assert_eq!(db.db.iter(columns::HEADER).count(), 1 + ucht_size + ucht_size); - assert_eq!(db.db.iter(columns::CHT).count(), 0); + assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); + assert_eq!(raw_db.count(columns::CHT), 0); // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned // nothing is yet finalized, so nothing is pruned. @@ -779,23 +781,23 @@ pub(crate) mod tests { HashMap::new(), || header_producer(&prev_hash, 1 + cht_size + cht_size), ); - assert_eq!(db.db.iter(columns::HEADER).count(), 2 + ucht_size + ucht_size); - assert_eq!(db.db.iter(columns::CHT).count(), 0); + assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); + assert_eq!(raw_db.count(columns::CHT), 0); // now finalize the block. 
for i in (0..(ucht_size + ucht_size)).map(|i| i + 1) { db.finalize_header(BlockId::Number(i as _)).unwrap(); } db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - db + (raw_db, db) } // when headers are created without changes tries roots - let db = insert_headers(default_header); + let (raw_db, db) = insert_headers(default_header); let cht_size: u64 = cht::size(); - assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht_size + 1) as usize); - assert_eq!(db.db.iter(columns::KEY_LOOKUP).count(), (2 * (1 + cht_size + 1)) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 1); + assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); + assert_eq!(raw_db.count(columns::KEY_LOOKUP), (2 * (1 + cht_size + 1)) as usize); + assert_eq!(raw_db.count(columns::CHT), 1); assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); @@ -803,9 +805,9 @@ pub(crate) mod tests { assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); // when headers are created with changes tries roots - let db = insert_headers(header_with_changes_trie); - assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht_size + 1) as usize); - assert_eq!(db.db.iter(columns::CHT).count(), 2); + let (raw_db, db) = insert_headers(header_with_changes_trie); + assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); + assert_eq!(raw_db.count(columns::CHT), 2); assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); diff --git a/client/db/src/offchain.rs b/client/db/src/offchain.rs index 3d0f8c6795a94..8c58d5f42c391 100644 --- a/client/db/src/offchain.rs +++ b/client/db/src/offchain.rs @@ -21,14 +21,13 @@ use std::{ sync::Arc, }; -use crate::columns; -use kvdb::KeyValueDB; +use crate::{columns, Database, DbHash, Transaction}; use parking_lot::Mutex; /// Offchain local storage #[derive(Clone)] pub struct LocalStorage { - db: Arc, + db: Arc>, locks: Arc, Arc>>>>, } @@ -43,12 +42,13 @@ impl LocalStorage { /// Create new offchain storage for tests (backed by memorydb) #[cfg(any(test, feature = "test-helpers"))] pub fn new_test() -> Self { - let db = Arc::new(kvdb_memorydb::create(crate::utils::NUM_COLUMNS)); + let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); + let db = sp_database::as_database(db); Self::new(db as _) } /// Create offchain local storage with given `KeyValueDB` backend. 
-	pub fn new(db: Arc<dyn KeyValueDB>) -> Self {
+	pub fn new(db: Arc<dyn Database<DbHash>>) -> Self {
 		Self {
 			db,
 			locks: Default::default(),
@@ -59,20 +59,15 @@ impl LocalStorage {
 impl sp_core::offchain::OffchainStorage for LocalStorage {
 	fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) {
 		let key: Vec<u8> = prefix.iter().chain(key).cloned().collect();
-		let mut tx = self.db.transaction();
-		tx.put(columns::OFFCHAIN, &key, value);
+		let mut tx = Transaction::new();
+		tx.set(columns::OFFCHAIN, &key, value);
 
-		if let Err(e) = self.db.write(tx) {
-			log::warn!("Error writing to the offchain DB: {:?}", e);
-		}
+		self.db.commit(tx);
 	}
 
 	fn get(&self, prefix: &[u8], key: &[u8]) -> Option<Vec<u8>> {
 		let key: Vec<u8> = prefix.iter().chain(key).cloned().collect();
 		self.db.get(columns::OFFCHAIN, &key)
-			.ok()
-			.and_then(|x| x)
-			.map(|v| v.to_vec())
 	}
 
 	fn compare_and_set(
@@ -91,9 +86,7 @@ impl sp_core::offchain::OffchainStorage for LocalStorage {
 		let is_set;
 		{
 			let _key_guard = key_lock.lock();
-			let val = self.db.get(columns::OFFCHAIN, &key)
-				.ok()
-				.and_then(|x| x);
+			let val = self.db.get(columns::OFFCHAIN, &key);
 			is_set = val.as_ref().map(|x| &**x) == old_value;
 
 			if is_set {
diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs
new file mode 100644
index 0000000000000..a4e64d310b877
--- /dev/null
+++ b/client/db/src/parity_db.rs
@@ -0,0 +1,56 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+/// A `Database` adapter for parity-db.
+
+use sp_database::{Database, Change, Transaction, ColumnId};
+
+struct DbAdapter(parity_db::Db);
+
+fn handle_err<T>(result: parity_db::Result<T>) -> T {
+	match result {
+		Ok(r) => r,
+		Err(e) => {
+			panic!("Critical database error: {:?}", e);
+		}
+	}
+}
+
+/// Wrap parity-db database into a trait object that implements `sp_database::Database`
+pub fn open<H: Clone>(path: &std::path::Path, num_columns: u32) -> parity_db::Result<std::sync::Arc<dyn Database<H>>> {
+	let db = parity_db::Db::with_columns(path, num_columns as u8)?;
+	Ok(std::sync::Arc::new(DbAdapter(db)))
+}
+
+impl<H: Clone> Database<H> for DbAdapter {
+	fn commit(&self, transaction: Transaction<H>) {
+		handle_err(self.0.commit(transaction.0.into_iter().map(|change|
+			match change {
+				Change::Set(col, key, value) => (col as u8, key, Some(value)),
+				Change::Remove(col, key) => (col as u8, key, None),
+				_ => unimplemented!(),
+			}))
+		);
+	}
+
+	fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
+		handle_err(self.0.get(col as u8, key))
+	}
+
+	fn lookup(&self, _hash: &H) -> Option<Vec<u8>> {
+		unimplemented!();
+	}
+}
diff --git a/client/db/src/subdb.rs b/client/db/src/subdb.rs
new file mode 100644
index 0000000000000..2e436aa2c92c8
--- /dev/null
+++ b/client/db/src/subdb.rs
@@ -0,0 +1,87 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+/// A `Database` adapter for subdb.
+
+use sp_database::{self, ColumnId};
+use parking_lot::RwLock;
+use blake2_rfc::blake2b::blake2b;
+use codec::Encode;
+use subdb::{Database, KeyType};
+
+/// A database hidden behind an RwLock, so that it implements Send + Sync.
+///
+/// Construct by creating a `Database` and then using `.into()`.
+pub struct DbAdapter<H: KeyType>(RwLock<Database<H>>);
+
+/// Wrap subdb database into a trait object that implements `sp_database::Database`
+pub fn open<H: KeyType>(
+	path: &std::path::Path,
+	_num_columns: u32,
+) -> Result<std::sync::Arc<dyn sp_database::Database<H>>, subdb::Error> {
+	let db = subdb::Options::from_path(path.into()).open()?;
+	Ok(std::sync::Arc::new(DbAdapter(RwLock::new(db))))
+}
+
+impl<H: KeyType> sp_database::Database<H> for DbAdapter<H> {
+	fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
+		let mut hash = H::default();
+		(col, key).using_encoded(|d|
+			hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes())
+		);
+		self.0.read().get(&hash)
+	}
+
+	fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) {
+		let mut hash = H::default();
+		(col, key).using_encoded(|d|
+			hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes())
+		);
+		let _ = self.0.read().get_ref(&hash).map(|d| f(d.as_ref()));
+	}
+
+	fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) {
+		let mut hash = H::default();
+		(col, key).using_encoded(|d|
+			hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes())
+		);
+		self.0.write().insert(&value, &hash);
+	}
+
+	fn remove(&self, col: ColumnId, key: &[u8]) {
+		let mut hash = H::default();
+		(col, key).using_encoded(|d|
+			hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes())
+		);
+		let _ = self.0.write().remove(&hash);
+	}
+
+	fn lookup(&self, hash: &H) -> Option<Vec<u8>> {
+		self.0.read().get(hash)
+	}
+
+	fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) {
+		let _ = self.0.read().get_ref(hash).map(|d| f(d.as_ref()));
+	}
+
+	fn store(&self, hash: &H, preimage: &[u8]) {
+		self.0.write().insert(preimage, hash);
+	}
+
+	fn release(&self, hash: &H) {
+		let _ = self.0.write().remove(hash);
+	}
+}
diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs
index 5e6794108ecfe..95592d071f777 100644
--- a/client/db/src/upgrade.rs
+++ b/client/db/src/upgrade.rs
@@ -19,18 +19,9 @@
 use std::fs;
 use std::io::{Read, Write, ErrorKind};
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
-use codec::Encode;
-use kvdb_rocksdb::{Database, DatabaseConfig};
-use parking_lot::RwLock;
-use sp_blockchain::{well_known_cache_keys, Cache};
-use sp_core::ChangesTrieConfiguration;
 use sp_runtime::traits::Block as BlockT;
 
-use crate::{
-	cache::{ComplexBlockId, DbCache, DbCacheSync},
-	utils::{DatabaseType, check_database_type, db_err, read_genesis_hash},
-};
+use crate::utils::DatabaseType;
 
 /// Version file name.
 const VERSION_FILE_NAME: &'static str = "db_version";
@@ -38,69 +29,21 @@ const VERSION_FILE_NAME: &'static str = "db_version";
 /// Current db version.
const CURRENT_VERSION: u32 = 1; -/// Number of columns in v0. -const V0_NUM_COLUMNS: u32 = 10; - /// Upgrade database to current version. -pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { - let db_version = current_version(db_path)?; - match db_version { - 0 => migrate_0_to_1::(db_path, db_type)?, - 1 => (), - _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, +pub fn upgrade_db(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { + let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); + if !is_empty { + let db_version = current_version(db_path)?; + match db_version { + 0 => Err(sp_blockchain::Error::Backend(format!("Unsupported database version: {}", db_version)))?, + 1 => (), + _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, + } } update_version(db_path) } -/// Migration from version0 to version1: -/// 1) the number of columns has changed from 10 to 11; -/// 2) changes tries configuration are now cached. -fn migrate_0_to_1(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { - { - let db = open_database(db_path, db_type, V0_NUM_COLUMNS)?; - db.add_column().map_err(db_err)?; - db.flush().map_err(db_err)?; - } - - let db = open_database(db_path, db_type, V0_NUM_COLUMNS + 1)?; - - const V0_FULL_KEY_LOOKUP_COLUMN: u32 = 3; - const V0_FULL_HEADER_COLUMN: u32 = 4; - const V0_FULL_CACHE_COLUMN: u32 = 10; // that's the column we have just added - const V0_LIGHT_KEY_LOOKUP_COLUMN: u32 = 1; - const V0_LIGHT_HEADER_COLUMN: u32 = 2; - const V0_LIGHT_CACHE_COLUMN: u32 = 3; - - let (key_lookup_column, header_column, cache_column) = match db_type { - DatabaseType::Full => ( - V0_FULL_KEY_LOOKUP_COLUMN, - V0_FULL_HEADER_COLUMN, - V0_FULL_CACHE_COLUMN, - ), - DatabaseType::Light => ( - V0_LIGHT_KEY_LOOKUP_COLUMN, - V0_LIGHT_HEADER_COLUMN, - V0_LIGHT_CACHE_COLUMN, - ), - }; - - let genesis_hash: Option = read_genesis_hash(&db)?; - if let Some(genesis_hash) = genesis_hash { - let cache: DbCacheSync = DbCacheSync(RwLock::new(DbCache::new( - Arc::new(db), - key_lookup_column, - header_column, - cache_column, - genesis_hash, - ComplexBlockId::new(genesis_hash, 0.into()), - ))); - let changes_trie_config: Option = None; - cache.initialize(&well_known_cache_keys::CHANGES_TRIE_CONFIG, changes_trie_config.encode())?; - } - - Ok(()) -} /// Reads current database version from the file at given path. /// If the file does not exist returns 0. @@ -118,14 +61,9 @@ fn current_version(path: &Path) -> sp_blockchain::Result { } } -/// Opens database of given type with given number of columns. -fn open_database(db_path: &Path, db_type: DatabaseType, db_columns: u32) -> sp_blockchain::Result { - let db_path = db_path.to_str() - .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; - let db_cfg = DatabaseConfig::with_columns(db_columns); - let db = Database::open(&db_cfg, db_path).map_err(db_err)?; - check_database_type(&db, db_type)?; - Ok(db) +/// Maps database error to client error +fn db_err(err: std::io::Error) -> sp_blockchain::Error { + sp_blockchain::Error::Backend(format!("{}", err)) } /// Writes current database version to the file. 
@@ -152,8 +90,6 @@ mod tests { use super::*; fn create_db(db_path: &Path, version: Option) { - let db_cfg = DatabaseConfig::with_columns(V0_NUM_COLUMNS); - Database::open(&db_cfg, db_path.to_str().unwrap()).unwrap(); if let Some(version) = version { fs::create_dir_all(db_path).unwrap(); let mut file = fs::File::create(version_file_path(db_path)).unwrap(); @@ -166,7 +102,7 @@ mod tests { state_cache_size: 0, state_cache_child_ratio: None, pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::Path { path: db_path.to_owned(), cache_size: 128 }, + source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 }, }, DatabaseType::Full).map(|_| ()) } @@ -184,15 +120,4 @@ mod tests { open_database(db_dir.path()).unwrap(); assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); } - - #[test] - fn upgrade_from_0_to_1_works() { - for version_from_file in &[None, Some(0)] { - let db_dir = tempfile::TempDir::new().unwrap(); - let db_path = db_dir.path(); - create_db(db_path, *version_from_file); - open_database(db_path).unwrap(); - assert_eq!(current_version(db_path).unwrap(), CURRENT_VERSION); - } - } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 16239a82c2e24..9506dc4e7fab0 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -18,21 +18,19 @@ //! full and light storages. use std::sync::Arc; -use std::{io, convert::TryInto}; +use std::convert::TryInto; -use kvdb::{KeyValueDB, DBTransaction}; -#[cfg(any(feature = "kvdb-rocksdb", test))] -use kvdb_rocksdb::{Database, DatabaseConfig}; use log::debug; use codec::Decode; use sp_trie::DBValue; +use sp_database::Transaction; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Zero, UniqueSaturatedFrom, UniqueSaturatedInto, }; -use crate::{DatabaseSettings, DatabaseSettingsSrc}; +use crate::{DatabaseSettings, DatabaseSettingsSrc, Database, DbHash}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. @@ -136,35 +134,35 @@ pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result where /// Delete number to hash mapping in DB transaction. pub fn remove_number_to_key_mapping>( - transaction: &mut DBTransaction, + transaction: &mut Transaction, key_lookup_col: u32, number: N, ) -> sp_blockchain::Result<()> { - transaction.delete(key_lookup_col, number_index_key(number)?.as_ref()); + transaction.remove(key_lookup_col, number_index_key(number)?.as_ref()); Ok(()) } /// Remove key mappings. pub fn remove_key_mappings, H: AsRef<[u8]>>( - transaction: &mut DBTransaction, + transaction: &mut Transaction, key_lookup_col: u32, number: N, hash: H, ) -> sp_blockchain::Result<()> { remove_number_to_key_mapping(transaction, key_lookup_col, number)?; - transaction.delete(key_lookup_col, hash.as_ref()); + transaction.remove(key_lookup_col, hash.as_ref()); Ok(()) } /// Place a number mapping into the database. This maps number to current perceived /// block hash at that position. 
pub fn insert_number_to_key_mapping + Clone, H: AsRef<[u8]>>( - transaction: &mut DBTransaction, + transaction: &mut Transaction, key_lookup_col: u32, number: N, hash: H, ) -> sp_blockchain::Result<()> { - transaction.put_vec( + transaction.set_from_vec( key_lookup_col, number_index_key(number.clone())?.as_ref(), number_and_hash_to_lookup_key(number, hash)?, @@ -174,12 +172,12 @@ pub fn insert_number_to_key_mapping + Clone, H: AsRef<[u8]>>( /// Insert a hash to key mapping in the database. pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( - transaction: &mut DBTransaction, + transaction: &mut Transaction, key_lookup_col: u32, number: N, hash: H, ) -> sp_blockchain::Result<()> { - transaction.put_vec( + transaction.set_from_vec( key_lookup_col, hash.clone().as_ref(), number_and_hash_to_lookup_key(number, hash)?, @@ -191,42 +189,35 @@ pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( /// block lookup key is the DB-key header, block and justification are stored under. /// looks up lookup key by hash from DB as necessary. pub fn block_id_to_lookup_key( - db: &dyn KeyValueDB, + db: &dyn Database, key_lookup_col: u32, id: BlockId ) -> Result>, sp_blockchain::Error> where Block: BlockT, ::sp_runtime::traits::NumberFor: UniqueSaturatedFrom + UniqueSaturatedInto, { - let res = match id { + Ok(match id { BlockId::Number(n) => db.get( key_lookup_col, number_index_key(n)?.as_ref(), ), - BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), - }; - - res.map_err(db_err) -} - -/// Maps database error to client error -pub fn db_err(err: io::Error) -> sp_blockchain::Error { - sp_blockchain::Error::Backend(format!("{}", err)) + BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()) + }) } -/// Open RocksDB database. +/// Opens the configured database. pub fn open_database( config: &DatabaseSettings, db_type: DatabaseType, -) -> sp_blockchain::Result> { - let db: Arc = match &config.source { +) -> sp_blockchain::Result>> { + let db: Arc> = match &config.source { #[cfg(any(feature = "kvdb-rocksdb", test))] - DatabaseSettingsSrc::Path { path, cache_size } => { + DatabaseSettingsSrc::RocksDb { path, cache_size } => { // first upgrade database to required version crate::upgrade::upgrade_db::(&path, db_type)?; // and now open database assuming that it has the latest version - let mut db_config = DatabaseConfig::with_columns(NUM_COLUMNS); + let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); let state_col_budget = (*cache_size as f64 * 0.9) as usize; let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); let mut memory_budget = std::collections::HashMap::new(); @@ -245,21 +236,32 @@ pub fn open_database( log::trace!( target: "db", - "Open database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", + "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", path, state_col_budget, NUM_COLUMNS, other_col_budget, ); - Arc::new(Database::open(&db_config, &path).map_err(db_err)?) + let db = kvdb_rocksdb::Database::open(&db_config, &path) + .map_err(|err| sp_blockchain::Error::Backend(format!("{}", err)))?; + sp_database::as_database(db) }, - #[cfg(not(any(feature = "kvdb-rocksdb", test)))] - DatabaseSettingsSrc::Path { .. 
} => {
-			let msg = "Try to open RocksDB database with RocksDB disabled".into();
-			return Err(sp_blockchain::Error::Backend(msg));
+		#[cfg(feature = "subdb")]
+		DatabaseSettingsSrc::SubDb { path } => {
+			crate::subdb::open(&path, NUM_COLUMNS)
+				.map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))?
+		},
+		#[cfg(feature = "parity-db")]
+		DatabaseSettingsSrc::ParityDb { path } => {
+			crate::parity_db::open(&path, NUM_COLUMNS)
+				.map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))?
 		},
 		DatabaseSettingsSrc::Custom(db) => db.clone(),
+		_ => {
+			let msg = "Trying to open an unsupported database".into();
+			return Err(sp_blockchain::Error::Backend(msg));
+		},
 	};
 
 	check_database_type(&*db, db_type)?;
@@ -268,8 +270,8 @@
 }
 
 /// Check database type.
-pub fn check_database_type(db: &dyn KeyValueDB, db_type: DatabaseType) -> sp_blockchain::Result<()> {
-	match db.get(COLUMN_META, meta_keys::TYPE).map_err(db_err)? {
+pub fn check_database_type(db: &dyn Database<DbHash>, db_type: DatabaseType) -> sp_blockchain::Result<()> {
+	match db.get(COLUMN_META, meta_keys::TYPE) {
 		Some(stored_type) => {
 			if db_type.as_str().as_bytes() != &*stored_type {
 				return Err(sp_blockchain::Error::Backend(
@@ -277,9 +279,9 @@
 			}
 		},
 		None => {
-			let mut transaction = DBTransaction::new();
-			transaction.put(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes());
-			db.write(transaction).map_err(db_err)?;
+			let mut transaction = Transaction::new();
+			transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes());
+			db.commit(transaction)
 		},
 	}
 
@@ -288,7 +290,7 @@
 
 /// Read database column entry for the given block.
 pub fn read_db<Block>(
-	db: &dyn KeyValueDB,
+	db: &dyn Database<DbHash>,
 	col_index: u32,
 	col: u32,
 	id: BlockId<Block>
@@ -297,14 +299,14 @@
 		Block: BlockT,
 {
 	block_id_to_lookup_key(db, col_index, id).and_then(|key| match key {
-		Some(key) => db.get(col, key.as_ref()).map_err(db_err),
+		Some(key) => Ok(db.get(col, key.as_ref())),
 		None => Ok(None),
 	})
 }
 
 /// Read a header from the database.
 pub fn read_header<Block: BlockT>(
-	db: &dyn KeyValueDB,
+	db: &dyn Database<DbHash>,
 	col_index: u32,
 	col: u32,
 	id: BlockId<Block>,
@@ -322,7 +324,7 @@
 
 /// Required header from the database.
 pub fn require_header<Block: BlockT>(
-	db: &dyn KeyValueDB,
+	db: &dyn Database<DbHash>,
 	col_index: u32,
 	col: u32,
 	id: BlockId<Block>,
@@ -334,7 +336,7 @@
 }
 
 /// Read meta from the database.
-pub fn read_meta<Block>(db: &dyn KeyValueDB, col_header: u32) -> Result<
+pub fn read_meta<Block>(db: &dyn Database<DbHash>, col_header: u32) -> Result<
 	Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>,
 	sp_blockchain::Error,
 >
@@ -353,11 +355,10 @@
 	};
 
 	let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> {
-		if let Some(Some(header)) = db.get(COLUMN_META, key).and_then(|id|
-			match id {
-				Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]).ok())),
-				None => Ok(None),
-			}).map_err(db_err)?
+ if let Some(Some(header)) = match db.get(COLUMN_META, key) { + Some(id) => db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok()), + None => None, + } { let hash = header.hash(); debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number()); @@ -380,8 +381,8 @@ pub fn read_meta(db: &dyn KeyValueDB, col_header: u32) -> Result< } /// Read genesis hash from database. -pub fn read_genesis_hash(db: &dyn KeyValueDB) -> sp_blockchain::Result> { - match db.get(COLUMN_META, meta_keys::GENESIS_HASH).map_err(db_err)? { +pub fn read_genesis_hash(db: &dyn Database) -> sp_blockchain::Result> { + match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { Some(h) => match Decode::decode(&mut &h[..]) { Ok(h) => Ok(Some(h)), Err(err) => Err(sp_blockchain::Error::Backend( diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index fec4f1317b2dd..ae129871db95e 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -32,6 +32,7 @@ use sp_blockchain::{ use sc_client_api::{BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client::LongestChain; +use sc_client::blockchain::HeaderBackend; use sc_network::config::Role; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_consensus::import_queue::{ @@ -359,13 +360,9 @@ impl Peer { /// Test helper to compare the blockchain state of multiple (networked) /// clients. - /// Potentially costly, as it creates in-memory copies of both blockchains in order - /// to compare them. If you have easier/softer checks that are sufficient, e.g. - /// by using .info(), you should probably use it instead of this. pub fn blockchain_canon_equals(&self, other: &Self) -> bool { if let (Some(mine), Some(others)) = (self.backend.clone(), other.backend.clone()) { - mine.as_in_memory().blockchain() - .canon_equals_to(others.as_in_memory().blockchain()) + mine.blockchain().info().best_hash == others.blockchain().info().best_hash } else { false } @@ -374,7 +371,7 @@ impl Peer { /// Count the total number of imported blocks. pub fn blocks_count(&self) -> u64 { self.backend.as_ref().map( - |backend| backend.blocks_count() + |backend| backend.blockchain().info().best_number ).unwrap_or(0) } @@ -382,6 +379,12 @@ impl Peer { pub fn failed_verifications(&self) -> HashMap<::Hash, String> { self.verifier.failed_verifications.lock().clone() } + + pub fn has_block(&self, hash: &H256) -> bool { + self.backend.as_ref().map( + |backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some() + ).unwrap_or(false) + } } /// Implements `BlockImport` for any `Transaction`. Internally the transaction is diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 8acf265e91892..60e9e558c5fff 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -339,13 +339,15 @@ fn syncs_all_forks() { net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); - net.peer(0).push_blocks(2, true); - net.peer(1).push_blocks(4, false); + let b1 = net.peer(0).push_blocks(2, true); + let b2 = net.peer(1).push_blocks(4, false); net.block_until_sync(); - // Check that all peers have all of the blocks. - assert_eq!(9, net.peer(0).blocks_count()); - assert_eq!(9, net.peer(1).blocks_count()); + // Check that all peers have all of the branches. 
+ assert!(net.peer(0).has_block(&b1)); + assert!(net.peer(0).has_block(&b2)); + assert!(net.peer(1).has_block(&b1)); + assert!(net.peer(1).has_block(&b2)); } #[test] @@ -587,24 +589,11 @@ fn syncs_header_only_forks() { net.peer(0).push_blocks(2, true); let small_hash = net.peer(0).client().info().best_hash; - let small_number = net.peer(0).client().info().best_number; net.peer(1).push_blocks(4, false); net.block_until_sync(); // Peer 1 will sync the small fork even though common block state is missing - assert_eq!(9, net.peer(0).blocks_count()); - assert_eq!(9, net.peer(1).blocks_count()); - - // Request explicit header-only sync request for the ancient fork. - let first_peer_id = net.peer(0).id(); - net.peer(1).set_sync_fork_request(vec![first_peer_id], small_hash, small_number); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending - } - Poll::Ready(()) - })); + assert!(net.peer(1).has_block(&small_hash)); } #[test] diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 20b5c12a585f7..ba55c68b349fa 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -12,10 +12,10 @@ description = "Substrate service. Starts a thread that spins up the network, cli targets = ["x86_64-unknown-linux-gnu"] [features] -default = ["rocksdb"] +default = ["db"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. -rocksdb = ["sc-client-db/kvdb-rocksdb"] +db = ["sc-client-db/kvdb-rocksdb", "sc-client-db/parity-db"] wasmtime = [ "sc-executor/wasmtime", ] diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index acb546fc6052b..4f370c1118fb2 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -17,7 +17,7 @@ use crate::{Service, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm}; use crate::{TaskManagerBuilder, start_rpc_servers, build_network_future, TransactionPoolAdapter}; use crate::status_sinks; -use crate::config::{Configuration, DatabaseConfig, KeystoreConfig, PrometheusConfig}; +use crate::config::{Configuration, KeystoreConfig, PrometheusConfig}; use crate::metrics::MetricsService; use sc_client_api::{ self, @@ -196,15 +196,7 @@ fn new_full_parts( state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), pruning: config.pruning.clone(), - source: match &config.database { - DatabaseConfig::Path { path, cache_size } => - sc_client_db::DatabaseSettingsSrc::Path { - path: path.clone(), - cache_size: *cache_size, - }, - DatabaseConfig::Custom(db) => - sc_client_db::DatabaseSettingsSrc::Custom(db.clone()), - }, + source: config.database.clone(), }; let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( @@ -308,15 +300,7 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), pruning: config.pruning.clone(), - source: match &config.database { - DatabaseConfig::Path { path, cache_size } => - sc_client_db::DatabaseSettingsSrc::Path { - path: path.clone(), - cache_size: *cache_size, - }, - DatabaseConfig::Custom(db) => - sc_client_db::DatabaseSettingsSrc::Custom(db.clone()), - }, + source: config.database.clone(), }; sc_client_db::light::LightStorage::new(db_settings)? 
}; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 0515a31c7c656..b90bed723f057 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -17,7 +17,7 @@ //! Service configuration. pub use sc_client::ExecutionStrategies; -pub use sc_client_db::{kvdb::KeyValueDB, PruningMode}; +pub use sc_client_db::{Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig}; pub use sc_network::Multiaddr; pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; pub use sc_executor::WasmExecutionMethod; @@ -124,21 +124,6 @@ impl KeystoreConfig { } } -/// Configuration of the database of the client. -#[derive(Clone)] -pub enum DatabaseConfig { - /// Database file at a specific path. Recommended for most uses. - Path { - /// Path to the database. - path: PathBuf, - /// Cache Size for internal database in MiB - cache_size: usize, - }, - - /// A custom implementation of an already-open database. - Custom(Arc), -} - /// Configuration of the Prometheus endpoint. #[derive(Clone)] pub struct PrometheusConfig { diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 2811076ba38b7..a53201465880c 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -175,7 +175,7 @@ fn node_config { hash: H, @@ -59,7 +61,7 @@ impl FinalizationDisplaced { #[derive(Debug, Clone, PartialEq, Eq)] pub struct LeafSet { storage: BTreeMap, Vec>, - pending_added: Vec>, + pending_added: Vec<(H, N)>, pending_removed: Vec, } @@ -77,21 +79,20 @@ impl LeafSet where } /// Read the leaf list from the DB, using given prefix for keys. - pub fn read_from_db(db: &dyn KeyValueDB, column: u32, prefix: &[u8]) -> Result { + pub fn read_from_db(db: &dyn Database, column: u32, prefix: &[u8]) -> Result { let mut storage = BTreeMap::new(); - for (key, value) in db.iter_from_prefix(column, prefix) { - if !key.starts_with(prefix) { break } - let raw_hash = &mut &key[prefix.len()..]; - let hash = match Decode::decode(raw_hash) { - Ok(hash) => hash, - Err(_) => return Err(Error::Backend("Error decoding hash".into())), - }; - let number = match Decode::decode(&mut &value[..]) { - Ok(number) => number, - Err(_) => return Err(Error::Backend("Error decoding number".into())), - }; - storage.entry(Reverse(number)).or_insert_with(Vec::new).push(hash); + match db.get(column, prefix) { + Some(leaves) => { + let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) { + Ok(vals) => vals, + Err(_) => return Err(Error::Backend("Error decoding leaves".into())), + }; + for (number, hashes) in vals.into_iter() { + storage.insert(Reverse(number), hashes); + } + } + None => {}, } Ok(Self { storage, @@ -124,7 +125,7 @@ impl LeafSet where }; self.insert_leaf(Reverse(number.clone()), hash.clone()); - self.pending_added.push(LeafSetItem { hash, number: Reverse(number) }); + self.pending_added.push((hash, number)); displaced } @@ -185,7 +186,7 @@ impl LeafSet where // this is an invariant of regular block import. if !leaves_contains_best { self.insert_leaf(best_number.clone(), best_hash.clone()); - self.pending_added.push(LeafSetItem { hash: best_hash, number: best_number }); + self.pending_added.push((best_hash, best_number.0)); } } @@ -201,18 +202,11 @@ impl LeafSet where } /// Write the leaf list to the database transaction. - pub fn prepare_transaction(&mut self, tx: &mut DBTransaction, column: u32, prefix: &[u8]) { - let mut buf = prefix.to_vec(); - for LeafSetItem { hash, number } in self.pending_added.drain(..) 
{ - hash.using_encoded(|s| buf.extend(s)); - tx.put_vec(column, &buf[..], number.0.encode()); - buf.truncate(prefix.len()); // reuse allocation. - } - for hash in self.pending_removed.drain(..) { - hash.using_encoded(|s| buf.extend(s)); - tx.delete(column, &buf[..]); - buf.truncate(prefix.len()); // reuse allocation. - } + pub fn prepare_transaction(&mut self, tx: &mut Transaction, column: u32, prefix: &[u8]) { + let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); + tx.set_from_vec(column, prefix, leaves.encode()); + self.pending_added.clear(); + self.pending_removed.clear(); } #[cfg(test)] @@ -281,6 +275,7 @@ impl<'a, H: 'a, N: 'a> Drop for Undo<'a, H, N> { #[cfg(test)] mod tests { use super::*; + use std::sync::Arc; #[test] fn it_works() { @@ -305,7 +300,7 @@ mod tests { #[test] fn flush_to_disk() { const PREFIX: &[u8] = b"abcdefg"; - let db = ::kvdb_memorydb::create(1); + let db = Arc::new(sp_database::MemDb::default()); let mut set = LeafSet::new(); set.import(0u32, 0u32, 0u32); @@ -314,12 +309,12 @@ mod tests { set.import(2_1, 2, 1_1); set.import(3_1, 3, 2_1); - let mut tx = DBTransaction::new(); + let mut tx = Transaction::new(); set.prepare_transaction(&mut tx, 0, PREFIX); - db.write(tx).unwrap(); + db.commit(tx); - let set2 = LeafSet::read_from_db(&db, 0, PREFIX).unwrap(); + let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); assert_eq!(set, set2); } @@ -339,7 +334,7 @@ mod tests { #[test] fn finalization_consistent_with_disk() { const PREFIX: &[u8] = b"prefix"; - let db = ::kvdb_memorydb::create(1); + let db = Arc::new(sp_database::MemDb::default()); let mut set = LeafSet::new(); set.import(10_1u32, 10u32, 0u32); @@ -349,21 +344,21 @@ mod tests { assert!(set.contains(10, 10_1)); - let mut tx = DBTransaction::new(); + let mut tx = Transaction::new(); set.prepare_transaction(&mut tx, 0, PREFIX); - db.write(tx).unwrap(); + db.commit(tx); let _ = set.finalize_height(11); - let mut tx = DBTransaction::new(); + let mut tx = Transaction::new(); set.prepare_transaction(&mut tx, 0, PREFIX); - db.write(tx).unwrap(); + db.commit(tx); assert!(set.contains(11, 11_1)); assert!(set.contains(11, 11_2)); assert!(set.contains(12, 12_1)); assert!(!set.contains(10, 10_1)); - let set2 = LeafSet::read_from_db(&db, 0, PREFIX).unwrap(); + let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); assert_eq!(set, set2); } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 7e2040ee234a7..35e1231698a46 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -208,7 +208,7 @@ pub trait Contains { /// /// **Should be used for benchmarking only!!!** #[cfg(feature = "runtime-benchmarks")] - fn add(t: &T); + fn add(t: &T) { unimplemented!() } } /// Determiner to say whether a given account is unused. diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index e92dfd8c98e89..45d627a1c2722 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -242,7 +242,7 @@ pub trait Cache: Send + Sync { } /// Blockchain info -#[derive(Debug)] +#[derive(Debug, Eq, PartialEq)] pub struct Info { /// Best block hash. 
 	pub best_hash: Block::Hash,
diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml
new file mode 100644
index 0000000000000..2ab86de61d65f
--- /dev/null
+++ b/primitives/database/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "sp-database"
+version = "2.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "Substrate database trait."
+documentation = "https://docs.rs/sp-database"
+
+[dependencies]
+parking_lot = "0.10.0"
+kvdb = "0.5.0"
diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs
new file mode 100644
index 0000000000000..85a324b5c105f
--- /dev/null
+++ b/primitives/database/src/kvdb.rs
@@ -0,0 +1,59 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+/// A wrapper around `kvdb::KeyValueDB` that implements the `sp_database::Database` trait
+
+use ::kvdb::{DBTransaction, KeyValueDB};
+
+use crate::{Database, Change, Transaction, ColumnId};
+
+struct DbAdapter<D: KeyValueDB>(D);
+
+fn handle_err<T>(result: std::io::Result<T>) -> T {
+	match result {
+		Ok(r) => r,
+		Err(e) => {
+			panic!("Critical database error: {:?}", e);
+		}
+	}
+}
+
+/// Wrap a `KeyValueDB` database (e.g. RocksDb) into a trait object that implements `sp_database::Database`
+pub fn as_database<D: KeyValueDB + 'static, H: Clone>(db: D) -> std::sync::Arc<dyn Database<H>> {
+	std::sync::Arc::new(DbAdapter(db))
+}
+
+impl<D: KeyValueDB, H: Clone> Database<H> for DbAdapter<D> {
+	fn commit(&self, transaction: Transaction<H>) {
+		let mut tx = DBTransaction::new();
+		for change in transaction.0.into_iter() {
+			match change {
+				Change::Set(col, key, value) => tx.put_vec(col, &key, value),
+				Change::Remove(col, key) => tx.delete(col, &key),
+				_ => unimplemented!(),
+			}
+		}
+		handle_err(self.0.write(tx));
+	}
+
+	fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
+		handle_err(self.0.get(col, key))
+	}
+
+	fn lookup(&self, _hash: &H) -> Option<Vec<u8>> {
+		unimplemented!();
+	}
+}
diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs
new file mode 100644
index 0000000000000..bd9bd2eb54c28
--- /dev/null
+++ b/primitives/database/src/lib.rs
@@ -0,0 +1,187 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! The main database trait, allowing Substrate to store data persistently.
+
+mod mem;
+mod kvdb;
+
+pub use mem::MemDb;
+pub use crate::kvdb::as_database;
+
+/// An identifier for a column.
+pub type ColumnId = u32;
+
+/// An alteration to the database.
+#[derive(Clone)]
+pub enum Change<H> {
+	Set(ColumnId, Vec<u8>, Vec<u8>),
+	Remove(ColumnId, Vec<u8>),
+	Store(H, Vec<u8>),
+	Release(H),
+}
+
+/// An alteration to the database that references the data.
+pub enum ChangeRef<'a, H> {
+	Set(ColumnId, &'a [u8], &'a [u8]),
+	Remove(ColumnId, &'a [u8]),
+	Store(H, &'a [u8]),
+	Release(H),
+}
+
+/// A series of changes to the database that can be committed atomically. They do not take effect
+/// until passed into `Database::commit`.
+#[derive(Default, Clone)]
+pub struct Transaction<H>(pub Vec<Change<H>>);
+
+impl<H> Transaction<H> {
+	/// Create a new transaction to be prepared and committed atomically.
+	pub fn new() -> Self {
+		Transaction(Vec::new())
+	}
+	/// Set the value of `key` in `col` to `value`, replacing anything that is there currently.
+	pub fn set(&mut self, col: ColumnId, key: &[u8], value: &[u8]) {
+		self.0.push(Change::Set(col, key.to_vec(), value.to_vec()))
+	}
+	/// Set the value of `key` in `col` to `value`, replacing anything that is there currently.
+	pub fn set_from_vec(&mut self, col: ColumnId, key: &[u8], value: Vec<u8>) {
+		self.0.push(Change::Set(col, key.to_vec(), value))
+	}
+	/// Remove the value of `key` in `col`.
+	pub fn remove(&mut self, col: ColumnId, key: &[u8]) {
+		self.0.push(Change::Remove(col, key.to_vec()))
+	}
+	/// Store the `preimage` of `hash` into the database, so that it may be looked up later with
+	/// `Database::lookup`. This may be called multiple times, but subsequent calls will ignore
+	/// `preimage` and simply increase the number of references on `hash`.
+	pub fn store(&mut self, hash: H, preimage: &[u8]) {
+		self.0.push(Change::Store(hash, preimage.to_vec()))
+	}
+	/// Release the preimage of `hash` from the database. An equal number of these to the number of
+	/// corresponding `store`s must have been given before it is legal for `Database::lookup` to
+	/// be unable to provide the preimage.
+	pub fn release(&mut self, hash: H) {
+		self.0.push(Change::Release(hash))
+	}
+}
+
+pub trait Database<H: Clone>: Send + Sync {
+	/// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup`
+	/// will reflect the new state.
+	fn commit(&self, transaction: Transaction<H>) {
+		for change in transaction.0.into_iter() {
+			match change {
+				Change::Set(col, key, value) => self.set(col, &key, &value),
+				Change::Remove(col, key) => self.remove(col, &key),
+				Change::Store(hash, preimage) => self.store(&hash, &preimage),
+				Change::Release(hash) => self.release(&hash),
+			}
+		}
+	}
+
+	/// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup`
+	/// will reflect the new state.
+	fn commit_ref<'a>(&self, transaction: &mut dyn Iterator<Item=ChangeRef<'a, H>>) {
+		let mut tx = Transaction::new();
+		for change in transaction {
+			match change {
+				ChangeRef::Set(col, key, value) => tx.set(col, key, value),
+				ChangeRef::Remove(col, key) => tx.remove(col, key),
+				ChangeRef::Store(hash, preimage) => tx.store(hash, preimage),
+				ChangeRef::Release(hash) => tx.release(hash),
+			}
+		}
+		self.commit(tx);
+	}
+
+	/// Retrieve the value previously stored against `key` or `None` if
+	/// `key` is not currently in the database.
+	fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>>;
+
+	/// Call `f` with the value previously stored against `key`.
+	///
+	/// This may be faster than `get` since it doesn't allocate.
+	/// Use the `with_get` helper function if you need to return a value from `f`.
+	fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) {
+		self.get(col, key).map(|v| f(&v));
+	}
+
+	/// Set the value of `key` in `col` to `value`, replacing anything that is there currently.
+	fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) {
+		let mut t = Transaction::new();
+		t.set(col, key, value);
+		self.commit(t);
+	}
+	/// Remove the value of `key` in `col`.
+	fn remove(&self, col: ColumnId, key: &[u8]) {
+		let mut t = Transaction::new();
+		t.remove(col, key);
+		self.commit(t);
+	}
+
+	/// Retrieve the first preimage previously `store`d for `hash` or `None` if no preimage is
+	/// currently stored.
+	fn lookup(&self, hash: &H) -> Option<Vec<u8>>;
+
+	/// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage
+	/// is currently stored.
+	///
+	/// This may be faster than `lookup` since it doesn't allocate.
+	/// Use the `with_lookup` helper function if you need to return a value from `f`.
+	fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) {
+		self.lookup(hash).map(|v| f(&v));
+	}
+
+	/// Store the `preimage` of `hash` into the database, so that it may be looked up later with
+	/// `Database::lookup`. This may be called multiple times, but subsequent calls will ignore
+	/// `preimage` and simply increase the number of references on `hash`.
+	fn store(&self, hash: &H, preimage: &[u8]) {
+		let mut t = Transaction::new();
+		t.store(hash.clone(), preimage);
+		self.commit(t);
+	}
+
+	/// Release the preimage of `hash` from the database. An equal number of these to the number of
+	/// corresponding `store`s must have been given before it is legal for `Database::lookup` to
+	/// be unable to provide the preimage.
+	fn release(&self, hash: &H) {
+		let mut t = Transaction::new();
+		t.release(hash.clone());
+		self.commit(t);
+	}
+}
+
+/// Call `f` with the value previously stored against `key` and return the result, or `None` if
+/// `key` is not currently in the database.
+///
+/// This may be faster than `get` since it doesn't allocate.
+pub fn with_get<R, H: Clone>(db: &dyn Database<H>, col: ColumnId, key: &[u8], mut f: impl FnMut(&[u8]) -> R) -> Option<R> {
+	let mut result: Option<R> = None;
+	let mut adapter = |k: &_| { result = Some(f(k)); };
+	db.with_get(col, key, &mut adapter);
+	result
+}
+
+/// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage
+/// is currently stored.
+///
+/// This may be faster than `lookup` since it doesn't allocate.
+pub fn with_lookup<R, H: Clone>(db: &dyn Database<H>, hash: &H, mut f: impl FnMut(&[u8]) -> R) -> Option<R> {
+	let mut result: Option<R> = None;
+	let mut adapter = |k: &_| { result = Some(f(k)); };
+	db.with_lookup(hash, &mut adapter);
+	result
+}
diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs
new file mode 100644
index 0000000000000..09d6149bed174
--- /dev/null
+++ b/primitives/database/src/mem.rs
@@ -0,0 +1,68 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! In-memory implementation of `Database`
+
+use std::collections::HashMap;
+use crate::{Database, Transaction, ColumnId, Change};
+use parking_lot::RwLock;
+
+#[derive(Default)]
+/// This implements `Database` as an in-memory hash map. `commit` is not atomic.
+pub struct MemDb<H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash>
+	(RwLock<(HashMap<ColumnId, HashMap<Vec<u8>, Vec<u8>>>, HashMap<H, Vec<u8>>)>);
+
+impl<H> Database<H> for MemDb<H>
+	where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash
+{
+	fn commit(&self, transaction: Transaction<H>) {
+		let mut s = self.0.write();
+		for change in transaction.0.into_iter() {
+			match change {
+				Change::Set(col, key, value) => { s.0.entry(col).or_default().insert(key, value); },
+				Change::Remove(col, key) => { s.0.entry(col).or_default().remove(&key); },
+				Change::Store(hash, preimage) => { s.1.insert(hash, preimage); },
+				Change::Release(hash) => { s.1.remove(&hash); },
+			}
+		}
+	}
+
+	fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
+		let s = self.0.read();
+		s.0.get(&col).and_then(|c| c.get(key).cloned())
+	}
+
+	fn lookup(&self, hash: &H) -> Option<Vec<u8>> {
+		let s = self.0.read();
+		s.1.get(hash).cloned()
+	}
+}
+
+impl<H> MemDb<H>
+	where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash
+{
+	/// Create a new instance
+	pub fn new() -> Self {
+		MemDb::default()
+	}
+
+	/// Count number of values in a column
+	pub fn count(&self, col: ColumnId) -> usize {
+		let s = self.0.read();
+		s.0.get(&col).map(|c| c.len()).unwrap_or(0)
+	}
+}
+
diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml
index 4ffec41633643..29211298b731e 100644
--- a/utils/browser/Cargo.toml
+++ b/utils/browser/Cargo.toml
@@ -22,6 +22,7 @@ js-sys = "0.3.34"
 wasm-bindgen = "0.2.57"
 wasm-bindgen-futures = "0.4.7"
 kvdb-web = "0.5"
+sp-database = { version = "2.0.0-dev", path = "../../primitives/database" }
 sc-informant = { version = "0.8.0-dev", path = "../../client/informant" }
 sc-service = { version = "0.8.0-dev", path = "../../client/service", default-features = false }
 sc-network = { path = "../../client/network", version = "0.8.0-dev"}
diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs
index 572ebcb464cd0..35b1bc99c4501 100644
--- a/utils/browser/src/lib.rs
+++ b/utils/browser/src/lib.rs
@@ -70,7 +70,7 @@ where
 			info!("Opening Indexed DB database '{}'...", name);
 			let db = kvdb_web::Database::open(name, 10).await?;
 
-			DatabaseConfig::Custom(Arc::new(db))
+			DatabaseConfig::Custom(sp_database::as_database(db))
 		},
 		keystore: KeystoreConfig::InMemory,
 		default_heap_pages: Default::default(),
diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml
index 9366c276ed9a5..86eb20faf8075 100644
--- a/utils/frame/benchmarking-cli/Cargo.toml
+++ b/utils/frame/benchmarking-cli/Cargo.toml
@@ -26,5 +26,5 @@ structopt = "0.3.8"
 codec = { version = "1.3.0", package = "parity-scale-codec" }
 
 [features]
-default = ["rocksdb"]
-rocksdb = ["sc-client-db/kvdb-rocksdb"]
+default = ["db"]
+db = ["sc-client-db/kvdb-rocksdb", "sc-client-db/parity-db"]
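For reviewers who want to poke at the new `sp-database` API introduced in primitives/database above, here is a minimal usage sketch against the in-memory backend. It only uses items visible in this diff (`Transaction`, `Database`, `MemDb`); the `COL_STATE` column id and the `[u8; 32]` hash type are arbitrary choices made for the example, not part of the change.

use sp_database::{Database, MemDb, Transaction};

fn main() {
	// Hypothetical column id for the example; real column ids live in the
	// client's `columns` module.
	const COL_STATE: u32 = 0;

	// `MemDb` is generic over the hash type used by `store`/`lookup`; a
	// fixed-size byte array satisfies the required bounds.
	let db = MemDb::<[u8; 32]>::default();

	// Batch several changes and commit them in one go.
	let mut tx = Transaction::new();
	tx.set(COL_STATE, b"best", b"0xabcd");
	tx.store([1u8; 32], b"preimage data");
	db.commit(tx);

	// Reads reflect the committed state.
	assert_eq!(db.get(COL_STATE, b"best"), Some(b"0xabcd".to_vec()));
	assert_eq!(db.lookup(&[1u8; 32]), Some(b"preimage data".to_vec()));

	// Point updates go through the defaulted `set`/`remove` helpers, which
	// internally build and commit one-change transactions.
	db.remove(COL_STATE, b"best");
	assert_eq!(db.get(COL_STATE, b"best"), None);
}

Note that the backends differ in coverage: the kvdb and parity-db adapters in this diff leave `lookup`/`store` unimplemented, while the subdb adapter implements them, so code working through the trait object should stick to the column-based half of the interface unless the backend is known.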