diff --git a/CHANGELOG.md b/CHANGELOG.md index ded9c95e2d2a..7f4d998aa8f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,9 @@ ### Breaking +- [#3048](https://github.com/ChainSafe/forest/pull/3048): Remove support for + rocksdb + ### Added - [#2816](https://github.com/ChainSafe/forest/issues/2816): Support `2k` devnet. diff --git a/Cargo.lock b/Cargo.lock index dd985382aa0e..5aa9d5c61a01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -663,26 +663,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.64.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 1.0.109", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -989,17 +969,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cached" version = "0.30.0" @@ -1054,15 +1023,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version = "0.1.10" @@ -1186,17 +1146,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "clang-sys" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "4.3.4" @@ -2966,7 +2915,6 @@ dependencies = [ "raw_sync", "rayon", "reqwest", - "rocksdb", "rustyline", "semver", "serde", @@ -4294,12 +4242,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "leb128" version = "0.2.5" @@ -4407,16 +4349,6 @@ dependencies = [ "libipld-core 0.14.0", ] -[[package]] -name = "libloading" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" -dependencies = [ - "cfg-if 1.0.0", - "winapi", -] - [[package]] name = "libm" version = "0.1.4" @@ -4857,22 +4789,6 @@ dependencies = [ "yamux", ] -[[package]] -name = "librocksdb-sys" -version = "0.10.0+7.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fe4d5874f5ff2bc616e55e8c6086d478fcda13faf9495768a4aa1c22042d30b" -dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "tikv-jemalloc-sys", -] - [[package]] name = "libsecp256k1" version = "0.7.1" @@ -5871,12 +5787,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "peeking_take_while" -version = 
"0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -6790,16 +6700,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "rocksdb" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "015439787fce1e75d55f279078d33ff14b4af5d93d995e8838ee4631301c8a99" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rtcp" version = "0.7.2" @@ -7410,12 +7310,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" -[[package]] -name = "shlex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - [[package]] name = "signal-hook-registry" version = "1.4.1" diff --git a/Cargo.toml b/Cargo.toml index 8138aaf0a7a6..a3d77413f1a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -129,7 +129,7 @@ num-rational = "0.4" num-traits = "0.2" num_cpus = "1.14" once_cell = "1.15" -parity-db = { version = "0.4.6", optional = true, default_features = false } +parity-db = { version = "0.4.6", default_features = false } parking_lot = "0.12" pbr = "1.1" pin-project-lite = "0.2" @@ -146,9 +146,6 @@ reqwest = { version = "0.11.18", default-features = false, features = [ "stream", "rustls-tls", ] } # use rustls instead of native (openSSL) tls to drop the number of build dependencies -rocksdb = { version = "0.20", optional = true, default_features = false, features = [ - "lz4", -] } # only opt in to the compression we use, see #2227 rustyline = "10.1.1" semver = "1.0" serde = { version = "1.0", default-features = false, features = ["derive"] } @@ -222,19 +219,15 @@ overflow-checks = true # These should be refactored (probably removed) in #2984 [features] -default = ["paritydb", "jemalloc", "fil_cns"] +default = ["jemalloc", "fil_cns"] instrumented_kernel = ["dep:stdext"] insecure_post = [] -doctest-private = [] # see lib.rs::doctest_private -benchmark-private = [] # see lib.rs::benchmark_private - -# Databases -paritydb = ["dep:parity-db"] -rocksdb = ["dep:rocksdb"] +doctest-private = [] # see lib.rs::doctest_private +benchmark-private = [] # see lib.rs::benchmark_private # Allocator rustalloc = [] -jemalloc = ["dep:tikv-jemallocator", "rocksdb?/jemalloc"] +jemalloc = ["dep:tikv-jemallocator"] mimalloc = ["dep:mimalloc"] # Consensus diff --git a/Makefile b/Makefile index e079965e5314..7744b4644a67 100644 --- a/Makefile +++ b/Makefile @@ -15,17 +15,13 @@ install-daemon: install: cargo install --locked --path . --force -# Installs Forest binaries with RocksDb backend -install-with-rocksdb: - cargo install --locked --path . --force --no-default-features --features jemalloc,rocksdb,fil_cns - # Installs Forest binaries with default rust global allocator install-with-rustalloc: - cargo install --locked --path . --force --no-default-features --features rustalloc,paritydb,fil_cns + cargo install --locked --path . --force --no-default-features --features rustalloc,fil_cns # Installs Forest binaries with MiMalloc global allocator install-with-mimalloc: - cargo install --locked --path . --force --no-default-features --features mimalloc,paritydb,fil_cns + cargo install --locked --path . 
--force --no-default-features --features mimalloc,fil_cns install-deps: apt-get update -y @@ -79,12 +75,9 @@ lint-clippy: cargo clippy --features=instrumented_kernel --quiet --no-deps -- --deny=warnings # different consensus algos (repeated for clarity) - cargo clippy --features=paritydb,rustalloc,fil_cns --no-default-features --quiet --no-deps -- --deny=warnings - cargo clippy --features=paritydb,rustalloc,deleg_cns --no-default-features --quiet --no-deps -- --deny=warnings + cargo clippy --features=rustalloc,fil_cns --no-default-features --quiet --no-deps -- --deny=warnings + cargo clippy --features=rustalloc,deleg_cns --no-default-features --quiet --no-deps -- --deny=warnings - # different databases (repeated for clarity) - cargo clippy --features=paritydb,rustalloc,fil_cns --no-default-features --quiet --no-deps -- --deny=warnings - cargo clippy --features=rocksdb,rustalloc,fil_cns --no-default-features --quiet --no-deps -- --deny=warnings DOCKERFILES=$(wildcard Dockerfile*) lint-docker: $(DOCKERFILES) @@ -108,10 +101,6 @@ docker-run: test: cargo nextest run - # different databases (repeated for clarity) - cargo nextest run --features=paritydb,rustalloc,fil_cns --no-default-features db - cargo nextest run --features=rocksdb,rustalloc,fil_cns --no-default-features db - # nextest doesn't run doctests https://github.com/nextest-rs/nextest/issues/16 # see also lib.rs::doctest_private cargo test --doc --features doctest-private @@ -119,10 +108,6 @@ test: test-release: cargo nextest run --release - # different databases (repeated for clarity) - cargo nextest run --release --features=paritydb,rustalloc,fil_cns --no-default-features db - cargo nextest run --release --features=rocksdb,rustalloc,fil_cns --no-default-features db - test-all: test test-release smoke-test: diff --git a/documentation/src/trouble_shooting.md b/documentation/src/trouble_shooting.md index e7f941568b74..d3731703de0f 100644 --- a/documentation/src/trouble_shooting.md +++ b/documentation/src/trouble_shooting.md @@ -2,13 +2,6 @@ ## Common Issues -#### File Descriptor Limits - -By default, Forest will use large database files (roughly 1GiB each). Lowering -the size of these files lets RocksDB use less memory but runs the risk of -hitting the open-files limit. If you do hit this limit, either increase the file -size or use `ulimit` to increase the open-files limit. - #### Jemalloc issues on Apple Silicon macs Forest is compiled with `jemalloc` as a default allocator. 
If you are having diff --git a/src/chain_sync/tipset_syncer.rs b/src/chain_sync/tipset_syncer.rs index d314b8712e99..cfe5c4137fda 100644 --- a/src/chain_sync/tipset_syncer.rs +++ b/src/chain_sync/tipset_syncer.rs @@ -1197,12 +1197,6 @@ async fn validate_tipset &DbConfig { - &self.rocks_db - } - } else if #[cfg(feature = "paritydb")] { - pub fn db_config(&self) -> &DbConfig { - &self.parity_db - } - } + pub fn db_config(&self) -> &DbConfig { + &self.parity_db } } @@ -145,7 +135,6 @@ mod test { #[derive(Clone, Debug)] struct ConfigPartial { client: Client, - rocks_db: crate::db::rocks_config::RocksDbConfig, parity_db: crate::db::parity_db_config::ParityDbConfig, network: crate::libp2p::Libp2pConfig, sync: crate::chain_sync::SyncConfig, @@ -155,7 +144,6 @@ mod test { fn from(val: ConfigPartial) -> Self { Config { client: val.client, - rocks_db: val.rocks_db, parity_db: val.parity_db, network: val.network, sync: val.sync, @@ -185,19 +173,6 @@ mod test { token_exp: Duration::milliseconds(i64::arbitrary(g)), show_progress_bars: ProgressBarVisibility::arbitrary(g), }, - rocks_db: crate::db::rocks_config::RocksDbConfig { - create_if_missing: bool::arbitrary(g), - parallelism: i32::arbitrary(g), - write_buffer_size: u32::arbitrary(g) as _, - max_open_files: i32::arbitrary(g), - max_background_jobs: Option::arbitrary(g), - compaction_style: String::arbitrary(g), - enable_statistics: bool::arbitrary(g), - stats_dump_period_sec: u32::arbitrary(g), - log_level: String::arbitrary(g), - optimize_filters_for_hits: bool::arbitrary(g), - optimize_for_point_lookup: i32::arbitrary(g), - }, parity_db: crate::db::parity_db_config::ParityDbConfig { enable_statistics: bool::arbitrary(g), compression_type: String::arbitrary(g), diff --git a/src/daemon/daemon.rs b/src/daemon/daemon.rs index a40003bac957..515161a3aaa5 100644 --- a/src/daemon/daemon.rs +++ b/src/daemon/daemon.rs @@ -417,12 +417,6 @@ pub(super) async fn start( } } - // For convenience, flush the database after we've potentially loaded a new - // snapshot. This ensures the snapshot won't have to be re-imported if - // Forest is interrupted. As of writing, flushing only affects RocksDB and - // is a no-op with ParityDB. - state_manager.blockstore().flush()?; - // Halt if opts.halt_after_import { // Cancel all async services diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 7ae20172295a..224a14d20c1b 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -441,12 +441,6 @@ pub(super) async fn start( } } - // For convenience, flush the database after we've potentially loaded a new - // snapshot. This ensures the snapshot won't have to be re-imported if - // Forest is interrupted. As of writing, flushing only affects RocksDB and - // is a no-op with ParityDB. 
- state_manager.blockstore().flush()?; - // Halt if opts.halt_after_import { // Cancel all async services diff --git a/src/db/errors.rs b/src/db/errors.rs index 27aa79951e53..b9354666329c 100644 --- a/src/db/errors.rs +++ b/src/db/errors.rs @@ -10,7 +10,6 @@ pub enum Error { InvalidBulkLen, #[error("Cannot use unopened database")] Unopened, - #[cfg(any(feature = "rocksdb", feature = "paritydb"))] #[error(transparent)] Database(#[from] crate::db::db_engine::DbError), #[error("{0}")] @@ -24,7 +23,6 @@ impl PartialEq for Error { match (self, other) { (&InvalidBulkLen, &InvalidBulkLen) => true, (&Unopened, &Unopened) => true, - #[cfg(any(feature = "rocksdb", feature = "paritydb"))] (&Database(_), &Database(_)) => true, (Other(a), Other(b)) => a == b, _ => false, diff --git a/src/db/mod.rs b/src/db/mod.rs index 111dffe58fa3..f69b6acf6ebc 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -4,24 +4,11 @@ mod errors; mod memory; mod metrics; - -cfg_if::cfg_if! { - if #[cfg(feature = "rocksdb")] { - pub mod rocks; - } else if #[cfg(feature = "paritydb")] { - pub mod parity_db; - } -} - -// Not using conditional compilation here because DB config types are used in -// forest config +pub mod parity_db; pub mod parity_db_config; -pub mod rocks_config; - pub use errors::Error; pub use memory::MemoryDB; -#[cfg(any(feature = "paritydb", feature = "rocksdb"))] pub mod rolling; /// Store interface used as a KV store implementation @@ -52,11 +39,6 @@ pub trait Store { .into_iter() .try_for_each(|(key, value)| self.write(key.into(), value.into())) } - - /// Flush writing buffer if there is any. Default implementation is blank - fn flush(&self) -> Result<(), Error> { - Ok(()) - } } impl Store for &BS { @@ -97,25 +79,15 @@ pub trait DBStatistics { } } -#[cfg(any(feature = "paritydb", feature = "rocksdb"))] pub mod db_engine { use std::path::{Path, PathBuf}; use crate::db::rolling::*; - cfg_if::cfg_if! 
{ - if #[cfg(feature = "rocksdb")] { - pub type Db = crate::db::rocks::RocksDb; - pub type DbConfig = crate::db::rocks_config::RocksDbConfig; - pub(in crate::db) type DbError = rocksdb::Error; - const DIR_NAME: &str = "rocksdb"; - } else if #[cfg(feature = "paritydb")] { - pub type Db = crate::db::parity_db::ParityDb; - pub type DbConfig = crate::db::parity_db_config::ParityDbConfig; - pub(in crate::db) type DbError = parity_db::Error; - const DIR_NAME: &str = "paritydb"; - } - } + pub type Db = crate::db::parity_db::ParityDb; + pub type DbConfig = crate::db::parity_db_config::ParityDbConfig; + pub(in crate::db) type DbError = parity_db::Error; + const DIR_NAME: &str = "paritydb"; pub fn db_root(chain_data_root: &Path) -> PathBuf { chain_data_root.join(DIR_NAME) @@ -133,9 +105,6 @@ pub mod db_engine { mod tests { pub mod db_utils; mod mem_test; - #[cfg(feature = "paritydb")] mod parity_test; - #[cfg(feature = "rocksdb")] - mod rocks_test; pub mod subtests; } diff --git a/src/db/rocks.rs b/src/db/rocks.rs deleted file mode 100644 index dd304598eca3..000000000000 --- a/src/db/rocks.rs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2019-2023 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use std::{path::Path, sync::Arc}; - -use crate::libp2p_bitswap::{BitswapStoreRead, BitswapStoreReadWrite}; -use anyhow::anyhow; -use cid::Cid; -use fvm_ipld_blockstore::Blockstore; -use rocksdb::{ - BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DataBlockIndexType, LogLevel, - Options, WriteBatch, WriteOptions, DB, -}; - -use super::{errors::Error, Store}; -use crate::db::{metrics, rocks_config::RocksDbConfig, DBStatistics}; - -lazy_static::lazy_static! { - static ref WRITE_OPT_NO_WAL: WriteOptions = { - let mut opt = WriteOptions::default(); - opt.disable_wal(true); - opt - }; -} - -/// Converts string to a compaction style `RocksDB` variant. -fn compaction_style_from_str(s: &str) -> anyhow::Result> { - match s.to_lowercase().as_str() { - "level" => Ok(Some(DBCompactionStyle::Level)), - "universal" => Ok(Some(DBCompactionStyle::Universal)), - "fifo" => Ok(Some(DBCompactionStyle::Fifo)), - "none" => Ok(None), - _ => Err(anyhow!("invalid compaction option")), - } -} - -/// Converts string to a log level `RocksDB` variant. -fn log_level_from_str(s: &str) -> anyhow::Result { - match s.to_lowercase().as_str() { - "debug" => Ok(LogLevel::Debug), - "warn" => Ok(LogLevel::Warn), - "error" => Ok(LogLevel::Error), - "fatal" => Ok(LogLevel::Fatal), - "header" => Ok(LogLevel::Header), - _ => Err(anyhow!("invalid log level option")), - } -} - -#[cfg(test)] -mod test { - use rocksdb::DBCompactionStyle; - - use super::*; - - #[test] - fn compaction_style_from_str_test() { - let test_cases = vec![ - ("Level", Ok(Some(DBCompactionStyle::Level))), - ("UNIVERSAL", Ok(Some(DBCompactionStyle::Universal))), - ("fifo", Ok(Some(DBCompactionStyle::Fifo))), - ("none", Ok(None)), - ("cthulhu", Err(anyhow!("some error message"))), - ]; - for (input, expected) in test_cases { - let actual = compaction_style_from_str(input); - if let Ok(compaction_style) = actual { - assert_eq!(expected.unwrap(), compaction_style); - } else { - assert!(expected.is_err()); - } - } - } -} - -/// `RocksDB` instance this satisfies the [Store] interface. 
-#[derive(Clone)] -pub struct RocksDb { - pub db: Arc, - options: Options, -} - -/// `RocksDb` is used as the KV store for Forest -/// -/// Usage: -/// ```no_run -/// use crate::db::rocks::RocksDb; -/// use crate::db::rocks_config::RocksDbConfig; -/// -/// let mut db = RocksDb::open("test_db", &RocksDbConfig::default()).unwrap(); -/// ``` -impl RocksDb { - fn to_options(config: &RocksDbConfig) -> Options { - let mut db_opts = Options::default(); - db_opts.create_if_missing(config.create_if_missing); - db_opts.increase_parallelism(config.parallelism); - db_opts.set_write_buffer_size(config.write_buffer_size); - db_opts.set_max_open_files(config.max_open_files); - - if let Some(max_background_jobs) = config.max_background_jobs { - db_opts.set_max_background_jobs(max_background_jobs); - } - if let Some(compaction_style) = compaction_style_from_str(&config.compaction_style).unwrap() - { - db_opts.set_compaction_style(compaction_style); - db_opts.set_disable_auto_compactions(false); - } else { - db_opts.set_disable_auto_compactions(true); - } - db_opts.set_compression_type(DBCompressionType::Lz4); - if config.enable_statistics { - db_opts.set_stats_dump_period_sec(config.stats_dump_period_sec); - db_opts.enable_statistics(); - }; - db_opts.set_log_level(log_level_from_str(&config.log_level).unwrap()); - db_opts.set_optimize_filters_for_hits(config.optimize_filters_for_hits); - // Comes from https://github.com/facebook/rocksdb/blob/main/options/options.cc#L606 - // Only modified to upgrade format to v5 - if !config.optimize_for_point_lookup.is_negative() { - let cache_size = config.optimize_for_point_lookup as usize; - let mut opts = BlockBasedOptions::default(); - opts.set_format_version(5); - opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash); - opts.set_data_block_hash_ratio(0.75); - opts.set_bloom_filter(10.0, false); - let cache = Cache::new_lru_cache(cache_size * 1024 * 1024).unwrap(); - opts.set_block_cache(&cache); - db_opts.set_block_based_table_factory(&opts); - db_opts.set_memtable_prefix_bloom_ratio(0.02); - db_opts.set_memtable_whole_key_filtering(true); - } - db_opts - } - - pub fn open
<P>
(path: P, config: &RocksDbConfig) -> Result - where - P: AsRef, - { - let db_opts = Self::to_options(config); - Ok(Self { - db: Arc::new(DB::open(&db_opts, path)?), - options: db_opts, - }) - } - - pub fn get_statistics(&self) -> Option { - self.options.get_statistics() - } -} - -impl Store for RocksDb { - fn read(&self, key: K) -> Result>, Error> - where - K: AsRef<[u8]>, - { - self.db.get(key).map_err(Error::from) - } - - fn write(&self, key: K, value: V) -> Result<(), Error> - where - K: AsRef<[u8]>, - V: AsRef<[u8]>, - { - Ok(self.db.put_opt(key, value, &WRITE_OPT_NO_WAL)?) - } - - fn exists(&self, key: K) -> Result - where - K: AsRef<[u8]>, - { - self.db - .get_pinned(key) - .map(|v| v.is_some()) - .map_err(Error::from) - } - - fn bulk_write( - &self, - values: impl IntoIterator>, impl Into>)>, - ) -> Result<(), Error> { - let mut batch = WriteBatch::default(); - for (k, v) in values { - batch.put(k.into(), v.into()); - } - Ok(self.db.write_without_wal(batch)?) - } - - fn flush(&self) -> Result<(), Error> { - self.db.flush().map_err(|e| Error::Other(e.to_string())) - } -} - -impl Blockstore for RocksDb { - fn get(&self, k: &Cid) -> anyhow::Result>> { - self.read(k.to_bytes()).map_err(Into::into) - } - - fn put_keyed(&self, k: &Cid, block: &[u8]) -> anyhow::Result<()> { - metrics::BLOCK_SIZE_BYTES.observe(block.len() as f64); - self.write(k.to_bytes(), block).map_err(Into::into) - } - - fn put_many_keyed(&self, blocks: I) -> anyhow::Result<()> - where - Self: Sized, - D: AsRef<[u8]>, - I: IntoIterator, - { - let mut batch = WriteBatch::default(); - for (cid, v) in blocks.into_iter() { - let k = cid.to_bytes(); - let v = v.as_ref(); - metrics::BLOCK_SIZE_BYTES.observe(v.len() as f64); - batch.put(k, v); - } - // This function is used in `fvm_ipld_car::load_car` - // It reduces time cost of loading mainnet snapshot - // by ~10% by not writing to WAL(write ahead log). - Ok(self.db.write_without_wal(batch)?) - } -} - -impl BitswapStoreRead for RocksDb { - fn contains(&self, cid: &Cid) -> anyhow::Result { - Ok(self.exists(cid.to_bytes())?) - } - - fn get(&self, cid: &Cid) -> anyhow::Result>> { - Blockstore::get(self, cid) - } -} - -impl BitswapStoreReadWrite for RocksDb { - /// `fvm_ipld_encoding::DAG_CBOR(0x71)` is covered by - /// [`libipld::DefaultParams`] under feature `dag-cbor` - type Params = libipld::DefaultParams; - - fn insert(&self, block: &libipld::Block) -> anyhow::Result<()> { - self.put_keyed(block.cid(), block.data()) - } -} - -impl DBStatistics for RocksDb { - fn get_statistics(&self) -> Option { - self.options.get_statistics() - } -} diff --git a/src/db/rocks_config.rs b/src/db/rocks_config.rs deleted file mode 100644 index 394d8fef7e22..000000000000 --- a/src/db/rocks_config.rs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019-2023 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT -use num_cpus; -use serde::{Deserialize, Serialize}; - -/// `RocksDB` configuration exposed in Forest. -/// Only subset of possible options is implemented, add missing ones when -/// needed. For description of different options please refer to the `rocksdb` -/// crate::db documentation. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -#[serde(default)] -pub struct RocksDbConfig { - pub create_if_missing: bool, - pub parallelism: i32, - /// This is the `memtable` size in bytes. 
- pub write_buffer_size: usize, - pub max_open_files: i32, - pub max_background_jobs: Option, - pub compaction_style: String, - pub enable_statistics: bool, - pub stats_dump_period_sec: u32, - pub log_level: String, - pub optimize_filters_for_hits: bool, - pub optimize_for_point_lookup: i32, -} - -impl Default for RocksDbConfig { - fn default() -> Self { - Self { - create_if_missing: true, - parallelism: num_cpus::get() as i32, - write_buffer_size: 2usize.pow(30), // 1 GiB - max_open_files: -1, - max_background_jobs: None, - compaction_style: "none".into(), - enable_statistics: false, - stats_dump_period_sec: 600, - log_level: "warn".into(), - optimize_filters_for_hits: true, - optimize_for_point_lookup: 8, - } - } -} diff --git a/src/db/rolling/impls.rs b/src/db/rolling/impls.rs index 149aca2e1c27..fcc18d91050a 100644 --- a/src/db/rolling/impls.rs +++ b/src/db/rolling/impls.rs @@ -109,10 +109,6 @@ impl Store for RollingDB { ) -> Result<(), crate::db::Error> { Store::bulk_write(&self.current(), values) } - - fn flush(&self) -> Result<(), crate::db::Error> { - Store::flush(&self.current()) - } } impl BitswapStoreRead for RollingDB { @@ -161,17 +157,6 @@ impl FileBackedObject for DbIndex { } } -impl Drop for RollingDB { - fn drop(&mut self) { - if let Err(err) = self.flush() { - warn!( - "Error flushing rolling db under {}: {err}", - self.db_root.display() - ); - } - } -} - impl RollingDB { pub fn load_or_create(db_root: PathBuf, db_config: DbConfig) -> anyhow::Result { if !db_root.exists() { diff --git a/src/db/tests/db_utils/mod.rs b/src/db/tests/db_utils/mod.rs index 060ad44439c8..315d146d4ea7 100644 --- a/src/db/tests/db_utils/mod.rs +++ b/src/db/tests/db_utils/mod.rs @@ -1,8 +1,4 @@ // Copyright 2019-2023 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -#[cfg(feature = "paritydb")] pub(in crate::db) mod parity; - -#[cfg(feature = "rocksdb")] -pub(in crate::db) mod rocks; diff --git a/src/db/tests/db_utils/rocks.rs b/src/db/tests/db_utils/rocks.rs deleted file mode 100644 index 46efc4d1d4a6..000000000000 --- a/src/db/tests/db_utils/rocks.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019-2023 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use std::ops::Deref; - -use crate::db::{rocks::RocksDb, rocks_config::RocksDbConfig}; - -/// Temporary, self-cleaning RocksDB -pub struct TempRocksDB { - db: RocksDb, - _dir: tempfile::TempDir, // kept for cleaning up during Drop -} - -impl TempRocksDB { - /// Creates a new DB in a temporary path that gets wiped out when the - /// variable gets out of scope. 
- pub fn new() -> TempRocksDB { - let dir = tempfile::Builder::new() - .tempdir() - .expect("Failed to create temporary path for db."); - let path = dir.path().join("rocksdb"); - - TempRocksDB { - db: RocksDb::open(path, &RocksDbConfig::default()).unwrap(), - _dir: dir, - } - } -} - -impl Deref for TempRocksDB { - type Target = RocksDb; - - fn deref(&self) -> &Self::Target { - &self.db - } -} diff --git a/src/db/tests/rocks_test.rs b/src/db/tests/rocks_test.rs deleted file mode 100644 index b45ee6e24091..000000000000 --- a/src/db/tests/rocks_test.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019-2023 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use super::{db_utils::rocks::TempRocksDB, subtests}; - -#[test] -fn db_write() { - let db = TempRocksDB::new(); - subtests::write(&*db); -} - -#[test] -fn db_read() { - let db = TempRocksDB::new(); - subtests::read(&*db); -} - -#[test] -fn db_exists() { - let db = TempRocksDB::new(); - subtests::exists(&*db); -} - -#[test] -fn db_does_not_exist() { - let db = TempRocksDB::new(); - subtests::does_not_exist(&*db); -} - -#[test] -fn db_bulk_write() { - let db = TempRocksDB::new(); - subtests::bulk_write(&*db); -}
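With the RocksDB helpers gone, the database test utilities keep only the ParityDb variant (`db_utils/parity`, not shown in this diff). For readers following the removal, a minimal sketch of what a temporary, self-cleaning ParityDb helper could look like is shown below, mirroring the deleted `TempRocksDB`. The `ParityDb::open` call and the existence of `ParityDbConfig::default()` are assumptions made by analogy with the removed RocksDb code, not a verbatim copy of the in-tree helper.

```rust
// Sketch only: the real helper lives in src/db/tests/db_utils/parity.rs, which
// is not part of this diff. The `ParityDb::open` signature and the existence of
// `ParityDbConfig::default()` are assumed by analogy with the removed RocksDb code.
use std::ops::Deref;

use crate::db::{parity_db::ParityDb, parity_db_config::ParityDbConfig};

/// Temporary, self-cleaning ParityDb.
pub struct TempParityDB {
    db: ParityDb,
    _dir: tempfile::TempDir, // kept so the directory is removed on drop
}

impl TempParityDB {
    /// Creates a new DB in a temporary path that gets wiped out when the
    /// value goes out of scope.
    pub fn new() -> TempParityDB {
        let dir = tempfile::Builder::new()
            .tempdir()
            .expect("Failed to create temporary path for db.");
        let path = dir.path().join("paritydb");

        TempParityDB {
            // Signature assumed; adjust to the real `ParityDb::open` as needed.
            db: ParityDb::open(path, &ParityDbConfig::default()).unwrap(),
            _dir: dir,
        }
    }
}

impl Deref for TempParityDB {
    type Target = ParityDb;

    fn deref(&self) -> &Self::Target {
        &self.db
    }
}
```

With a helper of this shape, the shared `subtests` (`write`, `read`, `exists`, `does_not_exist`, `bulk_write`) can run against `&*db` in the same way the deleted `rocks_test.rs` exercised `TempRocksDB`.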