diff --git a/CHANGELOG.md b/CHANGELOG.md
index a9f1451..116b083 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -600,7 +600,7 @@
 * Cleaned up and improved coverage and tests of the ffi module (alexreg)
 * Added many new methods to the `Options` type (development by ngaut, BusyJay, zhangjinpeng1987, siddontang and hhkbp2. ported by kaedroho)
 * Added `len` and `is_empty` methods to `WriteBatch` (development by siddontang. ported by kaedroho)
-* Added `path` mathod to `DB` (development by siddontang. ported by kaedroho)
+* Added `path` method to `DB` (development by siddontang. ported by kaedroho)
 * `DB::open` now accepts any type that implements `Into` as the path argument (kaedroho)
 * `DB` now implements the `Debug` trait (kaedroho)
 * Add iterator_cf to snapshot (jezell)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b4d3d41..9a7dd18 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -14,7 +14,7 @@ Thank you for taking an interest in the project, and contributing to it - it's a
 - **Ensure the bug applies to the Rust wrapper, and not the underlying library** - bugs in the RocksDB library should be [reported upstream](https://github.com/facebook/rocksdb/issues).
 - When [creating an issue](https://github.com/rust-rocksdb/rust-rocksdb/issues/new) please try to:
     - **Use a clear and descriptive title** to identify the issue
-    - **Provide enough context** to acurately summarize the issue. Not every issue will need detailed steps to recreate, example code, stack traces, etc. - use your own judgment on what information would be helpful to anyone working on the issue. It's easier for someone to skim over too much context, than stop and wait for a response when context is missing.
+    - **Provide enough context** to accurately summarize the issue. Not every issue will need detailed steps to recreate, example code, stack traces, etc. - use your own judgment on what information would be helpful to anyone working on the issue. It's easier for someone to skim over too much context, than stop and wait for a response when context is missing.
 
 ## Feature Requests
 [feature-requests]: #feature-requests
diff --git a/librocksdb-sys/build.rs b/librocksdb-sys/build.rs
index 536f34f..c28c09c 100644
--- a/librocksdb-sys/build.rs
+++ b/librocksdb-sys/build.rs
@@ -284,7 +284,7 @@ fn build_rocksdb() {
         config.flag("-EHsc");
         config.flag("-std:c++17");
     } else {
-        config.flag(&cxx_standard());
+        config.flag(cxx_standard());
         // matches the flags in CMakeLists.txt from rocksdb
         config.flag("-Wsign-compare");
         config.flag("-Wshadow");
diff --git a/src/backup.rs b/src/backup.rs
index 42417f7..9ae180b 100644
--- a/src/backup.rs
+++ b/src/backup.rs
@@ -134,7 +134,6 @@ impl BackupEngine {
     ///     return Err(e.to_string());
     /// }
     /// ```
-
     pub fn restore_from_latest_backup<D: AsRef<Path>, W: AsRef<Path>>(
         &mut self,
         db_dir: D,
diff --git a/src/checkpoint.rs b/src/checkpoint.rs
index 0339313..3cae480 100644
--- a/src/checkpoint.rs
+++ b/src/checkpoint.rs
@@ -66,7 +66,7 @@ impl<'db> Checkpoint<'db> {
     }
 }
 
-impl<'db> Drop for Checkpoint<'db> {
+impl Drop for Checkpoint<'_> {
     fn drop(&mut self) {
         unsafe {
             ffi::rocksdb_checkpoint_object_destroy(self.inner);
diff --git a/src/column_family.rs b/src/column_family.rs
index e12a74e..696ea48 100644
--- a/src/column_family.rs
+++ b/src/column_family.rs
@@ -138,7 +138,7 @@ impl Drop for ColumnFamily {
 
 // these behaviors must be identical between BoundColumnFamily and UnboundColumnFamily
 // due to the unsafe transmute() in bound_column_family()!
-impl<'a> Drop for BoundColumnFamily<'a> {
+impl Drop for BoundColumnFamily<'_> {
     fn drop(&mut self) {
         destroy_handle(self.inner);
     }
@@ -170,7 +170,7 @@ impl AsColumnFamilyRef for ColumnFamily {
     }
 }
 
-impl<'a> AsColumnFamilyRef for &'a ColumnFamily {
+impl AsColumnFamilyRef for &'_ ColumnFamily {
     fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t {
         self.inner
     }
@@ -181,7 +181,7 @@ impl<'a> AsColumnFamilyRef for &'a ColumnFamily {
 // isn't expected to be used as naked.
 // Also, ColumnFamilyRef might not be Arc<BoundColumnFamily<'a>> depending crate
 // feature flags so, we can't use the type alias here.
-impl<'a> AsColumnFamilyRef for Arc<BoundColumnFamily<'a>> {
+impl AsColumnFamilyRef for Arc<BoundColumnFamily<'_>> {
     fn inner(&self) -> *mut ffi::rocksdb_column_family_handle_t {
         self.inner
     }
@@ -191,5 +191,5 @@ unsafe impl Send for ColumnFamily {}
 unsafe impl Sync for ColumnFamily {}
 unsafe impl Send for UnboundColumnFamily {}
 unsafe impl Sync for UnboundColumnFamily {}
-unsafe impl<'a> Send for BoundColumnFamily<'a> {}
-unsafe impl<'a> Sync for BoundColumnFamily<'a> {}
+unsafe impl Send for BoundColumnFamily<'_> {}
+unsafe impl Sync for BoundColumnFamily<'_> {}
diff --git a/src/db_iterator.rs b/src/db_iterator.rs
index c9064bc..7ecb26a 100644
--- a/src/db_iterator.rs
+++ b/src/db_iterator.rs
@@ -377,7 +377,7 @@ impl<'a, D: DBAccess> DBRawIteratorWithThreadMode<'a, D> {
     }
 }
 
-impl<'a, D: DBAccess> Drop for DBRawIteratorWithThreadMode<'a, D> {
+impl<D: DBAccess> Drop for DBRawIteratorWithThreadMode<'_, D> {
     fn drop(&mut self) {
         unsafe {
             ffi::rocksdb_iter_destroy(self.inner.as_ptr());
@@ -385,8 +385,8 @@ impl<'a, D: DBAccess> Drop for DBRawIteratorWithThreadMode<'a, D> {
     }
 }
 
-unsafe impl<'a, D: DBAccess> Send for DBRawIteratorWithThreadMode<'a, D> {}
-unsafe impl<'a, D: DBAccess> Sync for DBRawIteratorWithThreadMode<'a, D> {}
+unsafe impl<D: DBAccess> Send for DBRawIteratorWithThreadMode<'_, D> {}
+unsafe impl<D: DBAccess> Sync for DBRawIteratorWithThreadMode<'_, D> {}
 
 /// A type alias to keep compatibility. See [`DBIteratorWithThreadMode`] for details
 pub type DBIterator<'a> = DBIteratorWithThreadMode<'a, DB>;
@@ -501,7 +501,7 @@ impl<'a, D: DBAccess> DBIteratorWithThreadMode<'a, D> {
     }
 }
 
-impl<'a, D: DBAccess> Iterator for DBIteratorWithThreadMode<'a, D> {
+impl<D: DBAccess> Iterator for DBIteratorWithThreadMode<'_, D> {
     type Item = Result<KVBytes, Error>;
 
     fn next(&mut self) -> Option<Result<KVBytes, Error>> {
@@ -521,7 +521,7 @@ impl<'a, D: DBAccess> Iterator for DBIteratorWithThreadMode<'a, D> {
     }
 }
 
-impl<'a, D: DBAccess> std::iter::FusedIterator for DBIteratorWithThreadMode<'a, D> {}
+impl<D: DBAccess> std::iter::FusedIterator for DBIteratorWithThreadMode<'_, D> {}
 
 impl<'a, D: DBAccess> Into<DBRawIteratorWithThreadMode<'a, D>> for DBIteratorWithThreadMode<'a, D> {
     fn into(self) -> DBRawIteratorWithThreadMode<'a, D> {
diff --git a/src/db_options.rs b/src/db_options.rs
index 37e6572..59f5a81 100644
--- a/src/db_options.rs
+++ b/src/db_options.rs
@@ -1580,9 +1580,10 @@ impl Options {
     /// UniversalCompactionBuilder::PickPeriodicCompaction().
     /// For backward compatibility, the effective value of this option takes
     /// into account the value of option `ttl`. The logic is as follows:
-    /// - both options are set to 30 days if they have the default value.
-    /// - if both options are zero, zero is picked. Otherwise, we take the min
-    /// value among non-zero options values (i.e. takes the stricter limit).
+    ///
+    /// - both options are set to 30 days if they have the default value.
+    /// - if both options are zero, zero is picked. Otherwise, we take the min
+    ///   value among non-zero options values (i.e. takes the stricter limit).
     ///
     /// One main use of the feature is to make sure a file goes through compaction
     /// filters periodically. Users can also use the feature to clear up SST
@@ -2148,8 +2149,8 @@ impl Options {
     /// The exact behavior of this parameter is platform dependent.
     ///
     /// On POSIX systems, after RocksDB reads data from disk it will
-    /// mark the pages as "unneeded". The operating system may - or may not
-    /// - evict these pages from memory, reducing pressure on the system
+    /// mark the pages as "unneeded". The operating system may or may not
+    /// evict these pages from memory, reducing pressure on the system
     /// cache. If the disk block is requested again this can result in
     /// additional disk I/O.
     ///
@@ -3705,9 +3706,11 @@ impl Options {
     /// to be able to ingest behind (call IngestExternalFile() skipping keys
     /// that already exist, rather than overwriting matching keys).
     /// Setting this option to true has the following effects:
-    /// 1) Disable some internal optimizations around SST file compression.
-    /// 2) Reserve the last level for ingested files only.
-    /// 3) Compaction will not include any file from the last level.
+    ///
+    /// 1. Disable some internal optimizations around SST file compression.
+    /// 2. Reserve the last level for ingested files only.
+    /// 3. Compaction will not include any file from the last level.
+    ///
     /// Note that only Universal Compaction supports allow_ingest_behind.
     /// `num_levels` should be >= 3 if this option is turned on.
     ///
@@ -3830,10 +3833,12 @@ impl Options {
     /// or an IDENTITY file (historical, deprecated), or both. If this option is
     /// set to false (old behavior), then `write_identity_file` must be set to true.
     /// The manifest is preferred because
+    ///
     /// 1. The IDENTITY file is not checksummed, so it is not as safe against
     /// corruption.
     /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
     /// copied by BackupEngine), so is not reliable for the provenance of a DB.
+    ///
     /// This option might eventually be obsolete and removed as Identity files
     /// are phased out.
     ///
diff --git a/src/db_pinnable_slice.rs b/src/db_pinnable_slice.rs
index e6bd6a4..8ea515b 100644
--- a/src/db_pinnable_slice.rs
+++ b/src/db_pinnable_slice.rs
@@ -28,17 +28,17 @@ pub struct DBPinnableSlice<'a> {
     db: PhantomData<&'a DB>,
 }
 
-unsafe impl<'a> Send for DBPinnableSlice<'a> {}
-unsafe impl<'a> Sync for DBPinnableSlice<'a> {}
+unsafe impl Send for DBPinnableSlice<'_> {}
+unsafe impl Sync for DBPinnableSlice<'_> {}
 
-impl<'a> AsRef<[u8]> for DBPinnableSlice<'a> {
+impl AsRef<[u8]> for DBPinnableSlice<'_> {
     fn as_ref(&self) -> &[u8] {
         // Implement this via Deref so as not to repeat ourselves
         self
     }
 }
 
-impl<'a> Deref for DBPinnableSlice<'a> {
+impl Deref for DBPinnableSlice<'_> {
     type Target = [u8];
 
     fn deref(&self) -> &[u8] {
@@ -50,7 +50,7 @@ impl<'a> Deref for DBPinnableSlice<'a> {
     }
 }
 
-impl<'a> Drop for DBPinnableSlice<'a> {
+impl Drop for DBPinnableSlice<'_> {
     fn drop(&mut self) {
         unsafe {
             ffi::rocksdb_pinnableslice_destroy(self.ptr);
@@ -58,7 +58,7 @@ impl<'a> Drop for DBPinnableSlice<'a> {
     }
 }
 
-impl<'a> DBPinnableSlice<'a> {
+impl DBPinnableSlice<'_> {
     /// Used to wrap a PinnableSlice from rocksdb to avoid unnecessary memcpy
     ///
     /// # Unsafe
diff --git a/src/snapshot.rs b/src/snapshot.rs
index 9affa84..81c6ad4 100644
--- a/src/snapshot.rs
+++ b/src/snapshot.rs
@@ -259,7 +259,7 @@ impl<'a, D: DBAccess> SnapshotWithThreadMode<'a, D> {
     }
 }
 
-impl<'a, D: DBAccess> Drop for SnapshotWithThreadMode<'a, D> {
+impl<D: DBAccess> Drop for SnapshotWithThreadMode<'_, D> {
     fn drop(&mut self) {
         unsafe {
             self.db.release_snapshot(self.inner);
@@ -269,5 +269,5 @@ impl<'a, D: DBAccess> Drop for SnapshotWithThreadMode<'a, D> {
 
 /// `Send` and `Sync` implementations for `SnapshotWithThreadMode` are safe, because `SnapshotWithThreadMode` is
 /// immutable and can be safely shared between threads.
-unsafe impl<'a, D: DBAccess> Send for SnapshotWithThreadMode<'a, D> {}
-unsafe impl<'a, D: DBAccess> Sync for SnapshotWithThreadMode<'a, D> {}
+unsafe impl<D: DBAccess> Send for SnapshotWithThreadMode<'_, D> {}
+unsafe impl<D: DBAccess> Sync for SnapshotWithThreadMode<'_, D> {}
diff --git a/src/sst_file_writer.rs b/src/sst_file_writer.rs
index f2b5a18..b475a77 100644
--- a/src/sst_file_writer.rs
+++ b/src/sst_file_writer.rs
@@ -27,8 +27,8 @@ pub struct SstFileWriter<'a> {
     phantom: PhantomData<&'a Options>,
 }
 
-unsafe impl<'a> Send for SstFileWriter<'a> {}
-unsafe impl<'a> Sync for SstFileWriter<'a> {}
+unsafe impl Send for SstFileWriter<'_> {}
+unsafe impl Sync for SstFileWriter<'_> {}
 
 struct EnvOptions {
     inner: *mut ffi::rocksdb_envoptions_t,
@@ -205,7 +205,7 @@ impl<'a> SstFileWriter<'a> {
     }
 }
 
-impl<'a> Drop for SstFileWriter<'a> {
+impl Drop for SstFileWriter<'_> {
     fn drop(&mut self) {
         unsafe {
             ffi::rocksdb_sstfilewriter_destroy(self.inner);
diff --git a/src/transactions/transaction.rs b/src/transactions/transaction.rs
index 3c1b696..feee2dd 100644
--- a/src/transactions/transaction.rs
+++ b/src/transactions/transaction.rs
@@ -33,9 +33,9 @@ pub struct Transaction<'db, DB> {
     pub(crate) _marker: PhantomData<&'db DB>,
 }
 
-unsafe impl<'db, DB> Send for Transaction<'db, DB> {}
+unsafe impl<DB> Send for Transaction<'_, DB> {}
 
-impl<'db, DB> DBAccess for Transaction<'db, DB> {
+impl<DB> DBAccess for Transaction<'_, DB> {
     unsafe fn create_snapshot(&self) -> *const ffi::rocksdb_snapshot_t {
         ffi::rocksdb_transaction_get_snapshot(self.inner)
     }
@@ -116,7 +116,7 @@
     }
 }
 
-impl<'db, DB> Transaction<'db, DB> {
+impl<DB> Transaction<'_, DB> {
     /// Write all batched keys to the DB atomically.
     ///
     /// May return any error that could be returned by `DB::write`.
@@ -892,7 +892,7 @@ impl<'db, DB> Transaction<'db, DB> {
     }
 }
 
-impl<'db, DB> Drop for Transaction<'db, DB> {
+impl<DB> Drop for Transaction<'_, DB> {
     fn drop(&mut self) {
         unsafe {
             ffi::rocksdb_transaction_destroy(self.inner);
diff --git a/tests/test_column_family.rs b/tests/test_column_family.rs
index 41e9b8b..318d63f 100644
--- a/tests/test_column_family.rs
+++ b/tests/test_column_family.rs
@@ -160,7 +160,7 @@ fn test_column_family_with_transactiondb() {
         }
     }
 
-    // should properly open db when specyfing all column families
+    // should properly open db when specifying all column families
    {
        let mut opts = Options::default();
        opts.set_merge_operator_associative("test operator", test_provided_merge);
diff --git a/tests/test_db.rs b/tests/test_db.rs
index a662f8f..300734a 100644
--- a/tests/test_db.rs
+++ b/tests/test_db.rs
@@ -16,7 +16,7 @@ mod util;
 
 use std::convert::TryInto;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::{mem, sync::Arc, thread, time::Duration};
+use std::{sync::Arc, thread, time::Duration};
 
 use pretty_assertions::assert_eq;
 
@@ -264,14 +264,14 @@ fn snapshot_test() {
 }
 
 #[derive(Clone)]
-struct SnapshotWrapper {
-    snapshot: Arc<Snapshot<'static>>,
+struct SnapshotWrapper<'db> {
+    snapshot: Arc<Snapshot<'db>>,
 }
 
-impl SnapshotWrapper {
-    fn new(db: &DB) -> Self {
+impl<'db> SnapshotWrapper<'db> {
+    fn new(db: &'db DB) -> Self {
         Self {
-            snapshot: Arc::new(unsafe { mem::transmute(db.snapshot()) }),
+            snapshot: Arc::new(db.snapshot()),
         }
     }
 
@@ -291,13 +291,14 @@ fn sync_snapshot_test() {
     assert!(db.put(b"k1", b"v1").is_ok());
     assert!(db.put(b"k2", b"v2").is_ok());
 
-    let wrapper = SnapshotWrapper::new(&db);
-    let wrapper_1 = wrapper.clone();
-    let handler_1 = thread::spawn(move || wrapper_1.check("k1", b"v1"));
-    let handler_2 = thread::spawn(move || wrapper.check("k2", b"v2"));
-
-    assert!(handler_1.join().unwrap());
-    assert!(handler_2.join().unwrap());
+    let wrapper_1 = SnapshotWrapper::new(&db);
+    let wrapper_2 = wrapper_1.clone();
+    thread::scope(|s| {
+        let handler_1 = s.spawn(move || wrapper_1.check("k1", b"v1"));
+        let handler_2 = s.spawn(move || wrapper_2.check("k2", b"v2"));
+        assert!(handler_1.join().unwrap());
+        assert!(handler_2.join().unwrap());
+    });
 }
 
 #[test]
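
For context, the scoped-thread pattern that the reworked `sync_snapshot_test` relies on can be sketched as a small standalone program roughly like this. This is a sketch only: the `_scoped_snapshot_sketch` path and the `main` wrapper are illustrative, error handling is collapsed to `unwrap`, and it simply demonstrates why the `mem::transmute` to a `'static` snapshot is no longer needed once `std::thread::scope` (Rust 1.63+) is used.

```rust
// Sketch: sharing a borrowed rocksdb snapshot across scoped threads.
// Assumes the `rocksdb` crate from this repository; the path below is a
// throwaway placeholder, not something the test suite uses.
use std::{sync::Arc, thread};

use rocksdb::{Options, DB};

fn main() {
    let path = "_scoped_snapshot_sketch";
    {
        let db = DB::open_default(path).unwrap();
        db.put(b"k1", b"v1").unwrap();
        db.put(b"k2", b"v2").unwrap();

        // The snapshot borrows `db`; Arc lets two threads share the same one.
        let snap_a = Arc::new(db.snapshot());
        let snap_b = Arc::clone(&snap_a);

        // Scoped threads are joined before `scope` returns, so they may hold
        // borrows tied to `db`'s lifetime - no `'static` transmute required.
        // This relies on SnapshotWithThreadMode being Send + Sync, which the
        // diff above keeps (only the lifetime parameters are now elided).
        thread::scope(|s| {
            let h1 = s.spawn(move || snap_a.get(b"k1").unwrap());
            let h2 = s.spawn(move || snap_b.get(b"k2").unwrap());
            assert_eq!(h1.join().unwrap().as_deref(), Some(&b"v1"[..]));
            assert_eq!(h2.join().unwrap().as_deref(), Some(&b"v2"[..]));
        });
    } // `db` is dropped here, before the files are removed.
    let _ = DB::destroy(&Options::default(), path);
}
```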