From 9a3c725f0674a3f0a87561c2885c6bc74ad9226f Mon Sep 17 00:00:00 2001
From: Peter Shugalev
Date: Mon, 29 Jun 2020 14:10:18 +0300
Subject: [PATCH] Removed extra files produced by merge

---
 README.md              |   2 +
 doc/impl.md            | 170 --------------
 doc/index.md           | 523 -----------------------------------------
 doc/log_format.md      |  75 ------
 doc/table_format.md    | 107 ---------
 port/port_posix_sse.cc | 110 ---------
 6 files changed, 2 insertions(+), 985 deletions(-)
 delete mode 100644 doc/impl.md
 delete mode 100644 doc/index.md
 delete mode 100644 doc/log_format.md
 delete mode 100644 doc/table_format.md
 delete mode 100644 port/port_posix_sse.cc

diff --git a/README.md b/README.md
index 4f8cbc1ac2..4c9dbcb84c 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
 Zcoin
+===============
 [![Financial Contributors on Open Collective](https://opencollective.com/zcoin/all/badge.svg?label=financial+contributors)](https://opencollective.com/zcoin)
 [![latest-release](https://img.shields.io/github/release/zcoinofficial/zcoin)](https://github.com/zcoinofficial/zcoin/releases)
 [![GitHub last-release](https://img.shields.io/github/release-date/zcoinofficial/zcoin)](https://github.com/zcoinofficial/zcoin/releases)
@@ -72,6 +73,7 @@ docker start zcoind
 ```
 
 Linux Build Instructions and Notes
+==================================
 
 Dependencies
 ----------------------

diff --git a/doc/impl.md b/doc/impl.md
deleted file mode 100644
index 4b13f2a6ba..0000000000
--- a/doc/impl.md
+++ /dev/null
@@ -1,170 +0,0 @@
## Files

The implementation of leveldb is similar in spirit to the representation of a
single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html).
However, the organization of the files that make up the representation is
somewhat different and is explained below.

Each database is represented by a set of files stored in a directory. There are
several different types of files, as documented below:

### Log files

A log file (*.log) stores a sequence of recent updates. Each update is appended
to the current log file. When the log file reaches a pre-determined size
(approximately 4MB by default), it is converted to a sorted table (see below)
and a new log file is created for future updates.

A copy of the current log file is kept in an in-memory structure (the
`memtable`). This copy is consulted on every read so that read operations
reflect all logged updates.

### Sorted tables

A sorted table (*.ldb) stores a sequence of entries sorted by key. Each entry is
either a value for the key or a deletion marker for the key. (Deletion markers
are kept around to hide obsolete values present in older sorted tables.)

The set of sorted tables is organized into a sequence of levels. The sorted
table generated from a log file is placed in a special **young** level (also
called level-0). When the number of young files exceeds a certain threshold
(currently four), all of the young files are merged together with all of the
overlapping level-1 files to produce a sequence of new level-1 files (we create
a new level-1 file for every 2MB of data).

Files in the young level may contain overlapping keys. However, files in other
levels have distinct non-overlapping key ranges. Consider level number L where
L >= 1. When the combined size of files in level-L exceeds (10^L) MB (i.e., 10MB
for level-1, 100MB for level-2, ...), one file in level-L and all of the
overlapping files in level-(L+1) are merged to form a set of new files for
level-(L+1). These merges have the effect of gradually migrating new updates
from the young level to the largest level using only bulk reads and writes
(i.e., minimizing expensive seeks).
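As a concrete reading of the (10^L) MB rule, here is a minimal sketch; the
function name and constants are ours, for illustration, not leveldb's internal
API:

```c++
#include <cstdint>

// Illustrative only: combined-size limit for level L (L >= 1), i.e.
// 10MB for level-1, 100MB for level-2, 1000MB for level-3, ...
static uint64_t MaxBytesForLevel(int level) {
  uint64_t limit = 10 * 1048576;  // 10MB for level-1
  while (level > 1) {
    limit *= 10;
    --level;
  }
  return limit;
}
```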
### Manifest

A MANIFEST file lists the set of sorted tables that make up each level, the
corresponding key ranges, and other important metadata. A new MANIFEST file
(with a new number embedded in the file name) is created whenever the database
is reopened. The MANIFEST file is formatted as a log, and changes made to the
serving state (as files are added or removed) are appended to this log.

### Current

CURRENT is a simple text file that contains the name of the latest MANIFEST
file.

### Info logs

Informational messages are printed to files named LOG and LOG.old.

### Others

Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).

## Level 0

When the log file grows above a certain size (1MB by default):

1. Create a brand new memtable and log file and direct future updates here.
2. In the background:
   1. Write the contents of the previous memtable to an sstable.
   2. Discard the memtable.
   3. Delete the old log file and the old memtable.
   4. Add the new sstable to the young (level-0) level.

## Compactions

When the size of level L exceeds its limit, we compact it in a background
thread. The compaction picks a file from level L and all overlapping files from
the next level L+1. Note that if a level-L file overlaps only part of a
level-(L+1) file, the entire file at level-(L+1) is used as an input to the
compaction and will be discarded after the compaction. Aside: because level-0
is special (files in it may overlap each other), we treat compactions from
level-0 to level-1 specially: a level-0 compaction may pick more than one
level-0 file in case some of these files overlap each other.

A compaction merges the contents of the picked files to produce a sequence of
level-(L+1) files. We switch to producing a new level-(L+1) file after the
current output file has reached the target file size (2MB). We also switch to a
new output file when the key range of the current output file has grown enough
to overlap more than ten level-(L+2) files. This last rule ensures that a later
compaction of a level-(L+1) file will not pick up too much data from
level-(L+2).

The old files are discarded and the new files are added to the serving state.

Compactions for a particular level rotate through the key space. In more detail,
for each level L, we remember the ending key of the last compaction at level L.
The next compaction for level L will pick the first file that starts after this
key (wrapping around to the beginning of the key space if there is no such
file).

Compactions drop overwritten values. They also drop deletion markers if there
are no higher numbered levels that contain a file whose range overlaps the
current key.
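The rotation described above only needs one remembered key per level; a minimal
sketch, with hypothetical names (`FileMetaData`, `PickCompactionFile`) rather
than leveldb's internal types:

```c++
#include <string>
#include <vector>

struct FileMetaData {
  std::string smallest;  // smallest key in the file
  std::string largest;   // largest key in the file
};

// Illustrative only: pick the first level-L file whose range ends after the
// ending key of the previous compaction at this level, wrapping around to the
// beginning of the key space when no such file exists.
const FileMetaData* PickCompactionFile(const std::vector<FileMetaData>& files,
                                       const std::string& compact_pointer) {
  if (files.empty()) return nullptr;
  for (const FileMetaData& f : files) {
    if (compact_pointer.empty() || f.largest > compact_pointer) return &f;
  }
  return &files[0];  // wrap around
}
```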
### Timing

Level-0 compactions will read up to four 1MB files from level-0, and at worst
all the level-1 files (10MB). I.e., we will read 14MB and write 14MB.

Other than the special level-0 compactions, we will pick one 2MB file from level
L. In the worst case, this will overlap ~ 12 files from level L+1 (10 because
level-(L+1) is ten times the size of level-L, and another two at the boundaries
since the file ranges at level-L will usually not be aligned with the file
ranges at level-L+1). The compaction will therefore read 26MB and write 26MB.

Assuming a disk IO rate of 100MB/s (ballpark range for modern drives), the worst
compaction cost will be approximately 0.5 second.

If we throttle the background writing to something small, say 10% of the full
100MB/s speed, a compaction may take up to 5 seconds. If the user is writing at
10MB/s, we might build up lots of level-0 files (~50 to hold the 5*10MB). This
may significantly increase the cost of reads due to the overhead of merging more
files together on every read.

Solution 1: To reduce this problem, we might want to increase the log switching
threshold when the number of level-0 files is large. The downside is that the
larger this threshold, the more memory we will need to hold the corresponding
memtable.

Solution 2: We might want to decrease the write rate artificially when the
number of level-0 files goes up.

Solution 3: We work on reducing the cost of very wide merges. Perhaps most of
the level-0 files will have their blocks sitting uncompressed in the cache and
we will only need to worry about the O(N) complexity in the merging iterator.

### Number of files

Instead of always making 2MB files, we could make larger files for larger levels
to reduce the total file count, though at the expense of more bursty
compactions. Alternatively, we could shard the set of files into multiple
directories.

An experiment on an ext3 filesystem on Feb 04, 2011 shows the following timings
to do 100K file opens in directories with varying numbers of files:

| Files in directory | Microseconds to open a file |
|-------------------:|----------------------------:|
|               1000 |                           9 |
|              10000 |                          10 |
|             100000 |                          16 |

So maybe even the sharding is not necessary on modern filesystems?

## Recovery

* Read CURRENT to find the name of the latest committed MANIFEST
* Read the named MANIFEST file
* Clean up stale files
* We could open all sstables here, but it is probably better to be lazy...
* Convert log chunk to a new level-0 sstable
* Start directing new writes to a new log file with recovered sequence#

## Garbage collection of files

`DeleteObsoleteFiles()` is called at the end of every compaction and at the end
of recovery. It finds the names of all files in the database. It deletes all log
files that are not the current log file. It deletes all table files that are not
referenced from some level and are not the output of an active compaction.

diff --git a/doc/index.md b/doc/index.md
deleted file mode 100644
index be8569692b..0000000000
--- a/doc/index.md
+++ /dev/null
@@ -1,523 +0,0 @@
leveldb
=======

_Jeff Dean, Sanjay Ghemawat_

The leveldb library provides a persistent key value store. Keys and values are
arbitrary byte arrays. The keys are ordered within the key value store
according to a user-specified comparator function.

## Opening A Database

A leveldb database has a name which corresponds to a file system directory. All
of the contents of the database are stored in this directory. The following
example shows how to open a database, creating it if necessary:

```c++
#include <cassert>
#include "leveldb/db.h"

leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
assert(status.ok());
...
```

If you want to raise an error if the database already exists, add the following
line before the `leveldb::DB::Open` call:

```c++
options.error_if_exists = true;
```

## Status

You may have noticed the `leveldb::Status` type above. Values of this type are
returned by most functions in leveldb that may encounter an error. You can check
whether such a result is ok, and also print an associated error message:

```c++
leveldb::Status s = ...;
if (!s.ok()) std::cerr << s.ToString() << std::endl;
```

## Closing A Database

When you are done with a database, just delete the database object. Example:

```c++
... open the db as described above ...
... do something with db ...
delete db;
```
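Since closing is just `delete`, a scoped smart pointer can do it automatically;
a minimal sketch of this convenience pattern (ours, not part of the leveldb
API):

```c++
#include <memory>
#include <string>
#include "leveldb/db.h"

// Illustrative only: the unique_ptr deletes (and thereby closes) the DB when
// it goes out of scope; returns null on failure.
std::unique_ptr<leveldb::DB> OpenOrNull(const std::string& name) {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, name, &db);
  return std::unique_ptr<leveldb::DB>(status.ok() ? db : nullptr);
}
```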
## Reads And Writes

The database provides Put, Delete, and Get methods to modify/query the database.
For example, the following code moves the value stored under key1 to key2.

```c++
std::string value;
leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value);
if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1);
```

## Atomic Updates

Note that if the process dies after the Put of key2 but before the delete of
key1, the same value may be left stored under multiple keys. Such problems can
be avoided by using the `WriteBatch` class to atomically apply a set of updates:

```c++
#include "leveldb/write_batch.h"
...
std::string value;
leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
if (s.ok()) {
  leveldb::WriteBatch batch;
  batch.Delete(key1);
  batch.Put(key2, value);
  s = db->Write(leveldb::WriteOptions(), &batch);
}
```

The `WriteBatch` holds a sequence of edits to be made to the database, and these
edits within the batch are applied in order. Note that we called Delete before
Put so that if key1 is identical to key2, we do not end up erroneously dropping
the value entirely.

Apart from its atomicity benefits, `WriteBatch` may also be used to speed up
bulk updates by placing lots of individual mutations into the same batch.

## Synchronous Writes

By default, each write to leveldb is asynchronous: it returns after pushing the
write from the process into the operating system. The transfer from operating
system memory to the underlying persistent storage happens asynchronously. The
sync flag can be turned on for a particular write to make the write operation
not return until the data being written has been pushed all the way to
persistent storage. (On Posix systems, this is implemented by calling either
`fsync(...)` or `fdatasync(...)` or `msync(..., MS_SYNC)` before the write
operation returns.)

```c++
leveldb::WriteOptions write_options;
write_options.sync = true;
db->Put(write_options, ...);
```

Asynchronous writes are often more than a thousand times as fast as synchronous
writes. The downside of asynchronous writes is that a crash of the machine may
cause the last few updates to be lost. Note that a crash of just the writing
process (i.e., not a reboot) will not cause any loss since even when sync is
false, an update is pushed from the process memory into the operating system
before it is considered done.

Asynchronous writes can often be used safely. For example, when loading a large
amount of data into the database you can handle lost updates by restarting the
bulk load after a crash. A hybrid scheme is also possible where every Nth write
is synchronous, and in the event of a crash, the bulk load is restarted just
after the last synchronous write finished by the previous run. (The synchronous
write can update a marker that describes where to restart on a crash.)

`WriteBatch` provides an alternative to asynchronous writes. Multiple updates
may be placed in the same WriteBatch and applied together using a synchronous
write (i.e., `write_options.sync` is set to true). The extra cost of the
synchronous write will be amortized across all of the writes in the batch.
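A minimal sketch of the every-Nth-write hybrid scheme described above; the
function, the `kSyncEvery` constant, and the restart-marker idea are our
illustration, not an API:

```c++
#include <string>
#include <utility>
#include <vector>
#include "leveldb/db.h"

// Illustrative only: make every Nth write synchronous during a bulk load so
// that, after a crash, the load can restart just past the last synced write.
leveldb::Status BulkLoad(
    leveldb::DB* db,
    const std::vector<std::pair<std::string, std::string>>& kvs) {
  const size_t kSyncEvery = 1000;  // hypothetical tuning knob
  leveldb::WriteOptions async_opts, sync_opts;
  sync_opts.sync = true;
  leveldb::Status s;
  for (size_t i = 0; i < kvs.size() && s.ok(); i++) {
    bool sync_this_one = ((i + 1) % kSyncEvery == 0);
    s = db->Put(sync_this_one ? sync_opts : async_opts,
                kvs[i].first, kvs[i].second);
    // A real loader would also persist a restart marker here.
  }
  return s;
}
```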
## Concurrency

A database may only be opened by one process at a time. The leveldb
implementation acquires a lock from the operating system to prevent misuse.
Within a single process, the same `leveldb::DB` object may be safely shared by
multiple concurrent threads. I.e., different threads may write into or fetch
iterators or call Get on the same database without any external synchronization
(the leveldb implementation will automatically do the required synchronization).
However, other objects (like Iterator and `WriteBatch`) may require external
synchronization. If two threads share such an object, they must protect access
to it using their own locking protocol. More details are available in the public
header files.

## Iteration

The following example demonstrates how to print all key/value pairs in a
database.

```c++
leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  std::cout << it->key().ToString() << ": " << it->value().ToString() << std::endl;
}
assert(it->status().ok());  // Check for any errors found during the scan
delete it;
```

The following variation shows how to process just the keys in the range
[start,limit):

```c++
for (it->Seek(start);
     it->Valid() && it->key().ToString() < limit;
     it->Next()) {
  ...
}
```

You can also process entries in reverse order. (Caveat: reverse iteration may be
somewhat slower than forward iteration.)

```c++
for (it->SeekToLast(); it->Valid(); it->Prev()) {
  ...
}
```

## Snapshots

Snapshots provide consistent read-only views over the entire state of the
key-value store. `ReadOptions::snapshot` may be non-NULL to indicate that a
read should operate on a particular version of the DB state. If
`ReadOptions::snapshot` is NULL, the read will operate on an implicit snapshot
of the current state.

Snapshots are created by the `DB::GetSnapshot()` method:

```c++
leveldb::ReadOptions options;
options.snapshot = db->GetSnapshot();
... apply some updates to db ...
leveldb::Iterator* iter = db->NewIterator(options);
... read using iter to view the state when the snapshot was created ...
delete iter;
db->ReleaseSnapshot(options.snapshot);
```

Note that when a snapshot is no longer needed, it should be released using the
`DB::ReleaseSnapshot` interface. This allows the implementation to get rid of
state that was being maintained just to support reading as of that snapshot.
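Because a forgotten `ReleaseSnapshot` pins old state indefinitely, a small RAII
guard can tie the release to a scope; a sketch of our own convenience wrapper,
not part of leveldb:

```c++
#include "leveldb/db.h"

// Illustrative only: acquires a snapshot on construction and releases it on
// scope exit, so the release cannot be forgotten on early returns.
class SnapshotGuard {
 public:
  explicit SnapshotGuard(leveldb::DB* db)
      : db_(db), snapshot_(db->GetSnapshot()) {}
  ~SnapshotGuard() { db_->ReleaseSnapshot(snapshot_); }
  const leveldb::Snapshot* get() const { return snapshot_; }

  SnapshotGuard(const SnapshotGuard&) = delete;
  SnapshotGuard& operator=(const SnapshotGuard&) = delete;

 private:
  leveldb::DB* db_;
  const leveldb::Snapshot* snapshot_;
};
```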
## Slice

The return value of the `it->key()` and `it->value()` calls above are instances
of the `leveldb::Slice` type. Slice is a simple structure that contains a length
and a pointer to an external byte array. Returning a Slice is a cheaper
alternative to returning a `std::string` since we do not need to copy
potentially large keys and values. In addition, leveldb methods do not return
null-terminated C-style strings since leveldb keys and values are allowed to
contain `'\0'` bytes.

C++ strings and null-terminated C-style strings can be easily converted to a
Slice:

```c++
leveldb::Slice s1 = "hello";

std::string str("world");
leveldb::Slice s2 = str;
```

A Slice can be easily converted back to a C++ string:

```c++
std::string str = s1.ToString();
assert(str == std::string("hello"));
```

Be careful when using Slices, since it is up to the caller to ensure that the
external byte array into which the Slice points remains live while the Slice is
in use. For example, the following is buggy:

```c++
leveldb::Slice slice;
if (...) {
  std::string str = ...;
  slice = str;
}
Use(slice);
```

When control leaves the if statement's scope, str is destroyed and the backing
storage for slice disappears.

## Comparators

The preceding examples used the default ordering function for keys, which orders
bytes lexicographically. You can, however, supply a custom comparator when
opening a database. For example, suppose each database key consists of two
numbers and we want to sort by the first number, breaking ties by the second
number. First, define a proper subclass of `leveldb::Comparator` that expresses
these rules:

```c++
class TwoPartComparator : public leveldb::Comparator {
 public:
  // Three-way comparison function:
  //   if a < b: negative result
  //   if a > b: positive result
  //   else: zero result
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
    int a1, a2, b1, b2;
    ParseKey(a, &a1, &a2);
    ParseKey(b, &b1, &b2);
    if (a1 < b1) return -1;
    if (a1 > b1) return +1;
    if (a2 < b2) return -1;
    if (a2 > b2) return +1;
    return 0;
  }

  // Ignore the following methods for now:
  const char* Name() const { return "TwoPartComparator"; }
  void FindShortestSeparator(std::string*, const leveldb::Slice&) const {}
  void FindShortSuccessor(std::string*) const {}
};
```
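The example leaves `ParseKey` undefined; one possible sketch, assuming a
hypothetical "a:b" decimal text key format (our assumption, not a leveldb
convention):

```c++
#include <cstdio>
#include "leveldb/slice.h"

// Illustrative only: parse keys of the assumed form "<first>:<second>",
// e.g. "42:7". A production key format would want error handling and an
// order-preserving (e.g. fixed-width big-endian) encoding instead.
static void ParseKey(const leveldb::Slice& key, int* first, int* second) {
  std::sscanf(key.ToString().c_str(), "%d:%d", first, second);
}
```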
Now create a database using this custom comparator:

```c++
TwoPartComparator cmp;
leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
options.comparator = &cmp;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
...
```

### Backwards compatibility

The result of the comparator's Name method is attached to the database when it
is created, and is checked on every subsequent database open. If the name
changes, the `leveldb::DB::Open` call will fail. Therefore, change the name if
and only if the new key format and comparison function are incompatible with
existing databases, and it is ok to discard the contents of all existing
databases.

You can however still gradually evolve your key format over time with a little
bit of pre-planning. For example, you could store a version number at the end of
each key (one byte should suffice for most uses). When you wish to switch to a
new key format (e.g., adding an optional third part to the keys processed by
`TwoPartComparator`), (a) keep the same comparator name, (b) increment the
version number for new keys, and (c) change the comparator function so it uses
the version numbers found in the keys to decide how to interpret them.

## Performance

Performance can be tuned by changing the default values of the types defined in
`include/leveldb/options.h`.

### Block size

leveldb groups adjacent keys together into the same block, and such a block is
the unit of transfer to and from persistent storage. The default block size is
approximately 4096 uncompressed bytes. Applications that mostly do bulk scans
over the contents of the database may wish to increase this size. Applications
that do a lot of point reads of small values may wish to switch to a smaller
block size if performance measurements indicate an improvement. There isn't much
benefit in using blocks smaller than one kilobyte, or larger than a few
megabytes. Also note that compression will be more effective with larger block
sizes.

### Compression

Each block is individually compressed before being written to persistent
storage. Compression is on by default since the default compression method is
very fast, and is automatically disabled for uncompressible data. In rare cases,
applications may want to disable compression entirely, but should only do so if
benchmarks show a performance improvement:

```c++
leveldb::Options options;
options.compression = leveldb::kNoCompression;
... leveldb::DB::Open(options, name, ...) ....
```

### Cache

The contents of the database are stored in a set of files in the filesystem and
each file stores a sequence of compressed blocks. If `options.cache` is
non-NULL, it is used to cache frequently used uncompressed block contents.

```c++
#include "leveldb/cache.h"

leveldb::Options options;
options.cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
leveldb::DB* db;
leveldb::DB::Open(options, name, &db);
... use the db ...
delete db;
delete options.cache;
```

Note that the cache holds uncompressed data, and therefore it should be sized
according to application-level data sizes, without any reduction from
compression. (Caching of compressed blocks is left to the operating system
buffer cache, or any custom Env implementation provided by the client.)

When performing a bulk read, the application may wish to disable caching so that
the data processed by the bulk read does not end up displacing most of the
cached contents. A per-iterator option can be used to achieve this:

```c++
leveldb::ReadOptions options;
options.fill_cache = false;
leveldb::Iterator* it = db->NewIterator(options);
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  ...
}
```

### Key Layout

Note that the unit of disk transfer and caching is a block. Adjacent keys
(according to the database sort order) will usually be placed in the same block.
Therefore the application can improve its performance by placing keys that are
accessed together near each other and placing infrequently used keys in a
separate region of the key space.

For example, suppose we are implementing a simple file system on top of leveldb.
The types of entries we might wish to store are:

    filename -> permission-bits, length, list of file_block_ids
    file_block_id -> data

We might want to prefix filename keys with one letter (say '/') and the
`file_block_id` keys with a different letter (say '0') so that scans over just
the metadata do not force us to fetch and cache bulky file contents.
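A minimal sketch of that prefixing idea; the helper names are ours, and the
prefixes are just the illustration from the paragraph above:

```c++
#include <string>

// Illustrative only: keep all metadata keys ('/' prefix) in a separate key
// range from bulky data keys ('0' prefix), so metadata scans stay cheap.
static std::string MetadataKey(const std::string& filename) {
  return "/" + filename;  // filename -> permission-bits, length, block ids
}

static std::string DataKey(const std::string& file_block_id) {
  return "0" + file_block_id;  // file_block_id -> data
}
```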
### Filters

Because of the way leveldb data is organized on disk, a single `Get()` call may
involve multiple reads from disk. The optional FilterPolicy mechanism can be
used to reduce the number of disk reads substantially.

```c++
leveldb::Options options;
options.filter_policy = NewBloomFilterPolicy(10);
leveldb::DB* db;
leveldb::DB::Open(options, "/tmp/testdb", &db);
... use the database ...
delete db;
delete options.filter_policy;
```

The preceding code associates a Bloom filter based filtering policy with the
database. Bloom filter based filtering relies on keeping some number of bits of
data in memory per key (in this case 10 bits per key since that is the argument
we passed to `NewBloomFilterPolicy`). This filter will reduce the number of
unnecessary disk reads needed for Get() calls by a factor of approximately 100.
Increasing the bits per key will lead to a larger reduction at the cost of more
memory usage. We recommend that applications whose working set does not fit in
memory and that do a lot of random reads set a filter policy.

If you are using a custom comparator, you should ensure that the filter policy
you are using is compatible with your comparator. For example, consider a
comparator that ignores trailing spaces when comparing keys.
`NewBloomFilterPolicy` must not be used with such a comparator. Instead, the
application should provide a custom filter policy that also ignores trailing
spaces. For example:

```c++
class CustomFilterPolicy : public leveldb::FilterPolicy {
 private:
  FilterPolicy* builtin_policy_;

 public:
  CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {}
  ~CustomFilterPolicy() { delete builtin_policy_; }

  const char* Name() const { return "IgnoreTrailingSpacesFilter"; }

  void CreateFilter(const Slice* keys, int n, std::string* dst) const {
    // Use builtin bloom filter code after removing trailing spaces
    std::vector<Slice> trimmed(n);
    for (int i = 0; i < n; i++) {
      trimmed[i] = RemoveTrailingSpaces(keys[i]);
    }
    builtin_policy_->CreateFilter(&trimmed[0], n, dst);
  }
};
```

Advanced applications may provide a filter policy that does not use a bloom
filter but uses some other mechanism for summarizing a set of keys. See
`leveldb/filter_policy.h` for details.

## Checksums

leveldb associates checksums with all data it stores in the file system. There
are two separate controls provided over how aggressively these checksums are
verified:

`ReadOptions::verify_checksums` may be set to true to force checksum
verification of all data that is read from the file system on behalf of a
particular read. By default, no such verification is done.

`Options::paranoid_checks` may be set to true before opening a database to make
the database implementation raise an error as soon as it detects an internal
corruption. Depending on which portion of the database has been corrupted, the
error may be raised when the database is opened, or later by another database
operation. By default, paranoid checking is off so that the database can be used
even if parts of its persistent storage have been corrupted.

If a database is corrupted (perhaps it cannot be opened when paranoid checking
is turned on), the `leveldb::RepairDB` function may be used to recover as much
of the data as possible.
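In the style of the earlier option snippets, both controls look like this when
enabled (illustrative fragment):

```c++
leveldb::Options options;
options.paranoid_checks = true;  // fail fast on detected corruption

leveldb::ReadOptions read_options;
read_options.verify_checksums = true;  // verify all data touched by this read
```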
## Approximate Sizes

The `GetApproximateSizes` method can be used to get the approximate number of
bytes of file system space used by one or more key ranges.

```c++
leveldb::Range ranges[2];
ranges[0] = leveldb::Range("a", "c");
ranges[1] = leveldb::Range("x", "z");
uint64_t sizes[2];
leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes);
```

The preceding call will set `sizes[0]` to the approximate number of bytes of
file system space used by the key range `[a..c)` and `sizes[1]` to the
approximate number of bytes used by the key range `[x..z)`.

## Environment

All file operations (and other operating system calls) issued by the leveldb
implementation are routed through a `leveldb::Env` object. Sophisticated clients
may wish to provide their own Env implementation to get better control.
For example, an application may introduce artificial delays in the file IO
paths to limit the impact of leveldb on other activities in the system.

```c++
class SlowEnv : public leveldb::Env {
  ... implementation of the Env interface ...
};

SlowEnv env;
leveldb::Options options;
options.env = &env;
leveldb::Status s = leveldb::DB::Open(options, ...);
```

## Porting

leveldb may be ported to a new platform by providing platform-specific
implementations of the types/methods/functions exported by
`leveldb/port/port.h`. See `leveldb/port/port_example.h` for more details.

In addition, the new platform may need a new default `leveldb::Env`
implementation. See `leveldb/util/env_posix.h` for an example.

## Other Information

Details about the leveldb implementation may be found in the following
documents:

1. [Implementation notes](impl.md)
2. [Format of an immutable Table file](table_format.md)
3. [Format of a log file](log_format.md)

diff --git a/doc/log_format.md b/doc/log_format.md
deleted file mode 100644
index f32cb5d7da..0000000000
--- a/doc/log_format.md
+++ /dev/null
@@ -1,75 +0,0 @@
leveldb Log format
==================

The log file contents are a sequence of 32KB blocks. The only exception is that
the tail of the file may contain a partial block.

Each block consists of a sequence of records:

    block := record* trailer?
    record :=
      checksum: uint32     // crc32c of type and data[] ; little-endian
      length: uint16       // little-endian
      type: uint8          // One of FULL, FIRST, MIDDLE, LAST
      data: uint8[length]

A record never starts within the last six bytes of a block (since it won't fit).
Any leftover bytes here form the trailer, which must consist entirely of zero
bytes and must be skipped by readers.

Aside: if exactly seven bytes are left in the current block, and a new non-zero
length record is added, the writer must emit a FIRST record (which contains zero
bytes of user data) to fill up the trailing seven bytes of the block and then
emit all of the user data in subsequent blocks.

More types may be added in the future. Some readers may skip record types they
do not understand; others may report that some data was skipped.

    FULL == 1
    FIRST == 2
    MIDDLE == 3
    LAST == 4

The FULL record contains the contents of an entire user record.

FIRST, MIDDLE, LAST are types used for user records that have been split into
multiple fragments (typically because of block boundaries). FIRST is the type
of the first fragment of a user record, LAST is the type of the last fragment of
a user record, and MIDDLE is the type of all interior fragments of a user
record.

Example: consider a sequence of user records:

    A: length 1000
    B: length 97270
    C: length 8000

**A** will be stored as a FULL record in the first block.

**B** will be split into three fragments: the first fragment occupies the rest
of the first block, the second fragment occupies the entirety of the second
block, and the third fragment occupies a prefix of the third block. This will
leave six bytes free in the third block, which will be left empty as the
trailer.

**C** will be stored as a FULL record in the fourth block.
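To make the seven-byte header arithmetic concrete, here is an illustrative
sketch (ours, not the actual leveldb log writer):

```c++
#include <cstdint>

// Illustrative only: the physical record layout described above. The header
// is stored packed as 4 + 2 + 1 = 7 bytes, which is why a record cannot start
// in the last six bytes of a 32KB block.
struct RecordHeader {
  uint32_t checksum;  // crc32c of type and data[]; little-endian
  uint16_t length;    // little-endian
  uint8_t type;       // FULL, FIRST, MIDDLE, or LAST
};

static const int kHeaderSize = 7;     // packed size, not sizeof(RecordHeader)
static const int kBlockSize = 32768;  // 32KB blocks
```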
----

## Some benefits over the recordio format:

1. We do not need any heuristics for resyncing - just go to the next block
   boundary and scan. If there is a corruption, skip to the next block. As a
   side-benefit, we do not get confused when part of the contents of one log
   file are embedded as a record inside another log file.

2. Splitting at approximate boundaries (e.g., for mapreduce) is simple: find the
   next block boundary and skip records until we hit a FULL or FIRST record.

3. We do not need extra buffering for large records.

## Some downsides compared to the recordio format:

1. No packing of tiny records. This could be fixed by adding a new record type,
   so it is a shortcoming of the current implementation, not necessarily the
   format.

2. No compression. Again, this could be fixed by adding new record types.

diff --git a/doc/table_format.md b/doc/table_format.md
deleted file mode 100644
index 5fe7e72411..0000000000
--- a/doc/table_format.md
+++ /dev/null
@@ -1,107 +0,0 @@
leveldb File format
===================

    <beginning_of_file>
    [data block 1]
    [data block 2]
    ...
    [data block N]
    [meta block 1]
    ...
    [meta block K]
    [metaindex block]
    [index block]
    [Footer]        (fixed size; starts at file_size - sizeof(Footer))
    <end_of_file>

The file contains internal pointers. Each such pointer is called
a BlockHandle and contains the following information:

    offset: varint64
    size:   varint64

See [varints](https://developers.google.com/protocol-buffers/docs/encoding#varints)
for an explanation of the varint64 format.

1. The sequence of key/value pairs in the file is stored in sorted
order and partitioned into a sequence of data blocks. These blocks
come one after another at the beginning of the file. Each data block
is formatted according to the code in `block_builder.cc`, and then
optionally compressed.

2. After the data blocks we store a bunch of meta blocks. The
supported meta block types are described below. More meta block types
may be added in the future. Each meta block is again formatted using
`block_builder.cc` and then optionally compressed.

3. A "metaindex" block. It contains one entry for every other meta
block, where the key is the name of the meta block and the value is a
BlockHandle pointing to that meta block.

4. An "index" block. This block contains one entry per data block,
where the key is a string >= the last key in that data block and before
the first key in the successive data block. The value is the
BlockHandle for the data block.

5. At the very end of the file is a fixed-length footer that contains
the BlockHandle of the metaindex and index blocks as well as a magic number.

    metaindex_handle: char[p];     // Block handle for metaindex
    index_handle:     char[q];     // Block handle for index
    padding:          char[40-p-q];// zeroed bytes to make fixed length
                                   // (40==2*BlockHandle::kMaxEncodedLength)
    magic:            fixed64;     // == 0xdb4775248b80fb57 (little-endian)
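A BlockHandle is just two varint64s; a minimal decoder sketch (ours, not
leveldb's actual coding routines):

```c++
#include <cstdint>

// Illustrative only: decode one varint64 (7 payload bits per byte; the high
// bit set means "more bytes follow"). Returns the new position, or nullptr on
// a corrupt or truncated encoding.
static const char* GetVarint64(const char* p, const char* limit,
                               uint64_t* value) {
  uint64_t result = 0;
  for (int shift = 0; shift <= 63 && p < limit; shift += 7) {
    uint64_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      *value = result;
      return p;
    }
  }
  return nullptr;
}

// Illustrative only: a decoded internal pointer.
struct BlockHandle {
  uint64_t offset;  // varint64 in the file
  uint64_t size;    // varint64 in the file
};
```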
## "filter" Meta Block

If a `FilterPolicy` was specified when the database was opened, a
filter block is stored in each table. The "metaindex" block contains
an entry that maps from `filter.<N>` to the BlockHandle for the filter
block, where `<N>` is the string returned by the filter policy's
`Name()` method.

The filter block stores a sequence of filters, where filter i contains
the output of `FilterPolicy::CreateFilter()` on all keys that are stored
in a block whose file offset falls within the range

    [ i*base ... (i+1)*base-1 ]

Currently, "base" is 2KB. So, for example, if blocks X and Y start in
the range `[ 0KB .. 2KB-1 ]`, all of the keys in X and Y will be
converted to a filter by calling `FilterPolicy::CreateFilter()`, and the
resulting filter will be stored as the first filter in the filter
block.

The filter block is formatted as follows:

    [filter 0]
    [filter 1]
    [filter 2]
    ...
    [filter N-1]

    [offset of filter 0]                  : 4 bytes
    [offset of filter 1]                  : 4 bytes
    [offset of filter 2]                  : 4 bytes
    ...
    [offset of filter N-1]                : 4 bytes

    [offset of beginning of offset array] : 4 bytes
    lg(base)                              : 1 byte

The offset array at the end of the filter block allows efficient
mapping from a data block offset to the corresponding filter.

## "stats" Meta Block

This meta block contains a bunch of stats. The key is the name
of the statistic. The value contains the statistic.

TODO(postrelease): record the following stats.

    data size
    index size
    key size (uncompressed)
    value size (uncompressed)
    number of entries
    number of data blocks
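Since "base" is a power of two, the block-offset-to-filter mapping is a single
shift; an illustrative sketch:

```c++
#include <cstdint>

// Illustrative only: with base == 2KB, lg_base == 11, so data blocks starting
// at file offsets 0..2047 map to filter 0, 2048..4095 to filter 1, and so on.
static uint64_t FilterIndexForBlock(uint64_t block_offset, int lg_base) {
  return block_offset >> lg_base;
}
```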
diff --git a/port/port_posix_sse.cc b/port/port_posix_sse.cc
deleted file mode 100644
index 2d49c21dd8..0000000000
--- a/port/port_posix_sse.cc
+++ /dev/null
@@ -1,110 +0,0 @@
// Copyright 2016 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A portable implementation of crc32c, optimized to handle
// four bytes at a time.
//
// In a separate source file to allow this accelerated CRC32C function to be
// compiled with the appropriate compiler flags to enable x86 SSE 4.2
// instructions.

#include <stdint.h>
#include <string.h>
#include "port/port.h"

#if defined(LEVELDB_PLATFORM_POSIX_SSE)

#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) && defined(__SSE4_2__)
#include <nmmintrin.h>
#endif

#endif  // defined(LEVELDB_PLATFORM_POSIX_SSE)

namespace leveldb {
namespace port {

#if defined(LEVELDB_PLATFORM_POSIX_SSE)

// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
static inline uint32_t LE_LOAD32(const uint8_t *p) {
  // SSE is x86 only, so ensured that |p| is always little-endian.
  uint32_t word;
  memcpy(&word, p, sizeof(word));
  return word;
}

#if defined(_M_X64) || defined(__x86_64__)  // LE_LOAD64 is only used on x64.

// Used to fetch a naturally-aligned 64-bit word in little endian byte-order
static inline uint64_t LE_LOAD64(const uint8_t *p) {
  uint64_t dword;
  memcpy(&dword, p, sizeof(dword));
  return dword;
}

#endif  // defined(_M_X64) || defined(__x86_64__)

#endif  // defined(LEVELDB_PLATFORM_POSIX_SSE)

// For further improvements see Intel publication at:
// http://download.intel.com/design/intarch/papers/323405.pdf
uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
#if !defined(LEVELDB_PLATFORM_POSIX_SSE)
  return 0;
#else

  const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
  const uint8_t *e = p + size;
  uint32_t l = crc ^ 0xffffffffu;

#define STEP1 do {                      \
    l = _mm_crc32_u8(l, *p++);          \
} while (0)
#define STEP4 do {                      \
    l = _mm_crc32_u32(l, LE_LOAD32(p)); \
    p += 4;                             \
} while (0)
#define STEP8 do {                      \
    l = _mm_crc32_u64(l, LE_LOAD64(p)); \
    p += 8;                             \
} while (0)

  if (size > 16) {
    // Process unaligned bytes
    for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) {
      STEP1;
    }

    // _mm_crc32_u64 is only available on x64.
#if defined(_M_X64) || defined(__x86_64__)
    // Process 8 bytes at a time
    while ((e-p) >= 8) {
      STEP8;
    }
    // Process 4 bytes at a time
    if ((e-p) >= 4) {
      STEP4;
    }
#else  // !(defined(_M_X64) || defined(__x86_64__))
    // Process 4 bytes at a time
    while ((e-p) >= 4) {
      STEP4;
    }
#endif  // defined(_M_X64) || defined(__x86_64__)
  }
  // Process the last few bytes
  while (p != e) {
    STEP1;
  }
#undef STEP8
#undef STEP4
#undef STEP1
  return l ^ 0xffffffffu;
#endif  // defined(LEVELDB_PLATFORM_POSIX_SSE)
}

}  // namespace port
}  // namespace leveldb
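For reference, a minimal caller of the function above might look like the
following; this is illustrative, and on builds without
`LEVELDB_PLATFORM_POSIX_SSE` this particular function returns 0, so real
callers fall back to the portable CRC32C implementation:

```c++
#include <cstddef>
#include <cstdint>
#include "port/port.h"

// Illustrative only: checksum a buffer via the SSE 4.2 accelerated path.
uint32_t ChecksumOf(const char* data, size_t n) {
  return leveldb::port::AcceleratedCRC32C(0, data, n);
}
```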