From ccb0929ed1ed622a8e77ce00402bfc0478e026f9 Mon Sep 17 00:00:00 2001
From: Pawel Pekrol
Date: Mon, 17 Feb 2014 15:09:06 +0100
Subject: [PATCH 1/7] RavenDB-1735. Voron: Implement
 TransactionalStorage.GetDatabaseSizeInBytes

---
 Voron/Debugging/EnvironmentStats.cs | 2 ++
 Voron/StorageEnvironment.cs         | 6 +++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/Voron/Debugging/EnvironmentStats.cs b/Voron/Debugging/EnvironmentStats.cs
index 575e7fad80..d0a18aee4e 100644
--- a/Voron/Debugging/EnvironmentStats.cs
+++ b/Voron/Debugging/EnvironmentStats.cs
@@ -6,5 +6,7 @@ public class EnvironmentStats
         public long FreePagesOverhead;
         public long RootPages;
         public long UnallocatedPagesAtEndOfFile;
+        public long UsedDataFileSizeInBytes;
+        public long AllocatedDataFileSizeInBytes;
     }
 }
\ No newline at end of file
diff --git a/Voron/StorageEnvironment.cs b/Voron/StorageEnvironment.cs
index cde3284bce..9d14fbd98c 100644
--- a/Voron/StorageEnvironment.cs
+++ b/Voron/StorageEnvironment.cs
@@ -461,12 +461,16 @@ public Dictionary<string, List<long>> AllPages(Transaction tx)

         public EnvironmentStats Stats()
         {
+            var numberOfAllocatedPages = Math.Max(_dataPager.NumberOfAllocatedPages, State.NextPageNumber - 1); // async apply to data file task
+
             return new EnvironmentStats
             {
                 FreePages = _freeSpaceHandling.GetFreePageCount(),
                 FreePagesOverhead = State.FreeSpaceRoot.State.PageCount,
                 RootPages = State.Root.State.PageCount,
-                UnallocatedPagesAtEndOfFile = _dataPager.NumberOfAllocatedPages - NextPageNumber
+                UnallocatedPagesAtEndOfFile = _dataPager.NumberOfAllocatedPages - NextPageNumber,
+                UsedDataFileSizeInBytes = (State.NextPageNumber - 1) * AbstractPager.PageSize,
+                AllocatedDataFileSizeInBytes = numberOfAllocatedPages * AbstractPager.PageSize
             };
         }
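For orientation, a minimal sketch of how a caller can consume the two new stats fields (not part of the patch; "env" is assumed to be an open StorageEnvironment):

    var stats = env.Stats();
    // bytes of the data file actually in use (pages handed out so far)
    long used = stats.UsedDataFileSizeInBytes;
    // bytes reserved on disk for the data file; never smaller than "used"
    long allocated = stats.AllocatedDataFileSizeInBytes;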
From 12ac3a1a43cf573727614e547750c7ea3ee63e9d Mon Sep 17 00:00:00 2001
From: Michael Yarichuk
Date: Tue, 18 Feb 2014 12:19:13 +0200
Subject: [PATCH 2/7] the following changes included:
 1) fix for RavenDB-1740
 2) added credentials when ReplicationInformer::RefreshReplicationInformation() is called
 3) fix after bad merge for WriteBatch.cs

---
 Voron/Impl/WriteBatch.cs | 91 ++++++++++++++++++++--------------------
 1 file changed, 45 insertions(+), 46 deletions(-)

diff --git a/Voron/Impl/WriteBatch.cs b/Voron/Impl/WriteBatch.cs
index 74ffff20c9..30281ae5e6 100644
--- a/Voron/Impl/WriteBatch.cs
+++ b/Voron/Impl/WriteBatch.cs
@@ -10,9 +10,10 @@ namespace Voron.Impl
     public class WriteBatch : IDisposable
     {
         private readonly Dictionary<string, Dictionary<Slice, BatchOperation>> _lastOperations;
-        private readonly Dictionary<string, Dictionary<Slice, List<BatchOperation>>> _multiTreeOperations;
+        private readonly Dictionary<string, Dictionary<Slice, List<BatchOperation>>> _multiTreeOperations;
         private readonly SliceEqualityComparer _sliceEqualityComparer;
+        private bool _disposeAfterWrite = true;

         public IEnumerable<BatchOperation> Operations
         {
             get
             {
@@ -28,62 +29,59 @@ public IEnumerable<BatchOperation> Operations
             }
         }

-        public int OperationsCount
-        {
-            get
-            {
-                return _lastOperations.Sum(x => x.Value.Count) +
-                    _multiTreeOperations.Sum(x => x.Value.Sum(y => y.Value.Count));
-            }
-        }
-
         public Func<long> Size
         {
             get
             {
                 return () =>
-                    {
-                        long totalSize = 0;
-
-                        if (_lastOperations.Count > 0)
-                            totalSize += _lastOperations.Sum(
-                                operation =>
-                                operation.Value.Values.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size));
-
-                        if (_multiTreeOperations.Count > 0)
-                            totalSize += _multiTreeOperations.Sum(
-                                tree =>
-                                tree.Value.Sum(
-                                    multiOp => multiOp.Value.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size)));
-                        return totalSize;
-                    };
+                {
+                    long totalSize = 0;
+
+                    if (_lastOperations.Count > 0)
+                        totalSize += _lastOperations.Sum(
+                            operation =>
+                            operation.Value.Values.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size));
+
+                    if (_multiTreeOperations.Count > 0)
+                        totalSize += _multiTreeOperations.Sum(
+                            tree =>
+                            tree.Value.Sum(
+                                multiOp => multiOp.Value.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size)));
+                    return totalSize;
+                };
             }
         }

         public bool IsEmpty { get { return _lastOperations.Count == 0 && _multiTreeOperations.Count == 0; } }

-        internal bool TryGetValue(string treeName, Slice key, out Stream value, out ushort? version, out BatchOperationType operationType)
+        public bool DisposeAfterWrite
+        {
+            get { return _disposeAfterWrite; }
+            set { _disposeAfterWrite = value; }
+        }
+
+        internal bool TryGetValue(string treeName, Slice key, out Stream value, out ushort? version, out BatchOperationType operationType)
         {
-            value = null;
-            version = null;
+            value = null;
+            version = null;
             operationType = BatchOperationType.None;

             if (treeName == null)
                 treeName = Constants.RootTreeName;

             //first check if it is a multi-tree operation
-            Dictionary<Slice, List<BatchOperation>> treeOperations;
-            if (_multiTreeOperations.TryGetValue(treeName, out treeOperations))
-            {
-                List<BatchOperation> operationRecords;
-                if (treeOperations.TryGetValue(key, out operationRecords))
-                {
+            Dictionary<Slice, List<BatchOperation>> treeOperations;
+            if (_multiTreeOperations.TryGetValue(treeName, out treeOperations))
+            {
+                List<BatchOperation> operationRecords;
+                if (treeOperations.TryGetValue(key, out operationRecords))
+                {
                     //since in multi-tree there are many operations for single tree key, then fetching operation type and value is meaningless
-                    return true;
-                }
-            }
+                    return true;
+                }
+            }

-            Dictionary<Slice, BatchOperation> operations;
+            Dictionary<Slice, BatchOperation> operations;
             if (_lastOperations.TryGetValue(treeName, out operations) == false)
                 return false;

@@ -91,12 +89,13 @@ internal bool TryGetValue(string treeName, Slice key, out Stream value, out usho
             if (operations.TryGetValue(key, out operation))
             {
                 operationType = operation.Type;
-                version = operation.Version;
+                version = operation.Version;

                 if (operation.Type == BatchOperationType.Delete)
                     return true;

-                value = operation.Value as Stream;
+                value = operation.Value as Stream;
+                operation.Reset(); // will reset stream position

                 if (operation.Type == BatchOperationType.Add)
                     return true;

@@ -109,7 +108,7 @@ internal bool TryGetValue(string treeName, Slice key, out Stream value, out usho
         public WriteBatch()
         {
             _lastOperations = new Dictionary<string, Dictionary<Slice, BatchOperation>>();
-            _multiTreeOperations = new Dictionary<string, Dictionary<Slice, List<BatchOperation>>>();
+            _multiTreeOperations = new Dictionary<string, Dictionary<Slice, List<BatchOperation>>>();
             _sliceEqualityComparer = new SliceEqualityComparer();
         }

@@ -171,16 +170,16 @@ private void AddOperation(BatchOperation operation)

             if (operation.Type == BatchOperationType.MultiAdd || operation.Type == BatchOperationType.MultiDelete)
             {
-                Dictionary<Slice, List<BatchOperation>> multiTreeOperationsOfTree;
+                Dictionary<Slice, List<BatchOperation>> multiTreeOperationsOfTree;
                 if (_multiTreeOperations.TryGetValue(treeName, out multiTreeOperationsOfTree) == false)
                 {
                     _multiTreeOperations[treeName] =
-                        multiTreeOperationsOfTree = new Dictionary<Slice, List<BatchOperation>>(_sliceEqualityComparer);
+                        multiTreeOperationsOfTree = new Dictionary<Slice, List<BatchOperation>>(_sliceEqualityComparer);
                 }

-                List<BatchOperation> specificMultiTreeOperations;
+                List<BatchOperation> specificMultiTreeOperations;
                 if (multiTreeOperationsOfTree.TryGetValue(operation.Key, out specificMultiTreeOperations) == false)
-                    multiTreeOperationsOfTree[operation.Key] = specificMultiTreeOperations = new List<BatchOperation>();
+                    multiTreeOperationsOfTree[operation.Key] = specificMultiTreeOperations = new List<BatchOperation>();

                 specificMultiTreeOperations.Add(operation);
             }

@@ -254,7 +253,7 @@ private BatchOperation(Slice key, object value, ushort? version, string treeName

             public void SetVersionFrom(BatchOperation other)
             {
-                if (other.Version != null &&
+                if (other.Version != null &&
                     other.Version + 1 == Version)
                     Version = other.Version;
             }
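A hedged sketch of what the new DisposeAfterWrite flag enables (that the write path honors the flag is an assumption based on the property's intent; the key, stream and tree name are made up):

    var batch = new WriteBatch { DisposeAfterWrite = false }; // keep the batch alive after it is written
    batch.Add("key/1", stream, "tree0");
    env.Writer.Write(batch);
    // not disposed by the writer, so the batch can still be inspected or reused here
    batch.Dispose();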
From 060febd4ab28b0e6f213bf367c761b060c92ed6a Mon Sep 17 00:00:00 2001
From: Pawel Pekrol
Date: Wed, 19 Feb 2014 07:25:01 +0100
Subject: [PATCH 3/7] RavenDB-1717. Voron: Add temp path option

---
 Voron.Tests/Storage/Files.cs       | 78 ++++++++++++++++++++++++++++++
 Voron.Tests/Voron.Tests.csproj     |  3 +-
 Voron/StorageEnvironmentOptions.cs | 24 ++++++---
 3 files changed, 96 insertions(+), 9 deletions(-)
 create mode 100644 Voron.Tests/Storage/Files.cs

diff --git a/Voron.Tests/Storage/Files.cs b/Voron.Tests/Storage/Files.cs
new file mode 100644
index 0000000000..8c65b82c5d
--- /dev/null
+++ b/Voron.Tests/Storage/Files.cs
@@ -0,0 +1,78 @@
+// -----------------------------------------------------------------------
+//  <copyright file="Files.cs" company="Hibernating Rhinos LTD">
+//      Copyright (c) Hibernating Rhinos LTD. All rights reserved.
+//  </copyright>
+// -----------------------------------------------------------------------
+using System.IO;
+
+using Xunit;
+
+namespace Voron.Tests.Storage
+{
+    public class Files : StorageTest
+    {
+        private readonly string path;
+
+        private readonly string temp;
+
+        public Files()
+        {
+            path = Path.GetFullPath("Data");
+            temp = Path.GetFullPath("Temp");
+
+            DeleteDirectory(path);
+            DeleteDirectory(temp);
+        }
+
+        [Fact]
+        public void ByDefaultAllFilesShouldBeStoredInOneDirectory()
+        {
+            var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(path);
+
+            Assert.Equal(path, options.BasePath);
+            Assert.Equal(options.BasePath, options.TempPath);
+        }
+
+        [Fact]
+        public void TemporaryPathTest()
+        {
+            var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(path, temp);
+
+            Assert.Equal(path, options.BasePath);
+            Assert.Equal(temp, options.TempPath);
+        }
+
+        [Fact]
+        public void DefaultScratchLocation()
+        {
+            var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(path);
+            using (var env = new StorageEnvironment(options))
+            {
+                var scratchFile = Path.Combine(path, "scratch.buffers");
+                Assert.True(File.Exists(scratchFile));
+            }
+        }
+
+        [Fact]
+        public void ScratchLocationWithTemporaryPathSpecified()
+        {
+            var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(path, temp);
+            using (var env = new StorageEnvironment(options))
+            {
+                var scratchFile = Path.Combine(path, "scratch.buffers");
+                var scratchFileTemp = Path.Combine(temp, "scratch.buffers");
+
+                Assert.False(File.Exists(scratchFile));
+                Assert.True(File.Exists(scratchFileTemp));
+            }
+        }
+
+        public override void Dispose()
+        {
+            DeleteDirectory(path);
+            DeleteDirectory(temp);
+
+            base.Dispose();
+        }
+    }
+}
\ No newline at end of file
diff --git a/Voron.Tests/Voron.Tests.csproj b/Voron.Tests/Voron.Tests.csproj
index fdf3a4dca7..000e5e16fe 100644
--- a/Voron.Tests/Voron.Tests.csproj
+++ b/Voron.Tests/Voron.Tests.csproj
@@ -53,12 +53,10 @@
-      <HintPath>..\..\SharedLibs\xunit\xunit.dll</HintPath>
-      <HintPath>..\..\SharedLibs\xunit\xunit.extensions.dll</HintPath>
@@ -96,6 +94,7 @@
+    <Compile Include="Storage\Files.cs" />
diff --git a/Voron/StorageEnvironmentOptions.cs b/Voron/StorageEnvironmentOptions.cs
index 021e78670c..99ebdbf46e 100644
--- a/Voron/StorageEnvironmentOptions.cs
+++ b/Voron/StorageEnvironmentOptions.cs
@@ -89,9 +89,9 @@ public static StorageEnvironmentOptions GetInMemory()
             return new PureMemoryStorageEnvironmentOptions();
         }

-        public static StorageEnvironmentOptions ForPath(string path)
+        public static StorageEnvironmentOptions ForPath(string path, string tempPath = null)
         {
-            return new DirectoryStorageEnvironmentOptions(path);
+            return new DirectoryStorageEnvironmentOptions(path, tempPath);
         }

         public IDisposable AllowManualFlushing()
@@ -106,19 +106,24 @@ public IDisposable AllowManualFlushing()
     public class DirectoryStorageEnvironmentOptions : StorageEnvironmentOptions
     {
         private readonly string _basePath;
+        private readonly string _tempPath;
+
         private readonly Lazy<IVirtualPager> _dataPager;

         private readonly ConcurrentDictionary<string, Lazy<IJournalWriter>> _journals =
             new ConcurrentDictionary<string, Lazy<IJournalWriter>>(StringComparer.OrdinalIgnoreCase);

-        public DirectoryStorageEnvironmentOptions(string basePath)
+        public DirectoryStorageEnvironmentOptions(string basePath, string tempPath)
         {
             _basePath = Path.GetFullPath(basePath);
-
+            _tempPath = !string.IsNullOrEmpty(tempPath) ? Path.GetFullPath(tempPath) : _basePath;
+
             if (Directory.Exists(_basePath) == false)
-            {
                 Directory.CreateDirectory(_basePath);
-            }
+
+            if (_basePath != tempPath && Directory.Exists(_tempPath) == false)
+                Directory.CreateDirectory(_tempPath);
+
             _dataPager = new Lazy<IVirtualPager>(() => new Win32MemoryMapPager(Path.Combine(_basePath, Constants.DatabaseFilename)));
         }

@@ -135,6 +140,11 @@ public string BasePath
             get { return _basePath; }
         }

+        public string TempPath
+        {
+            get { return _tempPath; }
+        }
+
         public override IJournalWriter CreateJournalWriter(long journalNumber, long journalSize)
         {
             var name = JournalName(journalNumber);

@@ -225,7 +235,7 @@ public override unsafe void WriteHeader(string filename, FileHeader* header)
         public override IVirtualPager CreateScratchPager(string name)
         {
-            var scratchFile = Path.Combine(_basePath, name);
+            var scratchFile = Path.Combine(_tempPath, name);
             if (File.Exists(scratchFile))
                 File.Delete(scratchFile);
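The new option in use, as a sketch (the paths are placeholders):

    // the data file stays under the base path, while the scratch file
    // ("scratch.buffers") is created under the temp path, as the tests above verify
    var options = StorageEnvironmentOptions.ForPath(@"C:\Voron\Data", tempPath: @"D:\Voron\Temp");
    using (var env = new StorageEnvironment(options))
    {
        // work with the environment as usual
    }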
From d9751223e496072c56f24c0777c5db94fda36bb0 Mon Sep 17 00:00:00 2001
From: Michael Yarichuk
Date: Thu, 20 Feb 2014 12:58:50 +0200
Subject: [PATCH 4/7] fixes for RavenDB-1745

---
 Voron/Impl/Journal/WriteAheadJournal.cs | 63 ++++++++++++++-----------
 1 file changed, 36 insertions(+), 27 deletions(-)

diff --git a/Voron/Impl/Journal/WriteAheadJournal.cs b/Voron/Impl/Journal/WriteAheadJournal.cs
index 550a88bca0..b8c6abc8e8 100644
--- a/Voron/Impl/Journal/WriteAheadJournal.cs
+++ b/Voron/Impl/Journal/WriteAheadJournal.cs
@@ -361,10 +361,12 @@ public class JournalApplicator : IDisposable
         private long _totalWrittenButUnsyncedBytes;
         private DateTime _lastDataFileSyncTime;
         private JournalFile _lastFlushedJournal;
+        private bool _isDisposed;

         public JournalApplicator(WriteAheadJournal waj)
         {
             _waj = waj;
+            _isDisposed = false;
         }

         public void ApplyLogsToDataFile(long oldestActiveTransaction, Transaction transaction = null)
@@ -377,8 +379,12 @@ public void ApplyLogsToDataFile(long oldestActiveTransaction, Transaction transa
                 locked = true;
             }

+
             try
             {
+                if (_isDisposed)
+                    return;
+
                 var alreadyInWriteTx = transaction != null && transaction.Flags == TransactionFlags.ReadWrite;

                 var jrnls = _waj._files.Select(x => x.GetSnapshot()).OrderBy(x => x.Number).ToList();

@@ -522,33 +528,26 @@ private void ApplyPagesToDataFileFromScratch(Dictionary<long, PagePosition> pagesToWrite, Transaction transaction, bool alreadyInWriteTx)
-            try
-            {
-                var sortedPages = pagesToWrite.OrderBy(x => x.Key)
-                                    .Select(x => scratchBufferPool.ReadPage(x.Value.ScratchPos, scratchPagerState))
-                                    .ToList();
-
-                var last = sortedPages.Last();
-
-                var numberOfPagesInLastPage = last.IsOverflow == false ? 1 :
-                    _waj._env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize);
-
-                EnsureDataPagerSpacing(transaction, last, numberOfPagesInLastPage, alreadyInWriteTx);
-
-                long written = 0;
-                int index = 0;
-                foreach (var page in sortedPages)
-                {
-                    written += _waj._dataPager.Write(page);
-                    index++;
-                }
-
-                _totalWrittenButUnsyncedBytes += written;
-            }
-            finally
-            {
-                scratchPagerState.Release();
-            }
+            var sortedPages = pagesToWrite.OrderBy(x => x.Key)
+                                .Select(x => scratchBufferPool.ReadPage(x.Value.ScratchPos, scratchPagerState))
+                                .ToList();
+
+            var last = sortedPages.Last();
+
+            var numberOfPagesInLastPage = last.IsOverflow == false ? 1 :
+                _waj._env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize);
+
+            EnsureDataPagerSpacing(transaction, last, numberOfPagesInLastPage, alreadyInWriteTx);
+
+            long written = 0;
+            int index = 0;
+            foreach (var page in sortedPages)
+            {
+                written += _waj._dataPager.Write(page);
+                index++;
+            }
+
+            _totalWrittenButUnsyncedBytes += written;
         }

         private void EnsureDataPagerSpacing(Transaction transaction, Page last, int numberOfPagesInLastPage,

@@ -645,14 +644,24 @@ public void UpdateFileHeaderAfterDataFileSync(JournalFile file, long oldestActiv

         public void Dispose()
         {
-            foreach (var journalFile in _journalsToDelete)
+            if (_isDisposed)
+                return;
+
+            using (TakeFlushingLock())
             {
-                // we need to release all unused journals
-                // however here we don't force them to DeleteOnClose
-                // because we didn't synced the data file yet
-                // and we will need them on a next database recovery
-                journalFile.Value.Release();
+                foreach (var journalFile in _journalsToDelete)
+                {
+                    // we need to release all unused journals
+                    // however here we don't force them to DeleteOnClose
+                    // because we didn't synced the data file yet
+                    // and we will need them on a next database recovery
+                    journalFile.Value.Release();
+                }
+
+                _isDisposed = true;
             }
+
+            _flushingSemaphore.Dispose();
         }

         public IDisposable TakeFlushingLock()
From 01662db819d7292903ccdfe095551c4d0c9c2fd7 Mon Sep 17 00:00:00 2001
From: Michael Yarichuk
Date: Fri, 16 May 2014 15:51:31 +0300
Subject: [PATCH 5/7] when using TreeIterator, and the RequiredPrefix is set,
 it is intuitive that Seek with Slice.BeforeAllKeys should set the iterator
 to the first key carrying the prefix, if such a key is indeed found.
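For example, as a sketch (tree and key names follow the new test; "env" is assumed to be an open StorageEnvironment):

    using (var snapshot = env.CreateSnapshot())
    using (var iterator = snapshot.Iterate("testTree"))
    {
        iterator.RequiredPrefix = "AC";
        // the seek is redirected to the required prefix, so this returns true
        // only when at least one key starting with "AC" exists
        if (iterator.Seek(Slice.BeforeAllKeys))
        {
            do
            {
                // iterator.CurrentKey starts with "AC" on every iteration
            } while (iterator.MoveNext());
        }
    }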
--- .../RequiredPrefixWithSeekBeforeAllKeys.cs | 65 +++++++++++++++++++ Voron.Tests/Voron.Tests.csproj | 1 + Voron/Trees/TreeIterator.cs | 8 +++ 3 files changed, 74 insertions(+) create mode 100644 Voron.Tests/Bugs/RequiredPrefixWithSeekBeforeAllKeys.cs diff --git a/Voron.Tests/Bugs/RequiredPrefixWithSeekBeforeAllKeys.cs b/Voron.Tests/Bugs/RequiredPrefixWithSeekBeforeAllKeys.cs new file mode 100644 index 0000000000..712c4c805a --- /dev/null +++ b/Voron.Tests/Bugs/RequiredPrefixWithSeekBeforeAllKeys.cs @@ -0,0 +1,65 @@ +using Voron.Impl; +using Xunit; + +namespace Voron.Tests.Bugs +{ + public class RequiredPrefixWithSeekBeforeAllKeys : StorageTest + { + [Fact] + public void SeekBeforeAllKeys_with_required_prefix_should_return_true_if_relevant_nodes_exist() + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Env.CreateTree(tx, "testTree"); + tx.Commit(); + } + + var wb = new WriteBatch(); + wb.Add("AA", StreamFor("Foo1"), "testTree"); + wb.Add("AB", StreamFor("Foo2"), "testTree"); + + wb.Add("ACA", StreamFor("Foo3"), "testTree"); + wb.Add("ACB", StreamFor("Foo4"), "testTree"); + wb.Add("ACC", StreamFor("Foo5"), "testTree"); + + wb.Add("ADA", StreamFor("Foo6"), "testTree"); + wb.Add("ADB", StreamFor("Foo7"), "testTree"); + + Env.Writer.Write(wb); + + using(var snapshot = Env.CreateSnapshot()) + using (var iterator = snapshot.Iterate("testTree")) + { + iterator.RequiredPrefix = "AC"; + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); + } + } + + + [Fact] + public void SeekBeforeAllKeys_with_required_prefix_should_return_false_if_relevant_nodes_doesnt_exist() + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Env.CreateTree(tx, "testTree"); + tx.Commit(); + } + + var wb = new WriteBatch(); + wb.Add("AA", StreamFor("Foo1"), "testTree"); + wb.Add("AB", StreamFor("Foo2"), "testTree"); + + wb.Add("ADA", StreamFor("Foo6"), "testTree"); + wb.Add("ADB", StreamFor("Foo7"), "testTree"); + + Env.Writer.Write(wb); + + using (var snapshot = Env.CreateSnapshot()) + using (var iterator = snapshot.Iterate("testTree")) + { + iterator.RequiredPrefix = "AC"; + Assert.False(iterator.Seek(Slice.BeforeAllKeys)); + } + } + } +} diff --git a/Voron.Tests/Voron.Tests.csproj b/Voron.Tests/Voron.Tests.csproj index b657c8557c..c905dadf62 100644 --- a/Voron.Tests/Voron.Tests.csproj +++ b/Voron.Tests/Voron.Tests.csproj @@ -74,6 +74,7 @@ + diff --git a/Voron/Trees/TreeIterator.cs b/Voron/Trees/TreeIterator.cs index 81ee49c792..3955d0eb53 100644 --- a/Voron/Trees/TreeIterator.cs +++ b/Voron/Trees/TreeIterator.cs @@ -47,6 +47,14 @@ public bool Seek(Slice key) _currentPage = _tree.FindPageFor(_tx, key, out lazy); _cursor = lazy.Value; _cursor.Pop(); + + //if required prefix is set and need to seek to beginning/end + //--> skip to beginning of relevant keys + if (RequiredPrefix != null && + !RequiredPrefix.Equals(Slice.Empty) && + (key.Equals(Slice.BeforeAllKeys) || key.Equals(Slice.AfterAllKeys))) + key = RequiredPrefix; + var node = _currentPage.Search(key, _cmp); if (node == null) { From 7b8ccdad5c6f2131bf543f14b634962d57c566a9 Mon Sep 17 00:00:00 2001 From: Michael Yarichuk Date: Mon, 19 May 2014 11:31:29 +0300 Subject: [PATCH 6/7] transferred changes from RavenDB Voron version --- Voron.Tests/Backups/Incremental.cs | 1 + .../Bugs/AccessViolationWithIteratorUsage.cs | 60 ++ Voron.Tests/Bugs/Isolation.cs | 233 ++++-- Voron.Tests/Bugs/Iterating.cs | 40 + Voron.Tests/Bugs/MultiAdds.cs | 3 +- Voron.Tests/Bugs/MultiReads.cs | 38 + Voron.Tests/Bugs/Snapshots.cs | 9 +- 
Voron.Tests/Bugs/TreeRebalancer.cs | 57 +- Voron.Tests/Journal/Mvcc.cs | 4 +- .../Journal/UncommittedTransactions.cs | 2 +- Voron.Tests/MultiTreeSize.cs | 43 ++ Voron.Tests/Storage/Batches.cs | 8 +- Voron.Tests/Storage/Concurrency.cs | 18 +- Voron.Tests/Storage/Increments.cs | 80 ++ Voron.Tests/Storage/InitialSize.cs | 91 +++ Voron.Tests/Storage/Pagers.cs | 2 +- Voron.Tests/Storage/SplittingVeryBig.cs | 9 +- Voron.Tests/Trees/FreeSpaceTest.cs | 106 +++ Voron.Tests/Util/StreamExtensions.cs | 2 - Voron.Tests/Voron.Tests.csproj | 17 +- Voron/Debugging/DebugActionType.cs | 3 +- Voron/Debugging/EnvironmentStats.cs | 13 - Voron/Impl/Backup/BackupMethods.cs | 4 +- Voron/Impl/Backup/FullBackup.cs | 59 +- Voron/Impl/Backup/IncrementalBackup.cs | 267 ++++--- Voron/Impl/Backup/VoronBackupUtil.cs | 29 + Voron/Impl/Constants.cs | 2 +- Voron/Impl/FileHeaders/HeaderAccessor.cs | 3 +- Voron/Impl/FreeSpace/FreeSpaceHandling.cs | 23 +- Voron/Impl/FreeSpace/IFreeSpaceHandling.cs | 1 - Voron/Impl/FreeSpace/NoFreeSpaceHandling.cs | 5 - Voron/Impl/FreeSpace/StreamBitArray.cs | 4 +- Voron/Impl/Journal/JournalFile.cs | 23 +- Voron/Impl/Journal/JournalReader.cs | 374 +++++----- Voron/Impl/Journal/Win32JournalWriter.cs | 6 +- Voron/Impl/Journal/WriteAheadJournal.cs | 598 ++++----------- Voron/Impl/Paging/AbstractPager.cs | 64 +- Voron/Impl/Paging/FilePager.cs | 6 +- Voron/Impl/Paging/TemporaryPage.cs | 4 +- Voron/Impl/Paging/Win32MemoryMapPager.cs | 29 +- .../Win32PageFileBackedMemoryMappedPager.cs | 27 +- Voron/Impl/Paging/Win32PureMemoryPager.cs | 2 + Voron/Impl/ScratchBufferPool.cs | 2 +- Voron/Impl/Transaction.cs | 35 +- Voron/Impl/TransactionMergingWriter.cs | 702 ++++++++++-------- Voron/Impl/WriteBatch.cs | 224 ++++-- Voron/Slice.cs | 204 ++--- Voron/SliceWriter.cs | 40 + Voron/StorageEnvironment.cs | 96 ++- Voron/StorageEnvironmentOptions.cs | 59 +- Voron/Trees/IIterator.cs | 2 - Voron/Trees/NodeHeader.cs | 20 +- Voron/Trees/Page.cs | 133 ++-- Voron/Trees/PageIterator.cs | 103 +++ Voron/Trees/PageSplitter.cs | 30 +- Voron/Trees/Tree.MultiTree.cs | 356 +++++++++ Voron/Trees/Tree.cs | 242 ++---- Voron/Trees/TreeIterator.cs | 66 +- Voron/Trees/TreeRebalancer.cs | 23 +- Voron/Util/PageTable.cs | 23 +- Voron/Util/TreeExtensions.cs | 4 + Voron/ValueReader.cs | 336 ++++++--- Voron/Voron.csproj | 12 +- 63 files changed, 3102 insertions(+), 1979 deletions(-) create mode 100644 Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs create mode 100644 Voron.Tests/Bugs/Iterating.cs create mode 100644 Voron.Tests/Bugs/MultiReads.cs create mode 100644 Voron.Tests/MultiTreeSize.cs create mode 100644 Voron.Tests/Storage/Increments.cs create mode 100644 Voron.Tests/Storage/InitialSize.cs create mode 100644 Voron/Impl/Backup/VoronBackupUtil.cs create mode 100644 Voron/SliceWriter.cs create mode 100644 Voron/Trees/PageIterator.cs create mode 100644 Voron/Trees/Tree.MultiTree.cs diff --git a/Voron.Tests/Backups/Incremental.cs b/Voron.Tests/Backups/Incremental.cs index 989aab1d56..15a3e505f7 100644 --- a/Voron.Tests/Backups/Incremental.cs +++ b/Voron.Tests/Backups/Incremental.cs @@ -7,6 +7,7 @@ using System; using System.Diagnostics; using System.IO; +using System.Text; using Voron.Impl; using Voron.Impl.Backup; using Voron.Impl.Paging; diff --git a/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs b/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs new file mode 100644 index 0000000000..4292f70546 --- /dev/null +++ b/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs @@ -0,0 +1,60 @@ +// 
----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. +// +// ----------------------------------------------------------------------- +using System.IO; +using Xunit; + +namespace Voron.Tests.Bugs +{ + public class AccessViolationWithIteratorUsage : StorageTest + { + protected override void Configure(StorageEnvironmentOptions options) + { + options.ManualFlushing = true; + } + + [Fact] + public void ShouldNotThrow() + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + var tree = Env.CreateTree(tx, "test"); + + tree.Add(tx, "items/1", new MemoryStream()); + tree.Add(tx, "items/2", new MemoryStream()); + + tx.Commit(); + } + + using (var snapshot = Env.CreateSnapshot()) + using (var iterator = snapshot.Iterate("test")) + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + for (int i = 0; i < 10; i++) + { + Env.State.GetTree(tx, "test").Add(tx, "items/" + i, new MemoryStream(new byte[2048])); + } + + tx.Commit(); + } + + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + for (int i = 10; i < 40; i++) + { + Env.State.GetTree(tx, "test").Add(tx, "items/" + i, new MemoryStream(new byte[2048])); + } + + tx.Commit(); + } + + iterator.MoveNext(); + } + } + } +} \ No newline at end of file diff --git a/Voron.Tests/Bugs/Isolation.cs b/Voron.Tests/Bugs/Isolation.cs index 6495298b25..eb86944824 100644 --- a/Voron.Tests/Bugs/Isolation.cs +++ b/Voron.Tests/Bugs/Isolation.cs @@ -1,88 +1,195 @@ -namespace Voron.Tests.Bugs +using System.Collections.Generic; + +namespace Voron.Tests.Bugs { - using System.IO; + using System.IO; + + using Xunit; + + public class Isolation : StorageTest + { + [Fact] + public void MultiTreeIteratorShouldBeIsolated1() + { + var directory = "Test2"; + + DeleteDirectory(directory); + + var options = StorageEnvironmentOptions.ForPath(directory); + + using (var env = new StorageEnvironment(options)) + { + CreateTrees(env, 1, "tree"); + + for (var i = 0; i < 10; i++) + Write(env, i); + + using (var txr = env.NewTransaction(TransactionFlags.Read)) + { + var key = Write(env, 10); + + using (var iterator = txr.ReadTree("tree0").MultiRead(txr, "key/1")) + { + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); + + var count = 0; + + do + { + Assert.True(iterator.CurrentKey.ToString() != key, string.Format("Key '{0}' should not be present in multi-iterator", key)); + + count++; + } + while (iterator.MoveNext()); + + Assert.Equal(10, count); + } + } + } + } + + [Fact] + public void MultiTreeIteratorShouldBeIsolated2() + { + var directory = "Test2"; + + DeleteDirectory(directory); - using Xunit; + var options = StorageEnvironmentOptions.ForPath(directory); - public class Isolation : StorageTest - { - [Fact] - public void ScratchPagesShouldNotBeReleasedUntilNotUsed() - { - var directory = "Test2"; + using (var env = new StorageEnvironment(options)) + { + CreateTrees(env, 1, "tree"); - if (Directory.Exists(directory)) - Directory.Delete(directory, true); + for (var i = 0; i < 11; i++) + Write(env, i); - var options = StorageEnvironmentOptions.ForPath(directory); + using (var txr = env.NewTransaction(TransactionFlags.Read)) + { + var key = Delete(env, 10); - options.ManualFlushing = true; - using (var env = new StorageEnvironment(options)) - { - CreateTrees(env, 2, "tree"); - for (int a = 0; a < 3; a++) - { - using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) - { - 
tx.Environment.State.GetTree(tx,"tree0").Add(tx, string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); - tx.Environment.State.GetTree(tx,"tree0").Add(tx, string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); + using (var iterator = txr.ReadTree("tree0").MultiRead(txr, "key/1")) + { + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); - tx.Commit(); - } - } + var keys = new List(); - using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) - { - tx.Environment.State.GetTree(tx,"tree1").Add(tx, "yek/1", new MemoryStream()); + do + { + keys.Add(iterator.CurrentKey.ToString()); + } + while (iterator.MoveNext()); - tx.Commit(); - } + Assert.Equal(11, keys.Count); + Assert.Contains(key, keys); + } + } + } + } - using (var txr = env.NewTransaction(TransactionFlags.Read)) - { - using (var iterator = txr.Environment.State.GetTree(txr, "tree0").Iterate(txr)) - { - Assert.True(iterator.Seek(Slice.BeforeAllKeys)); // all pages are from scratch (one from position 11) + private static string Delete(StorageEnvironment env, int i) + { + using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) + { + var key = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + i.ToString("D2"); - var currentKey = iterator.CurrentKey.ToString(); + txw.ReadTree("tree0").MultiDelete(txw, "key/1", key); + txw.Commit(); - env.FlushLogToDataFile(); // frees pages from scratch (including the one at position 11) + return key; + } + } - using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) - { - var tree = txw.Environment.State.GetTree(txw, "tree1"); - tree.Add(txw, string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11) + private static string Write(StorageEnvironment env, int i) + { + using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) + { + var key = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + i.ToString("D2"); - txw.Commit(); - } + txw.ReadTree("tree0").MultiAdd(txw, "key/1", key); + txw.Commit(); + + return key; + } + } + + [Fact] + public void ScratchPagesShouldNotBeReleasedUntilNotUsed() + { + var directory = "Test2"; + + if (Directory.Exists(directory)) + Directory.Delete(directory, true); + + var options = StorageEnvironmentOptions.ForPath(directory); + + options.ManualFlushing = true; + using (var env = new StorageEnvironment(options)) + { + CreateTrees(env, 2, "tree"); + for (int a = 0; a < 3; a++) + { + using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.Environment.State.GetTree(tx, "tree0").Add(tx, string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); + tx.Environment.State.GetTree(tx, "tree0").Add(tx, string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); + + tx.Commit(); + } + } + + using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.Environment.State.GetTree(tx, "tree1").Add(tx, "yek/1", new MemoryStream()); + + tx.Commit(); + } + + using (var txr = env.NewTransaction(TransactionFlags.Read)) + { + using (var iterator = txr.Environment.State.GetTree(txr, "tree0").Iterate(txr)) + { + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); // all pages are from scratch (one from position 11) + + var currentKey = iterator.CurrentKey.ToString(); + + env.FlushLogToDataFile(); // frees pages from scratch (including the one at position 11) + + using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) + { + var tree = txw.Environment.State.GetTree(txw, "tree1"); + tree.Add(txw, 
string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11) + + txw.Commit(); + } Assert.Equal(currentKey, iterator.CurrentKey.ToString()); using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) - { - var tree = txw.Environment.State.GetTree(txw, "tree1"); - tree.Add(txw, "fake", new MemoryStream()); + { + var tree = txw.Environment.State.GetTree(txw, "tree1"); + tree.Add(txw, "fake", new MemoryStream()); - txw.Commit(); - } + txw.Commit(); + } - Assert.Equal(currentKey, iterator.CurrentKey.ToString()); + Assert.Equal(currentKey, iterator.CurrentKey.ToString()); - var count = 0; + var count = 0; - do - { - currentKey = iterator.CurrentKey.ToString(); - count++; + do + { + currentKey = iterator.CurrentKey.ToString(); + count++; - Assert.Contains("key/", currentKey); - } - while (iterator.MoveNext()); + Assert.Contains("key/", currentKey); + } + while (iterator.MoveNext()); - Assert.Equal(6, count); - } - } - } - } - } + Assert.Equal(6, count); + } + } + } + } + } } \ No newline at end of file diff --git a/Voron.Tests/Bugs/Iterating.cs b/Voron.Tests/Bugs/Iterating.cs new file mode 100644 index 0000000000..d0eb57b245 --- /dev/null +++ b/Voron.Tests/Bugs/Iterating.cs @@ -0,0 +1,40 @@ +// ----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. +// +// ----------------------------------------------------------------------- +using Xunit; + +namespace Voron.Tests.Bugs +{ + public class Iterating : StorageTest + { + [Fact] + public void IterationShouldNotFindAnyRecordsAndShouldNotThrowWhenNumberOfEntriesOnPageIs1AndKeyDoesNotMatch() + { + using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) + { + using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) + { + env.CreateTree(tx, "tree"); + + tx.Commit(); + } + + using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) + { + var tree = tx.ReadTree("tree"); + tree.Add(tx, @"Raven\Database\1", StreamFor("123")); + + tx.Commit(); + } + + using (var snapshot = env.CreateSnapshot()) + using (var iterator = snapshot.Iterate("tree")) + { + Assert.False(iterator.Seek(@"Raven\Filesystem\")); + } + } + } + } +} \ No newline at end of file diff --git a/Voron.Tests/Bugs/MultiAdds.cs b/Voron.Tests/Bugs/MultiAdds.cs index b0773d6aa0..75df256499 100644 --- a/Voron.Tests/Bugs/MultiAdds.cs +++ b/Voron.Tests/Bugs/MultiAdds.cs @@ -233,6 +233,7 @@ private void ValidateMultiRecords(StorageEnvironment env, IEnumerable tr { for (var j = 0; j < 10; j++) { + foreach (var treeName in trees) { var tree = tx.Environment.State.GetTree(tx,treeName); @@ -307,4 +308,4 @@ private IList CreateTrees(StorageEnvironment env, int number, string pre return results; } } -} \ No newline at end of file +} diff --git a/Voron.Tests/Bugs/MultiReads.cs b/Voron.Tests/Bugs/MultiReads.cs new file mode 100644 index 0000000000..a73371b7f5 --- /dev/null +++ b/Voron.Tests/Bugs/MultiReads.cs @@ -0,0 +1,38 @@ +// ----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. 
+// +// ----------------------------------------------------------------------- +using Xunit; + +namespace Voron.Tests.Bugs +{ + public class MultiReads : StorageTest + { + [Fact] + public void MultiReadShouldKeepItemOrder() + { + foreach (var treeName in CreateTrees(Env, 1, "tree")) + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.ReadTree(treeName).MultiAdd(tx, "queue1", "queue1/07000000-0000-0000-0000-000000000001"); + tx.ReadTree(treeName).MultiAdd(tx, "queue1", "queue1/07000000-0000-0000-0000-000000000002"); + + tx.Commit(); + } + + using (var snapshot = Env.CreateSnapshot()) + using (var iterator = snapshot.MultiRead(treeName, "queue1")) + { + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); + + Assert.Equal("queue1/07000000-0000-0000-0000-000000000001", iterator.CurrentKey.ToString()); + Assert.True(iterator.MoveNext()); + Assert.Equal("queue1/07000000-0000-0000-0000-000000000002", iterator.CurrentKey.ToString()); + Assert.False(iterator.MoveNext()); + } + } + } + } +} \ No newline at end of file diff --git a/Voron.Tests/Bugs/Snapshots.cs b/Voron.Tests/Bugs/Snapshots.cs index 0fee740831..59ae7519c4 100644 --- a/Voron.Tests/Bugs/Snapshots.cs +++ b/Voron.Tests/Bugs/Snapshots.cs @@ -1,4 +1,5 @@ -using Voron.Debugging; +using System.Linq; +using Voron.Debugging; namespace Voron.Tests.Bugs { @@ -68,7 +69,8 @@ public void SnapshotIssue() Assert.NotNull(result); { - Assert.Equal(testBuffer, result.Reader.ReadBytes(result.Reader.Length)); + int used; + Assert.Equal(testBuffer, result.Reader.ReadBytes(result.Reader.Length, out used).Take(used).ToArray()); } } } @@ -125,7 +127,8 @@ public void SnapshotIssue_ExplicitFlushing() Assert.NotNull(result); { - Assert.Equal(testBuffer, result.Reader.ReadBytes(result.Reader.Length)); + int used; + Assert.Equal(testBuffer, result.Reader.ReadBytes(result.Reader.Length, out used).Take(used).ToArray()); } } } diff --git a/Voron.Tests/Bugs/TreeRebalancer.cs b/Voron.Tests/Bugs/TreeRebalancer.cs index 84ae543662..c251840868 100644 --- a/Voron.Tests/Bugs/TreeRebalancer.cs +++ b/Voron.Tests/Bugs/TreeRebalancer.cs @@ -1,4 +1,6 @@ -namespace Voron.Tests.Bugs +using System.IO; + +namespace Voron.Tests.Bugs { using System; using System.Collections.Generic; @@ -109,5 +111,58 @@ private void ValidateMulti(StorageEnvironment env, IEnumerable trees) } } } + + [Fact] + public void ShouldNotThrowThatPageIsFullDuringTreeRebalancing() + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + var tree = Env.CreateTree(tx, "rebalancing-issue"); + + var aKey = new string('a', 1024); + var bKey = new string('b', 1024); + var cKey = new string('c', 1024); + var dKey = new string('d', 1024); + var eKey = new string('e', 600); + var fKey = new string('f', 920); + + tree.Add(tx, aKey, new MemoryStream(new byte[1000])); + tree.Add(tx, bKey, new MemoryStream(new byte[1000])); + tree.Add(tx, cKey, new MemoryStream(new byte[1000])); + tree.Add(tx, dKey, new MemoryStream(new byte[1000])); + tree.Add(tx, eKey, new MemoryStream(new byte[800])); + tree.Add(tx, fKey, new MemoryStream(new byte[10])); + + RenderAndShow(tx, 1, "rebalancing-issue"); + + // to expose the bug we need to delete the last item from the left most page + // tree rebalance will try to fix the first reference (the implicit ref page node) in the parent page which is almost full + // and will fail because there is no space to put a new node + + tree.Delete(tx, aKey); // this line throws "The page is full and cannot add an entry, this is probably a bug" + + tx.Commit(); + + 
using (var iterator = tree.Iterate(tx)) + { + Assert.True(iterator.Seek(Slice.BeforeAllKeys)); + + Assert.Equal(bKey, iterator.CurrentKey); + Assert.True(iterator.MoveNext()); + + Assert.Equal(cKey, iterator.CurrentKey); + Assert.True(iterator.MoveNext()); + + Assert.Equal(dKey, iterator.CurrentKey); + Assert.True(iterator.MoveNext()); + + Assert.Equal(eKey, iterator.CurrentKey); + Assert.True(iterator.MoveNext()); + + Assert.Equal(fKey, iterator.CurrentKey); + Assert.False(iterator.MoveNext()); + } + } + } } } \ No newline at end of file diff --git a/Voron.Tests/Journal/Mvcc.cs b/Voron.Tests/Journal/Mvcc.cs index 36d2e8c77b..8c145c233d 100644 --- a/Voron.Tests/Journal/Mvcc.cs +++ b/Voron.Tests/Journal/Mvcc.cs @@ -5,6 +5,7 @@ // ----------------------------------------------------------------------- using System.IO; +using System.Linq; using Voron.Impl; using Voron.Impl.Paging; using Xunit; @@ -52,7 +53,8 @@ public void ShouldNotFlushUntilThereAreActiveOlderTransactions() var readResult = txr.State.Root.Read(txr, "items/1"); - var readData = readResult.Reader.ReadBytes(readResult.Reader.Length); + int used; + var readData = readResult.Reader.ReadBytes(readResult.Reader.Length, out used).Take(used).ToArray(); for (int i = 0; i < 3000; i++) { diff --git a/Voron.Tests/Journal/UncommittedTransactions.cs b/Voron.Tests/Journal/UncommittedTransactions.cs index 8c5ce98981..9718da1ca1 100644 --- a/Voron.Tests/Journal/UncommittedTransactions.cs +++ b/Voron.Tests/Journal/UncommittedTransactions.cs @@ -38,7 +38,7 @@ public void UncommittedTransactionMustNotModifyPageTranslationTableOfLogFile() using (var tx2 = Env.NewTransaction(TransactionFlags.Read)) { // tx was not committed so in the log should not apply - var readPage = Env.Journal.ReadPage(tx2,pageAllocatedInUncommittedTransaction); + var readPage = Env.Journal.ReadPage(tx2,pageAllocatedInUncommittedTransaction, scratchPagerState: null); Assert.Null(readPage); } diff --git a/Voron.Tests/MultiTreeSize.cs b/Voron.Tests/MultiTreeSize.cs new file mode 100644 index 0000000000..8418b1d696 --- /dev/null +++ b/Voron.Tests/MultiTreeSize.cs @@ -0,0 +1,43 @@ +// ----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. 
+// +// ----------------------------------------------------------------------- +using System; +using Voron.Impl.Paging; +using Xunit; + +namespace Voron.Tests +{ + public class MultiTreeSize : StorageTest + { + [Fact] + public void Single_AddMulti_WillUseOnePage() + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.State.Root.MultiAdd(tx, "ChildTreeKey", "test"); + tx.Commit(); + } + + Assert.Equal(AbstractPager.PageSize, + Env.Stats().UsedDataFileSizeInBytes + ); + } + + [Fact] + public void TwoSmall_AddMulti_WillUseOnePage() + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.State.Root.MultiAdd(tx, "ChildTreeKey", "test1"); + tx.State.Root.MultiAdd(tx, "ChildTreeKey", "test2"); + tx.Commit(); + } + + Assert.Equal(AbstractPager.PageSize, + Env.Stats().UsedDataFileSizeInBytes + ); + } + } +} \ No newline at end of file diff --git a/Voron.Tests/Storage/Batches.cs b/Voron.Tests/Storage/Batches.cs index 7170e1b319..86c3ebf2d2 100644 --- a/Voron.Tests/Storage/Batches.cs +++ b/Voron.Tests/Storage/Batches.cs @@ -353,7 +353,7 @@ public async Task BatchErrorHandling() batch1.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree1")), "tree1"); var batch2 = new WriteBatch(); - batch2.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree2")), "tree2"); + batch2.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree2")), "tree2", version: 1); var batch3 = new WriteBatch(); batch3.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree3")), "tree3"); @@ -374,7 +374,7 @@ public async Task BatchErrorHandling() } catch (AggregateException e) { - Assert.Equal("No such tree: tree2", e.InnerException.Message); + Assert.Equal("Cannot add 'key/1' to 'tree2' tree. Version mismatch. Expected: 1. Actual: 0.", e.InnerException.Message); using (var tx = Env.NewTransaction(TransactionFlags.Read)) { @@ -393,7 +393,7 @@ public async Task MergedBatchErrorHandling() batch1.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree1")), "tree1"); var batch2 = new WriteBatch(); - batch2.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree2")), "tree2"); + batch2.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree2")), "tree2", version: 1); var batch3 = new WriteBatch(); batch3.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("tree3")), "tree3"); @@ -424,7 +424,7 @@ public async Task MergedBatchErrorHandling() } catch (AggregateException e) { - Assert.Equal("No such tree: tree2", e.InnerException.Message); + Assert.Equal("Cannot add 'key/1' to 'tree2' tree. Version mismatch. Expected: 1. Actual: 0.", e.InnerException.Message); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) diff --git a/Voron.Tests/Storage/Concurrency.cs b/Voron.Tests/Storage/Concurrency.cs index b36ac7ba2d..1c58497c0b 100644 --- a/Voron.Tests/Storage/Concurrency.cs +++ b/Voron.Tests/Storage/Concurrency.cs @@ -103,7 +103,7 @@ public void Missing() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var e = Assert.Throws(() => tx.State.Root.Add(tx, "key/1", StreamFor("321"), 0)); - Assert.Equal("Cannot add 'key/1'. Version mismatch. Expected: 0. Actual: 1.", e.Message); + Assert.Equal("Cannot add 'key/1' to 'Root' tree. Version mismatch. Expected: 0. 
Actual: 1.", e.Message); } } @@ -121,13 +121,13 @@ public void ConcurrencyExceptionShouldBeThrownWhenVersionMismatch() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var e = Assert.Throws(() => tx.State.Root.Add(tx, "key/1", StreamFor("321"), 2)); - Assert.Equal("Cannot add 'key/1'. Version mismatch. Expected: 2. Actual: 1.", e.Message); + Assert.Equal("Cannot add 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var e = Assert.Throws(() => tx.State.Root.Delete(tx, "key/1", 2)); - Assert.Equal("Cannot delete 'key/1'. Version mismatch. Expected: 2. Actual: 1.", e.Message); + Assert.Equal("Cannot delete 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); } } @@ -144,14 +144,14 @@ public void ConcurrencyExceptionShouldBeThrownWhenVersionMismatchMultiTree() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var e = Assert.Throws(() => tx.State.Root.MultiAdd(tx, "key/1", "321", 2)); - Assert.Equal("Cannot add 'key/1'. Version mismatch. Expected: 2. Actual: 1.", e.Message); + var e = Assert.Throws(() => tx.State.Root.MultiAdd(tx, "key/1", "321", version: 2)); + Assert.Equal("Cannot add value '321' to key 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 0.", e.Message); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var e = Assert.Throws(() => tx.State.Root.MultiDelete(tx, "key/1", "123", 2)); - Assert.Equal("Cannot delete 'key/1'. Version mismatch. Expected: 2. Actual: 1.", e.Message); + Assert.Equal("Cannot delete value '123' to key 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); } } @@ -220,7 +220,7 @@ public void BatchMissing() batch2.Add("key/1", StreamFor("123"), Constants.RootTreeName, 0); var e = Assert.Throws(() => Env.Writer.Write(batch2)).InnerException; - Assert.Equal("Cannot add 'key/1'. Version mismatch. Expected: 0. Actual: 1.", e.Message); + Assert.Equal("Cannot add 'key/1' to 'Root' tree. Version mismatch. Expected: 0. Actual: 1.", e.Message); } [Fact] @@ -235,13 +235,13 @@ public void BatchConcurrencyExceptionShouldBeThrownWhenVersionMismatch() batch2.Add("key/1", StreamFor("123"), Constants.RootTreeName, 2); var e = Assert.Throws(() => Env.Writer.Write(batch2)).InnerException; - Assert.Equal("Cannot add 'key/1'. Version mismatch. Expected: 2. Actual: 1.", e.Message); + Assert.Equal("Cannot add 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); var batch3 = new WriteBatch(); batch3.Delete("key/1", Constants.RootTreeName, 2); e = Assert.Throws(() => Env.Writer.Write(batch3)).InnerException; - Assert.Equal("Cannot delete 'key/1'. Version mismatch. Expected: 2. Actual: 1.", e.Message); + Assert.Equal("Cannot delete 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); } [Fact] diff --git a/Voron.Tests/Storage/Increments.cs b/Voron.Tests/Storage/Increments.cs new file mode 100644 index 0000000000..140fa76595 --- /dev/null +++ b/Voron.Tests/Storage/Increments.cs @@ -0,0 +1,80 @@ +// ----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. 
+// +// ----------------------------------------------------------------------- +using Voron.Impl; + +using Xunit; + +namespace Voron.Tests.Storage +{ + public class Increments : StorageTest + { + [Fact] + public void SimpleIncrementShouldWork() + { + CreateTrees(Env, 1, "tree"); + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Assert.Equal(10, tx.ReadTree("tree0").Increment(tx, "key/1", 10)); + + tx.Commit(); + } + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Assert.Equal(15, tx.ReadTree("tree0").Increment(tx, "key/1", 5)); + + tx.Commit(); + } + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Assert.Equal(12, tx.ReadTree("tree0").Increment(tx, "key/1", -3)); + + tx.Commit(); + } + + using (var tx = Env.NewTransaction(TransactionFlags.Read)) + { + var read = tx.ReadTree("tree0").Read(tx, "key/1"); + + Assert.NotNull(read); + Assert.Equal(3, read.Version); + Assert.Equal(12, read.Reader.ReadLittleEndianInt64()); + } + } + + [Fact] + public void SimpleIncrementShouldWorkUsingWriteBatch() + { + CreateTrees(Env, 1, "tree"); + + var writeBatch = new WriteBatch(); + writeBatch.Increment("key/1", 10, "tree0"); + + Env.Writer.Write(writeBatch); + + writeBatch = new WriteBatch(); + writeBatch.Increment("key/1", 5, "tree0"); + + Env.Writer.Write(writeBatch); + + writeBatch = new WriteBatch(); + writeBatch.Increment("key/1", -3, "tree0"); + + Env.Writer.Write(writeBatch); + + using (var tx = Env.NewTransaction(TransactionFlags.Read)) + { + var read = tx.ReadTree("tree0").Read(tx, "key/1"); + + Assert.NotNull(read); + Assert.Equal(3, read.Version); + Assert.Equal(12, read.Reader.ReadLittleEndianInt64()); + } + } + } +} \ No newline at end of file diff --git a/Voron.Tests/Storage/InitialSize.cs b/Voron.Tests/Storage/InitialSize.cs new file mode 100644 index 0000000000..943aa5c756 --- /dev/null +++ b/Voron.Tests/Storage/InitialSize.cs @@ -0,0 +1,91 @@ +// ----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. 
+// +// ----------------------------------------------------------------------- +using System; +using System.IO; + +using Voron.Impl; + +using Xunit; + +namespace Voron.Tests.Storage +{ + public class InitialSize : StorageTest + { + private readonly string path; + + public InitialSize() + { + path = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); + if (Directory.Exists(path)) + Directory.Delete(path, true); + } + + [Fact] + public void WhenInitialFileSizeIsNotSetTheFileSizeForDataFileAndScratchFileShouldBeSetToSystemAllocationGranularity() + { + NativeMethods.SYSTEM_INFO systemInfo; + NativeMethods.GetSystemInfo(out systemInfo); + + var options = StorageEnvironmentOptions.ForPath(path); + options.InitialFileSize = null; + + using (new StorageEnvironment(options)) + { + var dataFile = Path.Combine(path, Constants.DatabaseFilename); + var scratchFile = Path.Combine(path, "scratch.buffers"); + + Assert.Equal(systemInfo.allocationGranularity, new FileInfo(dataFile).Length); + Assert.Equal(systemInfo.allocationGranularity, new FileInfo(scratchFile).Length); + } + } + + [Fact] + public void WhenInitialFileSizeIsSetTheFileSizeForDataFileAndScratchFileShouldBeSetAccordingly() + { + NativeMethods.SYSTEM_INFO systemInfo; + NativeMethods.GetSystemInfo(out systemInfo); + + var options = StorageEnvironmentOptions.ForPath(path); + options.InitialFileSize = systemInfo.allocationGranularity * 2; + + using (new StorageEnvironment(options)) + { + var dataFile = Path.Combine(path, Constants.DatabaseFilename); + var scratchFile = Path.Combine(path, "scratch.buffers"); + + Assert.Equal(systemInfo.allocationGranularity * 2, new FileInfo(dataFile).Length); + Assert.Equal(systemInfo.allocationGranularity * 2, new FileInfo(scratchFile).Length); + } + } + + [Fact] + public void WhenInitialFileSizeIsSetTheFileSizeForDataFileAndScratchFileShouldBeSetAccordinglyAndItWillBeRoundedToTheNearestGranularity() + { + NativeMethods.SYSTEM_INFO systemInfo; + NativeMethods.GetSystemInfo(out systemInfo); + + var options = StorageEnvironmentOptions.ForPath(path); + options.InitialFileSize = systemInfo.allocationGranularity * 2 + 1; + + using (new StorageEnvironment(options)) + { + var dataFile = Path.Combine(path, Constants.DatabaseFilename); + var scratchFile = Path.Combine(path, "scratch.buffers"); + + Assert.Equal(systemInfo.allocationGranularity * 3, new FileInfo(dataFile).Length); + Assert.Equal(systemInfo.allocationGranularity * 3, new FileInfo(scratchFile).Length); + } + } + + public override void Dispose() + { + if (!string.IsNullOrEmpty(path) && Directory.Exists(path)) + Directory.Delete(path, true); + + base.Dispose(); + } + } +} \ No newline at end of file diff --git a/Voron.Tests/Storage/Pagers.cs b/Voron.Tests/Storage/Pagers.cs index 052f9b046c..275308c514 100644 --- a/Voron.Tests/Storage/Pagers.cs +++ b/Voron.Tests/Storage/Pagers.cs @@ -25,7 +25,7 @@ public void MemoryMapPagerReleasesPagerState() [Fact] public void MemoryMapWithoutBackingReleasePagerState() { - PagerReleasesPagerState(() => new Win32PageFileBackedMemoryMappedPager()); + PagerReleasesPagerState(() => new Win32PageFileBackedMemoryMappedPager("test")); } [Fact] diff --git a/Voron.Tests/Storage/SplittingVeryBig.cs b/Voron.Tests/Storage/SplittingVeryBig.cs index 0b5c27904b..5aeb758d63 100644 --- a/Voron.Tests/Storage/SplittingVeryBig.cs +++ b/Voron.Tests/Storage/SplittingVeryBig.cs @@ -1,4 +1,5 @@ -using Voron.Impl; +using System.Linq; +using Voron.Impl; using Voron.Impl.Paging; namespace Voron.Tests.Storage @@ -41,7 +42,8 @@ public void 
ShouldBeAbleToWriteValuesGreaterThanLogAndReadThem() var reader = read.Reader; Assert.Equal(buffer.Length, read.Reader.Length); - Assert.Equal(buffer, reader.ReadBytes(read.Reader.Length)); + int used; + Assert.Equal(buffer, reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray()); } } @@ -90,7 +92,8 @@ public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem() { Assert.Equal(buffer.Length, read.Reader.Length); - Assert.Equal(buffer, read.Reader.ReadBytes(read.Reader.Length)); + int used; + Assert.Equal(buffer, read.Reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray()); } } } diff --git a/Voron.Tests/Trees/FreeSpaceTest.cs b/Voron.Tests/Trees/FreeSpaceTest.cs index 38755d4199..fdc27a3f56 100644 --- a/Voron.Tests/Trees/FreeSpaceTest.cs +++ b/Voron.Tests/Trees/FreeSpaceTest.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; using System.IO; using Voron.Impl.FreeSpace; using Xunit; @@ -68,5 +69,110 @@ public void ShouldReturnProperPageFromSecondSection() Assert.Equal(FreeSpaceHandling.NumberOfPagesInSection + 1, Env.FreeSpaceHandling.TryAllocateFromFreeSpace(tx, 1)); } } + + [Fact] + public void CanReuseMostOfFreePages_RemainingOnesCanBeTakenToHandleFreeSpace() + { + const int maxPageNumber = 4000000; + const int numberOfFreedPages = 100; + var random = new Random(3); + var freedPages = new HashSet(); + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.State.NextPageNumber = maxPageNumber + 1; + + tx.Commit(); + } + + for (int i = 0; i < numberOfFreedPages; i++) + { + long pageToFree; + do + { + pageToFree = random.Next(0, maxPageNumber); + } while (freedPages.Add(pageToFree) == false); + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Env.FreeSpaceHandling.FreePage(tx, pageToFree); + + tx.Commit(); + } + } + + // we cannot expect that all freed pages will be available for a reuse + // some freed pages can be used internally by free space handling + // 80% should be definitely a safe value + + var minNumberOfFreePages = numberOfFreedPages * 0.8; + + for (int i = 0; i < minNumberOfFreePages; i++) + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + var page = Env.FreeSpaceHandling.TryAllocateFromFreeSpace(tx, 1); + + Assert.NotNull(page); + Assert.True(freedPages.Remove(page.Value)); + + tx.Commit(); + } + } + } + + [Fact] + public void FreeSpaceHandlingShouldNotReturnPagesThatAreAlreadyAllocated() + { + const int maxPageNumber = 400000; + const int numberOfFreedPages = 60; + var random = new Random(2); + var freedPages = new HashSet(); + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + tx.State.NextPageNumber = maxPageNumber + 1; + + tx.Commit(); + } + + for (int i = 0; i < numberOfFreedPages; i++) + { + long pageToFree; + do + { + pageToFree = random.Next(0, maxPageNumber); + } while (freedPages.Add(pageToFree) == false); + + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + Env.FreeSpaceHandling.FreePage(tx, pageToFree); + + tx.Commit(); + } + } + + var alreadyReused = new List(); + + do + { + using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) + { + var page = Env.FreeSpaceHandling.TryAllocateFromFreeSpace(tx, 1); + + if (page == null) + { + break; + } + + Assert.False(alreadyReused.Contains(page.Value), "Free space handling returned a page number that has been already allocated. 
Page number: " + page); + Assert.True(freedPages.Remove(page.Value)); + + alreadyReused.Add(page.Value); + + tx.Commit(); + } + } while (true); + } } } \ No newline at end of file diff --git a/Voron.Tests/Util/StreamExtensions.cs b/Voron.Tests/Util/StreamExtensions.cs index 43fa03b3fa..da58e91a2b 100644 --- a/Voron.Tests/Util/StreamExtensions.cs +++ b/Voron.Tests/Util/StreamExtensions.cs @@ -57,7 +57,6 @@ public static int ReadInt32(this Stream stream) return BitConverter.ToInt32(buffer, 0); } -#if !SILVERLIGHT public static string ReadString(this Stream stream) { return ReadString(stream, Encoding.UTF8); @@ -83,7 +82,6 @@ public static string ReadStringWithoutPrefix(this Stream stream, Encoding encodi return encoding.GetString(buffer); } -#endif public static void Write(this Stream stream, string value) { diff --git a/Voron.Tests/Voron.Tests.csproj b/Voron.Tests/Voron.Tests.csproj index 191b31c904..8ffd58cc4e 100644 --- a/Voron.Tests/Voron.Tests.csproj +++ b/Voron.Tests/Voron.Tests.csproj @@ -1,5 +1,5 @@  - + Debug @@ -9,7 +9,7 @@ Properties Voron.Tests Voron.Tests - v4.5 + v4.5.1 512 ..\ @@ -63,6 +63,7 @@ + @@ -74,22 +75,23 @@ - + + - + @@ -98,6 +100,8 @@ + + @@ -119,7 +123,6 @@ - @@ -141,7 +144,9 @@ Always - + + + diff --git a/Voron/Debugging/DebugActionType.cs b/Voron/Debugging/DebugActionType.cs index 6c8e459f1b..ee27fa01c3 100644 --- a/Voron/Debugging/DebugActionType.cs +++ b/Voron/Debugging/DebugActionType.cs @@ -6,6 +6,7 @@ public enum DebugActionType Delete, MultiAdd, MultiDelete, - CreateTree, + CreateTree, + Increment } } diff --git a/Voron/Debugging/EnvironmentStats.cs b/Voron/Debugging/EnvironmentStats.cs index 1e41d5e32d..ee4dd33800 100644 --- a/Voron/Debugging/EnvironmentStats.cs +++ b/Voron/Debugging/EnvironmentStats.cs @@ -1,24 +1,11 @@ namespace Voron.Debugging { -<<<<<<< HEAD public class EnvironmentStats { - public long FreePages; public long FreePagesOverhead; public long RootPages; public long UnallocatedPagesAtEndOfFile; public long UsedDataFileSizeInBytes; public long AllocatedDataFileSizeInBytes; } -======= - public class EnvironmentStats - { - public long FreePages; - public long FreePagesOverhead; - public long RootPages; - public long UnallocatedPagesAtEndOfFile; - public long UsedDataFileSizeInBytes; - public long AllocatedDataFileSizeInBytes; - } ->>>>>>> e45804f0d5ace9c91417184e68593aafb1774486 } \ No newline at end of file diff --git a/Voron/Impl/Backup/BackupMethods.cs b/Voron/Impl/Backup/BackupMethods.cs index 14e92aab05..13750bba1e 100644 --- a/Voron/Impl/Backup/BackupMethods.cs +++ b/Voron/Impl/Backup/BackupMethods.cs @@ -3,15 +3,15 @@ // Copyright (c) Hibernating Rhinos LTD. All rights reserved. 
// // ----------------------------------------------------------------------- - namespace Voron.Impl.Backup { public class BackupMethods { - public const string Filename = "RavenDB.Voron.Backup"; + public const string Filename = "RavenDB.Voron.Backup"; public static FullBackup Full = new FullBackup(); public static IncrementalBackup Incremental = new IncrementalBackup(); + } } \ No newline at end of file diff --git a/Voron/Impl/Backup/FullBackup.cs b/Voron/Impl/Backup/FullBackup.cs index c274bcad68..6583ca0d28 100644 --- a/Voron/Impl/Backup/FullBackup.cs +++ b/Voron/Impl/Backup/FullBackup.cs @@ -4,10 +4,12 @@ // // ----------------------------------------------------------------------- +using System; using System.Diagnostics; using System.IO; using System.IO.Compression; using System.Threading; +using System.Threading.Tasks; using Voron.Impl.FileHeaders; using Voron.Impl.Journal; using Voron.Impl.Paging; @@ -17,13 +19,20 @@ namespace Voron.Impl.Backup { public unsafe class FullBackup { - public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal) - { + + public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, + Action infoNotify = null) + { + infoNotify = infoNotify ?? (s => { }); + var dataPager = env.Options.DataPager; var copier = new DataCopier(AbstractPager.PageSize * 16); Transaction txr = null; try { + + infoNotify("Voron copy headers"); + using (var file = new FileStream(backupPath, FileMode.Create)) using (var package = new ZipArchive(file, ZipArchiveMode.Create)) { @@ -39,25 +48,11 @@ public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel c Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2); - foreach (var headerFileName in HeaderAccessor.HeaderFileNames) - { - var header = stackalloc FileHeader[1]; - - if (env.Options.ReadHeader(headerFileName, header)) - { - var headerPart = package.CreateEntry(headerFileName, compression); - Debug.Assert(headerPart != null); - - using (var headerStream = headerPart.Open()) - { - copier.ToStream((byte*) header, sizeof (FileHeader), headerStream); - } - } - } + VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options); // journal files snapshot files = env.Journal.Files; - + foreach (var journalFile in files) { journalFile.AddRef(); @@ -102,8 +97,10 @@ public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel c using (var stream = journalPart.Open()) { copier.ToStream(journalFile, 0, pagesToCopy, stream); + infoNotify(string.Format("Voron copy journal file {0} ", journalFile)); } - } + + } } finally { @@ -119,11 +116,31 @@ public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel c if (txr != null) txr.Dispose(); } + infoNotify(string.Format("Voron backup db finished")); } - public void Restore(string backupPath, string voronDataDir) + public void Restore(string backupPath, string voronDataDir, string journalDir = null) { - ZipFile.ExtractToDirectory(backupPath, voronDataDir); + journalDir = journalDir ?? voronDataDir; + + if (Directory.Exists(voronDataDir) == false) + Directory.CreateDirectory(voronDataDir); + + if (Directory.Exists(journalDir) == false) + Directory.CreateDirectory(journalDir); + + using (var zip = ZipFile.OpenRead(backupPath)) + { + foreach (var entry in zip.Entries) + { + var dst = Path.GetExtension(entry.Name) == ".journal" ? 
journalDir : voronDataDir; + using (var input = entry.Open()) + using(var output = new FileStream(Path.Combine(dst, entry.Name), FileMode.CreateNew)) + { + input.CopyTo(output); + } + } + } } } } \ No newline at end of file diff --git a/Voron/Impl/Backup/IncrementalBackup.cs b/Voron/Impl/Backup/IncrementalBackup.cs index 39e95eab73..2ef630f41a 100644 --- a/Voron/Impl/Backup/IncrementalBackup.cs +++ b/Voron/Impl/Backup/IncrementalBackup.cs @@ -10,6 +10,7 @@ using System.IO; using System.IO.Compression; using System.Linq; +using System.Threading.Tasks; using Voron.Impl.Journal; using Voron.Impl.Paging; using Voron.Trees; @@ -19,8 +20,22 @@ namespace Voron.Impl.Backup { public unsafe class IncrementalBackup { - public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal) + public class IncrementalRestorePaths { + private string _journalLocation; + public string DatabaseLocation { get; set; } + public string JournalLocation + { + get { return _journalLocation ?? DatabaseLocation; } + set { _journalLocation = value; } + } + } + + public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, + Action infoNotify = null) + { + infoNotify = infoNotify ?? (s => { }); + if (env.Options.IncrementalBackupEnabled == false) throw new InvalidOperationException("Incremental backup is disabled for this storage"); @@ -29,31 +44,31 @@ public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel c var copier = new DataCopier(AbstractPager.PageSize * 16); var backupSuccess = true; - IncrementalBackupInfo backupInfo; long lastWrittenLogPage = -1; long lastWrittenLogFile = -1; - using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) + using (var file = new FileStream(backupPath, FileMode.Create)) + using (var package = new ZipArchive(file, ZipArchiveMode.Create)) { - backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup); - - if (env.Journal.CurrentFile != null) + IncrementalBackupInfo backupInfo; + using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { - lastWrittenLogFile = env.Journal.CurrentFile.Number; - lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition; - } + backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup); - // txw.Commit(); intentionally not committing - } + if (env.Journal.CurrentFile != null) + { + lastWrittenLogFile = env.Journal.CurrentFile.Number; + lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition; + } - using (env.NewTransaction(TransactionFlags.Read)) - { - var usedJournals = new List(); + // txw.Commit(); intentionally not committing + } - try + using (env.NewTransaction(TransactionFlags.Read)) { - using (var file = new FileStream(backupPath, FileMode.Create)) - using (var package = new ZipArchive(file, ZipArchiveMode.Create)) + var usedJournals = new List(); + + try { long lastBackedUpPage = -1; long lastBackedUpFile = -1; @@ -65,6 +80,8 @@ public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel c for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++) { + var num = journalNum; + var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use if (journalFile == null) { @@ -72,9 +89,6 @@ public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel c using (var pager = env.Options.OpenJournalPager(journalNum)) { journalSize = 
Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize); - if (journalSize >= env.Options.MaxLogFileSize) // can't set for more than the max log file size - throw new InvalidOperationException("Recovered journal size is " + journalSize + - ", while the maximum journal size can be " + env.Options.MaxLogFileSize); } journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum); @@ -104,6 +118,8 @@ public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel c using (var stream = part.Open()) { copier.ToStream(journalFile, startBackupAt, pagesToCopy, stream); + infoNotify(string.Format("Voron Incr copy journal number {0}", num)); + } lastBackedUpFile = journalFile.Number; @@ -124,34 +140,34 @@ public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel c //Debug.Assert(lastBackedUpPage != -1); env.HeaderAccessor.Modify(header => - { - header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile; - header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage; - }); + { + header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile; + header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage; + }); } - } - catch (Exception) - { - backupSuccess = false; - throw; - } - finally - { - foreach (var file in usedJournals) + catch (Exception) + { + backupSuccess = false; + throw; + } + finally { - if (backupSuccess) // if backup succeeded we can remove journals + foreach (var jrnl in usedJournals) { - if (file.Number != lastWrittenLogFile) // prevent deletion of the current journal + if (backupSuccess) // if backup succeeded we can remove journals { - file.DeleteOnClose = true; + if (jrnl.Number < lastWrittenLogFile) // prevent deletion of the current journal and journals with a greater number + { + jrnl.DeleteOnClose = true; + } } - } - file.Release(); + jrnl.Release(); + } } + infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages)); + return numberOfBackedUpPages; } - - return numberOfBackedUpPages; } } @@ -169,7 +185,7 @@ public void Restore(StorageEnvironmentOptions options, IEnumerable backu options.OwnsPagers = ownsPagers; } - private void Restore(StorageEnvironment env, string backupPath) + private void Restore(StorageEnvironment env, string singleBackupFile) { using (env.Journal.Applicator.TakeFlushingLock()) { @@ -180,107 +196,124 @@ private void Restore(StorageEnvironment env, string backupPath) env.FlushLogToDataFile(txw); } - List journalNames; - - using (var package = ZipFile.Open(backupPath, ZipArchiveMode.Read)) - { - journalNames = package.Entries.Select(x => x.Name).ToList(); - } - - if (journalNames.Count == 0) - return; - - var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName; - var toDispose = new List(); - - try + using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read)) { - ZipFile.ExtractToDirectory(backupPath, tempDir); + if (package.Entries.Count == 0) + return; - TransactionHeader* lastTxHeader = null; + var toDispose = new List(); - var pagesToWrite = new Dictionary>(); + var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName; - long journalNumber = -1; - foreach (var journalName in journalNames) + try { - var pager = new Win32MemoryMapPager(Path.Combine(tempDir, journalName)); - toDispose.Add(pager); - + TransactionHeader* lastTxHeader = null; + var pagesToWrite = new Dictionary>(); - if (long.TryParse(journalName.Replace(".journal", 
string.Empty), out journalNumber) == false)
+						long journalNumber = -1;
+						foreach (var entry in package.Entries)
 						{
-							throw new InvalidOperationException("Cannot parse journal file number");
+							switch (Path.GetExtension(entry.Name))
+							{
+								case ".journal":
+
+									var journalFileName = Path.Combine(tempDir, entry.Name);
+									using (var output = new FileStream(journalFileName, FileMode.Create))
+									using (var input = entry.Open())
+									{
+										output.Position = output.Length;
+										input.CopyTo(output);
+									}
+
+									var pager = new Win32MemoryMapPager(journalFileName);
+									toDispose.Add(pager);
+
+									if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
+									{
+										throw new InvalidOperationException("Cannot parse journal file number");
+									}
+
+									var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
+									toDispose.Add(recoveryPager);
+
+									var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);
+
+									while (reader.ReadOneTransaction(env.Options))
+									{
+										lastTxHeader = reader.LastTransactionHeader;
+									}
+
+									foreach (var translation in reader.TransactionPageTranslation)
+									{
+										var pageInJournal = translation.Value.JournalPos;
+										pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
+									}
+
+									break;
+								default:
+									throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
+							}
 						}

-						var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
-						toDispose.Add(recoveryPager);
+						var sortedPages = pagesToWrite.OrderBy(x => x.Key)
+													.Select(x => x.Value())
+													.ToList();

-						var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);
+						var last = sortedPages.Last();

-						while (reader.ReadOneTransaction(env.Options))
-						{
-							lastTxHeader = reader.LastTransactionHeader;
-						}
+						env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
+							last.IsOverflow
+								? env.Options.DataPager.GetNumberOfOverflowPages(
+									last.OverflowSize)
+								: 1);

-						foreach (var translation in reader.TransactionPageTranslation)
+						foreach (var page in sortedPages)
 						{
-							var pageInJournal = translation.Value.JournalPos;
-							pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
+							env.Options.DataPager.Write(page);
 						}
-					}

-					var sortedPages = pagesToWrite.OrderBy(x => x.Key)
-												.Select(x => x.Value())
-												.ToList();
+						env.Options.DataPager.Sync();

-					var last = sortedPages.Last();
+						txw.State.Root = Tree.Open(txw, env._sliceComparer, &lastTxHeader->Root);
+						txw.State.FreeSpaceRoot = Tree.Open(txw, env._sliceComparer, &lastTxHeader->FreeSpace);

-					env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
-						last.IsOverflow
-							? 
env.Options.DataPager.GetNumberOfOverflowPages( - last.OverflowSize) - : 1); + txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName; + txw.State.Root.Name = Constants.RootTreeName; - foreach (var page in sortedPages) - { - env.Options.DataPager.Write(page); - } - - env.Options.DataPager.Sync(); + txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1; - txw.State.Root = Tree.Open(txw, env._sliceComparer, &lastTxHeader->Root); - txw.State.FreeSpaceRoot = Tree.Open(txw, env._sliceComparer, &lastTxHeader->FreeSpace); + env.Journal.Clear(txw); - txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName; - txw.State.Root.Name = Constants.RootTreeName; + txw.Commit(); - txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1; + env.HeaderAccessor.Modify(header => + { + header->TransactionId = lastTxHeader->TransactionId; + header->LastPageNumber = lastTxHeader->LastPageNumber; - env.Journal.Clear(txw); + header->Journal.LastSyncedJournal = journalNumber; + header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId; - txw.Commit(); + header->Root = lastTxHeader->Root; + header->FreeSpace = lastTxHeader->FreeSpace; - env.HeaderAccessor.Modify(header => + header->Journal.CurrentJournal = journalNumber + 1; + header->Journal.JournalFilesCount = 0; + }); + } + finally { - header->TransactionId = lastTxHeader->TransactionId; - header->LastPageNumber = lastTxHeader->LastPageNumber; - - header->Journal.LastSyncedJournal = journalNumber; - header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId; - - header->Root = lastTxHeader->Root; - header->FreeSpace = lastTxHeader->FreeSpace; - - header->Journal.CurrentJournal = journalNumber + 1; - header->Journal.JournalFilesCount = 0; - }); - } - finally - { - toDispose.ForEach(x => x.Dispose()); - - Directory.Delete(tempDir, true); + toDispose.ForEach(x => x.Dispose()); + + try + { + Directory.Delete(tempDir, true); + } + catch (Exception) + { + // just temp dir - ignore it + } + } } } } diff --git a/Voron/Impl/Backup/VoronBackupUtil.cs b/Voron/Impl/Backup/VoronBackupUtil.cs new file mode 100644 index 0000000000..fd2c163fe9 --- /dev/null +++ b/Voron/Impl/Backup/VoronBackupUtil.cs @@ -0,0 +1,29 @@ +using System.Diagnostics; +using System.IO.Compression; +using Voron.Impl.FileHeaders; +using Voron.Util; + +namespace Voron.Impl.Backup +{ + internal static unsafe class VoronBackupUtil + { + internal static void CopyHeaders(CompressionLevel compression, ZipArchive package, DataCopier copier, StorageEnvironmentOptions storageEnvironmentOptions) + { + foreach (var headerFileName in HeaderAccessor.HeaderFileNames) + { + var header = stackalloc FileHeader[1]; + + if (!storageEnvironmentOptions.ReadHeader(headerFileName, header)) + continue; + + var headerPart = package.CreateEntry(headerFileName, compression); + Debug.Assert(headerPart != null); + + using (var headerStream = headerPart.Open()) + { + copier.ToStream((byte*)header, sizeof(FileHeader), headerStream); + } + } + } + } +} \ No newline at end of file diff --git a/Voron/Impl/Constants.cs b/Voron/Impl/Constants.cs index be448bd995..f9f9bec2f8 100644 --- a/Voron/Impl/Constants.cs +++ b/Voron/Impl/Constants.cs @@ -21,7 +21,7 @@ public unsafe class Constants public static int PageNumberSize = sizeof(long); public static int NodeOffsetSize = sizeof(ushort); - public const int CurrentVersion = 0x00010009; + public const int CurrentVersion = 2; public const string RootTreeName = "Root"; public const string FreeSpaceTreeName = "Free Space"; diff --git 
a/Voron/Impl/FileHeaders/HeaderAccessor.cs b/Voron/Impl/FileHeaders/HeaderAccessor.cs index 1c1e83acf9..50df855ac2 100644 --- a/Voron/Impl/FileHeaders/HeaderAccessor.cs +++ b/Voron/Impl/FileHeaders/HeaderAccessor.cs @@ -72,7 +72,8 @@ public bool Initialize() } if (f1->Version != Constants.CurrentVersion) - throw new InvalidDataException("This is a db file for version " + f1->Version + ", which is not compatible with the current version " + Constants.CurrentVersion); + throw new InvalidDataException("This is a db file for version " + f1->Version + ", which is not compatible with the current version " + Constants.CurrentVersion + Environment.NewLine + + "Error at " + _env.Options.BasePath); if (f1->TransactionId < 0) throw new InvalidDataException("The transaction number cannot be negative"); diff --git a/Voron/Impl/FreeSpace/FreeSpaceHandling.cs b/Voron/Impl/FreeSpace/FreeSpaceHandling.cs index ac4912eb1d..8ce1d1a021 100644 --- a/Voron/Impl/FreeSpace/FreeSpaceHandling.cs +++ b/Voron/Impl/FreeSpace/FreeSpaceHandling.cs @@ -7,14 +7,6 @@ namespace Voron.Impl.FreeSpace public class FreeSpaceHandling : IFreeSpaceHandling { internal const int NumberOfPagesInSection = 256 * 8; // 256 bytes, 8 bits per byte = 2,048 - each section 8 MB in size - private readonly StorageEnvironment _env; - private readonly Slice _freePagesCount; - - public FreeSpaceHandling(StorageEnvironment env) - { - _freePagesCount = new Slice(EndianBitConverter.Big.GetBytes(long.MinValue)); - _env = env; - } public long? TryAllocateFromFreeSpace(Transaction tx, int num) { @@ -51,7 +43,7 @@ public FreeSpaceHandling(StorageEnvironment env) var stream = it.CreateReaderForCurrent(); { var current = new StreamBitArray(stream); - var currentSectionId = it.CurrentKey.ToInt64(); + var currentSectionId = it.CurrentKey.CreateReader().ReadBigEndianInt64(); //need to find full free pages if (current.SetCount < NumberOfPagesInSection) @@ -150,7 +142,7 @@ private static void ResetSections(ref int foundSections, List sections, r var stream = it.CreateReaderForCurrent(); { var current = new StreamBitArray(stream); - var currentSectionId = it.CurrentKey.ToInt64(); + var currentSectionId = it.CurrentKey.CreateReader().ReadBigEndianInt64(); long? page; if (current.SetCount < num) @@ -268,17 +260,6 @@ private static bool TryFindSmallValueMergingTwoSections(Transaction tx, TreeIter return true; } - public long GetFreePageCount() - { - using (var tx = _env.NewTransaction(TransactionFlags.Read)) - { - var readResult = tx.State.FreeSpaceRoot.Read(tx, _freePagesCount); - if (readResult == null) - return 0; - return readResult.Reader.ReadInt64(); - } - } - public List AllPages(Transaction tx) { return tx.State.FreeSpaceRoot.AllPages(tx); diff --git a/Voron/Impl/FreeSpace/IFreeSpaceHandling.cs b/Voron/Impl/FreeSpace/IFreeSpaceHandling.cs index 8bba58678a..214fa83f5e 100644 --- a/Voron/Impl/FreeSpace/IFreeSpaceHandling.cs +++ b/Voron/Impl/FreeSpace/IFreeSpaceHandling.cs @@ -5,7 +5,6 @@ namespace Voron.Impl.FreeSpace public interface IFreeSpaceHandling { long? 
TryAllocateFromFreeSpace(Transaction tx, int num); - long GetFreePageCount(); List AllPages(Transaction tx); void FreePage(Transaction tx, long pageNumber); } diff --git a/Voron/Impl/FreeSpace/NoFreeSpaceHandling.cs b/Voron/Impl/FreeSpace/NoFreeSpaceHandling.cs index 81f7ef6e90..5e20dccaaf 100644 --- a/Voron/Impl/FreeSpace/NoFreeSpaceHandling.cs +++ b/Voron/Impl/FreeSpace/NoFreeSpaceHandling.cs @@ -9,11 +9,6 @@ public class NoFreeSpaceHandling : IFreeSpaceHandling return null; } - public long GetFreePageCount() - { - return 0; - } - public List AllPages(Transaction tx) { return new List(); diff --git a/Voron/Impl/FreeSpace/StreamBitArray.cs b/Voron/Impl/FreeSpace/StreamBitArray.cs index 4781adc1bb..8b3179f6a2 100644 --- a/Voron/Impl/FreeSpace/StreamBitArray.cs +++ b/Voron/Impl/FreeSpace/StreamBitArray.cs @@ -49,10 +49,10 @@ public StreamBitArray() public StreamBitArray(ValueReader reader) { - SetCount = reader.ReadInt32(); + SetCount = reader.ReadLittleEndianInt32(); for (var i = 0; i < _inner.Length; i++) { - _inner[i] = reader.ReadInt32(); + _inner[i] = reader.ReadLittleEndianInt32(); } } diff --git a/Voron/Impl/Journal/JournalFile.cs b/Voron/Impl/Journal/JournalFile.cs index b6f4f96dde..0bb53618d1 100644 --- a/Voron/Impl/Journal/JournalFile.cs +++ b/Voron/Impl/Journal/JournalFile.cs @@ -7,8 +7,12 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.IO; using System.Linq; +using System.Runtime.InteropServices; +using System.Security; using System.Threading; +using System.Threading.Tasks; using Voron.Impl.Paging; using Voron.Trees; using Voron.Util; @@ -90,7 +94,7 @@ public JournalFile(IJournalWriter journalWriter, long journalNumber, long lastSy #if DEBUG Trace.WriteLine( - "Disposing a journal file from finalizer! It should be diposed by using JournalFile.Release() instead!. Log file number: " + + "Disposing a journal file from finalizer! It should be disposed by using JournalFile.Release() instead!. Log file number: " + Number + ". Number of references: " + _refs + " " + _st); #endif } @@ -164,10 +168,7 @@ public bool ReadTransaction(long pos, TransactionHeader* txHeader) return _journalWriter.Read(pos, (byte*)txHeader, sizeof(TransactionHeader)); } - /// - /// write transaction's raw page data into journal. 
returns write page position - /// - public long Write(Transaction tx, byte*[] pages) + public void Write(Transaction tx, byte*[] pages) { var txPages = tx.GetTransactionPages(); @@ -184,13 +185,10 @@ public long Write(Transaction tx, byte*[] pages) _unusedPages.AddRange(unused); } - var position = writePagePos * AbstractPager.PageSize; - _journalWriter.WriteGather(position, pages); - - return writePagePos; + _journalWriter.WriteGather(writePagePos * AbstractPager.PageSize, pages); } - private void UpdatePageTranslationTable(Transaction tx, List txPages, HashSet unused, Dictionary ptt) + private unsafe void UpdatePageTranslationTable(Transaction tx, List txPages, HashSet unused, Dictionary ptt) { for (int index = 1; index < txPages.Count; index++) { @@ -201,9 +199,8 @@ private void UpdatePageTranslationTable(Transaction tx, List _transactionPageTranslation = new Dictionary(); - private int _recoveryPage; - - public bool RequireHeaderUpdate { get; private set; } - - public long NextWritePage - { - get { return _readingPage; } - } - - public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader* previous) - { - if (pager == null) throw new ArgumentNullException("pager"); - - RequireHeaderUpdate = false; - _pager = pager; - _recoveryPager = recoveryPager; - _lastSyncedTransactionId = lastSyncedTransactionId; - _readingPage = 0; - _recoveryPage = 0; - LastTransactionHeader = previous; - _previousTransactionCrc = 0; - } - - public TransactionHeader* LastTransactionHeader { get; private set; } - - protected bool ReadOneTransactionForShipping(StorageEnvironmentOptions options, out TransactionToShip transactionToShipRecord) - { - transactionToShipRecord = null; - if (_readingPage >= _pager.NumberOfAllocatedPages) - return false; - - TransactionHeader* current; - if (!TryReadAndValidateHeader(options, out current)) - return false; - - var compressedPageCount = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 
0 : 1); - if (current->TransactionId <= _lastSyncedTransactionId) - { - LastTransactionHeader = current; - _readingPage += compressedPageCount; - return true; // skipping - } - - if (!ValidatePagesCrc(options, compressedPageCount, current)) - return false; - - var compressedPagesRaw = new byte[compressedPageCount * AbstractPager.PageSize]; - fixed (byte* compressedDataPtr = compressedPagesRaw) - NativeMethods.memcpy(compressedDataPtr, _pager.AcquirePagePointer(_readingPage), compressedPageCount * AbstractPager.PageSize); - - transactionToShipRecord = new TransactionToShip(*current) - { - CompressedData = new MemoryStream(compressedPagesRaw), //no need to compress the pages --> after being written to Journal they are already compressed - PreviousTransactionCrc = _previousTransactionCrc - }; - - _previousTransactionCrc = current->Crc; - - _readingPage += compressedPageCount; - return true; - } - - public IEnumerable ReadJournalForShipping(StorageEnvironmentOptions options) - { - TransactionToShip transactionToShip; - while (ReadOneTransactionForShipping(options, out transactionToShip)) - yield return transactionToShip; - } - - public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true) - { - if (_readingPage >= _pager.NumberOfAllocatedPages) - return false; - - TransactionHeader* current; - if (!TryReadAndValidateHeader(options, out current)) - return false; - - var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1); - - if (current->TransactionId <= _lastSyncedTransactionId) - { - LastTransactionHeader = current; - _readingPage += compressedPages; - return true; // skipping - } - - if (checkCrc && !ValidatePagesCrc(options, compressedPages, current)) - return false; - - var totalPageCount = current->PageCount + current->OverflowPageCount; - - _recoveryPager.EnsureContinuous(null, _recoveryPage, totalPageCount + 1); - var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage); - - NativeMethods.memset(dataPage, 0, totalPageCount * AbstractPager.PageSize); - try - { - LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true); - } - catch (Exception e) - { - options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e); - RequireHeaderUpdate = true; - - return false; - } - - var tempTransactionPageTranslaction = (*current).GetTransactionToPageTranslation(_recoveryPager, ref _recoveryPage); - - _readingPage += compressedPages; - - LastTransactionHeader = current; - - foreach (var pagePosition in tempTransactionPageTranslaction) - { - _transactionPageTranslation[pagePosition.Key] = pagePosition.Value; - } - - return true; - } - - - - private bool ValidatePagesCrc(StorageEnvironmentOptions options, int compressedPages, TransactionHeader* current) - { - uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize); - - if (crc != current->Crc) - { - RequireHeaderUpdate = true; - options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null); - - return false; - } - return true; - } - - public void RecoverAndValidate(StorageEnvironmentOptions options) - { - if (_recoveryPager == null) throw new InvalidOperationException("recoveryPager should not be null"); - - while (ReadOneTransaction(options)) - { - } - } - - - - public Dictionary TransactionPageTranslation - { - get { return _transactionPageTranslation; } - } - - 
private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader* current) - { - current = (TransactionHeader*)_pager.Read(_readingPage).Base; - - if (current->HeaderMarker != Constants.TransactionHeaderMarker) - { - // not a transaction page, - - // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records - // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading - // this log file and move to the next one. - - RequireHeaderUpdate = current->HeaderMarker != 0; - if (RequireHeaderUpdate) + public unsafe class JournalReader + { + private readonly IVirtualPager _pager; + private readonly IVirtualPager _recoveryPager; + + private readonly long _lastSyncedTransactionId; + private long _readingPage; + + private readonly Dictionary _transactionPageTranslation = new Dictionary(); + private int _recoveryPage; + + public bool RequireHeaderUpdate { get; private set; } + + public long NextWritePage + { + get { return _readingPage; } + } + + public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader* previous) + { + RequireHeaderUpdate = false; + _pager = pager; + _recoveryPager = recoveryPager; + _lastSyncedTransactionId = lastSyncedTransactionId; + _readingPage = 0; + _recoveryPage = 0; + LastTransactionHeader = previous; + } + + public TransactionHeader* LastTransactionHeader { get; private set; } + + public bool ReadOneTransaction(StorageEnvironmentOptions options,bool checkCrc = true) + { + if (_readingPage >= _pager.NumberOfAllocatedPages) + return false; + + TransactionHeader* current; + if (!TryReadAndValidateHeader(options, out current)) + return false; + + var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 
0 : 1); + + if (current->TransactionId <= _lastSyncedTransactionId) + { + LastTransactionHeader = current; + _readingPage += compressedPages; + return true; // skipping + } + + if (checkCrc) + { + uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize); + + if (crc != current->Crc) { - options.InvokeRecoveryError(this, - "Transaction " + current->TransactionId + - " header marker was set to garbage value, file is probably corrupted", null); - } - - return false; - } - - ValidateHeader(current, LastTransactionHeader); + RequireHeaderUpdate = true; + options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null); - if (current->TxMarker.HasFlag(TransactionMarker.Commit) == false) - { - // uncommitted transaction, probably - RequireHeaderUpdate = true; - options.InvokeRecoveryError(this, - "Transaction " + current->TransactionId + - " was not committed", null); - return false; - } - - _readingPage++; - return true; - } + return false; + } + } + + _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1); + var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage); + + NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize); + try + { + LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true); + } + catch (Exception e) + { + options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e); + RequireHeaderUpdate = true; + + return false; + } + + var tempTransactionPageTranslaction = new Dictionary(); + + for (var i = 0; i < current->PageCount; i++) + { + Debug.Assert(_pager.Disposed == false); + Debug.Assert(_recoveryPager.Disposed == false); + + var page = _recoveryPager.Read(_recoveryPage); + + tempTransactionPageTranslaction[page.PageNumber] = new JournalFile.PagePosition + { + JournalPos = _recoveryPage, + TransactionId = current->TransactionId + }; + + if (page.IsOverflow) + { + var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize); + _recoveryPage += numOfPages; + } + else + { + _recoveryPage++; + } + } + + _readingPage += compressedPages; + + LastTransactionHeader = current; + + foreach (var pagePosition in tempTransactionPageTranslaction) + { + _transactionPageTranslation[pagePosition.Key] = pagePosition.Value; + } + + return true; + } + + public void RecoverAndValidate(StorageEnvironmentOptions options) + { + while (ReadOneTransaction(options)) + { + } + } + + public Dictionary TransactionPageTranslation + { + get { return _transactionPageTranslation; } + } + + private bool TryReadAndValidateHeader(StorageEnvironmentOptions options,out TransactionHeader* current) + { + current = (TransactionHeader*)_pager.Read(_readingPage).Base; + + if (current->HeaderMarker != Constants.TransactionHeaderMarker) + { + // not a transaction page, + + // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records + // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading + // this log file and move to the next one. 
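+				// (so a zero marker simply means we reached the clean end of this
+				// journal's written data, while any other non-marker value is a torn
+				// or garbage write; the code below flags that case through
+				// RequireHeaderUpdate so recovery can fix up the file header)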
- private void ValidateHeader(TransactionHeader* current, TransactionHeader* previous) + RequireHeaderUpdate = current->HeaderMarker != 0; + if (RequireHeaderUpdate) + { + options.InvokeRecoveryError(this, + "Transaction " + current->TransactionId + + " header marker was set to garbage value, file is probably corrupted", null); + } + + return false; + } + + ValidateHeader(current, LastTransactionHeader); + + if (current->TxMarker.HasFlag(TransactionMarker.Commit) == false) + { + // uncommitted transaction, probably + RequireHeaderUpdate = true; + options.InvokeRecoveryError(this, + "Transaction " + current->TransactionId + + " was not committed", null); + return false; + } + + _readingPage++; + return true; + } + + private void ValidateHeader(TransactionHeader* current, TransactionHeader* previous) { if (current->TransactionId < 0) throw new InvalidDataException("Transaction id cannot be less than 0 (Tx: " + current->TransactionId + " )"); @@ -225,8 +186,7 @@ private void ValidateHeader(TransactionHeader* current, TransactionHeader* previ { if (current->CompressedSize <= 0) throw new InvalidDataException("Compression error in transaction."); - } - else + } else throw new InvalidDataException("Uncompressed transactions are not supported."); if (previous == null) @@ -243,5 +203,5 @@ public override string ToString() { return _pager.ToString(); } - } + } } diff --git a/Voron/Impl/Journal/Win32JournalWriter.cs b/Voron/Impl/Journal/Win32JournalWriter.cs index 5b288d4335..b3978b1f76 100644 --- a/Voron/Impl/Journal/Win32JournalWriter.cs +++ b/Voron/Impl/Journal/Win32JournalWriter.cs @@ -72,7 +72,11 @@ public void WriteGather(long position, byte*[] pages) } _segments[pages.Length].Buffer = null; // null terminating - WriteFileGather(_handle, _segments, (uint) pages.Length*4096, IntPtr.Zero, _nativeOverlapped); + var operationCompleted = WriteFileGather(_handle, _segments, (uint) pages.Length*4096, IntPtr.Zero, _nativeOverlapped); + + if (operationCompleted) + return; + switch (Marshal.GetLastWin32Error()) { case ErrorSuccess: diff --git a/Voron/Impl/Journal/WriteAheadJournal.cs b/Voron/Impl/Journal/WriteAheadJournal.cs index 989bfc204e..47613a4e9f 100644 --- a/Voron/Impl/Journal/WriteAheadJournal.cs +++ b/Voron/Impl/Journal/WriteAheadJournal.cs @@ -5,14 +5,11 @@ // ----------------------------------------------------------------------- using System; -using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; -using System.IO; using System.Linq; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Threading; +using System.Threading.Tasks; using Voron.Exceptions; using Voron.Impl.FileHeaders; using Voron.Impl.Paging; @@ -21,6 +18,8 @@ namespace Voron.Impl.Journal { + using System.IO; + public unsafe class WriteAheadJournal : IDisposable { private readonly StorageEnvironment _env; @@ -32,17 +31,14 @@ public unsafe class WriteAheadJournal : IDisposable private long _journalIndex = -1; private readonly LZ4 _lz4 = new LZ4(); private readonly JournalApplicator _journalApplicator; - private readonly JournalShipper _journalShipper; - private readonly ReaderWriterLockSlim _journalSyncObj; private readonly ModifyHeaderAction _updateLogInfo; - + private ImmutableAppendOnlyList _files = ImmutableAppendOnlyList.Empty; internal JournalFile CurrentFile; private readonly HeaderAccessor _headerAccessor; - private readonly IVirtualPager _compressionPager; - public event Action OnTransactionCommit; + private IVirtualPager _compressionPager; 
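+		// (created from a dedicated "compression.buffers" scratch file in the
+		// constructor below; CompressPages stages the LZ4-compressed pages of
+		// each transaction here before they are written to the journal)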
public WriteAheadJournal(StorageEnvironment env)
 		{
@@ -59,17 +55,13 @@ public WriteAheadJournal(StorageEnvironment env)
 			};

 			_compressionPager = _env.Options.CreateScratchPager("compression.buffers");
-			_journalSyncObj = new ReaderWriterLockSlim();
-			_journalApplicator = new JournalApplicator(this, _journalSyncObj);
-			_journalShipper = new JournalShipper(this, _journalSyncObj);
+			_journalApplicator = new JournalApplicator(this);
 		}

 		public ImmutableAppendOnlyList Files { get { return _files; } }

 		public JournalApplicator Applicator { get { return _journalApplicator; } }

-		public JournalShipper Shipper { get { return _journalShipper; } }
-
 		private JournalFile NextFile(int numberOfPages = 1)
 		{
 			_journalIndex++;
@@ -124,6 +116,7 @@ public bool RecoverDatabase(TransactionHeader* txHeader)
 					if (_env.Options.TryDeleteJournal(unusedfiles) == false)
 						break;
 				}
+			}

 			var lastSyncedTransactionId = logInfo.LastSyncedTransactionId;
@@ -154,7 +147,7 @@ public bool RecoverDatabase(TransactionHeader* txHeader)
 				{
 					if (pagesToWrite.Count > 0)
 						ApplyPagesToDataFileFromJournal(pagesToWrite);
-
+
 					*txHeader = *lastReadHeaderPtr;
 					lastSyncedTxId = txHeader->TransactionId;
 					lastSyncedJournal = journalNumber;
@@ -257,10 +250,12 @@ private void RecoverCurrentJournalSize(IVirtualPager pager)
 			if (journalSize >= _env.Options.MaxLogFileSize) // can't set for more than the max log file size
 				return;

-			_currentJournalFileSize = journalSize;
+			// this sets the size of the _next_ journal file
+			_currentJournalFileSize = Math.Min(journalSize, _env.Options.MaxLogFileSize);
 		}

-		public Page ReadPage(Transaction tx, long pageNumber)
+
+		public Page ReadPage(Transaction tx, long pageNumber, PagerState scratchPagerState)
 		{
 			// read transactions have to read from journal snapshots
 			if (tx.Flags == TransactionFlags.Read)
@@ -271,7 +266,7 @@ public Page ReadPage(Transaction tx, long pageNumber)
 					JournalFile.PagePosition value;
 					if (tx.JournalSnapshots[i].PageTranslationTable.TryGetValue(tx, pageNumber, out value))
 					{
-						var page = _env.ScratchBufferPool.ReadPage(value.ScratchPos);
+						var page = _env.ScratchBufferPool.ReadPage(value.ScratchPos, scratchPagerState);

 						Debug.Assert(page.PageNumber == pageNumber);

@@ -289,7 +284,7 @@
 				JournalFile.PagePosition value;
 				if (files[i].PageTranslationTable.TryGetValue(tx, pageNumber, out value))
 				{
-					var page = _env.ScratchBufferPool.ReadPage(value.ScratchPos);
+					var page = _env.ScratchBufferPool.ReadPage(value.ScratchPos, scratchPagerState);

 					Debug.Assert(page.PageNumber == pageNumber);

@@ -356,89 +351,11 @@ public void Clear(Transaction tx)
 			CurrentFile = null;
 		}

-		public class JournalSyncEventArgs : EventArgs
-		{
-			public long OldestTransactionId { get; private set; }
-
-			public JournalSyncEventArgs(long oldestTransactionId)
-			{
-				OldestTransactionId = oldestTransactionId;
-			}
-		}
-
-		public class JournalShipper
-		{
-			private readonly ReaderWriterLockSlim _shippingSemaphore;
-			private readonly WriteAheadJournal _waj;
-
-			public JournalShipper(WriteAheadJournal waj, ReaderWriterLockSlim shippingSemaphore = null)
-			{
-				_waj = waj;
-				_shippingSemaphore = shippingSemaphore ?? new ReaderWriterLockSlim();
-			}
-
-			public IEnumerable ReadJournalForShippings(long lastTransactionId)
-			{
-				bool locked = false;
-				if (_shippingSemaphore.IsReadLockHeld == false)
-				{
-					if (_shippingSemaphore.TryEnterReadLock(Debugger.IsAttached ? 
TimeSpan.FromMinutes(30) : TimeSpan.FromSeconds(30)) == false) - throw new TimeoutException("Could not acquire the read lock in 30 seconds"); - locked = true; - } - - try - { - var logInfo = _waj._headerAccessor.Get(ptr => ptr->Journal); - var transactionsToShip = new List(); - - for (int journalNumber = 0; journalNumber < logInfo.JournalFilesCount; journalNumber++) - { - var journalReader = new JournalReader(_waj._env.Options.OpenJournalPager(journalNumber), null, lastTransactionId, null); - var journalLogs = journalReader.ReadJournalForShipping(_waj._env.Options).ToList(); - - if (journalLogs.Count > 0) - transactionsToShip.AddRange(journalLogs); - } - - return transactionsToShip; - } - finally - { - if(locked) - _shippingSemaphore.ExitReadLock(); - } - } - - public void ApplyShippedLogs(IEnumerable shippedTransactions) - { - if(shippedTransactions == null) - throw new ArgumentNullException(); - shippedTransactions = shippedTransactions.OrderBy(x => x.Header.TransactionId).ToList(); - - if (shippedTransactions.Any() == false) - return; - - using (var tempPager = _waj._env.Options.CreateScratchPager(StorageEnvironmentOptions.TempBufferName())) - { - tempPager.DeleteOnClose = true; - var shippedTransactionsReader = new ShippedTransactionsReader(tempPager); - shippedTransactionsReader.ReadTransactions(shippedTransactions); - - using (var tx = _waj._env.NewTransaction(TransactionFlags.ReadWrite)) - { - tx.WriteDirect(shippedTransactionsReader.RawPageData); - tx.Commit(); - } - } - } - } - public class JournalApplicator : IDisposable { - private const long DelayedDataFileSynchronizationBytesLimit = 2L*1024*1024*1024; - private readonly TimeSpan DelayedDataFileSynchronizationTimeLimit = TimeSpan.FromMinutes(1); - private readonly ReaderWriterLockSlim _flushingSemaphore; + private const long DelayedDataFileSynchronizationBytesLimit = 2L * 1024 * 1024 * 1024; + private readonly TimeSpan _delayedDataFileSynchronizationTimeLimit = TimeSpan.FromMinutes(1); + private readonly ReaderWriterLockSlim _flushingSemaphore = new ReaderWriterLockSlim(); private readonly Dictionary _journalsToDelete = new Dictionary(); private readonly WriteAheadJournal _waj; private long _lastSyncedTransactionId; @@ -447,20 +364,12 @@ public class JournalApplicator : IDisposable private DateTime _lastDataFileSyncTime; private JournalFile _lastFlushedJournal; - public event EventHandler ApplyLogsToDataFileFinished; - - public JournalApplicator(WriteAheadJournal waj, ReaderWriterLockSlim flushingSemaphore = null) + public JournalApplicator(WriteAheadJournal waj) { _waj = waj; - _flushingSemaphore = flushingSemaphore ?? 
new ReaderWriterLockSlim(); } - public long LastSyncedTransactionId - { - get { return _lastSyncedTransactionId; } - } - - public void ApplyLogsToDataFile(long oldestActiveTransaction, Transaction transaction = null) + public void ApplyLogsToDataFile(long oldestActiveTransaction, CancellationToken token, Transaction transaction = null) { bool locked = false; if (_flushingSemaphore.IsWriteLockHeld == false) @@ -472,18 +381,74 @@ public void ApplyLogsToDataFile(long oldestActiveTransaction, Transaction transa try { - var alreadyInWriteTx = transaction != null && transaction.Flags == TransactionFlags.ReadWrite; + if (token.IsCancellationRequested) + return; - var journalSnapshots = _waj.Files.Select(x => x.GetSnapshot()) - .OrderBy(x => x.Number) - .ToList(); + var alreadyInWriteTx = transaction != null && transaction.Flags == TransactionFlags.ReadWrite; - if (journalSnapshots.Count == 0) + var jrnls = _waj._files.Select(x => x.GetSnapshot()).OrderBy(x => x.Number).ToList(); + if (jrnls.Count == 0) return; // nothing to do - Debug.Assert(journalSnapshots.First().Number >= _lastSyncedJournal); + Debug.Assert(jrnls.First().Number >= _lastSyncedJournal); + + var pagesToWrite = new Dictionary(); + + long lastProcessedJournal = -1; + long previousJournalMaxTransactionId = -1; + + long lastFlushedTransactionId = -1; + + foreach (var journalFile in jrnls.Where(x => x.Number >= _lastSyncedJournal)) + { + var currentJournalMaxTransactionId = -1L; + + foreach (var pagePosition in journalFile.PageTranslationTable.IterateLatestAsOf(journalFile.LastTransaction)) + { + if (oldestActiveTransaction != 0 && + pagePosition.Value.TransactionId >= oldestActiveTransaction) + { + // we cannot write this yet, there is a read transaction that might be looking at this + // however, we _aren't_ going to be writing this to the data file, since that would be a + // waste, we would just overwrite that value in the next flush anyway + JournalFile.PagePosition existingPagePosition; + if (pagesToWrite.TryGetValue(pagePosition.Key, out existingPagePosition) && + pagePosition.Value.JournalNumber == existingPagePosition.JournalNumber) + { + // remove the page only when it comes from the same journal + // otherwise we can damage the journal's page translation table (PTT) + // because the existing overwrite in a next journal can be filtered out + // so we wouldn't write any page to the data file + pagesToWrite.Remove(pagePosition.Key); + } + + continue; + } + + if (journalFile.Number == _lastSyncedJournal && pagePosition.Value.TransactionId <= _lastSyncedTransactionId) + continue; + + currentJournalMaxTransactionId = Math.Max(currentJournalMaxTransactionId, pagePosition.Value.TransactionId); + + if (currentJournalMaxTransactionId < previousJournalMaxTransactionId) + throw new InvalidOperationException( + "Journal applicator read beyond the oldest active transaction in the next journal file. " + + "This should never happen. 
Current journal max tx id: " + currentJournalMaxTransactionId + + ", previous journal max ix id: " + previousJournalMaxTransactionId + + ", oldest active transaction: " + oldestActiveTransaction); + + + lastProcessedJournal = journalFile.Number; + pagesToWrite[pagePosition.Key] = pagePosition.Value; + + lastFlushedTransactionId = currentJournalMaxTransactionId; + } - var pagesToWrite = GetPagesFromJournals(oldestActiveTransaction, journalSnapshots); + if (currentJournalMaxTransactionId == -1L) + continue; + + previousJournalMaxTransactionId = currentJournalMaxTransactionId; + } if (pagesToWrite.Count == 0) return; @@ -498,32 +463,53 @@ public void ApplyLogsToDataFile(long oldestActiveTransaction, Transaction transa return; } - var unusedJournals = GetAlreadyHandledJournalFiles(journalSnapshots); + var unusedJournals = GetUnusedJournalFiles(jrnls, lastProcessedJournal, lastFlushedTransactionId); + + foreach (var unused in unusedJournals.Where(unused => !_journalsToDelete.ContainsKey(unused.Number))) + { + _journalsToDelete.Add(unused.Number, unused); + } using (var txw = alreadyInWriteTx ? null : _waj._env.NewTransaction(TransactionFlags.ReadWrite)) { - _lastFlushedJournal = _waj.Files.First(x => x.Number == _lastSyncedJournal); + _lastSyncedJournal = lastProcessedJournal; + _lastSyncedTransactionId = lastFlushedTransactionId; - RemoveUnusedJournalsIfNeeded(unusedJournals); + _lastFlushedJournal = _waj._files.First(x => x.Number == _lastSyncedJournal); + + if (unusedJournals.Count > 0) + { + var lastUnusedJournalNumber = unusedJournals.Last().Number; + _waj._files = _waj._files.RemoveWhile(x => x.Number <= lastUnusedJournalNumber, new List()); + } if (_waj._files.Count == 0) _waj.CurrentFile = null; FreeScratchPages(unusedJournals, txw); - var hasSynced = false; if (_totalWrittenButUnsyncedBytes > DelayedDataFileSynchronizationBytesLimit || - DateTime.Now - _lastDataFileSyncTime > DelayedDataFileSynchronizationTimeLimit) + DateTime.Now - _lastDataFileSyncTime > _delayedDataFileSynchronizationTimeLimit) { - SyncDataFileWrites(oldestActiveTransaction); - hasSynced = true; + _waj._dataPager.Sync(); + + UpdateFileHeaderAfterDataFileSync(_lastFlushedJournal, oldestActiveTransaction); + + foreach (var toDelete in _journalsToDelete.Values) + { + if (_waj._env.Options.IncrementalBackupEnabled == false) + toDelete.DeleteOnClose = true; + + toDelete.Release(); + } + + _journalsToDelete.Clear(); + _totalWrittenButUnsyncedBytes = 0; + _lastDataFileSyncTime = DateTime.Now; } if (txw != null) txw.Commit(); - - if (hasSynced) - OnApplyLogsToDataFileFinished(_lastSyncedTransactionId); } } finally @@ -533,143 +519,7 @@ public void ApplyLogsToDataFile(long oldestActiveTransaction, Transaction transa } } - private void SyncDataFileWrites(long oldestActiveTransaction) - { - _waj._dataPager.Sync(); - - UpdateFileHeaderAfterDataFileSync(_lastFlushedJournal, oldestActiveTransaction); - - foreach (var toDelete in _journalsToDelete.Values) - { - if (_waj._env.Options.IncrementalBackupEnabled == false) - toDelete.DeleteOnClose = true; - - toDelete.Release(); - } - - _journalsToDelete.Clear(); - _totalWrittenButUnsyncedBytes = 0; - _lastDataFileSyncTime = DateTime.Now; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void RemoveUnusedJournalsIfNeeded(IReadOnlyCollection unusedJournals) - { - if (unusedJournals.Any()) - _waj._files = _waj._files.RemoveWhile(x => x.Number <= unusedJournals.Last().Number, new List()); - } - - - protected IReadOnlyList GetAlreadyHandledJournalFiles(IEnumerable 
journalSnapshots) - { - var unusedJournalFiles = new List(); - foreach (var j in journalSnapshots) - { - if (j.Number > _lastSyncedJournal) // after the last log we handled, nothing to do here - continue; - if (j.Number == _lastSyncedJournal) // we are in the last log we handled - { - if (j.AvailablePages != 0 || // if there are more pages to be used here or - j.PageTranslationTable.MaxTransactionId() != _lastSyncedTransactionId) // we didn't handle the whole journal - continue; // do not mark it as handled - } - unusedJournalFiles.Add(_waj.Files.First(x => x.Number == j.Number)); - } - return unusedJournalFiles; - } - - protected Dictionary GetPagesFromJournals(long oldestActiveTransaction, IEnumerable journalSnapshots) - { - var pagesToWrite = new Dictionary(); - - long lastProcessedJournal = -1; - long previousJournalMaxTransactionId = -1; - long lastFlushedTransactionId = -1; - - foreach (var journalFile in journalSnapshots.Where(x => x.Number >= _lastSyncedJournal)) - { - var currentJournalMaxTransactionId = -1L; - - foreach (var pagePosition in journalFile.PageTranslationTable.IterateLatestAsOf(journalFile.LastTransaction)) - { - if (ShouldSkipFetchingPagePosition(oldestActiveTransaction, pagePosition, pagesToWrite)) - continue; - - var isAlreadyHandled = journalFile.Number == _lastSyncedJournal && - pagePosition.Value.TransactionId <= _lastSyncedTransactionId; - if (isAlreadyHandled) - continue; - - currentJournalMaxTransactionId = GetAndValidateOldestTransactionId(currentJournalMaxTransactionId, - oldestActiveTransaction, - pagePosition, - previousJournalMaxTransactionId); - - lastProcessedJournal = journalFile.Number; - pagesToWrite[pagePosition.Key] = pagePosition.Value; - - lastFlushedTransactionId = currentJournalMaxTransactionId; - } - - if (currentJournalMaxTransactionId == -1L) - continue; - - previousJournalMaxTransactionId = currentJournalMaxTransactionId; - } - - _lastSyncedJournal = lastProcessedJournal; - _lastSyncedTransactionId = lastFlushedTransactionId; - - return pagesToWrite; - } - - private static long GetAndValidateOldestTransactionId(long currentJournalMaxTransactionId, long oldestActiveTransaction, - KeyValuePair pagePosition, long previousJournalMaxTransactionId) - { - currentJournalMaxTransactionId = Math.Max(currentJournalMaxTransactionId, pagePosition.Value.TransactionId); - - if (currentJournalMaxTransactionId < previousJournalMaxTransactionId) - throw new InvalidOperationException( - "Journal applicator read beyond the oldest active transaction in the next journal file. " + - "This should never happen. 
Current journal max tx id: " + currentJournalMaxTransactionId + - ", previous journal max ix id: " + previousJournalMaxTransactionId + - ", oldest active transaction: " + oldestActiveTransaction); - return currentJournalMaxTransactionId; - } - - protected static bool ShouldSkipFetchingPagePosition(long oldestActiveTransaction, KeyValuePair pagePosition, - Dictionary pagesToWrite) - { - if (oldestActiveTransaction != 0 && - pagePosition.Value.TransactionId >= oldestActiveTransaction) - { - // we cannot write this yet, there is a read transaction that might be looking at this - // however, we _aren't_ going to be writing this to the data file, since that would be a - // waste, we would just overwrite that value in the next flush anyway - JournalFile.PagePosition existingPagePosition; - if (pagesToWrite.TryGetValue(pagePosition.Key, out existingPagePosition) && - pagePosition.Value.JournalNumber == existingPagePosition.JournalNumber) - { - // remove the page only when it comes from the same journal - // otherwise we can damage the journal's page translation table (PTT) - // because the existing overwrite in a next journal can be filtered out - // so we wouldn't write any page to the data file - pagesToWrite.Remove(pagePosition.Key); - } - - return true; - } - return false; - } - - protected void OnApplyLogsToDataFileFinished(long oldestTransactionId) - { - var applyLogsToDataFileFinished = ApplyLogsToDataFileFinished; - if (applyLogsToDataFileFinished != null) - applyLogsToDataFileFinished(this, new JournalSyncEventArgs(oldestTransactionId)); - } - - public Dictionary writtenPages = new Dictionary(); + public Dictionary writtenPages = new Dictionary(); private void ApplyPagesToDataFileFromScratch(Dictionary pagesToWrite, Transaction transaction, bool alreadyInWriteTx) { @@ -680,8 +530,8 @@ private void ApplyPagesToDataFileFromScratch(Dictionary x.Key) - .Select(x => scratchBufferPool.ReadPage(x.Value.ScratchPos, scratchPagerState)) - .ToList(); + .Select(x => scratchBufferPool.ReadPage(x.Value.ScratchPos, scratchPagerState)) + .ToList(); var last = sortedPages.Last(); @@ -737,12 +587,31 @@ private void FreeScratchPages(IEnumerable unusedJournalFiles, Trans journalFile.FreeScratchPagesOlderThan(txw, _lastSyncedTransactionId); } + foreach (var jrnl in _waj._files.OrderBy(x => x.Number)) { jrnl.FreeScratchPagesOlderThan(txw, _lastSyncedTransactionId); } } + private List GetUnusedJournalFiles(IEnumerable jrnls, long lastProcessedJournal, long lastFlushedTransactionId) + { + var unusedJournalFiles = new List(); + foreach (var j in jrnls) + { + if (j.Number > lastProcessedJournal) // after the last log we synced, nothing to do here + continue; + if (j.Number == lastProcessedJournal) // we are in the last log we synced + { + if (j.AvailablePages != 0 || // if there are more pages to be used here or + j.PageTranslationTable.MaxTransactionId() != lastFlushedTransactionId) // we didn't synchronize whole journal + continue; // do not mark it as unused + } + unusedJournalFiles.Add(_waj._files.First(x => x.Number == j.Number)); + } + return unusedJournalFiles; + } + public void UpdateFileHeaderAfterDataFileSync(JournalFile file, long oldestActiveTransaction) { var txHeaders = stackalloc TransactionHeader[2]; @@ -760,7 +629,7 @@ public void UpdateFileHeaderAfterDataFileSync(JournalFile file, long oldestActiv break; lastReadTxHeader = *readTxHeader; - + var compressedPages = (readTxHeader->CompressedSize / AbstractPager.PageSize) + (readTxHeader->CompressedSize % AbstractPager.PageSize == 0 ? 
0 : 1); txPos += compressedPages + 1; @@ -796,52 +665,30 @@ public void Dispose() } } - public IDisposable TakeFlushingLock() - { - _flushingSemaphore.EnterWriteLock(); - return new DisposableAction(() => _flushingSemaphore.ExitWriteLock()); - } + public IDisposable TakeFlushingLock() + { + _flushingSemaphore.EnterWriteLock(); + return new DisposableAction(() => _flushingSemaphore.ExitWriteLock()); + } } - private uint _previousTransactionCrc; - public void WriteToJournal(Transaction tx, int pageCount) { - var pages = CompressPages(tx, pageCount, _compressionPager); - - if (CurrentFile == null || CurrentFile.AvailablePages < pages.Length) - { - CurrentFile = NextFile(pages.Length); - } - - var transactionHeader = *(TransactionHeader*)pages[0]; + var pages = CompressPages(tx, pageCount, _compressionPager); - var writePage = CurrentFile.Write(tx, pages); - - var onTransactionCommit = OnTransactionCommit; - if (onTransactionCommit != null) + if (CurrentFile == null || CurrentFile.AvailablePages < pages.Length) { - var bufferSize = pages.Length * AbstractPager.PageSize; - var buffer = new byte[bufferSize]; - - fixed (byte* bp = buffer) - CurrentFile.JournalWriter.Read(writePage, bp, bufferSize); - - var stream = new MemoryStream(buffer,AbstractPager.PageSize, (pages.Length - 1) * AbstractPager.PageSize); - var transactionToShip = new TransactionToShip(transactionHeader) - { - CompressedData = stream, - PreviousTransactionCrc = _previousTransactionCrc - }; - - _previousTransactionCrc = transactionHeader.Crc; - onTransactionCommit(transactionToShip); + CurrentFile = NextFile(pages.Length); + } + CurrentFile.Write(tx, pages); + if (CurrentFile.AvailablePages == 0) + { + CurrentFile = null; } - if (CurrentFile.AvailablePages == 0) - CurrentFile = null; } + private byte*[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager) { // numberOfPages include the tx header page, which we don't compress @@ -868,19 +715,19 @@ public void WriteToJournal(Transaction tx, int pageCount) write += count; } - var sizeAfterCompression = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer); + var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer); + var compressedPages = (len / AbstractPager.PageSize) + (len % AbstractPager.PageSize == 0 ? 0 : 1); + + var pages = new byte*[compressedPages + 1]; - var compressedPages = (sizeAfterCompression / AbstractPager.PageSize) + (sizeAfterCompression % AbstractPager.PageSize == 0 ? 
0 : 1); var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer); var txHeader = (TransactionHeader*)txHeaderBase; txHeader->Compressed = true; - txHeader->CompressedSize = sizeAfterCompression; + txHeader->CompressedSize = len; txHeader->UncompressedSize = sizeInBytes; - var pages = new byte*[compressedPages + 1]; pages[0] = txHeaderBase; - for (int index = 0; index < compressedPages; index++) { pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize); @@ -903,139 +750,4 @@ private int DoCompression(byte* input, byte* output, int inputLength, int output return doCompression; } } - - public unsafe class UnmanagedVectorMemoryStream : Stream - { - private readonly byte*[] _pages; - private readonly int _start; - private readonly int _pageSize; - - public UnmanagedVectorMemoryStream(byte*[] pages, int start, int pageSize) - { - _pages = pages; - _start = start; - _pageSize = pageSize; - _length = _pageSize*(_pages.Length - start); - } - -#if DEBUG - internal byte[] DebugReadAllData(int pageSize = 0) - { - if (pageSize == 0) - pageSize = _pageSize; - - var buffer = new byte[pageSize*(_pages.Length - _start)]; - - fixed(byte* bp = buffer) - for (int pageIndex = _start; pageIndex < _pages.Length; pageIndex++) - NativeMethods.memcpy(bp + ((pageIndex - _start)*pageSize), _pages[pageIndex], pageSize); - - return buffer; - } -#endif - public override void Flush() - { - throw new NotSupportedException(); - } - - public override long Seek(long offset, SeekOrigin origin) - { - switch (origin) - { - case SeekOrigin.Current: - if (Position + offset > Length) - throw new ArgumentOutOfRangeException("offset"); - Position += offset; - break; - case SeekOrigin.Begin: - if(offset > Length || offset < 0) - throw new ArgumentOutOfRangeException("offset"); - Position = offset; - break; - case SeekOrigin.End: - if(offset > Length) - throw new ArgumentOutOfRangeException("offset"); - Position = Length - offset; - break; - } - - return Position; - } - - public override void SetLength(long value) - { - throw new NotSupportedException(); - } - - public override int Read(byte[] buffer, int offset, int count) - { - fixed (byte* pb = buffer) - { - count = Math.Min(count, (int) (Length - Position)); - if (count == 0) - return 0; - int read = 0; - - var pageSpan = count / _pageSize; - if (count % _pageSize > 0) //consider in page span also partial pages - pageSpan++; - - var startPage = (Position / _pageSize) + _start; - var positionInStartPage = (int)(Position % _pageSize); - var endPage = startPage + pageSpan - 1; - - Debug.Assert(endPage >= startPage); - - int firstPageCount = _pageSize - positionInStartPage; - NativeMethods.memcpy(pb + offset, _pages[startPage] + positionInStartPage, firstPageCount); - read += firstPageCount; - count -= firstPageCount; - - for (var pageIndex = startPage + 1; pageIndex < endPage; pageIndex++) - { - NativeMethods.memcpy(pb + offset + read, _pages[pageIndex], _pageSize); - read += _pageSize; - count -= _pageSize; - } - - if (count > 0) - { - NativeMethods.memcpy(pb + offset + read, _pages[endPage], count); - read += count; - } - - Position += read; - return read; - } - } - - public override void Write(byte[] buffer, int offset, int count) - { - throw new NotSupportedException(); - } - - public override bool CanRead - { - get { return true; } - } - - public override bool CanSeek - { - get { return true; } - } - - public override bool CanWrite - { - get { return false; } - } - - - private readonly long _length; - public 
override long Length - { - get { return _length; } - } - - public override long Position { get; set; } - } } diff --git a/Voron/Impl/Paging/AbstractPager.cs b/Voron/Impl/Paging/AbstractPager.cs index e0fd7352d7..38b2cd6ac1 100644 --- a/Voron/Impl/Paging/AbstractPager.cs +++ b/Voron/Impl/Paging/AbstractPager.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Concurrent; using System.Diagnostics; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Threading.Tasks; using Voron.Trees; @@ -11,7 +12,6 @@ namespace Voron.Impl.Paging public unsafe abstract class AbstractPager : IVirtualPager { protected int MinIncreaseSize { get { return 16 * PageSize; } } - private long _increaseSize; private DateTime _lastIncrease; @@ -19,20 +19,19 @@ public PagerState PagerState { get { - Debug.Assert(Disposed == false); - - return _pagerState; + ThrowObjectDisposedIfNeeded(); + return _pagerState; } - set + set { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); _source = GetSourceName(); _pagerState = value; } } - private string _source; + private string _source; protected AbstractPager() { _increaseSize = MinIncreaseSize; @@ -58,7 +57,7 @@ protected AbstractPager() public Page Read(long pageNumber, PagerState pagerState = null) { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); if (pageNumber + 1 > NumberOfAllocatedPages) { @@ -66,14 +65,14 @@ public Page Read(long pageNumber, PagerState pagerState = null) " because number of allocated pages is " + NumberOfAllocatedPages); } - return new Page(AcquirePagePointer(pageNumber, pagerState), _source); + return new Page(AcquirePagePointer(pageNumber, pagerState), _source, PageSize); } protected abstract string GetSourceName(); public virtual Page GetWritable(long pageNumber) { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); if (pageNumber + 1 > NumberOfAllocatedPages) { @@ -81,7 +80,7 @@ public virtual Page GetWritable(long pageNumber) " because number of allocated pages is " + NumberOfAllocatedPages); } - return new Page(AcquirePagePointer(pageNumber), _source); + return new Page(AcquirePagePointer(pageNumber), _source, PageSize); } public abstract byte* AcquirePagePointer(long pageNumber, PagerState pagerState = null); @@ -90,7 +89,7 @@ public virtual Page GetWritable(long pageNumber) public virtual PagerState TransactionBegan() { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); var state = PagerState; state.AddRef(); @@ -99,14 +98,14 @@ public virtual PagerState TransactionBegan() public bool WillRequireExtension(long requestedPageNumber, int numberOfPages) { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); return requestedPageNumber + numberOfPages > NumberOfAllocatedPages; } public void EnsureContinuous(Transaction tx, long requestedPageNumber, int numberOfPages) { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); if (requestedPageNumber + numberOfPages <= NumberOfAllocatedPages) return; @@ -126,15 +125,15 @@ public void EnsureContinuous(Transaction tx, long requestedPageNumber, int numbe public bool ShouldGoToOverflowPage(int len) { - Debug.Assert(Disposed == false); + ThrowObjectDisposedIfNeeded(); return len + Constants.PageHeaderSize > MaxNodeSize; } public int GetNumberOfOverflowPages(int overflowSize) { - Debug.Assert(Disposed == false); - + ThrowObjectDisposedIfNeeded(); + overflowSize += Constants.PageHeaderSize; return (overflowSize / PageSize) + (overflowSize % PageSize == 0 ? 
0 : 1); } @@ -145,19 +144,22 @@ public int GetNumberOfOverflowPages(int overflowSize) public virtual void Dispose() { - if (PagerState != null) - { - PagerState.Release(); - PagerState = null; - } + if (Disposed) + return; - Task.WaitAll(_tasks.ToArray()); + if (PagerState != null) + { + PagerState.Release(); + PagerState = null; + } - Disposed = true; - GC.SuppressFinalize(this); - } + Task.WaitAll(_tasks.ToArray()); + + Disposed = true; + GC.SuppressFinalize(this); + } - ~AbstractPager() + ~AbstractPager() { Dispose(); } @@ -204,5 +206,13 @@ public void RegisterDisposal(Task run) { _tasks.Add(run); } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + protected void ThrowObjectDisposedIfNeeded() + { + if (Disposed) + throw new ObjectDisposedException("The pager is already disposed"); + } + } } diff --git a/Voron/Impl/Paging/FilePager.cs b/Voron/Impl/Paging/FilePager.cs index 01a9d34fd4..73db359db8 100644 --- a/Voron/Impl/Paging/FilePager.cs +++ b/Voron/Impl/Paging/FilePager.cs @@ -80,7 +80,8 @@ public override void AllocateMorePages(Transaction tx, long newLength) Debug.Assert(_fileStream.Length == newLength); - PagerState.Release(); // when the last transaction using this is over, will dispose it + var tmp = PagerState; + PagerState newPager = CreateNewPagerState(); if (tx != null) // we only pass null during startup, and we don't need it there @@ -90,6 +91,7 @@ public override void AllocateMorePages(Transaction tx, long newLength) } PagerState = newPager; + tmp.Release(); // when the last transaction using this is over, will dispose it NumberOfAllocatedPages = newLength / PageSize; } @@ -184,6 +186,8 @@ public override int WriteDirect(Page start, long pagePosition, int pagesToWrite) public override void Dispose() { + if (Disposed) + return; base.Dispose(); _fileStream.Dispose(); diff --git a/Voron/Impl/Paging/TemporaryPage.cs b/Voron/Impl/Paging/TemporaryPage.cs index cb1cec7285..cc0acba0ed 100644 --- a/Voron/Impl/Paging/TemporaryPage.cs +++ b/Voron/Impl/Paging/TemporaryPage.cs @@ -38,7 +38,7 @@ public Page TempPage { get { - return new Page((byte*)_tempPage.ToPointer(), "temp") + return new Page((byte*)_tempPage.ToPointer(), "temp", AbstractPager.PageSize) { Upper = AbstractPager.PageSize, Lower = (ushort)Constants.PageHeaderSize, @@ -46,6 +46,6 @@ public Page TempPage }; } } - + public IDisposable ReturnTemporaryPageToPool { get; set; } } } \ No newline at end of file diff --git a/Voron/Impl/Paging/Win32MemoryMapPager.cs b/Voron/Impl/Paging/Win32MemoryMapPager.cs index 6711c33f90..5cc3850907 100644 --- a/Voron/Impl/Paging/Win32MemoryMapPager.cs +++ b/Voron/Impl/Paging/Win32MemoryMapPager.cs @@ -38,6 +38,7 @@ private struct SplitValue } public Win32MemoryMapPager(string file, + long? initialFileSize = null, NativeFileAttributes options = NativeFileAttributes.Normal, NativeFileAccess access = NativeFileAccess.GenericRead | NativeFileAccess.GenericWrite) { @@ -51,7 +52,6 @@ public Win32MemoryMapPager(string file, ? 
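// [Editor's note] Across the pagers this patch replaces Debug.Assert(Disposed == false),
// which vanishes in release builds, with a guard that always throws, and it makes Dispose
// idempotent. A self-contained sketch of the pattern (the class here is hypothetical):
using System;
using System.Runtime.CompilerServices;

public abstract class GuardedDisposable : IDisposable
{
    public bool Disposed { get; protected set; }

    public virtual void Dispose()
    {
        if (Disposed)
            return;                  // a second call is a no-op, as in the patched pagers
        Disposed = true;
        GC.SuppressFinalize(this);
    }

    ~GuardedDisposable()
    {
        Dispose();                   // finalizer safety net, mirroring AbstractPager
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    protected void ThrowObjectDisposedIfNeeded()
    {
        if (Disposed)
            throw new ObjectDisposedException("The pager is already disposed");
    }
}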
MemoryMappedFileAccess.Read : MemoryMappedFileAccess.ReadWrite; - Trace.WriteLine(string.Format("creating/opening file (name = {0}), access type = {1}", file, _memoryMappedFileAccess)); _handle = NativeFileMethods.CreateFile(file, access, NativeFileShare.Read | NativeFileShare.Write | NativeFileShare.Delete, IntPtr.Zero, NativeFileCreationDisposition.OpenAlways, options, IntPtr.Zero); @@ -75,14 +75,17 @@ public Win32MemoryMapPager(string file, _access.HasFlag(NativeFileAccess.GenericAll) || _access.HasFlag(NativeFileAccess.FILE_GENERIC_WRITE)) { - long fileLengthAfterAdjustment = _fileStream.Length; - if (_fileStream.Length == 0 || (_fileStream.Length%AllocationGranularity != 0)) + var fileLength = _fileStream.Length; + if (fileLength == 0 && initialFileSize.HasValue) + fileLength = initialFileSize.Value; + + if (_fileStream.Length == 0 || (fileLength % AllocationGranularity != 0)) { - fileLengthAfterAdjustment = NearestSizeToAllocationGranularity(_fileInfo.Length); - _fileStream.SetLength(fileLengthAfterAdjustment); + fileLength = NearestSizeToAllocationGranularity(fileLength); + _fileStream.SetLength(fileLength); } - _totalAllocationSize = fileLengthAfterAdjustment; + _totalAllocationSize = fileLength; } NumberOfAllocatedPages = _totalAllocationSize / PageSize; @@ -102,6 +105,8 @@ private long NearestSizeToAllocationGranularity(long size) public override void AllocateMorePages(Transaction tx, long newLength) { + ThrowObjectDisposedIfNeeded(); + var newLengthAfterAdjustment = NearestSizeToAllocationGranularity(newLength); if (newLengthAfterAdjustment < _totalAllocationSize) @@ -136,8 +141,9 @@ public override void AllocateMorePages(Transaction tx, long newLength) tx.AddPagerState(newPagerState); } - PagerState.Release(); //replacing the pager state --> so one less reference for it + var tmp = PagerState; PagerState = newPagerState; + tmp.Release(); //replacing the pager state --> so one less reference for it } _totalAllocationSize += allocationSize; @@ -239,11 +245,15 @@ protected override string GetSourceName() public override byte* AcquirePagePointer(long pageNumber, PagerState pagerState = null) { + ThrowObjectDisposedIfNeeded(); + return (pagerState ?? 
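// [Editor's note] NearestSizeToAllocationGranularity (its body lies outside these hunks)
// rounds a requested length up to a multiple of the OS allocation granularity, which is what
// the constructor above relies on when adjusting fileLength. A hedged sketch of such
// rounding; the actual implementation may differ:
private static long RoundUpToGranularity(long size, long allocationGranularity)
{
    // e.g. with a 64 KB granularity (the common Windows value): 1 -> 65536, 65536 -> 65536
    var remainder = size % allocationGranularity;
    return remainder == 0 ? size : size + (allocationGranularity - remainder);
}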
PagerState).MapBase + (pageNumber*PageSize); } public override void Sync() { + ThrowObjectDisposedIfNeeded(); + if (PagerState.AllocationInfos.Any(allocationInfo => MemoryMapNativeMethods.FlushViewOfFile(allocationInfo.BaseAddress, new IntPtr(allocationInfo.Size)) == false)) throw new Win32Exception(); @@ -268,6 +278,8 @@ public override string ToString() public override int WriteDirect(Page start, long pagePosition, int pagesToWrite) { + ThrowObjectDisposedIfNeeded(); + int toCopy = pagesToWrite*PageSize; NativeMethods.memcpy(PagerState.MapBase + pagePosition*PageSize, start.Base, toCopy); @@ -276,6 +288,9 @@ public override int WriteDirect(Page start, long pagePosition, int pagesToWrite) public override void Dispose() { + if (Disposed) + return; + _fileStream.Dispose(); _handle.Close(); if (DeleteOnClose) diff --git a/Voron/Impl/Paging/Win32PageFileBackedMemoryMappedPager.cs b/Voron/Impl/Paging/Win32PageFileBackedMemoryMappedPager.cs index 8ba8c5c20f..b9114574d3 100644 --- a/Voron/Impl/Paging/Win32PageFileBackedMemoryMappedPager.cs +++ b/Voron/Impl/Paging/Win32PageFileBackedMemoryMappedPager.cs @@ -14,18 +14,24 @@ namespace Voron.Impl.Paging { public unsafe class Win32PageFileBackedMemoryMappedPager : AbstractPager { + private readonly string _name; public readonly long AllocationGranularity; private long _totalAllocationSize; private const int MaxAllocationRetries = 100; + private static int _counter; + private readonly int _instanceId; - public Win32PageFileBackedMemoryMappedPager() + public Win32PageFileBackedMemoryMappedPager(string name, long? initialFileSize = null) { - NativeMethods.SYSTEM_INFO systemInfo; + _name = name; + NativeMethods.SYSTEM_INFO systemInfo; NativeMethods.GetSystemInfo(out systemInfo); AllocationGranularity = systemInfo.allocationGranularity; - _totalAllocationSize = systemInfo.allocationGranularity; + _totalAllocationSize = initialFileSize.HasValue ? NearestSizeToAllocationGranularity(initialFileSize.Value) : systemInfo.allocationGranularity; + + _instanceId = Interlocked.Increment(ref _counter); PagerState.Release(); Debug.Assert(AllocationGranularity % PageSize == 0); @@ -35,7 +41,7 @@ public Win32PageFileBackedMemoryMappedPager() protected override string GetSourceName() { - return "MemMapInSystemPage, Size : " + _totalAllocationSize; + return "MemMapInSystemPage: " + _name + " " + _instanceId + ", Size : " + _totalAllocationSize; } public override byte* AcquirePagePointer(long pageNumber, PagerState pagerState = null) @@ -53,6 +59,7 @@ public override int Write(Page page, long? pageNumber) { long startPage = pageNumber ?? page.PageNumber; + //note: GetNumberOfOverflowPages and WriteDirect can throw ObjectDisposedException if the pager is already disposed int toWrite = page.IsOverflow ? GetNumberOfOverflowPages(page.OverflowSize) : 1; return WriteDirect(page, startPage, toWrite); @@ -60,6 +67,7 @@ public override int Write(Page page, long? 
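// [Editor's note] Sync in the memory-map pager above must flush every mapped range, not just
// one view. A condensed loop equivalent of the LINQ check it uses (the unsafe pager context
// and the MemoryMapNativeMethods wrapper are assumed from the surrounding code):
foreach (var allocationInfo in PagerState.AllocationInfos)
{
    if (MemoryMapNativeMethods.FlushViewOfFile(allocationInfo.BaseAddress, new IntPtr(allocationInfo.Size)) == false)
        throw new Win32Exception(); // surfaces the Win32 error for the failing range
}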
pageNumber) public override void AllocateMorePages(Transaction tx, long newLength) { + ThrowObjectDisposedIfNeeded(); var newLengthAfterAdjustment = NearestSizeToAllocationGranularity(newLength); if (newLengthAfterAdjustment < _totalAllocationSize) @@ -95,8 +103,9 @@ public override void AllocateMorePages(Transaction tx, long newLength) // we always share the same memory mapped files references between all pages, since to close them // would be to lose all the memory assoicated with them PagerState.DisposeFilesOnDispose = false; - PagerState.Release(); //replacing the pager state --> so one less reference for it - PagerState = newPagerState; + var tmp = PagerState; + PagerState = newPagerState; + tmp.Release(); //replacing the pager state --> so one less reference for it } _totalAllocationSize += allocationSize; @@ -106,6 +115,8 @@ public override void AllocateMorePages(Transaction tx, long newLength) public override int WriteDirect(Page start, long pagePosition, int pagesToWrite) { + ThrowObjectDisposedIfNeeded(); + int toCopy = pagesToWrite * PageSize; NativeMethods.memcpy(PagerState.MapBase + pagePosition * PageSize, start.Base, toCopy); @@ -128,7 +139,7 @@ private PagerState AllocateMorePagesAndRemapContinuously(long allocationSize) { var message = string.Format( - "Unable to allocate more pages - unsucsessfully tried to allocate continuous block of size = {0} bytes\r\n" + + "Unable to allocate more pages - unsuccessfully tried to allocate continuous block of size = {0} bytes\r\n" + "It is likely that we are suffering from virtual memory exhaustion or memory fragmentation.\r\n" + "64 bits process: {1}\r\n" + "If you are running in 32 bits, this is expected, and you need to run in 64 bits to resume normal operations.\r\n" + @@ -150,7 +161,7 @@ private PagerState AllocateMorePagesAndRemapContinuously(long allocationSize) if (newAlloctedBaseAddress == null || newAlloctedBaseAddress == (byte*)0) { - Trace.WriteLine("Failed to remap file continuously. Unmapping already mapped files and re-trying"); + Debug.WriteLine("Failed to remap file continuously. Unmapping already mapped files and re-trying"); UndoMappings(allocationInfoAfterReallocation); failedToAllocate = true; break; diff --git a/Voron/Impl/Paging/Win32PureMemoryPager.cs b/Voron/Impl/Paging/Win32PureMemoryPager.cs index 4b004efe96..46c5cb4e44 100644 --- a/Voron/Impl/Paging/Win32PureMemoryPager.cs +++ b/Voron/Impl/Paging/Win32PureMemoryPager.cs @@ -110,6 +110,8 @@ public override int WriteDirect(Page start, long pagePosition, int pagesToWrite) public override void Dispose() { + if (Disposed) + return; base.Dispose(); for (ulong i = 0; i < _rangesCount; i++) { diff --git a/Voron/Impl/ScratchBufferPool.cs b/Voron/Impl/ScratchBufferPool.cs index 42505b23ab..d27faf96d8 100644 --- a/Voron/Impl/ScratchBufferPool.cs +++ b/Voron/Impl/ScratchBufferPool.cs @@ -30,7 +30,7 @@ private class PendingPage public ScratchBufferPool(StorageEnvironment env) { _scratchPager = env.Options.CreateScratchPager("scratch.buffers"); - _scratchPager.AllocateMorePages(null, env.Options.InitialLogFileSize); + _scratchPager.AllocateMorePages(null, env.Options.InitialFileSize.HasValue ? 
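// [Editor's note] A recurring fix in this patch: when a pager grows its file, the new
// PagerState is published *before* the old one is released, so a concurrent reader can never
// observe an already-released state. Minimal sketch of the swap (ref counting assumed from
// the AddRef/Release surface in these hunks):
private void ReplacePagerState(PagerState newPagerState)
{
    var tmp = PagerState;        // capture the state being replaced
    PagerState = newPagerState;  // publish the replacement first
    tmp.Release();               // only then drop this pager's reference to the old state
}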
Math.Max(env.Options.InitialFileSize.Value, env.Options.InitialLogFileSize) : env.Options.InitialLogFileSize); } public PagerState PagerState { get { return _scratchPager.PagerState; } } diff --git a/Voron/Impl/Transaction.cs b/Voron/Impl/Transaction.cs index 77d6ca0a26..6d683fa408 100644 --- a/Voron/Impl/Transaction.cs +++ b/Voron/Impl/Transaction.cs @@ -2,12 +2,14 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using System.Threading.Tasks; using Voron.Exceptions; using Voron.Impl.FileHeaders; using Voron.Impl.FreeSpace; using Voron.Impl.Journal; using Voron.Impl.Paging; using Voron.Trees; +using Voron.Util; namespace Voron.Impl { @@ -53,6 +55,7 @@ public long Id private TransactionHeader* _txHeader; private readonly List _transactionPages = new List(); private readonly Dictionary _trees = new Dictionary(); + private readonly PagerState _scratchPagerState; public bool Committed { get; private set; } @@ -75,13 +78,15 @@ public Transaction(StorageEnvironment env, long id, TransactionFlags flags, IFre _id = id; _freeSpaceHandling = freeSpaceHandling; Flags = flags; - var scratchPagerState = env.ScratchBufferPool.PagerState; scratchPagerState.AddRef(); _pagerStates.Add(scratchPagerState); - if (flags.HasFlag(TransactionFlags.ReadWrite) == false) { + // for read transactions, we need to keep the pager state frozen + // for write transactions, we can use the current one (which == null) + _scratchPagerState = scratchPagerState; + _state = env.State; _journal.GetSnapshots().ForEach(AddJournalSnapshot); return; @@ -94,28 +99,10 @@ public Transaction(StorageEnvironment env, long id, TransactionFlags flags, IFre MarkTreesForWriteTransaction(); } - internal void WriteDirect(byte*[] pages) - { - foreach (var pageData in pages) - { - var allocation = _env.ScratchBufferPool.Allocate(this, 1); - var page = _env.ScratchBufferPool.ReadPage(allocation.PositionInScratchBuffer); - NativeMethods.memcpy(page.Base, pageData, AbstractPager.PageSize); - - page.Dirty = true; - _dirtyPages.Add(page.PageNumber); - - _transactionPages.Add(allocation); - _allocatedPagesInTransaction++; - - - } - } - private void InitTransactionHeader() { var allocation = _env.ScratchBufferPool.Allocate(this, 1); - var page = _env.ScratchBufferPool.ReadPage(allocation.PositionInScratchBuffer); + var page = _env.ScratchBufferPool.ReadPage(allocation.PositionInScratchBuffer, _scratchPagerState); _transactionPages.Add(allocation); NativeMethods.memset(page.Base, 0, AbstractPager.PageSize); _txHeader = (TransactionHeader*)page.Base; @@ -201,11 +188,11 @@ public Page GetReadOnlyPage(long pageNumber) Page p; if (_scratchPagesTable.TryGetValue(pageNumber, out value)) { - p = _env.ScratchBufferPool.ReadPage(value.PositionInScratchBuffer); + p = _env.ScratchBufferPool.ReadPage(value.PositionInScratchBuffer, _scratchPagerState); } else { - p = _journal.ReadPage(this, pageNumber) ?? _dataPager.Read(pageNumber); + p = _journal.ReadPage(this, pageNumber, _scratchPagerState) ?? _dataPager.Read(pageNumber); } Debug.Assert(p != null && p.PageNumber == pageNumber, string.Format("Requested ReadOnly page #{0}. 
Got #{1} from {2}", pageNumber, p.PageNumber, p.Source)); @@ -372,7 +359,7 @@ public void Dispose() { if (!Committed && !RolledBack && Flags == TransactionFlags.ReadWrite) Rollback(); - + _env.TransactionCompleted(this); foreach (var pagerState in _pagerStates) { diff --git a/Voron/Impl/TransactionMergingWriter.cs b/Voron/Impl/TransactionMergingWriter.cs index 4fddd18439..0446ff3f0b 100644 --- a/Voron/Impl/TransactionMergingWriter.cs +++ b/Voron/Impl/TransactionMergingWriter.cs @@ -1,332 +1,382 @@ -using System.Threading.Tasks; +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + using Voron.Debugging; -using Voron.Trees; using Voron.Util; namespace Voron.Impl { - using System; - using System.Collections.Concurrent; - using System.Collections.Generic; - using System.Diagnostics; - using System.IO; - using System.Linq; - using System.Threading; - using Extensions; - - public class TransactionMergingWriter : IDisposable - { - private readonly StorageEnvironment _env; - - private readonly ConcurrentQueue _pendingWrites = new ConcurrentQueue(); - private readonly CancellationTokenSource _cancellationTokenSource = new CancellationTokenSource(); - private readonly ManualResetEventSlim _stopWrites = new ManualResetEventSlim(); - private readonly ManualResetEventSlim _hasWrites = new ManualResetEventSlim(); - private readonly DebugJournal _debugJournal; - private readonly ConcurrentQueue _eventsBuffer = new ConcurrentQueue(); - - private bool ShouldRecordToDebugJournal - { - get - { - return _debugJournal != null && _debugJournal.IsRecording; - } - } - - private readonly Lazy _backgroundTask; - - internal TransactionMergingWriter(StorageEnvironment env, DebugJournal debugJournal = null) - { - _env = env; - _stopWrites.Set(); - _debugJournal = debugJournal; - _backgroundTask = new Lazy(() => Task.Factory.StartNew(BackgroundWriter, _cancellationTokenSource.Token, - TaskCreationOptions.LongRunning, - TaskScheduler.Current)); - } - - public IDisposable StopWrites() - { - _stopWrites.Reset(); - - return new DisposableAction(() => _stopWrites.Set()); - } - - public void Write(WriteBatch batch) - { - if (batch.IsEmpty) - return; - - EnsureValidBackgroundTaskState(); - - using (var mine = new OutstandingWrite(batch, this)) - { - _pendingWrites.Enqueue(mine); - - _hasWrites.Set(); - - mine.Wait(); - } - } - - private void EnsureValidBackgroundTaskState() - { - var backgroundTask = _backgroundTask.Value; - if (backgroundTask.IsCanceled || backgroundTask.IsFaulted) - backgroundTask.Wait(); // would throw - if (backgroundTask.IsCompleted) - throw new InvalidOperationException("The write background task has already completed!"); - } - - private void BackgroundWriter() - { - var cancellationToken = _cancellationTokenSource.Token; - while (cancellationToken.IsCancellationRequested == false) - { - _stopWrites.Wait(cancellationToken); - _hasWrites.Reset(); - - OutstandingWrite write; - while (_pendingWrites.TryDequeue(out write)) - { - HandleActualWrites(write); - } - _hasWrites.Wait(cancellationToken); - } - } - - private void HandleActualWrites(OutstandingWrite mine) - { - List writes = null; - try - { - writes = BuildBatchGroup(mine); - using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite)) - { - HandleOperations(tx, writes.SelectMany(x => x.Batch.Operations)); - - try - { - tx.Commit(); - if (ShouldRecordToDebugJournal) - _debugJournal.Flush(); - - foreach (var write in 
writes) - write.Completed(); - } - catch (Exception e) - { - // if we have an error duing the commit, we can't recover, just fail them all. - foreach (var write in writes) - { - write.Errored(e); - } - } - } - } - catch (Exception e) - { - HandleWriteFailure(writes, mine, e); - } - } - - private void HandleWriteFailure(List writes, OutstandingWrite mine, Exception e) - { - if (writes == null || writes.Count == 0) - { - mine.Errored(e); - throw new InvalidOperationException("Couldn't get items to write", e); - } - - if (writes.Count == 1) - { - writes[0].Errored(e); - return; - } - - SplitWrites(writes); - } - - private void HandleOperations(Transaction tx, IEnumerable operations) - { - foreach (var g in operations.GroupBy(x => x.TreeName)) - { - var tree = tx.State.GetTree(tx, g.Key); - // note that the ordering is done purely for performance reasons - // we rely on the fact that there can be only a single operation per key in - // each batch, and that we don't make any guarantees regarding ordering between - // concurrent merged writes - foreach (var operation in g.OrderBy(x => x.Key, SliceEqualityComparer.Instance)) - { - operation.Reset(); - DebugActionType actionType; - switch (operation.Type) - { - case WriteBatch.BatchOperationType.Add: - tree.Add(tx, operation.Key, operation.Value as Stream, operation.Version); - actionType = DebugActionType.Add; - break; - case WriteBatch.BatchOperationType.Delete: - tree.Delete(tx, operation.Key, operation.Version); - actionType = DebugActionType.Delete; - break; - case WriteBatch.BatchOperationType.MultiAdd: - tree.MultiAdd(tx, operation.Key, operation.Value as Slice, operation.Version); - actionType = DebugActionType.MultiAdd; - break; - case WriteBatch.BatchOperationType.MultiDelete: - tree.MultiDelete(tx, operation.Key, operation.Value as Slice, operation.Version); - actionType = DebugActionType.MultiDelete; - break; - default: - throw new ArgumentOutOfRangeException(); - } - - if (ShouldRecordToDebugJournal) - _debugJournal.RecordAction(actionType, operation.Key, g.Key, operation.Value); - - } - } - } - - private void SplitWrites(List writes) - { - for (var index = 0; index < writes.Count; index++) - { - var write = writes[index]; - try - { - using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite)) - { - HandleOperations(tx, write.Batch.Operations); - tx.Commit(); - write.Completed(); - } - } - catch (Exception e) - { - write.Errored(e); - } - } - } - - private List BuildBatchGroup(OutstandingWrite mine) - { - // Allow the group to grow up to a maximum size, but if the - // original write is small, limit the growth so we do not slow - // down the small write too much. 
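// [Editor's note] BuildBatchGroup drains queued writes into the initiating one until a byte
// budget runs out, and the budget shrinks when the initiating write is small so tiny writes
// are not held hostage by a huge merged commit (16 MB / 1 MB in the removed version just
// below; the rewrite later in this patch raises them to 64 MB / 2 MB). Condensed sketch of
// the budgeted drain:
long maxSize = 64 * 1024 * 1024;             // default budget for the merged group
if (mine.Size < 128 * 1024)
    maxSize = 2 * 1024 * 1024;               // small initiating write, small budget
var list = new List<OutstandingWrite> { mine };
maxSize -= mine.Size;
OutstandingWrite item;
while (maxSize > 0 && _pendingWrites.TryDequeue(out item))
{
    list.Add(item);                          // merge into the same transaction
    maxSize -= item.Size;                    // may overshoot once; the loop then stops
}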
- long maxSize = 16 * 1024 * 1024; // 16 MB by default - if (mine.Size < 128 * 1024) - maxSize = (1024 * 1024); // 1 MB if small - - var list = new List { mine }; - - maxSize -= mine.Size; - - while (true) - { - if (maxSize <= 0) - break; - - OutstandingWrite item; - if (_pendingWrites.TryDequeue(out item) == false) - break; - list.Add(item); - maxSize -= item.Size; - } - - return list; - } - - private class OutstandingWrite : IDisposable - { - private readonly TransactionMergingWriter _transactionMergingWriter; - private Exception _exception; - private readonly ManualResetEventSlim _completed; - - public OutstandingWrite(WriteBatch batch, TransactionMergingWriter transactionMergingWriter) - { - _transactionMergingWriter = transactionMergingWriter; - Batch = batch; - Size = batch.Size(); - - if (transactionMergingWriter._eventsBuffer.TryDequeue(out _completed) == false) - _completed = new ManualResetEventSlim(); - _completed.Reset(); - } - - public WriteBatch Batch { get; private set; } - - public long Size { get; private set; } - - public void Dispose() - { - if(Batch.DisposeAfterWrite) - Batch.Dispose(); - - _transactionMergingWriter._eventsBuffer.Enqueue(_completed); - } - - public void Errored(Exception e) - { - var wasSet = _completed.IsSet; - - _exception = e; - _completed.Set(); - - if (wasSet) - throw new InvalidOperationException("This should not happen."); - } - - public void Completed() - { - var wasSet = _completed.IsSet; - _completed.Set(); - - if (wasSet) - throw new InvalidOperationException("This should not happen."); - } - - public void Wait() - { - _completed.Wait(); - if (_exception != null) - { - throw new AggregateException("Error when executing write", _exception); - } - } - } - - public void Dispose() - { - _cancellationTokenSource.Cancel(); - _stopWrites.Set(); - _hasWrites.Set(); - - try - { - if (_backgroundTask.IsValueCreated == false) - return; - _backgroundTask.Value.Wait(); - } - catch (TaskCanceledException) - { - } - catch (AggregateException e) - { - if (e.InnerException is TaskCanceledException) - return; - throw; - } - finally - { - foreach (var manualResetEventSlim in _eventsBuffer) - { - manualResetEventSlim.Dispose(); - } - _stopWrites.Dispose(); - _hasWrites.Dispose(); - } - } - } + public class TransactionMergingWriter : IDisposable + { + private readonly StorageEnvironment _env; + + private readonly CancellationToken _cancellationToken; + + private readonly ConcurrentQueue _pendingWrites = new ConcurrentQueue(); + private readonly ManualResetEventSlim _stopWrites = new ManualResetEventSlim(); + private readonly ManualResetEventSlim _hasWrites = new ManualResetEventSlim(); + private readonly DebugJournal _debugJournal; + private readonly ConcurrentQueue _eventsBuffer = new ConcurrentQueue(); + + private bool ShouldRecordToDebugJournal + { + get + { + return _debugJournal != null && _debugJournal.IsRecording; + } + } + + private readonly Lazy _backgroundTask; + + internal TransactionMergingWriter(StorageEnvironment env, CancellationToken cancellationToken, DebugJournal debugJournal = null) + { + _env = env; + _cancellationToken = cancellationToken; + _stopWrites.Set(); + _debugJournal = debugJournal; + _backgroundTask = new Lazy(() => Task.Factory.StartNew(BackgroundWriter, _cancellationToken, + TaskCreationOptions.LongRunning, + TaskScheduler.Current)); + } + + public IDisposable StopWrites() + { + _stopWrites.Reset(); + + return new DisposableAction(() => _stopWrites.Set()); + } + + public void Write(WriteBatch batch) + { + if (batch.IsEmpty) + 
return; + + EnsureValidBackgroundTaskState(); + + using (var mine = new OutstandingWrite(batch, this)) + { + _pendingWrites.Enqueue(mine); + + _hasWrites.Set(); + + mine.Wait(); + } + } + + private void EnsureValidBackgroundTaskState() + { + var backgroundTask = _backgroundTask.Value; + if (backgroundTask.IsCanceled || backgroundTask.IsFaulted) + backgroundTask.Wait(); // would throw + if (backgroundTask.IsCompleted) + throw new InvalidOperationException("The write background task has already completed!"); + } + + private void BackgroundWriter() + { + while (_cancellationToken.IsCancellationRequested == false) + { + _cancellationToken.ThrowIfCancellationRequested(); + + _stopWrites.Wait(_cancellationToken); + _hasWrites.Reset(); + + OutstandingWrite write; + while (_pendingWrites.TryDequeue(out write)) + { + HandleActualWrites(write, _cancellationToken); + } + _hasWrites.Wait(_cancellationToken); + } + } + + private void HandleActualWrites(OutstandingWrite mine, CancellationToken token) + { + List writes = null; + try + { + writes = BuildBatchGroup(mine); + using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite)) + { + HandleOperations(tx, writes, _cancellationToken); + + try + { + tx.Commit(); + if (ShouldRecordToDebugJournal) + _debugJournal.Flush(); + + foreach (var write in writes) + write.Completed(); + } + catch (Exception e) + { + // if we have an error duing the commit, we can't recover, just fail them all. + foreach (var write in writes) + { + write.Errored(e); + } + } + } + } + catch (Exception e) + { + HandleWriteFailure(writes, mine, e); + } + } + + private void HandleWriteFailure(List writes, OutstandingWrite mine, Exception e) + { + if (writes == null || writes.Count == 0) + { + mine.Errored(e); + throw new InvalidOperationException("Couldn't get items to write", e); + } + + if (writes.Count == 1) + { + writes[0].Errored(e); + return; + } + + SplitWrites(writes); + } + + private void HandleOperations(Transaction tx, List writes, CancellationToken token) + { + var trees = writes + .SelectMany(x => x.Trees) + .Distinct(); + + foreach (var treeName in trees) + { + token.ThrowIfCancellationRequested(); + + var tree = tx.State.GetTree(tx, treeName); + foreach (var write in writes) + { + foreach (var operation in write.GetOperations(treeName)) + { + token.ThrowIfCancellationRequested(); + + operation.Reset(); + + try + { + DebugActionType actionType; + switch (operation.Type) + { + case WriteBatch.BatchOperationType.Add: + var stream = operation.Value as Stream; + if (stream != null) + tree.Add(tx, operation.Key, stream, operation.Version); + else + tree.Add(tx, operation.Key, (Slice)operation.Value, operation.Version); + actionType = DebugActionType.Add; + break; + case WriteBatch.BatchOperationType.Delete: + tree.Delete(tx, operation.Key, operation.Version); + actionType = DebugActionType.Delete; + break; + case WriteBatch.BatchOperationType.MultiAdd: + tree.MultiAdd(tx, operation.Key, operation.Value as Slice, version: operation.Version); + actionType = DebugActionType.MultiAdd; + break; + case WriteBatch.BatchOperationType.MultiDelete: + tree.MultiDelete(tx, operation.Key, operation.Value as Slice, operation.Version); + actionType = DebugActionType.MultiDelete; + break; + case WriteBatch.BatchOperationType.Increment: + tree.Increment(tx, operation.Key, (long)operation.Value, operation.Version); + actionType = DebugActionType.Increment; + break; + default: + throw new ArgumentOutOfRangeException(); + } + + if (ShouldRecordToDebugJournal) + 
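// [Editor's note] Each batch operation can now carry a whitelist of exception types to
// swallow (filled by SetIgnoreExceptionOnExecution<T>, e.g. for ConcurrencyException when
// shouldIgnoreConcurrencyExceptions is passed to WriteBatch.Add). Sketch of the dispatch
// used just below; ApplyToTree stands in for the switch over operation.Type and is the
// editor's name, not Voron's:
try
{
    ApplyToTree(tree, operation);
}
catch (Exception e)
{
    if (operation.ExceptionTypesToIgnore.Contains(e.GetType()) == false)
        throw;   // only exact, explicitly whitelisted types are suppressed
}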
_debugJournal.RecordAction(actionType, operation.Key, treeName, operation.Value); + } + catch (Exception e) + { + if (operation.ExceptionTypesToIgnore.Contains(e.GetType()) == false) + throw; + } + } + } + } + } + + private void SplitWrites(List writes) + { + for (var index = 0; index < writes.Count; index++) + { + var write = writes[index]; + try + { + _cancellationToken.ThrowIfCancellationRequested(); + + using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite)) + { + HandleOperations(tx, new List { write }, _cancellationToken); + tx.Commit(); + write.Completed(); + } + } + catch (Exception e) + { + write.Errored(e); + } + } + } + + private List BuildBatchGroup(OutstandingWrite mine) + { + // Allow the group to grow up to a maximum size, but if the + // original write is small, limit the growth so we do not slow + // down the small write too much. + long maxSize = 64 * 1024 * 1024; // 64 MB by default + if (mine.Size < 128 * 1024) + maxSize = (2 * 1024 * 1024); // 2 MB if small + + var list = new List { mine }; + + maxSize -= mine.Size; + + while (true) + { + if (maxSize <= 0) + break; + + OutstandingWrite item; + if (_pendingWrites.TryDequeue(out item) == false) + break; + list.Add(item); + maxSize -= item.Size; + } + + return list; + } + + private class OutstandingWrite : IDisposable + { + private readonly WriteBatch _batch; + + private readonly TransactionMergingWriter _transactionMergingWriter; + private Exception _exception; + private readonly ManualResetEventSlim _completed; + + private readonly Dictionary> _operations = new Dictionary>(); + + public OutstandingWrite(WriteBatch batch, TransactionMergingWriter transactionMergingWriter) + { + _batch = batch; + _transactionMergingWriter = transactionMergingWriter; + + _operations = CreateOperations(batch); + Size = batch.Size(); + + if (transactionMergingWriter._eventsBuffer.TryDequeue(out _completed) == false) + _completed = new ManualResetEventSlim(); + _completed.Reset(); + } + + private static Dictionary> CreateOperations(WriteBatch batch) + { + return batch.Trees.ToDictionary(tree => tree, tree => batch.GetSortedOperations(tree).ToList()); + } + + public IEnumerable Trees + { + get + { + return _batch.Trees; + } + } + + public IEnumerable GetOperations(string treeName) + { + List operations; + if (_operations.TryGetValue(treeName, out operations)) + return operations; + + return Enumerable.Empty(); + } + + public long Size { get; private set; } + + public void Dispose() + { + if (_batch.DisposeAfterWrite) + _batch.Dispose(); + + _transactionMergingWriter._eventsBuffer.Enqueue(_completed); + } + + public void Errored(Exception e) + { + var wasSet = _completed.IsSet; + + _exception = e; + _completed.Set(); + + if (wasSet) + throw new InvalidOperationException("This should not happen."); + } + + public void Completed() + { + var wasSet = _completed.IsSet; + _completed.Set(); + + if (wasSet) + throw new InvalidOperationException("This should not happen."); + } + + public void Wait() + { + _completed.Wait(); + if (_exception != null) + { + throw new AggregateException("Error when executing write", _exception); + } + } + } + + public void Dispose() + { + _stopWrites.Set(); + _hasWrites.Set(); + + try + { + if (_backgroundTask.IsValueCreated == false) + return; + _backgroundTask.Value.Wait(); + } + catch (TaskCanceledException) + { + } + catch (AggregateException e) + { + if (e.InnerException is TaskCanceledException || e.InnerException is OperationCanceledException) + return; + throw; + } + finally + { + foreach (var 
manualResetEventSlim in _eventsBuffer) + { + manualResetEventSlim.Dispose(); + } + _stopWrites.Dispose(); + _hasWrites.Dispose(); + } + } + } } diff --git a/Voron/Impl/WriteBatch.cs b/Voron/Impl/WriteBatch.cs index 30281ae5e6..eff8870544 100644 --- a/Voron/Impl/WriteBatch.cs +++ b/Voron/Impl/WriteBatch.cs @@ -1,55 +1,69 @@ -using System.Collections.Concurrent; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.IO; +using System.Linq; + +using Voron.Exceptions; namespace Voron.Impl { - using System; - using System.Collections.Generic; - using System.IO; - using System.Linq; - public class WriteBatch : IDisposable { private readonly Dictionary> _lastOperations; private readonly Dictionary>> _multiTreeOperations; + private readonly HashSet _trees = new HashSet(); + private readonly SliceEqualityComparer _sliceEqualityComparer; private bool _disposeAfterWrite = true; - public IEnumerable Operations + public HashSet Trees { get { - var allOperations = _lastOperations.SelectMany(x => x.Value.Values); - - if (_multiTreeOperations.Count == 0) - return allOperations; - - return allOperations.Concat(_multiTreeOperations.SelectMany(x => x.Value.Values) - .SelectMany(x => x)); + return _trees; } } - public Func Size + public IEnumerable GetSortedOperations(string treeName) { - get + Dictionary operations; + if (_lastOperations.TryGetValue(treeName, out operations)) { - return () => - { - long totalSize = 0; - - if (_lastOperations.Count > 0) - totalSize += _lastOperations.Sum( - operation => - operation.Value.Values.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size)); - - if (_multiTreeOperations.Count > 0) - totalSize += _multiTreeOperations.Sum( - tree => - tree.Value.Sum( - multiOp => multiOp.Value.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size))); - return totalSize; - }; + foreach (var operation in operations.OrderBy(x => x.Key, _sliceEqualityComparer)) + yield return operation.Value; } + + if (_multiTreeOperations.Count == 0) + yield break; + + Dictionary> multiOperations; + if (_multiTreeOperations.TryGetValue(treeName, out multiOperations) == false) + yield break; + + foreach (var operation in multiOperations + .OrderBy(x => x.Key, _sliceEqualityComparer) + .SelectMany(x => x.Value) + .OrderBy(x => (Slice)x.Value, _sliceEqualityComparer)) + yield return operation; + } + + public long Size() + { + long totalSize = 0; + + if (_lastOperations.Count > 0) + totalSize += _lastOperations.Sum( + operation => + operation.Value.Values.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size)); + + if (_multiTreeOperations.Count > 0) + totalSize += _multiTreeOperations.Sum( + tree => + tree.Value.Sum( + multiOp => multiOp.Value.Sum(x => x.Type == BatchOperationType.Add ? x.ValueSize + x.Key.Size : x.Key.Size))); + return totalSize; } public bool IsEmpty { get { return _lastOperations.Count == 0 && _multiTreeOperations.Count == 0; } } @@ -112,62 +126,87 @@ public WriteBatch() _sliceEqualityComparer = new SliceEqualityComparer(); } - public void Add(Slice key, Stream value, string treeName, ushort? version = null) + public void Add(Slice key, Slice value, string treeName, ushort? 
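// [Editor's note] WriteBatch now exposes the set of tree names it touches plus a per-tree,
// key-ordered operation stream, which is exactly how the merging writer walks it. A hedged
// consumption sketch:
foreach (var treeName in batch.Trees)                        // every tree this batch touches
{
    foreach (var op in batch.GetSortedOperations(treeName))  // ordered by key for locality
    {
        // dispatch on op.Type (Add/Delete/MultiAdd/MultiDelete/Increment) here
    }
}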
version = null, bool shouldIgnoreConcurrencyExceptions = false) + { + AssertValidTreeName(treeName); + if (value == null) throw new ArgumentNullException("value"); + + var batchOperation = BatchOperation.Add(key, value, version, treeName); + if (shouldIgnoreConcurrencyExceptions) + batchOperation.SetIgnoreExceptionOnExecution(); + AddOperation(batchOperation); + } + + public void Add(Slice key, Stream value, string treeName, ushort? version = null, bool shouldIgnoreConcurrencyExceptions = false) { - if (treeName != null && treeName.Length == 0) throw new ArgumentException("treeName must not be empty", "treeName"); + AssertValidTreeName(treeName); if (value == null) throw new ArgumentNullException("value"); - //TODO : check up if adding empty values make sense in Voron --> in order to be consistent with existing behavior of Esent, this should be allowed - // if (value.Length == 0) - // throw new ArgumentException("Cannot add empty value"); if (value.Length > int.MaxValue) throw new ArgumentException("Cannot add a value that is over 2GB in size", "value"); - - AddOperation(new BatchOperation(key, value, version, treeName, BatchOperationType.Add)); + var batchOperation = BatchOperation.Add(key, value, version, treeName); + if (shouldIgnoreConcurrencyExceptions) + batchOperation.SetIgnoreExceptionOnExecution(); + AddOperation(batchOperation); } public void Delete(Slice key, string treeName, ushort? version = null) { AssertValidRemove(treeName); - AddOperation(new BatchOperation(key, null as Stream, version, treeName, BatchOperationType.Delete)); + AddOperation(BatchOperation.Delete(key, version, treeName)); } private static void AssertValidRemove(string treeName) { - if (treeName != null && treeName.Length == 0) throw new ArgumentException("treeName must not be empty", "treeName"); + AssertValidTreeName(treeName); } public void MultiAdd(Slice key, Slice value, string treeName, ushort? version = null) { AssertValidMultiOperation(value, treeName); - AddOperation(new BatchOperation(key, value, version, treeName, BatchOperationType.MultiAdd)); + AddOperation(BatchOperation.MultiAdd(key, value, version, treeName)); } - private static void AssertValidMultiOperation(Slice value, string treeName) + public void MultiDelete(Slice key, Slice value, string treeName, ushort? version = null) { - if (treeName != null && treeName.Length == 0) throw new ArgumentException("treeName must not be empty", "treeName"); - if (value == null) throw new ArgumentNullException("value"); - if (value.Size == 0) - throw new ArgumentException("Cannot add empty value"); + AssertValidMultiOperation(value, treeName); + + AddOperation(BatchOperation.MultiDelete(key, value, version, treeName)); } - public void MultiDelete(Slice key, Slice value, string treeName, ushort? version = null) + public void Increment(Slice key, long delta, string treeName, ushort? 
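// [Editor's note] The Increment operation being declared here ships a signed delta rather
// than a full value; the merging writer routes it to Tree.Increment. A hedged usage sketch,
// using the byte[]-backed Slice constructor from this patch (System.Text.Encoding assumed):
var batch = new WriteBatch();
var key = new Slice(Encoding.UTF8.GetBytes("users/1/visits"));
batch.Increment(key, 1, "counters");   // +1; a negative delta decrements
env.Writer.Write(batch);               // applied atomically, possibly merged with neighbors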
version = null) { - AssertValidMultiOperation(value, treeName); + AssertValidTreeName(treeName); - AddOperation(new BatchOperation(key, value, version, treeName, BatchOperationType.MultiDelete)); + AddOperation(BatchOperation.Increment(key, delta, version, treeName)); + } + + private static void AssertValidTreeName(string treeName) + { + if (treeName != null && treeName.Length == 0) + throw new ArgumentException("treeName must not be empty", "treeName"); + } + + private static void AssertValidMultiOperation(Slice value, string treeName) + { + AssertValidTreeName(treeName); + if (value == null) throw new ArgumentNullException("value"); + if (value.Size == 0) + throw new ArgumentException("Cannot add empty value"); } private void AddOperation(BatchOperation operation) { var treeName = operation.TreeName; - if (treeName != null && treeName.Length == 0) throw new ArgumentException("treeName must not be empty", "treeName"); + AssertValidTreeName(treeName); if (treeName == null) treeName = Constants.RootTreeName; + _trees.Add(treeName); + if (operation.Type == BatchOperationType.MultiAdd || operation.Type == BatchOperationType.MultiDelete) { Dictionary> multiTreeOperationsOfTree; @@ -202,13 +241,52 @@ private void AddOperation(BatchOperation operation) } } - public class BatchOperation + public class BatchOperation : IComparable { - private readonly long originalStreamPosition; +#if DEBUG + private readonly StackTrace stackTrace; + public StackTrace StackTrace + { + get { return stackTrace; } + } +#endif + private readonly long originalStreamPosition; + private readonly HashSet exceptionTypesToIgnore = new HashSet(); private readonly Action reset = delegate { }; + private readonly Slice valSlice; + + public static BatchOperation Add(Slice key, Slice value, ushort? version, string treeName) + { + return new BatchOperation(key, value, version, treeName, BatchOperationType.Add); + } + + public static BatchOperation Add(Slice key, Stream stream, ushort? version, string treeName) + { + return new BatchOperation(key, stream, version, treeName, BatchOperationType.Add); + } + + public static BatchOperation Delete(Slice key, ushort? version, string treeName) + { + return new BatchOperation(key, null as Stream, version, treeName, BatchOperationType.Delete); + } + + public static BatchOperation MultiAdd(Slice key, Slice value, ushort? version, string treeName) + { + return new BatchOperation(key, value, version, treeName, BatchOperationType.MultiAdd); + } + + public static BatchOperation MultiDelete(Slice key, Slice value, ushort? version, string treeName) + { + return new BatchOperation(key, value, version, treeName, BatchOperationType.MultiDelete); + } - public BatchOperation(Slice key, Stream value, ushort? version, string treeName, BatchOperationType type) + public static BatchOperation Increment(Slice key, long delta, ushort? version, string treeName) + { + return new BatchOperation(key, delta, version, treeName, BatchOperationType.Increment); + } + + private BatchOperation(Slice key, Stream value, ushort? version, string treeName, BatchOperationType type) : this(key, value as object, version, treeName, type) { if (value != null) @@ -218,13 +296,18 @@ public BatchOperation(Slice key, Stream value, ushort? version, string treeName, reset = () => value.Position = originalStreamPosition; } + +#if DEBUG + stackTrace = new StackTrace(); +#endif } - public BatchOperation(Slice key, Slice value, ushort? 
version, string treeName, BatchOperationType type) + private BatchOperation(Slice key, Slice value, ushort? version, string treeName, BatchOperationType type) : this(key, value as object, version, treeName, type) { if (value != null) { + valSlice = value; originalStreamPosition = 0; ValueSize = value.Size; } @@ -251,6 +334,12 @@ private BatchOperation(Slice key, object value, ushort? version, string treeName public ushort? Version { get; private set; } + public HashSet ExceptionTypesToIgnore + { + get { return exceptionTypesToIgnore; } + } + + public void SetVersionFrom(BatchOperation other) { if (other.Version != null && @@ -262,6 +351,30 @@ public void Reset() { reset(); } + + public void SetIgnoreExceptionOnExecution() + where T : Exception + { + ExceptionTypesToIgnore.Add(typeof(T)); + } + + public unsafe int CompareTo(BatchOperation other) + { + var r = SliceEqualityComparer.Instance.Compare(Key, other.Key); + if (r != 0) + return r; + if (valSlice != null) + { + if (other.valSlice == null) + return -1; + return valSlice.Compare(other.valSlice, NativeMethods.memcmp); + } + else if (other.valSlice != null) + { + return 1; + } + return 0; + } } public enum BatchOperationType @@ -271,6 +384,7 @@ public enum BatchOperationType Delete = 2, MultiAdd = 3, MultiDelete = 4, + Increment = 5 } public void Dispose() diff --git a/Voron/Slice.cs b/Voron/Slice.cs index c499124812..47630cc8c0 100644 --- a/Voron/Slice.cs +++ b/Voron/Slice.cs @@ -4,7 +4,6 @@ using System.Text; using Voron.Impl; using Voron.Trees; -using Voron.Util.Conversion; namespace Voron { @@ -12,22 +11,23 @@ public unsafe class Slice { public static Slice AfterAllKeys = new Slice(SliceOptions.AfterAllKeys); public static Slice BeforeAllKeys = new Slice(SliceOptions.BeforeAllKeys); - public static Slice Empty = new Slice(new byte[0]); + public static Slice Empty = new Slice(new byte[0]); - private ushort _pointerSize; - public SliceOptions Options; + private ushort _size; private readonly byte[] _array; private byte* _pointer; + public SliceOptions Options; + public ushort Size { - get { return (ushort)(_array == null ? 
_pointerSize : _array.Length); } + get { return _size; } } public void Set(byte* p, ushort size) { _pointer = p; - _pointerSize = size; + _size = size; } public Slice(SliceOptions options) @@ -35,21 +35,26 @@ public Slice(SliceOptions options) Options = options; _pointer = null; _array = null; - _pointerSize = 0; + _size = 0; } public Slice(byte* key, ushort size) { - _pointerSize = size; + _size = size; Options = SliceOptions.Key; _array = null; _pointer = key; } - public Slice(byte[] key) + public Slice(byte[] key) : this(key, (ushort)key.Length) + { + + } + + public Slice(byte[] key, ushort size) { if (key == null) throw new ArgumentNullException("key"); - _pointerSize = 0; + _size = size; Options = SliceOptions.Key; _pointer = null; _array = key; @@ -61,7 +66,7 @@ public Slice(NodeHeader* node) Set(node); } - protected bool Equals(Slice other) + public bool Equals(Slice other) { return Compare(other, NativeMethods.memcmp) == 0; } @@ -71,64 +76,64 @@ public override bool Equals(object obj) if (ReferenceEquals(null, obj)) return false; if (ReferenceEquals(this, obj)) return true; if (obj.GetType() != GetType()) return false; - return Equals((Slice) obj); + return Equals((Slice)obj); } public override int GetHashCode() { - if (_array != null) - return ComputeHashArray(); - return ComputeHashPointer(); + if (_array != null) + return ComputeHashArray(); + return ComputeHashPointer(); } - private int ComputeHashPointer() - { - unchecked - { - const int p = 16777619; - int hash = (int)2166136261; - - for (int i = 0; i < _pointerSize; i++) - hash = (hash ^ _pointer[i]) * p; - - hash += hash << 13; - hash ^= hash >> 7; - hash += hash << 3; - hash ^= hash >> 17; - hash += hash << 5; - return hash; - } - } + private int ComputeHashPointer() + { + unchecked + { + const int p = 16777619; + int hash = (int)2166136261; + + for (int i = 0; i < _size; i++) + hash = (hash ^ _pointer[i]) * p; + + hash += hash << 13; + hash ^= hash >> 7; + hash += hash << 3; + hash ^= hash >> 17; + hash += hash << 5; + return hash; + } + } - private int ComputeHashArray() - { - unchecked - { - const int p = 16777619; - int hash = (int) 2166136261; - - for (int i = 0; i < _array.Length; i++) - hash = (hash ^ _array[i]) * p; - - hash += hash << 13; - hash ^= hash >> 7; - hash += hash << 3; - hash ^= hash >> 17; - hash += hash << 5; - return hash; - } - } + private int ComputeHashArray() + { + unchecked + { + const int p = 16777619; + int hash = (int)2166136261; + + for (int i = 0; i < _size; i++) + hash = (hash ^ _array[i]) * p; + + hash += hash << 13; + hash ^= hash >> 7; + hash += hash << 3; + hash ^= hash >> 17; + hash += hash << 5; + return hash; + } + } - public override string ToString() + public override string ToString() { // this is used for debug purposes only if (Options != SliceOptions.Key) return Options.ToString(); - - if(_array != null) - return Encoding.UTF8.GetString(_array); - return new string((sbyte*) _pointer, 0, _pointerSize, Encoding.UTF8); + if (_array != null) + return Encoding.UTF8.GetString(_array,0, _size); + + return new string((sbyte*)_pointer, 0, _size, Encoding.UTF8); } public int Compare(Slice other, SliceComparer cmp) @@ -184,25 +189,42 @@ public void CopyTo(byte* dest) { if (_array == null) { - NativeMethods.memcpy(dest, _pointer, _pointerSize); + NativeMethods.memcpy(dest, _pointer, _size); return; } fixed (byte* a = _array) { - NativeMethods.memcpy(dest, a, _array.Length); + NativeMethods.memcpy(dest, a, _size); + } + } + + public void CopyTo(byte[] dest) + { + if (_array == null) + { + 
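// [Editor's note] The two ComputeHash* methods above are the 32-bit FNV-1a loop followed by
// a finalization cascade; keying them on _size means array-backed slices hash only their
// logical length, consistent with the new Slice(byte[], ushort) constructor. Stand-alone
// sketch over a managed buffer:
private static int Fnv1aHash(byte[] data, int size)
{
    unchecked
    {
        const int p = 16777619;       // FNV prime
        int hash = (int)2166136261;   // FNV offset basis

        for (int i = 0; i < size; i++)
            hash = (hash ^ data[i]) * p;

        hash += hash << 13;           // final avalanche, exactly as in the patch
        hash ^= hash >> 7;
        hash += hash << 3;
        hash ^= hash >> 17;
        hash += hash << 5;
        return hash;
    }
}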
fixed (byte* p = dest) + NativeMethods.memcpy(p, _pointer, _size); + return; } + Buffer.BlockCopy(_array, 0, dest, 0, _size); + } + + public void CopyTo(int from, byte[] dest, int offset, int count) + { + if (from + count > Size) + throw new ArgumentOutOfRangeException("from", "Cannot copy data after the end of the slice"); + if(offset + count > dest.Length) + throw new ArgumentOutOfRangeException("from", "Cannot copy data after the end of the buffer" + + ""); + if (_array == null) + { + fixed (byte* p = dest) + NativeMethods.memcpy(p, _pointer + from, count); + return; + } + Buffer.BlockCopy(_array, from, dest, offset, count); } - public void CopyTo(byte[] dest) - { - if (_array == null) - { - fixed(byte* p = dest) - NativeMethods.memcpy(p, _pointer, _pointerSize); - return; - } - Buffer.BlockCopy(_array,0, dest, 0, _array.Length); - } public void Set(NodeHeader* node) { @@ -210,37 +232,29 @@ public void Set(NodeHeader* node) } - public Slice Clone() - { - var buffer = new byte[Size]; - if (_array == null) - { - fixed (byte* dest = buffer) - { - NativeMethods.memcpy(dest, _pointer, _pointerSize); - } - } - else - { - Buffer.BlockCopy(_array, 0, buffer, 0, Size); - } - return new Slice(buffer); - } - - public long ToInt64() + public Slice Clone() { - if (Size != sizeof(long)) - throw new NotSupportedException("Invalid size for int 64 key"); - if (_array != null) - return EndianBitConverter.Big.ToInt64(_array, 0); - var buffer = new byte[Size]; - fixed (byte* dest = buffer) + if (_array == null) { - NativeMethods.memcpy(dest, _pointer, _pointerSize); + fixed (byte* dest = buffer) + { + NativeMethods.memcpy(dest, _pointer, _size); + } } - - return EndianBitConverter.Big.ToInt64(buffer, 0); + else + { + Buffer.BlockCopy(_array, 0, buffer, 0, Size); + } + return new Slice(buffer); } + + public ValueReader CreateReader() + { + if(_array != null) + return new ValueReader(_array, _size); + + return new ValueReader(_pointer, _size); + } } } \ No newline at end of file diff --git a/Voron/SliceWriter.cs b/Voron/SliceWriter.cs new file mode 100644 index 0000000000..438a69a531 --- /dev/null +++ b/Voron/SliceWriter.cs @@ -0,0 +1,40 @@ +using System.Text; +using Voron.Util.Conversion; + +namespace Voron +{ + public struct SliceWriter + { + private int _pos; + private readonly byte[] _buffer; + + public SliceWriter(int size) + { + _pos = 0; + _buffer = new byte[size]; + } + + public void WriteBigEndian(int i) + { + EndianBitConverter.Big.CopyBytes(i, _buffer, _pos); + _pos += sizeof (int); + } + + public void WriteBigEndian(long l) + { + EndianBitConverter.Big.CopyBytes(l, _buffer, _pos); + _pos += sizeof(long); + } + + public void WriteBigEndian(short s) + { + EndianBitConverter.Big.CopyBytes(s, _buffer, _pos); + _pos += sizeof(short); + } + + public Slice CreateSlice() + { + return new Slice(_buffer); + } + } +} \ No newline at end of file diff --git a/Voron/StorageEnvironment.cs b/Voron/StorageEnvironment.cs index 186e6c33a0..05c989770f 100644 --- a/Voron/StorageEnvironment.cs +++ b/Voron/StorageEnvironment.cs @@ -45,9 +45,7 @@ public class StorageEnvironment : IDisposable private EndOfDiskSpaceEvent _endOfDiskSpace; private int _sizeOfUnflushedTransactionsInJournalFile; - - public TemporaryPage TemporaryPage { get; private set; } - + private Queue _tempPagesPool = new Queue(); public TransactionMergingWriter Writer { get; private set; } @@ -66,7 +64,7 @@ public StorageEnvironment(StorageEnvironmentOptions options,string debugJournalN if(Writer != null) Writer.Dispose(); - Writer = new 
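// [Editor's note] The new SliceWriter above writes integers big-endian deliberately: the
// memcmp-based ordering of the resulting key bytes then matches numeric ordering. A hedged
// usage sketch building a composite key:
var w = new SliceWriter(sizeof(long) + sizeof(int));
w.WriteBigEndian(42L);          // primary component, 8 bytes, big-endian
w.WriteBigEndian(7);            // secondary component, 4 bytes, big-endian
Slice key = w.CreateSlice();    // 12-byte key that sorts numerically under memcmp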
TransactionMergingWriter(this, DebugJournal); + Writer = new TransactionMergingWriter(this, _cancellationTokenSource.Token, DebugJournal); } #endif @@ -74,10 +72,9 @@ public unsafe StorageEnvironment(StorageEnvironmentOptions options) { try { - TemporaryPage = new TemporaryPage(); _options = options; _dataPager = options.DataPager; - _freeSpaceHandling = new FreeSpaceHandling(this); + _freeSpaceHandling = new FreeSpaceHandling(); _sliceComparer = NativeMethods.memcmp; _headerAccessor = new HeaderAccessor(this); var isNew = _headerAccessor.Initialize(); @@ -94,7 +91,7 @@ public unsafe StorageEnvironment(StorageEnvironmentOptions options) State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName; State.Root.Name = Constants.RootTreeName; - Writer = new TransactionMergingWriter(this); + Writer = new TransactionMergingWriter(this, _cancellationTokenSource.Token); if (_options.ManualFlushing == false) _flushingTask = FlushWritesToDataFileAsync(); @@ -221,7 +218,7 @@ public DebugJournal DebugJournal if (Writer != null && value != null) { Writer.Dispose(); - Writer = new TransactionMergingWriter(this, _debugJournal); + Writer = new TransactionMergingWriter(this, _cancellationTokenSource.Token, _debugJournal); } } @@ -291,9 +288,10 @@ public void Dispose() try { - if (_flushingTask != null) + var flushingTaskCopy = _flushingTask; + if (flushingTaskCopy != null) { - switch (_flushingTask.Status) + switch (flushingTaskCopy.Status) { case TaskStatus.RanToCompletion: case TaskStatus.Canceled: @@ -301,17 +299,16 @@ public void Dispose() default: try { - _flushingTask.Wait(); + flushingTaskCopy.Wait(); } catch (AggregateException ae) { - if (ae.InnerException is OperationCanceledException == false) - throw ae.InnerException; + if (ae.InnerException is OperationCanceledException == false) + throw; } break; } } - } finally { @@ -322,8 +319,8 @@ public void Dispose() _headerAccessor, _scratchBufferPool, _options.OwnsPagers ? _options : null, - _journal, TemporaryPage - }) + _journal + }.Concat(_tempPagesPool)) { try { @@ -360,7 +357,8 @@ public Transaction NewTransaction(TransactionFlags flags, TimeSpan? timeout = nu { if (_endOfDiskSpace.CanContinueWriting) { - Debug.Assert(_flushingTask.Status == TaskStatus.Canceled || _flushingTask.Status == TaskStatus.RanToCompletion); + var flushingTask = _flushingTask; + Debug.Assert(flushingTask != null && (flushingTask.Status == TaskStatus.Canceled || flushingTask.Status == TaskStatus.RanToCompletion)); _cancellationTokenSource = new CancellationTokenSource(); _flushingTask = FlushWritesToDataFileAsync(); _endOfDiskSpace = null; @@ -368,14 +366,13 @@ public Transaction NewTransaction(TransactionFlags flags, TimeSpan? timeout = nu } } - long txId; - Transaction tx; + Transaction tx; _txCommit.EnterReadLock(); try { - txId = flags == TransactionFlags.ReadWrite ? _transactionsCounter + 1 : _transactionsCounter; - tx = new Transaction(this, txId, flags, _freeSpaceHandling); + long txId = flags == TransactionFlags.ReadWrite ? 
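+ // a ReadWrite transaction claims the next transaction id up front; a Read transaction snapshots the last committed one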
_transactionsCounter + 1 : _transactionsCounter; + tx = new Transaction(this, txId, flags, _freeSpaceHandling); } finally { @@ -460,7 +457,6 @@ public EnvironmentStats Stats() return new EnvironmentStats { - FreePages = _freeSpaceHandling.GetFreePageCount(), FreePagesOverhead = State.FreeSpaceRoot.State.PageCount, RootPages = State.Root.State.PageCount, UnallocatedPagesAtEndOfFile = _dataPager.NumberOfAllocatedPages - NextPageNumber, @@ -500,7 +496,7 @@ private Task FlushWritesToDataFileAsync() try { - _journal.Applicator.ApplyLogsToDataFile(OldestTransaction); + _journal.Applicator.ApplyLogsToDataFile(OldestTransaction, _cancellationTokenSource.Token); } catch (TimeoutException) { @@ -516,15 +512,16 @@ public void FlushLogToDataFile(Transaction tx = null) if (_options.ManualFlushing == false) throw new NotSupportedException("Manual flushes are not set in the storage options, cannot manually flush!"); - _journal.Applicator.ApplyLogsToDataFile(OldestTransaction, tx); + _journal.Applicator.ApplyLogsToDataFile(OldestTransaction, _cancellationTokenSource.Token, tx); } public void AssertFlushingNotFailed() { - if (_flushingTask == null || _flushingTask.IsFaulted == false) + var flushingTaskCopy = _flushingTask; + if (flushingTaskCopy == null || flushingTaskCopy.IsFaulted == false) return; - _flushingTask.Wait();// force re-throw of error + flushingTaskCopy.Wait();// force re-throw of error } public void HandleDataDiskFullException(DiskFullException exception) @@ -535,5 +532,52 @@ public void HandleDataDiskFullException(DiskFullException exception) _cancellationTokenSource.Cancel(); _endOfDiskSpace = new EndOfDiskSpaceEvent(exception.DriveInfo); } + + public IDisposable GetTemporaryPage(Transaction tx, out TemporaryPage tmp) + { + if (tx.Flags != TransactionFlags.ReadWrite) + throw new ArgumentException("Temporary pages are only available for write transactions"); + if (_tempPagesPool.Count > 0) + { + tmp = _tempPagesPool.Dequeue(); + return tmp.ReturnTemporaryPageToPool; + } + + tmp = new TemporaryPage(); + try + { + return tmp.ReturnTemporaryPageToPool = new ReturnTemporaryPageToPool(this, tmp); + } + catch (Exception) + { + tmp.Dispose(); + throw; + } + } + + private class ReturnTemporaryPageToPool : IDisposable + { + private readonly TemporaryPage _tmp; + private readonly StorageEnvironment _env; + + public ReturnTemporaryPageToPool(StorageEnvironment env, TemporaryPage tmp) + { + _tmp = tmp; + _env = env; + } + + public void Dispose() + { + try + { + _env._tempPagesPool.Enqueue(_tmp); + } + catch (Exception) + { + _tmp.Dispose(); + throw; + } + } + } } } diff --git a/Voron/StorageEnvironmentOptions.cs b/Voron/StorageEnvironmentOptions.cs index d2d9372198..df1124759f 100644 --- a/Voron/StorageEnvironmentOptions.cs +++ b/Voron/StorageEnvironmentOptions.cs @@ -28,6 +28,8 @@ public void InvokeRecoveryError(object sender, string message, Exception e) handler(this, new RecoveryErrorEventArgs(message, e)); } + public long? InitialFileSize { get; set; } + public long MaxLogFileSize { get { return _maxLogFileSize; } @@ -63,6 +65,7 @@ public long InitialLogFileSize public int IdleFlushTimeout { get; set; } public long? 
MaxStorageSize { get; set; } + public abstract string BasePath { get; } public abstract IJournalWriter CreateJournalWriter(long journalNumber, long journalSize); @@ -89,9 +92,9 @@ public static StorageEnvironmentOptions CreateMemoryOnly() return new PureMemoryStorageEnvironmentOptions(); } - public static StorageEnvironmentOptions ForPath(string path, string tempPath = null) + public static StorageEnvironmentOptions ForPath(string path, string tempPath = null, string journalPath = null) { - return new DirectoryStorageEnvironmentOptions(path, tempPath); + return new DirectoryStorageEnvironmentOptions(path, tempPath, journalPath); } public IDisposable AllowManualFlushing() @@ -105,7 +108,8 @@ public IDisposable AllowManualFlushing() public class DirectoryStorageEnvironmentOptions : StorageEnvironmentOptions { - private readonly string _basePath; + private readonly string _journalPath; + private readonly string _basePath; private readonly string _tempPath; private readonly Lazy _dataPager; @@ -113,18 +117,22 @@ public class DirectoryStorageEnvironmentOptions : StorageEnvironmentOptions private readonly ConcurrentDictionary> _journals = new ConcurrentDictionary>(StringComparer.OrdinalIgnoreCase); - public DirectoryStorageEnvironmentOptions(string basePath, string tempPath) + public DirectoryStorageEnvironmentOptions(string basePath, string tempPath, string journalPath) { - _basePath = Path.GetFullPath(basePath); + _basePath = Path.GetFullPath(basePath); _tempPath = !string.IsNullOrEmpty(tempPath) ? Path.GetFullPath(tempPath) : _basePath; - + _journalPath = !string.IsNullOrEmpty(journalPath) ? Path.GetFullPath(journalPath) : _basePath; + if (Directory.Exists(_basePath) == false) Directory.CreateDirectory(_basePath); if (_basePath != tempPath && Directory.Exists(_tempPath) == false) Directory.CreateDirectory(_tempPath); - _dataPager = new Lazy(() => new Win32MemoryMapPager(Path.Combine(_basePath, Constants.DatabaseFilename))); + if (_journalPath != tempPath && Directory.Exists(_journalPath) == false) + Directory.CreateDirectory(_journalPath); + + _dataPager = new Lazy(() => new Win32MemoryMapPager(Path.Combine(_basePath, Constants.DatabaseFilename), InitialFileSize)); } public override IVirtualPager DataPager @@ -135,7 +143,7 @@ public override IVirtualPager DataPager } } - public string BasePath + public override string BasePath { get { return _basePath; } } @@ -148,7 +156,7 @@ public string TempPath public override IJournalWriter CreateJournalWriter(long journalNumber, long journalSize) { var name = JournalName(journalNumber); - var path = Path.Combine(_basePath, name); + var path = Path.Combine(_journalPath, name); var result = _journals.GetOrAdd(name, _ => new Lazy(() => new Win32FileJournalWriter(path, journalSize))); if (result.Value.Disposed) @@ -184,7 +192,7 @@ public override bool TryDeleteJournal(long number) if (_journals.TryRemove(name, out lazy) && lazy.IsValueCreated) lazy.Value.Dispose(); - var file = Path.Combine(_basePath, name); + var file = Path.Combine(_journalPath, name); if (File.Exists(file) == false) return false; File.Delete(file); @@ -200,13 +208,18 @@ public unsafe override bool ReadHeader(string filename, FileHeader* header) } using (var fs = new FileStream(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite)) { - var ptr = (byte*)header; + if (fs.Length != sizeof (FileHeader)) + return false; // wrong file size + + var ptr = (byte*)header; int remaining = sizeof(FileHeader); while (remaining > 0) { int read; if (NativeFileMethods.ReadFile(fs.SafeFileHandle, 
ptr, remaining, out read, null) == false) throw new Win32Exception(); + if (read == 0) + return false; // we should be reading _something_ here, if we can't, then it is an error and we assume corruption ptr += read; remaining -= read; } @@ -239,13 +252,13 @@ public override IVirtualPager CreateScratchPager(string name) if (File.Exists(scratchFile)) File.Delete(scratchFile); - return new Win32MemoryMapPager(scratchFile, (NativeFileAttributes.DeleteOnClose | NativeFileAttributes.Temporary)); + return new Win32MemoryMapPager(scratchFile, InitialFileSize, (NativeFileAttributes.DeleteOnClose | NativeFileAttributes.Temporary)); } public override IVirtualPager OpenJournalPager(long journalNumber) { var name = JournalName(journalNumber); - var path = Path.Combine(_basePath, name); + var path = Path.Combine(_journalPath, name); if (File.Exists(path) == false) throw new InvalidOperationException("No such journal " + path); return new Win32MemoryMapPager(path, access: NativeFileAccess.GenericRead); @@ -256,17 +269,15 @@ public class PureMemoryStorageEnvironmentOptions : StorageEnvironmentOptions { private readonly IVirtualPager _dataPager; - private Dictionary _logs = + private readonly Dictionary _logs = new Dictionary(StringComparer.OrdinalIgnoreCase); - private Dictionary _headers = + private readonly Dictionary _headers = new Dictionary(StringComparer.OrdinalIgnoreCase); - public PureMemoryStorageEnvironmentOptions() { - //_dataPager = new Win32PureMemoryPager(); //TODO : after Win32PageFileBackedMemoryMappedPager is finished and works, change this to Win32PageFileBackedMemoryMappedPager with Guid.New as memoryName - _dataPager = new Win32PageFileBackedMemoryMappedPager(); + _dataPager = new Win32PageFileBackedMemoryMappedPager("data.pager", InitialFileSize); } public override IVirtualPager DataPager @@ -274,6 +285,11 @@ public override IVirtualPager DataPager get { return _dataPager; } } + public override string BasePath + { + get { return ":memory:"; } + } + public override IJournalWriter CreateJournalWriter(long journalNumber, long journalSize) { var name = JournalName(journalNumber); @@ -337,7 +353,7 @@ public override unsafe void WriteHeader(string filename, FileHeader* header) public override IVirtualPager CreateScratchPager(string name) { - return new Win32PageFileBackedMemoryMappedPager(); + return new Win32PageFileBackedMemoryMappedPager(name, InitialFileSize); } public override IVirtualPager OpenJournalPager(long journalNumber) @@ -360,11 +376,6 @@ public static string JournalRecoveryName(long number) return string.Format("{0:D19}.recovery", number); } - public static string TempBufferName() - { - return string.Format("{0}.scratch", Guid.NewGuid()); - } - public abstract void Dispose(); public abstract bool TryDeleteJournal(long number); diff --git a/Voron/Trees/IIterator.cs b/Voron/Trees/IIterator.cs index f6aeb7c433..cd4c2e72b1 100644 --- a/Voron/Trees/IIterator.cs +++ b/Voron/Trees/IIterator.cs @@ -15,7 +15,5 @@ public interface IIterator : IDisposable bool MovePrev(); bool Skip(int count); ValueReader CreateReaderForCurrent(); - - IEnumerable DumpValues(); } } \ No newline at end of file diff --git a/Voron/Trees/NodeHeader.cs b/Voron/Trees/NodeHeader.cs index 40cc90a89d..1c67f6c537 100644 --- a/Voron/Trees/NodeHeader.cs +++ b/Voron/Trees/NodeHeader.cs @@ -1,12 +1,11 @@ using System; -using System.IO; using System.Runtime.InteropServices; using Voron.Impl; namespace Voron.Trees { [StructLayout(LayoutKind.Explicit, Pack = 1)] - public struct NodeHeader + public unsafe struct 
NodeHeader { [FieldOffset(0)] public int DataSize; @@ -29,8 +28,17 @@ public int GetNodeSize() (Flags == (NodeFlags.PageRef) ? 0 : DataSize); } + public static byte* DirectAccess(Transaction tx, NodeHeader* node) + { + if (node->Flags == (NodeFlags.PageRef)) + { + var overFlowPage = tx.GetReadOnlyPage(node->PageNumber); + return overFlowPage.Base + Constants.PageHeaderSize; + } + return (byte*) node + node->KeySize + Constants.NodeHeaderSize; + } - public unsafe static ValueReader Reader(Transaction tx, NodeHeader* node) + public static ValueReader Reader(Transaction tx, NodeHeader* node) { if (node->Flags == (NodeFlags.PageRef)) { @@ -40,7 +48,7 @@ public unsafe static ValueReader Reader(Transaction tx, NodeHeader* node) return new ValueReader((byte*)node + node->KeySize + Constants.NodeHeaderSize, node->DataSize); } - public unsafe static Slice GetData(Transaction tx, NodeHeader* node) + public static Slice GetData(Transaction tx, NodeHeader* node) { if (node->Flags == (NodeFlags.PageRef)) { @@ -53,7 +61,7 @@ public unsafe static Slice GetData(Transaction tx, NodeHeader* node) } - public unsafe static void CopyTo(Transaction tx, NodeHeader* node, byte* dest) + public static void CopyTo(Transaction tx, NodeHeader* node, byte* dest) { if (node->Flags == (NodeFlags.PageRef)) { @@ -63,7 +71,7 @@ public unsafe static void CopyTo(Transaction tx, NodeHeader* node, byte* dest) NativeMethods.memcpy(dest, (byte*)node + node->KeySize + Constants.NodeHeaderSize, node->DataSize); } - public unsafe static int GetDataSize(Transaction tx, NodeHeader* node) + public static int GetDataSize(Transaction tx, NodeHeader* node) { if (node->Flags == (NodeFlags.PageRef)) { diff --git a/Voron/Trees/Page.cs b/Voron/Trees/Page.cs index 9352645015..43ef29d151 100644 --- a/Voron/Trees/Page.cs +++ b/Voron/Trees/Page.cs @@ -1,12 +1,12 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Security.Policy; using System.Text; using Voron.Debugging; using Voron.Impl; using Voron.Impl.FileHeaders; using Voron.Impl.Paging; -using Voron.Util.Conversion; namespace Voron.Trees { @@ -16,16 +16,18 @@ public unsafe class Page private readonly PageHeader* _header; public readonly string Source; + private readonly ushort _pageSize; - public int LastMatch; + public int LastMatch; public int LastSearchPosition; public bool Dirty; - public Page(byte* b, string source) + public Page(byte* b, string source, ushort pageSize) { _base = b; _header = (PageHeader*)b; Source = source; + _pageSize = pageSize; } public long PageNumber { get { return _header->PageNumber; } set { _header->PageNumber = value; } } @@ -66,22 +68,27 @@ public ushort* KeysOffsets return GetNode(LastSearchPosition); } + var pageKey = new Slice(SliceOptions.Key); + if (NumberOfEntries == 1) + { + pageKey.Set(GetNode(0)); + LastMatch = key.Compare(pageKey, cmp); + LastSearchPosition = LastMatch > 0 ? 1 : 0; + return LastSearchPosition == 0 ? GetNode(0) : null; + } + int low = IsLeaf ? 
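+ // leaf pages are searched from index 0; branch pages start at 1 because index 0 holds the implicit before-all-keys page ref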
0 : 1; int high = NumberOfEntries - 1; int position = 0; - var pageKey = new Slice(SliceOptions.Key); - bool matched = false; - NodeHeader* node = null; while (low <= high) { position = (low + high) >> 1; - node = GetNode(position); + var node = GetNode(position); pageKey.Set(node); LastMatch = key.Compare(pageKey, cmp); - matched = true; if (LastMatch == 0) break; @@ -91,11 +98,6 @@ public ushort* KeysOffsets high = position - 1; } - if (matched == false) - { - LastMatch = key.Compare(pageKey, cmp); - } - if (LastMatch > 0) // found entry less than key { position++; // move to the smallest entry larger than the key @@ -189,6 +191,18 @@ public void RemoveNode(int index) return (byte*)node + Constants.NodeHeaderSize + key.Size; } + public void ChangeImplicitRefPageNode(long implicitRefPageNumber) + { + const int implicitRefIndex = 0; + + var node = GetNode(implicitRefIndex); + + node->KeySize = 0; + node->Flags = NodeFlags.PageRef; + node->Version = 1; + node->PageNumber = implicitRefPageNumber; + } + private NodeHeader* CreateNode(int index, Slice key, NodeFlags flags, int len, ushort previousNodeVersion) { Debug.Assert(index <= NumberOfEntries && index >= 0); @@ -201,7 +215,7 @@ public void RemoveNode(int index) { KeysOffsets[i] = KeysOffsets[i - 1]; } - var nodeSize = SizeOf.NodeEntry(AbstractPager.PageMaxSpace, key, len); + var nodeSize = SizeOf.NodeEntry(PageMaxSpace, key, len); var node = AllocateNewNode(index, key, nodeSize, previousNodeVersion); if (key.Options == SliceOptions.Key) @@ -256,7 +270,8 @@ internal void CopyNodeDataToEndOfPage(NodeHeader* other, Slice key = null) private NodeHeader* AllocateNewNode(int index, Slice key, int nodeSize, ushort previousNodeVersion) { - if (previousNodeVersion + 1 > ushort.MaxValue) + int newSize = previousNodeVersion + 1; + if (newSize > ushort.MaxValue) previousNodeVersion = 0; var newNodeOffset = (ushort)(_header->Upper - nodeSize); @@ -280,7 +295,7 @@ public int SizeLeft public int SizeUsed { - get { return _header->Lower + AbstractPager.PageMaxSpace - _header->Upper; } + get { return _header->Lower + PageMaxSpace - _header->Upper; } } @@ -308,18 +323,22 @@ public void Truncate(Transaction tx, int i) // when truncating, we copy the values to a tmp page // this has the effect of compacting the page data and avoiding // internal page fragmentation - var copy = tx.Environment.TemporaryPage.TempPage; - copy.Flags = Flags; - for (int j = 0; j < i; j++) - { - copy.CopyNodeDataToEndOfPage(GetNode(j)); - } - NativeMethods.memcpy(_base + Constants.PageHeaderSize, - copy._base + Constants.PageHeaderSize, - AbstractPager.PageSize - Constants.PageHeaderSize); - - Upper = copy.Upper; - Lower = copy.Lower; + TemporaryPage tmp; + using (tx.Environment.GetTemporaryPage(tx, out tmp)) + { + var copy = tmp.TempPage; + copy.Flags = Flags; + for (int j = 0; j < i; j++) + { + copy.CopyNodeDataToEndOfPage(GetNode(j)); + } + NativeMethods.memcpy(_base + Constants.PageHeaderSize, + copy._base + Constants.PageHeaderSize, + _pageSize - Constants.PageHeaderSize); + + Upper = copy.Upper; + Lower = copy.Lower; + } if (LastSearchPosition > i) LastSearchPosition = i; @@ -363,27 +382,31 @@ public bool HasSpaceFor(Transaction tx, int len) return true; } - private void Defrag(Transaction tx) - { - var tmp = tx.Environment.TemporaryPage.TempPage; - NativeMethods.memcpy(tmp.Base, Base, AbstractPager.PageSize); - - var numberOfEntries = NumberOfEntries; - - Upper = AbstractPager.PageSize; - - for (int i = 0; i < numberOfEntries; i++) - { - var node = tmp.GetNode(i); - var 
size = node->GetNodeSize() - Constants.NodeOffsetSize; - size += size & 1; - NativeMethods.memcpy(Base + Upper - size, (byte*) node, size); - Upper -= (ushort)size; - KeysOffsets[i] = Upper; - } - } + private void Defrag(Transaction tx) + { + TemporaryPage tmp; + using (tx.Environment.GetTemporaryPage(tx, out tmp)) + { + var tempPage = tmp.TempPage; + NativeMethods.memcpy(tempPage.Base, Base, _pageSize); + + var numberOfEntries = NumberOfEntries; + + Upper = _pageSize; + + for (int i = 0; i < numberOfEntries; i++) + { + var node = tempPage.GetNode(i); + var size = node->GetNodeSize() - Constants.NodeOffsetSize; + size += size & 1; + NativeMethods.memcpy(Base + Upper - size, (byte*) node, size); + Upper -= (ushort) size; + KeysOffsets[i] = Upper; + } + } + } - private bool HasSpaceFor(int len) + private bool HasSpaceFor(int len) { return len <= SizeLeft; } @@ -401,10 +424,18 @@ private bool HasSpaceFor(Slice key, int len) public int GetRequiredSpace(Slice key, int len) { - return SizeOf.NodeEntry(AbstractPager.PageMaxSpace, key, len) + Constants.NodeOffsetSize; + return SizeOf.NodeEntry(PageMaxSpace, key, len) + Constants.NodeOffsetSize; } - public string this[int i] + public int PageMaxSpace + { + get + { + return _pageSize - Constants.PageHeaderSize; + } + } + + public string this[int i] { get { return new Slice(GetNode(i)).ToString(); } } @@ -491,7 +522,7 @@ public int CalcSizeUsed() public int CalcSizeLeft() { - var sl = AbstractPager.PageMaxSpace - CalcSizeUsed(); + var sl = PageMaxSpace - CalcSizeUsed(); Debug.Assert(sl >= 0); return sl; } diff --git a/Voron/Trees/PageIterator.cs b/Voron/Trees/PageIterator.cs new file mode 100644 index 0000000000..a84bd4a145 --- /dev/null +++ b/Voron/Trees/PageIterator.cs @@ -0,0 +1,103 @@ +using System; +using System.Collections.Generic; +using Voron.Impl; + +namespace Voron.Trees +{ + public unsafe class PageIterator : IIterator + { + private readonly SliceComparer _cmp; + private readonly Page _page; + private Slice _currentKey = new Slice(SliceOptions.Key); + + public PageIterator(SliceComparer cmp, Page page) + { + this._cmp = cmp; + this._page = page; + } + + public void Dispose() + { + + } + + public bool Seek(Slice key) + { + var current = _page.Search(key, _cmp); + if (current == null) + return false; + _currentKey.Set(current); + return this.ValidateCurrentKey(current, _cmp); + } + + public NodeHeader* Current + { + get + { + if (_page.LastSearchPosition< 0 || _page.LastSearchPosition >= _page.NumberOfEntries) + throw new InvalidOperationException("No current page was set"); + return _page.GetNode(_page.LastSearchPosition); + } + } + + + public Slice CurrentKey + { + get + { + if (_page.LastSearchPosition < 0 || _page.LastSearchPosition >= _page.NumberOfEntries) + throw new InvalidOperationException("No current page was set"); + return _currentKey; + } + } + public int GetCurrentDataSize() + { + return Current->DataSize; + } + + + public Slice RequiredPrefix { get; set; } + public Slice MaxKey { get; set; } + + public bool MoveNext() + { + _page.LastSearchPosition++; + return TrySetPosition(); + } + + public bool MovePrev() + { + _page.LastSearchPosition--; + + return TrySetPosition(); + + } + + public bool Skip(int count) + { + _page.LastSearchPosition += count; + + return TrySetPosition(); + } + + private bool TrySetPosition() + { + if (_page.LastSearchPosition < 0 || _page.LastSearchPosition >= _page.NumberOfEntries) + return false; + + var current = _page.GetNode(_page.LastSearchPosition); + if (this.ValidateCurrentKey(current, _cmp) == 
false) + { + return false; + } + _currentKey.Set(current); + return true; + } + + public ValueReader CreateReaderForCurrent() + { + var node = Current; + return new ValueReader((byte*)node + node->KeySize + Constants.NodeHeaderSize, node->DataSize); + } + } +} \ No newline at end of file diff --git a/Voron/Trees/PageSplitter.cs b/Voron/Trees/PageSplitter.cs index 91d043b255..60eb45ccbc 100644 --- a/Voron/Trees/PageSplitter.cs +++ b/Voron/Trees/PageSplitter.cs @@ -66,8 +66,23 @@ public PageSplitter(Transaction tx, } else { - // we already popped the page, so the current one on the stack is what the parent of the page - _parentPage = _tx.ModifyPage(_cursor.CurrentPage.PageNumber, _cursor.CurrentPage); + // we already popped the page, so the current one on the stack is the parent of the page + + if (_tree.Name == Constants.FreeSpaceTreeName) + { + // a special case for FreeSpaceTree because the allocation of a new page called above + // can cause a delete of a free space section resulting in a run of the tree rebalancer + // and here the parent page that exists in cursor can be outdated + + _parentPage = _tx.ModifyPage(_cursor.CurrentPage.PageNumber, null); // pass _null_ to make sure we'll get the most updated parent page + _parentPage.LastSearchPosition = _cursor.CurrentPage.LastSearchPosition; + _parentPage.LastMatch = _cursor.CurrentPage.LastMatch; + } + else + { + _parentPage = _tx.ModifyPage(_cursor.CurrentPage.PageNumber, _cursor.CurrentPage); + } + _cursor.Update(_cursor.Pages.First, _parentPage); } @@ -76,7 +91,16 @@ public PageSplitter(Transaction tx, _tx.ClearRecentFoundPages(_tree); } - if (_page.LastSearchPosition >= _page.NumberOfEntries) + if (_tree.Name == Constants.FreeSpaceTreeName) + { + // we need to refresh the LastSearchPosition of the split page which is used by the free space handling + // because the allocation of a new page called above could remove some sections + // from the page that is being split + + _page.NodePositionFor(_newKey, _cmp); + } + + if (_page.LastSearchPosition >= _page.NumberOfEntries) { // when we get a split at the end of the page, we take that as a hint that the user is doing // sequential inserts, at that point, we are going to keep the current page as is and create a new diff --git a/Voron/Trees/Tree.MultiTree.cs b/Voron/Trees/Tree.MultiTree.cs new file mode 100644 index 0000000000..6eb0e28c50 --- /dev/null +++ b/Voron/Trees/Tree.MultiTree.cs @@ -0,0 +1,356 @@ +// ----------------------------------------------------------------------- +// +// Copyright (c) Hibernating Rhinos LTD. All rights reserved. +// +// ----------------------------------------------------------------------- +using System; +using System.Diagnostics; +using System.IO; +using Voron.Impl; +using Voron.Impl.FileHeaders; +using Voron.Impl.Paging; +using Voron.Util; + +namespace Voron.Trees +{ + /* Multi tree behavior + * ------------------- + * A multi tree is a tree that is used only with MultiRead, MultiAdd, MultiDelete + * The common use case is a secondary index that allows duplicates. + * + * The API exposed goes like this: + * + * MultiAdd("key", "val1"), MultiAdd("key", "val2"), MultiAdd("key", "val3") + * + * And then you can read it back with MultiRead("key") : IIterator + * + * When deleting, you delete one value at a time: MultiDelete("key", "val1") + * + * The actual values are stored as keys in a separate tree per key. In order to optimize + * space usage, multi trees work in the following fashion. 
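+ * (A hypothetical illustration: a secondary index key such as "ByAge/32" holding many document ids.)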
+ *
+ * If the total size of the values per key is less than NodeMaxSize, we store them as an embedded
+ page inside the owning tree. If they are larger than the node max size, we create a separate tree
+ for them and then only store the tree root information.
+ */
+ public unsafe partial class Tree
+ {
+ public bool IsMultiValueTree { get; set; }
+
+ public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = null)
+ {
+ if (value == null) throw new ArgumentNullException("value");
+ int maxNodeSize = tx.DataPager.MaxNodeSize;
+ if (value.Size > maxNodeSize)
+ throw new ArgumentException(
+ "Cannot add a value to child tree that is over " + maxNodeSize + " bytes in size", "value");
+ if (value.Size == 0)
+ throw new ArgumentException("Cannot add empty value to child tree");
+
+ State.IsModified = true;
+
+ Lazy lazy;
+ var page = FindPageFor(tx, key, out lazy);
+ if ((page == null || page.LastMatch != 0))
+ {
+ MultiAddOnNewValue(tx, key, value, version, maxNodeSize);
+ return;
+ }
+
+ page = tx.ModifyPage(page.PageNumber, page);
+
+ var item = page.GetNode(page.LastSearchPosition);
+
+ // already was turned into a multi tree, not much to do here
+ if (item->Flags == NodeFlags.MultiValuePageRef)
+ {
+ var existingTree = OpenOrCreateMultiValueTree(tx, key, item);
+ existingTree.DirectAdd(tx, value, 0, version: version);
+ return;
+ }
+
+ byte* nestedPagePtr;
+ if (item->Flags == NodeFlags.PageRef)
+ {
+ var overFlowPage = tx.ModifyPage(item->PageNumber, null);
+ nestedPagePtr = overFlowPage.Base + Constants.PageHeaderSize;
+ }
+ else
+ {
+ nestedPagePtr = NodeHeader.DirectAccess(tx, item);
+ }
+
+ var nestedPage = new Page(nestedPagePtr, "multi tree", (ushort) NodeHeader.GetDataSize(tx, item));
+
+ var existingItem = nestedPage.Search(value, NativeMethods.memcmp);
+ if (nestedPage.LastMatch != 0)
+ existingItem = null;// not an actual match, just greater than
+
+ ushort previousNodeRevision = existingItem != null ? existingItem->Version : (ushort)0;
+ CheckConcurrency(key, value, version, previousNodeRevision, TreeActionType.Add);
+
+ if (existingItem != null)
+ {
+ // maybe same value added twice?
+ var tmpKey = new Slice(item);
+ if (tmpKey.Compare(value, _cmp) == 0)
+ return; // already there, turning into a no-op
+ nestedPage.RemoveNode(nestedPage.LastSearchPosition);
+ }
+
+ if (nestedPage.HasSpaceFor(tx, value, 0))
+ {
+ // we are now working on top of the modified root page, we can just modify the memory directly
+ nestedPage.AddDataNode(nestedPage.LastSearchPosition, value, 0, previousNodeRevision);
+ return;
+ }
+
+ int pageSize = nestedPage.CalcSizeUsed() + Constants.PageHeaderSize;
+ var newRequiredSize = pageSize + nestedPage.GetRequiredSpace(value, 0);
+ if (newRequiredSize <= maxNodeSize)
+ {
+ // we can just expand the current value... no need to create a nested tree yet
+ var actualPageSize = (ushort)Math.Min(Utils.NearestPowerOfTwo(newRequiredSize), maxNodeSize);
+ ExpandMultiTreeNestedPageSize(tx, key, value, nestedPagePtr, actualPageSize, item->DataSize);
+
+ return;
+ }
+ // we now have to convert this into a tree instance, instead of just a nested page
+ var tree = Create(tx, _cmp, TreeFlags.MultiValue);
+ for (int i = 0; i < nestedPage.NumberOfEntries; i++)
+ {
+ var existingValue = nestedPage.GetNodeKey(i);
+ tree.DirectAdd(tx, existingValue, 0);
+ }
+ tree.DirectAdd(tx, value, 0, version: version);
+ tx.AddMultiValueTree(this, key, tree);
+ // we need to record that we switched to tree mode here, so the next call wouldn't also try to create the tree again
+ DirectAdd(tx, key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef);
+ }
+
+ private void ExpandMultiTreeNestedPageSize(Transaction tx, Slice key, Slice value, byte* nestedPagePtr, ushort newSize, int currentSize)
+ {
+ Debug.Assert(newSize > currentSize);
+ TemporaryPage tmp;
+ using (tx.Environment.GetTemporaryPage(tx, out tmp))
+ {
+ var tempPagePointer = tmp.TempPagePointer;
+ NativeMethods.memcpy(tempPagePointer, nestedPagePtr, currentSize);
+ Delete(tx, key); // release our current page
+ Page nestedPage = new Page(tempPagePointer, "multi tree", (ushort)currentSize);
+
+ var ptr = DirectAdd(tx, key, newSize);
+
+ var newNestedPage = new Page(ptr, "multi tree", newSize)
+ {
+ Lower = (ushort)Constants.PageHeaderSize,
+ Upper = newSize,
+ Flags = PageFlags.Leaf,
+ PageNumber = -1L // mark as invalid page number
+ };
+
+ Slice nodeKey = new Slice(SliceOptions.Key);
+ for (int i = 0; i < nestedPage.NumberOfEntries; i++)
+ {
+ var nodeHeader = nestedPage.GetNode(i);
+ nodeKey.Set(nodeHeader);
+ newNestedPage.AddDataNode(i, nodeKey, 0,
+ (ushort)(nodeHeader->Version - 1)); // we decrement by one because AddDataNode will increment by one, and we don't want to change those values
+ }
+
+ newNestedPage.Search(value, _cmp);
+ newNestedPage.AddDataNode(newNestedPage.LastSearchPosition, value, 0, 0);
+ }
+ }
+
+ private void MultiAddOnNewValue(Transaction tx, Slice key, Slice value, ushort? version, int maxNodeSize)
+ {
+ var requiredPageSize = Constants.PageHeaderSize + SizeOf.LeafEntry(-1, value, 0) + Constants.NodeOffsetSize;
+ if (requiredPageSize > maxNodeSize)
+ {
+ // no choice, very big value, we might as well just put it in its own tree from the get go...
+ // otherwise, we would have to put this in overflow page, and that won't save us any space anyway
+
+ var tree = Create(tx, _cmp, TreeFlags.MultiValue);
+ tree.DirectAdd(tx, value, 0);
+ tx.AddMultiValueTree(this, key, tree);
+
+ DirectAdd(tx, key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef);
+ return;
+ }
+
+ var actualPageSize = (ushort) Math.Min(Utils.NearestPowerOfTwo(requiredPageSize), maxNodeSize);
+
+ var ptr = DirectAdd(tx, key, actualPageSize);
+
+ var nestedPage = new Page(ptr, "multi tree", actualPageSize)
+ {
+ PageNumber = -1L,// hint that this is an inner page
+ Lower = (ushort) Constants.PageHeaderSize,
+ Upper = actualPageSize,
+ Flags = PageFlags.Leaf,
+ };
+
+ CheckConcurrency(key, value, version, 0, TreeActionType.Add);
+
+ nestedPage.AddDataNode(0, value, 0, 0);
+ }
+
+ public void MultiDelete(Transaction tx, Slice key, Slice value, ushort?
version = null) + { + State.IsModified = true; + Lazy lazy; + var page = FindPageFor(tx, key, out lazy); + if (page == null || page.LastMatch != 0) + { + return; //nothing to delete - key not found + } + + page = tx.ModifyPage(page.PageNumber, page); + + var item = page.GetNode(page.LastSearchPosition); + + if (item->Flags == NodeFlags.MultiValuePageRef) //multi-value tree exists + { + var tree = OpenOrCreateMultiValueTree(tx, key, item); + + tree.Delete(tx, value, version); + + // previously, we would convert back to a simple model if we dropped to a single entry + // however, it doesn't really make sense, once you got enough values to go to an actual nested + // tree, you are probably going to remain that way, or be removed completely. + if (tree.State.EntriesCount != 0) + return; + tx.TryRemoveMultiValueTree(this, key); + tx.FreePage(tree.State.RootPageNumber); + Delete(tx, key); + } + else // we use a nested page here + { + var nestedPage = new Page(NodeHeader.DirectAccess(tx, item), "multi tree", (ushort)NodeHeader.GetDataSize(tx, item)); + var nestedItem = nestedPage.Search(value, NativeMethods.memcmp); + if (nestedItem == null) // value not found + return; + + byte* nestedPagePtr; + if (item->Flags == NodeFlags.PageRef) + { + var overFlowPage = tx.ModifyPage(item->PageNumber, null); + nestedPagePtr = overFlowPage.Base + Constants.PageHeaderSize; + } + else + { + nestedPagePtr = NodeHeader.DirectAccess(tx, item); + } + + nestedPage = new Page(nestedPagePtr, "multi tree", (ushort)NodeHeader.GetDataSize(tx, item)); + + CheckConcurrency(key, value, version, nestedItem->Version, TreeActionType.Delete); + nestedPage.RemoveNode(nestedPage.LastSearchPosition); + if (nestedPage.NumberOfEntries == 0) + Delete(tx, key); + } + } + + public IIterator MultiRead(Transaction tx, Slice key) + { + Lazy lazy; + var page = FindPageFor(tx, key, out lazy); + + if (page == null || page.LastMatch != 0) + { + return new EmptyIterator(); + } + + var item = page.Search(key, _cmp); + + var fetchedNodeKey = new Slice(item); + if (fetchedNodeKey.Compare(key, _cmp) != 0) + { + throw new InvalidDataException("Was unable to retrieve the correct node. Data corruption possible"); + } + + if (item->Flags == NodeFlags.MultiValuePageRef) + { + var tree = OpenOrCreateMultiValueTree(tx, key, item); + + return tree.Iterate(tx); + } + + var nestedPage = new Page(NodeHeader.DirectAccess(tx, item), "multi tree", (ushort)NodeHeader.GetDataSize(tx, item)); + + return new PageIterator(_cmp, nestedPage); + } + + internal Tree OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader* item) + { + Tree tree; + if (tx.TryGetMultiValueTree(this, key, out tree)) + return tree; + + var childTreeHeader = + (TreeRootHeader*)((byte*)item + item->KeySize + Constants.NodeHeaderSize); + Debug.Assert(childTreeHeader->RootPageNumber < tx.State.NextPageNumber); + tree = childTreeHeader != null ? 
+ Open(tx, _cmp, childTreeHeader) : + Create(tx, _cmp); + + tx.AddMultiValueTree(this, key, tree); + return tree; + } + + public bool SetAsMultiValueTreeRef(Transaction tx, Slice key) + { + Lazy lazy; + var foundPage = FindPageFor(tx, key, out lazy); + var page = tx.ModifyPage(foundPage.PageNumber, foundPage); + + if (page.LastMatch != 0) + return false; // not there + + var nodeHeader = page.GetNode(page.LastSearchPosition); + if (nodeHeader->Flags == NodeFlags.MultiValuePageRef) + return false; + if (nodeHeader->Flags != NodeFlags.Data) + throw new InvalidOperationException("Only data nodes can be set to MultiValuePageRef"); + nodeHeader->Flags = NodeFlags.MultiValuePageRef; + return true; + } + + private bool TryOverwriteDataOrMultiValuePageRefNode(NodeHeader* updatedNode, Slice key, int len, + NodeFlags requestedNodeType, ushort? version, + out byte* pos) + { + switch (requestedNodeType) + { + case NodeFlags.Data: + case NodeFlags.MultiValuePageRef: + { + if (updatedNode->DataSize == len && + (updatedNode->Flags == NodeFlags.Data || updatedNode->Flags == NodeFlags.MultiValuePageRef)) + { + CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add); + + if (updatedNode->Version == ushort.MaxValue) + updatedNode->Version = 0; + updatedNode->Version++; + + updatedNode->Flags = requestedNodeType; + + { + pos = (byte*)updatedNode + Constants.NodeHeaderSize + key.Size; + return true; + } + } + break; + } + case NodeFlags.PageRef: + throw new InvalidOperationException("We never add PageRef explicitly"); + default: + throw new ArgumentOutOfRangeException(); + } + pos = null; + return false; + } + } +} \ No newline at end of file diff --git a/Voron/Trees/Tree.cs b/Voron/Trees/Tree.cs index be0abc8249..9f3dd2cbd4 100644 --- a/Voron/Trees/Tree.cs +++ b/Voron/Trees/Tree.cs @@ -3,18 +3,15 @@ using System.Data; using System.Diagnostics; using System.IO; -using System.Linq; using Voron.Debugging; +using Voron.Exceptions; using Voron.Impl; using Voron.Impl.FileHeaders; using Voron.Impl.Paging; -using Voron.Util; namespace Voron.Trees { - using Exceptions; - - public unsafe class Tree + public unsafe partial class Tree { private TreeMutableState _state = new TreeMutableState(); @@ -80,109 +77,64 @@ public void Add(Transaction tx, Slice key, Stream value, ushort? version = null) if (value.Length > int.MaxValue) throw new ArgumentException("Cannot add a value that is over 2GB in size", "value"); - State.IsModified = true; - - var pos = DirectAdd(tx, key, (int) value.Length, version: version); + State.IsModified = true; + var pos = DirectAdd(tx, key, (int)value.Length, version: version); - var temporaryPage = tx.Environment.TemporaryPage; - var tempPageBuffer = temporaryPage.TempPageBuffer; - var tempPagePointer = temporaryPage.TempPagePointer; - while (true) - { - var read = value.Read(tempPageBuffer, 0, AbstractPager.PageSize); - if (read == 0) - break; - NativeMethods.memcpy(pos , tempPagePointer, read); - pos += read; - } + CopyStreamToPointer(tx, value, pos); } - public void MultiDelete(Transaction tx, Slice key, Slice value, ushort? 
version = null) - { - State.IsModified = true; - Lazy lazy; - var page = FindPageFor(tx, key, out lazy); - if (page == null || page.LastMatch != 0) - { - return; //nothing to delete - key not found - } - - page = tx.ModifyPage(page.PageNumber, page); - - var item = page.GetNode(page.LastSearchPosition); - - if (item->Flags == NodeFlags.MultiValuePageRef) //multi-value tree exists - { - var tree = OpenOrCreateMultiValueTree(tx, key, item); - - tree.Delete(tx, value, version); - - if (tree.State.EntriesCount > 1) - return; - // convert back to simple key/val - var iterator = tree.Iterate(tx); - if (!iterator.Seek(Slice.BeforeAllKeys)) - throw new InvalidDataException( - "MultiDelete() failed : sub-tree is empty where it should not be, this is probably a Voron bug."); + public long Increment(Transaction tx, Slice key, long delta, ushort? version = null) + { + long currentValue = 0; - var dataToSave = iterator.CurrentKey; + var read = Read(tx, key); + if (read != null) + currentValue = read.Reader.ReadLittleEndianInt64(); - var ptr = DirectAdd(tx, key, dataToSave.Size); - dataToSave.CopyTo(ptr); + var value = currentValue + delta; + Add(tx, key, BitConverter.GetBytes(value), version); - tx.TryRemoveMultiValueTree(this, key); - tx.FreePage(tree.State.RootPageNumber); - } - else //the regular key->value pattern - { - Delete(tx, key, version); - } - } + return value; + } - public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = null) + public void Add(Transaction tx, Slice key, byte[] value, ushort? version = null) { if (value == null) throw new ArgumentNullException("value"); - if (value.Size > tx.DataPager.MaxNodeSize) - throw new ArgumentException( - "Cannot add a value to child tree that is over " + tx.DataPager.MaxNodeSize + " bytes in size", "value"); - if (value.Size == 0) - throw new ArgumentException("Cannot add empty value to child tree"); State.IsModified = true; - Lazy lazy; - var page = FindPageFor(tx, key, out lazy); + var pos = DirectAdd(tx, key, value.Length, version: version); - if (page == null || page.LastMatch != 0) + fixed (byte* src = value) { - var ptr = DirectAdd(tx, key, value.Size, version: version); - value.CopyTo(ptr); - return; + NativeMethods.memcpy(pos, src, value.Length); } + } - page = tx.ModifyPage(page.PageNumber, page); + public void Add(Transaction tx, Slice key, Slice value, ushort? 
version = null) + { + if (value == null) throw new ArgumentNullException("value"); - var item = page.GetNode(page.LastSearchPosition); + State.IsModified = true; + var pos = DirectAdd(tx, key, value.Size, version: version); - CheckConcurrency(key, version, item->Version, TreeActionType.Add); - var existingValue = new Slice(DirectRead(tx, key), (ushort) item->DataSize); - if (existingValue.Compare(value, _cmp) == 0) - return; //nothing to do, the exact value is already there + value.CopyTo(pos); + } - if (item->Flags == NodeFlags.MultiValuePageRef) - { - var tree = OpenOrCreateMultiValueTree(tx, key, item); - tree.DirectAdd(tx, value, 0); - } - else // need to turn to tree + private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos) + { + TemporaryPage tmp; + using (tx.Environment.GetTemporaryPage(tx, out tmp)) { - var tree = Create(tx, _cmp, TreeFlags.MultiValue); - var current = NodeHeader.GetData(tx, item); - tree.DirectAdd(tx, current, 0); - tree.DirectAdd(tx, value, 0); - tx.AddMultiValueTree(this, key, tree); - - // we need to record that we switched to tree mode here, so the next call wouldn't also try to create the tree again - DirectAdd(tx, key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef); + var tempPageBuffer = tmp.TempPageBuffer; + var tempPagePointer = tmp.TempPagePointer; + while (true) + { + var read = value.Read(tempPageBuffer, 0, AbstractPager.PageSize); + if (read == 0) + break; + NativeMethods.memcpy(pos, tempPagePointer, read); + pos += read; + } } } @@ -279,23 +231,6 @@ public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = n return dataPos; } - public bool SetAsMultiValueTreeRef(Transaction tx, Slice key) - { - Lazy lazy; - var foundPage = FindPageFor(tx, key, out lazy); - var page = tx.ModifyPage(foundPage.PageNumber, foundPage); - - if (page.LastMatch != 0) - return false; // not there - - var nodeHeader = page.GetNode(page.LastSearchPosition); - if (nodeHeader->Flags == NodeFlags.MultiValuePageRef) - return false; - if (nodeHeader->Flags != NodeFlags.Data) - throw new InvalidOperationException("Only data nodes can be set to MultiValuePageRef"); - nodeHeader->Flags = NodeFlags.MultiValuePageRef; - return true; - } private long WriteToOverflowPages(Transaction tx, TreeMutableState txInfo, int overflowSize, out byte* dataPos) { @@ -375,8 +310,7 @@ public Page FindPageFor(Transaction tx, Slice key, out Lazy cursor) private Page SearchForPage(Transaction tx, Slice key, ref Lazy cursor) { - Page p; - p = tx.GetReadOnlyPage(State.RootPageNumber); + var p = tx.GetReadOnlyPage(State.RootPageNumber); var c = new Cursor(); c.Push(p); @@ -600,33 +534,6 @@ public ushort ReadVersion(Transaction tx, Slice key) return node->Version; } - public IIterator MultiRead(Transaction tx, Slice key) - { - Lazy lazy; - var page = FindPageFor(tx, key, out lazy); - - if (page == null || page.LastMatch != 0) - { - return new EmptyIterator(); - } - - var item = page.Search(key, _cmp); - - var fetchedNodeKey = new Slice(item); - if (fetchedNodeKey.Compare(key, _cmp) != 0) - { - throw new InvalidDataException("Was unable to retrieve the correct node. 
Data corruption possible"); - } - - if (item->Flags == NodeFlags.MultiValuePageRef) - { - var tree = OpenOrCreateMultiValueTree(tx, key, item); - - return tree.Iterate(tx); - } - - return new SingleEntryIterator(_cmp, item, tx); - } internal byte* DirectRead(Transaction tx, Slice key) { @@ -702,31 +609,20 @@ public override string ToString() return Name + " " + State.EntriesCount; } - internal Tree OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader* item) + + private void CheckConcurrency(Slice key, ushort? expectedVersion, ushort nodeVersion, TreeActionType actionType) { - Tree tree; - if (tx.TryGetMultiValueTree(this, key, out tree)) - return tree; - - var childTreeHeader = - (TreeRootHeader*) ((byte*) item + item->KeySize + Constants.NodeHeaderSize); - Debug.Assert(childTreeHeader->RootPageNumber < tx.State.NextPageNumber); - tree = childTreeHeader != null ? - Open(tx, _cmp, childTreeHeader) : - Create(tx, _cmp); - - tx.AddMultiValueTree(this, key, tree); - return tree; + if (expectedVersion.HasValue && nodeVersion != expectedVersion.Value) + throw new ConcurrencyException(string.Format("Cannot {0} '{1}' to '{4}' tree. Version mismatch. Expected: {2}. Actual: {3}.", actionType.ToString().ToLowerInvariant(), key, expectedVersion.Value, nodeVersion, Name)); } - private static void CheckConcurrency(Slice key, ushort? expectedVersion, ushort nodeVersion, TreeActionType actionType) + + private void CheckConcurrency(Slice key, Slice value, ushort? expectedVersion, ushort nodeVersion, TreeActionType actionType) { if (expectedVersion.HasValue && nodeVersion != expectedVersion.Value) - throw new ConcurrencyException(string.Format("Cannot {0} '{1}'. Version mismatch. Expected: {2}. Actual: {3}.", actionType.ToString().ToLowerInvariant(), key, expectedVersion.Value, nodeVersion)); + throw new ConcurrencyException(string.Format("Cannot {0} value '{5}' to key '{1}' to '{4}' tree. Version mismatch. Expected: {2}. Actual: {3}.", actionType.ToString().ToLowerInvariant(), key, expectedVersion.Value, nodeVersion, Name, value)); } - public bool IsMultiValueTree { get; set; } - public enum TreeActionType { Add, @@ -738,43 +634,7 @@ public Tree Clone() return new Tree(_cmp, _state.Clone()){ Name = Name }; } - private static bool TryOverwriteDataOrMultiValuePageRefNode(NodeHeader* updatedNode, Slice key, int len, - NodeFlags requestedNodeType, ushort? version, - out byte* pos) - { - switch (requestedNodeType) - { - case NodeFlags.Data: - case NodeFlags.MultiValuePageRef: - { - if (updatedNode->DataSize == len && - (updatedNode->Flags == NodeFlags.Data || updatedNode->Flags == NodeFlags.MultiValuePageRef)) - { - CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add); - - if (updatedNode->Version == ushort.MaxValue) - updatedNode->Version = 0; - updatedNode->Version++; - - updatedNode->Flags = requestedNodeType; - - { - pos = (byte*)updatedNode + Constants.NodeHeaderSize + key.Size; - return true; - } - } - break; - } - case NodeFlags.PageRef: - throw new InvalidOperationException("We never add PageRef explicitly"); - default: - throw new ArgumentOutOfRangeException(); - } - pos = null; - return false; - } - - private static bool TryOverwriteOverflowPages(Transaction tx, TreeMutableState treeState, NodeHeader* updatedNode, + private bool TryOverwriteOverflowPages(Transaction tx, TreeMutableState treeState, NodeHeader* updatedNode, Slice key, int len, ushort? 
version, out byte* pos) { if (updatedNode->Flags == NodeFlags.PageRef && @@ -814,4 +674,4 @@ private static bool TryOverwriteOverflowPages(Transaction tx, TreeMutableState t return false; } } -} \ No newline at end of file +} diff --git a/Voron/Trees/TreeIterator.cs b/Voron/Trees/TreeIterator.cs index 3955d0eb53..7aa01f7e25 100644 --- a/Voron/Trees/TreeIterator.cs +++ b/Voron/Trees/TreeIterator.cs @@ -22,39 +22,17 @@ public TreeIterator(Tree tree, Transaction tx, SliceComparer cmp) _cmp = cmp; } - public int GetCurrentDataSize() { return NodeHeader.GetDataSize(_tx, Current); } - public IIterator CreateMutliValueIterator() - { - var item = Current; - if (item->Flags == NodeFlags.MultiValuePageRef) - { - var tree = _tree.OpenOrCreateMultiValueTree(_tx, _currentKey, item); - - return tree.Iterate(_tx); - } - - return new SingleEntryIterator(_cmp, item, _tx); - } - public bool Seek(Slice key) { Lazy lazy; _currentPage = _tree.FindPageFor(_tx, key, out lazy); _cursor = lazy.Value; _cursor.Pop(); - - //if required prefix is set and need to seek to beginning/end - //--> skip to beginning of relevant keys - if (RequiredPrefix != null && - !RequiredPrefix.Equals(Slice.Empty) && - (key.Equals(Slice.BeforeAllKeys) || key.Equals(Slice.AfterAllKeys))) - key = RequiredPrefix; - var node = _currentPage.Search(key, _cmp); if (node == null) { @@ -68,18 +46,36 @@ public Slice CurrentKey { get { - if (_currentPage == null || _currentPage.LastSearchPosition >= _currentPage.NumberOfEntries) + if (_currentPage == null) throw new InvalidOperationException("No current page was set"); + + if (_currentPage.LastSearchPosition >= _currentPage.NumberOfEntries) + throw new InvalidOperationException(string.Format("Current page is invalid. Search position ({0}) exceeds number of entries ({1}). Page: {2}.", _currentPage.LastSearchPosition, _currentPage.NumberOfEntries, _currentPage)); + return _currentKey; } } + /// + /// Deletes the current key/value pair and returns true if there is + /// another key after it + /// + public bool DeleteCurrentAndMoveNext() + { + _tree.Delete(_tx, CurrentKey); + return MovePrev() && MoveNext(); + } + public NodeHeader* Current { get { - if (_currentPage == null || _currentPage.LastSearchPosition >= _currentPage.NumberOfEntries) + if (_currentPage == null) throw new InvalidOperationException("No current page was set"); + + if (_currentPage.LastSearchPosition >= _currentPage.NumberOfEntries) + throw new InvalidOperationException(string.Format("Current page is invalid. Search position ({0}) exceeds number of entries ({1}). 
Page: {2}.", _currentPage.LastSearchPosition, _currentPage.NumberOfEntries, _currentPage)); + return _currentPage.GetNode(_currentPage.LastSearchPosition); } } @@ -163,17 +159,6 @@ public ValueReader CreateReaderForCurrent() return NodeHeader.Reader(_tx, Current); } - public IEnumerable DumpValues() - { - if(Seek(Slice.BeforeAllKeys) == false) - yield break; - - do - { - yield return CurrentKey.ToString(); - } while (MoveNext()); - } - public void Dispose() { } @@ -190,6 +175,17 @@ public long TreeRootPage public static class IteratorExtensions { + public static IEnumerable DumpValues(this IIterator self) + { + if (self.Seek(Slice.BeforeAllKeys) == false) + yield break; + + do + { + yield return self.CurrentKey.ToString(); + } while (self.MoveNext()); + } + public unsafe static bool ValidateCurrentKey(this IIterator self, NodeHeader* node, SliceComparer cmp) { if (self.RequiredPrefix != null) diff --git a/Voron/Trees/TreeRebalancer.cs b/Voron/Trees/TreeRebalancer.cs index bf8d74b532..1591e7cfde 100644 --- a/Voron/Trees/TreeRebalancer.cs +++ b/Voron/Trees/TreeRebalancer.cs @@ -2,6 +2,7 @@ using System.Diagnostics; using Voron.Debugging; using Voron.Impl; +using Voron.Impl.Paging; namespace Voron.Trees { @@ -30,14 +31,13 @@ public Page Execute(Cursor cursor, Page page) if (page.NumberOfEntries == 0) // empty page, just delete it and fixup parent { - // need to delete the implicit left page, shift right - if (parentPage.LastSearchPosition == 0 && parentPage.NumberOfEntries > 2) - { + // need to change the implicit left page + if (parentPage.LastSearchPosition == 0 && parentPage.NumberOfEntries > 2) + { var newImplicit = parentPage.GetNode(1)->PageNumber; - parentPage.RemoveNode(0); - parentPage.RemoveNode(0); - parentPage.AddPageRefNode(0, Slice.Empty, newImplicit); - } + parentPage.RemoveNode(0); + parentPage.ChangeImplicitRefPageNode(newImplicit); + } else // will be set to rights by the next rebalance call { parentPage.RemoveNode(parentPage.LastSearchPositionOrLastEntry); @@ -219,8 +219,8 @@ private void MoveBranchNode(Page parentPage, Page from, Page to) var implicitLeftKey = GetActualKey(to, 0); var leftPageNumber = to.GetNode(0)->PageNumber; to.AddPageRefNode(1, implicitLeftKey, leftPageNumber); - to.AddPageRefNode(0, Slice.BeforeAllKeys, pageNum); - to.RemoveNode(1); + + to.ChangeImplicitRefPageNode(pageNum); // setup the new implicit node } else { @@ -229,12 +229,9 @@ private void MoveBranchNode(Page parentPage, Page from, Page to) if (from.LastSearchPositionOrLastEntry == 0) { - // cannot just remove the left node, need to adjust those var rightPageNumber = from.GetNode(1)->PageNumber; from.RemoveNode(0); // remove the original implicit node - from.RemoveNode(0); // remove the next node that we now turned into implicit - from.EnsureHasSpaceFor(_tx, Slice.BeforeAllKeys, -1); - from.AddPageRefNode(0, Slice.BeforeAllKeys, rightPageNumber); + from.ChangeImplicitRefPageNode(rightPageNumber); // setup the new implicit node Debug.Assert(from.NumberOfEntries >= 2); } else diff --git a/Voron/Util/PageTable.cs b/Voron/Util/PageTable.cs index 54583c3597..051a31c836 100644 --- a/Voron/Util/PageTable.cs +++ b/Voron/Util/PageTable.cs @@ -107,13 +107,9 @@ public bool TryGetValue(Transaction tx, long page, out JournalFile.PagePosition public long MaxTransactionId() { - var transactionIds = _values.Values.Select(x => x[x.Count - 1].Value) - .Where(x => x != null) - .ToList(); - if(transactionIds.Any()) - return transactionIds.Max(x => x.TransactionId); - - return -1; + return _values.Values.Select(x 
=> x[x.Count - 1].Value) + .Where(x => x != null) + .Max(x => x.TransactionId); } public List> AllPagesOlderThan(long oldestActiveTransaction) @@ -124,20 +120,7 @@ public long MaxTransactionId() return val.Value.TransactionId < oldestActiveTransaction; }).Select(x => new KeyValuePair(x.Key, x.Value[x.Value.Count - 1].Value)) .ToList(); - } - public void SetItemsNoTransaction(Dictionary ptt) - { - foreach (var item in ptt) - { - var result = _values.TryAdd(item.Key, ImmutableAppendOnlyList.Empty.Append(new PageValue - { - Transaction = -1, - Value = item.Value - })); - if (result == false) - throw new InvalidOperationException("Duplicate item or calling SetItemsNoTransaction twice? " + item.Key); - } } public long GetLastSeenTransaction() diff --git a/Voron/Util/TreeExtensions.cs b/Voron/Util/TreeExtensions.cs index 2b6465fbb7..392b9aba9e 100644 --- a/Voron/Util/TreeExtensions.cs +++ b/Voron/Util/TreeExtensions.cs @@ -18,11 +18,15 @@ public static Tree GetTree(this StorageEnvironmentState state, Transaction tx, s if (treeName.Equals(Constants.FreeSpaceTreeName, StringComparison.InvariantCultureIgnoreCase)) return state.FreeSpaceRoot; + Tree tree = tx.ReadTree(treeName); if (tree != null) return tree; + if (tx.Flags == TransactionFlags.ReadWrite) + return tx.Environment.CreateTree(tx, treeName); + throw new InvalidOperationException("No such tree: " + treeName); } } diff --git a/Voron/ValueReader.cs b/Voron/ValueReader.cs index b49f38ee2e..1992a2b470 100644 --- a/Voron/ValueReader.cs +++ b/Voron/ValueReader.cs @@ -2,122 +2,234 @@ using System.IO; using System.Text; using Voron.Impl; +using Voron.Util; +using Voron.Util.Conversion; namespace Voron { - public unsafe class ValueReader - { - private readonly byte* _val; - private readonly byte[] _buffer; - private readonly int _len; + public unsafe struct ValueReader + { + [ThreadStatic] + private static byte[] tmpBuf; + private int _pos; - public int Length { get { return _len; } } - - public void Reset() - { - _pos = 0; - } - - public ValueReader(Stream stream) - { - var position = stream.Position; - _len = (int)(stream.Length - stream.Position); - _buffer = new byte[_len]; - - int pos = 0; - while (true) - { - var read = stream.Read(_buffer, pos, _buffer.Length - pos); - if (read == 0) - break; - pos += read; - } - stream.Position = position; - - } - - public Stream AsStream() + private readonly byte[] _buffer; + private readonly int _len; + private readonly byte* _val; + + public ValueReader(Stream stream) + { + long position = stream.Position; + _len = (int) (stream.Length - stream.Position); + _buffer = new byte[_len]; + + int pos = 0; + while (true) + { + int read = stream.Read(_buffer, pos, _buffer.Length - pos); + if (read == 0) + break; + pos += read; + } + stream.Position = position; + + _pos = 0; + _val = null; + } + + public ValueReader(byte[] array, int len) { - if (_val == null) - return new MemoryStream(_buffer, writable: false); - return new UnmanagedMemoryStream(_val, _len, _len, FileAccess.Read); + if (array == null) throw new ArgumentNullException("array"); + _buffer = array; + _len = len; + _pos = 0; + _val = null; } - public ValueReader(byte* val, int len) - { - _val = val; - _len = len; - } - - public int Read(byte[] buffer, int offset, int count) - { - fixed (byte* b = buffer) - return Read(b + offset, count); - } - - public int Read(byte* buffer, int count) - { - count = Math.Min(count, _len - _pos); - - if (_val == null) - { - fixed (byte* b = _buffer) - NativeMethods.memcpy(buffer, b + _pos, count); - } - else - { - 
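+ // pointer-backed reader: copy directly from the unmanaged source at the current read position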
NativeMethods.memcpy(buffer, _val + _pos, count); - } - _pos += count; - - return count; - } - - public int ReadInt32() - { - if (_len - _pos < sizeof(int)) - throw new EndOfStreamException(); - var buffer = new byte[sizeof(int)]; - - Read(buffer, 0, sizeof (int)); - - return BitConverter.ToInt32(buffer, 0); - } - - public long ReadInt64() - { - if (_len - _pos < sizeof(long)) - throw new EndOfStreamException(); - var buffer = new byte[sizeof(long)]; - - Read(buffer, 0, sizeof(long)); - - return BitConverter.ToInt64(buffer, 0); - } - - public string ToStringValue() - { - return Encoding.UTF8.GetString(ReadBytes(_len - _pos)); - } - - public byte[] ReadBytes(int length) - { - var size = Math.Min(length, _len - _pos); - var buffer = new byte[size]; - Read(buffer, 0, size); - return buffer; - } - - public void CopyTo(Stream stream) - { - var buffer = new byte[4096]; - while (true) - { - var read = Read(buffer, 0, buffer.Length); - if (read == 0) - return; - stream.Write(buffer, 0, read); - } - } - } -} \ No newline at end of file + public ValueReader(byte* val, int len) + { + _val = val; + _len = len; + _pos = 0; + _buffer = null; + } + + public int Length + { + get { return _len; } + } + + public bool EndOfData + { + get { return _len == _pos; } + } + + public void Reset() + { + _pos = 0; + } + + public Stream AsStream() + { + if (_val == null) + return new MemoryStream(_buffer, false); + return new UnmanagedMemoryStream(_val, _len, _len, FileAccess.Read); + } + + public int Read(byte[] buffer, int offset, int count) + { + fixed (byte* b = buffer) + return Read(b + offset, count); + } + + public int Read(byte* buffer, int count) + { + count = Math.Min(count, _len - _pos); + + if (_val == null) + { + fixed (byte* b = _buffer) + NativeMethods.memcpy(buffer, b + _pos, count); + } + else + { + NativeMethods.memcpy(buffer, _val + _pos, count); + } + _pos += count; + + return count; + } + + public int ReadLittleEndianInt32() + { + if (_len - _pos < sizeof (int)) + throw new EndOfStreamException(); + var buffer = EnsureTempBuffer(sizeof (int)); + + Read(buffer, 0, sizeof (int)); + + return BitConverter.ToInt32(buffer, 0); + } + + + public long ReadLittleEndianInt64() + { + if (_len - _pos < sizeof (long)) + throw new EndOfStreamException(); + var buffer = EnsureTempBuffer(sizeof(long)); + + Read(buffer, 0, sizeof (long)); + + return BitConverter.ToInt64(buffer, 0); + } + + public int ReadBigEndianInt32() + { + if (_len - _pos < sizeof(int)) + throw new EndOfStreamException(); + var buffer = EnsureTempBuffer(sizeof(int)); + + Read(buffer, 0, sizeof(int)); + + return EndianBitConverter.Big.ToInt32(buffer, 0); + } + + + public long ReadBigEndianInt64() + { + if (_len - _pos < sizeof(long)) + throw new EndOfStreamException(); + var buffer = EnsureTempBuffer(sizeof(long)); + + Read(buffer, 0, sizeof(long)); + + return EndianBitConverter.Big.ToInt64(buffer, 0); + } + + + private byte[] EnsureTempBuffer(int size) + { + if (tmpBuf != null && tmpBuf.Length >= size) + return tmpBuf; + return tmpBuf = new byte[Utils.NearestPowerOfTwo(size)]; + } + + public string ToStringValue() + { + int length = _len - _pos; + int used; + return Encoding.UTF8.GetString(ReadBytes(length, out used), 0, used); + } + + public override string ToString() + { + var old = _pos; + var stringValue = ToStringValue(); + _pos = old; + return stringValue; + } + + public byte[] ReadBytes(int length, out int used) + { + int size = Math.Min(length, _len - _pos); + var buffer = EnsureTempBuffer(length); + used = Read(buffer, 0, size); + 
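+			// note: the returned array is the shared [ThreadStatic] scratch buffer from EnsureTempBuffer,
+			// so it may be longer than 'length' and is reused by the next call on this thread; only the
+			// first 'used' bytes are valid, and callers must copy them out before reading again.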
return buffer; + } + + public void CopyTo(Stream stream) + { + var buffer = new byte[4096]; + while (true) + { + int read = Read(buffer, 0, buffer.Length); + if (read == 0) + return; + stream.Write(buffer, 0, read); + } + } + + public int CompareTo(ValueReader other) + { + int r = CompareData(other, Math.Min(Length, other.Length)); + if (r != 0) + return r; + return Length - other.Length; + } + + private int CompareData(ValueReader other, int len) + { + if (_buffer != null) + { + fixed (byte* a = _buffer) + { + if (other._buffer != null) + { + fixed (byte* b = other._buffer) + { + return NativeMethods.memcmp(a, b, len); + } + } + return NativeMethods.memcmp(a, other._val, len); + } + } + if (other._buffer != null) + { + fixed (byte* b = other._buffer) + { + return NativeMethods.memcmp(_val, b, len); + } + } + return NativeMethods.memcmp(_val, other._val, len); + } + + public Slice AsSlice() + { + if (_len >= ushort.MaxValue) + throw new InvalidOperationException("Cannot convert to slice, len is too big: " + _len); + if (_buffer != null) + return new Slice(_buffer, (ushort) _len); + return new Slice(_val, (ushort) _len); + } + } +} diff --git a/Voron/Voron.csproj b/Voron/Voron.csproj index 6ba9fb800b..c27d2d73d1 100644 --- a/Voron/Voron.csproj +++ b/Voron/Voron.csproj @@ -1,5 +1,5 @@  - + Debug @@ -9,7 +9,7 @@ Properties Voron Voron - v4.5 + v4.5.1 512 ..\ @@ -62,14 +62,15 @@ + - - - + + + @@ -115,7 +116,6 @@ - From f808b8bd0dbdf5ffccb0d99841fa9bd7eae463ea Mon Sep 17 00:00:00 2001 From: Michael Yarichuk Date: Mon, 26 May 2014 17:14:42 +0300 Subject: [PATCH 7/7] updated repo to latest version --- Voron.Tests/Backups/Full.cs | 6 +- Voron.Tests/Backups/Incremental.cs | 18 +- .../Bugs/AccessViolationWithIteratorUsage.cs | 8 +- .../Bugs/ChecksumMismatchAfterRecovery.cs | 6 +- Voron.Tests/Bugs/Deletes.cs | 2 +- Voron.Tests/Bugs/EmptyTree.cs | 6 +- Voron.Tests/Bugs/FlushingToDataFile.cs | 22 +- .../Bugs/IndexPointToNonLeafPageTests.cs | 4 +- .../Bugs/InvalidReleasesOfScratchPages.cs | 4 +- Voron.Tests/Bugs/Isolation.cs | 20 +- Voron.Tests/Bugs/Iterating.cs | 2 +- Voron.Tests/Bugs/LargeValues.cs | 2 +- Voron.Tests/Bugs/MemoryAccess.cs | 8 +- Voron.Tests/Bugs/MultiAdds.cs | 14 +- Voron.Tests/Bugs/MultiReads.cs | 4 +- Voron.Tests/Bugs/PageSplitter.cs | 10 +- Voron.Tests/Bugs/PageTableIssue.cs | 10 +- .../PagesFilteredOutByJournalApplicator.cs | 22 +- Voron.Tests/Bugs/Recovery.cs | 34 +-- Voron.Tests/Bugs/RecoveryMultipleJournals.cs | 36 ++-- Voron.Tests/Bugs/RecoveryWithManualFlush.cs | 24 +-- Voron.Tests/Bugs/Snapshots.cs | 8 +- Voron.Tests/Bugs/TreeRebalancer.cs | 24 +-- Voron.Tests/Bugs/UpdateLastItem.cs | 10 +- Voron.Tests/Journal/BasicActions.cs | 10 +- Voron.Tests/Journal/EdgeCases.cs | 8 +- Voron.Tests/Journal/Mvcc.cs | 8 +- Voron.Tests/MultiTreeSize.cs | 6 +- Voron.Tests/MultiValueTree.cs | 48 ++--- Voron.Tests/Optimizations/Writes.cs | 28 +-- Voron.Tests/Storage/Batches.cs | 40 ++-- Voron.Tests/Storage/BigValue.cs | 26 +-- Voron.Tests/Storage/Concurrency.cs | 66 +++--- Voron.Tests/Storage/FreeScratchPages.cs | 4 +- Voron.Tests/Storage/Increments.cs | 10 +- .../MemoryMapWithoutBackingPagerTest.cs | 2 +- Voron.Tests/Storage/MultiTransactions.cs | 2 +- Voron.Tests/Storage/Quotas.cs | 2 +- Voron.Tests/Storage/Restarts.cs | 16 +- Voron.Tests/Storage/Snapshots.cs | 4 +- Voron.Tests/Storage/SplittingVeryBig.cs | 8 +- Voron.Tests/Storage/VeryBig.cs | 6 +- Voron.Tests/StorageTest.cs | 2 +- Voron.Tests/Trees/Basic.cs | 20 +- Voron.Tests/Trees/CanDefrag.cs | 6 +- Voron.Tests/Trees/CanIterateBackward.cs 
| 18 +- Voron.Tests/Trees/Deletes.cs | 12 +- Voron.Tests/Trees/FreeSpaceTest.cs | 6 +- Voron.Tests/Trees/Iteration.cs | 8 +- Voron.Tests/Trees/MultipleTrees.cs | 10 +- Voron.Tests/Trees/Rebalance.cs | 66 +++--- Voron.Tests/Trees/Updates.cs | 36 ++-- Voron/Impl/FreeSpace/FreeSpaceHandling.cs | 32 +-- Voron/Impl/SnapshotReader.cs | 14 +- Voron/Impl/StorageEnvironmentState.cs | 8 +- Voron/Impl/Transaction.cs | 45 ++-- Voron/Impl/TransactionMergingWriter.cs | 12 +- Voron/StorageEnvironment.cs | 14 +- Voron/Trees/Tree.MultiTree.cs | 103 ++++----- Voron/Trees/Tree.cs | 199 +++++++++--------- Voron/Trees/TreeIterator.cs | 4 +- Voron/ValueReader.cs | 2 + 62 files changed, 596 insertions(+), 619 deletions(-) diff --git a/Voron.Tests/Backups/Full.cs b/Voron.Tests/Backups/Full.cs index 33dd33462c..6ebfc4fef4 100644 --- a/Voron.Tests/Backups/Full.cs +++ b/Voron.Tests/Backups/Full.cs @@ -45,7 +45,7 @@ public void CanBackupAndRestore() { for (int i = 0; i < 500; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -60,7 +60,7 @@ public void CanBackupAndRestore() { for (int i = 500; i < 1000; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -79,7 +79,7 @@ public void CanBackupAndRestore() { for (int i = 0; i < 1000; i++) { - var readResult = tx.State.Root.Read(tx, "items/" + i); + var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); diff --git a/Voron.Tests/Backups/Incremental.cs b/Voron.Tests/Backups/Incremental.cs index 15a3e505f7..543e5ab1af 100644 --- a/Voron.Tests/Backups/Incremental.cs +++ b/Voron.Tests/Backups/Incremental.cs @@ -45,7 +45,7 @@ public void CanBackupAndRestoreOnEmptyStorage() { for (int i = 0; i < 500; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -65,7 +65,7 @@ public void CanBackupAndRestoreOnEmptyStorage() { for (int i = 0; i < 500; i++) { - var readResult = tx.State.Root.Read(tx, "items/" + i); + var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); @@ -88,7 +88,7 @@ public void CanDoMultipleIncrementalBackupsAndRestoreOneByOne() { for (int i = 0; i < 300; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -100,7 +100,7 @@ public void CanDoMultipleIncrementalBackupsAndRestoreOneByOne() { for (int i = 300; i < 600; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -112,7 +112,7 @@ public void CanDoMultipleIncrementalBackupsAndRestoreOneByOne() { for (int i = 600; i < 1000; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -138,7 +138,7 @@ public void CanDoMultipleIncrementalBackupsAndRestoreOneByOne() { for (int i = 0; i < 1000; i++) { - var readResult = tx.State.Root.Read(tx, "items/" + i); + var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); @@ -160,7 +160,7 @@ public void 
IncrementalBackupShouldCopyJustNewPagesSinceLastBackup() { for (int i = 0; i < 5; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -178,7 +178,7 @@ public void IncrementalBackupShouldCopyJustNewPagesSinceLastBackup() { for (int i = 5; i < 10; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -205,7 +205,7 @@ public void IncrementalBackupShouldCopyJustNewPagesSinceLastBackup() { for (int i = 0; i < 10; i++) { - var readResult = tx.State.Root.Read(tx, "items/" + i); + var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); diff --git a/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs b/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs index 4292f70546..d2a0f90e80 100644 --- a/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs +++ b/Voron.Tests/Bugs/AccessViolationWithIteratorUsage.cs @@ -22,8 +22,8 @@ public void ShouldNotThrow() { var tree = Env.CreateTree(tx, "test"); - tree.Add(tx, "items/1", new MemoryStream()); - tree.Add(tx, "items/2", new MemoryStream()); + tree.Add("items/1", new MemoryStream()); + tree.Add("items/2", new MemoryStream()); tx.Commit(); } @@ -35,7 +35,7 @@ public void ShouldNotThrow() { for (int i = 0; i < 10; i++) { - Env.State.GetTree(tx, "test").Add(tx, "items/" + i, new MemoryStream(new byte[2048])); + Env.State.GetTree(tx, "test").Add("items/" + i, new MemoryStream(new byte[2048])); } tx.Commit(); @@ -47,7 +47,7 @@ public void ShouldNotThrow() { for (int i = 10; i < 40; i++) { - Env.State.GetTree(tx, "test").Add(tx, "items/" + i, new MemoryStream(new byte[2048])); + Env.State.GetTree(tx, "test").Add("items/" + i, new MemoryStream(new byte[2048])); } tx.Commit(); diff --git a/Voron.Tests/Bugs/ChecksumMismatchAfterRecovery.cs b/Voron.Tests/Bugs/ChecksumMismatchAfterRecovery.cs index 776df3d042..85410bba26 100644 --- a/Voron.Tests/Bugs/ChecksumMismatchAfterRecovery.cs +++ b/Voron.Tests/Bugs/ChecksumMismatchAfterRecovery.cs @@ -45,7 +45,7 @@ public void ShouldNotThrowChecksumMismatch() { for (int i = 0; i < 50; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -55,7 +55,7 @@ public void ShouldNotThrowChecksumMismatch() { for (int i = 50; i < 100; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); @@ -71,7 +71,7 @@ public void ShouldNotThrowChecksumMismatch() for (int i = 0; i < 100; i++) { - var readResult = tx.State.Root.Read(tx, "items/" + i); + var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); diff --git a/Voron.Tests/Bugs/Deletes.cs b/Voron.Tests/Bugs/Deletes.cs index f1a15452af..3578be35c6 100644 --- a/Voron.Tests/Bugs/Deletes.cs +++ b/Voron.Tests/Bugs/Deletes.cs @@ -49,7 +49,7 @@ public void RebalancerIssue() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var t1 = tx.Environment.State.GetTree(tx,"tree1"); - t1.Delete(tx, "Foo180"); // rebalancer fails to move 1st node from one branch to another + t1.Delete("Foo180"); // rebalancer fails to move 1st node from one branch to another } } } diff --git a/Voron.Tests/Bugs/EmptyTree.cs 
b/Voron.Tests/Bugs/EmptyTree.cs index c2b452b758..a4b9a448d1 100644 --- a/Voron.Tests/Bugs/EmptyTree.cs +++ b/Voron.Tests/Bugs/EmptyTree.cs @@ -18,7 +18,7 @@ public void ShouldBeEmpty() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var treeIterator = tx.Environment.State.GetTree(tx,"events").Iterate(tx); + var treeIterator = tx.Environment.State.GetTree(tx,"events").Iterate(); Assert.False(treeIterator.Seek(Slice.AfterAllKeys)); @@ -43,7 +43,7 @@ public void SurviveRestart() using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"events").Add(tx, "test", new MemoryStream(0)); + tx.Environment.State.GetTree(tx,"events").Add("test", new MemoryStream(0)); tx.Commit(); } @@ -61,7 +61,7 @@ public void SurviveRestart() using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = tx.Environment.State.GetTree(tx,"events"); - var readResult = tree.Read(tx, "test"); + var readResult = tree.Read("test"); Assert.NotNull(readResult); tx.Commit(); diff --git a/Voron.Tests/Bugs/FlushingToDataFile.cs b/Voron.Tests/Bugs/FlushingToDataFile.cs index a45af60e70..85c756ba51 100644 --- a/Voron.Tests/Bugs/FlushingToDataFile.cs +++ b/Voron.Tests/Bugs/FlushingToDataFile.cs @@ -36,14 +36,14 @@ public unsafe void ReadTransactionShouldNotReadFromJournalSnapshotIfJournalWasFl using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "foo/0", new MemoryStream(value1)); + tx.State.Root.Add("foo/0", new MemoryStream(value1)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "foo/1", new MemoryStream(value1)); + tx.State.Root.Add("foo/1", new MemoryStream(value1)); tx.Commit(); } @@ -60,7 +60,7 @@ public unsafe void ReadTransactionShouldNotReadFromJournalSnapshotIfJournalWasFl for (var i = 0; i < 2; i++) { - var readResult = tx.State.Root.Read(tx, "foo/" + i); + var readResult = tx.State.Root.Read("foo/" + i); Assert.NotNull(readResult); Assert.Equal(value1.Length, readResult.Reader.Length); @@ -88,29 +88,29 @@ public void FlushingOperationShouldHaveOwnScratchPagerStateReference() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "foo/0", new MemoryStream(value1)); - tx.State.Root.Add(tx, "foo/1", new MemoryStream(value1)); + tx.State.Root.Add("foo/0", new MemoryStream(value1)); + tx.State.Root.Add("foo/1", new MemoryStream(value1)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "foo/0", new MemoryStream(value1)); + tx.State.Root.Add("foo/0", new MemoryStream(value1)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "foo/4", new MemoryStream(value1)); + tx.State.Root.Add("foo/4", new MemoryStream(value1)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var readResult = tx.State.Root.Read(tx, "foo/0"); + var readResult = tx.State.Root.Read("foo/0"); Assert.NotNull(readResult); Assert.Equal(value1.Length, readResult.Reader.Length); @@ -126,7 +126,7 @@ public void FlushingOperationShouldHaveOwnScratchPagerStateReference() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var readResult = tx.State.Root.Read(tx, "foo/0"); + var readResult = tx.State.Root.Read("foo/0"); Assert.NotNull(readResult); Assert.Equal(value1.Length, readResult.Reader.Length); @@ -164,7 +164,7 @@ public void OldestActiveTransactionShouldBeCalculatedProperly() { foreach (var tree in 
trees) { - tx.Environment.State.GetTree(tx,tree).Add(tx, string.Format("key/{0}/{1}", a, i), new MemoryStream(buffer)); + tx.Environment.State.GetTree(tx,tree).Add(string.Format("key/{0}/{1}", a, i), new MemoryStream(buffer)); } } @@ -183,7 +183,7 @@ public void OldestActiveTransactionShouldBeCalculatedProperly() { foreach (var tree in trees) { - using (var iterator = tx.Environment.State.GetTree(tx,tree).Iterate(tx)) + using (var iterator = tx.Environment.State.GetTree(tx,tree).Iterate()) { if (!iterator.Seek(Slice.BeforeAllKeys)) continue; diff --git a/Voron.Tests/Bugs/IndexPointToNonLeafPageTests.cs b/Voron.Tests/Bugs/IndexPointToNonLeafPageTests.cs index 61d2ced03a..c6685943cf 100644 --- a/Voron.Tests/Bugs/IndexPointToNonLeafPageTests.cs +++ b/Voron.Tests/Bugs/IndexPointToNonLeafPageTests.cs @@ -33,7 +33,7 @@ public void ShouldProperlyMovePositionForNextPageAllocationInScratchBufferPool() { enumerator.MoveNext(); - tx.State.Root.Add(tx, enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value)); + tx.State.Root.Add(enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value)); } tx.Commit(); @@ -46,7 +46,7 @@ public void ShouldProperlyMovePositionForNextPageAllocationInScratchBufferPool() { foreach (var item in sequentialLargeIds) { - var readResult = tx.State.Root.Read(tx, item.Key.ToString("0000000000000000")); + var readResult = tx.State.Root.Read(item.Key.ToString("0000000000000000")); Assert.NotNull(readResult); diff --git a/Voron.Tests/Bugs/InvalidReleasesOfScratchPages.cs b/Voron.Tests/Bugs/InvalidReleasesOfScratchPages.cs index a8223e9593..9c725cb40a 100644 --- a/Voron.Tests/Bugs/InvalidReleasesOfScratchPages.cs +++ b/Voron.Tests/Bugs/InvalidReleasesOfScratchPages.cs @@ -22,12 +22,12 @@ public void ReadTransactionCanReadJustCommittedValue() using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { - txw.Environment.State.GetTree(txw, "tree0").Add(txw, "key/1", new MemoryStream()); + txw.Environment.State.GetTree(txw, "tree0").Add("key/1", new MemoryStream()); txw.Commit(); using (var txr = env.NewTransaction(TransactionFlags.Read)) { - Assert.NotNull(txr.Environment.State.GetTree(txr, "tree0").Read(txr, "key/1")); + Assert.NotNull(txr.Environment.State.GetTree(txr, "tree0").Read("key/1")); } } } diff --git a/Voron.Tests/Bugs/Isolation.cs b/Voron.Tests/Bugs/Isolation.cs index eb86944824..3052ab2807 100644 --- a/Voron.Tests/Bugs/Isolation.cs +++ b/Voron.Tests/Bugs/Isolation.cs @@ -28,7 +28,7 @@ public void MultiTreeIteratorShouldBeIsolated1() { var key = Write(env, 10); - using (var iterator = txr.ReadTree("tree0").MultiRead(txr, "key/1")) + using (var iterator = txr.ReadTree("tree0").MultiRead("key/1")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); @@ -68,7 +68,7 @@ public void MultiTreeIteratorShouldBeIsolated2() { var key = Delete(env, 10); - using (var iterator = txr.ReadTree("tree0").MultiRead(txr, "key/1")) + using (var iterator = txr.ReadTree("tree0").MultiRead("key/1")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); @@ -93,7 +93,7 @@ private static string Delete(StorageEnvironment env, int i) { var key = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + i.ToString("D2"); - txw.ReadTree("tree0").MultiDelete(txw, "key/1", key); + txw.ReadTree("tree0").MultiDelete("key/1", key); txw.Commit(); return key; @@ -106,7 +106,7 @@ private static string Write(StorageEnvironment env, int i) { var key = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + i.ToString("D2"); - txw.ReadTree("tree0").MultiAdd(txw, "key/1", 
key); + txw.ReadTree("tree0").MultiAdd("key/1", key); txw.Commit(); return key; @@ -131,8 +131,8 @@ public void ScratchPagesShouldNotBeReleasedUntilNotUsed() { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx, "tree0").Add(tx, string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); - tx.Environment.State.GetTree(tx, "tree0").Add(tx, string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); + tx.Environment.State.GetTree(tx, "tree0").Add(string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); + tx.Environment.State.GetTree(tx, "tree0").Add(string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); tx.Commit(); } @@ -140,14 +140,14 @@ public void ScratchPagesShouldNotBeReleasedUntilNotUsed() using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx, "tree1").Add(tx, "yek/1", new MemoryStream()); + tx.Environment.State.GetTree(tx, "tree1").Add("yek/1", new MemoryStream()); tx.Commit(); } using (var txr = env.NewTransaction(TransactionFlags.Read)) { - using (var iterator = txr.Environment.State.GetTree(txr, "tree0").Iterate(txr)) + using (var iterator = txr.Environment.State.GetTree(txr, "tree0").Iterate()) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); // all pages are from scratch (one from position 11) @@ -158,7 +158,7 @@ public void ScratchPagesShouldNotBeReleasedUntilNotUsed() using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = txw.Environment.State.GetTree(txw, "tree1"); - tree.Add(txw, string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11) + tree.Add(string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11) txw.Commit(); } @@ -168,7 +168,7 @@ public void ScratchPagesShouldNotBeReleasedUntilNotUsed() using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = txw.Environment.State.GetTree(txw, "tree1"); - tree.Add(txw, "fake", new MemoryStream()); + tree.Add("fake", new MemoryStream()); txw.Commit(); } diff --git a/Voron.Tests/Bugs/Iterating.cs b/Voron.Tests/Bugs/Iterating.cs index d0eb57b245..55296e636c 100644 --- a/Voron.Tests/Bugs/Iterating.cs +++ b/Voron.Tests/Bugs/Iterating.cs @@ -24,7 +24,7 @@ public void IterationShouldNotFindAnyRecordsAndShouldNotThrowWhenNumberOfEntries using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = tx.ReadTree("tree"); - tree.Add(tx, @"Raven\Database\1", StreamFor("123")); + tree.Add(@"Raven\Database\1", StreamFor("123")); tx.Commit(); } diff --git a/Voron.Tests/Bugs/LargeValues.cs b/Voron.Tests/Bugs/LargeValues.cs index fdd5417ac4..a3dc74de0e 100644 --- a/Voron.Tests/Bugs/LargeValues.cs +++ b/Voron.Tests/Bugs/LargeValues.cs @@ -32,7 +32,7 @@ public void ShouldProperlyRecover() { enumerator.MoveNext(); - tx.State.Root.Add(tx, enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value)); + tx.State.Root.Add(enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value)); } tx.Commit(); diff --git a/Voron.Tests/Bugs/MemoryAccess.cs b/Voron.Tests/Bugs/MemoryAccess.cs index 0e81094aed..9cb22ec42a 100644 --- a/Voron.Tests/Bugs/MemoryAccess.cs +++ b/Voron.Tests/Bugs/MemoryAccess.cs @@ -23,8 +23,8 @@ public void ShouldNotThrowAccessViolation() { foreach (var tree in trees) { - tx.State.GetTree(tx, tree).Add(tx, 
string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); - tx.State.GetTree(tx, tree).Add(tx, string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); + tx.State.GetTree(tx, tree).Add(string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); + tx.State.GetTree(tx, tree).Add(string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); } tx.Commit(); @@ -35,7 +35,7 @@ public void ShouldNotThrowAccessViolation() { foreach (var tree in trees) { - using (var iterator = txr.State.GetTree(txr, tree).Iterate(txr)) + using (var iterator = txr.State.GetTree(txr, tree).Iterate()) { if (!iterator.Seek(Slice.BeforeAllKeys)) continue; @@ -44,7 +44,7 @@ public void ShouldNotThrowAccessViolation() using (var txw = Env.NewTransaction(TransactionFlags.ReadWrite)) { - txw.State.GetTree(txw, tree).Add(txw, string.Format("key/{0}/0/0", new string('0', 1000)), new MemoryStream()); + txw.State.GetTree(txw, tree).Add(string.Format("key/{0}/0/0", new string('0', 1000)), new MemoryStream()); txw.Commit(); } diff --git a/Voron.Tests/Bugs/MultiAdds.cs b/Voron.Tests/Bugs/MultiAdds.cs index 75df256499..3140c542be 100644 --- a/Voron.Tests/Bugs/MultiAdds.cs +++ b/Voron.Tests/Bugs/MultiAdds.cs @@ -57,7 +57,7 @@ public void MultiAdds_And_MultiDeletes_After_Causing_PageSplit_DoNot_Fail(int si var tree = tx.Environment.State.GetTree(tx,"foo"); foreach (var buffer in inputData) { - Assert.DoesNotThrow(() => tree.MultiAdd(tx, "ChildTreeKey", new Slice(buffer))); + Assert.DoesNotThrow(() => tree.MultiAdd("ChildTreeKey", new Slice(buffer))); } tx.Commit(); } @@ -68,7 +68,7 @@ public void MultiAdds_And_MultiDeletes_After_Causing_PageSplit_DoNot_Fail(int si for (int i = 0; i < inputData.Count; i++) { var buffer = inputData[i]; - Assert.DoesNotThrow(() => tree.MultiDelete(tx, "ChildTreeKey", new Slice(buffer))); + Assert.DoesNotThrow(() => tree.MultiDelete("ChildTreeKey", new Slice(buffer))); } tx.Commit(); @@ -125,7 +125,7 @@ public void SplitterIssue2() using (var tx = env.NewTransaction(TransactionFlags.Read)) { var tree = tx.Environment.State.GetTree(tx,"multi"); - using (var iterator = tree.MultiRead(tx, "0")) + using (var iterator = tree.MultiRead("0")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); @@ -153,7 +153,7 @@ public void SplitterIssue2() using (var tx = env.NewTransaction(TransactionFlags.Read)) { var tree = tx.Environment.State.GetTree(tx,"multi"); - using (var iterator = tree.MultiRead(tx, "0")) + using (var iterator = tree.MultiRead("0")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); @@ -190,7 +190,7 @@ public void CanAddMultiValuesUnderTheSameKeyToBatch() using (var tx = env.NewTransaction(TransactionFlags.Read)) { var tree = tx.Environment.State.GetTree(tx,"multitree0"); - using (var it = tree.MultiRead(tx, "key")) + using (var it = tree.MultiRead("key")) { Assert.True(it.Seek(Slice.BeforeAllKeys)); @@ -209,7 +209,7 @@ private void ValidateRecords(StorageEnvironment env, IEnumerable trees, in { foreach (var tree in trees) { - using (var iterator = tree.Iterate(tx)) + using (var iterator = tree.Iterate()) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); @@ -237,7 +237,7 @@ private void ValidateMultiRecords(StorageEnvironment env, IEnumerable tr foreach (var treeName in trees) { var tree = tx.Environment.State.GetTree(tx,treeName); - using (var iterator = tree.MultiRead(tx, (j % 10).ToString())) + using (var iterator = tree.MultiRead((j % 10).ToString())) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); diff --git 
a/Voron.Tests/Bugs/MultiReads.cs b/Voron.Tests/Bugs/MultiReads.cs index a73371b7f5..d66ccbdef3 100644 --- a/Voron.Tests/Bugs/MultiReads.cs +++ b/Voron.Tests/Bugs/MultiReads.cs @@ -16,8 +16,8 @@ public void MultiReadShouldKeepItemOrder() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.ReadTree(treeName).MultiAdd(tx, "queue1", "queue1/07000000-0000-0000-0000-000000000001"); - tx.ReadTree(treeName).MultiAdd(tx, "queue1", "queue1/07000000-0000-0000-0000-000000000002"); + tx.ReadTree(treeName).MultiAdd("queue1", "queue1/07000000-0000-0000-0000-000000000001"); + tx.ReadTree(treeName).MultiAdd("queue1", "queue1/07000000-0000-0000-0000-000000000002"); tx.Commit(); } diff --git a/Voron.Tests/Bugs/PageSplitter.cs b/Voron.Tests/Bugs/PageSplitter.cs index 998e2f6aa3..1caac0578f 100644 --- a/Voron.Tests/Bugs/PageSplitter.cs +++ b/Voron.Tests/Bugs/PageSplitter.cs @@ -50,7 +50,7 @@ public void TreeAdds_WithVeryLargeKey() for (int index = 0; index < inputData.Count; index++) { var keyString = inputData[index]; - Assert.DoesNotThrow(() => tree.Add(tx, keyString, new MemoryStream(new byte[] { 1, 2, 3, 4 }))); + Assert.DoesNotThrow(() => tree.Add(keyString, new MemoryStream(new byte[] { 1, 2, 3, 4 }))); } tx.Commit(); @@ -79,7 +79,7 @@ public void PageSplitterShouldCalculateSeparatorKeyCorrectly() { var tree = tx.Environment.State.GetTree(tx, treeName); - tree.Add(tx, id, new MemoryStream(testBuffer)); + tree.Add(id, new MemoryStream(testBuffer)); } tx.Commit(); @@ -112,9 +112,9 @@ public void PageSplitter_SmallRun() { var tree = tx.Environment.State.GetTree(tx, treeName); - tree.Add(tx, id, new MemoryStream(testBuffer)); + tree.Add(id, new MemoryStream(testBuffer)); - var readResult = tree.Read(tx, id); + var readResult = tree.Read(id); Assert.NotNull(readResult); } @@ -148,7 +148,7 @@ public void PageSplitterShouldCalculateSeparatorKeyCorrectly2() foreach (var treeName in trees) { var tree = tx.Environment.State.GetTree(tx, treeName); - tree.Add(tx, id, new MemoryStream(testBuffer)); + tree.Add(id, new MemoryStream(testBuffer)); } tx.Commit(); diff --git a/Voron.Tests/Bugs/PageTableIssue.cs b/Voron.Tests/Bugs/PageTableIssue.cs index 6709cd5586..0891c9665e 100644 --- a/Voron.Tests/Bugs/PageTableIssue.cs +++ b/Voron.Tests/Bugs/PageTableIssue.cs @@ -24,7 +24,7 @@ public void MissingScratchPagesInPageTable() var tree2 = Env.CreateTree(txw, "bar"); var tree3 = Env.CreateTree(txw, "baz"); - tree1.Add(txw, "foos/1", new MemoryStream(bytes)); + tree1.Add("foos/1", new MemoryStream(bytes)); txw.Commit(); @@ -35,7 +35,7 @@ public void MissingScratchPagesInPageTable() { var tree = Env.State.GetTree(txw, "bar"); - tree.Add(txw, "bars/1", new MemoryStream(bytes)); + tree.Add("bars/1", new MemoryStream(bytes)); txw.Commit(); @@ -52,7 +52,7 @@ public void MissingScratchPagesInPageTable() // here we have to put a big value to be sure that in next transaction we will put the // updated value into a new journal file - this is the key to expose the issue - tree.Add(txw, "bazs/1", new MemoryStream(bytesToFillFirstJournalCompletely)); + tree.Add("bazs/1", new MemoryStream(bytesToFillFirstJournalCompletely)); txw.Commit(); @@ -65,7 +65,7 @@ public void MissingScratchPagesInPageTable() { var tree = Env.State.GetTree(txw, "foo"); - tree.Add(txw, "foos/1", new MemoryStream()); + tree.Add("foos/1", new MemoryStream()); txw.Commit(); @@ -74,7 +74,7 @@ public void MissingScratchPagesInPageTable() Env.FlushLogToDataFile(); - Assert.NotNull(Env.State.GetTree(txr, "foo").Read(txr, "foos/1")); + 
Assert.NotNull(Env.State.GetTree(txr, "foo").Read("foos/1")); } } } diff --git a/Voron.Tests/Bugs/PagesFilteredOutByJournalApplicator.cs b/Voron.Tests/Bugs/PagesFilteredOutByJournalApplicator.cs index 18c7852df6..146355d5b8 100644 --- a/Voron.Tests/Bugs/PagesFilteredOutByJournalApplicator.cs +++ b/Voron.Tests/Bugs/PagesFilteredOutByJournalApplicator.cs @@ -20,7 +20,7 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_1() { var tree = Env.CreateTree(txw, "foo"); - tree.Add(txw, "bars/1", new MemoryStream(bytes)); + tree.Add("bars/1", new MemoryStream(bytes)); txw.Commit(); @@ -47,7 +47,7 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_1() { var tree = Env.State.GetTree(txw, "foo"); - tree.Add(txw, "bars/1", new MemoryStream()); + tree.Add("bars/1", new MemoryStream()); txw.Commit(); @@ -56,7 +56,7 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_1() Env.FlushLogToDataFile(); - Assert.NotNull(Env.State.GetTree(txr, "foo").Read(txr, "bars/1")); + Assert.NotNull(Env.State.GetTree(txr, "foo").Read("bars/1")); } } @@ -69,10 +69,10 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_2() { var tree = Env.CreateTree(txw, "foo"); - tree.Add(txw, "bars/1", new MemoryStream(bytes)); - tree.Add(txw, "bars/2", new MemoryStream(bytes)); - tree.Add(txw, "bars/3", new MemoryStream(bytes)); - tree.Add(txw, "bars/4", new MemoryStream(bytes)); + tree.Add("bars/1", new MemoryStream(bytes)); + tree.Add("bars/2", new MemoryStream(bytes)); + tree.Add("bars/3", new MemoryStream(bytes)); + tree.Add("bars/4", new MemoryStream(bytes)); txw.Commit(); @@ -83,8 +83,8 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_2() { var tree = Env.State.GetTree(txw, "foo"); - tree.Add(txw, "bars/0", new MemoryStream()); - tree.Add(txw, "bars/5", new MemoryStream()); + tree.Add("bars/0", new MemoryStream()); + tree.Add("bars/5", new MemoryStream()); txw.Commit(); @@ -111,7 +111,7 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_2() { var tree = Env.State.GetTree(txw, "foo"); - tree.Add(txw, "bars/4", new MemoryStream()); + tree.Add("bars/4", new MemoryStream()); txw.Commit(); @@ -120,7 +120,7 @@ public void CouldNotReadPagesThatWereFilteredOutByJournalApplicator_2() Env.FlushLogToDataFile(); - Assert.NotNull(Env.State.GetTree(txr, "foo").Read(txr, "bars/5")); + Assert.NotNull(Env.State.GetTree(txr, "foo").Read("bars/5")); } } } diff --git a/Voron.Tests/Bugs/Recovery.cs b/Voron.Tests/Bugs/Recovery.cs index 8a8396d598..176d238922 100644 --- a/Voron.Tests/Bugs/Recovery.cs +++ b/Voron.Tests/Bugs/Recovery.cs @@ -44,7 +44,7 @@ public void StorageRecoveryShouldWorkWhenThereSingleTransactionToRecoverFromLog( for (var i = 0; i < 100; i++) { - tree.Add(tx, "key" + i, new MemoryStream()); + tree.Add("key" + i, new MemoryStream()); } tx.Commit(); @@ -67,7 +67,7 @@ public void StorageRecoveryShouldWorkWhenThereSingleTransactionToRecoverFromLog( for (var i = 0; i < 100; i++) { - Assert.NotNull(tree.Read(tx, "key" + i)); + Assert.NotNull(tree.Read("key" + i)); } } } @@ -95,7 +95,7 @@ public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactio { for (var i = 0; i < 10000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream()); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream()); } } } @@ -127,8 +127,8 @@ public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactio { for (var i = 0; i < 10000; i++) { - 
tx.Environment.State.GetTree(tx,"atree").Add(tx, "a" + i, new MemoryStream()); - tx.Environment.State.GetTree(tx,"btree").MultiAdd(tx, "a" + i, "a" + i); + tx.Environment.State.GetTree(tx,"atree").Add("a" + i, new MemoryStream()); + tx.Environment.State.GetTree(tx,"btree").MultiAdd("a" + i, "a" + i); } } } @@ -155,7 +155,7 @@ public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions() for (var i = 0; i < 1000; i++) { - tree.Add(tx, "key" + i, new MemoryStream()); + tree.Add("key" + i, new MemoryStream()); } tx.Commit(); @@ -167,7 +167,7 @@ public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions() for (var i = 0; i < 1; i++) { - tree.Add(tx, "key" + i, new MemoryStream()); + tree.Add("key" + i, new MemoryStream()); } tx.Commit(); @@ -191,12 +191,12 @@ public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions() for (var i = 0; i < 1000; i++) { - Assert.NotNull(aTree.Read(tx, "key" + i)); + Assert.NotNull(aTree.Read("key" + i)); } for (var i = 0; i < 1; i++) { - Assert.NotNull(bTree.Read(tx, "key" + i)); + Assert.NotNull(bTree.Read("key" + i)); } } } @@ -219,7 +219,7 @@ public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions2() for (var i = 0; i < 1000; i++) { - tree.Add(tx, "key" + i, new MemoryStream()); + tree.Add("key" + i, new MemoryStream()); } tx.Commit(); @@ -231,7 +231,7 @@ public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions2() for (var i = 0; i < 5; i++) { - tree.Add(tx, "key" + i, new MemoryStream()); + tree.Add("key" + i, new MemoryStream()); } tx.Commit(); @@ -255,12 +255,12 @@ public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions2() for (var i = 0; i < 1000; i++) { - Assert.NotNull(aTree.Read(tx, "key" + i)); + Assert.NotNull(aTree.Read("key" + i)); } for (var i = 0; i < 5; i++) { - Assert.NotNull(bTree.Read(tx, "key" + i)); + Assert.NotNull(bTree.Read("key" + i)); } } } @@ -298,8 +298,8 @@ public void StorageRecoveryShouldWorkForSplitTransactions() for (var i = 0; i < count; i++) { - aTree.Add(tx, "a" + i, new MemoryStream(buffer)); - bTree.MultiAdd(tx, "a", "a" + i); + aTree.Add("a" + i, new MemoryStream(buffer)); + bTree.MultiAdd("a", "a" + i); } tx.Commit(); @@ -328,12 +328,12 @@ public void StorageRecoveryShouldWorkForSplitTransactions() for (var i = 0; i < count; i++) { - var read = aTree.Read(tx, "a" + i); + var read = aTree.Read("a" + i); Assert.NotNull(read); Assert.Equal(expectedString, read.Reader.ToStringValue()); } - using (var iterator = bTree.MultiRead(tx, "a")) + using (var iterator = bTree.MultiRead("a")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); diff --git a/Voron.Tests/Bugs/RecoveryMultipleJournals.cs b/Voron.Tests/Bugs/RecoveryMultipleJournals.cs index 6b8e5b2708..e990dec05e 100644 --- a/Voron.Tests/Bugs/RecoveryMultipleJournals.cs +++ b/Voron.Tests/Bugs/RecoveryMultipleJournals.cs @@ -31,7 +31,7 @@ public void CanRecoverAfterRestartWithMultipleFilesInSingleTransaction() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream(new byte[100])); } tx.Commit(); } @@ -50,7 +50,7 @@ public void CanRecoverAfterRestartWithMultipleFilesInSingleTransaction() { for (var i = 0; i < 1000; i++) { - var readResult = tx.Environment.State.GetTree(tx,"tree").Read(tx, "a" + i); + var readResult = tx.Environment.State.GetTree(tx,"tree").Read("a" + i); Assert.NotNull(readResult); { 
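 					// each value was written above as new MemoryStream(new byte[100]), hence the expected length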
Assert.Equal(100, readResult.Reader.Length); @@ -76,7 +76,7 @@ public void CanResetLogInfoAfterBigUncommitedTransaction() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream(new byte[100])); } //tx.Commit(); - not committing here } @@ -85,7 +85,7 @@ public void CanResetLogInfoAfterBigUncommitedTransaction() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a", new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a", new MemoryStream(new byte[100])); tx.Commit(); } @@ -106,7 +106,7 @@ public void CanResetLogInfoAfterBigUncommitedTransaction2() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream(new byte[100])); } tx.Commit(); } @@ -121,7 +121,7 @@ public void CanResetLogInfoAfterBigUncommitedTransaction2() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "b" + i, new MemoryStream(buffer)); + tx.Environment.State.GetTree(tx,"tree").Add("b" + i, new MemoryStream(buffer)); } //tx.Commit(); - not committing here } @@ -130,7 +130,7 @@ public void CanResetLogInfoAfterBigUncommitedTransaction2() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "b", new MemoryStream(buffer)); + tx.Environment.State.GetTree(tx,"tree").Add("b", new MemoryStream(buffer)); tx.Commit(); } @@ -143,7 +143,7 @@ public void CanResetLogInfoAfterBigUncommitedTransactionWithRestart() RequireFileBasedPager(); using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - Env.CreateTree(tx, "tree").Add(tx, "exists", new MemoryStream(new byte[100])); + Env.CreateTree(tx, "tree").Add("exists", new MemoryStream(new byte[100])); tx.Commit(); } @@ -152,7 +152,7 @@ public void CanResetLogInfoAfterBigUncommitedTransactionWithRestart() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream(new byte[100])); } tx.Commit(); } @@ -167,11 +167,11 @@ public void CanResetLogInfoAfterBigUncommitedTransactionWithRestart() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = Env.CreateTree(tx, "tree"); - Assert.NotNull(tree.Read(tx, "exists")); - Assert.Null(tree.Read(tx, "a1")); - Assert.Null(tree.Read(tx, "a100")); - Assert.Null(tree.Read(tx, "a500")); - Assert.Null(tree.Read(tx, "a1000")); + Assert.NotNull(tree.Read("exists")); + Assert.Null(tree.Read("a1")); + Assert.Null(tree.Read("a100")); + Assert.Null(tree.Read("a500")); + Assert.Null(tree.Read("a1000")); tx.Commit(); } @@ -192,7 +192,7 @@ public void CanResetLogInfoAfterBigUncommitedTransactionWithRestart2() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream(new byte[100])); } tx.Commit(); } @@ -203,7 +203,7 @@ public void CanResetLogInfoAfterBigUncommitedTransactionWithRestart2() { for (var i = 0; i < 1000; i++) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "b" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("b" + i, new MemoryStream(new 
byte[100])); } tx.Commit(); } @@ -235,7 +235,7 @@ public void CorruptingOneTransactionWillKillAllFutureTransactions() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "a" + i, new MemoryStream(new byte[100])); + tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream(new byte[100])); tx.Commit(); } } @@ -258,7 +258,7 @@ public void CorruptingOneTransactionWillKillAllFutureTransactions() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Null(tx.Environment.State.GetTree(tx,"tree").Read(tx, "a999")); + Assert.Null(tx.Environment.State.GetTree(tx,"tree").Read("a999")); } } diff --git a/Voron.Tests/Bugs/RecoveryWithManualFlush.cs b/Voron.Tests/Bugs/RecoveryWithManualFlush.cs index e0612f397a..d6f468ded3 100644 --- a/Voron.Tests/Bugs/RecoveryWithManualFlush.cs +++ b/Voron.Tests/Bugs/RecoveryWithManualFlush.cs @@ -21,8 +21,8 @@ public void ShouldRecoverFromJournalsAfterFlushWhereLastPageOfFlushedTxHadTheSam { using (var tx1 = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx1.State.Root.Add(tx1, "item/1", new MemoryStream(new byte[4000])); - tx1.State.Root.Add(tx1, "item/2", new MemoryStream(new byte[4000])); + tx1.State.Root.Add("item/1", new MemoryStream(new byte[4000])); + tx1.State.Root.Add("item/2", new MemoryStream(new byte[4000])); tx1.Commit(); } @@ -33,7 +33,7 @@ public void ShouldRecoverFromJournalsAfterFlushWhereLastPageOfFlushedTxHadTheSam // this will also override the page translation table for the page where item/2 is placed - tx2.State.Root.Add(tx2, "item/2", new MemoryStream(new byte[3999])); + tx2.State.Root.Add("item/2", new MemoryStream(new byte[3999])); tx2.Commit(); } @@ -57,12 +57,12 @@ public void ShouldRecoverFromJournalsAfterFlushWhereLastPageOfFlushedTxHadTheSam using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, "item/1"); + var readResult = tx.State.Root.Read("item/1"); Assert.NotNull(readResult); Assert.Equal(4000, readResult.Reader.Length); - readResult = tx.State.Root.Read(tx, "item/2"); + readResult = tx.State.Root.Read("item/2"); Assert.NotNull(readResult); Assert.Equal(3999, readResult.Reader.Length); @@ -74,15 +74,15 @@ public void ShouldRecoverTransactionEndPositionsTableAfterRestart() { using (var tx1 = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx1.State.Root.Add(tx1, "item/1", new MemoryStream(new byte[4000])); - tx1.State.Root.Add(tx1, "item/2", new MemoryStream(new byte[4000])); + tx1.State.Root.Add("item/1", new MemoryStream(new byte[4000])); + tx1.State.Root.Add("item/2", new MemoryStream(new byte[4000])); tx1.Commit(); } using (var tx2 = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx2.State.Root.Add(tx2, "item/2", new MemoryStream(new byte[3999])); + tx2.State.Root.Add("item/2", new MemoryStream(new byte[3999])); tx2.Commit(); } @@ -100,12 +100,12 @@ public void ShouldRecoverTransactionEndPositionsTableAfterRestart() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, "item/1"); + var readResult = tx.State.Root.Read("item/1"); Assert.NotNull(readResult); Assert.Equal(4000, readResult.Reader.Length); - readResult = tx.State.Root.Read(tx, "item/2"); + readResult = tx.State.Root.Read("item/2"); Assert.NotNull(readResult); Assert.Equal(3999, readResult.Reader.Length); @@ -117,7 +117,7 @@ public void StorageRecoveryAfterFlushingToDataFile() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, 
"items/1", new MemoryStream(new byte[] { 1, 2, 3 })); + tx.State.Root.Add("items/1", new MemoryStream(new byte[] { 1, 2, 3 })); tx.Commit(); } @@ -127,7 +127,7 @@ public void StorageRecoveryAfterFlushingToDataFile() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, "items/1"); + var readResult = tx.State.Root.Read("items/1"); Assert.NotNull(readResult); Assert.Equal(3, readResult.Reader.Length); diff --git a/Voron.Tests/Bugs/Snapshots.cs b/Voron.Tests/Bugs/Snapshots.cs index 59ae7519c4..7cb7c3c323 100644 --- a/Voron.Tests/Bugs/Snapshots.cs +++ b/Voron.Tests/Bugs/Snapshots.cs @@ -44,7 +44,7 @@ public void SnapshotIssue() var t1 = tx.Environment.State.GetTree(tx,"tree1"); for (var i = 0; i < DocumentCount; i++) { - t1.Add(tx, "docs/" + i, new MemoryStream(testBuffer)); + t1.Add("docs/" + i, new MemoryStream(testBuffer)); } tx.Commit(); @@ -57,7 +57,7 @@ public void SnapshotIssue() var t1 = tx.Environment.State.GetTree(tx,"tree1"); for (var i = 0; i < DocumentCount; i++) { - t1.Delete(tx, "docs/" + i); + t1.Delete("docs/" + i); } tx.Commit(); @@ -98,7 +98,7 @@ public void SnapshotIssue_ExplicitFlushing() var t1 = tx.Environment.State.GetTree(tx, "tree1"); for (var i = 0; i < DocumentCount; i++) { - t1.Add(tx, "docs/" + i, new MemoryStream(testBuffer)); + t1.Add("docs/" + i, new MemoryStream(testBuffer)); } tx.Commit(); @@ -113,7 +113,7 @@ public void SnapshotIssue_ExplicitFlushing() var t1 = tx.Environment.State.GetTree(tx, "tree1"); for (var i = 0; i < DocumentCount; i++) { - t1.Delete(tx, "docs/" + i); + t1.Delete("docs/" + i); } tx.Commit(); diff --git a/Voron.Tests/Bugs/TreeRebalancer.cs b/Voron.Tests/Bugs/TreeRebalancer.cs index c251840868..bd68361586 100644 --- a/Voron.Tests/Bugs/TreeRebalancer.cs +++ b/Voron.Tests/Bugs/TreeRebalancer.cs @@ -27,14 +27,14 @@ public void TreeRabalancerShouldCopyNodeFlagsWhenMultiValuePageRefIsSet() addedIds.Add("test/0/user-" + i, id); - multiTree.MultiAdd(tx, "test/0/user-" + i, id); + multiTree.MultiAdd("test/0/user-" + i, id); } } foreach (var multiTreeName in multiTrees) { var multiTree = tx.Environment.State.GetTree(tx,multiTreeName); - multiTree.MultiAdd(tx, "test/0/user-50", Guid.NewGuid().ToString()); + multiTree.MultiAdd("test/0/user-50", Guid.NewGuid().ToString()); } tx.Commit(); @@ -49,7 +49,7 @@ public void TreeRabalancerShouldCopyNodeFlagsWhenMultiValuePageRefIsSet() { var multiTree = tx.Environment.State.GetTree(tx,multiTreeName); - multiTree.MultiDelete(tx, "test/0/user-" + i, addedIds["test/0/user-" + i]); + multiTree.MultiDelete("test/0/user-" + i, addedIds["test/0/user-" + i]); } tx.Commit(); @@ -70,7 +70,7 @@ public void TreeRabalancerShouldCopyNodeFlagsWhenMultiValuePageRefIsSet() { var multiTree = tx.Environment.State.GetTree(tx,multiTreeName); - multiTree.MultiDelete(tx, "test/0/user-" + i, addedIds["test/0/user-" + i]); + multiTree.MultiDelete("test/0/user-" + i, addedIds["test/0/user-" + i]); } tx.Commit(); @@ -126,12 +126,12 @@ public void ShouldNotThrowThatPageIsFullDuringTreeRebalancing() var eKey = new string('e', 600); var fKey = new string('f', 920); - tree.Add(tx, aKey, new MemoryStream(new byte[1000])); - tree.Add(tx, bKey, new MemoryStream(new byte[1000])); - tree.Add(tx, cKey, new MemoryStream(new byte[1000])); - tree.Add(tx, dKey, new MemoryStream(new byte[1000])); - tree.Add(tx, eKey, new MemoryStream(new byte[800])); - tree.Add(tx, fKey, new MemoryStream(new byte[10])); + tree.Add(aKey, new MemoryStream(new byte[1000])); + tree.Add(bKey, new MemoryStream(new 
byte[1000])); + tree.Add(cKey, new MemoryStream(new byte[1000])); + tree.Add(dKey, new MemoryStream(new byte[1000])); + tree.Add(eKey, new MemoryStream(new byte[800])); + tree.Add(fKey, new MemoryStream(new byte[10])); RenderAndShow(tx, 1, "rebalancing-issue"); @@ -139,11 +139,11 @@ public void ShouldNotThrowThatPageIsFullDuringTreeRebalancing() // tree rebalance will try to fix the first reference (the implicit ref page node) in the parent page which is almost full // and will fail because there is no space to put a new node - tree.Delete(tx, aKey); // this line throws "The page is full and cannot add an entry, this is probably a bug" + tree.Delete(aKey); // this line throws "The page is full and cannot add an entry, this is probably a bug" tx.Commit(); - using (var iterator = tree.Iterate(tx)) + using (var iterator = tree.Iterate()) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); diff --git a/Voron.Tests/Bugs/UpdateLastItem.cs b/Voron.Tests/Bugs/UpdateLastItem.cs index fd44ed82b5..fef8de12ba 100644 --- a/Voron.Tests/Bugs/UpdateLastItem.cs +++ b/Voron.Tests/Bugs/UpdateLastItem.cs @@ -12,14 +12,14 @@ public void ShouldWork() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.DirectAdd(tx, "events", sizeof (TreeRootHeader)); - tx.State.Root.DirectAdd(tx, "aggregations", sizeof (TreeRootHeader)); - tx.State.Root.DirectAdd(tx, "aggregation-status", sizeof (TreeRootHeader)); + tx.State.Root.DirectAdd("events", sizeof (TreeRootHeader)); + tx.State.Root.DirectAdd("aggregations", sizeof (TreeRootHeader)); + tx.State.Root.DirectAdd("aggregation-status", sizeof (TreeRootHeader)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.DirectAdd(tx, "events", sizeof (TreeRootHeader)); + tx.State.Root.DirectAdd("events", sizeof (TreeRootHeader)); tx.Commit(); } @@ -28,7 +28,7 @@ public void ShouldWork() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.DirectAdd(tx, "events", sizeof (TreeRootHeader)); + tx.State.Root.DirectAdd("events", sizeof (TreeRootHeader)); tx.Commit(); } diff --git a/Voron.Tests/Journal/BasicActions.cs b/Voron.Tests/Journal/BasicActions.cs index ea2df744d5..33810ee2b3 100644 --- a/Voron.Tests/Journal/BasicActions.cs +++ b/Voron.Tests/Journal/BasicActions.cs @@ -31,7 +31,7 @@ public void CanUseMultipleLogFiles() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "item/" + i, new MemoryStream(bytes)); + tx.State.Root.Add("item/" + i, new MemoryStream(bytes)); tx.Commit(); } } @@ -42,7 +42,7 @@ public void CanUseMultipleLogFiles() { using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.NotNull(tx.State.Root.Read(tx, "item/" + i)); + Assert.NotNull(tx.State.Root.Read("item/" + i)); } } } @@ -52,13 +52,13 @@ public void ShouldNotReadUncommittedTransaction() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "items/1", StreamFor("values/1")); + tx.State.Root.Add("items/1", StreamFor("values/1")); // tx.Commit(); uncommitted transaction } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Null(tx.State.Root.Read(tx, "items/1")); + Assert.Null(tx.State.Root.Read("items/1")); } } @@ -70,7 +70,7 @@ public void CanFlushDataFromLogToDataFile() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "items/" + i, StreamFor("values/" + i)); + tx.State.Root.Add("items/" + i, StreamFor("values/" + i)); tx.Commit(); } } diff --git 
a/Voron.Tests/Journal/EdgeCases.cs b/Voron.Tests/Journal/EdgeCases.cs index 9896f75a99..b1e5e7db05 100644 --- a/Voron.Tests/Journal/EdgeCases.cs +++ b/Voron.Tests/Journal/EdgeCases.cs @@ -25,28 +25,28 @@ public void TransactionCommitShouldSetCurrentLogFileToNullIfItIsFull() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var bytes = new byte[4 * AbstractPager.PageSize]; - tx.State.Root.Add(tx, "items/0", new MemoryStream(bytes)); + tx.State.Root.Add("items/0", new MemoryStream(bytes)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var bytes = new byte[1 * AbstractPager.PageSize]; - tx.State.Root.Add(tx, "items/1", new MemoryStream(bytes)); + tx.State.Root.Add("items/1", new MemoryStream(bytes)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var bytes = new byte[1 * AbstractPager.PageSize]; - tx.State.Root.Add(tx, "items/1", new MemoryStream(bytes)); + tx.State.Root.Add("items/1", new MemoryStream(bytes)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var bytes = new byte[1 * AbstractPager.PageSize]; - tx.State.Root.Add(tx, "items/1", new MemoryStream(bytes)); + tx.State.Root.Add("items/1", new MemoryStream(bytes)); tx.Commit(); } diff --git a/Voron.Tests/Journal/Mvcc.cs b/Voron.Tests/Journal/Mvcc.cs index 8c145c233d..138f61ca3e 100644 --- a/Voron.Tests/Journal/Mvcc.cs +++ b/Voron.Tests/Journal/Mvcc.cs @@ -34,8 +34,8 @@ public void ShouldNotFlushUntilThereAreActiveOlderTransactions() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "items/1", new MemoryStream(ones)); - tx.State.Root.Add(tx, "items/2", new MemoryStream(ones)); + tx.State.Root.Add("items/1", new MemoryStream(ones)); + tx.State.Root.Add("items/2", new MemoryStream(ones)); tx.Commit(); } @@ -45,13 +45,13 @@ public void ShouldNotFlushUntilThereAreActiveOlderTransactions() { using (var txw = Env.NewTransaction(TransactionFlags.ReadWrite)) { - txw.State.Root.Add(txw, "items/1", new MemoryStream(nines)); + txw.State.Root.Add("items/1", new MemoryStream(nines)); txw.Commit(); } Env.FlushLogToDataFile(); // should not flush pages of items/1 because there is an active read transaction - var readResult = txr.State.Root.Read(txr, "items/1"); + var readResult = txr.State.Root.Read("items/1"); int used; var readData = readResult.Reader.ReadBytes(readResult.Reader.Length, out used).Take(used).ToArray(); diff --git a/Voron.Tests/MultiTreeSize.cs b/Voron.Tests/MultiTreeSize.cs index 8418b1d696..026f478153 100644 --- a/Voron.Tests/MultiTreeSize.cs +++ b/Voron.Tests/MultiTreeSize.cs @@ -16,7 +16,7 @@ public void Single_AddMulti_WillUseOnePage() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.MultiAdd(tx, "ChildTreeKey", "test"); + tx.State.Root.MultiAdd("ChildTreeKey", "test"); tx.Commit(); } @@ -30,8 +30,8 @@ public void TwoSmall_AddMulti_WillUseOnePage() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.MultiAdd(tx, "ChildTreeKey", "test1"); - tx.State.Root.MultiAdd(tx, "ChildTreeKey", "test2"); + tx.State.Root.MultiAdd("ChildTreeKey", "test1"); + tx.State.Root.MultiAdd("ChildTreeKey", "test2"); tx.Commit(); } diff --git a/Voron.Tests/MultiValueTree.cs b/Voron.Tests/MultiValueTree.cs index 9f4029fdee..f641c8bb49 100644 --- a/Voron.Tests/MultiValueTree.cs +++ b/Voron.Tests/MultiValueTree.cs @@ -27,13 +27,13 @@ public void Single_MultiAdd_And_Read_DataStored() using (var tx = 
Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"foo").MultiAdd(tx, "ChildTreeKey", new Slice(buffer)); + tx.Environment.State.GetTree(tx,"foo").MultiAdd("ChildTreeKey", new Slice(buffer)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - using (var fetchedDataIterator = tx.Environment.State.GetTree(tx,"foo").MultiRead(tx, "ChildTreeKey")) + using (var fetchedDataIterator = tx.Environment.State.GetTree(tx,"foo").MultiRead("ChildTreeKey")) { fetchedDataIterator.Seek(Slice.BeforeAllKeys); @@ -59,7 +59,7 @@ public void MultiDelete_Remains_One_Entry_The_Data_Is_Retrieved_With_MultiRead() { for (int i = 0; i < INPUT_COUNT; i++) { - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiAdd(CHILDTREE_KEY, inputData[i]); } tx.Commit(); } @@ -68,7 +68,7 @@ public void MultiDelete_Remains_One_Entry_The_Data_Is_Retrieved_With_MultiRead() { for (int i = 0; i < INPUT_COUNT - 1; i++) { - tx.State.Root.MultiDelete(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiDelete(CHILDTREE_KEY, inputData[i]); inputData.Remove(inputData[i]); } tx.Commit(); @@ -94,7 +94,7 @@ public void MultiDelete_Remains_No_Entries_ChildTreeKey_Doesnt_Exist() { for (int i = 0; i < INPUT_COUNT; i++) { - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiAdd(CHILDTREE_KEY, inputData[i]); } tx.Commit(); } @@ -103,14 +103,14 @@ public void MultiDelete_Remains_No_Entries_ChildTreeKey_Doesnt_Exist() { for (int i = 0; i < INPUT_COUNT; i++) { - tx.State.Root.MultiDelete(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiDelete(CHILDTREE_KEY, inputData[i]); } tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var iterator = tx.State.Root.MultiRead(tx, CHILDTREE_KEY); + var iterator = tx.State.Root.MultiRead(CHILDTREE_KEY); iterator.Seek(Slice.BeforeAllKeys); Assert.False(iterator.MoveNext()); } @@ -131,19 +131,19 @@ public void Single_MultiAdd_And_Single_MultiDelete_DataDeleted() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"foo").MultiAdd(tx, "ChildTreeKey", new Slice(buffer)); + tx.Environment.State.GetTree(tx,"foo").MultiAdd("ChildTreeKey", new Slice(buffer)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"foo").MultiDelete(tx, "ChildTreeKey", new Slice(buffer)); + tx.Environment.State.GetTree(tx,"foo").MultiDelete("ChildTreeKey", new Slice(buffer)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(typeof(EmptyIterator), tx.Environment.State.GetTree(tx,"foo").MultiRead(tx, "ChildTreeKey").GetType()); + Assert.Equal(typeof(EmptyIterator), tx.Environment.State.GetTree(tx,"foo").MultiRead("ChildTreeKey").GetType()); } } @@ -165,10 +165,10 @@ public void Multiple_MultiAdd_And_MultiDelete_InTheSame_Transaction_EntryDeleted { for (int i = 0; i < INPUT_COUNT; i++) { - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiAdd(CHILDTREE_KEY, inputData[i]); } - tx.State.Root.MultiDelete(tx, CHILDTREE_KEY, inputData[indexToDelete]); + tx.State.Root.MultiDelete(CHILDTREE_KEY, inputData[indexToDelete]); tx.Commit(); } @@ -200,10 +200,10 @@ public void NamedTree_Multiple_MultiAdd_And_MultiDelete_InTheSame_Transaction_En { for (int i = 0; i < INPUT_COUNT; i++) { - tx.Environment.State.GetTree(tx,"foo").MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.Environment.State.GetTree(tx,"foo").MultiAdd(CHILDTREE_KEY, 
inputData[i]); } - tx.Environment.State.GetTree(tx,"foo").MultiDelete(tx, CHILDTREE_KEY, inputData[indexToDelete]); + tx.Environment.State.GetTree(tx,"foo").MultiDelete(CHILDTREE_KEY, inputData[indexToDelete]); tx.Commit(); } @@ -235,7 +235,7 @@ public void NamedTree_Multiple_MultiAdd_MultiDelete_Once_And_Read_EntryDeleted() { for (int i = 0; i < INPUT_COUNT; i++) { - tx.Environment.State.GetTree(tx,"foo").MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.Environment.State.GetTree(tx,"foo").MultiAdd(CHILDTREE_KEY, inputData[i]); } tx.Commit(); } @@ -244,7 +244,7 @@ public void NamedTree_Multiple_MultiAdd_MultiDelete_Once_And_Read_EntryDeleted() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"foo").MultiDelete(tx, CHILDTREE_KEY, inputData[indexToDelete]); + tx.Environment.State.GetTree(tx,"foo").MultiDelete(CHILDTREE_KEY, inputData[indexToDelete]); tx.Commit(); } @@ -260,20 +260,20 @@ public void MultiAdd_Twice_TheSame_KeyValue_MultiDelete_NotThrowsException_Multi const string CHILDTREE_VALUE = "Foo"; using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, CHILDTREE_VALUE); - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, CHILDTREE_VALUE); + tx.State.Root.MultiAdd(CHILDTREE_KEY, CHILDTREE_VALUE); + tx.State.Root.MultiAdd(CHILDTREE_KEY, CHILDTREE_VALUE); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - Assert.DoesNotThrow(() => tx.State.Root.MultiDelete(tx, CHILDTREE_KEY, CHILDTREE_VALUE)); + Assert.DoesNotThrow(() => tx.State.Root.MultiDelete(CHILDTREE_KEY, CHILDTREE_VALUE)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(0, tx.State.Root.ReadVersion(tx, CHILDTREE_KEY)); + Assert.Equal(0, tx.State.Root.ReadVersion(CHILDTREE_KEY)); } } @@ -294,7 +294,7 @@ public void Multiple_MultiAdd_MultiDelete_Once_And_Read_EntryDeleted() { for (int i = 0; i < INPUT_COUNT; i++) { - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiAdd(CHILDTREE_KEY, inputData[i]); } tx.Commit(); } @@ -305,7 +305,7 @@ public void Multiple_MultiAdd_MultiDelete_Once_And_Read_EntryDeleted() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.MultiDelete(tx, CHILDTREE_KEY, inputData[indexToDelete]); + tx.State.Root.MultiDelete(CHILDTREE_KEY, inputData[indexToDelete]); tx.Commit(); } @@ -331,7 +331,7 @@ public void Multiple_MultiAdd_And_Read_DataStored() { for (int i = 0; i < INPUT_COUNT; i++) { - tx.State.Root.MultiAdd(tx, CHILDTREE_KEY, inputData[i]); + tx.State.Root.MultiAdd(CHILDTREE_KEY, inputData[i]); } tx.Commit(); } @@ -347,7 +347,7 @@ private void ValidateInputExistence(List inputData, string childtreeKey, int fetchedEntryCount = 0; var inputEntryCount = inputData.Count; - using (var fetchedDataIterator = targetTree.MultiRead(tx, childtreeKey)) + using (var fetchedDataIterator = targetTree.MultiRead(childtreeKey)) { fetchedDataIterator.Seek(Slice.BeforeAllKeys); do diff --git a/Voron.Tests/Optimizations/Writes.cs b/Voron.Tests/Optimizations/Writes.cs index f51d30ce59..4082db6326 100644 --- a/Voron.Tests/Optimizations/Writes.cs +++ b/Voron.Tests/Optimizations/Writes.cs @@ -17,25 +17,25 @@ public void SinglePageModificationDoNotCauseCopyingAllIntermediatePages() var keySize = 1024; using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, new string('9', keySize), new MemoryStream(new byte[3])); + tx.State.Root.Add(new string('9', keySize), new 
MemoryStream(new byte[3])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('1', keySize), new MemoryStream(new byte[3])); + tx.State.Root.Add(new string('1', keySize), new MemoryStream(new byte[3])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('4', 1000), new MemoryStream(new byte[2])); + tx.State.Root.Add(new string('4', 1000), new MemoryStream(new byte[2])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('5', keySize), new MemoryStream(new byte[2])); + tx.State.Root.Add(new string('5', keySize), new MemoryStream(new byte[2])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('8', keySize), new MemoryStream(new byte[3])); + tx.State.Root.Add(new string('8', keySize), new MemoryStream(new byte[3])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('2', keySize), new MemoryStream(new byte[2])); + tx.State.Root.Add(new string('2', keySize), new MemoryStream(new byte[2])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('6', keySize), new MemoryStream(new byte[2])); + tx.State.Root.Add(new string('6', keySize), new MemoryStream(new byte[2])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('0', keySize), new MemoryStream(new byte[4])); + tx.State.Root.Add(new string('0', keySize), new MemoryStream(new byte[4])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('3', 1000), new MemoryStream(new byte[1])); + tx.State.Root.Add(new string('3', 1000), new MemoryStream(new byte[1])); RenderAndShow(tx, tx.State.Root, 1); - tx.State.Root.Add(tx, new string('7', keySize), new MemoryStream(new byte[1])); + tx.State.Root.Add(new string('7', keySize), new MemoryStream(new byte[1])); tx.Commit(); } @@ -44,9 +44,9 @@ public void SinglePageModificationDoNotCauseCopyingAllIntermediatePages() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Delete(tx, new string('0', keySize)); + tx.State.Root.Delete(new string('0', keySize)); - tx.State.Root.Add(tx, new string('4', 1000), new MemoryStream(new byte[21])); + tx.State.Root.Add(new string('4', 1000), new MemoryStream(new byte[21])); tx.Commit(); } @@ -56,9 +56,9 @@ public void SinglePageModificationDoNotCauseCopyingAllIntermediatePages() // ensure changes were applied using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Null(tx.State.Root.Read(tx, new string('0', keySize))); + Assert.Null(tx.State.Root.Read(new string('0', keySize))); - var readResult = tx.State.Root.Read(tx, new string('4', 1000)); + var readResult = tx.State.Root.Read(new string('4', 1000)); Assert.Equal(21, readResult.Reader.Length); } diff --git a/Voron.Tests/Storage/Batches.cs b/Voron.Tests/Storage/Batches.cs index 86c3ebf2d2..6cd5045da4 100644 --- a/Voron.Tests/Storage/Batches.cs +++ b/Voron.Tests/Storage/Batches.cs @@ -19,7 +19,7 @@ public void ReadVersion_Items_From_Both_WriteBatch_And_Snapshot() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } @@ -46,7 +46,7 @@ public void ReadVersion_Items_From_Both_WriteBatch_And_Snapshot_WithoutVersionNu using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + 
tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } @@ -73,7 +73,7 @@ public void Read_Items_From_Both_WriteBatch_And_Snapshot() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } @@ -102,7 +102,7 @@ public void Read_Items_From_Both_WriteBatch_And_Snapshot_Deleted_Key_Returns_Nul using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } @@ -128,7 +128,7 @@ public void WhenLastBatchOperationVersionIsNullThenVersionComesFromStorage() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } @@ -161,7 +161,7 @@ public void Read_The_Same_Item_Both_WriteBatch_And_Snapshot_WriteBatch_Takes_Pre using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } @@ -188,14 +188,14 @@ public void ReadVersion_The_Same_Item_Both_WriteBatch_And_Snapshot_WriteBatch_Ta using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Env.CreateTree(tx, "tree"); - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("foo1")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "foo1", StreamFor("updated foo1")); + tx.Environment.State.GetTree(tx,"tree").Add("foo1", StreamFor("updated foo1")); tx.Commit(); } @@ -224,7 +224,7 @@ public void SingleItemBatchTest() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var stream = tx.State.Root.Read(tx, "key/1"); + var stream = tx.State.Root.Read("key/1"); Assert.Equal("123", stream.Reader.ToStringValue()); } } @@ -247,7 +247,7 @@ public void MultipleItemBatchTest() for (int i = 0; i < numberOfItems; i++) { { - var result = tx.State.Root.Read(tx, "key/" + i).Reader.ToStringValue(); + var result = tx.State.Root.Read("key/" + i).Reader.ToStringValue(); Assert.Equal(i.ToString(CultureInfo.InvariantCulture), result); } } @@ -273,10 +273,10 @@ public async Task MultipleBatchesTest() { for (int i = 0; i < numberOfItems; i++) { - var result = tx.State.Root.Read(tx, "key/" + i).Reader.ToStringValue(); + var result = tx.State.Root.Read("key/" + i).Reader.ToStringValue(); Assert.Equal(i.ToString(CultureInfo.InvariantCulture), result); - result = tx.State.Root.Read(tx, "yek/" + i).Reader.ToStringValue(); + result = tx.State.Root.Read("yek/" + i).Reader.ToStringValue(); Assert.Equal(i.ToString(CultureInfo.InvariantCulture), result); } @@ -311,9 +311,9 @@ public async Task MultipleTreesTest() { for (int i = 0; i < numberOfItems; i++) { - var result = tx.Environment.State.GetTree(tx,"tree1").Read(tx, "key/" + i).Reader.ToStringValue(); + var result = tx.Environment.State.GetTree(tx,"tree1").Read("key/" + i).Reader.ToStringValue(); 
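Throughout these test changes the pattern is uniform: a Tree obtained from a transaction (tx.State.Root, tx.ReadTree(name), or tx.Environment.State.GetTree(tx, name)) is already bound to that transaction, so the explicit tx argument to Add, Read, Delete, MultiAdd, Iterate, ReadVersion and the other tree operations is dropped. A minimal before/after sketch of the call shape, using only members that appear in this patch (the key "items/0" and the buffer size are illustrative):

    var bytes = new byte[4096]; // illustrative payload
    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        // before this patch series:
        //   tx.State.Root.Add(tx, "items/0", new MemoryStream(bytes));
        // after it, the tree uses the transaction it was obtained from:
        tx.State.Root.Add("items/0", new MemoryStream(bytes));
        tx.Commit();
    }
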
Assert.Equal(i.ToString(CultureInfo.InvariantCulture), result); - result = tx.Environment.State.GetTree(tx,"tree2").Read(tx, "yek/" + i).Reader.ToStringValue(); + result = tx.Environment.State.GetTree(tx,"tree2").Read("yek/" + i).Reader.ToStringValue(); Assert.Equal(i.ToString(CultureInfo.InvariantCulture), result); } } @@ -339,9 +339,9 @@ public void MultipleTreesInSingleBatch() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var result = tx.Environment.State.GetTree(tx,"tree1").Read(tx, "key/1").Reader.ToStringValue(); + var result = tx.Environment.State.GetTree(tx,"tree1").Read("key/1").Reader.ToStringValue(); Assert.Equal("tree1", result); - result = tx.Environment.State.GetTree(tx,"tree2").Read(tx, "key/1").Reader.ToStringValue(); + result = tx.Environment.State.GetTree(tx,"tree2").Read("key/1").Reader.ToStringValue(); Assert.Equal("tree2", result); } } @@ -378,9 +378,9 @@ public async Task BatchErrorHandling() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var result = tx.Environment.State.GetTree(tx,"tree1").Read(tx, "key/1").Reader.ToStringValue(); + var result = tx.Environment.State.GetTree(tx,"tree1").Read("key/1").Reader.ToStringValue(); Assert.Equal("tree1", result); - result = tx.Environment.State.GetTree(tx,"tree3").Read(tx, "key/1").Reader.ToStringValue(); + result = tx.Environment.State.GetTree(tx,"tree3").Read("key/1").Reader.ToStringValue(); Assert.Equal("tree3", result); } } @@ -429,10 +429,10 @@ public async Task MergedBatchErrorHandling() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var result = tx.Environment.State.GetTree(tx,"tree1").Read(tx, "key/1").Reader.ToStringValue(); + var result = tx.Environment.State.GetTree(tx,"tree1").Read("key/1").Reader.ToStringValue(); Assert.Equal("tree1", result); - result = tx.Environment.State.GetTree(tx,"tree3").Read(tx, "key/1").Reader.ToStringValue(); + result = tx.Environment.State.GetTree(tx,"tree3").Read("key/1").Reader.ToStringValue(); Assert.Equal("tree3", result); } } diff --git a/Voron.Tests/Storage/BigValue.cs b/Voron.Tests/Storage/BigValue.cs index 81bfb25af5..42a223c539 100644 --- a/Voron.Tests/Storage/BigValue.cs +++ b/Voron.Tests/Storage/BigValue.cs @@ -27,7 +27,7 @@ public void CanReuseLargeSpace(int restartCount) random.NextBytes(buffer); using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, new Slice(BitConverter.GetBytes(1203)), new MemoryStream(buffer)); + tx.State.Root.Add(new Slice(BitConverter.GetBytes(1203)), new MemoryStream(buffer)); tx.Commit(); } @@ -40,13 +40,13 @@ public void CanReuseLargeSpace(int restartCount) using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Delete(tx, new Slice(BitConverter.GetBytes(1203))); + tx.State.Root.Delete(new Slice(BitConverter.GetBytes(1203))); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.Null(readResult); } @@ -55,7 +55,7 @@ public void CanReuseLargeSpace(int restartCount) using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.Null(readResult); } @@ -63,13 +63,13 @@ public void CanReuseLargeSpace(int restartCount) { buffer = new byte[1024 * 1024 * 3 + 1238]; random.NextBytes(buffer); - 
tx.State.Root.Add(tx, new Slice(BitConverter.GetBytes(1203)), new MemoryStream(buffer)); + tx.State.Root.Add(new Slice(BitConverter.GetBytes(1203)), new MemoryStream(buffer)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); @@ -83,7 +83,7 @@ public void CanReuseLargeSpace(int restartCount) using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); @@ -96,7 +96,7 @@ public void CanReuseLargeSpace(int restartCount) using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); @@ -112,7 +112,7 @@ public void CanReuseLargeSpace(int restartCount) using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); @@ -137,13 +137,13 @@ public void CanStoreInOneTransactionReallyBigValue() random.NextBytes(buffer); using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, new Slice(BitConverter.GetBytes(1203)), new MemoryStream(buffer)); + tx.State.Root.Add(new Slice(BitConverter.GetBytes(1203)), new MemoryStream(buffer)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(1203))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(1203))); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); @@ -165,7 +165,7 @@ public void CanStoreInOneTransactionManySmallValues() var buffer = new byte[912]; random.NextBytes(buffer); buffers.Add(buffer); - tx.State.Root.Add(tx, new Slice(BitConverter.GetBytes(i)), new MemoryStream(buffer)); + tx.State.Root.Add(new Slice(BitConverter.GetBytes(i)), new MemoryStream(buffer)); } tx.Commit(); } @@ -174,7 +174,7 @@ public void CanStoreInOneTransactionManySmallValues() { for (int i = 0; i < 1500; i++) { - var readResult = tx.State.Root.Read(tx, new Slice(BitConverter.GetBytes(i))); + var readResult = tx.State.Root.Read(new Slice(BitConverter.GetBytes(i))); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); diff --git a/Voron.Tests/Storage/Concurrency.cs b/Voron.Tests/Storage/Concurrency.cs index 1c58497c0b..09001641f8 100644 --- a/Voron.Tests/Storage/Concurrency.cs +++ b/Voron.Tests/Storage/Concurrency.cs @@ -16,7 +16,7 @@ public void MissingEntriesShouldReturn0Version() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - Assert.Equal(0, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(0, tx.State.Root.ReadVersion("key/1")); } } @@ -25,17 +25,17 @@ public void SimpleVersion() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "key/1", StreamFor("123")); - Assert.Equal(1, tx.State.Root.ReadVersion(tx, 
"key/1")); - tx.State.Root.Add(tx, "key/1", StreamFor("123")); - Assert.Equal(2, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.Add("key/1", StreamFor("123")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); + tx.State.Root.Add("key/1", StreamFor("123")); + Assert.Equal(2, tx.State.Root.ReadVersion("key/1")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(2, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(2, tx.State.Root.ReadVersion("key/1")); } } @@ -46,13 +46,13 @@ public void VersionOverflow() { for (uint i = 1; i <= ushort.MaxValue + 1; i++) { - tx.State.Root.Add(tx, "key/1", StreamFor("123")); + tx.State.Root.Add("key/1", StreamFor("123")); var expected = i; if (expected > ushort.MaxValue) expected = 1; - Assert.Equal(expected, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(expected, tx.State.Root.ReadVersion("key/1")); } tx.Commit(); @@ -64,15 +64,15 @@ public void NoCommit() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "key/1", StreamFor("123")); - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); - tx.State.Root.Add(tx, "key/1", StreamFor("123")); - Assert.Equal(2, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.Add("key/1", StreamFor("123")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); + tx.State.Root.Add("key/1", StreamFor("123")); + Assert.Equal(2, tx.State.Root.ReadVersion("key/1")); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(0, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(0, tx.State.Root.ReadVersion("key/1")); } } @@ -81,11 +81,11 @@ public void Delete() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "key/1", StreamFor("123")); - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.Add("key/1", StreamFor("123")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); - tx.State.Root.Delete(tx, "key/1"); - Assert.Equal(0, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.Delete("key/1"); + Assert.Equal(0, tx.State.Root.ReadVersion("key/1")); } } @@ -94,15 +94,15 @@ public void Missing() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "key/1", StreamFor("123"), 0); - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.Add("key/1", StreamFor("123"), 0); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var e = Assert.Throws(() => tx.State.Root.Add(tx, "key/1", StreamFor("321"), 0)); + var e = Assert.Throws(() => tx.State.Root.Add("key/1", StreamFor("321"), 0)); Assert.Equal("Cannot add 'key/1' to 'Root' tree. Version mismatch. Expected: 0. Actual: 1.", e.Message); } } @@ -112,21 +112,21 @@ public void ConcurrencyExceptionShouldBeThrownWhenVersionMismatch() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "key/1", StreamFor("123")); - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.Add("key/1", StreamFor("123")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var e = Assert.Throws(() => tx.State.Root.Add(tx, "key/1", StreamFor("321"), 2)); + var e = Assert.Throws(() => tx.State.Root.Add("key/1", StreamFor("321"), 2)); Assert.Equal("Cannot add 'key/1' to 'Root' tree. Version mismatch. Expected: 2. 
Actual: 1.", e.Message); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var e = Assert.Throws(() => tx.State.Root.Delete(tx, "key/1", 2)); + var e = Assert.Throws(() => tx.State.Root.Delete("key/1", 2)); Assert.Equal("Cannot delete 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); } } @@ -136,21 +136,21 @@ public void ConcurrencyExceptionShouldBeThrownWhenVersionMismatchMultiTree() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.MultiAdd(tx, "key/1", "123"); - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + tx.State.Root.MultiAdd("key/1", "123"); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var e = Assert.Throws(() => tx.State.Root.MultiAdd(tx, "key/1", "321", version: 2)); + var e = Assert.Throws(() => tx.State.Root.MultiAdd("key/1", "321", version: 2)); Assert.Equal("Cannot add value '321' to key 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 0.", e.Message); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - var e = Assert.Throws(() => tx.State.Root.MultiDelete(tx, "key/1", "123", 2)); + var e = Assert.Throws(() => tx.State.Root.MultiDelete("key/1", "123", 2)); Assert.Equal("Cannot delete value '123' to key 'key/1' to 'Root' tree. Version mismatch. Expected: 2. Actual: 1.", e.Message); } } @@ -165,7 +165,7 @@ public void BatchSimpleVersion() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); } var batch2 = new WriteBatch(); @@ -175,7 +175,7 @@ public void BatchSimpleVersion() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(2, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(2, tx.State.Root.ReadVersion("key/1")); } } @@ -189,7 +189,7 @@ public void BatchDelete() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); } var batch2 = new WriteBatch(); @@ -199,7 +199,7 @@ public void BatchDelete() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(0, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(0, tx.State.Root.ReadVersion("key/1")); } } @@ -213,7 +213,7 @@ public void BatchMissing() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - Assert.Equal(1, tx.State.Root.ReadVersion(tx, "key/1")); + Assert.Equal(1, tx.State.Root.ReadVersion("key/1")); } var batch2 = new WriteBatch(); diff --git a/Voron.Tests/Storage/FreeScratchPages.cs b/Voron.Tests/Storage/FreeScratchPages.cs index e6077f0198..a28cae58c2 100644 --- a/Voron.Tests/Storage/FreeScratchPages.cs +++ b/Voron.Tests/Storage/FreeScratchPages.cs @@ -27,7 +27,7 @@ public void UncommittedTransactionShouldFreeScratchPagesThatWillBeReusedByNextTr { for (int i = 0; i < 10; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } scratchPagesOfUncommittedTransaction = tx.GetTransactionPages(); @@ -42,7 +42,7 @@ public void UncommittedTransactionShouldFreeScratchPagesThatWillBeReusedByNextTr // let's do exactly the same, it should reuse the same scratch pages for (int i = 0; i < 10; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); + tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } 
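The signature change is possible because a tree now carries the transaction that opened it: ReadTree passes the transaction into Tree.Open(this, ...), and StorageEnvironmentState.Clone and Tree.Clone take a Transaction parameter (see the Voron/Impl hunks below). A conceptual sketch of that binding, with hypothetical names rather than the actual Voron source:

    // Hypothetical illustration only: the tree captures its transaction when it
    // is opened or cloned, so per-call tx arguments become redundant.
    public class BoundTree
    {
        private readonly Transaction _tx; // set once, when the tree is opened/cloned for this tx

        public BoundTree(Transaction tx)
        {
            _tx = tx;
        }

        public void Add(Slice key, Stream value)
        {
            // page allocation and modification would flow through _tx here,
            // instead of through a caller-supplied transaction argument
        }
    }
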
scratchPagesOfCommittedTransaction = tx.GetTransactionPages(); diff --git a/Voron.Tests/Storage/Increments.cs b/Voron.Tests/Storage/Increments.cs index 140fa76595..6f204a59ef 100644 --- a/Voron.Tests/Storage/Increments.cs +++ b/Voron.Tests/Storage/Increments.cs @@ -18,28 +18,28 @@ public void SimpleIncrementShouldWork() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - Assert.Equal(10, tx.ReadTree("tree0").Increment(tx, "key/1", 10)); + Assert.Equal(10, tx.ReadTree("tree0").Increment("key/1", 10)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - Assert.Equal(15, tx.ReadTree("tree0").Increment(tx, "key/1", 5)); + Assert.Equal(15, tx.ReadTree("tree0").Increment("key/1", 5)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - Assert.Equal(12, tx.ReadTree("tree0").Increment(tx, "key/1", -3)); + Assert.Equal(12, tx.ReadTree("tree0").Increment("key/1", -3)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var read = tx.ReadTree("tree0").Read(tx, "key/1"); + var read = tx.ReadTree("tree0").Read("key/1"); Assert.NotNull(read); Assert.Equal(3, read.Version); @@ -69,7 +69,7 @@ public void SimpleIncrementShouldWorkUsingWriteBatch() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var read = tx.ReadTree("tree0").Read(tx, "key/1"); + var read = tx.ReadTree("tree0").Read("key/1"); Assert.NotNull(read); Assert.Equal(3, read.Version); diff --git a/Voron.Tests/Storage/MemoryMapWithoutBackingPagerTest.cs b/Voron.Tests/Storage/MemoryMapWithoutBackingPagerTest.cs index 9070ee2283..ecda48d9ac 100644 --- a/Voron.Tests/Storage/MemoryMapWithoutBackingPagerTest.cs +++ b/Voron.Tests/Storage/MemoryMapWithoutBackingPagerTest.cs @@ -108,7 +108,7 @@ public void Should_be_able_to_allocate_new_pages_with_apply_logs_to_data_file(in { var tree = tx.ReadTree(TestTreeName); foreach (var dataPair in testData) - tree.Add(tx, dataPair.Key, StreamFor(dataPair.Value)); + tree.Add(dataPair.Key, StreamFor(dataPair.Value)); tx.Commit(); } diff --git a/Voron.Tests/Storage/MultiTransactions.cs b/Voron.Tests/Storage/MultiTransactions.cs index 1880149645..2e4fbbf71a 100644 --- a/Voron.Tests/Storage/MultiTransactions.cs +++ b/Voron.Tests/Storage/MultiTransactions.cs @@ -23,7 +23,7 @@ public void ShouldWork() { ms.Position = 0; - tx.State.Root.Add(tx, (x * i).ToString("0000000000000000"), ms); + tx.State.Root.Add((x * i).ToString("0000000000000000"), ms); } tx.Commit(); diff --git a/Voron.Tests/Storage/Quotas.cs b/Voron.Tests/Storage/Quotas.cs index 181d2c8948..5ea8256d94 100644 --- a/Voron.Tests/Storage/Quotas.cs +++ b/Voron.Tests/Storage/Quotas.cs @@ -27,7 +27,7 @@ public void ShouldThrowQuotaException() { for (int i = 0; i < 1024; i++) { - tx.State.Root.Add(tx, "items/" + i, new MemoryStream(new byte[1024])); + tx.State.Root.Add("items/" + i, new MemoryStream(new byte[1024])); } tx.Commit(); diff --git a/Voron.Tests/Storage/Restarts.cs b/Voron.Tests/Storage/Restarts.cs index 5878fd1e38..5d4b04afcd 100644 --- a/Voron.Tests/Storage/Restarts.cs +++ b/Voron.Tests/Storage/Restarts.cs @@ -20,12 +20,12 @@ public void DataIsKeptAfterRestart() { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test/1", new MemoryStream()); + tx.State.Root.Add("test/1", new MemoryStream()); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test/2", new MemoryStream()); + tx.State.Root.Add("test/2", new MemoryStream()); tx.Commit(); } } @@ 
-34,11 +34,11 @@ public void DataIsKeptAfterRestart() { using (var tx = env.NewTransaction(TransactionFlags.Read)) { - if (tx.State.Root.Read(tx, "test/1") == null) + if (tx.State.Root.Read("test/1") == null) Debugger.Launch(); - Assert.NotNull(tx.State.Root.Read(tx, "test/1")); - Assert.NotNull(tx.State.Root.Read(tx, "test/2")); + Assert.NotNull(tx.State.Root.Read("test/1")); + Assert.NotNull(tx.State.Root.Read("test/2")); tx.Commit(); } } @@ -61,10 +61,10 @@ public void DataIsKeptAfterRestartForSubTrees() using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = tx.Environment.State.GetTree(tx,"test"); - tree.Add(tx, "test", Stream.Null); + tree.Add("test", Stream.Null); tx.Commit(); - Assert.NotNull(tree.Read(tx, "test")); + Assert.NotNull(tree.Read("test")); } } @@ -79,7 +79,7 @@ public void DataIsKeptAfterRestartForSubTrees() using (var tx = env.NewTransaction(TransactionFlags.Read)) { var tree = tx.Environment.State.GetTree(tx,"test"); - Assert.NotNull(tree.Read(tx, "test")); + Assert.NotNull(tree.Read("test")); tx.Commit(); } } diff --git a/Voron.Tests/Storage/Snapshots.cs b/Voron.Tests/Storage/Snapshots.cs index 4e292ee775..c4988cdb31 100644 --- a/Voron.Tests/Storage/Snapshots.cs +++ b/Voron.Tests/Storage/Snapshots.cs @@ -28,7 +28,7 @@ public void SingleItemBatchTestLowLevel() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "key/1", new MemoryStream(Encoding.UTF8.GetBytes("123"))); + tx.State.Root.Add("key/1", new MemoryStream(Encoding.UTF8.GetBytes("123"))); tx.Commit(); } @@ -36,7 +36,7 @@ public void SingleItemBatchTestLowLevel() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var reader = tx.State.Root.Read(tx, "key/1").Reader; + var reader = tx.State.Root.Read("key/1").Reader; Assert.Equal("123", reader.ToStringValue()); tx.Commit(); } diff --git a/Voron.Tests/Storage/SplittingVeryBig.cs b/Voron.Tests/Storage/SplittingVeryBig.cs index 5aeb758d63..a1d259b459 100644 --- a/Voron.Tests/Storage/SplittingVeryBig.cs +++ b/Voron.Tests/Storage/SplittingVeryBig.cs @@ -31,13 +31,13 @@ public void ShouldBeAbleToWriteValuesGreaterThanLogAndReadThem() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "key1", new MemoryStream(buffer)); + tx.Environment.State.GetTree(tx,"tree").Add("key1", new MemoryStream(buffer)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var read = tx.Environment.State.GetTree(tx,"tree").Read(tx, "key1"); + var read = tx.Environment.State.GetTree(tx,"tree").Read("key1"); Assert.NotNull(read); var reader = read.Reader; @@ -69,7 +69,7 @@ public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem() using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"tree").Add(tx, "key1", new MemoryStream(buffer)); + tx.Environment.State.GetTree(tx,"tree").Add("key1", new MemoryStream(buffer)); tx.Commit(); } } @@ -87,7 +87,7 @@ public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem() using (var tx = env.NewTransaction(TransactionFlags.Read)) { - var read = tx.Environment.State.GetTree(tx,"tree").Read(tx, "key1"); + var read = tx.Environment.State.GetTree(tx,"tree").Read("key1"); Assert.NotNull(read); { diff --git a/Voron.Tests/Storage/VeryBig.cs b/Voron.Tests/Storage/VeryBig.cs index f0bc3c9842..0af841cda2 100644 --- a/Voron.Tests/Storage/VeryBig.cs +++ b/Voron.Tests/Storage/VeryBig.cs @@ -25,7 +25,7 @@ public void CanGrowBeyondInitialSize() var 
tree = tx.Environment.State.GetTree(tx,"test"); for (int j = 0; j < 12; j++) { - tree.Add(tx, string.Format("{0:000}-{1:000}", j, i), new MemoryStream(buffer)); + tree.Add(string.Format("{0:000}-{1:000}", j, i), new MemoryStream(buffer)); } tx.Commit(); } @@ -44,7 +44,7 @@ public void CanGrowBeyondInitialSize_Root() { for (int j = 0; j < 12; j++) { - tx.State.Root.Add(tx, string.Format("{0:000}-{1:000}", j, i), new MemoryStream(buffer)); + tx.State.Root.Add(string.Format("{0:000}-{1:000}", j, i), new MemoryStream(buffer)); } tx.Commit(); } @@ -68,7 +68,7 @@ public void CanGrowBeyondInitialSize_WithAnotherTree() for (int j = 0; j < 12; j++) { - tx.State.Root.Add(tx, string.Format("{0:000}-{1:000}", j, i), new MemoryStream(buffer)); + tx.State.Root.Add(string.Format("{0:000}-{1:000}", j, i), new MemoryStream(buffer)); } tx.Commit(); } diff --git a/Voron.Tests/StorageTest.cs b/Voron.Tests/StorageTest.cs index ab0ca2344a..76ddf4d8c5 100644 --- a/Voron.Tests/StorageTest.cs +++ b/Voron.Tests/StorageTest.cs @@ -151,7 +151,7 @@ protected void RenderAndShow(Transaction tx, Tree root, int showEntries = 25) protected unsafe Tuple ReadKey(Transaction tx, Slice key) { Lazy lazy; - var p = tx.State.Root.FindPageFor(tx, key, out lazy); + var p = tx.State.Root.FindPageFor(key, out lazy); var node = p.Search(key, Env.SliceComparer); if (node == null) diff --git a/Voron.Tests/Trees/Basic.cs b/Voron.Tests/Trees/Basic.cs index 2b72405986..eae5f724d2 100644 --- a/Voron.Tests/Trees/Basic.cs +++ b/Voron.Tests/Trees/Basic.cs @@ -20,8 +20,8 @@ public void CanAddVeryLargeValue() List allPages = null; using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "a", new MemoryStream(buffer)); - allPages = tx.State.Root.AllPages(tx); + tx.State.Root.Add("a", new MemoryStream(buffer)); + allPages = tx.State.Root.AllPages(); tx.Commit(); } @@ -38,7 +38,7 @@ public void CanAdd() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test", StreamFor("value")); + tx.State.Root.Add("test", StreamFor("value")); } } @@ -47,9 +47,9 @@ public void CanAddAndRead() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "b", StreamFor("2")); - tx.State.Root.Add(tx, "c", StreamFor("3")); - tx.State.Root.Add(tx, "a", StreamFor("1")); + tx.State.Root.Add("b", StreamFor("2")); + tx.State.Root.Add("c", StreamFor("3")); + tx.State.Root.Add("a", StreamFor("1")); var actual = ReadKey(tx, "a"); Assert.Equal("a", actual.Item1); @@ -63,7 +63,7 @@ public void CanAddAndReadStats() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { Slice key = "test"; - tx.State.Root.Add(tx, key, StreamFor("value")); + tx.State.Root.Add(key, StreamFor("value")); tx.Commit(); @@ -82,7 +82,7 @@ public void CanAddEnoughToCausePageSplit() for (int i = 0; i < 256; i++) { stream.Position = 0; - tx.State.Root.Add(tx, "test-" + i, stream); + tx.State.Root.Add("test-" + i, stream); } @@ -107,7 +107,7 @@ public void AfterPageSplitAllDataIsValid() { for (int i = 0; i < count; i++) { - tx.State.Root.Add(tx, "test-" + i.ToString("000"), StreamFor("val-" + i)); + tx.State.Root.Add("test-" + i.ToString("000"), StreamFor("val-" + i)); } @@ -140,7 +140,7 @@ public void PageSplitsAllAround() { } - tx.State.Root.Add(tx, "test-" + j.ToString("000") + "-" + i.ToString("000"), stream); + tx.State.Root.Add("test-" + j.ToString("000") + "-" + i.ToString("000"), stream); } } diff --git a/Voron.Tests/Trees/CanDefrag.cs b/Voron.Tests/Trees/CanDefrag.cs index 
5f315112a0..62e4e54ffe 100644 --- a/Voron.Tests/Trees/CanDefrag.cs +++ b/Voron.Tests/Trees/CanDefrag.cs @@ -16,7 +16,7 @@ public void CanDeleteAtRoot() { for (int i = 0; i < size; i++) { - tx.State.Root.Add(tx, string.Format("{0,5}", i*2), StreamFor("abcdefg")); + tx.State.Root.Add(string.Format("{0,5}", i*2), StreamFor("abcdefg")); } tx.Commit(); } @@ -24,7 +24,7 @@ public void CanDeleteAtRoot() { for (int i = 0; i < size/2; i++) { - tx.State.Root.Delete(tx, string.Format("{0,5}", i*2)); + tx.State.Root.Delete(string.Format("{0,5}", i*2)); } tx.Commit(); } @@ -32,7 +32,7 @@ public void CanDeleteAtRoot() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var pageCount = tx.State.Root.State.PageCount; - tx.State.Root.Add(tx, " 244",new MemoryStream(new byte[512])); + tx.State.Root.Add( " 244",new MemoryStream(new byte[512])); Assert.Equal(pageCount, tx.State.Root.State.PageCount); tx.Commit(); } diff --git a/Voron.Tests/Trees/CanIterateBackward.cs b/Voron.Tests/Trees/CanIterateBackward.cs index 9e09079fa6..1b9cd8ac1f 100644 --- a/Voron.Tests/Trees/CanIterateBackward.cs +++ b/Voron.Tests/Trees/CanIterateBackward.cs @@ -9,7 +9,7 @@ public class CanIterateBackward : StorageTest public void SeekLastOnEmptyResultInFalse() { using (var tx = Env.NewTransaction(TransactionFlags.Read)) - using (var it = tx.State.Root.Iterate(tx)) + using (var it = tx.State.Root.Iterate()) { Assert.False(it.Seek(Slice.AfterAllKeys)); @@ -22,15 +22,15 @@ public void CanSeekLast() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "a", new MemoryStream(0)); - tx.State.Root.Add(tx, "c", new MemoryStream(0)); - tx.State.Root.Add(tx, "b", new MemoryStream(0)); + tx.State.Root.Add("a", new MemoryStream(0)); + tx.State.Root.Add("c", new MemoryStream(0)); + tx.State.Root.Add("b", new MemoryStream(0)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) - using (var it = tx.State.Root.Iterate(tx)) + using (var it = tx.State.Root.Iterate()) { Assert.True(it.Seek(Slice.AfterAllKeys)); Assert.Equal("c", it.CurrentKey.ToString()); @@ -44,15 +44,15 @@ public void CanSeekBack() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "a", new MemoryStream(0)); - tx.State.Root.Add(tx, "c", new MemoryStream(0)); - tx.State.Root.Add(tx, "b", new MemoryStream(0)); + tx.State.Root.Add("a", new MemoryStream(0)); + tx.State.Root.Add("c", new MemoryStream(0)); + tx.State.Root.Add("b", new MemoryStream(0)); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) - using (var it = tx.State.Root.Iterate(tx)) + using (var it = tx.State.Root.Iterate()) { Assert.True(it.Seek(Slice.AfterAllKeys)); Assert.Equal("c", it.CurrentKey.ToString()); diff --git a/Voron.Tests/Trees/Deletes.cs b/Voron.Tests/Trees/Deletes.cs index 588665e73b..bbc9c58ab2 100644 --- a/Voron.Tests/Trees/Deletes.cs +++ b/Voron.Tests/Trees/Deletes.cs @@ -19,7 +19,7 @@ public void CanAddVeryLargeValueAndThenDeleteIt() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "a", new MemoryStream(buffer)); + tx.State.Root.Add("a", new MemoryStream(buffer)); tx.Commit(); } @@ -32,7 +32,7 @@ public void CanAddVeryLargeValueAndThenDeleteIt() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Delete(tx, "a"); + tx.State.Root.Delete("a"); tx.Commit(); } @@ -46,7 +46,7 @@ public void CanAddVeryLargeValueAndThenDeleteIt() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - 
Assert.Null(tx.State.Root.Read(tx, "a")); + Assert.Null(tx.State.Root.Read("a")); tx.Commit(); } @@ -61,7 +61,7 @@ public void CanDeleteAtRoot() { for (int i = 0; i < 1000; i++) { - tx.State.Root.Add(tx, string.Format("{0,5}",i), StreamFor("abcdefg")); + tx.State.Root.Add(string.Format("{0,5}",i), StreamFor("abcdefg")); } tx.Commit(); } @@ -76,7 +76,7 @@ public void CanDeleteAtRoot() { for (int i = 0; i < 15; i++) { - tx.State.Root.Delete(tx, string.Format("{0,5}", i)); + tx.State.Root.Delete(string.Format("{0,5}", i)); } tx.Commit(); } @@ -92,7 +92,7 @@ public void CanDeleteAtRoot() public unsafe List Keys(Tree t, Transaction tx) { var results = new List(); - using (var it = t.Iterate(tx)) + using (var it = t.Iterate()) { if (it.Seek(Slice.BeforeAllKeys) == false) return results; diff --git a/Voron.Tests/Trees/FreeSpaceTest.cs b/Voron.Tests/Trees/FreeSpaceTest.cs index fdc27a3f56..ac33d67fc9 100644 --- a/Voron.Tests/Trees/FreeSpaceTest.cs +++ b/Voron.Tests/Trees/FreeSpaceTest.cs @@ -19,7 +19,7 @@ public void WillBeReused() { for (int i = 0; i < 25; i++) { - tx.State.Root.Add(tx, i.ToString("0000"), new MemoryStream(buffer)); + tx.State.Root.Add(i.ToString("0000"), new MemoryStream(buffer)); } tx.Commit(); @@ -30,7 +30,7 @@ public void WillBeReused() { for (int i = 0; i < 25; i++) { - tx.State.Root.Delete(tx, i.ToString("0000")); + tx.State.Root.Delete(i.ToString("0000")); } tx.Commit(); @@ -41,7 +41,7 @@ public void WillBeReused() { for (int i = 0; i < 25; i++) { - tx.State.Root.Add(tx, i.ToString("0000"), new MemoryStream(buffer)); + tx.State.Root.Add(i.ToString("0000"), new MemoryStream(buffer)); } tx.Commit(); diff --git a/Voron.Tests/Trees/Iteration.cs b/Voron.Tests/Trees/Iteration.cs index 77a89b19fd..245c710b95 100644 --- a/Voron.Tests/Trees/Iteration.cs +++ b/Voron.Tests/Trees/Iteration.cs @@ -15,13 +15,13 @@ public void EmptyIterator() { using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var iterator = tx.State.Root.Iterate(tx); + var iterator = tx.State.Root.Iterate(); Assert.False(iterator.Seek(Slice.BeforeAllKeys)); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var iterator = tx.State.Root.Iterate(tx); + var iterator = tx.State.Root.Iterate(); Assert.False(iterator.Seek(Slice.AfterAllKeys)); } } @@ -37,7 +37,7 @@ public void CanIterateInOrder() { for (int i = 0; i < 25; i++) { - tx.State.Root.Add(tx, i.ToString("0000"), new MemoryStream(buffer)); + tx.State.Root.Add(i.ToString("0000"), new MemoryStream(buffer)); } tx.Commit(); @@ -45,7 +45,7 @@ public void CanIterateInOrder() using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var iterator = tx.State.Root.Iterate(tx); + var iterator = tx.State.Root.Iterate(); Assert.True(iterator.Seek(Slice.BeforeAllKeys)); var slice = new Slice(SliceOptions.Key); diff --git a/Voron.Tests/Trees/MultipleTrees.cs b/Voron.Tests/Trees/MultipleTrees.cs index c112c1ff3f..89d6ac721a 100644 --- a/Voron.Tests/Trees/MultipleTrees.cs +++ b/Voron.Tests/Trees/MultipleTrees.cs @@ -13,14 +13,14 @@ public void CanCreateNewTree() { Env.CreateTree(tx, "test"); - Env.CreateTree(tx, "test").Add(tx, "test", StreamFor("abc")); + Env.CreateTree(tx, "test").Add("test", StreamFor("abc")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var stream = tx.Environment.State.GetTree(tx,"test").Read(tx, "test"); + var stream = tx.Environment.State.GetTree(tx,"test").Read("test"); Assert.NotNull(stream); tx.Commit(); @@ -34,7 +34,7 @@ public void CanUpdateValuesInSubTree() { Env.CreateTree(tx, "test"); - 
Env.CreateTree(tx, "test").Add(tx, "test", StreamFor("abc")); + Env.CreateTree(tx, "test").Add("test", StreamFor("abc")); tx.Commit(); } @@ -42,14 +42,14 @@ public void CanUpdateValuesInSubTree() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.Environment.State.GetTree(tx,"test").Add(tx, "test2", StreamFor("abc")); + tx.Environment.State.GetTree(tx,"test").Add("test2", StreamFor("abc")); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.Read)) { - var stream = tx.Environment.State.GetTree(tx,"test").Read(tx, "test2"); + var stream = tx.Environment.State.GetTree(tx,"test").Read("test2"); Assert.NotNull(stream); tx.Commit(); diff --git a/Voron.Tests/Trees/Rebalance.cs b/Voron.Tests/Trees/Rebalance.cs index ed18aff9fe..54948d11cd 100644 --- a/Voron.Tests/Trees/Rebalance.cs +++ b/Voron.Tests/Trees/Rebalance.cs @@ -10,18 +10,18 @@ public void CanMergeRight() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "1", new MemoryStream(new byte[1472])); - tx.State.Root.Add(tx, "2", new MemoryStream(new byte[992])); - tx.State.Root.Add(tx, "3", new MemoryStream(new byte[1632])); - tx.State.Root.Add(tx, "4", new MemoryStream(new byte[632])); - tx.State.Root.Add(tx, "5", new MemoryStream(new byte[824])); - tx.State.Root.Delete(tx, "3"); - tx.State.Root.Add(tx, "6", new MemoryStream(new byte[1096])); + tx.State.Root.Add("1", new MemoryStream(new byte[1472])); + tx.State.Root.Add("2", new MemoryStream(new byte[992])); + tx.State.Root.Add("3", new MemoryStream(new byte[1632])); + tx.State.Root.Add("4", new MemoryStream(new byte[632])); + tx.State.Root.Add("5", new MemoryStream(new byte[824])); + tx.State.Root.Delete("3"); + tx.State.Root.Add("6", new MemoryStream(new byte[1096])); RenderAndShow(tx, 1); - tx.State.Root.Delete(tx, "6"); - tx.State.Root.Delete(tx, "4"); + tx.State.Root.Delete("6"); + tx.State.Root.Delete("4"); RenderAndShow(tx,1); @@ -34,18 +34,18 @@ public void CanMergeLeft() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "1", new MemoryStream(new byte[1524])); - tx.State.Root.Add(tx, "2", new MemoryStream(new byte[1524])); - tx.State.Root.Add(tx, "3", new MemoryStream(new byte[1024])); - tx.State.Root.Add(tx, "4", new MemoryStream(new byte[64])); + tx.State.Root.Add("1", new MemoryStream(new byte[1524])); + tx.State.Root.Add("2", new MemoryStream(new byte[1524])); + tx.State.Root.Add("3", new MemoryStream(new byte[1024])); + tx.State.Root.Add("4", new MemoryStream(new byte[64])); RenderAndShow(tx, 1); - tx.State.Root.Delete(tx, "2"); + tx.State.Root.Delete("2"); RenderAndShow(tx, 1); - tx.State.Root.Delete(tx, "3"); + tx.State.Root.Delete("3"); RenderAndShow(tx, 1); tx.Commit(); @@ -59,30 +59,30 @@ public void StressTest() { for (int i = 0; i < 80; ++i) { - tx.State.Root.Add(tx, string.Format("{0}1", i), new MemoryStream(new byte[1472])); - tx.State.Root.Add(tx, string.Format("{0}2", i), new MemoryStream(new byte[992])); - tx.State.Root.Add(tx, string.Format("{0}3", i), new MemoryStream(new byte[1632])); - tx.State.Root.Add(tx, string.Format("{0}4", i), new MemoryStream(new byte[632])); - tx.State.Root.Add(tx, string.Format("{0}5", i), new MemoryStream(new byte[824])); - tx.State.Root.Add(tx, string.Format("{0}6", i), new MemoryStream(new byte[1096])); - tx.State.Root.Add(tx, string.Format("{0}7", i), new MemoryStream(new byte[2048])); - tx.State.Root.Add(tx, string.Format("{0}8", i), new MemoryStream(new byte[1228])); - tx.State.Root.Add(tx, string.Format("{0}9", 
i), new MemoryStream(new byte[8192])); + tx.State.Root.Add(string.Format("{0}1", i), new MemoryStream(new byte[1472])); + tx.State.Root.Add(string.Format("{0}2", i), new MemoryStream(new byte[992])); + tx.State.Root.Add(string.Format("{0}3", i), new MemoryStream(new byte[1632])); + tx.State.Root.Add(string.Format("{0}4", i), new MemoryStream(new byte[632])); + tx.State.Root.Add(string.Format("{0}5", i), new MemoryStream(new byte[824])); + tx.State.Root.Add(string.Format("{0}6", i), new MemoryStream(new byte[1096])); + tx.State.Root.Add(string.Format("{0}7", i), new MemoryStream(new byte[2048])); + tx.State.Root.Add(string.Format("{0}8", i), new MemoryStream(new byte[1228])); + tx.State.Root.Add(string.Format("{0}9", i), new MemoryStream(new byte[8192])); } RenderAndShow(tx, 1); for (int i = 79; i >= 0; --i) { - tx.State.Root.Delete(tx, string.Format("{0}1", i)); - tx.State.Root.Delete(tx, string.Format("{0}2", i)); - tx.State.Root.Delete(tx, string.Format("{0}3", i)); - tx.State.Root.Delete(tx, string.Format("{0}4", i)); - tx.State.Root.Delete(tx, string.Format("{0}5", i)); - tx.State.Root.Delete(tx, string.Format("{0}6", i)); - tx.State.Root.Delete(tx, string.Format("{0}7", i)); - tx.State.Root.Delete(tx, string.Format("{0}8", i)); - tx.State.Root.Delete(tx, string.Format("{0}9", i)); + tx.State.Root.Delete(string.Format("{0}1", i)); + tx.State.Root.Delete(string.Format("{0}2", i)); + tx.State.Root.Delete(string.Format("{0}3", i)); + tx.State.Root.Delete(string.Format("{0}4", i)); + tx.State.Root.Delete(string.Format("{0}5", i)); + tx.State.Root.Delete(string.Format("{0}6", i)); + tx.State.Root.Delete(string.Format("{0}7", i)); + tx.State.Root.Delete(string.Format("{0}8", i)); + tx.State.Root.Delete(string.Format("{0}9", i)); } tx.Commit(); diff --git a/Voron.Tests/Trees/Updates.cs b/Voron.Tests/Trees/Updates.cs index 1247b7167e..6f5f657e0f 100644 --- a/Voron.Tests/Trees/Updates.cs +++ b/Voron.Tests/Trees/Updates.cs @@ -15,7 +15,7 @@ public void CanUpdateVeryLargeValueAndThenDeleteIt() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "a", new MemoryStream(buffer)); + tx.State.Root.Add("a", new MemoryStream(buffer)); tx.Commit(); } @@ -32,7 +32,7 @@ public void CanUpdateVeryLargeValueAndThenDeleteIt() using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "a", new MemoryStream(buffer)); + tx.State.Root.Add("a", new MemoryStream(buffer)); tx.Commit(); } @@ -51,8 +51,8 @@ public void CanAddAndUpdate() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test", StreamFor("1")); - tx.State.Root.Add(tx, "test", StreamFor("2")); + tx.State.Root.Add("test", StreamFor("1")); + tx.State.Root.Add("test", StreamFor("2")); var readKey = ReadKey(tx, "test"); Assert.Equal("test", readKey.Item1); @@ -65,9 +65,9 @@ public void CanAddAndUpdate2() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test/1", StreamFor("1")); - tx.State.Root.Add(tx, "test/2", StreamFor("2")); - tx.State.Root.Add(tx, "test/1", StreamFor("3")); + tx.State.Root.Add("test/1", StreamFor("1")); + tx.State.Root.Add("test/2", StreamFor("2")); + tx.State.Root.Add("test/1", StreamFor("3")); var readKey = ReadKey(tx, "test/1"); Assert.Equal("test/1", readKey.Item1); @@ -85,9 +85,9 @@ public void CanAddAndUpdate1() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test/1", StreamFor("1")); - tx.State.Root.Add(tx, "test/2", StreamFor("2")); - 
tx.State.Root.Add(tx, "test/2", StreamFor("3")); + tx.State.Root.Add("test/1", StreamFor("1")); + tx.State.Root.Add("test/2", StreamFor("2")); + tx.State.Root.Add("test/2", StreamFor("3")); var readKey = ReadKey(tx, "test/1"); Assert.Equal("test/1", readKey.Item1); @@ -106,10 +106,10 @@ public void CanDelete() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test", StreamFor("1")); + tx.State.Root.Add("test", StreamFor("1")); Assert.NotNull(ReadKey(tx, "test")); - tx.State.Root.Delete(tx, "test"); + tx.State.Root.Delete("test"); Assert.Null(ReadKey(tx, "test")); } } @@ -119,11 +119,11 @@ public void CanDelete2() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test/1", StreamFor("1")); - tx.State.Root.Add(tx, "test/2", StreamFor("1")); + tx.State.Root.Add("test/1", StreamFor("1")); + tx.State.Root.Add("test/2", StreamFor("1")); Assert.NotNull(ReadKey(tx, "test/2")); - tx.State.Root.Delete(tx, "test/2"); + tx.State.Root.Delete("test/2"); Assert.Null(ReadKey(tx, "test/2")); Assert.NotNull(ReadKey(tx, "test/1")); } @@ -134,11 +134,11 @@ public void CanDelete1() { using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { - tx.State.Root.Add(tx, "test/1", StreamFor("1")); - tx.State.Root.Add(tx, "test/2", StreamFor("1")); + tx.State.Root.Add("test/1", StreamFor("1")); + tx.State.Root.Add("test/2", StreamFor("1")); Assert.NotNull(ReadKey(tx, "test/1")); - tx.State.Root.Delete(tx, "test/1"); + tx.State.Root.Delete("test/1"); Assert.Null(ReadKey(tx, "test/1")); Assert.NotNull(ReadKey(tx, "test/2")); } diff --git a/Voron/Impl/FreeSpace/FreeSpaceHandling.cs b/Voron/Impl/FreeSpace/FreeSpaceHandling.cs index 8ce1d1a021..d8347edee4 100644 --- a/Voron/Impl/FreeSpace/FreeSpaceHandling.cs +++ b/Voron/Impl/FreeSpace/FreeSpaceHandling.cs @@ -16,7 +16,7 @@ public class FreeSpaceHandling : IFreeSpaceHandling if (tx.State.FreeSpaceRoot.State.EntriesCount == 0) return null; - using (var it = tx.State.FreeSpaceRoot.Iterate(tx)) + using (var it = tx.State.FreeSpaceRoot.Iterate()) { if (it.Seek(Slice.BeforeAllKeys) == false) return null; @@ -76,7 +76,7 @@ public class FreeSpaceHandling : IFreeSpaceHandling { foreach (var section in sections) { - tx.State.FreeSpaceRoot.Delete(tx, section); + tx.State.FreeSpaceRoot.Delete(section); } return startSectionId * NumberOfPagesInSection; @@ -84,7 +84,7 @@ public class FreeSpaceHandling : IFreeSpaceHandling var nextSectionId = currentSectionId + 1; var nextId = new Slice(EndianBitConverter.Big.GetBytes(nextSectionId)); - var read = tx.State.FreeSpaceRoot.Read(tx, nextId); + var read = tx.State.FreeSpaceRoot.Read(nextId); if (read == null) { //not a following next section @@ -104,7 +104,7 @@ public class FreeSpaceHandling : IFreeSpaceHandling //mark selected bits to false if (next.SetCount == numberOfExtraBitsNeeded) { - tx.State.FreeSpaceRoot.Delete(tx, nextId); + tx.State.FreeSpaceRoot.Delete(nextId); } else { @@ -112,12 +112,12 @@ public class FreeSpaceHandling : IFreeSpaceHandling { next.Set(i, false); } - tx.State.FreeSpaceRoot.Add(tx, nextId, next.ToStream()); + tx.State.FreeSpaceRoot.Add(nextId, next.ToStream()); } foreach (var section in sections) { - tx.State.FreeSpaceRoot.Delete(tx, section); + tx.State.FreeSpaceRoot.Delete(section); } return startSectionId * NumberOfPagesInSection; @@ -194,7 +194,7 @@ private bool TryFindContinuousRange(Transaction tx, TreeIterator it, int num, St if (current.SetCount == num) { - tx.State.FreeSpaceRoot.Delete(tx, it.CurrentKey); + 
tx.State.FreeSpaceRoot.Delete(it.CurrentKey); } else { @@ -203,7 +203,7 @@ private bool TryFindContinuousRange(Transaction tx, TreeIterator it, int num, St current.Set(i + start, false); } - tx.State.FreeSpaceRoot.Add(tx, it.CurrentKey, current.ToStream()); + tx.State.FreeSpaceRoot.Add(it.CurrentKey, current.ToStream()); } return true; @@ -219,7 +219,7 @@ private static bool TryFindSmallValueMergingTwoSections(Transaction tx, TreeIter var nextSectionId = currentSectionId + 1; var nextId = new Slice(EndianBitConverter.Big.GetBytes(nextSectionId)); - var read = tx.State.FreeSpaceRoot.Read(tx, nextId); + var read = tx.State.FreeSpaceRoot.Read(nextId); if (read == null) return false; @@ -231,7 +231,7 @@ private static bool TryFindSmallValueMergingTwoSections(Transaction tx, TreeIter if (next.SetCount == nextRange) { - tx.State.FreeSpaceRoot.Delete(tx, nextId); + tx.State.FreeSpaceRoot.Delete(nextId); } else { @@ -239,12 +239,12 @@ private static bool TryFindSmallValueMergingTwoSections(Transaction tx, TreeIter { next.Set(i, false); } - tx.State.FreeSpaceRoot.Add(tx, nextId, next.ToStream()); + tx.State.FreeSpaceRoot.Add(nextId, next.ToStream()); } if (current.SetCount == currentRange) { - tx.State.FreeSpaceRoot.Delete(tx, it.CurrentKey); + tx.State.FreeSpaceRoot.Delete(it.CurrentKey); } else { @@ -252,7 +252,7 @@ private static bool TryFindSmallValueMergingTwoSections(Transaction tx, TreeIter { current.Set(NumberOfPagesInSection - 1 - i, false); } - tx.State.FreeSpaceRoot.Add(tx, nextId, next.ToStream()); + tx.State.FreeSpaceRoot.Add(nextId, next.ToStream()); } @@ -262,17 +262,17 @@ private static bool TryFindSmallValueMergingTwoSections(Transaction tx, TreeIter public List AllPages(Transaction tx) { - return tx.State.FreeSpaceRoot.AllPages(tx); + return tx.State.FreeSpaceRoot.AllPages(); } public void FreePage(Transaction tx, long pageNumber) { var section = pageNumber / NumberOfPagesInSection; var sectionKey = new Slice(EndianBitConverter.Big.GetBytes(section)); - var result = tx.State.FreeSpaceRoot.Read(tx, sectionKey); + var result = tx.State.FreeSpaceRoot.Read(sectionKey); var sba = result == null ? new StreamBitArray() : new StreamBitArray(result.Reader); sba.Set((int)(pageNumber % NumberOfPagesInSection), true); - tx.State.FreeSpaceRoot.Add(tx, sectionKey, sba.ToStream()); + tx.State.FreeSpaceRoot.Add(sectionKey, sba.ToStream()); } } } diff --git a/Voron/Impl/SnapshotReader.cs b/Voron/Impl/SnapshotReader.cs index e3a85a5c12..371ae64740 100644 --- a/Voron/Impl/SnapshotReader.cs +++ b/Voron/Impl/SnapshotReader.cs @@ -36,7 +36,7 @@ public ReadResult Read(string treeName, Slice key, WriteBatch writeBatch = null) case WriteBatch.BatchOperationType.Add: { var reader = new ValueReader(stream); - return new ReadResult(reader, version.HasValue ? (ushort)(version.Value + 1) : tree.ReadVersion(Transaction, key)); + return new ReadResult(reader, version.HasValue ? (ushort)(version.Value + 1) : tree.ReadVersion(key)); } case WriteBatch.BatchOperationType.Delete: return null; @@ -47,13 +47,13 @@ public ReadResult Read(string treeName, Slice key, WriteBatch writeBatch = null) if (tree == null) tree = GetTree(treeName); - return tree.Read(Transaction, key); + return tree.Read(key); } public int GetDataSize(string treeName, Slice key) { var tree = GetTree(treeName); - return tree.GetDataSize(Transaction, key); + return tree.GetDataSize(key); } public bool Contains(string treeName, Slice key, out ushort? 
version, WriteBatch writeBatch = null) @@ -77,7 +77,7 @@ public bool Contains(string treeName, Slice key, out ushort? version, WriteBatch } var tree = GetTree(treeName); - var readVersion = tree.ReadVersion(Transaction, key); + var readVersion = tree.ReadVersion(key); var exists = readVersion > 0; @@ -105,13 +105,13 @@ public ushort ReadVersion(string treeName, Slice key, WriteBatch writeBatch = nu } var tree = GetTree(treeName); - return tree.ReadVersion(Transaction, key); + return tree.ReadVersion(key); } public IIterator Iterate(string treeName, WriteBatch writeBatch = null) { var tree = GetTree(treeName); - return tree.Iterate(Transaction, writeBatch); + return tree.Iterate(writeBatch); } public void Dispose() @@ -122,7 +122,7 @@ public void Dispose() public IIterator MultiRead(string treeName, Slice key) { var tree = GetTree(treeName); - return tree.MultiRead(Transaction, key); + return tree.MultiRead(key); } private Tree GetTree(string treeName) diff --git a/Voron/Impl/StorageEnvironmentState.cs b/Voron/Impl/StorageEnvironmentState.cs index 5d3594701b..9f54e94542 100644 --- a/Voron/Impl/StorageEnvironmentState.cs +++ b/Voron/Impl/StorageEnvironmentState.cs @@ -23,12 +23,12 @@ public StorageEnvironmentState(Tree freeSpaceRoot, Tree root, long nextPageNumbe NextPageNumber = nextPageNumber; } - public StorageEnvironmentState Clone() + public StorageEnvironmentState Clone(Transaction tx) { - return new StorageEnvironmentState() + return new StorageEnvironmentState { - Root = Root != null ? Root.Clone() : null, - FreeSpaceRoot = FreeSpaceRoot != null ? FreeSpaceRoot.Clone() : null, + Root = Root != null ? Root.Clone(tx) : null, + FreeSpaceRoot = FreeSpaceRoot != null ? FreeSpaceRoot.Clone(tx) : null, NextPageNumber = NextPageNumber }; } diff --git a/Voron/Impl/Transaction.cs b/Voron/Impl/Transaction.cs index 6d683fa408..25041b5d5a 100644 --- a/Voron/Impl/Transaction.cs +++ b/Voron/Impl/Transaction.cs @@ -87,12 +87,12 @@ public Transaction(StorageEnvironment env, long id, TransactionFlags flags, IFre // for write transactions, we can use the current one (which == null) _scratchPagerState = scratchPagerState; - _state = env.State; + _state = env.State.Clone(this); _journal.GetSnapshots().ForEach(AddJournalSnapshot); return; } - _state = env.State.Clone(); + _state = env.State.Clone(this); InitTransactionHeader(); @@ -141,7 +141,7 @@ public Tree ReadTree(string treeName) if (_trees.TryGetValue(treeName, out tree)) return tree; - var header = (TreeRootHeader*)State.Root.DirectRead(this, treeName); + var header = (TreeRootHeader*)State.Root.DirectRead(treeName); if (header != null) { tree = Tree.Open(this, _env.SliceComparer, header); @@ -154,7 +154,7 @@ public Tree ReadTree(string treeName) return null; } - public Page ModifyPage(long num, Page page) + internal Page ModifyPage(long num, Page page) { _env.AssertFlushingNotFailed(); @@ -200,7 +200,7 @@ public Page GetReadOnlyPage(long pageNumber) return p; } - public Page AllocatePage(int numberOfPages, long? pageNumber = null) + internal Page AllocatePage(int numberOfPages, long? pageNumber = null) { if (pageNumber == null) { @@ -260,7 +260,7 @@ internal int GetNumberOfFreePages(NodeHeader* node) return GetNodeDataSize(node) / Constants.PageNumberSize; } - internal int GetNodeDataSize(NodeHeader* node) + private int GetNodeDataSize(NodeHeader* node) { if (node->Flags == (NodeFlags.PageRef)) // lots of data, enough to overflow! 
{ @@ -292,7 +292,7 @@ public void Commit() var treeState = tree.State; if (treeState.IsModified) { - var treePtr = (TreeRootHeader*)State.Root.DirectAdd(this, tree.Name, sizeof(TreeRootHeader)); + var treePtr = (TreeRootHeader*)State.Root.DirectAdd(tree.Name, sizeof(TreeRootHeader)); treeState.CopyTo(treePtr); } } @@ -348,7 +348,7 @@ private unsafe void FlushAllMultiValues() var key = multiValueTree.Key.Item2; var childTree = multiValueTree.Value; - var trh = (TreeRootHeader*)parentTree.DirectAdd(this, key, sizeof(TreeRootHeader), NodeFlags.MultiValuePageRef); + var trh = (TreeRootHeader*)parentTree.DirectAdd(key, sizeof(TreeRootHeader), NodeFlags.MultiValuePageRef); childTree.State.CopyTo(trh); //parentTree.SetAsMultiValueTreeRef(this, key); @@ -367,8 +367,7 @@ public void Dispose() } } - - public void FreePage(long pageNumber) + internal void FreePage(long pageNumber) { Debug.Assert(pageNumber >= 2); _dirtyPages.Remove(pageNumber); @@ -385,18 +384,13 @@ internal void UpdateRootsIfNeeded(Tree root, Tree freeSpace) } } - public void AddPagerState(PagerState state) + internal void AddPagerState(PagerState state) { LatestPagerState = state; _pagerStates.Add(state); } - public Cursor NewCursor(Tree tree) - { - return new Cursor(); - } - - public void AddMultiValueTree(Tree tree, Slice key, Tree mvTree) + internal void AddMultiValueTree(Tree tree, Slice key, Tree mvTree) { if (_multiValueTrees == null) _multiValueTrees = new Dictionary, Tree>(new TreeAndSliceComparer(_env.SliceComparer)); @@ -404,7 +398,7 @@ public void AddMultiValueTree(Tree tree, Slice key, Tree mvTree) _multiValueTrees.Add(Tuple.Create(tree, key), mvTree); } - public bool TryGetMultiValueTree(Tree tree, Slice key, out Tree mvTree) + internal bool TryGetMultiValueTree(Tree tree, Slice key, out Tree mvTree) { mvTree = null; if (_multiValueTrees == null) @@ -412,7 +406,7 @@ public bool TryGetMultiValueTree(Tree tree, Slice key, out Tree mvTree) return _multiValueTrees.TryGetValue(Tuple.Create(tree, key), out mvTree); } - public bool TryRemoveMultiValueTree(Tree parentTree, Slice key) + internal bool TryRemoveMultiValueTree(Tree parentTree, Slice key) { var keyToRemove = Tuple.Create(parentTree, key); if (_multiValueTrees == null || !_multiValueTrees.ContainsKey(keyToRemove)) @@ -421,12 +415,11 @@ public bool TryRemoveMultiValueTree(Tree parentTree, Slice key) return _multiValueTrees.Remove(keyToRemove); } - public bool RemoveTree(string name) + internal bool RemoveTree(string name) { return _trees.Remove(name); } - private void AddJournalSnapshot(JournalSnapshot snapshot) { if (JournalSnapshots.Any(x => x.Number == snapshot.Number)) @@ -436,12 +429,12 @@ private void AddJournalSnapshot(JournalSnapshot snapshot) JournalSnapshots.Add(snapshot); } - public List GetTransactionPages() + internal List GetTransactionPages() { return _transactionPages; } - public RecentlyFoundPages GetRecentlyFoundPages(Tree tree) + internal RecentlyFoundPages GetRecentlyFoundPages(Tree tree) { RecentlyFoundPages pages; if (_recentlyFoundPages.TryGetValue(tree, out pages)) @@ -450,12 +443,12 @@ public RecentlyFoundPages GetRecentlyFoundPages(Tree tree) return null; } - public void ClearRecentFoundPages(Tree tree) + internal void ClearRecentFoundPages(Tree tree) { _recentlyFoundPages.Remove(tree); } - public void AddRecentlyFoundPage(Tree tree, RecentlyFoundPages.FoundPage foundPage) + internal void AddRecentlyFoundPage(Tree tree, RecentlyFoundPages.FoundPage foundPage) { RecentlyFoundPages pages; if (_recentlyFoundPages.TryGetValue(tree, out pages) == 
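A large share of this patch is visibility tightening: the page-lifecycle members (ModifyPage, AllocatePage, FreePage, AddPagerState) and the tree-bookkeeping helpers become internal, and the unused NewCursor is deleted, leaving Tree and WriteBatch as the intended external surface. A self-contained sketch of the copy-on-write idea behind ModifyPage (hypothetical types; the real implementation goes through scratch buffers and the journal rather than a byte[][]):

    using System.Collections.Generic;

    class CopyOnWritePages
    {
        readonly Dictionary<long, byte[]> _dirtyPages = new Dictionary<long, byte[]>();

        public byte[] ModifyPage(long pageNumber, byte[][] dataFile)
        {
            byte[] writable;
            if (_dirtyPages.TryGetValue(pageNumber, out writable))
                return writable;                             // already private to this transaction
            writable = (byte[])dataFile[pageNumber].Clone(); // first touch: copy the committed page
            _dirtyPages[pageNumber] = writable;
            return writable;
        }
    }
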
false) @@ -464,7 +457,7 @@ public void AddRecentlyFoundPage(Tree tree, RecentlyFoundPages.FoundPage foundPa pages.Add(foundPage); } - public void AddTree(string name, Tree tree) + internal void AddTree(string name, Tree tree) { Tree value; if (_trees.TryGetValue(name, out value) && value != null) diff --git a/Voron/Impl/TransactionMergingWriter.cs b/Voron/Impl/TransactionMergingWriter.cs index 0446ff3f0b..780207027a 100644 --- a/Voron/Impl/TransactionMergingWriter.cs +++ b/Voron/Impl/TransactionMergingWriter.cs @@ -174,25 +174,25 @@ private void HandleOperations(Transaction tx, List writes, Can case WriteBatch.BatchOperationType.Add: var stream = operation.Value as Stream; if (stream != null) - tree.Add(tx, operation.Key, stream, operation.Version); + tree.Add(operation.Key, stream, operation.Version); else - tree.Add(tx, operation.Key, (Slice)operation.Value, operation.Version); + tree.Add(operation.Key, (Slice)operation.Value, operation.Version); actionType = DebugActionType.Add; break; case WriteBatch.BatchOperationType.Delete: - tree.Delete(tx, operation.Key, operation.Version); + tree.Delete(operation.Key, operation.Version); actionType = DebugActionType.Delete; break; case WriteBatch.BatchOperationType.MultiAdd: - tree.MultiAdd(tx, operation.Key, operation.Value as Slice, version: operation.Version); + tree.MultiAdd(operation.Key, operation.Value as Slice, version: operation.Version); actionType = DebugActionType.MultiAdd; break; case WriteBatch.BatchOperationType.MultiDelete: - tree.MultiDelete(tx, operation.Key, operation.Value as Slice, operation.Version); + tree.MultiDelete(operation.Key, operation.Value as Slice, operation.Version); actionType = DebugActionType.MultiDelete; break; case WriteBatch.BatchOperationType.Increment: - tree.Increment(tx, operation.Key, (long)operation.Value, operation.Version); + tree.Increment(operation.Key, (long)operation.Value, operation.Version); actionType = DebugActionType.Increment; break; default: diff --git a/Voron/StorageEnvironment.cs b/Voron/StorageEnvironment.cs index 05c989770f..b3db8f3c87 100644 --- a/Voron/StorageEnvironment.cs +++ b/Voron/StorageEnvironment.cs @@ -233,12 +233,12 @@ public void DeleteTree(Transaction tx, string name) if (tree == null) return; - foreach (var page in tree.AllPages(tx)) + foreach (var page in tree.AllPages()) { tx.FreePage(page); } - tx.State.Root.Delete(tx, name); + tx.State.Root.Delete(name); tx.RemoveTree(name); } @@ -255,7 +255,7 @@ public unsafe Tree CreateTree(Transaction tx, string name) Slice key = name; // we are in a write transaction, no need to handle locks - var header = (TreeRootHeader*)tx.State.Root.DirectRead(tx, key); + var header = (TreeRootHeader*)tx.State.Root.DirectRead(key); if (header != null) { tree = Tree.Open(tx, _sliceComparer, header); @@ -266,7 +266,7 @@ public unsafe Tree CreateTree(Transaction tx, string name) tree = Tree.Create(tx, _sliceComparer); tree.Name = name; - var space = tx.State.Root.DirectAdd(tx, key, sizeof(TreeRootHeader)); + var space = tx.State.Root.DirectAdd(key, sizeof(TreeRootHeader)); tree.State.CopyTo((TreeRootHeader*)space); tree.State.IsModified = true; @@ -438,14 +438,14 @@ public Dictionary> AllPages(Transaction tx) { var results = new Dictionary>(StringComparer.OrdinalIgnoreCase) { - {"Root", State.Root.AllPages(tx)}, - {"Free Space Overhead", State.FreeSpaceRoot.AllPages(tx)}, + {"Root", State.Root.AllPages()}, + {"Free Space Overhead", State.FreeSpaceRoot.AllPages()}, {"Free Pages", _freeSpaceHandling.AllPages(tx)} }; foreach (var tree in tx.Trees) 
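The TransactionMergingWriter hunk shows the other half of the public surface: callers describe work as WriteBatch operations, and the writer replays them inside a single merged write transaction, which is where the switch over BatchOperationType above runs. A usage sketch, assuming the batch API shape used elsewhere in this series (key, value, tree name, optional version) and a TransactionMergingWriter named writer:

    var batch = new WriteBatch();
    batch.Add("users/1", new MemoryStream(new byte[] { 7 }), "users");
    batch.Delete("users/2", "users");
    batch.MultiAdd("users/1/tags", "admin", "tags");
    writer.Write(batch); // blocks until the batch commits as part of a merged transaction
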
{ - results.Add(tree.Name, tree.AllPages(tx)); + results.Add(tree.Name, tree.AllPages()); } return results; diff --git a/Voron/Trees/Tree.MultiTree.cs b/Voron/Trees/Tree.MultiTree.cs index 6eb0e28c50..86d05f963d 100644 --- a/Voron/Trees/Tree.MultiTree.cs +++ b/Voron/Trees/Tree.MultiTree.cs @@ -37,10 +37,10 @@ public unsafe partial class Tree { public bool IsMultiValueTree { get; set; } - public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = null) + public void MultiAdd(Slice key, Slice value, ushort? version = null) { if (value == null) throw new ArgumentNullException("value"); - int maxNodeSize = tx.DataPager.MaxNodeSize; + int maxNodeSize = _tx.DataPager.MaxNodeSize; if (value.Size > maxNodeSize) throw new ArgumentException( "Cannot add a value to child tree that is over " + maxNodeSize + " bytes in size", "value"); @@ -50,37 +50,37 @@ public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = n State.IsModified = true; Lazy lazy; - var page = FindPageFor(tx, key, out lazy); + var page = FindPageFor(key, out lazy); if ((page == null || page.LastMatch != 0)) { - MultiAddOnNewValue(tx, key, value, version, maxNodeSize); + MultiAddOnNewValue(_tx, key, value, version, maxNodeSize); return; } - page = tx.ModifyPage(page.PageNumber, page); + page = _tx.ModifyPage(page.PageNumber, page); var item = page.GetNode(page.LastSearchPosition); // already was turned into a multi tree, not much to do here if (item->Flags == NodeFlags.MultiValuePageRef) { - var existingTree = OpenOrCreateMultiValueTree(tx, key, item); - existingTree.DirectAdd(tx, value, 0, version: version); + var existingTree = OpenOrCreateMultiValueTree(_tx, key, item); + existingTree.DirectAdd(value, 0, version: version); return; } byte* nestedPagePtr; if (item->Flags == NodeFlags.PageRef) { - var overFlowPage = tx.ModifyPage(item->PageNumber, null); + var overFlowPage = _tx.ModifyPage(item->PageNumber, null); nestedPagePtr = overFlowPage.Base + Constants.PageHeaderSize; } else { - nestedPagePtr = NodeHeader.DirectAccess(tx, item); + nestedPagePtr = NodeHeader.DirectAccess(_tx, item); } - var nestedPage = new Page(nestedPagePtr, "multi tree", (ushort) NodeHeader.GetDataSize(tx, item)); + var nestedPage = new Page(nestedPagePtr, "multi tree", (ushort)NodeHeader.GetDataSize(_tx, item)); var existingItem = nestedPage.Search(value, NativeMethods.memcmp); if (nestedPage.LastMatch != 0) @@ -98,7 +98,7 @@ public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = n nestedPage.RemoveNode(nestedPage.LastSearchPosition); } - if (nestedPage.HasSpaceFor(tx, value, 0)) + if (nestedPage.HasSpaceFor(_tx, value, 0)) { // we are now working on top of the modified root page, we can just modify the memory directly nestedPage.AddDataNode(nestedPage.LastSearchPosition, value, 0, previousNodeRevision); @@ -111,21 +111,21 @@ public void MultiAdd(Transaction tx, Slice key, Slice value, ushort? version = n { // we can just expand the current value... 
no need to create a nested tree yet var actualPageSize = (ushort)Math.Min(Utils.NearestPowerOfTwo(newRequiredSize), maxNodeSize); - ExpandMultiTreeNestedPageSize(tx, key, value, nestedPagePtr, actualPageSize, item->DataSize); + ExpandMultiTreeNestedPageSize(_tx, key, value, nestedPagePtr, actualPageSize, item->DataSize); return; } // we now have to convert this into a tree instance, instead of just a nested page - var tree = Create(tx, _cmp, TreeFlags.MultiValue); + var tree = Create(_tx, _cmp, TreeFlags.MultiValue); for (int i = 0; i < nestedPage.NumberOfEntries; i++) { var existingValue = nestedPage.GetNodeKey(i); - tree.DirectAdd(tx, existingValue, 0); + tree.DirectAdd(existingValue, 0); } - tree.DirectAdd(tx, value, 0, version: version); - tx.AddMultiValueTree(this, key, tree); + tree.DirectAdd(value, 0, version: version); + _tx.AddMultiValueTree(this, key, tree); // we need to record that we switched to tree mode here, so the next call wouldn't also try to create the tree again - DirectAdd(tx, key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef); + DirectAdd(key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef); } private void ExpandMultiTreeNestedPageSize(Transaction tx, Slice key, Slice value, byte* nestedPagePtr, ushort newSize, int currentSize) @@ -136,10 +136,10 @@ private void ExpandMultiTreeNestedPageSize(Transaction tx, Slice key, Slice valu { var tempPagePointer = tmp.TempPagePointer; NativeMethods.memcpy(tempPagePointer, nestedPagePtr, currentSize); - Delete(tx, key); // release our current page + Delete(key); // release our current page Page nestedPage = new Page(tempPagePointer, "multi tree", (ushort)currentSize); - var ptr = DirectAdd(tx, key, newSize); + var ptr = DirectAdd(key, newSize); var newNestedPage = new Page(ptr, "multi tree", newSize) { @@ -172,16 +172,16 @@ private void MultiAddOnNewValue(Transaction tx, Slice key, Slice value, ushort? // otherwise, we would have to put this in overflow page, and that won't save us any space anyway var tree = Create(tx, _cmp, TreeFlags.MultiValue); - tree.DirectAdd(tx, value, 0); + tree.DirectAdd(value, 0); tx.AddMultiValueTree(this, key, tree); - DirectAdd(tx, key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef); + DirectAdd(key, sizeof (TreeRootHeader), NodeFlags.MultiValuePageRef); return; } var actualPageSize = (ushort) Math.Min(Utils.NearestPowerOfTwo(requiredPageSize), maxNodeSize); - var ptr = DirectAdd(tx, key, actualPageSize); + var ptr = DirectAdd(key, actualPageSize); var nestedPage = new Page(ptr, "multi tree", actualPageSize) { @@ -196,38 +196,38 @@ private void MultiAddOnNewValue(Transaction tx, Slice key, Slice value, ushort? nestedPage.AddDataNode(0, value, 0, 0); } - public void MultiDelete(Transaction tx, Slice key, Slice value, ushort? version = null) + public void MultiDelete(Slice key, Slice value, ushort? 
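MultiAddOnNewValue and ExpandMultiTreeNestedPageSize encode a three-stage storage strategy for multi-values: values start in a small nested page embedded in the leaf node, that page grows by powers of two while it still fits in a node (the Utils.NearestPowerOfTwo/maxNodeSize clamp above), and only then is the key promoted to a dedicated multi-value tree. A self-contained sketch of the escalation rule; the constant is assumed for illustration, the real bound comes from the pager's MaxNodeSize:

    const int MaxNodeSize = 2048; // assumed

    static string PlaceValue(int requiredSize, int currentNestedPageSize)
    {
        if (requiredSize <= currentNestedPageSize)
            return "add to the existing nested page";
        if (requiredSize <= MaxNodeSize)
            return "expand the nested page to " + NearestPowerOfTwo(requiredSize) + " bytes";
        return "convert the nested page into a multi-value tree";
    }

    static int NearestPowerOfTwo(int v)
    {
        var p = 1;
        while (p < v) p <<= 1;
        return p;
    }
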
version = null) { State.IsModified = true; Lazy lazy; - var page = FindPageFor(tx, key, out lazy); + var page = FindPageFor(key, out lazy); if (page == null || page.LastMatch != 0) { return; //nothing to delete - key not found } - page = tx.ModifyPage(page.PageNumber, page); + page = _tx.ModifyPage(page.PageNumber, page); var item = page.GetNode(page.LastSearchPosition); if (item->Flags == NodeFlags.MultiValuePageRef) //multi-value tree exists { - var tree = OpenOrCreateMultiValueTree(tx, key, item); + var tree = OpenOrCreateMultiValueTree(_tx, key, item); - tree.Delete(tx, value, version); + tree.Delete(value, version); // previously, we would convert back to a simple model if we dropped to a single entry // however, it doesn't really make sense, once you got enough values to go to an actual nested // tree, you are probably going to remain that way, or be removed completely. if (tree.State.EntriesCount != 0) return; - tx.TryRemoveMultiValueTree(this, key); - tx.FreePage(tree.State.RootPageNumber); - Delete(tx, key); + _tx.TryRemoveMultiValueTree(this, key); + _tx.FreePage(tree.State.RootPageNumber); + Delete(key); } else // we use a nested page here { - var nestedPage = new Page(NodeHeader.DirectAccess(tx, item), "multi tree", (ushort)NodeHeader.GetDataSize(tx, item)); + var nestedPage = new Page(NodeHeader.DirectAccess(_tx, item), "multi tree", (ushort)NodeHeader.GetDataSize(_tx, item)); var nestedItem = nestedPage.Search(value, NativeMethods.memcmp); if (nestedItem == null) // value not found return; @@ -235,27 +235,30 @@ public void MultiDelete(Transaction tx, Slice key, Slice value, ushort? version byte* nestedPagePtr; if (item->Flags == NodeFlags.PageRef) { - var overFlowPage = tx.ModifyPage(item->PageNumber, null); + var overFlowPage = _tx.ModifyPage(item->PageNumber, null); nestedPagePtr = overFlowPage.Base + Constants.PageHeaderSize; } else { - nestedPagePtr = NodeHeader.DirectAccess(tx, item); + nestedPagePtr = NodeHeader.DirectAccess(_tx, item); } - nestedPage = new Page(nestedPagePtr, "multi tree", (ushort)NodeHeader.GetDataSize(tx, item)); + nestedPage = new Page(nestedPagePtr, "multi tree", (ushort)NodeHeader.GetDataSize(_tx, item)) + { + LastSearchPosition = nestedPage.LastSearchPosition + }; CheckConcurrency(key, value, version, nestedItem->Version, TreeActionType.Delete); nestedPage.RemoveNode(nestedPage.LastSearchPosition); if (nestedPage.NumberOfEntries == 0) - Delete(tx, key); + Delete(key); } } - public IIterator MultiRead(Transaction tx, Slice key) + public IIterator MultiRead(Slice key) { Lazy lazy; - var page = FindPageFor(tx, key, out lazy); + var page = FindPageFor(key, out lazy); if (page == null || page.LastMatch != 0) { @@ -272,17 +275,17 @@ public IIterator MultiRead(Transaction tx, Slice key) if (item->Flags == NodeFlags.MultiValuePageRef) { - var tree = OpenOrCreateMultiValueTree(tx, key, item); + var tree = OpenOrCreateMultiValueTree(_tx, key, item); - return tree.Iterate(tx); + return tree.Iterate(); } - var nestedPage = new Page(NodeHeader.DirectAccess(tx, item), "multi tree", (ushort)NodeHeader.GetDataSize(tx, item)); + var nestedPage = new Page(NodeHeader.DirectAccess(_tx, item), "multi tree", (ushort)NodeHeader.GetDataSize(_tx, item)); return new PageIterator(_cmp, nestedPage); } - internal Tree OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader* item) + private Tree OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader* item) { Tree tree; if (tx.TryGetMultiValueTree(this, key, out tree)) @@ -299,24 +302,6 @@ internal Tree 
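MultiRead hides the nested-page versus nested-tree distinction behind a single IIterator, so callers enumerate values the same way however many there are. A usage sketch with the transaction-bound API as above; Slice.BeforeAllKeys is Voron's usual seek sentinel, and the iterator is assumed disposable:

    tree.MultiAdd("users/1/tags", "admin");
    tree.MultiAdd("users/1/tags", "ops");
    using (var it = tree.MultiRead("users/1/tags"))
    {
        if (it.Seek(Slice.BeforeAllKeys))
        {
            do
            {
                Console.WriteLine(it.CurrentKey); // each value stored under the key
            } while (it.MoveNext());
        }
    }
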
OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader* return tree; } - public bool SetAsMultiValueTreeRef(Transaction tx, Slice key) - { - Lazy lazy; - var foundPage = FindPageFor(tx, key, out lazy); - var page = tx.ModifyPage(foundPage.PageNumber, foundPage); - - if (page.LastMatch != 0) - return false; // not there - - var nodeHeader = page.GetNode(page.LastSearchPosition); - if (nodeHeader->Flags == NodeFlags.MultiValuePageRef) - return false; - if (nodeHeader->Flags != NodeFlags.Data) - throw new InvalidOperationException("Only data nodes can be set to MultiValuePageRef"); - nodeHeader->Flags = NodeFlags.MultiValuePageRef; - return true; - } - private bool TryOverwriteDataOrMultiValuePageRefNode(NodeHeader* updatedNode, Slice key, int len, NodeFlags requestedNodeType, ushort? version, out byte* pos) diff --git a/Voron/Trees/Tree.cs b/Voron/Trees/Tree.cs index 9f3dd2cbd4..7f476c2b10 100644 --- a/Voron/Trees/Tree.cs +++ b/Voron/Trees/Tree.cs @@ -22,23 +22,27 @@ public TreeMutableState State get { return _state; } } + private readonly Transaction _tx; + private readonly SliceComparer _cmp; - private Tree(SliceComparer cmp, long root) + private Tree(Transaction tx, SliceComparer cmp, long root) { + _tx = tx; _cmp = cmp; _state.RootPageNumber = root; } - private Tree(SliceComparer cmp, TreeMutableState state) + private Tree(Transaction tx, SliceComparer cmp, TreeMutableState state) { + _tx = tx; _cmp = cmp; _state = state; } public static Tree Open(Transaction tx, SliceComparer cmp, TreeRootHeader* header) { - return new Tree(cmp, header->RootPageNumber) + return new Tree(tx, cmp, header->RootPageNumber) { _state = { @@ -57,7 +61,7 @@ public static Tree Open(Transaction tx, SliceComparer cmp, TreeRootHeader* heade public static Tree Create(Transaction tx, SliceComparer cmp, TreeFlags flags = TreeFlags.None) { var newRootPage = NewPage(tx, PageFlags.Leaf, 1); - var tree = new Tree(cmp, newRootPage.PageNumber) + var tree = new Tree(tx, cmp, newRootPage.PageNumber) { _state = { @@ -71,38 +75,39 @@ public static Tree Create(Transaction tx, SliceComparer cmp, TreeFlags flags = T return tree; } - public void Add(Transaction tx, Slice key, Stream value, ushort? version = null) + public void Add(Slice key, Stream value, ushort? version = null) { if (value == null) throw new ArgumentNullException("value"); if (value.Length > int.MaxValue) throw new ArgumentException("Cannot add a value that is over 2GB in size", "value"); State.IsModified = true; - var pos = DirectAdd(tx, key, (int)value.Length, version: version); + var pos = DirectAdd(key, (int)value.Length, version: version); - CopyStreamToPointer(tx, value, pos); + CopyStreamToPointer(_tx, value, pos); } - public long Increment(Transaction tx, Slice key, long delta, ushort? version = null) + public long Increment(Slice key, long delta, ushort? version = null) { long currentValue = 0; - var read = Read(tx, key); - if (read != null) - currentValue = read.Reader.ReadLittleEndianInt64(); + var read = Read(key); + if (read != null) + currentValue = *(long*)read.Reader.Base; var value = currentValue + delta; - Add(tx, key, BitConverter.GetBytes(value), version); + var result = (long*)DirectAdd(key, sizeof(long), version: version); + *result = value; return value; } - public void Add(Transaction tx, Slice key, byte[] value, ushort? version = null) + public void Add(Slice key, byte[] value, ushort? 
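The Increment rewrite above drops the stream-plus-BitConverter round-trip: the current value is read through the new ValueReader.Base pointer (equivalent to ReadLittleEndianInt64 on little-endian hardware) and the new value is written straight into the slot DirectAdd reserves. Usage stays simple:

    var count = tree.Increment("stats/visits", 1); // missing key starts from 0, so this yields 1
    tree.Increment("stats/visits", 10);            // 11
    tree.Increment("stats/visits", -11);           // deltas may be negative; back to 0
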
version = null) { if (value == null) throw new ArgumentNullException("value"); State.IsModified = true; - var pos = DirectAdd(tx, key, value.Length, version: version); + var pos = DirectAdd(key, value.Length, version: version); fixed (byte* src = value) { @@ -110,12 +115,12 @@ public void Add(Transaction tx, Slice key, byte[] value, ushort? version = null) } } - public void Add(Transaction tx, Slice key, Slice value, ushort? version = null) + public void Add(Slice key, Slice value, ushort? version = null) { if (value == null) throw new ArgumentNullException("value"); State.IsModified = true; - var pos = DirectAdd(tx, key, value.Size, version: version); + var pos = DirectAdd(key, value.Size, version: version); value.CopyTo(pos); } @@ -138,21 +143,21 @@ private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos) } } - internal byte* DirectAdd(Transaction tx, Slice key, int len, NodeFlags nodeType = NodeFlags.Data, ushort? version = null) + internal byte* DirectAdd(Slice key, int len, NodeFlags nodeType = NodeFlags.Data, ushort? version = null) { Debug.Assert(nodeType == NodeFlags.Data || nodeType == NodeFlags.MultiValuePageRef); - if (tx.Flags == (TransactionFlags.ReadWrite) == false) + if (_tx.Flags == (TransactionFlags.ReadWrite) == false) throw new ArgumentException("Cannot add a value in a read only transaction"); - if (key.Size > tx.DataPager.MaxNodeSize) + if (key.Size > _tx.DataPager.MaxNodeSize) throw new ArgumentException( - "Key size is too big, must be at most " + tx.DataPager.MaxNodeSize + " bytes, but was " + key.Size, "key"); + "Key size is too big, must be at most " + _tx.DataPager.MaxNodeSize + " bytes, but was " + key.Size, "key"); Lazy lazy; - var foundPage = FindPageFor(tx, key, out lazy); + var foundPage = FindPageFor(key, out lazy); - var page = tx.ModifyPage(foundPage.PageNumber, foundPage); + var page = _tx.ModifyPage(foundPage.PageNumber, foundPage); ushort nodeVersion = 0; bool? shouldGoToOverflowPage = null; @@ -162,7 +167,7 @@ private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos) Debug.Assert(node->KeySize == key.Size && new Slice(node).Equals(key)); - shouldGoToOverflowPage = tx.DataPager.ShouldGoToOverflowPage(len); + shouldGoToOverflowPage = _tx.DataPager.ShouldGoToOverflowPage(len); byte* pos; if (shouldGoToOverflowPage == false) @@ -174,11 +179,11 @@ private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos) else { // optimization for PageRef - try to overwrite existing overflows - if (TryOverwriteOverflowPages(tx, State, node, key, len, version, out pos)) + if (TryOverwriteOverflowPages(State, node, key, len, version, out pos)) return pos; } - RemoveLeafNode(tx, page, out nodeVersion); + RemoveLeafNode(page, out nodeVersion); } else // new item should be recorded { @@ -190,23 +195,23 @@ private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos) var lastSearchPosition = page.LastSearchPosition; // searching for overflow pages might change this byte* overFlowPos = null; var pageNumber = -1L; - if (shouldGoToOverflowPage ?? tx.DataPager.ShouldGoToOverflowPage(len)) + if (shouldGoToOverflowPage ?? 
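All the Add overloads funnel into DirectAdd, which reserves len bytes in the right leaf (or an overflow chain) and hands back a raw pointer for the caller to fill, so no intermediate buffer is needed. A hedged sketch of that reserve-then-fill shape; since DirectAdd is internal after this patch, this mirrors what the Tree methods themselves do rather than something external code can call:

    unsafe void AddValue(Tree tree, Slice key, byte[] value)
    {
        byte* pos = tree.DirectAdd(key, value.Length);    // reserve space, get destination pointer
        fixed (byte* src = value)
        {
            NativeMethods.memcpy(pos, src, value.Length); // fill it in place
        }
    }
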
_tx.DataPager.ShouldGoToOverflowPage(len)) { - pageNumber = WriteToOverflowPages(tx, State, len, out overFlowPos); + pageNumber = WriteToOverflowPages(State, len, out overFlowPos); len = -1; nodeType = NodeFlags.PageRef; } byte* dataPos; - if (page.HasSpaceFor(tx, key, len) == false) + if (page.HasSpaceFor(_tx, key, len) == false) { var cursor = lazy.Value; cursor.Update(cursor.Pages.First, page); - - var pageSplitter = new PageSplitter(tx, this, _cmp, key, len, pageNumber, nodeType, nodeVersion, cursor, State); + + var pageSplitter = new PageSplitter(_tx, this, _cmp, key, len, pageNumber, nodeType, nodeVersion, cursor, State); dataPos = pageSplitter.Execute(); - DebugValidateTree(tx, State.RootPageNumber); + DebugValidateTree(State.RootPageNumber); } else { @@ -224,18 +229,17 @@ private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos) default: throw new NotSupportedException("Unknown node type for direct add operation: " + nodeType); } - page.DebugValidate(tx, _cmp, State.RootPageNumber); + page.DebugValidate(_tx, _cmp, State.RootPageNumber); } if (overFlowPos != null) return overFlowPos; return dataPos; } - - private long WriteToOverflowPages(Transaction tx, TreeMutableState txInfo, int overflowSize, out byte* dataPos) + private long WriteToOverflowPages(TreeMutableState txInfo, int overflowSize, out byte* dataPos) { - var numberOfPages = tx.DataPager.GetNumberOfOverflowPages(overflowSize); - var overflowPageStart = tx.AllocatePage(numberOfPages); + var numberOfPages = _tx.DataPager.GetNumberOfOverflowPages(overflowSize); + var overflowPageStart = _tx.AllocatePage(numberOfPages); overflowPageStart.Flags = PageFlags.Overflow; overflowPageStart.OverflowSize = overflowSize; dataPos = overflowPageStart.Base + Constants.PageHeaderSize; @@ -244,17 +248,17 @@ private long WriteToOverflowPages(Transaction tx, TreeMutableState txInfo, int o return overflowPageStart.PageNumber; } - private void RemoveLeafNode(Transaction tx, Page page, out ushort nodeVersion) + private void RemoveLeafNode(Page page, out ushort nodeVersion) { var node = page.GetNode(page.LastSearchPosition); nodeVersion = node->Version; if (node->Flags == (NodeFlags.PageRef)) // this is an overflow pointer { - var overflowPage = tx.GetReadOnlyPage(node->PageNumber); - var numberOfPages = tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize); + var overflowPage = _tx.GetReadOnlyPage(node->PageNumber); + var numberOfPages = _tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize); for (int i = 0; i < numberOfPages; i++) { - tx.FreePage(overflowPage.PageNumber + i); + _tx.FreePage(overflowPage.PageNumber + i); } State.OverflowPages -= numberOfPages; @@ -264,11 +268,11 @@ private void RemoveLeafNode(Transaction tx, Page page, out ushort nodeVersion) } [Conditional("VALIDATE")] - public void DebugValidateTree(Transaction tx, long rootPageNumber) + public void DebugValidateTree(long rootPageNumber) { var pages = new HashSet(); var stack = new Stack(); - var root = tx.GetReadOnlyPage(rootPageNumber); + var root = _tx.GetReadOnlyPage(rootPageNumber); stack.Push(root); pages.Add(rootPageNumber); while (stack.Count > 0) @@ -276,11 +280,11 @@ public void DebugValidateTree(Transaction tx, long rootPageNumber) var p = stack.Pop(); if (p.NumberOfEntries == 0 && p != root) { - DebugStuff.RenderAndShow(tx, rootPageNumber, 1); + DebugStuff.RenderAndShow(_tx, rootPageNumber, 1); throw new InvalidOperationException("The page " + p.PageNumber + " is empty"); } - p.DebugValidate(tx, _cmp, rootPageNumber); + 
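WriteToOverflowPages and RemoveLeafNode bracket the overflow lifecycle: a value too large for a node is written as a run of consecutive pages headed by a PageFlags.Overflow page that records the byte size, and deleting the node frees the whole run. A self-contained sketch of the page-count computation; the 4 KB page and 64-byte header are assumptions for illustration, and the real figures come from the pager:

    const int PageSize = 4096;     // assumed
    const int PageHeaderSize = 64; // assumed; the header occupies the start of the first page

    static int GetNumberOfOverflowPages(int overflowSize)
    {
        // round the header plus data up to whole pages
        return (PageHeaderSize + overflowSize + PageSize - 1) / PageSize;
    }
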
p.DebugValidate(_tx, _cmp, rootPageNumber); if (p.IsBranch == false) continue; for (int i = 0; i < p.NumberOfEntries; i++) @@ -288,29 +292,29 @@ public void DebugValidateTree(Transaction tx, long rootPageNumber) var page = p.GetNode(i)->PageNumber; if (pages.Add(page) == false) { - DebugStuff.RenderAndShow(tx, rootPageNumber, 1); + DebugStuff.RenderAndShow(_tx, rootPageNumber, 1); throw new InvalidOperationException("The page " + page + " already appeared in the tree!"); } - stack.Push(tx.GetReadOnlyPage(page)); + stack.Push(_tx.GetReadOnlyPage(page)); } } } - public Page FindPageFor(Transaction tx, Slice key, out Lazy cursor) + internal Page FindPageFor(Slice key, out Lazy cursor) { Page p; - if (TryUseRecentTransactionPage(tx, key, out cursor, out p)) + if (TryUseRecentTransactionPage(key, out cursor, out p)) { return p; } - return SearchForPage(tx, key, ref cursor); + return SearchForPage(key, ref cursor); } - private Page SearchForPage(Transaction tx, Slice key, ref Lazy cursor) + private Page SearchForPage(Slice key, ref Lazy cursor) { - var p = tx.GetReadOnlyPage(State.RootPageNumber); + var p = _tx.GetReadOnlyPage(State.RootPageNumber); var c = new Cursor(); c.Push(p); @@ -355,7 +359,7 @@ private Page SearchForPage(Transaction tx, Slice key, ref Lazy cursor) } var node = p.GetNode(nodePos); - p = tx.GetReadOnlyPage(node->PageNumber); + p = _tx.GetReadOnlyPage(node->PageNumber); Debug.Assert(node->PageNumber == p.PageNumber, string.Format("Requested Page: #{0}. Got Page: #{1}", node->PageNumber, p.PageNumber)); @@ -367,13 +371,13 @@ private Page SearchForPage(Transaction tx, Slice key, ref Lazy cursor) p.Search(key, _cmp); // will set the LastSearchPosition - AddToRecentlyFoundPages(tx, c, p, leftmostPage, rightmostPage); + AddToRecentlyFoundPages(c, p, leftmostPage, rightmostPage); cursor = new Lazy(() => c); return p; } - private void AddToRecentlyFoundPages(Transaction tx, Cursor c, Page p, bool? leftmostPage, bool? rightmostPage) + private void AddToRecentlyFoundPages(Cursor c, Page p, bool? leftmostPage, bool? rightmostPage) { var foundPage = new RecentlyFoundPages.FoundPage(c.Pages.Count) { @@ -389,15 +393,15 @@ private void AddToRecentlyFoundPages(Transaction tx, Cursor c, Page p, bool? 
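DebugValidateTree walks every reachable page and enforces two invariants: no page other than the root may be empty, and no page number may be reached twice. Because the method carries [Conditional("VALIDATE")], its call sites are removed by the compiler unless the VALIDATE symbol is defined, so normal builds pay nothing. A minimal illustration of the mechanism:

    using System.Diagnostics;

    static class TreeChecks
    {
        [Conditional("VALIDATE")] // calls vanish at compile time without this symbol
        public static void Validate()
        {
            // walk the pages, throwing on an empty page or a duplicate page number
        }
    }
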
lef cur = cur.Next; } - tx.AddRecentlyFoundPage(this, foundPage); + _tx.AddRecentlyFoundPage(this, foundPage); } - private bool TryUseRecentTransactionPage(Transaction tx, Slice key, out Lazy cursor, out Page page) + private bool TryUseRecentTransactionPage(Slice key, out Lazy cursor, out Page page) { page = null; cursor = null; - var recentPages = tx.GetRecentlyFoundPages(this); + var recentPages = _tx.GetRecentlyFoundPages(this); if (recentPages == null) return false; @@ -408,7 +412,7 @@ private bool TryUseRecentTransactionPage(Transaction tx, Slice key, out Lazy lazy; - var page = FindPageFor(tx, key, out lazy); + var page = FindPageFor(key, out lazy); page.NodePositionFor(key, _cmp); if (page.LastMatch != 0) return; // not an exact match, can't delete - page = tx.ModifyPage(page.PageNumber, page); + page = _tx.ModifyPage(page.PageNumber, page); State.EntriesCount--; ushort nodeVersion; - RemoveLeafNode(tx, page, out nodeVersion); + RemoveLeafNode(page, out nodeVersion); CheckConcurrency(key, version, nodeVersion, TreeActionType.Delete); - var treeRebalancer = new TreeRebalancer(tx, this); + var treeRebalancer = new TreeRebalancer(_tx, this); var changedPage = page; while (changedPage != null) { changedPage = treeRebalancer.Execute(lazy.Value, changedPage); } - page.DebugValidate(tx, _cmp, State.RootPageNumber); + page.DebugValidate(_tx, _cmp, State.RootPageNumber); } - public TreeIterator Iterate(Transaction tx, WriteBatch writeBatch = null) + public TreeIterator Iterate(WriteBatch writeBatch = null) { - return new TreeIterator(this, tx, _cmp); + return new TreeIterator(this, _tx, _cmp); } - public ReadResult Read(Transaction tx, Slice key) + public ReadResult Read(Slice key) { Lazy lazy; - var p = FindPageFor(tx, key, out lazy); + var p = FindPageFor(key, out lazy); if (p.LastMatch != 0) return null; var node = p.GetNode(p.LastSearchPosition); - return new ReadResult(NodeHeader.Reader(tx, node), node->Version); + return new ReadResult(NodeHeader.Reader(_tx, node), node->Version); } - public int GetDataSize(Transaction tx, Slice key) + public int GetDataSize(Slice key) { Lazy lazy; - var p = FindPageFor(tx, key, out lazy); + var p = FindPageFor(key, out lazy); var node = p.Search(key, _cmp); if (node == null || new Slice(node).Compare(key, _cmp) != 0) @@ -522,10 +526,10 @@ public int GetDataSize(Transaction tx, Slice key) return node->DataSize; } - public ushort ReadVersion(Transaction tx, Slice key) + public ushort ReadVersion(Slice key) { Lazy lazy; - var p = FindPageFor(tx, key, out lazy); + var p = FindPageFor(key, out lazy); var node = p.Search(key, _cmp); if (node == null || new Slice(node).Compare(key, _cmp) != 0) @@ -534,11 +538,10 @@ public ushort ReadVersion(Transaction tx, Slice key) return node->Version; } - - internal byte* DirectRead(Transaction tx, Slice key) + internal byte* DirectRead(Slice key) { Lazy lazy; - var p = FindPageFor(tx, key, out lazy); + var p = FindPageFor(key, out lazy); var node = p.Search(key, _cmp); if (node == null) @@ -551,23 +554,18 @@ public ushort ReadVersion(Transaction tx, Slice key) if (node->Flags == (NodeFlags.PageRef)) { - var overFlowPage = tx.GetReadOnlyPage(node->PageNumber); + var overFlowPage = _tx.GetReadOnlyPage(node->PageNumber); return overFlowPage.Base + Constants.PageHeaderSize; } return (byte*) node + node->KeySize + Constants.NodeHeaderSize; } - internal void SetState(TreeMutableState state) - { - _state = state; - } - - public List AllPages(Transaction tx) + public List AllPages() { var results = new List(); var stack = new 
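TryUseRecentTransactionPage is a per-transaction shortcut: SearchForPage records the leaf it lands on together with the key range that leaf covers (plus the cursor path to it), and a later lookup whose key falls inside that range skips the root-to-leaf descent entirely. A self-contained sketch of the range test, with string keys standing in for slices and hypothetical types:

    class FoundPage
    {
        public long PageNumber;
        public string FirstKey; // leftmost key covered (null = unbounded on the left)
        public string LastKey;  // rightmost key covered (null = unbounded on the right)
    }

    static bool TryUseRecentPage(FoundPage recent, string key, out long pageNumber)
    {
        pageNumber = -1;
        if (recent == null) return false;
        bool aboveFirst = recent.FirstKey == null || string.CompareOrdinal(key, recent.FirstKey) >= 0;
        bool belowLast = recent.LastKey == null || string.CompareOrdinal(key, recent.LastKey) <= 0;
        if (!aboveFirst || !belowLast) return false;
        pageNumber = recent.PageNumber; // descend directly into this leaf
        return true;
    }
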
Stack(); - var root = tx.GetReadOnlyPage(State.RootPageNumber); + var root = _tx.GetReadOnlyPage(State.RootPageNumber); stack.Push(root); while (stack.Count > 0) { @@ -579,13 +577,13 @@ public List AllPages(Transaction tx) var pageNumber = node->PageNumber; if (p.IsBranch) { - stack.Push(tx.GetReadOnlyPage(pageNumber)); + stack.Push(_tx.GetReadOnlyPage(pageNumber)); } else if (node->Flags == NodeFlags.PageRef) { // This is an overflow page - var overflowPage = tx.GetReadOnlyPage(pageNumber); - var numberOfPages = tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize); + var overflowPage = _tx.GetReadOnlyPage(pageNumber); + var numberOfPages = _tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize); for (long j = 0; j < numberOfPages; ++j) results.Add(overflowPage.PageNumber + j); } @@ -596,8 +594,8 @@ public List AllPages(Transaction tx) results.Add(childTreeHeader->RootPageNumber); // this is a multi value - var tree = OpenOrCreateMultiValueTree(tx, new Slice(node), node); - results.AddRange(tree.AllPages(tx)); + var tree = OpenOrCreateMultiValueTree(_tx, new Slice(node), node); + results.AddRange(tree.AllPages()); } } } @@ -609,7 +607,6 @@ public override string ToString() return Name + " " + State.EntriesCount; } - private void CheckConcurrency(Slice key, ushort? expectedVersion, ushort nodeVersion, TreeActionType actionType) { if (expectedVersion.HasValue && nodeVersion != expectedVersion.Value) @@ -623,24 +620,24 @@ private void CheckConcurrency(Slice key, Slice value, ushort? expectedVersion, u throw new ConcurrencyException(string.Format("Cannot {0} value '{5}' to key '{1}' to '{4}' tree. Version mismatch. Expected: {2}. Actual: {3}.", actionType.ToString().ToLowerInvariant(), key, expectedVersion.Value, nodeVersion, Name, value)); } - public enum TreeActionType + private enum TreeActionType { Add, Delete } - public Tree Clone() + internal Tree Clone(Transaction tx) { - return new Tree(_cmp, _state.Clone()){ Name = Name }; + return new Tree(tx, _cmp, _state.Clone()) { Name = Name }; } - private bool TryOverwriteOverflowPages(Transaction tx, TreeMutableState treeState, NodeHeader* updatedNode, + private bool TryOverwriteOverflowPages(TreeMutableState treeState, NodeHeader* updatedNode, Slice key, int len, ushort? 
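CheckConcurrency is the optimistic-concurrency hook used throughout the tree: every node carries a ushort version, bumped on each write, and any operation may pass the version it expects; a mismatch raises ConcurrencyException instead of silently overwriting. A usage sketch, under the assumption (consistent with the RemoveLeafNode/node->Version handling above) that a freshly added key starts at version 1:

    tree.Add("users/1", new MemoryStream(new byte[] { 1 }));            // new node: version 1
    var v = tree.ReadVersion("users/1");                                 // 1
    tree.Add("users/1", new MemoryStream(new byte[] { 2 }), version: v); // matches, bumps to 2
    tree.Add("users/1", new MemoryStream(new byte[] { 3 }), version: v); // stale: throws ConcurrencyException
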
version, out byte* pos) { if (updatedNode->Flags == NodeFlags.PageRef && - tx.Id <= tx.Environment.OldestTransaction) // ensure MVCC - do not overwrite if there is some older active transaction that might read those overflows + _tx.Id <= _tx.Environment.OldestTransaction) // ensure MVCC - do not overwrite if there is some older active transaction that might read those overflows { - var overflowPage = tx.GetReadOnlyPage(updatedNode->PageNumber); + var overflowPage = _tx.GetReadOnlyPage(updatedNode->PageNumber); if (len <= overflowPage.OverflowSize) { @@ -650,15 +647,15 @@ private bool TryOverwriteOverflowPages(Transaction tx, TreeMutableState treeStat updatedNode->Version = 0; updatedNode->Version++; - var availableOverflows = tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize); + var availableOverflows = _tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize); - var requestedOverflows = tx.DataPager.GetNumberOfOverflowPages(len); + var requestedOverflows = _tx.DataPager.GetNumberOfOverflowPages(len); var overflowsToFree = availableOverflows - requestedOverflows; for (int i = 0; i < overflowsToFree; i++) { - tx.FreePage(overflowPage.PageNumber + requestedOverflows + i); + _tx.FreePage(overflowPage.PageNumber + requestedOverflows + i); } treeState.OverflowPages -= overflowsToFree; diff --git a/Voron/Trees/TreeIterator.cs b/Voron/Trees/TreeIterator.cs index 7aa01f7e25..cd5104afae 100644 --- a/Voron/Trees/TreeIterator.cs +++ b/Voron/Trees/TreeIterator.cs @@ -30,7 +30,7 @@ public int GetCurrentDataSize() public bool Seek(Slice key) { Lazy lazy; - _currentPage = _tree.FindPageFor(_tx, key, out lazy); + _currentPage = _tree.FindPageFor(key, out lazy); _cursor = lazy.Value; _cursor.Pop(); var node = _currentPage.Search(key, _cmp); @@ -62,7 +62,7 @@ public Slice CurrentKey /// public bool DeleteCurrentAndMoveNext() { - _tree.Delete(_tx, CurrentKey); + _tree.Delete(CurrentKey); return MovePrev() && MoveNext(); } diff --git a/Voron/ValueReader.cs b/Voron/ValueReader.cs index 1992a2b470..32943c9c08 100644 --- a/Voron/ValueReader.cs +++ b/Voron/ValueReader.cs @@ -17,6 +17,8 @@ public unsafe struct ValueReader private readonly int _len; private readonly byte* _val; + public byte* Base { get { return _val; } } + public ValueReader(Stream stream) { long position = stream.Position;
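Two details close the series out. TryOverwriteOverflowPages may reuse overflow pages in place only when _tx.Id <= OldestTransaction, i.e. when no read transaction older than this writer is still alive to hold a snapshot referencing those pages; otherwise copy-on-write must allocate fresh ones. And ValueReader gains a Base property exposing the raw value pointer, which is what lets Tree.Increment read a counter without materializing a stream. A hedged sketch of that read path, assuming the value was written as exactly sizeof(long) bytes on little-endian hardware:

    static unsafe long ReadCounter(ReadResult read)
    {
        return read == null ? 0 : *(long*)read.Reader.Base; // missing key reads as zero
    }
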