MaxDegreeOfParallelism defaults for full pruning (#5662)
* other defaults for full pruning?

* degreeOfParallelism

* small refactor

* fix build

* Fixing BatchedTrieVisitor

* 25% of cores

* Update Pruning config

* add logger

* fix
MarekM25 authored and kamilchodola committed Jun 23, 2023
1 parent b1ce97e commit 6006747
Showing 6 changed files with 30 additions and 23 deletions.
3 changes: 2 additions & 1 deletion src/Nethermind/Nethermind.Synchronization/FullPruning/FullPruner.cs
@@ -138,7 +138,7 @@ private void OnUpdateMainChain(object? sender, OnUpdateMainChainArgs e)
BlockHeader? header = _blockTree.FindHeader(_stateToCopy);
if (header is not null && Interlocked.CompareExchange(ref _waitingForStateReady, 0, 1) == 1)
{
- if (_logger.IsInfo) _logger.Info($"Full Pruning Ready to start: pruning garbage before state {_stateToCopy} with root {header.StateRoot}.");
+ if (_logger.IsInfo) _logger.Info($"Full Pruning Ready to start: pruning garbage before state {_stateToCopy} with root {header.StateRoot}");
Task.Run(() => RunPruning(_currentPruning, header.StateRoot!));
_blockTree.OnUpdateMainChain -= OnUpdateMainChain;
}
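The Interlocked.CompareExchange guard above is what keeps this handler one-shot: it atomically swaps the flag from 1 to 0 and returns the previous value, so even if OnUpdateMainChain fires concurrently, exactly one invocation observes 1 and schedules the pruning task. A minimal standalone sketch of the pattern (type and member names here are illustrative, not the Nethermind API):

    using System.Threading;

    class OneShotGate
    {
        private int _armed = 1;

        // Atomically swaps 1 -> 0; returns true for exactly one caller,
        // no matter how many threads race through here.
        public bool TryFire() => Interlocked.CompareExchange(ref _armed, 0, 1) == 1;
    }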
@@ -222,6 +222,7 @@ protected virtual void RunPruning(IPruningContext pruning, Keccak statRoot)
MaxDegreeOfParallelism = _pruningConfig.FullPruningMaxDegreeOfParallelism,
FullScanMemoryBudget = ((long)_pruningConfig.FullPruningMemoryBudgetMb).MiB(),
};
+ if (_logger.IsInfo) _logger.Info($"Full pruning started with MaxDegreeOfParallelism: {visitingOptions.MaxDegreeOfParallelism} and FullScanMemoryBudget: {visitingOptions.FullScanMemoryBudget}");
_stateReader.RunTreeVisitor(copyTreeVisitor, statRoot, visitingOptions);

if (!pruning.CancellationTokenSource.IsCancellationRequested)
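To give the new log line a sense of scale: assuming a machine with 16 logical processors and the new defaults (FullPruningMaxDegreeOfParallelism = 0, FullPruningMemoryBudgetMb = 4000), the adjusted degree of parallelism is 4 and the budget resolves to 4000 MiB in bytes:

    // Hypothetical output under the assumptions above (not captured from a real node):
    //   Full pruning started with MaxDegreeOfParallelism: 4 and FullScanMemoryBudget: 4194304000
    // 4000 * 1024 * 1024 = 4,194,304,000 bytes, via the .MiB() extension used above.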
10 changes: 5 additions & 5 deletions src/Nethermind/Nethermind.Db/IPruningConfig.cs
@@ -37,16 +37,16 @@ public interface IPruningConfig : IConfig
FullPruningTrigger FullPruningTrigger { get; set; }

[ConfigItem(
Description = "'Full' pruning: Defines how many parallel tasks and potentially used threads can be created by full pruning. 0 - number of logical processors, 1 - full pruning will run on single thread. " +
"Recommended value depends on the type of the node. If the node needs to be responsive (its RPC or Validator node) then recommended value is below the number of logical processors. " +
"If the node doesn't have much other responsibilities but needs to be reliably be able to follow the chain without any delays and produce live logs - the default value is recommended. " +
"If the node doesn't have to be responsive, has very fast I/O (like NVME) and the shortest pruning time is to be achieved, this can be set to 2-3x of the number of logical processors.",
Description = "'Full' pruning: Defines how many parallel tasks and potentially used threads can be created by full pruning. -1 - number of logical processors, 0 - 25% of logical processors, 1 - full pruning will run on single thread. " +
"Recommended value depends on the type of the node. If the node needs to be responsive (its RPC or Validator node) then recommended value is the default value or below is recommended. " +
"If the node doesn't have much other responsibilities but needs to be reliably be able to follow the chain without any delays and produce live logs - the default value or above is recommended. " +
"If the node doesn't have to be responsive, has very fast I/O (like NVME) and the shortest pruning time is to be achieved, this can be set to the number of logical processors (-1).",
DefaultValue = "0")]
int FullPruningMaxDegreeOfParallelism { get; set; }

[ConfigItem(
Description = "Set the memory budget used for the trie visit. Increasing this significantly reduces read iops requirement at expense of RAM. Default depend on network. Set to 0 to disable.",
DefaultValue = "0")]
DefaultValue = "4000")]
int FullPruningMemoryBudgetMb { get; set; }

[ConfigItem(
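For readers who want to pin these values rather than rely on the defaults, a hedged sketch using the PruningConfig class from the next file (in a node's JSON config these options live under the Pruning section; the section name is assumed from the IPruningConfig naming convention, not shown in this diff):

    var pruningConfig = new PruningConfig
    {
        FullPruningMaxDegreeOfParallelism = 0, // default: 25% of logical processors
        FullPruningMemoryBudgetMb = 4000,      // new default memory budget, in MB
    };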
4 changes: 2 additions & 2 deletions src/Nethermind/Nethermind.Db/PruningConfig.cs
@@ -26,8 +26,8 @@ public bool Enabled
public long PersistenceInterval { get; set; } = 8192;
public long FullPruningThresholdMb { get; set; } = 256000;
public FullPruningTrigger FullPruningTrigger { get; set; } = FullPruningTrigger.Manual;
- public int FullPruningMaxDegreeOfParallelism { get; set; } = 0;
- public int FullPruningMemoryBudgetMb { get; set; } = 0;
+ public int FullPruningMaxDegreeOfParallelism { get; set; }
+ public int FullPruningMemoryBudgetMb { get; set; } = 4000;
public bool FullPruningDisableLowPriorityWrites { get; set; } = false;
public int FullPruningMinimumDelayHours { get; set; } = 240;
public FullPruningCompletionBehavior FullPruningCompletionBehavior { get; set; } = FullPruningCompletionBehavior.None;
15 changes: 2 additions & 13 deletions src/Nethermind/Nethermind.Trie/BatchedTrieVisitor.cs
@@ -83,12 +83,7 @@ public BatchedTrieVisitor(
// Get the estimated number of files (expected db size / 64MiB), multiplied by a reasonable number of threads we want to
// confine to a file. If it's too high, the overhead of looping through the stack can get a bit high at the end
// of the visit. But then again, it's probably not much.
- int degreeOfParallelism = visitingOptions.MaxDegreeOfParallelism;
- if (degreeOfParallelism == 0)
- {
- degreeOfParallelism = Math.Max(Environment.ProcessorCount, 1);
- }
- long maxPartitionCount = (expectedDbSize / 64.MiB()) * Math.Min(4, degreeOfParallelism);
+ long maxPartitionCount = (expectedDbSize / 64.MiB()) * Math.Min(4, visitingOptions.MaxDegreeOfParallelism);

if (_partitionCount > maxPartitionCount)
{
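A worked example of the simplified partition bound above, as a fragment with assumed inputs (a 256 GiB estimated state DB and an already-adjusted degree of parallelism of 8):

    long expectedDbSize = 256L * 1024 * 1024 * 1024;    // assumed estimate: 256 GiB
    int maxDegreeOfParallelism = 8;                     // already adjusted, never 0 now
    long maxPartitionCount =
        (expectedDbSize / (64L * 1024 * 1024))          // 4096 estimated 64 MiB files
        * Math.Min(4, maxDegreeOfParallelism);          // at most 4 threads per file
    // maxPartitionCount == 16384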
@@ -140,13 +135,7 @@ public void Start(

try
{
- int degreeOfParallelism = trieVisitContext.MaxDegreeOfParallelism;
- if (degreeOfParallelism == 0)
- {
- degreeOfParallelism = Math.Max(Environment.ProcessorCount, 1);
- }
-
- Task[]? tasks = Enumerable.Range(0, degreeOfParallelism)
+ Task[]? tasks = Enumerable.Range(0, trieVisitContext.MaxDegreeOfParallelism)
.Select((_) => Task.Run(BatchedThread))
.ToArray();

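The local zero-handling could be deleted here because TrieVisitContext (next file) now routes every value through VisitingOptions.AdjustMaxDegreeOfParallelism, so MaxDegreeOfParallelism is always at least 1 by the time it reaches Enumerable.Range. A minimal standalone sketch of the spawning pattern, with BatchedThread stubbed as a plain Action:

    using System;
    using System.Linq;
    using System.Threading.Tasks;

    static class VisitorWorkers
    {
        // Spawns one task per degree of parallelism, all running the same worker,
        // and blocks until every worker finishes.
        public static void RunAll(int maxDegreeOfParallelism, Action batchedThread)
        {
            Task[] tasks = Enumerable.Range(0, maxDegreeOfParallelism)
                .Select(_ => Task.Run(batchedThread))
                .ToArray();
            Task.WaitAll(tasks); // worker exceptions surface as an AggregateException
        }
    }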
2 changes: 1 addition & 1 deletion src/Nethermind/Nethermind.Trie/VisitContext.cs
@@ -22,7 +22,7 @@ public class TrieVisitContext : IDisposable
public int MaxDegreeOfParallelism
{
get => _maxDegreeOfParallelism;
- internal init => _maxDegreeOfParallelism = value == 0 ? Environment.ProcessorCount : value;
+ internal init => _maxDegreeOfParallelism = VisitingOptions.AdjustMaxDegreeOfParallelism(value);
}

public SemaphoreSlim Semaphore
19 changes: 18 additions & 1 deletion src/Nethermind/Nethermind.Trie/VisitingOptions.cs
@@ -11,6 +11,7 @@ namespace Nethermind.Trie
public class VisitingOptions
{
public static readonly VisitingOptions Default = new();
+ private readonly int _maxDegreeOfParallelism = 1;

/// <summary>
/// Should visit accounts.
@@ -20,7 +21,14 @@ public class VisitingOptions
/// <summary>
/// Maximum number of threads that will be used to visit the trie.
/// </summary>
- public int MaxDegreeOfParallelism { get; init; } = 1;
+ public int MaxDegreeOfParallelism
+ {
+ get => _maxDegreeOfParallelism;
+ init
+ {
+ _maxDegreeOfParallelism = AdjustMaxDegreeOfParallelism(value);
+ }
+ }

/// <summary>
/// Specify memory budget to run a batched trie visitor. Significantly reduce read iops as memory budget
@@ -30,5 +38,14 @@ public class VisitingOptions
/// with slower SSD. Set to 0 to disable batched trie visitor.
/// </summary>
public long FullScanMemoryBudget { get; set; }

+ public static int AdjustMaxDegreeOfParallelism(int rawMaxDegreeOfParallelism)
+ {
+ if (rawMaxDegreeOfParallelism == 0)
+ return Math.Max(Environment.ProcessorCount / 4, 1);
+ if (rawMaxDegreeOfParallelism <= -1)
+ return Environment.ProcessorCount;
+ return rawMaxDegreeOfParallelism;
+ }
}
}
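To make the new semantics concrete, a small self-contained program that mirrors AdjustMaxDegreeOfParallelism above (the mapping comes from the method itself; the 16-processor machine in the comments is an assumption):

    using System;

    class AdjustMaxDegreeOfParallelismDemo
    {
        // Same mapping as VisitingOptions.AdjustMaxDegreeOfParallelism.
        static int Adjust(int raw) =>
            raw == 0 ? Math.Max(Environment.ProcessorCount / 4, 1) // 0: 25% of cores, min 1
            : raw <= -1 ? Environment.ProcessorCount               // -1 or below: all cores
            : raw;                                                 // positive: taken as-is

        static void Main()
        {
            // On a 16-logical-processor machine this prints:
            // -1 -> 16, 0 -> 4, 1 -> 1, 8 -> 8
            foreach (int raw in new[] { -1, 0, 1, 8 })
                Console.WriteLine($"{raw} -> {Adjust(raw)}");
        }
    }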
