Optimized Keccak Hashing for Account Storage Slots (#6452)
* Do not recompute the hash when it is not needed
* Memoize the Supplier
* Modify hashCode to only compute Keccak when the slot key is not defined
* Use a single cache for Keccak (a sketch of the resulting caching pattern follows the changed-files summary below)

Signed-off-by: Karim Taam <karim.t2am@gmail.com>
Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>
Signed-off-by: ahamlat <ameziane.hamlat@consensys.net>
Signed-off-by: garyschulte <garyschulte@gmail.com>
matkt authored Jan 30, 2024
1 parent 098d997 commit 6d77d58
Showing 3 changed files with 28 additions and 12 deletions.
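In short, the change keeps a single per-accumulator map from raw storage slot keys to their Keccak-256 hashes, so that repeated reads or writes of the same slot during block processing hash the 32-byte key only once. A minimal, self-contained sketch of that caching pattern, using the Besu and Tuweni types that appear in the diff below (the class name is illustrative, and `computeIfAbsent` is used here for brevity; the committed code uses an explicit get/put, as shown further down):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.tuweni.units.bigints.UInt256;
import org.hyperledger.besu.datatypes.Hash;

class SlotKeyHashCacheSketch {
  // One cache per accumulator: raw slot key -> Keccak-256 hash of that key.
  private final Map<UInt256, Hash> storageKeyHashLookup = new ConcurrentHashMap<>();

  // First call for a given key computes Hash.hash (Keccak-256); later calls reuse the result.
  Hash hashAndSaveSlotPreImage(final UInt256 slotKey) {
    return storageKeyHashLookup.computeIfAbsent(slotKey, key -> Hash.hash(key));
  }

  // The accumulator clears the cache whenever it is reset.
  void reset() {
    storageKeyHashLookup.clear();
  }
}
```

The committed code uses a plain get-then-put instead of `computeIfAbsent`, which at worst hashes the same key twice under concurrency but keeps the hot path free of `computeIfAbsent`'s per-entry locking; that trade-off reading is an inference, not something stated in the commit.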
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -19,6 +19,7 @@
- Log blob count when importing a block via Engine API [#6466](https://github.com/hyperledger/besu/pull/6466)
- Introduce `--Xbonsai-limit-trie-logs-enabled` experimental feature which by default will only retain the latest 512 trie logs, saving about 3GB per week in database growth [#5390](https://github.com/hyperledger/besu/issues/5390)
- Introduce `besu storage x-trie-log prune` experimental offline subcommand which will prune all redundant trie logs except the latest 512 [#6303](https://github.com/hyperledger/besu/pull/6303)
- Introduce caching mechanism to optimize Keccak hash calculations for account storage slots during block processing [#6452](https://github.com/hyperledger/besu/pull/6452)
- Added configuration options for `pragueTime` to genesis file for Prague fork development [#6473](https://github.com/hyperledger/besu/pull/6473)

### Bug fixes
BonsaiWorldStateUpdateAccumulator.java

@@ -74,6 +74,7 @@ public class BonsaiWorldStateUpdateAccumulator
private final Map<Address, StorageConsumingMap<StorageSlotKey, BonsaiValue<UInt256>>>
storageToUpdate = new ConcurrentHashMap<>();

private final Map<UInt256, Hash> storageKeyHashLookup = new ConcurrentHashMap<>();
protected boolean isAccumulatorStateChanged;

public BonsaiWorldStateUpdateAccumulator(
@@ -142,7 +143,7 @@ public MutableAccount createAccount(final Address address, final long nonce, fin
new BonsaiAccount(
this,
address,
hashAndSavePreImage(address),
hashAndSaveAccountPreImage(address),
nonce,
balance,
Hash.EMPTY_TRIE_HASH,
@@ -364,11 +365,11 @@ public void commit() {
entries.forEach(
storageUpdate -> {
final UInt256 keyUInt = storageUpdate.getKey();
final Hash slotHash = hashAndSavePreImage(keyUInt);
final StorageSlotKey slotKey =
new StorageSlotKey(slotHash, Optional.of(keyUInt));
new StorageSlotKey(hashAndSaveSlotPreImage(keyUInt), Optional.of(keyUInt));
final UInt256 value = storageUpdate.getValue();
final BonsaiValue<UInt256> pendingValue = pendingStorageUpdates.get(slotKey);

if (pendingValue == null) {
pendingStorageUpdates.put(
slotKey,
@@ -409,7 +410,7 @@ public Optional<Bytes> getCode(final Address address, final Hash codeHash) {
@Override
public UInt256 getStorageValue(final Address address, final UInt256 slotKey) {
StorageSlotKey storageSlotKey =
new StorageSlotKey(hashAndSavePreImage(slotKey), Optional.of(slotKey));
new StorageSlotKey(hashAndSaveSlotPreImage(slotKey), Optional.of(slotKey));
return getStorageValueByStorageSlotKey(address, storageSlotKey).orElse(UInt256.ZERO);
}

@@ -453,7 +454,7 @@ public Optional<UInt256> getStorageValueByStorageSlotKey(
public UInt256 getPriorStorageValue(final Address address, final UInt256 storageKey) {
// TODO maybe log the read into the trie layer?
StorageSlotKey storageSlotKey =
new StorageSlotKey(hashAndSavePreImage(storageKey), Optional.of(storageKey));
new StorageSlotKey(hashAndSaveSlotPreImage(storageKey), Optional.of(storageKey));
final Map<StorageSlotKey, BonsaiValue<UInt256>> localAccountStorage =
storageToUpdate.get(address);
if (localAccountStorage != null) {
@@ -765,6 +766,7 @@ public void reset() {
resetAccumulatorStateChanged();
updatedAccounts.clear();
deletedAccounts.clear();
storageKeyHashLookup.clear();
}

public static class AccountConsumingMap<T> extends ForwardingMap<Address, T> {
@@ -828,8 +830,17 @@ public interface Consumer<T> {
void process(final Address address, T value);
}

protected Hash hashAndSavePreImage(final Bytes bytes) {
// by default do not save hash preImages
return Hash.hash(bytes);
protected Hash hashAndSaveAccountPreImage(final Address address) {
// no need to save account preimage by default
return Hash.hash(address);
}

protected Hash hashAndSaveSlotPreImage(final UInt256 slotKey) {
Hash hash = storageKeyHashLookup.get(slotKey);
if (hash == null) {
hash = Hash.hash(slotKey);
storageKeyHashLookup.put(slotKey, hash);
}
return hash;
}
}
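The commit-message bullets about memoizing a Supplier and about `hashCode` only computing Keccak when the slot key is not defined describe a complementary lazy-hashing idea on the slot-key object itself. `StorageSlotKey` is not among the files changed here, so the class below is purely illustrative: a hedged sketch of that idea assuming Guava's `Suppliers.memoize` and the same Besu/Tuweni types used above.

```java
import java.util.Optional;
import java.util.function.Supplier;

import com.google.common.base.Suppliers;
import org.apache.tuweni.units.bigints.UInt256;
import org.hyperledger.besu.datatypes.Hash;

final class LazySlotKeySketch {
  // Mirrors StorageSlotKey's optional raw key; empty when only the hash is known.
  private final Optional<UInt256> slotKey;
  // Memoized so Keccak-256 runs at most once, and only if the hash is actually requested.
  private final Supplier<Hash> slotHash;

  LazySlotKeySketch(final UInt256 key) {
    this.slotKey = Optional.of(key);
    this.slotHash = Suppliers.memoize(() -> Hash.hash(key));
  }

  Hash getSlotHash() {
    return slotHash.get(); // first call computes the Keccak hash, later calls reuse it
  }

  @Override
  public int hashCode() {
    // Prefer the cheap raw key when it is defined; fall back to the Keccak hash otherwise.
    // (an equals based on the slot hash is elided for brevity)
    return slotKey.map(UInt256::hashCode).orElseGet(() -> getSlotHash().hashCode());
  }
}
```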
BonsaiReferenceTestUpdateAccumulator.java

@@ -14,6 +14,7 @@
*/
package org.hyperledger.besu.ethereum.referencetests;

import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.bonsai.BonsaiAccount;
@@ -25,7 +26,6 @@

import java.util.concurrent.ConcurrentHashMap;

import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;

public class BonsaiReferenceTestUpdateAccumulator extends BonsaiWorldStateUpdateAccumulator {
@@ -42,9 +42,13 @@ public BonsaiReferenceTestUpdateAccumulator(
}

@Override
protected Hash hashAndSavePreImage(final Bytes bytes) {
// by default do not save hash preImages
return preImageProxy.hashAndSavePreImage(bytes);
protected Hash hashAndSaveAccountPreImage(final Address address) {
return preImageProxy.hashAndSavePreImage(address);
}

@Override
protected Hash hashAndSaveSlotPreImage(final UInt256 slotKey) {
return preImageProxy.hashAndSavePreImage(slotKey);
}

public BonsaiReferenceTestUpdateAccumulator createDetachedAccumulator() {
