Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

stagedsync: add CallTraceIndex stage #1892

Merged
merged 1 commit into from
Mar 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 22 additions & 4 deletions silkworm/node/db/tables.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,8 @@
#include <silkworm/node/db/mdbx.hpp>
#include <silkworm/node/db/util.hpp>

/*
Part of the compatibility layer with the Turbo-Geth DB format;
see its common/dbutils/bucket.go.
*/
/// Part of the compatibility layer with the Erigon DB format.

namespace silkworm::db::table {

inline constexpr VersionBase kRequiredSchemaVersion{3, 0, 0}; // We're compatible with this
Expand Down Expand Up @@ -124,9 +122,29 @@ inline constexpr db::MapConfig kBodiesSnapshotInfo{kBodiesSnapshotInfoName};
inline constexpr const char* kCallTraceSetName{"CallTraceSet"};
inline constexpr db::MapConfig kCallTraceSet{kCallTraceSetName, mdbx::key_mode::usual, mdbx::value_mode::multi};

//! \details Stores the list of blocks in which a specific call sender (i.e. "from") has been traced
//! \par Record format
//! \verbatim
//! key : address (20 bytes) + suffix (BE 64bit unsigned integer)
//! value : binary bitmap holding list of blocks
//! \endverbatim
//! \remark Each record key holds a suffix which is a 64bit unsigned integer specifying the "upper bound" limit
//! of the list of blocks contained in the value part. When this integer is equal to UINT64_MAX, it means this
//! record holds the last known chunk of blocks which contain the address as sender for some call. This is due
//! to how roaring bitmaps work.
inline constexpr const char* kCallFromIndexName{"CallFromIndex"};
inline constexpr db::MapConfig kCallFromIndex{kCallFromIndexName};

//! \details Stores the list of blocks in which a specific call receiver (i.e. "to") has been traced
//! \par Record format
//! \verbatim
//! key : address (20 bytes) + suffix (BE 64bit unsigned integer)
//! value : binary bitmap holding list of blocks
//! \endverbatim
//! \remark Each record key holds a suffix which is a 64bit unsigned integer specifying the "upper bound" limit
//! of the list of blocks contained in the value part. When this integer is equal to UINT64_MAX, it means this
//! record holds the last known chunk of blocks which contain the address as receiver for some call. This is due
//! to how roaring bitmaps work.
inline constexpr const char* kCallToIndexName{"CallToIndex"};
inline constexpr db::MapConfig kCallToIndex{kCallToIndexName};

Expand Down
38 changes: 21 additions & 17 deletions silkworm/node/stagedsync/execution_pipeline.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
#include <silkworm/infra/common/stopwatch.hpp>
#include <silkworm/node/stagedsync/stages/stage_blockhashes.hpp>
#include <silkworm/node/stagedsync/stages/stage_bodies.hpp>
#include <silkworm/node/stagedsync/stages/stage_call_trace_index.hpp>
#include <silkworm/node/stagedsync/stages/stage_execution.hpp>
#include <silkworm/node/stagedsync/stages/stage_finish.hpp>
#include <silkworm/node/stagedsync/stages/stage_hashstate.hpp>
Expand Down Expand Up @@ -96,20 +97,17 @@ std::optional<Hash> ExecutionPipeline::bad_block() {
/*
* Stages from Erigon -> Silkworm
* 1 StageHeaders -> stagedsync::HeadersStage
* 2 StageCumulativeIndex -> TBD
* 2 StageBodies -> stagedsync::BodiesStage
* 3 StageBlockHashes -> stagedsync::BlockHashes
* 4 StageBodies -> stagedsync::BodiesStage
* 5 StageIssuance -> TBD
* 6 StageSenders -> stagedsync::Senders
* 7 StageExecuteBlocks -> stagedsync::Execution
* 8 StageTranspile -> TBD
* 9 StageHashState -> stagedsync::HashState
* 10 StageTrie -> stagedsync::InterHashes
* 11 StageHistory -> stagedsync::HistoryIndex
* 12 StageLogIndex -> stagedsync::LogIndex
* 13 StageCallTraces -> TBD
* 14 StageTxLookup -> stagedsync::TxLookup
* 15 StageFinish -> stagedsync::Finish
* 4 StageSenders -> stagedsync::Senders
* 5 StageExecution -> stagedsync::Execution
* 6 StageHashState -> stagedsync::HashState
* 7 StageInterHashes -> stagedsync::InterHashes
* 8 StageIndexes -> stagedsync::HistoryIndex
* 9 StageLogIndex -> stagedsync::LogIndex
* 10 StageCallTraces -> stagedsync::CallTraceIndex
* 11 StageTxLookup -> stagedsync::TxLookup
* 12 StageFinish -> stagedsync::Finish
*/

void ExecutionPipeline::load_stages() {
Expand All @@ -131,6 +129,8 @@ void ExecutionPipeline::load_stages() {
std::make_unique<stagedsync::HistoryIndex>(sync_context_.get(), node_settings_->batch_size, node_settings_->etl(), node_settings_->prune_mode.history()));
stages_.emplace(db::stages::kLogIndexKey,
std::make_unique<stagedsync::LogIndex>(sync_context_.get(), node_settings_->batch_size, node_settings_->etl(), node_settings_->prune_mode.history()));
stages_.emplace(db::stages::kCallTracesKey,
std::make_unique<stagedsync::CallTraceIndex>(sync_context_.get(), node_settings_->batch_size, node_settings_->etl(), node_settings_->prune_mode.call_traces()));
stages_.emplace(db::stages::kTxLookupKey,
std::make_unique<stagedsync::TxLookup>(sync_context_.get(), node_settings_->etl(), node_settings_->prune_mode.tx_index()));
stages_.emplace(db::stages::kFinishKey,
Expand All @@ -148,6 +148,7 @@ void ExecutionPipeline::load_stages() {
db::stages::kIntermediateHashesKey,
db::stages::kHistoryIndexKey,
db::stages::kLogIndexKey,
db::stages::kCallTracesKey,
db::stages::kTxLookupKey,
db::stages::kFinishKey,
});
Expand All @@ -156,6 +157,7 @@ void ExecutionPipeline::load_stages() {
{
db::stages::kFinishKey,
db::stages::kTxLookupKey,
db::stages::kCallTracesKey,
db::stages::kLogIndexKey,
db::stages::kHistoryIndexKey,
db::stages::kHashStateKey,
Expand Down Expand Up @@ -184,7 +186,7 @@ Stage::Result ExecutionPipeline::forward(db::RWTxn& cycle_txn, BlockNum target_h
auto log_timer = make_log_timer();

sync_context_->target_height = target_height;
log::Info("ExecPipeline") << "Forward start --------------------------";
log::Info("ExecutionPipeline") << "Forward start";

try {
Stage::Result result = Stage::Result::kSuccess;
Expand Down Expand Up @@ -257,7 +259,7 @@ Stage::Result ExecutionPipeline::forward(db::RWTxn& cycle_txn, BlockNum target_h
", head_header_height= " + to_string(head_header_number_));
}

log::Info("ExecPipeline") << "Forward done ---------------------------";
log::Info("ExecutionPipeline") << "Forward done";

if (stop_at_block && stop_at_block <= head_header_number_) return Stage::Result::kStoppedByEnv;

Expand All @@ -274,7 +276,7 @@ Stage::Result ExecutionPipeline::unwind(db::RWTxn& cycle_txn, BlockNum unwind_po
using std::to_string;
StopWatch stages_stop_watch(true);
auto log_timer = make_log_timer();
log::Info("ExecPipeline") << "Unwind start ---------------------------";
log::Info("ExecutionPipeline") << "Unwind start";

try {
sync_context_->unwind_point = unwind_point;
Expand Down Expand Up @@ -321,7 +323,7 @@ Stage::Result ExecutionPipeline::unwind(db::RWTxn& cycle_txn, BlockNum unwind_po
sync_context_->unwind_point.reset();
sync_context_->bad_block_hash.reset();

log::Info("ExecPipeline") << "Unwind done ----------------------------";
log::Info("ExecutionPipeline") << "Unwind done";
return is_stopping() ? Stage::Result::kAborted : Stage::Result::kSuccess;

} catch (const std::exception& ex) {
Expand All @@ -334,6 +336,7 @@ Stage::Result ExecutionPipeline::unwind(db::RWTxn& cycle_txn, BlockNum unwind_po
Stage::Result ExecutionPipeline::prune(db::RWTxn& cycle_txn) {
StopWatch stages_stop_watch(true);
auto log_timer = make_log_timer();
log::Info("ExecutionPipeline") << "Prune start";

try {
current_stages_count_ = stages_forward_order_.size();
Expand Down Expand Up @@ -367,6 +370,7 @@ Stage::Result ExecutionPipeline::prune(db::RWTxn& cycle_txn) {
ensure(head_header.has_value(), [&]() { return "Sync pipeline, missing head header hash " + to_hex(head_header_hash_); });
head_header_number_ = head_header->number;

log::Info("ExecutionPipeline") << "Prune done";
return is_stopping() ? Stage::Result::kAborted : Stage::Result::kSuccess;

} catch (const std::exception& ex) {
Expand Down
2 changes: 1 addition & 1 deletion silkworm/node/stagedsync/stages/stage.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ namespace silkworm::stagedsync {

class StageError;

//! \brief Holds informations across all stages
//! \brief Holds information across all stages
struct SyncContext {
SyncContext() = default;
~SyncContext() = default;
Expand Down
Loading
Loading