Skip to content

Commit

Permalink
Merge branch 'master' into feature/gas_cost_from_evmone
Browse files Browse the repository at this point in the history
  • Loading branch information
canepat authored Sep 9, 2024
2 parents f442915 + 8668e77 commit ec87872
Show file tree
Hide file tree
Showing 59 changed files with 680 additions and 567 deletions.
3 changes: 0 additions & 3 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,6 @@
[submodule "ethash"]
path = third_party/ethash/ethash
url = https://github.com/chfast/ethash.git
[submodule "libtorrent"]
path = third_party/libtorrent/libtorrent
url = https://github.com/arvidn/libtorrent
[submodule "stun-msg"]
path = third_party/stun-msg/stun-msg
url = https://github.com/battlmonstr/stun-msg.git
Expand Down
8 changes: 8 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,14 @@ At the very beginning, one of the main goals of Silkworm was implementing high-p
directly within Erigon itself. Recently we focused again on this initial target, making it our highest priority and
delivering the first release of [Erigon++] starting from Erigon 2.59.0.

Erigon++ is supported on the following platforms:

* Linux x86_64 with glibc 34+, glibcpp 30+ (such as Debian 12+, Ubuntu 22+, etc.)
* macOS 14+ arm64

It is not supported on arm64 Linux or on Alpine Linux.
You can test compatibility by running [silkworm_compat_check.sh](https://github.com/erigontech/erigon/blob/main/turbo/silkworm/silkworm_compat_check.sh)

Please note that Erigon++ is just a fancy name for identifying such usage of Silkworm libraries within Erigon, which can
be selectively enabled by specifying optional flags in Erigon command-line.

Expand Down
1 change: 1 addition & 0 deletions cmake/conan.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ if(SILKWORM_SANITIZE_COMPILER_OPTIONS)
abseil
boost
grpc
libtorrent
protobuf
)
# cmake-format: on
Expand Down
2 changes: 1 addition & 1 deletion cmake/toolchain/cxx20.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,6 @@ cmake_policy(SET CMP0063 NEW)
cmake_policy(SET CMP0074 NEW)

set(CMAKE_OSX_DEPLOYMENT_TARGET
"13.3"
"14.0"
CACHE STRING ""
)
2 changes: 1 addition & 1 deletion cmd/capi/sample-go-client/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,5 +13,5 @@ go run main.go

on macOS:
```bash
CGO_LDFLAGS=-mmacosx-version-min=13.3 go run main.go
CGO_LDFLAGS=-mmacosx-version-min=14.0 go run main.go
```
2 changes: 1 addition & 1 deletion cmd/dev/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ add_executable(scan_txs scan_txs.cpp)
target_link_libraries(scan_txs PRIVATE silkworm_node CLI11::CLI11 absl::time)

add_executable(snapshots snapshots.cpp)
target_link_libraries(snapshots PRIVATE silkworm_node cmd_common torrent-rasterbar magic_enum::magic_enum)
target_link_libraries(snapshots PRIVATE silkworm_node cmd_common magic_enum::magic_enum)

add_executable(db_toolbox db_toolbox.cpp)
target_link_libraries(db_toolbox PRIVATE silkworm_node cmd_common CLI11::CLI11 magic_enum::magic_enum)
30 changes: 18 additions & 12 deletions cmd/dev/snapshots.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -204,10 +204,6 @@ void parse_command_line(int argc, char* argv[], CLI::App& app, SnapshotToolboxSe
"Max number of downloads active simultaneously")
->capture_default_str()
->check(CLI::Range(3, 20));
cmd->add_flag("--seeding",
bittorrent_settings.seeding,
"Flag indicating if torrents should be seeded when download is finished")
->capture_default_str();
}
for (auto& cmd : {commands[SnapshotTool::create_index],
commands[SnapshotTool::open_index],
Expand Down Expand Up @@ -382,7 +378,7 @@ void open_index(const SnapSettings& settings) {
}

static TorrentInfoPtrList download_web_seed(const DownloadSettings& settings) {
const auto known_config{snapshots::Config::lookup_known_config(settings.chain_id, /*whitelist=*/{})};
const auto known_config{snapshots::Config::lookup_known_config(settings.chain_id)};
WebSeedClient web_client{/*url_seeds=*/{settings.url_seed}, known_config.preverified_snapshots()};

boost::asio::io_context scheduler;
Expand Down Expand Up @@ -816,14 +812,24 @@ void merge(const SnapSettings& settings) {
}

void sync(const SnapSettings& settings) {
class NoopStageSchedulerAdapter : public stagedsync::StageScheduler {
public:
explicit NoopStageSchedulerAdapter() = default;
~NoopStageSchedulerAdapter() override = default;
Task<void> schedule(std::function<void(db::RWTxn&)> /*callback*/) override {
co_return;
}
};

std::chrono::time_point start{std::chrono::steady_clock::now()};
SnapshotRepository snapshot_repository{settings, bundle_factory()}; // NOLINT(cppcoreguidelines-slicing)
db::SnapshotSync snapshot_sync{&snapshot_repository, kMainnetConfig};
std::vector<std::string> snapshot_file_names;
if (settings.snapshot_file_name) {
snapshot_file_names.push_back(*settings.snapshot_file_name);
}
snapshot_sync.download_snapshots(snapshot_file_names);

TemporaryDirectory tmp_dir;
db::EnvConfig chaindata_env_config{tmp_dir.path()};
auto chaindata_env = db::open_env(chaindata_env_config);
test_util::TaskRunner runner;
NoopStageSchedulerAdapter stage_scheduler;
db::SnapshotSync snapshot_sync{settings, kMainnetConfig.chain_id, chaindata_env, tmp_dir.path(), stage_scheduler}; // NOLINT(cppcoreguidelines-slicing)
runner.run(snapshot_sync.download_snapshots());
std::chrono::duration elapsed{std::chrono::steady_clock::now() - start};

SILK_INFO << "Sync elapsed: " << duration_as<std::chrono::seconds>(elapsed) << " sec";
Expand Down
12 changes: 7 additions & 5 deletions cmd/silkworm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -266,12 +266,9 @@ int main(int argc, char* argv[]) {

// Execution: the execution layer engine
// NOLINTNEXTLINE(cppcoreguidelines-slicing)
silkworm::node::Node execution_node{settings.node_settings, sentry_client, chaindata_db};
silkworm::node::Node execution_node{context_pool.any_executor(), settings.node_settings, sentry_client, chaindata_db};
execution::api::DirectClient& execution_client{execution_node.execution_direct_client()};

// Set up the execution node (e.g. load pre-verified hashes, download+index snapshots...)
execution_node.setup();

// ChainSync: the chain synchronization process based on the consensus protocol
chainsync::EngineRpcSettings rpc_settings{
.engine_end_point = settings.rpcdaemon_settings.engine_end_point,
Expand All @@ -288,11 +285,16 @@ int main(int argc, char* argv[]) {
sentry_client,
*node_settings.chain_config,
rpc_settings};
// Note: temp code until chainsync::Sync becomes a part of Node
auto chain_sync_process_run = [&execution_node](chainsync::Sync& sync) -> Task<void> {
co_await execution_node.wait_for_setup();
co_await sync.async_run();
};

auto tasks =
execution_node.run() &&
embedded_sentry_run_if_needed(sentry_server) &&
chain_sync_process.async_run();
chain_sync_process_run(chain_sync_process);

// Trap OS signals
ShutdownSignal shutdown_signal{context_pool.any_executor()};
Expand Down
7 changes: 4 additions & 3 deletions conanfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ def requirements(self):
self.requires('grpc/1.54.3')
self.requires('gtest/1.12.1')
self.requires('jwt-cpp/0.6.0')
self.requires('libtorrent/2.0.10')
self.requires('mimalloc/2.1.2')
self.requires('openssl/3.2.1')
self.requires('protobuf/3.21.12')
Expand All @@ -58,14 +59,14 @@ def configure(self):
if self.settings.os == 'Windows':
return

# Disable Catch2 version 3.x.x signaling handling on WASM
# Disable Catch2 version 3.x.x signal handling on WASM
if self.settings.arch == 'wasm':
self.options['catch2'].no_posix_signals = True

self.options['boost'].asio_no_deprecated = True
if self.settings.os == 'Macos':
CMAKE_OSX_DEPLOYMENT_TARGET = '10.13'
os_version_min_flag = f'-mmacosx-version-min={CMAKE_OSX_DEPLOYMENT_TARGET}'
cmake_osx_deployment_target = '10.13'
os_version_min_flag = f'-mmacosx-version-min={cmake_osx_deployment_target}'
self.options['boost'].extra_b2_flags = f'cxxflags="{os_version_min_flag}" linkflags="{os_version_min_flag}"'

# Disable building unused boost components
Expand Down
2 changes: 2 additions & 0 deletions silkworm/core/state/intra_block_state.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -201,6 +201,7 @@ void IntraBlockState::set_nonce(const evmc::address& address, uint64_t nonce) no
auto& obj{get_or_create_object(address)};
journal_.emplace_back(std::make_unique<state::UpdateDelta>(address, obj));
obj.current->nonce = nonce;
touch(address);
}

ByteView IntraBlockState::get_code(const evmc::address& address) const noexcept {
Expand Down Expand Up @@ -241,6 +242,7 @@ void IntraBlockState::set_code(const evmc::address& address, ByteView code) noex
// Don't overwrite already existing code so that views of it
// that were previously returned by get_code() are still valid.
new_code_.try_emplace(obj.current->code_hash, code.begin(), code.end());
touch(address);
}

evmc_access_status IntraBlockState::access_account(const evmc::address& address) noexcept {
Expand Down
33 changes: 8 additions & 25 deletions silkworm/db/access_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,14 +43,14 @@ std::optional<VersionBase> read_schema_version(ROTxn& txn) {
return std::nullopt;
}

auto data{cursor->current()};
auto data = cursor->current();
SILKWORM_ASSERT(data.value.length() == 12);
auto Major{endian::load_big_u32(static_cast<uint8_t*>(data.value.data()))};
const auto major = endian::load_big_u32(static_cast<uint8_t*>(data.value.data()));
data.value.remove_prefix(sizeof(uint32_t));
auto Minor{endian::load_big_u32(static_cast<uint8_t*>(data.value.data()))};
const auto minor = endian::load_big_u32(static_cast<uint8_t*>(data.value.data()));
data.value.remove_prefix(sizeof(uint32_t));
auto Patch{endian::load_big_u32(static_cast<uint8_t*>(data.value.data()))};
return VersionBase{Major, Minor, Patch};
const auto patch = endian::load_big_u32(static_cast<uint8_t*>(data.value.data()));
return VersionBase{major, minor, patch};
}

void write_schema_version(RWTxn& txn, const VersionBase& schema_version) {
Expand All @@ -65,9 +65,9 @@ void write_schema_version(RWTxn& txn, const VersionBase& schema_version) {
}
}
Bytes value(12, '\0');
endian::store_big_u32(&value[0], schema_version.Major);
endian::store_big_u32(&value[4], schema_version.Minor);
endian::store_big_u32(&value[8], schema_version.Patch);
endian::store_big_u32(&value[0], schema_version.major);
endian::store_big_u32(&value[4], schema_version.minor);
endian::store_big_u32(&value[8], schema_version.patch);

PooledCursor src(txn, db::table::kDatabaseInfo);
src.upsert(mdbx::slice{kDbSchemaVersionKey}, to_slice(value));
Expand All @@ -79,23 +79,6 @@ void write_build_info_height(RWTxn& txn, const Bytes& key, BlockNum height) {
cursor->upsert(db::to_slice(key), db::to_slice(value));
}

std::vector<std::string> read_snapshots(ROTxn& txn) {
auto db_info_cursor = txn.ro_cursor(table::kDatabaseInfo);
if (!db_info_cursor->seek(mdbx::slice{kDbSnapshotsKey})) {
return {};
}
const auto data{db_info_cursor->current()};
// https://github.com/nlohmann/json/issues/2204
const auto json = nlohmann::json::parse(data.value.as_string(), nullptr, /*.allow_exceptions=*/false);
return json.get<std::vector<std::string>>();
}

void write_snapshots(RWTxn& txn, const std::vector<std::string>& snapshot_file_names) {
auto db_info_cursor = txn.rw_cursor(table::kDatabaseInfo);
nlohmann::json json_value = snapshot_file_names;
db_info_cursor->upsert(mdbx::slice{kDbSnapshotsKey}, mdbx::slice(json_value.dump().data()));
}

std::optional<BlockHeader> read_header(ROTxn& txn, BlockNum block_number, const evmc::bytes32& hash) {
return read_header(txn, block_number, hash.bytes);
}
Expand Down
6 changes: 0 additions & 6 deletions silkworm/db/access_layer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,6 @@ void write_schema_version(RWTxn& txn, const VersionBase& schema_version);
//! upgrades or downgrades of Silkworm's build
void write_build_info_height(RWTxn& txn, const Bytes& key, BlockNum height);

//! \brief Read the list of snapshot file names
std::vector<std::string> read_snapshots(ROTxn& txn);

//! \brief Write the list of snapshot file names
void write_snapshots(RWTxn& txn, const std::vector<std::string>& snapshot_file_names);

//! \brief Reads a header with the specified key (block number, hash)
std::optional<BlockHeader> read_header(ROTxn& txn, BlockNum block_number, const uint8_t (&hash)[kHashLength]);
std::optional<BlockHeader> read_header(ROTxn& txn, BlockNum block_number, const evmc::bytes32&);
Expand Down
18 changes: 2 additions & 16 deletions silkworm/db/access_layer_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -271,8 +271,8 @@ TEST_CASE("Schema Version", "[db][access_layer]") {

SECTION("Incompatible schema") {
// Reduce compat schema version
auto incompatible_version = VersionBase{db::table::kRequiredSchemaVersion.Major - 1, 0, 0};
REQUIRE_NOTHROW(db::write_schema_version(context.rw_txn(), incompatible_version));
constexpr VersionBase kIncompatibleVersion{db::table::kRequiredSchemaVersion.major - 1, 0, 0};
REQUIRE_NOTHROW(db::write_schema_version(context.rw_txn(), kIncompatibleVersion));
REQUIRE_THROWS(db::table::check_or_create_chaindata_tables(context.rw_txn()));
}

Expand Down Expand Up @@ -442,20 +442,6 @@ TEST_CASE("Stages", "[db][access_layer]") {
CHECK(stages::read_stage_prune_progress(txn, stages::kBlockBodiesKey) == 0);
}

TEST_CASE("Snapshots", "[db][access_layer]") {
db::test_util::TempChainData context;
auto& txn{context.rw_txn()};

const std::vector<std::string> snapshot_list{
"v1-000000-000500-bodies.seg",
"v1-000000-000500-headers.seg",
"v1-000000-000500-transactions.seg",
};

CHECK_NOTHROW(write_snapshots(txn, snapshot_list));
CHECK(read_snapshots(txn) == snapshot_list);
}

TEST_CASE("Difficulty", "[db][access_layer]") {
db::test_util::TempChainData context;
auto& txn{context.rw_txn()};
Expand Down
7 changes: 6 additions & 1 deletion silkworm/db/snapshot_merger.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@

#include "snapshot_merger.hpp"

#include <algorithm>
#include <filesystem>
#include <memory>
#include <vector>
Expand Down Expand Up @@ -146,6 +145,12 @@ void SnapshotMerger::commit(std::shared_ptr<DataMigrationResult> result) {
for (auto& merged_bundle : merged_bundles) {
schedule_bundle_cleanup(*merged_bundle);
}

on_snapshot_merged_signal_(bundle.block_range());
}

boost::signals2::scoped_connection SnapshotMerger::on_snapshot_merged(const std::function<void(BlockNumRange)>& callback) {
return on_snapshot_merged_signal_.connect(callback);
}

Task<void> SnapshotMerger::cleanup() {
Expand Down
12 changes: 11 additions & 1 deletion silkworm/db/snapshot_merger.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,15 @@

#pragma once

#include <functional>

#include <boost/signals2.hpp>

#include <silkworm/core/common/base.hpp>

#include "data_migration.hpp"
#include "snapshots/snapshot_repository.hpp"
#include "snapshots/snapshot_size.hpp"

namespace silkworm::db {

Expand All @@ -29,9 +36,11 @@ class SnapshotMerger : public DataMigration {
: snapshots_(snapshots),
tmp_dir_path_(std::move(tmp_dir_path)) {}

boost::signals2::scoped_connection on_snapshot_merged(const std::function<void(BlockNumRange)>& callback);

private:
static constexpr size_t kBatchSize = 10;
static constexpr size_t kMaxSnapshotSize = 100'000;
static constexpr size_t kMaxSnapshotSize = snapshots::kMaxMergerSnapshotSize;

const char* name() const override { return "SnapshotMerger"; }
std::unique_ptr<DataMigrationCommand> next_command() override;
Expand All @@ -42,6 +51,7 @@ class SnapshotMerger : public DataMigration {

snapshots::SnapshotRepository& snapshots_;
std::filesystem::path tmp_dir_path_;
boost::signals2::signal<void(BlockNumRange)> on_snapshot_merged_signal_;
};

} // namespace silkworm::db
Loading

0 comments on commit ec87872

Please sign in to comment.