diff --git a/nano/node/bootstrap/bootstrap.cpp b/nano/node/bootstrap/bootstrap.cpp
index 68e238b054..bf7484f7b8 100644
--- a/nano/node/bootstrap/bootstrap.cpp
+++ b/nano/node/bootstrap/bootstrap.cpp
@@ -25,6 +25,7 @@ constexpr std::chrono::seconds nano::bootstrap_limits::lazy_flush_delay_sec;
 constexpr unsigned nano::bootstrap_limits::lazy_destinations_request_limit;
 constexpr uint64_t nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit;
 constexpr double nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio;
+constexpr size_t nano::bootstrap_limits::lazy_blocks_restart_limit;
 constexpr std::chrono::hours nano::bootstrap_excluded_peers::exclude_time_hours;
 constexpr std::chrono::hours nano::bootstrap_excluded_peers::exclude_remove_hours;
 
@@ -819,33 +820,40 @@ void nano::bootstrap_attempt::lazy_requeue (nano::block_hash const & hash_a, nan
 void nano::bootstrap_attempt::lazy_pull_flush ()
 {
 	assert (!mutex.try_lock ());
-	last_lazy_flush = std::chrono::steady_clock::now ();
-	nano::lock_guard<std::mutex> lazy_lock (lazy_mutex);
-	assert (node->network_params.bootstrap.lazy_max_pull_blocks <= std::numeric_limits<nano::pull_info::count_t>::max ());
-	nano::pull_info::count_t batch_count (node->network_params.bootstrap.lazy_max_pull_blocks);
-	if (total_blocks > nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit && !lazy_blocks.empty ())
+	static size_t const max_pulls (nano::bootstrap_limits::bootstrap_connection_scale_target_blocks_lazy * 3);
+	if (pulls.size () < max_pulls)
 	{
-		double lazy_blocks_ratio (total_blocks / lazy_blocks.size ());
-		if (lazy_blocks_ratio > nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio)
+		last_lazy_flush = std::chrono::steady_clock::now ();
+		nano::lock_guard<std::mutex> lazy_lock (lazy_mutex);
+		assert (node->network_params.bootstrap.lazy_max_pull_blocks <= std::numeric_limits<nano::pull_info::count_t>::max ());
+		nano::pull_info::count_t batch_count (node->network_params.bootstrap.lazy_max_pull_blocks);
+		if (total_blocks > nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit && !lazy_blocks.empty ())
 		{
-			// Increasing blocks ratio weight as more important (^3). Small batch count should lower blocks ratio below target
-			double lazy_blocks_factor (std::pow (lazy_blocks_ratio / nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio, 3.0));
-			// Decreasing total block count weight as less important (sqrt)
-			double total_blocks_factor (std::sqrt (total_blocks / nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit));
-			uint32_t batch_count_min (node->network_params.bootstrap.lazy_max_pull_blocks / (lazy_blocks_factor * total_blocks_factor));
-			batch_count = std::max (node->network_params.bootstrap.lazy_min_pull_blocks, batch_count_min);
+			double lazy_blocks_ratio (total_blocks / lazy_blocks.size ());
+			if (lazy_blocks_ratio > nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio)
+			{
+				// Increasing blocks ratio weight as more important (^3). Small batch count should lower blocks ratio below target
+				double lazy_blocks_factor (std::pow (lazy_blocks_ratio / nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio, 3.0));
+				// Decreasing total block count weight as less important (sqrt)
+				double total_blocks_factor (std::sqrt (total_blocks / nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit));
+				uint32_t batch_count_min (node->network_params.bootstrap.lazy_max_pull_blocks / (lazy_blocks_factor * total_blocks_factor));
+				batch_count = std::max (node->network_params.bootstrap.lazy_min_pull_blocks, batch_count_min);
+			}
 		}
-	}
-	auto transaction (node->store.tx_begin_read ());
-	for (auto & pull_start : lazy_pulls)
-	{
-		// Recheck if block was already processed
-		if (lazy_blocks.find (pull_start.first) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start.first))
+		size_t count (0);
+		auto transaction (node->store.tx_begin_read ());
+		while (!lazy_pulls.empty () && count < max_pulls)
 		{
-			pulls.emplace_back (pull_start.first, pull_start.first, nano::block_hash (0), batch_count, pull_start.second);
+			auto const & pull_start (lazy_pulls.front ());
+			// Recheck if block was already processed
+			if (lazy_blocks.find (pull_start.first) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start.first))
+			{
+				pulls.emplace_back (pull_start.first, pull_start.first, nano::block_hash (0), batch_count, pull_start.second);
+				++count;
+			}
+			lazy_pulls.pop_front ();
 		}
 	}
-	lazy_pulls.clear ();
 }
 
 bool nano::bootstrap_attempt::lazy_finished ()
@@ -883,10 +891,27 @@ bool nano::bootstrap_attempt::lazy_finished ()
 	return result;
 }
 
+bool nano::bootstrap_attempt::lazy_has_expired () const
+{
+	bool result (false);
+	// Max 30 minutes run with enabled legacy bootstrap
+	static std::chrono::minutes const max_lazy_time (node->flags.disable_legacy_bootstrap ? 7 * 24 * 60 : 30);
+	if (std::chrono::steady_clock::now () - lazy_start_time >= max_lazy_time)
+	{
+		result = true;
+	}
+	else if (!node->flags.disable_legacy_bootstrap && lazy_blocks_count > nano::bootstrap_limits::lazy_blocks_restart_limit)
+	{
+		result = true;
+	}
+	return result;
+}
+
 void nano::bootstrap_attempt::lazy_clear ()
 {
 	assert (!lazy_mutex.try_lock ());
 	lazy_blocks.clear ();
+	lazy_blocks_count = 0;
 	lazy_keys.clear ();
 	lazy_pulls.clear ();
 	lazy_state_backlog.clear ();
@@ -898,13 +923,12 @@ void nano::bootstrap_attempt::lazy_run ()
 {
 	assert (!node->flags.disable_lazy_bootstrap);
 	start_populate_connections ();
-	auto start_time (std::chrono::steady_clock::now ());
-	auto max_time (std::chrono::minutes (node->flags.disable_legacy_bootstrap ? 7 * 24 * 60 : 30));
+	lazy_start_time = std::chrono::steady_clock::now ();
 	nano::unique_lock<std::mutex> lock (mutex);
-	while ((still_pulling () || !lazy_finished ()) && std::chrono::steady_clock::now () - start_time < max_time)
+	while ((still_pulling () || !lazy_finished ()) && !lazy_has_expired ())
 	{
 		unsigned iterations (0);
-		while (still_pulling () && std::chrono::steady_clock::now () - start_time < max_time)
+		while (still_pulling () && !lazy_has_expired ())
 		{
 			if (!pulls.empty ())
 			{
@@ -1023,6 +1047,7 @@ bool nano::bootstrap_attempt::process_block_lazy (std::shared_ptr<nano::block> b
 			}
 		}
 		lazy_blocks.insert (hash);
+		++lazy_blocks_count;
 		// Adding lazy balances for first processed block in pull
 		if (pull_blocks == 0 && (block_a->type () == nano::block_type::state || block_a->type () == nano::block_type::send))
 		{
@@ -1159,17 +1184,21 @@ void nano::bootstrap_attempt::lazy_backlog_cleanup ()
 
 void nano::bootstrap_attempt::lazy_destinations_increment (nano::account const & destination_a)
 {
-	// Update accounts counter for send blocks
-	auto existing (lazy_destinations.get<account_tag> ().find (destination_a));
-	if (existing != lazy_destinations.get<account_tag> ().end ())
-	{
-		lazy_destinations.get<account_tag> ().modify (existing, [](nano::lazy_destinations_item & item_a) {
-			++item_a.count;
-		});
-	}
-	else
+	// Enabled only if legacy bootstrap is not available. Legacy bootstrap is a more effective way to receive all existing destinations
+	if (node->flags.disable_legacy_bootstrap)
 	{
-		lazy_destinations.insert (nano::lazy_destinations_item{ destination_a, 1 });
+		// Update accounts counter for send blocks
+		auto existing (lazy_destinations.get<account_tag> ().find (destination_a));
+		if (existing != lazy_destinations.get<account_tag> ().end ())
+		{
+			lazy_destinations.get<account_tag> ().modify (existing, [](nano::lazy_destinations_item & item_a) {
+				++item_a.count;
+			});
+		}
+		else
+		{
+			lazy_destinations.insert (nano::lazy_destinations_item{ destination_a, 1 });
+		}
 	}
 }
 
diff --git a/nano/node/bootstrap/bootstrap.hpp b/nano/node/bootstrap/bootstrap.hpp
index 3b31259293..459def3d68 100644
--- a/nano/node/bootstrap/bootstrap.hpp
+++ b/nano/node/bootstrap/bootstrap.hpp
@@ -89,6 +89,7 @@ class bootstrap_attempt final : public std::enable_shared_from_this
 	::max ());
 	void lazy_requeue (nano::block_hash const &, nano::block_hash const &, bool);
 	bool lazy_finished ();
+	bool lazy_has_expired () const;
 	void lazy_pull_flush ();
 	void lazy_clear ();
 	bool process_block_lazy (std::shared_ptr<nano::block>, nano::account const &, uint64_t, nano::bulk_pull::count_t, unsigned);
@@ -139,6 +140,7 @@ class bootstrap_attempt final : public std::enable_shared_from_this
 	lazy_balances;
 	std::unordered_set lazy_keys;
 	std::deque> lazy_pulls;
+	std::chrono::steady_clock::time_point lazy_start_time;
 	std::chrono::steady_clock::time_point last_lazy_flush{ std::chrono::steady_clock::now () };
 	class account_tag
 	{
@@ -152,6 +154,7 @@ class bootstrap_attempt final : public std::enable_shared_from_this
 	, boost::multi_index::member, std::greater>,
 	boost::multi_index::hashed_unique, boost::multi_index::member>>>
 	lazy_destinations;
+	std::atomic<size_t> lazy_blocks_count{ 0 };
 	std::atomic<bool> lazy_destinations_flushed{ false };
 	std::mutex lazy_mutex;
 	// Wallet lazy bootstrap
@@ -284,5 +287,6 @@ class bootstrap_limits final
 	static constexpr unsigned lazy_destinations_request_limit = 256 * 1024;
 	static constexpr uint64_t lazy_batch_pull_count_resize_blocks_limit = 4 * 1024 * 1024;
 	static constexpr double lazy_batch_pull_count_resize_ratio = 2.0;
+	static constexpr size_t lazy_blocks_restart_limit = 1024 * 1024;
 };
 }
diff --git a/nano/node/bootstrap/bootstrap_bulk_pull.cpp b/nano/node/bootstrap/bootstrap_bulk_pull.cpp
index 440b654437..c88a28a61d 100644
--- a/nano/node/bootstrap/bootstrap_bulk_pull.cpp
+++ b/nano/node/bootstrap/bootstrap_bulk_pull.cpp
@@ -223,7 +223,8 @@ void nano::bulk_pull_client::received_block (boost::system::error_code const & e
 		}
 		// Is block expected?
 		bool block_expected (false);
-		bool unconfirmed_account_head (pull_blocks == 0 && pull.retry_limit != std::numeric_limits<unsigned>::max () && expected == pull.account_or_head && block->account () == pull.account_or_head);
+		// Unconfirmed head is used only for lazy destinations if legacy bootstrap is not available, see nano::bootstrap_attempt::lazy_destinations_increment (...)
+		bool unconfirmed_account_head (connection->node->flags.disable_legacy_bootstrap && pull_blocks == 0 && pull.retry_limit != std::numeric_limits<unsigned>::max () && expected == pull.account_or_head && block->account () == pull.account_or_head);
 		if (hash == expected || unconfirmed_account_head)
 		{
 			expected = block->previous ();
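
Note for reviewers: a minimal standalone sketch of the adaptive batch sizing kept in lazy_pull_flush (), with the two limits inlined from bootstrap_limits above. The free-function name and the lazy_max_pull_blocks / lazy_min_pull_blocks parameters are illustrative stand-ins for the node->network_params.bootstrap values, not part of this change.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Same arithmetic as in lazy_pull_flush (); the divisions are kept integer as in the original.
uint32_t lazy_batch_count (uint64_t total_blocks, size_t lazy_blocks_size, uint32_t lazy_max_pull_blocks, uint32_t lazy_min_pull_blocks)
{
	uint64_t const resize_blocks_limit (4 * 1024 * 1024); // lazy_batch_pull_count_resize_blocks_limit
	double const resize_ratio (2.0); // lazy_batch_pull_count_resize_ratio
	uint32_t batch_count (lazy_max_pull_blocks);
	if (total_blocks > resize_blocks_limit && lazy_blocks_size != 0)
	{
		double lazy_blocks_ratio (total_blocks / lazy_blocks_size);
		if (lazy_blocks_ratio > resize_ratio)
		{
			// Blocks ratio weighted more heavily (^3), total block count less heavily (sqrt)
			double lazy_blocks_factor (std::pow (lazy_blocks_ratio / resize_ratio, 3.0));
			double total_blocks_factor (std::sqrt (total_blocks / resize_blocks_limit));
			uint32_t batch_count_min (lazy_max_pull_blocks / (lazy_blocks_factor * total_blocks_factor));
			batch_count = std::max (lazy_min_pull_blocks, batch_count_min);
		}
	}
	return batch_count;
}

For example, with 8 million total blocks and 2 million distinct lazy blocks (ratio 4), the factors are 8 and about 1.41, so a hypothetical 512-block maximum batch shrinks to roughly 45 blocks per pull, clamped at lazy_min_pull_blocks.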
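
Note: the restart behaviour added in lazy_has_expired () combines two independent conditions; a small sketch as a free function, using the constants from this diff. The parameters stand in for node->flags.disable_legacy_bootstrap, the time elapsed since lazy_start_time and lazy_blocks_count.

#include <chrono>
#include <cstddef>

// Expired either on the time budget (30 minutes when legacy bootstrap can follow, 7 days otherwise)
// or, only while legacy bootstrap is still available, after lazy_blocks_restart_limit processed blocks.
bool lazy_has_expired (bool disable_legacy_bootstrap, std::chrono::steady_clock::duration elapsed, size_t lazy_blocks_count)
{
	std::chrono::minutes const max_lazy_time (disable_legacy_bootstrap ? 7 * 24 * 60 : 30);
	size_t const lazy_blocks_restart_limit (1024 * 1024);
	return elapsed >= max_lazy_time || (!disable_legacy_bootstrap && lazy_blocks_count > lazy_blocks_restart_limit);
}

The block-count restart only applies while legacy bootstrap is enabled, i.e. when a legacy attempt can pick up after the lazy attempt ends; with legacy bootstrap disabled, only the 7-day time budget terminates the attempt.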