Improve blockchain cmd (paritytech#41)
* Add import_params to blockchain cmd

* Nits

* Minor optimization: avoid cloning the entire address list

* Mark disconnected in addressbook

* Add --verbose to blockchain cmd

* Nit

* Fix clippy

* Docs improvement

* .
liuchengxu authored Aug 15, 2024
1 parent 94a19fa commit 1e5a386
Showing 12 changed files with 160 additions and 76 deletions.
5 changes: 4 additions & 1 deletion crates/pallet-bitcoin/src/lib.rs
@@ -49,9 +49,10 @@ impl Txid {
Self(H256::from(d))
}

/// Converts the runtime [`Txid`] to a `bitcoin::Txid`.
pub fn into_bitcoin_txid(self) -> bitcoin::Txid {
bitcoin::consensus::Decodable::consensus_decode(&mut self.encode().as_slice())
.expect("txid must be encoded correctly; qed")
.expect("Decode must succeed as txid was ensured to be encoded correctly; qed")
}
}

@@ -98,6 +99,7 @@ pub mod pallet {

#[pallet::call(weight(<T as Config>::WeightInfo))]
impl<T: Config> Pallet<T> {
/// An internal unsigned extrinsic for including a Bitcoin transaction into the block.
#[pallet::call_index(0)]
#[pallet::weight(Weight::zero())]
pub fn transact(origin: OriginFor<T>, btc_tx: Vec<u8>) -> DispatchResult {
@@ -183,6 +185,7 @@ pub fn coin_storage_key<T: Config>(bitcoin_txid: bitcoin::Txid, index: Vout) ->
Coins::<T>::storage_double_map_final_key(txid, index)
}

/// Returns the final storage prefix for the storage item `Coins`.
pub fn coin_storage_prefix<T: Config>() -> [u8; 32] {
use frame_support::StoragePrefixedMap;

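As background for the new `coin_storage_prefix` doc comment, the sketch below (not part of the diff) shows how a FRAME final storage prefix is typically derived: the Twox128 hash of the pallet prefix concatenated with the Twox128 hash of the storage item name. The pallet name "Bitcoin" is an illustrative assumption, and `sp_core` is assumed as a dependency.

```rust
// A minimal sketch of how a FRAME "final storage prefix" is derived. The actual prefix
// returned by `coin_storage_prefix` depends on the pallet name configured in the runtime,
// so "Bitcoin" below is an assumption for illustration only.
use sp_core::hashing::twox_128;

fn example_storage_prefix(pallet_prefix: &str, storage_name: &str) -> [u8; 32] {
    let mut prefix = [0u8; 32];
    // First 16 bytes: hash of the pallet prefix; last 16 bytes: hash of the storage name.
    prefix[..16].copy_from_slice(&twox_128(pallet_prefix.as_bytes()));
    prefix[16..].copy_from_slice(&twox_128(storage_name.as_bytes()));
    prefix
}

fn main() {
    let prefix = example_storage_prefix("Bitcoin", "Coins");
    let as_hex: String = prefix.iter().map(|b| format!("{b:02x}")).collect();
    println!("0x{as_hex}");
}
```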
34 changes: 22 additions & 12 deletions crates/sc-consensus-nakamoto/src/block_executor.rs
@@ -14,18 +14,28 @@ use subcoin_primitives::{BitcoinTransactionAdapter, CoinStorageKey};
/// A simple way to track the overall execution info for optimization purposes.
#[derive(Debug, Default)]
pub struct ExecutionInfo {
/// Number of transactions in the block.
pub transactions_count: usize,
/// Time taken by `runtime_api.execute_block` in nanoseconds.
pub execute_block: u128,
pub execute_block_time: u128,
/// Time taken by `client.state_at` in nanoseconds.
pub fetch_state: u128,
pub fetch_state_time: u128,
/// Time taken by `runtime_api.into_storage_changes` in nanoseconds.
pub into_storage_changes: u128,
pub into_storage_changes_time: u128,
}

impl ExecutionInfo {
/// Constructs a new instance of [`ExecutionInfo`] with the given number of transactions.
pub fn new(transactions_count: usize) -> Self {
Self {
transactions_count,
..Default::default()
}
}

/// Returns the total execution time in nanoseconds.
pub fn total(&self) -> u128 {
self.execute_block + self.fetch_state + self.into_storage_changes
self.execute_block_time + self.fetch_state_time + self.into_storage_changes_time
}
}

@@ -194,21 +204,21 @@ where
let mut runtime_api = self.client.runtime_api();
runtime_api.set_call_context(CallContext::Onchain);

let mut exec_info = ExecutionInfo::default();
let mut exec_info = ExecutionInfo::new(block.extrinsics().len());

let now = std::time::Instant::now();
runtime_api.execute_block_without_state_root_check(parent_hash, block)?;
exec_info.execute_block = now.elapsed().as_nanos();
exec_info.execute_block_time = now.elapsed().as_nanos();

let now = std::time::Instant::now();
let state = self.client.state_at(parent_hash)?;
exec_info.fetch_state = now.elapsed().as_nanos();
exec_info.fetch_state_time = now.elapsed().as_nanos();

let now = std::time::Instant::now();
let storage_changes = runtime_api
.into_storage_changes(&state, parent_hash)
.map_err(sp_blockchain::Error::StorageChanges)?;
exec_info.into_storage_changes = now.elapsed().as_nanos();
exec_info.into_storage_changes_time = now.elapsed().as_nanos();

let state_root = storage_changes.transaction_storage_root;

@@ -411,7 +421,7 @@ where

let (header, extrinsics) = block.deconstruct();

let mut exec_info = ExecutionInfo::default();
let mut exec_info = ExecutionInfo::new(extrinsics.len());

let now = std::time::Instant::now();

@@ -450,17 +460,17 @@

tracing::debug!("off_runtime({:?}): {exec_details:?}", self.client_context);

exec_info.execute_block = now.elapsed().as_nanos();
exec_info.execute_block_time = now.elapsed().as_nanos();

let now = std::time::Instant::now();
let state = self.client.state_at(parent_hash)?;
exec_info.fetch_state = now.elapsed().as_nanos();
exec_info.fetch_state_time = now.elapsed().as_nanos();

let now = std::time::Instant::now();
let storage_changes = runtime_api
.into_storage_changes(&state, parent_hash)
.map_err(sp_blockchain::Error::StorageChanges)?;
exec_info.into_storage_changes = now.elapsed().as_nanos();
exec_info.into_storage_changes_time = now.elapsed().as_nanos();

tracing::debug!(
"off_runtime({:?}): {exec_info:?}, total: {}",
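The renamed `*_time` fields follow the measurement pattern used in `execute_block`/`import_block`: time each phase with `Instant` and report the sum via `total()`. Below is a self-contained sketch of that pattern using only the standard library; it is a simplified stand-in, not the crate's actual code.

```rust
use std::time::Instant;

/// Simplified stand-in for the crate's `ExecutionInfo`, using the renamed `*_time` fields.
#[derive(Debug, Default)]
struct ExecutionInfo {
    transactions_count: usize,
    execute_block_time: u128,
    fetch_state_time: u128,
    into_storage_changes_time: u128,
}

impl ExecutionInfo {
    fn new(transactions_count: usize) -> Self {
        Self { transactions_count, ..Default::default() }
    }

    /// Total execution time in nanoseconds.
    fn total(&self) -> u128 {
        self.execute_block_time + self.fetch_state_time + self.into_storage_changes_time
    }
}

fn main() {
    let mut info = ExecutionInfo::new(42);

    // Stand-ins for the three phases that the block executor times.
    let now = Instant::now();
    std::thread::sleep(std::time::Duration::from_millis(5)); // pretend: execute_block
    info.execute_block_time = now.elapsed().as_nanos();

    let now = Instant::now();
    std::thread::sleep(std::time::Duration::from_millis(1)); // pretend: state_at
    info.fetch_state_time = now.elapsed().as_nanos();

    let now = Instant::now();
    std::thread::sleep(std::time::Duration::from_millis(2)); // pretend: into_storage_changes
    info.into_storage_changes_time = now.elapsed().as_nanos();

    println!("{info:?}, total: {} ns", info.total());
}
```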
6 changes: 6 additions & 0 deletions crates/sc-consensus-nakamoto/src/chain_params.rs
@@ -2,6 +2,7 @@ use bitcoin::consensus::Params;
use bitcoin::{BlockHash, Network};
use std::collections::HashMap;

/// BIP-113 uses the median time of the last 11 blocks, instead of the block's timestamp, for lock-time calculations.
pub const MEDIAN_TIME_SPAN: usize = 11;

/// Extended [`Params`].
@@ -13,6 +14,11 @@ pub struct ChainParams {
pub csv_height: u32,
/// Block height at which Segwit becomes active.
pub segwit_height: u32,
/// A map of block hashes to script verification flag exceptions.
///
/// This allows for certain blocks to have specific script verification flags, overriding
/// the default rules. For example, exceptions may be made for blocks that activated
/// BIP16 (P2SH) or Taproot under special conditions.
pub script_flag_exceptions: HashMap<BlockHash, u32>,
}

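For readers unfamiliar with the rule behind `MEDIAN_TIME_SPAN`, here is a minimal standalone sketch of the BIP-113 median-time-past computation over the last 11 block timestamps. It illustrates the rule only and is not the crate's implementation.

```rust
/// Number of recent blocks used for the BIP-113 median-time-past rule.
const MEDIAN_TIME_SPAN: usize = 11;

/// Computes the median of the timestamps of up to the last `MEDIAN_TIME_SPAN` blocks.
/// `recent_timestamps` is expected oldest-first.
fn median_time_past(mut recent_timestamps: Vec<u32>) -> u32 {
    // Keep only the most recent MEDIAN_TIME_SPAN entries, then take the middle value.
    let len = recent_timestamps.len().min(MEDIAN_TIME_SPAN);
    let start = recent_timestamps.len() - len;
    let window = &mut recent_timestamps[start..];
    window.sort_unstable();
    window[len / 2]
}

fn main() {
    // Timestamps of the 11 most recent blocks (illustrative values).
    let timestamps = vec![100, 110, 105, 120, 130, 125, 140, 150, 145, 160, 170];
    assert_eq!(median_time_past(timestamps), 130);
    println!("median time past computed over the last {MEDIAN_TIME_SPAN} blocks");
}
```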
18 changes: 13 additions & 5 deletions crates/sc-consensus-nakamoto/src/import_queue.rs
@@ -15,27 +15,34 @@ use sp_core::traits::SpawnEssentialNamed;
use sp_runtime::traits::Block as BlockT;
use std::pin::Pin;

/// Represents a batch of Bitcoin blocks that are to be imported.
#[derive(Debug, Clone)]
pub struct ImportBlocks {
/// The source from which the blocks were obtained.
pub origin: BlockOrigin,
/// A vector containing the Bitcoin blocks to be imported.
pub blocks: Vec<BitcoinBlock>,
}

/// Import queue for processing Bitcoin blocks.
#[derive(Debug)]
pub struct BlockImportQueue {
pub block_import_sender: TracingUnboundedSender<ImportBlocks>,
pub import_result_receiver: TracingUnboundedReceiver<ImportManyBlocksResult>,
block_import_sender: TracingUnboundedSender<ImportBlocks>,
import_result_receiver: TracingUnboundedReceiver<ImportManyBlocksResult>,
}

impl BlockImportQueue {
/// Send blocks to the actual worker of import queue.
/// Sends a batch of blocks to the worker of the import queue for processing.
pub fn import_blocks(&self, incoming_blocks: ImportBlocks) {
let _ = self.block_import_sender.unbounded_send(incoming_blocks);
}

pub async fn next_import_results(&mut self) -> Option<ImportManyBlocksResult> {
self.import_result_receiver.next().await
/// Retrieves the results of the block import operations.
///
/// This asynchronous function waits for and returns the results of the block import process.
/// It consumes the next available result from the import queue.
pub async fn block_import_results(&mut self) -> ImportManyBlocksResult {
self.import_result_receiver.select_next_some().await
}
}

@@ -90,6 +97,7 @@ where
}
}

/// A dummy verifier that verifies nothing against the block.
pub struct VerifyNothing;

#[async_trait::async_trait]
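The switch from `next()` to `select_next_some()` is what lets `block_import_results` return `ImportManyBlocksResult` directly instead of an `Option`. A minimal stand-in using `futures` unbounded channels (assumed dependency; `ImportResult` and `Queue` are hypothetical names for illustration) shows the pattern:

```rust
use futures::channel::mpsc;
use futures::StreamExt;

#[derive(Debug)]
struct ImportResult(u32);

struct Queue {
    result_rx: mpsc::UnboundedReceiver<ImportResult>,
}

impl Queue {
    /// Mirrors `block_import_results`: `UnboundedReceiver` is a `FusedStream`, so
    /// `select_next_some` resolves only when an item is available and never yields `None`.
    async fn block_import_results(&mut self) -> ImportResult {
        self.result_rx.select_next_some().await
    }
}

fn main() {
    let (tx, rx) = mpsc::unbounded();
    let mut queue = Queue { result_rx: rx };

    // The import worker would send results from the other end of the channel.
    tx.unbounded_send(ImportResult(7)).expect("receiver alive");

    let result = futures::executor::block_on(queue.block_import_results());
    println!("imported blocks result: {}", result.0);
}
```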
2 changes: 2 additions & 0 deletions crates/sc-consensus-nakamoto/src/verification/tx_verify.rs
@@ -34,6 +34,7 @@ pub enum Error {
PreviousOutputNull,
}

/// Checks whether the transaction is final at the given height and block time.
pub fn is_final(tx: &Transaction, height: u32, block_time: u32) -> bool {
if tx.lock_time == LockTime::ZERO {
return true;
@@ -107,6 +108,7 @@ pub fn check_transaction_sanity(tx: &Transaction) -> Result<(), Error> {
Ok(())
}

/// Counts the sigops for this transaction using legacy counting.
pub fn get_legacy_sig_op_count(tx: &Transaction) -> usize {
tx.input
.iter()
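The rule documented on `is_final` follows Bitcoin's lock-time semantics: a zero lock time is always final, otherwise the lock time is compared against the block height or block time depending on the threshold, and a transaction is also final if every input uses the maximum sequence. The sketch below restates it with plain integers rather than `bitcoin::Transaction`, so it is a simplification for illustration, not the crate's code.

```rust
/// Lock times below this value are interpreted as block heights; at or above, as UNIX timestamps.
const LOCKTIME_THRESHOLD: u32 = 500_000_000;
/// Sequence value that disables the lock time for an input.
const SEQUENCE_FINAL: u32 = u32::MAX;

/// Simplified finality check mirroring the rule documented on `is_final`.
fn is_final(lock_time: u32, input_sequences: &[u32], height: u32, block_time: u32) -> bool {
    if lock_time == 0 {
        return true;
    }
    // Interpret the lock time as a height or a timestamp and compare accordingly.
    let limit = if lock_time < LOCKTIME_THRESHOLD { height } else { block_time };
    if lock_time < limit {
        return true;
    }
    // Even with an unexpired lock time, the transaction is final if every input opted out.
    input_sequences.iter().all(|seq| *seq == SEQUENCE_FINAL)
}

fn main() {
    // Lock time 840_000 (a height) with current height 850_000: final.
    assert!(is_final(840_000, &[0], 850_000, 1_700_000_000));
    // Lock time in the future and one input still signalling: not final.
    assert!(!is_final(900_000, &[0], 850_000, 1_700_000_000));
    println!("finality checks passed");
}
```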
18 changes: 13 additions & 5 deletions crates/subcoin-network/src/address_book.rs
@@ -6,11 +6,17 @@ use std::net::IpAddr;
/// Manages the addresses discovered in the network.
#[derive(Debug)]
pub struct AddressBook {
/// Addresses available for establishing new connections.
discovered_addresses: HashSet<PeerId>,
/// Peers that currently have an active connection or are being communicated with.
active_addresses: HashSet<PeerId>,
/// Addresses that failed to establish a connection.
failed_addresses: HashSet<PeerId>,
/// Indicates whether only IPv4 addresses should be stored.
ipv4_only: bool,
/// Maximum number of discovered addresses.
max_addresses: usize,
/// Random number generator for selecting peers.
rng: fastrand::Rng,
}

@@ -38,21 +44,23 @@ impl AddressBook {

/// Pops a random address from the discovered addresses and marks it as active.
pub fn pop(&mut self) -> Option<PeerId> {
let maybe_peer = self.rng.choice(self.discovered_addresses.clone());

if let Some(peer) = maybe_peer {
if let Some(peer) = self.rng.choice(self.discovered_addresses.iter()).copied() {
self.discovered_addresses.remove(&peer);
self.active_addresses.insert(peer);
return Some(peer);
}

maybe_peer
None
}

pub fn note_failed_address(&mut self, peer_addr: PeerId) {
self.active_addresses.remove(&peer_addr);
self.failed_addresses.insert(peer_addr);
}

pub fn mark_disconnected(&mut self, peer_addr: &PeerId) {
self.active_addresses.remove(peer_addr);
}

/// Adds multiple addresses (`Address`) to the address book.
pub fn add_many(&mut self, from: PeerId, addresses: Vec<(u32, Address)>) -> usize {
let mut added = 0;
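The `pop` change avoids cloning the whole `discovered_addresses` set by letting `fastrand::Rng::choice` walk the set's iterator and copying only the selected element. Below is a self-contained sketch of the same pattern, with `SocketAddr` standing in for the crate's `PeerId` (an assumption for illustration) and `fastrand` as an assumed dependency.

```rust
use std::collections::HashSet;
use std::net::SocketAddr;

struct AddressBook {
    discovered_addresses: HashSet<SocketAddr>,
    active_addresses: HashSet<SocketAddr>,
    rng: fastrand::Rng,
}

impl AddressBook {
    /// Pops a random address and marks it as active, without cloning the whole set.
    fn pop(&mut self) -> Option<SocketAddr> {
        // `choice` iterates the set and copies only the selected element, instead of the
        // previous `self.rng.choice(self.discovered_addresses.clone())`.
        if let Some(peer) = self.rng.choice(self.discovered_addresses.iter()).copied() {
            self.discovered_addresses.remove(&peer);
            self.active_addresses.insert(peer);
            return Some(peer);
        }
        None
    }

    /// Mirrors `mark_disconnected`: the peer simply leaves the active set.
    fn mark_disconnected(&mut self, peer: &SocketAddr) {
        self.active_addresses.remove(peer);
    }
}

fn main() {
    let mut book = AddressBook {
        discovered_addresses: ["127.0.0.1:8333".parse().unwrap()].into_iter().collect(),
        active_addresses: HashSet::new(),
        rng: fastrand::Rng::new(),
    };
    let peer = book.pop().expect("one discovered address");
    book.mark_disconnected(&peer);
    println!("popped and disconnected {peer}");
}
```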
40 changes: 23 additions & 17 deletions crates/subcoin-network/src/block_downloader.rs
@@ -11,6 +11,9 @@ use sc_consensus_nakamoto::ImportManyBlocksResult;
use std::collections::{HashMap, HashSet};
use std::time::{Duration, Instant};

/// Interval for logging when the block import queue is too busy.
const BUSY_QUEUE_LOG_INTERVAL: Duration = Duration::from_secs(5);

/// Manages queued blocks.
#[derive(Default, Debug, Clone)]
pub(crate) struct QueuedBlocks {
@@ -75,6 +78,10 @@ pub(crate) struct BlockDownloadManager {
/// Orphan blocks
orphan_blocks_pool: OrphanBlocksPool,
/// Last time at which the block was received or imported.
///
/// This is updated whenever a block is received from the network or
/// when the results of processed blocks are notified. It helps track
/// the most recent activity related to block processing.
last_progress_time: Instant,
/// Whether there are too many blocks in the queue.
import_queue_is_overloaded: bool,
@@ -97,12 +104,14 @@ impl BlockDownloadManager {
}
}

// Determine if the downloader is stalled based on the time elapsed since the last progress.
fn is_stalled(&self) -> bool {
// The downloader is considered as stalled if no progress for some time.
// The timeout (in seconds) is extended when the chain exceeds a certain size, as block
// execution times increase significantly with chain growth.
let stall_timeout = if self.best_queued_number > 300_000 {
120
120 // Extended timeout for larger chains
} else {
60
60 // Standard timeout for smaller chains
};

self.last_progress_time.elapsed().as_secs() > stall_timeout
@@ -133,27 +142,24 @@ impl BlockDownloadManager {
let max_queued_blocks = match best_number {
0..=100_000 => 8192,
100_001..=200_000 => 4096,
200_001..=300_000 => 1024,
200_001..=300_000 => 2048,
_ => 512,
};

let import_queue_is_overloaded = self.best_queued_number - best_number > max_queued_blocks;

if import_queue_is_overloaded {
const INTERVAL: Duration = Duration::from_secs(5);

if self
if import_queue_is_overloaded
&& self
.last_overloaded_queue_log_time
.map(|last_time| last_time.elapsed() > INTERVAL)
.map(|last_time| last_time.elapsed() > BUSY_QUEUE_LOG_INTERVAL)
.unwrap_or(true)
{
tracing::debug!(
best_number,
best_queued_number = self.best_queued_number,
"⏸️ Pausing download: too many blocks in the queue",
);
self.last_overloaded_queue_log_time.replace(Instant::now());
}
{
tracing::debug!(
best_number,
best_queued_number = self.best_queued_number,
"⏸️ Pausing download: too many blocks in the queue",
);
self.last_overloaded_queue_log_time.replace(Instant::now());
}

self.import_queue_is_overloaded = import_queue_is_overloaded;
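The overload check combines a backlog threshold that shrinks as the chain grows with logging throttled by `BUSY_QUEUE_LOG_INTERVAL`. The standalone sketch below mirrors that logic with `println!` in place of `tracing::debug!`; the struct and method names are simplified assumptions, not the crate's exact API.

```rust
use std::time::{Duration, Instant};

/// Interval for logging when the block import queue is too busy (mirrors the new constant).
const BUSY_QUEUE_LOG_INTERVAL: Duration = Duration::from_secs(5);

struct DownloadManager {
    best_queued_number: u32,
    import_queue_is_overloaded: bool,
    last_overloaded_queue_log_time: Option<Instant>,
}

impl DownloadManager {
    /// Simplified version of the overload check in the block downloader.
    fn update_overload_state(&mut self, best_number: u32) {
        // Allow a larger backlog early in the chain, where blocks are cheap to execute.
        let max_queued_blocks = match best_number {
            0..=100_000 => 8192,
            100_001..=200_000 => 4096,
            200_001..=300_000 => 2048,
            _ => 512,
        };

        let import_queue_is_overloaded =
            self.best_queued_number.saturating_sub(best_number) > max_queued_blocks;

        // Log at most once per BUSY_QUEUE_LOG_INTERVAL to avoid spamming.
        if import_queue_is_overloaded
            && self
                .last_overloaded_queue_log_time
                .map(|last_time| last_time.elapsed() > BUSY_QUEUE_LOG_INTERVAL)
                .unwrap_or(true)
        {
            println!(
                "⏸️ Pausing download: too many blocks in the queue \
                 (best_number={best_number}, best_queued_number={})",
                self.best_queued_number
            );
            self.last_overloaded_queue_log_time.replace(Instant::now());
        }

        self.import_queue_is_overloaded = import_queue_is_overloaded;
    }
}

fn main() {
    let mut manager = DownloadManager {
        best_queued_number: 260_000,
        import_queue_is_overloaded: false,
        last_overloaded_queue_log_time: None,
    };
    manager.update_overload_state(250_000);
    assert!(manager.import_queue_is_overloaded); // 10_000 queued > 2_048 limit
}
```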