diff --git a/consensus/config/src/committee.rs b/consensus/config/src/committee.rs
index 693184f7ef5..70ea67e777d 100644
--- a/consensus/config/src/committee.rs
+++ b/consensus/config/src/committee.rs
@@ -14,13 +14,13 @@ use crate::{AuthorityPublicKey, NetworkPublicKey, ProtocolPublicKey};
 /// Committee of the consensus protocol is updated each epoch.
 pub type Epoch = u64;
 
-/// Voting power of an authority, roughly proportional to the actual amount of Sui staked
-/// by the authority.
+/// Voting power of an authority, roughly proportional to the actual amount of
+/// Sui staked by the authority.
 /// Total stake / voting power of all authorities should sum to 10,000.
 pub type Stake = u64;
 
-/// Committee is the set of authorities that participate in the consensus protocol for this epoch.
-/// Its configuration is stored and computed on chain.
+/// Committee is the set of authorities that participate in the consensus
+/// protocol for this epoch. Its configuration is stored and computed on chain.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct Committee {
     /// The epoch number of this committee
@@ -127,8 +127,8 @@ impl Committee {
 
 /// Represents one authority in the committee.
 ///
-/// NOTE: this is intentionally un-cloneable, to encourage only copying relevant fields.
-/// AuthorityIndex should be used to reference an authority instead.
+/// NOTE: this is intentionally un-cloneable, to encourage only copying relevant
+/// fields. AuthorityIndex should be used to reference an authority instead.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct Authority {
     /// Voting power of the authority in the committee.
@@ -145,12 +145,13 @@ pub struct Authority {
     pub network_key: NetworkPublicKey,
 }
 
-/// Each authority is uniquely identified by its AuthorityIndex in the Committee.
-/// AuthorityIndex is between 0 (inclusive) and the total number of authorities (exclusive).
+/// Each authority is uniquely identified by its AuthorityIndex in the
+/// Committee. AuthorityIndex is between 0 (inclusive) and the total number of
+/// authorities (exclusive).
 ///
-/// NOTE: for safety, invalid AuthorityIndex should be impossible to create. So AuthorityIndex
-/// should not be created or incremented outside of this file. AuthorityIndex received from peers
-/// should be validated before use.
+/// NOTE: for safety, invalid AuthorityIndex should be impossible to create. So
+/// AuthorityIndex should not be created or incremented outside of this file.
+/// AuthorityIndex received from peers should be validated before use.
 #[derive(
     Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug, Default, Hash, Serialize, Deserialize,
 )]
diff --git a/consensus/config/src/crypto.rs b/consensus/config/src/crypto.rs
index e28b4da3e55..9075bdc7738 100644
--- a/consensus/config/src/crypto.rs
+++ b/consensus/config/src/crypto.rs
@@ -1,15 +1,17 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-//! Here we select the cryptographic types that are used by default in the code base.
-//! The whole code base should only:
+//! Here we select the cryptographic types that are used by default in the code
+//! base. The whole code base should only:
 //! - refer to those aliases and not use the individual scheme implementations
-//! - not use the schemes in a way that break genericity (e.g. using their Struct impl functions)
+//! - not use the schemes in a way that breaks genericity (e.g. using their
+//!   Struct impl functions)
 //! - swap one of those aliases to point to another type if necessary
 //!
-//! Beware: if you change those aliases to point to another scheme implementation, you will have
-//! to change all four aliases to point to concrete types that work with each other. Failure to do
-//! so will result in a ton of compilation errors, and worse: it will not make sense!
+//! Beware: if you change those aliases to point to another scheme
+//! implementation, you will have to change all four aliases to point to
+//! concrete types that work with each other. Failure to do so will result in a
+//! ton of compilation errors, and worse: it will not make sense!
 
 use fastcrypto::{
     bls12381, ed25519,
@@ -32,7 +34,7 @@ impl NetworkPublicKey {
     }
 
     pub fn to_bytes(&self) -> [u8; 32] {
-        self.0 .0.to_bytes()
+        self.0.0.to_bytes()
     }
 }
 
@@ -118,8 +120,8 @@ impl ProtocolKeySignature {
     }
 }
 
-/// Authority key represents the identity of an authority. It is only used for identity sanity
-/// checks and not used for verification.
+/// Authority key represents the identity of an authority. It is only used for
+/// identity sanity checks and not used for verification.
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
 pub struct AuthorityPublicKey(bls12381::min_sig::BLS12381PublicKey);
 pub struct AuthorityKeyPair(bls12381::min_sig::BLS12381KeyPair);
diff --git a/consensus/config/src/parameters.rs b/consensus/config/src/parameters.rs
index 91c1d00d39f..bb8d3816b96 100644
--- a/consensus/config/src/parameters.rs
+++ b/consensus/config/src/parameters.rs
@@ -7,17 +7,21 @@ use serde::{Deserialize, Serialize};
 
 /// Operational configurations of a consensus authority.
 ///
-/// All fields should tolerate inconsistencies among authorities, without affecting safety of the
-/// protocol. Otherwise, they need to be part of Sui protocol config or epoch state on-chain.
+/// All fields should tolerate inconsistencies among authorities, without
+/// affecting safety of the protocol. Otherwise, they need to be part of Sui
+/// protocol config or epoch state on-chain.
 ///
-/// NOTE: fields with default values are specified in the serde default functions. Most operators
-/// should not need to specify any field, except db_path.
+/// NOTE: fields with default values are specified in the serde default
+/// functions. Most operators should not need to specify any field, except
+/// db_path.
 #[derive(Clone, Debug, Deserialize, Serialize)]
 pub struct Parameters {
-    /// The number of rounds of blocks to be kept in the Dag state cache per authority. The larger
-    /// the number the more the blocks that will be kept in memory allowing minimising any potential
-    /// disk access. Should be careful when tuning this parameter as it could be quite memory expensive.
-    /// Value should be at minimum 50 rounds to ensure node performance and protocol advance.
+    /// The number of rounds of blocks to be kept in the Dag state cache per
+    /// authority. The larger the number, the more blocks are kept in memory,
+    /// minimising potential disk access. Care should be taken when tuning this
+    /// parameter, as it can be quite memory expensive. Value should be at
+    /// minimum 50 rounds to ensure node performance and protocol advance.
     #[serde(default = "Parameters::default_dag_state_cached_rounds")]
     pub dag_state_cached_rounds: u32,
 
@@ -25,14 +29,16 @@ pub struct Parameters {
     #[serde(default = "Parameters::default_leader_timeout")]
     pub leader_timeout: Duration,
 
-    /// Minimum delay between rounds, to avoid generating too many rounds when latency is low.
-    /// This is especially necessary for tests running locally.
-    /// If setting a non-default value, it should be set low enough to avoid reducing
-    /// round rate and increasing latency in realistic and distributed configurations.
+    /// Minimum delay between rounds, to avoid generating too many rounds when
+    /// latency is low. This is especially necessary for tests running
+    /// locally. If setting a non-default value, it should be set low enough
+    /// to avoid reducing round rate and increasing latency in realistic and
+    /// distributed configurations.
    #[serde(default = "Parameters::default_min_round_delay")]
     pub min_round_delay: Duration,
 
-    /// Maximum forward time drift (how far in future) allowed for received blocks.
+    /// Maximum forward time drift (how far in the future) allowed for received
+    /// blocks.
     #[serde(default = "Parameters::default_max_forward_time_drift")]
     pub max_forward_time_drift: Duration,
 
@@ -88,8 +94,9 @@ impl Default for Parameters {
 
 #[derive(Clone, Debug, Deserialize, Serialize)]
 pub struct AnemoParameters {
-    /// Size in bytes above which network messages are considered excessively large. Excessively
-    /// large messages will still be handled, but logged and reported in metrics for debugging.
+    /// Size in bytes above which network messages are considered excessively
+    /// large. Excessively large messages will still be handled, but logged
+    /// and reported in metrics for debugging.
     ///
     /// If unspecified, this will default to 8 MiB.
     #[serde(default = "AnemoParameters::default_excessive_message_size")]
diff --git a/consensus/config/src/test_committee.rs b/consensus/config/src/test_committee.rs
index f2c821969c0..4b58a6e8b9c 100644
--- a/consensus/config/src/test_committee.rs
+++ b/consensus/config/src/test_committee.rs
@@ -10,7 +10,8 @@ use crate::{
     Authority, AuthorityKeyPair, Committee, Epoch, NetworkKeyPair, ProtocolKeyPair, Stake,
 };
 
-/// Creates a committee for local testing, and the corresponding key pairs for the authorities.
+/// Creates a committee for local testing, and the corresponding key pairs for
+/// the authorities.
 pub fn local_committee_and_keys(
     epoch: Epoch,
     authorities_stake: Vec<Stake>,
@@ -44,9 +45,10 @@ fn get_available_local_address() -> Multiaddr {
     format!("/ip4/{}/udp/{}", host, port).parse().unwrap()
 }
 
-/// Returns an ephemeral, available port. On unix systems, the port returned will be in the
-/// TIME_WAIT state ensuring that the OS won't hand out this port for some grace period.
-/// Callers should be able to bind to this port given they use SO_REUSEADDR.
+/// Returns an ephemeral, available port. On Unix systems, the port returned
+/// will be in the TIME_WAIT state, ensuring that the OS won't hand out this
+/// port for some grace period. Callers should be able to bind to this port
+/// given they use SO_REUSEADDR.
 fn get_available_port(host: &str) -> u16 {
     const MAX_PORT_RETRIES: u32 = 1000;
@@ -64,9 +66,10 @@ fn get_ephemeral_port(host: &str) -> std::io::Result<u16> {
     let listener = TcpListener::bind((host, 0))?;
     let addr = listener.local_addr()?;
 
-    // Create and accept a connection (which we'll promptly drop) in order to force the port
-    // into the TIME_WAIT state, ensuring that the port will be reserved from some limited
-    // amount of time (roughly 60s on some Linux systems)
+    // Create and accept a connection (which we'll promptly drop) in order to force
+    // the port into the TIME_WAIT state, ensuring that the port will be
+    // reserved for some limited amount of time (roughly 60s on some Linux
+    // systems)
     let _sender = TcpStream::connect(addr)?;
     let _incoming = listener.accept()?;
diff --git a/consensus/config/tests/committee_test.rs b/consensus/config/tests/committee_test.rs
index b067c96fe12..b1ee91b812c 100644
--- a/consensus/config/tests/committee_test.rs
+++ b/consensus/config/tests/committee_test.rs
@@ -8,8 +8,9 @@ use insta::assert_yaml_snapshot;
 use mysten_network::Multiaddr;
 use rand::{rngs::StdRng, SeedableRng as _};
 
-// Committee is not sent over network or stored on disk itself, but some of its fields are.
-// So this test can still be useful to detect accidental format changes.
+// Committee is not sent over network or stored on disk itself, but some of its
+// fields are. So this test can still be useful to detect accidental format
+// changes.
 #[test]
 fn committee_snapshot_matches() {
     let epoch = 100;
diff --git a/consensus/core/src/authority_node.rs b/consensus/core/src/authority_node.rs
index bdf389067ed..bd97f79f4e5 100644
--- a/consensus/core/src/authority_node.rs
+++ b/consensus/core/src/authority_node.rs
@@ -39,7 +39,8 @@ use crate::{
 };
 
 /// ConsensusAuthority is used by Sui to manage the lifetime of AuthorityNode.
-/// It hides the details of the implementation from the caller, MysticetiManager.
+/// It hides the details of the implementation from the caller,
+/// MysticetiManager.
 #[allow(private_interfaces)]
 pub enum ConsensusAuthority {
     WithAnemo(AuthorityNode<AnemoManager>),
@@ -176,7 +177,8 @@ where
         let mut network_manager = N::new(context.clone());
         let network_client = network_manager.client();
 
-        // REQUIRED: Broadcaster must be created before Core, to start listen on block broadcasts.
+        // REQUIRED: Broadcaster must be created before Core, to start listening on
+        // block broadcasts.
         let broadcaster =
             Broadcaster::new(context.clone(), network_client.clone(), &signals_receivers);
diff --git a/consensus/core/src/base_committer.rs b/consensus/core/src/base_committer.rs
index 93ba27e8f95..8b7413f7d1a 100644
--- a/consensus/core/src/base_committer.rs
+++ b/consensus/core/src/base_committer.rs
@@ -20,8 +20,8 @@ use crate::{
 mod base_committer_tests;
 
 pub(crate) struct BaseCommitterOptions {
-    /// TODO: Re-evaluate if we want this to be configurable after running experiments.
-    /// The length of a wave (minimum 3)
+    /// TODO: Re-evaluate if we want this to be configurable after running
+    /// experiments. The length of a wave (minimum 3)
     pub wave_length: u32,
     /// The offset used in the leader-election protocol. This is used by the
     /// multi-committer to ensure that each [`BaseCommitter`] instance elects
@@ -43,10 +43,10 @@ impl Default for BaseCommitterOptions {
     }
 }
 
-/// The [`BaseCommitter`] contains the bare bone commit logic. Once instantiated,
-/// the method `try_direct_decide` and `try_indirect_decide` can be called at any
-/// time and any number of times (it is idempotent) to determine whether a leader
-/// can be committed or skipped.
+/// The [`BaseCommitter`] contains the bare-bones commit logic. Once
+/// instantiated, the methods `try_direct_decide` and `try_indirect_decide` can
+/// be called at any time and any number of times (they are idempotent) to
+/// determine whether a leader can be committed or skipped.
 pub(crate) struct BaseCommitter {
     /// The per-epoch configuration of this authority.
     context: Arc<Context>,
@@ -79,16 +79,17 @@ impl BaseCommitter {
     /// can direct-commit or direct-skip it.
     #[tracing::instrument(skip_all, fields(leader = %leader))]
     pub fn try_direct_decide(&self, leader: Slot) -> LeaderStatus {
-        // Check whether the leader has enough blame. That is, whether there are 2f+1 non-votes
-        // for that leader (which ensure there will never be a certificate for that leader).
+        // Check whether the leader has enough blame. That is, whether there are 2f+1
+        // non-votes for that leader (which ensure there will never be a
+        // certificate for that leader).
         let voting_round = leader.round + 1;
         if self.enough_leader_blame(voting_round, leader.authority) {
             return LeaderStatus::Skip(leader);
         }
 
-        // Check whether the leader(s) has enough support. That is, whether there are 2f+1
-        // certificates over the leader. Note that there could be more than one leader block
-        // (created by Byzantine leaders).
+        // Check whether the leader(s) has enough support. That is, whether there are
+        // 2f+1 certificates over the leader. Note that there could be more than
+        // one leader block (created by Byzantine leaders).
         let wave = self.wave_number(leader.round);
         let decision_round = self.decision_round(wave);
         let leader_blocks = self.dag_state.read().get_uncommitted_blocks_at_slot(leader);
@@ -98,8 +99,8 @@ impl BaseCommitter {
             .map(LeaderStatus::Commit)
             .collect();
 
-        // There can be at most one leader with enough support for each round, otherwise it means
-        // the BFT assumption is broken.
+        // There can be at most one leader with enough support for each round, otherwise
+        // it means the BFT assumption is broken.
         if leaders_with_enough_support.len() > 1 {
             panic!("[{self}] More than one certified block for {leader}")
         }
@@ -117,8 +118,9 @@ impl BaseCommitter {
         leader_slot: Slot,
         leaders: impl Iterator,
     ) -> LeaderStatus {
-        // The anchor is the first committed leader with round higher than the decision round of the
-        // target leader. We must stop the iteration upon encountering an undecided leader.
+        // The anchor is the first committed leader with round higher than the decision
+        // round of the target leader. We must stop the iteration upon
+        // encountering an undecided leader.
         let anchors = leaders.filter(|x| leader_slot.round + self.options.wave_length <= x.round());
 
         for anchor in anchors {
@@ -157,9 +159,9 @@ impl BaseCommitter {
         ))
     }
 
-    /// Return the leader round of the specified wave. The leader round is always
-    /// the first round of the wave. This takes into account round offset for when
-    /// pipelining is enabled.
+    /// Return the leader round of the specified wave. The leader round is
+    /// always the first round of the wave. This takes into account round
+    /// offset for when pipelining is enabled.
     pub(crate) fn leader_round(&self, wave: WaveNumber) -> Round {
         (wave * self.options.wave_length) + self.options.round_offset
     }
@@ -178,11 +180,12 @@ impl BaseCommitter {
         round.saturating_sub(self.options.round_offset) / self.options.wave_length
     }
 
-    /// Find which block is supported at a slot (author, round) by the given block.
-    /// Blocks can indirectly reference multiple other blocks at a slot, but only
-    /// one block at a slot will be supported by the given block. If block A supports B
-    /// at a slot, it is guaranteed that any processed block by the same author that
-    /// directly or indirectly includes A will also support B at that slot.
+    /// Find which block is supported at a slot (author, round) by the given
+    /// block. Blocks can indirectly reference multiple other blocks at a
+    /// slot, but only one block at a slot will be supported by the given
+    /// block. If block A supports B at a slot, it is guaranteed that any
+    /// processed block by the same author that directly or indirectly
+    /// includes A will also support B at that slot.
     fn find_supported_block(&self, leader_slot: Slot, from: &VerifiedBlock) -> Option<BlockRef> {
         if from.round() < leader_slot.round {
             return None;
         }
@@ -215,12 +218,13 @@ impl BaseCommitter {
         self.find_supported_block(leader_slot, potential_vote) == Some(reference)
     }
 
-    /// Check whether the specified block (`potential_certificate`) is a certificate
-    /// for the specified leader (`leader_block`). An `all_votes` map can be
-    /// provided as a cache to quickly skip checking against the block store on
-    /// whether a reference is a vote. This is done for efficiency. Bear in mind
-    /// that the `all_votes` should refer to votes considered to the same `leader_block`
-    /// and it can't be reused for different leaders.
+    /// Check whether the specified block (`potential_certificate`) is a
+    /// certificate for the specified leader (`leader_block`). An
+    /// `all_votes` map can be provided as a cache to quickly skip checking
+    /// against the block store on whether a reference is a vote. This is
+    /// done for efficiency. Bear in mind that the `all_votes` should refer
+    /// to votes considered for the same `leader_block` and it can't be
+    /// reused for different leaders.
     fn is_certificate(
         &self,
         potential_certificate: &VerifiedBlock,
@@ -260,18 +264,19 @@ impl BaseCommitter {
         false
     }
 
-    /// Decide the status of a target leader from the specified anchor. We commit
-    /// the target leader if it has a certified link to the anchor. Otherwise, we
-    /// skip the target leader.
+    /// Decide the status of a target leader from the specified anchor. We
+    /// commit the target leader if it has a certified link to the anchor.
+    /// Otherwise, we skip the target leader.
     fn decide_leader_from_anchor(&self, anchor: &VerifiedBlock, leader_slot: Slot) -> LeaderStatus {
-        // Get the block(s) proposed by the leader. There could be more than one leader block
-        // in the slot from a Byzantine authority.
+        // Get the block(s) proposed by the leader. There could be more than one leader
+        // block in the slot from a Byzantine authority.
         let leader_blocks = self
             .dag_state
             .read()
             .get_uncommitted_blocks_at_slot(leader_slot);
 
-        // TODO: Re-evaluate this check once we have a better way to handle/track byzantine authorities.
+        // TODO: Re-evaluate this check once we have a better way to handle/track
+        // byzantine authorities.
         if leader_blocks.len() > 1 {
             tracing::warn!(
                 "Multiple blocks found for leader slot {leader_slot}: {:?}",
@@ -279,8 +284,9 @@ impl BaseCommitter {
             );
         }
 
-        // Get all blocks that could be potential certificates for the target leader. These blocks
-        // are in the decision round of the target leader and are linked to the anchor.
+        // Get all blocks that could be potential certificates for the target leader.
+        // These blocks are in the decision round of the target leader and are
+        // linked to the anchor.
         let wave = self.wave_number(leader_slot.round);
         let decision_round = self.decision_round(wave);
         let potential_certificates = self
@@ -288,8 +294,8 @@ impl BaseCommitter {
             .read()
             .ancestors_at_round(anchor, decision_round);
 
-        // Use those potential certificates to determine which (if any) of the target leader
-        // blocks can be committed.
+        // Use those potential certificates to determine which (if any) of the target
+        // leader blocks can be committed.
         let mut certified_leader_blocks: Vec<_> = leader_blocks
             .into_iter()
             .filter(|leader_block| {
@@ -300,20 +306,22 @@ impl BaseCommitter {
             })
             .collect();
 
-        // There can be at most one certified leader, otherwise it means the BFT assumption is broken.
+        // There can be at most one certified leader, otherwise it means the BFT
+        // assumption is broken.
         if certified_leader_blocks.len() > 1 {
             panic!("More than one certified block at wave {wave} from leader {leader_slot}")
         }
 
-        // We commit the target leader if it has a certificate that is an ancestor of the anchor.
-        // Otherwise skip it.
+        // We commit the target leader if it has a certificate that is an ancestor of
+        // the anchor. Otherwise skip it.
         match certified_leader_blocks.pop() {
             Some(certified_leader_block) => LeaderStatus::Commit(certified_leader_block),
             None => LeaderStatus::Skip(leader_slot),
         }
     }
 
-    /// Check whether the specified leader has 2f+1 non-votes (blames) to be directly skipped.
+    /// Check whether the specified leader has 2f+1 non-votes (blames) to be
+    /// directly skipped.
     fn enough_leader_blame(&self, voting_round: Round, leader: AuthorityIndex) -> bool {
         let voting_blocks = self
             .dag_state
@@ -391,9 +399,9 @@ impl Display for BaseCommitter {
     }
 }
 
-/// A builder for the base committer. By default, the builder creates a base committer
-/// that has no leader or round offset. Which indicates single leader & pipelining
-/// disabled.
+/// A builder for the base committer. By default, the builder creates a base
+/// committer that has no leader or round offset, which indicates a single
+/// leader and pipelining disabled.
 #[cfg(test)]
 mod base_committer_builder {
     use super::*;
diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs
index 7242e5df0e0..14afc10d93c 100644
--- a/consensus/core/src/block.rs
+++ b/consensus/core/src/block.rs
@@ -19,9 +19,12 @@ use fastcrypto::hash::{Digest, HashFunction};
 use serde::{Deserialize, Serialize};
 use shared_crypto::intent::{Intent, IntentMessage, IntentScope};
 
-use crate::error::ConsensusResult;
-use crate::{commit::CommitRef, context::Context};
-use crate::{ensure, error::ConsensusError};
+use crate::{
+    commit::CommitRef,
+    context::Context,
+    ensure,
+    error::{ConsensusError, ConsensusResult},
+};
 
 pub(crate) const GENESIS_ROUND: Round = 0;
 
@@ -59,10 +62,10 @@ impl Transaction {
     }
 }
 
-/// A block includes references to previous round blocks and transactions that the authority
-/// considers valid.
-/// Well behaved authorities produce at most one block per round, but malicious authorities can
-/// equivocate.
+/// A block includes references to previous round blocks and transactions that
+/// the authority considers valid.
+/// Well-behaved authorities produce at most one block per round, but malicious
+/// authorities can equivocate.
 #[derive(Clone, Deserialize, Serialize)]
 #[enum_dispatch(BlockAPI)]
 pub enum Block {
@@ -161,8 +164,9 @@ impl BlockAPI for BlockV1 {
     }
 }
 
-/// `BlockRef` uniquely identifies a `VerifiedBlock` via `digest`. It also contains the slot
-/// info (round and author) so it can be used in logic such as aggregating stakes for a round.
+/// `BlockRef` uniquely identifies a `VerifiedBlock` via `digest`. It also
+/// contains the slot info (round and author) so it can be used in logic such as
+/// aggregating stakes for a round.
 #[derive(Clone, Copy, Serialize, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord)]
 pub struct BlockRef {
     pub round: Round,
@@ -199,11 +203,12 @@ impl Hash for BlockRef {
     }
 }
 
-/// Digest of a `VerifiedBlock` or verified `SignedBlock`, which covers the `Block` and its
-/// signature.
+/// Digest of a `VerifiedBlock` or verified `SignedBlock`, which covers the
+/// `Block` and its signature.
 ///
-/// Note: the signature algorithm is assumed to be non-malleable, so it is impossible for another
-/// party to create an altered but valid signature, producing an equivocating `BlockDigest`.
+/// Note: the signature algorithm is assumed to be non-malleable, so it is
+/// impossible for another party to create an altered but valid signature,
+/// producing an equivocating `BlockDigest`.
 #[derive(Clone, Copy, Serialize, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord)]
 pub struct BlockDigest([u8; consensus_config::DIGEST_LENGTH]);
 
@@ -253,8 +258,8 @@ impl AsRef<[u8]> for BlockDigest {
     }
 }
 
-/// Slot is the position of blocks in the DAG. It can contain 0, 1 or multiple blocks
-/// from the same authority at the same round.
+/// Slot is the position of blocks in the DAG. It can contain 0, 1 or multiple
+/// blocks from the same authority at the same round.
 #[derive(Clone, Copy, PartialEq, PartialOrd, Default, Hash)]
 pub struct Slot {
     pub round: Round,
@@ -296,8 +301,9 @@ impl fmt::Debug for Slot {
 
 /// A Block with its signature, before they are verified.
 ///
-/// Note: `BlockDigest` is computed over this struct, so any added field (without `#[serde(skip)]`)
-/// will affect the values of `BlockDigest` and `BlockRef`.
+/// Note: `BlockDigest` is computed over this struct, so any added field
+/// (without `#[serde(skip)]`) will affect the values of `BlockDigest` and
+/// `BlockRef`.
 #[derive(Deserialize, Serialize)]
 pub(crate) struct SignedBlock {
     inner: Block,
@@ -325,8 +331,8 @@ impl SignedBlock {
         &self.signature
     }
 
-    /// This method only verifies this block's signature. Verification of the full block
-    /// should be done via BlockVerifier.
+    /// This method only verifies this block's signature. Verification of the
+    /// full block should be done via BlockVerifier.
     pub(crate) fn verify_signature(&self, context: &Context) -> ConsensusResult<()> {
         let block = &self.inner;
         let committee = &context.committee;
@@ -356,7 +362,8 @@ impl SignedBlock {
 
 /// Digest of a block, covering all `Block` fields without its signature.
 /// This is used during Block signing and signature verification.
-/// This should never be used outside of this file, to avoid confusion with `BlockDigest`.
+/// This should never be used outside of this file, to avoid confusion with
+/// `BlockDigest`.
 #[derive(Serialize, Deserialize)]
 struct InnerBlockDigest([u8; consensus_config::DIGEST_LENGTH]);
 
@@ -400,7 +407,8 @@ fn verify_block_signature(
         .map_err(ConsensusError::SignatureVerificationFailure)
 }
 
-/// Allow quick access on the underlying Block without having to always refer to the inner block ref.
+/// Allow quick access to the underlying Block without having to always refer to
+/// the inner block ref.
 impl Deref for SignedBlock {
     type Target = Block;
 
@@ -421,7 +429,8 @@ pub struct VerifiedBlock {
 }
 
 impl VerifiedBlock {
-    /// Creates VerifiedBlock from a verified SignedBlock and its serialized bytes.
+    /// Creates VerifiedBlock from a verified SignedBlock and its serialized
+    /// bytes.
     pub(crate) fn new_verified(signed_block: SignedBlock, serialized: Bytes) -> Self {
         let digest = Self::compute_digest(&serialized);
         VerifiedBlock {
@@ -475,7 +484,8 @@ impl VerifiedBlock {
     }
 }
 
-/// Allow quick access on the underlying Block without having to always refer to the inner block ref.
+/// Allow quick access to the underlying Block without having to always refer to
+/// the inner block ref.
 impl Deref for VerifiedBlock {
     type Target = Block;
 
@@ -593,9 +603,11 @@ mod tests {
 
     use fastcrypto::error::FastCryptoError;
 
-    use crate::block::{SignedBlock, TestBlock};
-    use crate::context::Context;
-    use crate::error::ConsensusError;
+    use crate::{
+        block::{SignedBlock, TestBlock},
+        context::Context,
+        error::ConsensusError,
+    };
 
     #[test]
     fn test_sign_and_verify() {
diff --git a/consensus/core/src/block_manager.rs b/consensus/core/src/block_manager.rs
index 8fc29d9300a..0b1fe961135 100644
--- a/consensus/core/src/block_manager.rs
+++ b/consensus/core/src/block_manager.rs
@@ -31,26 +31,31 @@ impl SuspendedBlock {
     }
 }
 
-/// Block manager suspends incoming blocks until they are connected to the existing graph,
-/// returning newly connected blocks.
-/// TODO: As it is possible to have Byzantine validators who produce Blocks without valid causal
-/// history we need to make sure that BlockManager takes care of that and avoid OOM (Out Of Memory)
-/// situations.
+/// Block manager suspends incoming blocks until they are connected to the
+/// existing graph, returning newly connected blocks.
+/// TODO: As it is possible to have Byzantine validators who produce Blocks
+/// without valid causal history, we need to make sure that BlockManager takes
+/// care of that and avoids OOM (Out Of Memory) situations.
 pub(crate) struct BlockManager {
     context: Arc<Context>,
     dag_state: Arc<RwLock<DagState>>,
     block_verifier: Arc<dyn BlockVerifier>,
 
-    /// Keeps all the suspended blocks. A suspended block is a block that is missing part of its causal history and thus
-    /// can't be immediately processed. A block will remain in this map until all its causal history has been successfully
-    /// processed.
+    /// Keeps all the suspended blocks. A suspended block is a block that is
+    /// missing part of its causal history and thus can't be immediately
+    /// processed. A block will remain in this map until all its causal history
+    /// has been successfully processed.
     suspended_blocks: BTreeMap<BlockRef, SuspendedBlock>,
-    /// A map that keeps all the blocks that we are missing (keys) and the corresponding blocks that reference the missing blocks
-    /// as ancestors and need them to get unsuspended. It is possible for a missing dependency (key) to be a suspended block, so
-    /// the block has been already fetched but it self is still missing some of its ancestors to be processed.
+    /// A map that keeps all the blocks that we are missing (keys) and the
+    /// corresponding blocks that reference the missing blocks as ancestors
+    /// and need them to get unsuspended. It is possible for a missing
+    /// dependency (key) to be a suspended block, so the block has already
+    /// been fetched but is itself still missing some of its ancestors to be
+    /// processed.
     missing_ancestors: BTreeMap<BlockRef, BTreeSet<BlockRef>>,
-    /// Keeps all the blocks that we actually miss and haven't fetched them yet. That set will basically contain all the
-    /// keys from the `missing_ancestors` minus any keys that exist in `suspended_blocks`.
+    /// Keeps all the blocks that we actually miss and haven't fetched yet.
+    /// That set will basically contain all the keys from the
+    /// `missing_ancestors` minus any keys that exist in `suspended_blocks`.
     missing_blocks: BTreeSet<BlockRef>,
 }
 
@@ -70,9 +75,11 @@ impl BlockManager {
         }
     }
 
-    /// Tries to accept the provided blocks assuming that all their causal history exists. The method
-    /// returns all the blocks that have been successfully processed in round ascending order, that includes also previously
-    /// suspended blocks that have now been able to get accepted. Method also returns a set with the new missing ancestor blocks.
+    /// Tries to accept the provided blocks assuming that all their causal
+    /// history exists. The method returns all the blocks that have been
+    /// successfully processed, in ascending round order, including previously
+    /// suspended blocks that have now been accepted. The method also returns
+    /// a set with the new missing ancestor blocks.
     pub(crate) fn try_accept_blocks(
         &mut self,
         mut blocks: Vec<VerifiedBlock>,
@@ -115,7 +122,10 @@ impl BlockManager {
                         blocks_to_reject.insert(b.reference(), b);
                         continue 'block;
                     }
-                    panic!("Unsuspended block {:?} has a missing ancestor! Ancestor not found in DagState: {:?}", b, included);
+                    panic!(
+                        "Unsuspended block {:?} has a missing ancestor! Ancestor not found in DagState: {:?}",
+                        b, included
+                    );
                 }
                 if let Err(e) = self.block_verifier.check_ancestors(&b, &ancestor_blocks) {
                     warn!("Block {:?} failed to verify ancestors: {}", b, e);
@@ -149,8 +159,8 @@ impl BlockManager {
         }
 
         // Newly missed blocks
-        // TODO: make sure that the computation here is bounded either in the byzantine or node fall
-        // back scenario.
+        // TODO: make sure that the computation here is bounded either in the Byzantine
+        // or node fallback scenario.
         let missing_blocks_after = self
             .missing_blocks
             .difference(&missing_blocks_before)
@@ -167,15 +177,17 @@ impl BlockManager {
         (accepted_blocks, missing_blocks_after)
     }
 
-    /// Tries to accept the provided block. To accept a block its ancestors must have been already successfully accepted. If
-    /// block is accepted then Some result is returned. None is returned when either the block is suspended or the block
-    /// has been already accepted before.
+    /// Tries to accept the provided block. To accept a block, its ancestors
+    /// must have already been successfully accepted. If the block is accepted
+    /// then a Some result is returned. None is returned when either the block
+    /// is suspended or the block has already been accepted before.
     fn try_accept_one_block(&mut self, block: VerifiedBlock) -> Option<VerifiedBlock> {
         let block_ref = block.reference();
         let mut missing_ancestors = BTreeSet::new();
         let dag_state = self.dag_state.read();
 
-        // If block has been already received and suspended, or already processed and stored, or is a genesis block, then skip it.
+        // If the block has already been received and suspended, or already processed
+        // and stored, or is a genesis block, then skip it.
         if self.suspended_blocks.contains_key(&block_ref) || dag_state.contains_block(&block_ref) {
             return None;
         }
@@ -197,16 +209,18 @@ impl BlockManager {
                     .or_default()
                     .insert(block_ref);
 
-                // Add the ancestor to the missing blocks set only if it doesn't already exist in the suspended blocks - meaning
-                // that we already have its payload.
+                // Add the ancestor to the missing blocks set only if it doesn't already exist
+                // in the suspended blocks - meaning that we already have its
+                // payload.
                 if !self.suspended_blocks.contains_key(ancestor) {
                     self.missing_blocks.insert(*ancestor);
                 }
             }
         }
 
-        // Remove the block ref from the `missing_blocks` - if exists - since we now have received the block. The block
-        // might still get suspended, but we won't report it as missing in order to not re-fetch.
+        // Remove the block ref from the `missing_blocks` - if it exists - since we now
+        // have received the block. The block might still get suspended, but we
+        // won't report it as missing in order to not re-fetch.
         self.missing_blocks.remove(&block.reference());
 
         if !missing_ancestors.is_empty() {
@@ -230,8 +244,9 @@ impl BlockManager {
         Some(block)
     }
 
-    /// Given an accepted block `accepted_block` it attempts to accept all the suspended children blocks assuming such exist.
-    /// All the unsuspended / accepted blocks are returned as a vector in causal order.
+    /// Given an accepted block `accepted_block`, it attempts to accept all the
+    /// suspended child blocks assuming such exist. All the unsuspended /
+    /// accepted blocks are returned as a vector in causal order.
     fn try_unsuspend_children_blocks(
         &mut self,
         accepted_block: &VerifiedBlock,
@@ -245,8 +260,9 @@ impl BlockManager {
                 self.missing_ancestors.remove(&block.reference())
             {
                 for r in block_refs_with_missing_deps {
-                    // For each dependency try to unsuspend it. If that's successful then we add it to the queue so
-                    // we can recursively try to unsuspend its children.
+                    // For each dependency try to unsuspend it. If that's successful then we add it
+                    // to the queue so we can recursively try to unsuspend its
+                    // children.
                     if let Some(block) = self.try_unsuspend_block(&r, &block.reference()) {
                         unsuspended_blocks.push(block.block.clone());
                         to_process_blocks.push(block.block);
@@ -274,8 +290,10 @@ impl BlockManager {
         unsuspended_blocks
     }
 
-    /// Attempts to unsuspend a block by checking its ancestors and removing the `accepted_dependency` by its local set.
-    /// If there is no missing dependency then this block can be unsuspended immediately and is removed from the `suspended_blocks` map.
+    /// Attempts to unsuspend a block by checking its ancestors and removing the
+    /// `accepted_dependency` from its local set. If there is no missing
+    /// dependency then this block can be unsuspended immediately and is removed
+    /// from the `suspended_blocks` map.
     fn try_unsuspend_block(
         &mut self,
         block_ref: &BlockRef,
@@ -300,13 +318,14 @@ impl BlockManager {
         None
     }
 
-    /// Returns all the blocks that are currently missing and needed in order to accept suspended
-    /// blocks.
+    /// Returns all the blocks that are currently missing and needed in order to
+    /// accept suspended blocks.
     pub(crate) fn missing_blocks(&self) -> BTreeSet<BlockRef> {
         self.missing_blocks.clone()
     }
 
-    /// Returns all the suspended blocks whose causal history we miss hence we can't accept them yet.
+    /// Returns all the suspended blocks whose causal history we are missing,
+    /// hence we can't accept them yet.
     #[cfg(test)]
     pub(crate) fn suspended_blocks(&self) -> Vec<BlockRef> {
         self.suspended_blocks.keys().cloned().collect()
     }
@@ -356,13 +375,15 @@ mod tests {
 
         // THEN
         assert!(accepted_blocks.is_empty());
 
-        // AND the returned missing ancestors should be the same as the provided block ancestors
+        // AND the returned missing ancestors should be the same as the provided block
+        // ancestors
         let missing_block_refs = round_2_blocks.first().unwrap().ancestors();
         let missing_block_refs = missing_block_refs.iter().cloned().collect::<BTreeSet<_>>();
         assert_eq!(missing, missing_block_refs);
 
-        // AND the missing blocks are the parents of the round 2 blocks. Since this is a fully connected DAG taking the
-        // ancestors of the first element suffices.
+        // AND the missing blocks are the parents of the round 2 blocks. Since this is
+        // a fully connected DAG, taking the ancestors of the first element
+        // suffices.
         assert_eq!(block_manager.missing_blocks(), missing_block_refs);
 
         // AND suspended blocks should return the round_2_blocks
@@ -388,8 +409,8 @@ mod tests {
         // create a DAG of 4 rounds
         let all_blocks = dag(context, 4);
 
-        // Take the blocks from round 4 up to 2 (included). Only the first block of each round should return missing
-        // ancestors when try to accept
+        // Take the blocks from round 4 up to 2 (included). Only the first block of
+        // each round should return missing ancestors when trying to accept
         for (i, block) in all_blocks
             .into_iter()
             .rev()
@@ -402,7 +423,8 @@ mod tests {
             // THEN
             assert!(accepted_blocks.is_empty());
 
-            // Only the first block for each round should return missing blocks. Every other shouldn't
+            // Only the first block for each round should return missing blocks. Every
+            // other shouldn't
            if i % 4 == 0 {
                 let block_ancestors = block.ancestors().iter().cloned().collect::<BTreeSet<_>>();
                 assert_eq!(missing, block_ancestors);
@@ -441,7 +463,8 @@ mod tests {
         );
         assert!(missing.is_empty());
 
-        // WHEN trying to accept same blocks again, then none will be returned as those have been already accepted
+        // WHEN trying to accept the same blocks again, then none will be returned as
+        // those have already been accepted
         let (accepted_blocks, _) = block_manager.try_accept_blocks(all_blocks);
         assert!(accepted_blocks.is_empty());
     }
@@ -455,8 +478,9 @@ mod tests {
         // create a DAG of rounds 1 ~ 3
         let mut all_blocks = dag(context.clone(), 3);
 
-        // Now randomize the sequence of sending the blocks to block manager. In the end all the blocks should be uniquely
-        // suspended and no missing blocks should exist.
+        // Now randomize the sequence of sending the blocks to block manager. In the end
+        // all the blocks should be uniquely suspended and no missing blocks
+        // should exist.
         for seed in 0..100u8 {
             all_blocks.shuffle(&mut StdRng::from_seed([seed; 32]));
@@ -488,8 +512,8 @@ mod tests {
         }
     }
 
-    /// Creates all the blocks to produce a fully connected DAG from round 0 up to `end_round`.
-    /// Note: this method also returns the genesis blocks.
+    /// Creates all the blocks to produce a fully connected DAG from round 0 up
+    /// to `end_round`. Note: this method also returns the genesis blocks.
     fn dag(context: Arc<Context>, end_round: u64) -> Vec<VerifiedBlock> {
         let mut last_round_blocks = genesis_blocks(context.clone());
         let mut all_blocks = vec![];
@@ -563,7 +587,8 @@ mod tests {
         let mut block_manager =
             BlockManager::new(context.clone(), dag_state, Arc::new(test_verifier));
 
-        // Try to accept blocks from round 2 ~ 5 into block manager. All of them should be suspended.
+        // Try to accept blocks from round 2 ~ 5 into block manager. All of them should
+        // be suspended.
         let (accepted_blocks, missing_refs) = block_manager.try_accept_blocks(
             all_blocks
                 .iter()
@@ -595,7 +620,8 @@ mod tests {
         });
         assert!(missing_refs.is_empty());
 
-        // Other blocks should be rejected and there should be no remaining suspended block.
+        // Other blocks should be rejected and there should be no remaining suspended
+        // block.
         assert!(block_manager.suspended_blocks().is_empty());
     }
 }
diff --git a/consensus/core/src/block_verifier.rs b/consensus/core/src/block_verifier.rs
index 3536968a635..a5a251f1c68 100644
--- a/consensus/core/src/block_verifier.rs
+++ b/consensus/core/src/block_verifier.rs
@@ -22,7 +22,8 @@ pub(crate) trait BlockVerifier: Send + Sync + 'static {
     /// This is called after a block has complete causal history locally,
     /// and is ready to be accepted into the DAG.
     ///
-    /// Caller must make sure ancestors corresponse to block.ancestors() 1-to-1, in the same order.
+    /// Caller must make sure ancestors correspond to block.ancestors() 1-to-1,
+    /// in the same order.
     fn check_ancestors(
         &self,
         block: &VerifiedBlock,
@@ -32,9 +33,9 @@ pub(crate) trait BlockVerifier: Send + Sync + 'static {
 
 /// `SignedBlockVerifier` checks the validity of a block.
 ///
-/// Blocks that fail verification at one honest authority will be rejected by all other honest
-/// authorities as well. The means invalid blocks, and blocks with an invalid ancestor, will never
-/// be accepted into the DAG.
+/// Blocks that fail verification at one honest authority will be rejected by
+/// all other honest authorities as well. This means invalid blocks, and blocks
+/// with an invalid ancestor, will never be accepted into the DAG.
 pub(crate) struct SignedBlockVerifier {
     context: Arc<Context>,
     genesis: BTreeSet<BlockRef>,
@@ -473,9 +474,11 @@ mod test {
             .set_timestamp_ms(1500)
             .build();
         let verified_block = VerifiedBlock::new_for_test(block);
-        assert!(verifier
-            .check_ancestors(&verified_block, &ancestor_blocks)
-            .is_ok());
+        assert!(
+            verifier
+                .check_ancestors(&verified_block, &ancestor_blocks)
+                .is_ok()
+        );
     }
 
     // Block not respecting timestamp invariant.
diff --git a/consensus/core/src/broadcaster.rs b/consensus/core/src/broadcaster.rs
index 0d5f74ccc2c..3ed32aa7d71 100644
--- a/consensus/core/src/broadcaster.rs
+++ b/consensus/core/src/broadcaster.rs
@@ -29,8 +29,9 @@ const BROADCAST_CONCURRENCY: usize = 10;
 
 /// Broadcaster sends newly created blocks to each peer over the network.
 ///
-/// For a peer that lags behind or is disconnected, blocks are buffered and retried until
-/// a limit is reached, then old blocks will get dropped from the buffer.
+/// For a peer that lags behind or is disconnected, blocks are buffered and
+/// retried until a limit is reached, then old blocks will get dropped from the
+/// buffer.
 pub(crate) struct Broadcaster {
     // Background tasks listening for new blocks and pushing them to peers.
     senders: JoinSet<()>,
@@ -67,8 +68,8 @@ impl Broadcaster {
         self.senders.abort_all();
     }
 
-    /// Runs a loop that continously pushes new blocks received from the rx_block_broadcast
-    /// channel to the target peer.
+    /// Runs a loop that continuously pushes new blocks received from the
+    /// rx_block_broadcast channel to the target peer.
     ///
     /// The loop does not exit until the validator is shutting down.
     async fn push_blocks(
@@ -79,9 +80,10 @@ impl Broadcaster {
     ) {
         let peer_hostname = context.committee.authority(peer).hostname.clone();
 
-        // Record the last block to be broadcasted, to retry in case no new block is produced for awhile.
-        // Even if the peer has acknowledged the last block, the block might have been dropped afterwards
-        // if the peer crashed.
+        // Record the last block to be broadcasted, to retry in case no new block is
+        // produced for a while. Even if the peer has acknowledged the last
+        // block, the block might have been dropped afterwards if the peer
+        // crashed.
         let mut last_block: Option<VerifiedBlock> = None;
 
         // Retry last block with an interval.
@@ -89,9 +91,10 @@ impl Broadcaster {
         retry_timer.reset_after(Self::LAST_BLOCK_RETRY_INTERVAL);
         retry_timer.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
 
-        // Use a simple exponential-decay RTT estimator to adjust the timeout for each block sent.
-        // The estimation logic will be removed once the underlying transport switches to use
-        // streaming and the streaming implementation can be relied upon for retries.
+        // Use a simple exponential-decay RTT estimator to adjust the timeout for each
+        // block sent. The estimation logic will be removed once the underlying
+        // transport switches to use streaming and the streaming implementation
+        // can be relied upon for retries.
         const RTT_ESTIMATE_DECAY: f64 = 0.95;
         const TIMEOUT_THRESHOLD_MULTIPLIER: f64 = 2.0;
         const TIMEOUT_RTT_INCREMENT_FACTOR: f64 = 1.6;
@@ -107,7 +110,8 @@ impl Broadcaster {
         ) -> (Result, Elapsed>, Instant, VerifiedBlock) {
             let start = Instant::now();
             let req_timeout = rtt_estimate.mul_f64(TIMEOUT_THRESHOLD_MULTIPLIER);
-            // Use a minimum timeout of 5s so the receiver does not terminate the request too early.
+            // Use a minimum timeout of 5s so the receiver does not terminate the request
+            // too early.
             let network_timeout =
                 std::cmp::max(req_timeout, Broadcaster::MIN_SEND_BLOCK_NETWORK_TIMEOUT);
             let resp = timeout(
diff --git a/consensus/core/src/commit.rs b/consensus/core/src/commit.rs
index 670a1281013..7ec7b0e7f1d 100644
--- a/consensus/core/src/commit.rs
+++ b/consensus/core/src/commit.rs
@@ -31,22 +31,23 @@ pub(crate) const DEFAULT_WAVE_LENGTH: Round = MINIMUM_WAVE_LENGTH;
 /// We need at least one leader round, one voting round, and one decision round.
 pub(crate) const MINIMUM_WAVE_LENGTH: Round = 3;
 
-/// The consensus protocol operates in 'waves'. Each wave is composed of a leader
-/// round, at least one voting round, and one decision round.
+/// The consensus protocol operates in 'waves'. Each wave is composed of a
+/// leader round, at least one voting round, and one decision round.
 pub(crate) type WaveNumber = u32;
 
 /// Versioned representation of a consensus commit.
 ///
-/// Commit is used to persist commit metadata for recovery. It is also exchanged over the network.
-/// To balance being functional and succinct, a field must meet these requirements to be added
-/// to the struct:
+/// Commit is used to persist commit metadata for recovery. It is also exchanged
+/// over the network. To balance being functional and succinct, a field must
+/// meet these requirements to be added to the struct:
 /// - helps with recoverying CommittedSubDag locally and for peers catching up.
 /// - cannot be derived from a sequence of Commits and other persisted values.
 ///
-/// For example, transactions in blocks should not be included in Commit, because they can be
-/// retrieved from blocks specified in Commit. Last committed round per authority also should not
-/// be included, because it can be derived from the latest value in storage and the additional
-/// sequence of Commits.
+/// For example, transactions in blocks should not be included in Commit,
+/// because they can be retrieved from blocks specified in Commit. Last
+/// committed round per authority also should not be included, because it can be
+/// derived from the latest value in storage and the additional sequence of
+/// Commits.
 #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
 #[enum_dispatch(CommitAPI)]
 pub(crate) enum Commit {
@@ -86,11 +87,13 @@ pub(crate) trait CommitAPI {
 }
 
 /// Specifies one consensus commit.
-/// It is stored on disk, so it does not contain blocks which are stored individually.
+/// It is stored on disk, so it does not contain blocks which are stored
+/// individually.
 #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
 pub(crate) struct CommitV1 {
     /// Index of the commit.
-    /// First commit after genesis has an index of 1, then every next commit has an index incremented by 1.
+    /// First commit after genesis has an index of 1, then every next commit has
+    /// an index incremented by 1.
     index: CommitIndex,
     /// Digest of the previous commit.
     /// Set to CommitDigest::MIN for the first commit after genesis.
@@ -123,8 +126,8 @@ impl CommitAPI for CommitV1 {
     }
 }
 
-/// A commit is trusted when it is produced locally or certified by a quorum of authorities.
-/// Blocks referenced by TrustedCommit are assumed to be valid.
+/// A commit is trusted when it is produced locally or certified by a quorum of
+/// authorities. Blocks referenced by TrustedCommit are assumed to be valid.
 /// Only trusted Commit can be sent to execution.
 ///
 /// Note: clone() is relatively cheap with the underlying data refcounted.
@@ -241,19 +244,21 @@ pub struct CommitRef {
     pub digest: CommitDigest,
 }
 
-/// The output of consensus is an ordered list of [`CommittedSubDag`]. The application
-/// can arbitrarily sort the blocks within each sub-dag (but using a deterministic algorithm).
+/// The output of consensus is an ordered list of [`CommittedSubDag`]. The
+/// application can arbitrarily sort the blocks within each sub-dag (but using a
+/// deterministic algorithm).
 #[derive(Clone, PartialEq)]
 pub struct CommittedSubDag {
     /// A reference to the leader of the sub-dag
     pub leader: BlockRef,
     /// All the committed blocks that are part of this sub-dag
     pub blocks: Vec<VerifiedBlock>,
-    /// The timestamp of the commit, obtained from the timestamp of the leader block.
+    /// The timestamp of the commit, obtained from the timestamp of the leader
+    /// block.
     pub timestamp_ms: BlockTimestampMs,
     /// Index of the commit.
-    /// First commit after genesis has a index of 1, then every next commit has a
-    /// index incremented by 1.
+    /// First commit after genesis has an index of 1, then every next commit has
+    /// an index incremented by 1.
     pub commit_index: CommitIndex,
 }
 
@@ -370,9 +375,9 @@ pub(crate) enum Decision {
     Indirect,
 }
 
-/// The status of every leader output by the committers. While the core only cares
-/// about committed leaders, providing a richer status allows for easier debugging,
-/// testing, and composition with advanced commit strategies.
+/// The status of every leader output by the committers. While the core only
+/// cares about committed leaders, providing a richer status allows for easier
+/// debugging, testing, and composition with advanced commit strategies.
 #[derive(Debug, Clone, PartialEq)]
 pub(crate) enum LeaderStatus {
     Commit(VerifiedBlock),
diff --git a/consensus/core/src/commit_observer.rs b/consensus/core/src/commit_observer.rs
index 30d8b5a1344..26af9171a96 100644
--- a/consensus/core/src/commit_observer.rs
+++ b/consensus/core/src/commit_observer.rs
@@ -6,34 +6,38 @@ use std::sync::Arc;
 use parking_lot::RwLock;
 use tokio::sync::mpsc::UnboundedSender;
 
-use crate::commit::CommitAPI;
-use crate::error::{ConsensusError, ConsensusResult};
-use crate::CommitConsumer;
 use crate::{
     block::{timestamp_utc_ms, BlockAPI, VerifiedBlock},
-    commit::{load_committed_subdag_from_store, CommitIndex, CommittedSubDag},
+    commit::{load_committed_subdag_from_store, CommitAPI, CommitIndex, CommittedSubDag},
     context::Context,
     dag_state::DagState,
+    error::{ConsensusError, ConsensusResult},
     linearizer::Linearizer,
     storage::Store,
+    CommitConsumer,
 };
 
 /// Role of CommitObserver
 /// - Called by core when try_commit() returns newly committed leaders.
-/// - The newly committed leaders are sent to commit observer and then commit observer
+/// - The newly committed leaders are sent to commit observer and then commit
+///   observer
 /// gets subdags for each leader via the commit interpreter (linearizer)
-/// - The committed subdags are sent as consensus output via an unbounded tokio channel.
+/// - The committed subdags are sent as consensus output via an unbounded tokio
+///   channel.
 /// No back pressure mechanism is needed as backpressure is handled as input
 /// into consenus.
-/// - Commit metadata including index is persisted in store, before the CommittedSubDag
+/// - Commit metadata including index is persisted in store, before the
+///   CommittedSubDag
 /// is sent to the consumer.
-/// - When CommitObserver is initialized a last processed commit index can be used
+/// - When CommitObserver is initialized a last processed commit index can be
+///   used
 /// to ensure any missing commits are re-sent.
 pub(crate) struct CommitObserver {
     context: Arc<Context>,
     /// Component to deterministically collect subdags for committed leaders.
     commit_interpreter: Linearizer,
-    /// An unbounded channel to send committed sub-dags to the consumer of consensus output.
+    /// An unbounded channel to send committed sub-dags to the consumer of
+    /// consensus output.
     sender: UnboundedSender<CommittedSubDag>,
     /// Persistent storage for blocks, commits and other consensus data.
     store: Arc<dyn Store>,
@@ -104,7 +108,8 @@ impl CommitObserver {
             }
         };
 
-        // We should not send the last processed commit again, so last_processed_commit_index+1
+        // We should not send the last processed commit again, so
+        // last_processed_commit_index+1
         let unsent_commits = self
             .store
             .scan_commits((last_processed_commit_index + 1)..CommitIndex::MAX)
diff --git a/consensus/core/src/context.rs b/consensus/core/src/context.rs
index 5b99f372491..7c7cdd6e044 100644
--- a/consensus/core/src/context.rs
+++ b/consensus/core/src/context.rs
@@ -14,8 +14,8 @@ use tempfile::TempDir;
 use crate::metrics::test_metrics;
 use crate::metrics::Metrics;
 
-/// Context contains per-epoch configuration and metrics shared by all components
-/// of this authority.
+/// Context contains per-epoch configuration and metrics shared by all
+/// components of this authority.
 #[derive(Clone)]
 pub(crate) struct Context {
     /// Index of this authority in the committee.
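To make the wave arithmetic rewrapped in base_committer.rs and commit.rs above concrete, here is a minimal standalone sketch. The formulas for leader_round and wave_number are taken directly from the hunks shown; treating the decision round as the last round of the wave is an assumption inferred from the doc comments, since that function body is not part of this diff.

// Standalone sketch of the wave arithmetic from base_committer.rs / commit.rs.
// Assumes wave_length >= 3 (one leader round, at least one voting round, one
// decision round), matching MINIMUM_WAVE_LENGTH. decision_round() being the
// last round of the wave is an assumption, not code from the patch.
type Round = u32;
type WaveNumber = u32;

struct WaveMath {
    wave_length: u32,  // minimum 3
    round_offset: u32, // non-zero only when pipelining is enabled
}

impl WaveMath {
    /// The leader round is always the first round of the wave.
    fn leader_round(&self, wave: WaveNumber) -> Round {
        (wave * self.wave_length) + self.round_offset
    }

    /// Assumed: the decision round is the last round of the wave.
    fn decision_round(&self, wave: WaveNumber) -> Round {
        self.leader_round(wave) + self.wave_length - 1
    }

    /// Inverse of leader_round, as in BaseCommitter::wave_number.
    fn wave_number(&self, round: Round) -> WaveNumber {
        round.saturating_sub(self.round_offset) / self.wave_length
    }
}

fn main() {
    // With wave_length = 3 and round_offset = 1 (a pipelined committer),
    // wave 2 spans rounds 7 (leader), 8 (voting), and 9 (decision).
    let w = WaveMath { wave_length: 3, round_offset: 1 };
    assert_eq!(w.leader_round(2), 7);
    assert_eq!(w.decision_round(2), 9);
    assert_eq!(w.wave_number(7), 2);
}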
diff --git a/consensus/core/src/core.rs b/consensus/core/src/core.rs
index 96a606bb6b7..8efaa317a64 100644
--- a/consensus/core/src/core.rs
+++ b/consensus/core/src/core.rs
@@ -13,8 +13,6 @@ use parking_lot::RwLock;
 use tokio::sync::{broadcast, watch};
 use tracing::{debug, info, warn};
 
-use crate::stake_aggregator::{QuorumThreshold, StakeAggregator};
-use crate::transaction::TransactionGuard;
 use crate::{
     block::{
         timestamp_utc_ms, Block, BlockAPI, BlockRef, BlockTimestampMs, BlockV1, Round, SignedBlock,
@@ -25,8 +23,9 @@ use crate::{
     context::Context,
     dag_state::DagState,
     error::{ConsensusError, ConsensusResult},
+    stake_aggregator::{QuorumThreshold, StakeAggregator},
     threshold_clock::ThresholdClock,
-    transaction::TransactionConsumer,
+    transaction::{TransactionConsumer, TransactionGuard},
     universal_committer::{
         universal_committer_builder::UniversalCommitterBuilder, UniversalCommitter,
     },
@@ -44,32 +43,39 @@ pub(crate) struct Core {
     context: Arc<Context>,
     /// The threshold clock that is used to keep track of the current round
     threshold_clock: ThresholdClock,
-    /// The consumer to use in order to pull transactions to be included for the next proposals
+    /// The consumer to use in order to pull transactions to be included for the
+    /// next proposals
     transaction_consumer: TransactionConsumer,
-    /// The block manager which is responsible for keeping track of the DAG dependencies when processing new blocks
-    /// and accept them or suspend if we are missing their causal history
+    /// The block manager which is responsible for keeping track of the DAG
+    /// dependencies when processing new blocks, accepting them or suspending
+    /// them if we are missing their causal history
     block_manager: BlockManager,
     /// Used to make commit decisions for leader blocks in the dag.
     committer: UniversalCommitter,
     /// The last produced block
     last_proposed_block: VerifiedBlock,
-    /// The blocks of the last included ancestors per authority. This vector is basically used as a
-    /// watermark in order to include in the next block proposal only ancestors of higher rounds.
-    /// By default, is initialised with `None` values.
+    /// The blocks of the last included ancestors per authority. This vector is
+    /// basically used as a watermark in order to include in the next block
+    /// proposal only ancestors of higher rounds. By default, it is initialised
+    /// with `None` values.
     last_included_ancestors: Vec<Option<BlockRef>>,
-    /// The last decided leader returned from the universal committer. Important to note
-    /// that this does not signify that the leader has been persisted yet as it still has
-    /// to go through CommitObserver and persist the commit in store. On recovery/restart
-    /// the last_decided_leader will be set to the last_commit leader in dag state.
+    /// The last decided leader returned from the universal committer. It is
+    /// important to note that this does not signify that the leader has been
+    /// persisted yet, as it still has to go through CommitObserver and
+    /// persist the commit in store. On recovery/restart
+    /// the last_decided_leader will be set to the last_commit leader in dag
+    /// state.
     last_decided_leader: Slot,
-    /// The commit observer is responsible for observing the commits and collecting
+    /// The commit observer is responsible for observing the commits and
+    /// collecting
     /// + sending subdags over the consensus output channel.
     commit_observer: CommitObserver,
     /// Sender of outgoing signals from Core.
 signals: CoreSignals,
     /// The keypair to be used for block signing
     block_signer: ProtocolKeyPair,
-    /// Keeping track of state of the DAG, including blocks, commits and last committed rounds.
+    /// Keeping track of state of the DAG, including blocks, commits and last
+    /// committed rounds.
     dag_state: Arc<RwLock<DagState>>,
 }
@@ -95,14 +101,16 @@ impl Core {
             .read()
             .get_last_block_for_authority(context.own_index);
-        // Recover the last included ancestor rounds based on the last proposed block. That will allow
-        // to perform the next block proposal by using ancestor blocks of higher rounds and avoid
-        // re-including blocks that have been already included in the last (or earlier) block proposal.
-        // This is only strongly guaranteed for a quorum of ancestors. It is still possible to re-include
-        // a block from an authority which hadn't been added as part of the last proposal hence its
-        // latest included ancestor is not accurately captured here. This is considered a small deficiency,
-        // and it mostly matters just for this next proposal without any actual penalties in performance
-        // or block proposal.
+        // Recover the last included ancestor rounds based on the last proposed block.
+        // That will allow us to perform the next block proposal by using ancestor
+        // blocks of higher rounds and avoid re-including blocks that have been
+        // already included in the last (or earlier) block proposal.
+        // This is only strongly guaranteed for a quorum of ancestors. It is still
+        // possible to re-include a block from an authority which hadn't been
+        // added as part of the last proposal, hence its latest included ancestor
+        // is not accurately captured here. This is considered a small deficiency,
+        // and it mostly matters just for this next proposal without any actual
+        // penalties in performance or block proposal.
         let mut last_included_ancestors = vec![None; context.committee.size()];
         for ancestor in last_proposed_block.ancestors() {
             last_included_ancestors[ancestor.author] = Some(*ancestor);
@@ -136,12 +144,17 @@ impl Core {
         // Recover the last available quorum to correctly advance the threshold clock.
         let last_quorum = self.dag_state.read().last_quorum();
         self.add_accepted_blocks(last_quorum);
-        // Try to commit and propose, since they may not have run after the last storage write.
+        // Try to commit and propose, since they may not have run after the last storage
+        // write.
         self.try_commit().unwrap();
         if self.try_propose(true).unwrap().is_none() {
-            assert!(self.last_proposed_block.round() > GENESIS_ROUND, "At minimum a block of round higher that genesis should have been produced during recovery");
+            assert!(
+                self.last_proposed_block.round() > GENESIS_ROUND,
+                "At minimum a block of round higher than genesis should have been produced during recovery"
+            );
-            // if no new block proposed then just re-broadcast the last proposed one to ensure liveness.
+            // if no new block proposed then just re-broadcast the last proposed one to
+            // ensure liveness.
             self.signals
                 .new_block(self.last_proposed_block.clone())
                 .unwrap();
@@ -150,8 +163,9 @@
         self
     }
-    /// Processes the provided blocks and accepts them if possible when their causal history exists.
-    /// The method returns the references of parents that are unknown and need to be fetched.
+    /// Processes the provided blocks and accepts them if possible when their
+    /// causal history exists. The method returns the references of parents
+    /// that are unknown and need to be fetched.
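The recovery path above replays the last quorum into the threshold clock. A deliberately simplified model of such a clock, with illustrative names (the real `ThresholdClock` aggregates stake via `StakeAggregator`, not authority counts):

```rust
// The clock sits at `round`; once a quorum of authorities is seen at that
// round, it advances to round + 1.
struct MiniThresholdClock {
    round: u32,
    seen: Vec<bool>, // one slot per authority, reset on every advance
    quorum: usize,   // e.g. 2f + 1 authorities in an equal-stake committee
}

impl MiniThresholdClock {
    // Returns Some(new_round) when the clock advances.
    fn add_block(&mut self, author: usize, block_round: u32) -> Option<u32> {
        if block_round > self.round {
            // A block from a future round drags the clock forward directly.
            self.seen.iter_mut().for_each(|s| *s = false);
            self.round = block_round;
        }
        if block_round == self.round {
            self.seen[author] = true;
        }
        if self.seen.iter().filter(|s| **s).count() >= self.quorum {
            // Quorum reached at this round: advance and reset tracking.
            self.round += 1;
            self.seen.iter_mut().for_each(|s| *s = false);
            return Some(self.round);
        }
        None
    }
}
```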
 pub(crate) fn add_blocks(
         &mut self,
         blocks: Vec<VerifiedBlock>,
@@ -181,10 +195,11 @@
         Ok(missing_blocks)
     }
-    /// Adds/processed all the newly `accepted_blocks`. We basically try to move the threshold clock and add them to the
-    /// pending ancestors list.
+    /// Adds/processes all the newly `accepted_blocks`. We basically try to move
+    /// the threshold clock and add them to the pending ancestors list.
     fn add_accepted_blocks(&mut self, accepted_blocks: Vec<VerifiedBlock>) {
-        // Advance the threshold clock. If advanced to a new round then send a signal that a new quorum has been received.
+        // Advance the threshold clock. If advanced to a new round then send a signal
+        // that a new quorum has been received.
         if let Some(new_round) = self
             .threshold_clock
             .add_blocks(accepted_blocks.iter().map(|b| b.reference()).collect())
@@ -201,7 +216,8 @@
             .set(self.threshold_clock.get_round() as i64);
     }
-    /// Force creating a new block for the dictated round. This is used when a leader timeout occurs.
+    /// Force creating a new block for the dictated round. This is used when a
+    /// leader timeout occurs.
     pub(crate) fn force_new_block(
         &mut self,
         round: Round,
@@ -214,8 +230,8 @@
     }
     // Attempts to create a new block, persist and propose it to all peers.
-    // When force is true, ignore if leader from the last round exists among ancestors and if
-    // the minimum round delay has passed.
+    // When force is true, ignore if leader from the last round exists among
+    // ancestors and if the minimum round delay has passed.
     fn try_propose(&mut self, force: bool) -> ConsensusResult<Option<VerifiedBlock>> {
         if let Some(block) = self.try_new_block(force) {
             self.signals.new_block(block.clone())?;
@@ -226,8 +242,9 @@
         Ok(None)
     }
-    /// Attempts to propose a new block for the next round. If a block has already proposed for latest
-    /// or earlier round, then no block is created and None is returned.
+    /// Attempts to propose a new block for the next round. If a block has
+    /// already been proposed for the latest or an earlier round, then no block
+    /// is created and None is returned.
     fn try_new_block(&mut self, force: bool) -> Option<VerifiedBlock> {
         let _scope = monitored_scope("Core::try_new_block");
         let _s = self
@@ -245,8 +262,9 @@
         let now = timestamp_utc_ms();
-        // Create a new block either because we want to "forcefully" propose a block due to a leader timeout,
-        // or because we are actually ready to produce the block (leader exists and min delay has passed).
+        // Create a new block either because we want to "forcefully" propose a block due
+        // to a leader timeout, or because we are actually ready to produce the
+        // block (leader exists and min delay has passed).
         if !force {
             if !self.last_quorum_leaders_exist() {
                 return None;
             }
@@ -258,17 +276,20 @@
             }
         }
-        // TODO: produce the block for the clock_round. As the threshold clock can advance many rounds at once (ex
-        // because we synchronized a bulk of blocks) we can decide here whether we want to produce blocks per round
-        // or just the latest one. From earlier experiments I saw only benefit on proposing for the penultimate round
-        // only when the validator was supposed to be the leader of the round - so we bring down the missed leaders.
+        // TODO: produce the block for the clock_round. As the threshold clock can
+        // advance many rounds at once (ex because we synchronized a bulk of
+        // blocks) we can decide here whether we want to produce blocks per round
+        // or just the latest one.
From earlier experiments I saw only benefit on
+        // proposing for the penultimate round only when the validator was
+        // supposed to be the leader of the round - so we bring down the missed leaders.
         // Probably proposing for all the intermediate rounds might not make much sense.
         // Consume the ancestors to be included in proposal
         let ancestors = self.ancestors_to_propose(clock_round, now);
-        // Consume the next transactions to be included. Do not drop the guards yet as this would acknowledge
-        // the inclusion of transactions. Just let this be done in the end of the method.
+        // Consume the next transactions to be included. Do not drop the guards yet as
+        // this would acknowledge the inclusion of transactions. Just let this
+        // be done at the end of the method.
         let transaction_guards = self.transaction_consumer.next();
         let transactions = transaction_guards
             .iter()
@@ -371,8 +392,10 @@
         self.block_manager.missing_blocks()
     }
-    /// Retrieves the next ancestors to propose to form a block at `clock_round` round. Also, the `block_timestamp` is provided
-    /// to sanity check that everything that goes into the proposal is ensured to have a timestamp < block_timestamp
+    /// Retrieves the next ancestors to propose to form a block at `clock_round`
+    /// round. Also, the `block_timestamp` is provided to sanity check that
+    /// everything that goes into the proposal is ensured to have a timestamp <
+    /// block_timestamp
     fn ancestors_to_propose(
         &mut self,
         clock_round: Round,
@@ -414,7 +437,11 @@
             quorum.add(ancestor.author(), &self.context.committee);
         }
-        assert!(quorum.reached_threshold(&self.context.committee), "Fatal error, quorum not reached for parent round when proposing for round {}. Possible mismatch between DagState and Core.", clock_round);
+        assert!(
+            quorum.reached_threshold(&self.context.committee),
+            "Fatal error, quorum not reached for parent round when proposing for round {}. Possible mismatch between DagState and Core.",
+            clock_round
+        );
         // Ensure that timestamps are correct
         ancestors.iter().for_each(|block|{
             assert!(block.timestamp_ms() <= block_timestamp, "Violation, ancestor block timestamp {} greater than our timestamp {block_timestamp}", block.timestamp_ms());
         });
-        // Compress the references in the block. We don't want to include an ancestors that already referenced by other blocks
-        // we are about to include.
+        // Compress the references in the block. We don't want to include ancestors
+        // that are already referenced by other blocks we are about to include.
         let all_ancestors_parents: HashSet<&BlockRef> = ancestors
             .iter()
             .flat_map(|block| block.ancestors())
             .collect();
-        // Keep block refs to propose in a map, so even if somehow a byzantine node managed to provide blocks that don't
-        // form a valid chain we can still pick one block per author.
+        // Keep block refs to propose in a map, so even if somehow a byzantine node
+        // managed to provide blocks that don't form a valid chain we can still
+        // pick one block per author.
         let mut to_propose = BTreeMap::new();
         for block in &ancestors {
             if !all_ancestors_parents.contains(&block.reference()) {
@@ -440,8 +468,8 @@
         }
         assert!(!to_propose.is_empty());
-        // always include our last proposed block in front of the vector and make sure that we do not
-        // double insert.
+        // always include our last proposed block in front of the vector and make sure
+        // that we do not double insert.
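The compression step above can be isolated into a small function. A sketch under simplified types, keeping the same two ideas as the diff: skip refs already covered by other ancestors, and keep at most one ref per author:

```rust
use std::collections::{BTreeMap, HashSet};

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Ref {
    author: usize,
    round: u32,
}

struct Block {
    reference: Ref,
    ancestors: Vec<Ref>,
}

// Drop any candidate ancestor that another candidate already references:
// the proposal reaches it transitively anyway.
fn compress(candidates: &[Block]) -> Vec<Ref> {
    let covered: HashSet<Ref> = candidates
        .iter()
        .flat_map(|b| b.ancestors.iter().copied())
        .collect();
    // Keyed by author, so a byzantine chain still yields one ref per author.
    let mut to_propose = BTreeMap::new();
    for b in candidates {
        if !covered.contains(&b.reference) {
            to_propose.insert(b.reference.author, b.reference);
        }
    }
    to_propose.into_values().collect()
}
```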
 let mut result = vec![self.last_proposed_block.reference()];
         for (authority_index, block_ref) in to_propose {
             if authority_index != self.context.own_index {
@@ -453,16 +481,18 @@
     }
     /// Checks whether all the leaders of the previous quorum exist.
-    /// TODO: we can leverage some additional signal here in order to more cleverly manipulate later the leader timeout
-    /// Ex if we already have one leader - the first in order - we might don't want to wait as much.
+    /// TODO: we can leverage some additional signal here in order to more
+    /// cleverly manipulate the leader timeout later. Ex if we already have
+    /// one leader - the first in order - we might not want to wait as much.
     fn last_quorum_leaders_exist(&self) -> bool {
         let quorum_round = self.threshold_clock.get_round().saturating_sub(1);
         let dag_state = self.dag_state.read();
         for leader in self.leaders(quorum_round) {
             // Search for all the leaders. If at least one is not found, then return false.
-            // A linear search should be fine here as the set of elements is not expected to be small enough and more sophisticated
-            // data structures might not give us much here.
+            // A linear search should be fine here as the set of elements is expected to
+            // be small, and more sophisticated data structures might not
+            // give us much here.
             if !dag_state.contains_cached_block_at_slot(leader) {
                 return false;
             }
@@ -494,7 +524,8 @@
     }
 }
-/// Senders of signals from Core, for outputs and events (ex new block produced).
+/// Senders of signals from Core, for outputs and events (ex new block
+/// produced).
 pub(crate) struct CoreSignals {
     tx_block_broadcast: broadcast::Sender<VerifiedBlock>,
     new_round_sender: watch::Sender<Round>,
@@ -524,8 +555,9 @@
         (me, receivers)
     }
-    /// Sends a signal to all the waiters that a new block has been produced. The method will return
-    /// true if block has reached even one subscriber, false otherwise.
+    /// Sends a signal to all the waiters that a new block has been produced.
+    /// The method will return true if the block has reached even one
+    /// subscriber, false otherwise.
     pub fn new_block(&self, block: VerifiedBlock) -> ConsensusResult<()> {
         // When there is only one authority in committee, it is unnecessary to broadcast
         // the block which will fail anyway without subscribers to the signal.
@@ -540,15 +572,17 @@
         Ok(())
     }
-    /// Sends a signal that threshold clock has advanced to new round. The `round_number` is the round at which the
-    /// threshold clock has advanced to.
+    /// Sends a signal that threshold clock has advanced to new round. The
+    /// `round_number` is the round to which the threshold clock has
+    /// advanced.
     pub fn new_round(&mut self, round_number: Round) {
         let _ = self.new_round_sender.send_replace(round_number);
     }
 }
 /// Receivers of signals from Core.
-/// Intentially un-clonable. Comonents should only subscribe to channels they need.
+/// Intentionally un-cloneable. Components should only subscribe to channels
+/// they need.
 pub(crate) struct CoreSignalsReceivers {
     tx_block_broadcast: broadcast::Sender<VerifiedBlock>,
     new_round_receiver: watch::Receiver<Round>,
@@ -577,8 +611,7 @@ mod test {
     use super::*;
     use crate::{
-        block::genesis_blocks,
-        block::TestBlock,
+        block::{genesis_blocks, TestBlock},
         block_verifier::NoopBlockVerifier,
         commit::CommitAPI as _,
         storage::{mem_store::MemStore, Store, WriteBatch},
@@ -586,7 +619,8 @@
         CommitConsumer, CommitIndex,
     };
-    /// Recover Core and continue proposing from the last round which forms a quorum.
+    /// Recover Core and continue proposing from the last round which forms a
+    /// quorum.
     #[tokio::test]
     async fn test_core_recover_from_store_for_full_round() {
         telemetry_subscribers::init_for_testing();
@@ -596,7 +630,8 @@ mod test {
         let (_transaction_client, tx_receiver) = TransactionClient::new(context.clone());
         let transaction_consumer = TransactionConsumer::new(tx_receiver, context.clone(), None);
-        // Create test blocks for all the authorities for 4 rounds and populate them in store
+        // Create test blocks for all the authorities for 4 rounds and populate them in
+        // store
         let mut last_round_blocks = genesis_blocks(context.clone());
         let mut all_blocks: Vec<VerifiedBlock> = last_round_blocks.clone();
         for round in 1..=4 {
@@ -679,16 +714,17 @@
             .expect("last commit should be set");
         // There were no commits prior to the core starting up but there was completed
-        // rounds up to and including round 4. So we should commit leaders in round 1 & 2
-        // as soon as the new block for round 5 is proposed.
+        // rounds up to and including round 4. So we should commit leaders in round 1 &
+        // 2 as soon as the new block for round 5 is proposed.
         assert_eq!(last_commit.index(), 2);
         assert_eq!(dag_state.read().last_commit_index(), 2);
         let all_stored_commits = store.scan_commits(0..CommitIndex::MAX).unwrap();
         assert_eq!(all_stored_commits.len(), 2);
     }
-    /// Recover Core and continue proposing when having a partial last round which doesn't form a quorum and we haven't
-    /// proposed for that round yet.
+    /// Recover Core and continue proposing when having a partial last round
+    /// which doesn't form a quorum and we haven't proposed for that round
+    /// yet.
     #[tokio::test]
     async fn test_core_recover_from_store_for_partial_round() {
         telemetry_subscribers::init_for_testing();
@@ -705,7 +741,8 @@
         for round in 1..=4 {
             let mut this_round_blocks = Vec::new();
-            // For round 4 only produce f+1 blocks only skip our validator and that of position 1 from creating blocks.
+            // For round 4 produce only f+1 blocks, skipping our validator and the one at
+            // position 1 from creating blocks.
             let authorities_to_skip = if round == 4 {
                 context.committee.validity_threshold() as usize
             } else {
@@ -896,7 +933,8 @@
                 .expect("Block should be found amongst genesis blocks");
         }
-        // Try to propose again - with or without ignore leaders check, it will not return any block
+        // Try to propose again - with or without the ignore leaders check, it will not
+        // return any block
         assert!(core.try_propose(false).unwrap().is_none());
         assert!(core.try_propose(true).unwrap().is_none());
@@ -959,7 +997,8 @@
         // attempt to create a block - none will be produced.
         assert!(core.try_propose(false).unwrap().is_none());
-        // Adding another block now forms a quorum for round 1, so block at round 2 will proposed
+        // Adding another block now forms a quorum for round 1, so the block at round 2
+        // will be proposed
         let block_3 = VerifiedBlock::new_for_test(TestBlock::new(1, 2).build());
         expected_ancestors.insert(block_3.reference());
         // Wait for min round delay to allow blocks to be proposed.
@@ -990,13 +1029,15 @@
         // Create the cores for all authorities
         let mut all_cores = create_cores(vec![1, 1, 1, 1]);
-        // Create blocks for rounds 1..=3 from all Cores except last Core of authority 3, so we miss the block from it.
As - // it will be the leader of round 3 then no-one will be able to progress to round 4 unless we explicitly trigger + // Create blocks for rounds 1..=3 from all Cores except last Core of authority + // 3, so we miss the block from it. As it will be the leader of round 3 + // then no-one will be able to progress to round 4 unless we explicitly trigger // the block creation. // create the cores and their signals for all the authorities let (_last_core, cores) = all_cores.split_last_mut().unwrap(); - // Now iterate over a few rounds and ensure the corresponding signals are created while network advances + // Now iterate over a few rounds and ensure the corresponding signals are + // created while network advances let mut last_round_blocks = Vec::new(); for round in 1..=3 { let mut this_round_blocks = Vec::new(); @@ -1023,15 +1064,16 @@ mod test { last_round_blocks = this_round_blocks; } - // Try to create the blocks for round 4 by calling the try_propose() method. No block should be created as the - // leader - authority 3 - hasn't proposed any block. + // Try to create the blocks for round 4 by calling the try_propose() method. No + // block should be created as the leader - authority 3 - hasn't proposed + // any block. for (core, _, _, _, _) in cores.iter_mut() { core.add_blocks(last_round_blocks.clone()).unwrap(); assert!(core.try_propose(false).unwrap().is_none()); } - // Now try to create the blocks for round 4 via the leader timeout method which should - // ignore any leader checks or min round delay. + // Now try to create the blocks for round 4 via the leader timeout method which + // should ignore any leader checks or min round delay. for (core, _, _, _, store) in cores.iter_mut() { assert!(core.force_new_block(4).unwrap().is_some()); assert_eq!(core.last_proposed_round(), 4); @@ -1057,7 +1099,8 @@ mod test { // create the cores and their signals for all the authorities let mut cores = create_cores(vec![1, 1, 1, 1]); - // Now iterate over a few rounds and ensure the corresponding signals are created while network advances + // Now iterate over a few rounds and ensure the corresponding signals are + // created while network advances let mut last_round_blocks = Vec::new(); for round in 1..=10 { let mut this_round_blocks = Vec::new(); @@ -1066,10 +1109,12 @@ mod test { // Wait for min round delay to allow blocks to be proposed. sleep(default_params.min_round_delay).await; // add the blocks from last round - // this will trigger a block creation for the round and a signal should be emitted + // this will trigger a block creation for the round and a signal should be + // emitted core.add_blocks(last_round_blocks.clone()).unwrap(); - // A "new round" signal should be received given that all the blocks of previous round have been processed + // A "new round" signal should be received given that all the blocks of previous + // round have been processed let new_round = receive( Duration::from_secs(1), signal_receivers.new_round_receiver(), @@ -1145,7 +1190,8 @@ mod test { continue; } - // try to propose to ensure that we are covering the case where we miss the leader authority 3 + // try to propose to ensure that we are covering the case where we miss the + // leader authority 3 core.add_blocks(last_round_blocks.clone()).unwrap(); core.force_new_block(round).unwrap(); @@ -1160,17 +1206,20 @@ mod test { all_blocks.extend(this_round_blocks); } - // Now send all the produced blocks to core of authority 3. It should produce a new block. 
If no compression would
-        // be applied the we should expect all the previous blocks to be referenced from round 0..=10. However, since compression
-        // is applied only the last round's (10) blocks should be referenced + the authority's block of round 0.
+        // Now send all the produced blocks to core of authority 3. It should produce a
+        // new block. If no compression were applied then we should expect
+        // all the previous blocks to be referenced from round 0..=10. However, since
+        // compression is applied only the last round's (10) blocks should be
+        // referenced + the authority's block of round 0.
         let (core, _, _, _, store) = &mut cores[excluded_authority];
         // Wait for min round delay to allow blocks to be proposed.
         sleep(default_params.min_round_delay).await;
         // add blocks to trigger proposal.
         core.add_blocks(all_blocks).unwrap();
-        // Assert that a block has been created for round 11 and it references to blocks of round 10 for the other peers, and
-        // to round 1 for its own block (created after recovery).
+        // Assert that a block has been created for round 11 and it references blocks
+        // of round 10 for the other peers, and round 1 for its own block
+        // (created after recovery).
         let block = core.last_proposed_block();
         assert_eq!(block.round(), 11);
         assert_eq!(block.ancestors().len(), 4);
@@ -1195,8 +1244,9 @@
         assert_eq!(all_stored_commits.len(), 6);
     }
-    /// Creates cores for the specified number of authorities for their corresponding stakes. The method returns the
-    /// cores and their respective signal receivers are returned in `AuthorityIndex` order asc.
+    /// Creates cores for the specified number of authorities with their
+    /// corresponding stakes. The cores and their respective signal receivers
+    /// are returned in ascending `AuthorityIndex` order.
     // TODO: return a test fixture instead.
     fn create_cores(
         authorities: Vec<Stake>,
diff --git a/consensus/core/src/core_thread.rs b/consensus/core/src/core_thread.rs
index 1fc4dbac91f..eca39694d69 100644
--- a/consensus/core/src/core_thread.rs
+++ b/consensus/core/src/core_thread.rs
@@ -39,7 +39,7 @@ pub enum CoreError {
 #[async_trait]
 pub trait CoreThreadDispatcher: Sync + Send + 'static {
     async fn add_blocks(&self, blocks: Vec<VerifiedBlock>)
-    -> Result<BTreeSet<BlockRef>, CoreError>;
+        -> Result<BTreeSet<BlockRef>, CoreError>;
     async fn force_new_block(&self, round: Round) -> Result<(), CoreError>;
@@ -53,7 +53,8 @@ pub(crate) struct CoreThreadHandle {
 impl CoreThreadHandle {
     pub async fn stop(self) {
-        // drop the sender, that will force all the other weak senders to not able to upgrade.
+        // drop the sender, that will force all the other weak senders to not be able
+        // to upgrade.
         drop(self.sender);
         self.join_handle.await.ok();
     }
@@ -121,8 +122,9 @@ impl ChannelCoreThreadDispatcher {
             "ConsensusCoreThread"
         );
-        // Explicitly using downgraded sender in order to allow sharing the CoreThreadDispatcher but
-        // able to shutdown the CoreThread by dropping the original sender.
+        // Explicitly using downgraded sender in order to allow sharing the
+        // CoreThreadDispatcher but still be able to shut down the CoreThread by
+        // dropping the original sender.
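The downgraded-sender trick in the last hunk works because a weak sender cannot keep the channel alive on its own. A runnable sketch of the pattern with plain tokio channels (the crate uses its own channel wrapper, but the mechanics are the same):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(8);

    // Shareable handle that does not keep the channel open by itself.
    let weak = tx.downgrade();

    let worker = tokio::spawn(async move {
        // The loop ends once every strong sender is gone.
        while let Some(v) = rx.recv().await {
            println!("got {v}");
        }
        println!("core thread shutting down");
    });

    if let Some(strong) = weak.upgrade() {
        strong.send(1).await.unwrap();
    } // the temporary strong sender is dropped here

    drop(tx); // dropping the original sender stops the worker
    worker.await.unwrap();
}
```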
 let dispatcher = ChannelCoreThreadDispatcher {
             sender: sender.downgrade(),
             context,
diff --git a/consensus/core/src/dag_state.rs b/consensus/core/src/dag_state.rs
index 381d2b438b2..b965fa8c88b 100644
--- a/consensus/core/src/dag_state.rs
+++ b/consensus/core/src/dag_state.rs
@@ -12,19 +12,21 @@ use std::{
 use consensus_config::AuthorityIndex;
 use tracing::error;
-use crate::block::GENESIS_ROUND;
-use crate::stake_aggregator::{QuorumThreshold, StakeAggregator};
 use crate::{
-    block::{genesis_blocks, BlockAPI, BlockDigest, BlockRef, Round, Slot, VerifiedBlock},
+    block::{
+        genesis_blocks, BlockAPI, BlockDigest, BlockRef, Round, Slot, VerifiedBlock, GENESIS_ROUND,
+    },
     commit::{CommitAPI as _, CommitDigest, CommitIndex, CommitRef, TrustedCommit},
     context::Context,
+    stake_aggregator::{QuorumThreshold, StakeAggregator},
     storage::{Store, WriteBatch},
 };
 /// DagState provides the API to write and read accepted blocks from the DAG.
 /// Only uncommitted and last committed blocks are cached in memory.
 /// The rest of blocks are stored on disk.
-/// Refs to cached blocks and additional refs are cached as well, to speed up existence checks.
+/// Refs to cached blocks and additional refs are cached as well, to speed up
+/// existence checks.
 ///
 /// Note: DagState should be wrapped with Arc<RwLock<DagState>>, to allow
 /// concurrent access from multiple components.
@@ -180,8 +182,9 @@ impl DagState {
             .expect("Exactly one element should be returned")
     }
-    /// Gets blocks by checking genesis, cached recent blocks in memory, then storage.
-    /// An element is None when the corresponding block is not found.
+    /// Gets blocks by checking genesis, cached recent blocks in memory, then
+    /// storage. An element is None when the corresponding block is not
+    /// found.
     pub(crate) fn get_blocks(&self, block_refs: &[BlockRef]) -> Vec<Option<VerifiedBlock>> {
         let mut blocks = vec![None; block_refs.len()];
         let mut missing = Vec::new();
@@ -228,10 +231,12 @@
     }
     /// Gets all uncommitted blocks in a slot.
-    /// Uncommitted blocks must exist in memory, so only in-memory blocks are checked.
+    /// Uncommitted blocks must exist in memory, so only in-memory blocks are
+    /// checked.
     pub(crate) fn get_uncommitted_blocks_at_slot(&self, slot: Slot) -> Vec<VerifiedBlock> {
-        // TODO: either panic below when the slot is at or below the last committed round,
-        // or support reading from storage while limiting storage reads to edge cases.
+        // TODO: either panic below when the slot is at or below the last committed
+        // round, or support reading from storage while limiting storage reads
+        // to edge cases.
         let mut blocks = vec![];
         for (_block_ref, block) in self.recent_blocks.range((
@@ -244,7 +249,8 @@
     }
     /// Gets all uncommitted blocks in a round.
-    /// Uncommitted blocks must exist in memory, so only in-memory blocks are checked.
+    /// Uncommitted blocks must exist in memory, so only in-memory blocks are
+    /// checked.
     pub(crate) fn get_uncommitted_blocks_at_round(&self, round: Round) -> Vec<VerifiedBlock> {
         if round <= self.last_commit_round() {
             panic!("Round {} have committed blocks!", round);
@@ -306,8 +312,9 @@
         blocks.first().cloned().unwrap()
     }
-    /// Retrieves the last block proposed for the specified `authority`. If no block is found in cache
-    /// then the genesis block is returned as no other block has been received from that authority.
+    /// Retrieves the last block proposed for the specified `authority`. If no
+    /// block is found in cache then the genesis block is returned as no
+    /// other block has been received from that authority.
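The tiered read order documented above (genesis, then the recent in-memory cache, then storage) looks roughly like this in miniature, with stand-in types:

```rust
use std::collections::{BTreeMap, HashMap};

type Ref = (u32, usize); // (round, author) - digest omitted for brevity

// Illustrative three-tier lookup. None marks a block we do not have at all.
struct Tiers {
    genesis: HashMap<Ref, String>,
    recent: BTreeMap<Ref, String>, // in-memory cache of recent blocks
    disk: HashMap<Ref, String>,    // stand-in for persistent storage
}

impl Tiers {
    fn get_blocks(&self, refs: &[Ref]) -> Vec<Option<String>> {
        refs.iter()
            .map(|r| {
                self.genesis
                    .get(r)
                    .or_else(|| self.recent.get(r))
                    .or_else(|| self.disk.get(r))
                    .cloned()
            })
            .collect()
    }
}
```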
 pub(crate) fn get_last_block_for_authority(&self, authority: AuthorityIndex) -> VerifiedBlock {
         if let Some(last) = self.recent_refs[authority].last() {
             return self
@@ -327,10 +334,12 @@
     }
     /// Returns the last block proposed per authority with `round < end_round`.
-    /// The method is guaranteed to return results only when the `end_round` is not earlier of the
-    /// available cached data for each authority, otherwise the method will panic - it's the caller's
-    /// responsibility to ensure that is not requesting filtering for earlier rounds .
-    /// In case of equivocation for an authority's last slot only one block will be returned (the last in order).
+    /// The method is guaranteed to return results only when the `end_round` is
+    /// not earlier than the available cached data for each authority,
+    /// otherwise the method will panic - it's the caller's responsibility
+    /// to ensure that it is not requesting filtering for earlier rounds.
+    /// In case of equivocation for an authority's last slot only one block will
+    /// be returned (the last in order).
     pub(crate) fn get_last_cached_block_per_authority(
         &self,
         end_round: Round,
@@ -357,7 +366,9 @@
             let last_evicted_round = self.authority_evict_round(authority_index);
             if end_round.saturating_sub(1) <= last_evicted_round {
-                panic!("Attempted to request for blocks of rounds < {end_round}, when the last evicted round is {last_evicted_round} for authority {authority_index}", );
+                panic!(
+                    "Attempted to request for blocks of rounds < {end_round}, when the last evicted round is {last_evicted_round} for authority {authority_index}",
+                );
             }
             if let Some(block_ref) = block_refs
@@ -383,8 +394,9 @@
         blocks.into_iter().collect()
     }
-    /// Checks whether a block exists in the slot. The method checks only against the cached data.
-    /// If the user asks for a slot that is not within the cached data then a panic is thrown.
+    /// Checks whether a block exists in the slot. The method checks only
+    /// against the cached data. If the user asks for a slot that is not
+    /// within the cached data then a panic is thrown.
     pub(crate) fn contains_cached_block_at_slot(&self, slot: Slot) -> bool {
         // Always return true for genesis slots.
         if slot.round == GENESIS_ROUND {
@@ -405,8 +417,9 @@
         result.next().is_some()
     }
-    /// Checks whether the required blocks are in cache, if exist, or otherwise will check in store. The method is not caching
-    /// back the results, so its expensive if keep asking for cache missing blocks.
+    /// Checks whether the required blocks are in cache, if they exist, or
+    /// otherwise will check in store. The method is not caching back the
+    /// results, so it's expensive if callers keep asking for cache-missing
+    /// blocks.
     pub(crate) fn contains_blocks(&self, block_refs: Vec<BlockRef>) -> Vec<bool> {
         let mut exist = vec![false; block_refs.len()];
         let mut missing = Vec::new();
@@ -417,9 +430,10 @@
                 exist[index] = true;
             } else if recent_refs.is_empty() || recent_refs.last().unwrap().round < block_ref.round
             {
-                // Optimization: recent_refs contain the most recent blocks known to this authority.
-                // If a block ref is not found there and has a higher round, it definitely is
-                // missing from this authority and there is no need to check disk.
+                // Optimization: recent_refs contain the most recent blocks known to this
+                // authority. If a block ref is not found there and has a higher
+                // round, it definitely is missing from this authority and there
+                // is no need to check disk.
 exist[index] = false;
             } else {
                 missing.push((index, block_ref));
             }
@@ -547,8 +561,8 @@ impl DagState {
         self.last_committed_rounds.clone()
     }
-    /// After each flush, DagState becomes persisted in storage and it expected to recover
-    /// all internal states from storage after restarts.
+    /// After each flush, DagState becomes persisted in storage and it is
+    /// expected to recover all internal states from storage after restarts.
     pub(crate) fn flush(&mut self) {
         let _s = self
             .context
@@ -577,7 +591,8 @@
             .dag_state_store_write_count
             .inc();
-        // Clean up old cached data. After flushing, all cached blocks are guaranteed to be persisted.
+        // Clean up old cached data. After flushing, all cached blocks are guaranteed to
+        // be persisted.
         for (authority_refs, last_committed_round) in self
             .recent_refs
             .iter_mut()
@@ -594,11 +609,12 @@
         }
     }
-    /// Detects and returns the blocks of the round that forms the last quorum. The method will return
-    /// the quorum even if that's genesis.
+    /// Detects and returns the blocks of the round that forms the last quorum.
+    /// The method will return the quorum even if that's genesis.
     pub(crate) fn last_quorum(&self) -> Vec<VerifiedBlock> {
-        // the quorum should exist either on the highest accepted round or the one before. If we fail to detect
-        // a quorum then it means that our DAG has advanced with missing causal history.
+        // the quorum should exist either on the highest accepted round or the one
+        // before. If we fail to detect a quorum then it means that our DAG has
+        // advanced with missing causal history.
         for round in
             (self.highest_accepted_round.saturating_sub(1)..=self.highest_accepted_round).rev()
         {
@@ -607,7 +623,8 @@
             }
             let mut quorum = StakeAggregator::<QuorumThreshold>::new();
-            // Since the minimum wave length is 3 we expect to find a quorum in the uncommitted rounds.
+            // Since the minimum wave length is 3 we expect to find a quorum in the
+            // uncommitted rounds.
             let blocks = self.get_uncommitted_blocks_at_round(round);
             for block in &blocks {
                 if quorum.add(block.author(), &self.context.committee) {
@@ -623,7 +640,8 @@
         self.genesis.values().cloned().collect()
     }
-    /// Highest round where a block is committed, which is last commit's leader round.
+    /// Highest round where a block is committed, which is last commit's leader
+    /// round.
     fn last_commit_round(&self) -> Round {
         match &self.last_commit {
             Some(commit) => commit.leader().round,
@@ -631,16 +649,17 @@
         }
     }
-    /// The last round that got evicted after a cache clean up operation. After this round we are
-    /// guaranteed to have all the produced blocks from that authority. For any round that is
-    /// <= `last_evicted_round` we don't have such guarantees as out of order blocks might exist.
+    /// The last round that got evicted after a cache clean up operation. After
+    /// this round we are guaranteed to have all the produced blocks from
+    /// that authority. For any round that is <= `last_evicted_round` we
+    /// don't have such guarantees as out of order blocks might exist.
     fn authority_evict_round(&self, authority_index: AuthorityIndex) -> Round {
         let commit_round = self.last_committed_rounds[authority_index];
         Self::evict_round(commit_round, self.cached_rounds)
    }
-    /// Calculates the last eviction round based on the provided `commit_round`. Any blocks with
-    /// round <= the evict round have been cleaned up.
+    /// Calculates the last eviction round based on the provided `commit_round`.
+    /// Any blocks with round <= the evict round have been cleaned up.
     fn evict_round(commit_round: Round, cached_rounds: Round) -> Round {
         commit_round.saturating_sub(cached_rounds)
     }
@@ -648,14 +667,15 @@
 #[cfg(test)]
 mod test {
-    use parking_lot::RwLock;
     use std::vec;
+    use parking_lot::RwLock;
+
     use super::*;
-    use crate::test_dag::build_dag;
     use crate::{
         block::{BlockDigest, BlockRef, BlockTimestampMs, TestBlock, VerifiedBlock},
         storage::{mem_store::MemStore, WriteBatch},
+        test_dag::build_dag,
     };
     #[test]
@@ -700,13 +720,15 @@ mod test {
         // Check uncommitted blocks that do not exist.
         let last_ref = blocks.keys().last().unwrap();
-        assert!(dag_state
-            .get_block(&BlockRef::new(
-                last_ref.round,
-                last_ref.author,
-                BlockDigest::MIN
-            ))
-            .is_none());
+        assert!(
+            dag_state
+                .get_block(&BlockRef::new(
+                    last_ref.round,
+                    last_ref.author,
+                    BlockDigest::MIN
+                ))
+                .is_none()
+        );
         // Check slots with uncommitted blocks.
         for round in 1..=num_rounds {
@@ -759,9 +781,11 @@
         }
         // Check rounds without uncommitted blocks.
-        assert!(dag_state
-            .get_uncommitted_blocks_at_round(non_existent_round)
-            .is_empty());
+        assert!(
+            dag_state
+                .get_uncommitted_blocks_at_round(non_existent_round)
+                .is_empty()
+        );
     }
     #[test]
@@ -945,7 +969,8 @@
             }
         }
-        // Now write in store the blocks from first 4 rounds and the rest to the dag state
+        // Now write in store the blocks from the first 4 rounds and the rest to the
+        // dag state
         blocks.clone().into_iter().for_each(|block| {
             if block.round() <= 4 {
                 store
@@ -956,8 +981,9 @@
             }
         });
-        // Now when trying to query whether we have all the blocks, we should successfully retrieve a positive answer
-        // where the blocks of first 4 round should be found in DagState and the rest in store.
+        // Now when trying to query whether we have all the blocks, we should
+        // successfully retrieve a positive answer, where the blocks of the first
+        // 4 rounds should be found in DagState and the rest in store.
         let mut block_refs = blocks
             .iter()
             .map(|block| block.reference())
            .collect::<Vec<_>>();
@@ -1013,8 +1039,9 @@
         );
     }
-        // Now when trying to query whether we have all the blocks, we should successfully retrieve a positive answer
-        // where the blocks of first 4 round should be found in DagState and the rest in store.
+        // Now when trying to query whether we have all the blocks, we should
+        // successfully retrieve a positive answer, where the blocks of the first
+        // 4 rounds should be found in DagState and the rest in store.
 let mut block_refs = blocks
             .iter()
             .map(|block| block.reference())
             .collect::<Vec<_>>();
@@ -1078,8 +1105,9 @@
         dag_state.flush();
-        // When trying to request for authority 0 at block slot 8 it should panic, as anything
-        // that is <= commit_round - cached_rounds = 10 - 2 = 8 should be evicted
+        // When trying to request for authority 0 at block slot 8 it should panic, as
+        // anything that is <= commit_round - cached_rounds = 10 - 2 = 8 should
+        // be evicted
         let _ =
             dag_state.contains_cached_block_at_slot(Slot::new(8, AuthorityIndex::new_for_test(0)));
     }
@@ -1103,7 +1131,8 @@
             }
         }
-        // Now write in store the blocks from first 4 rounds and the rest to the dag state
+        // Now write in store the blocks from the first 4 rounds and the rest to the
+        // dag state
         blocks.clone().into_iter().for_each(|block| {
             if block.round() <= 4 {
                 store
@@ -1114,8 +1143,9 @@
             }
         });
-        // Now when trying to query whether we have all the blocks, we should successfully retrieve a positive answer
-        // where the blocks of first 4 round should be found in DagState and the rest in store.
+        // Now when trying to query whether we have all the blocks, we should
+        // successfully retrieve a positive answer, where the blocks of the first
+        // 4 rounds should be found in DagState and the rest in store.
         let mut block_refs = blocks
             .iter()
             .map(|block| block.reference())
@@ -1283,8 +1313,9 @@
         assert_eq!(last_blocks[2].round(), 2);
         assert_eq!(last_blocks[3].round(), 3);
-        // WHEN we flush the DagState - after adding a commit with all the blocks, we expect this to trigger
-        // a clean up in the internal cache. That will keep the all the blocks with rounds >= authority_commit_round - CACHED_ROUND.
+        // WHEN we flush the DagState - after adding a commit with all the blocks, we
+        // expect this to trigger a clean up in the internal cache. That will
+        // keep all the blocks with rounds >= authority_commit_round - CACHED_ROUND.
         dag_state.flush();
         // AND we request before round 3
@@ -1335,11 +1366,12 @@
             .collect::<Vec<_>>(),
         ));
-        // Flush the store so we keep in memory only the last 1 round from the last commit for each
-        // authority.
+        // Flush the store so we keep in memory only the last 1 round from the last
+        // commit for each authority.
dag_state.flush(); - // THEN the method should panic, as some authorities have already evicted rounds <= round 2 + // THEN the method should panic, as some authorities have already evicted rounds + // <= round 2 let end_round = 2; dag_state.get_last_cached_block_per_authority(end_round); } @@ -1359,7 +1391,8 @@ mod test { assert_eq!(dag_state.read().last_quorum(), genesis); } - // WHEN a fully connected DAG up to round 4 is created, then round 4 blocks should be returned as quorum + // WHEN a fully connected DAG up to round 4 is created, then round 4 blocks + // should be returned as quorum { let round_4_blocks = build_dag(context, dag_state.clone(), None, 4); @@ -1374,7 +1407,8 @@ mod test { ); } - // WHEN adding one more block at round 5, still round 4 should be returned as quorum + // WHEN adding one more block at round 5, still round 4 should be returned as + // quorum { let block = VerifiedBlock::new_for_test(TestBlock::new(5, 0).build()); dag_state.write().accept_block(block); @@ -1411,7 +1445,8 @@ mod test { ); } - // WHEN adding some blocks for authorities, only the last ones should be returned + // WHEN adding some blocks for authorities, only the last ones should be + // returned { // add blocks up to round 4 build_dag(context.clone(), dag_state.clone(), None, 4); diff --git a/consensus/core/src/error.rs b/consensus/core/src/error.rs index c3486b7faa3..d2ac85e8c5f 100644 --- a/consensus/core/src/error.rs +++ b/consensus/core/src/error.rs @@ -10,7 +10,8 @@ use typed_store::TypedStoreError; use crate::block::{BlockRef, BlockTimestampMs, Round}; -/// Errors that can occur when processing blocks, reading from storage, or encountering shutdown. +/// Errors that can occur when processing blocks, reading from storage, or +/// encountering shutdown. 
#[derive(Clone, Debug, Error)] pub enum ConsensusError { #[error("Error deserializing block: {0}")] @@ -40,7 +41,9 @@ pub enum ConsensusError { block_ref: BlockRef, }, - #[error("Too many blocks have been returned from authority {0} when requesting to fetch missing blocks")] + #[error( + "Too many blocks have been returned from authority {0} when requesting to fetch missing blocks" + )] TooManyFetchedBlocksReturned(AuthorityIndex), #[error("Too many blocks have been requested from authority {0}")] @@ -58,7 +61,9 @@ pub enum ConsensusError { #[error("Synchronizer for fetching blocks directly from {0} is saturated")] SynchronizerSaturated(AuthorityIndex), - #[error("Ancestor is in wrong position: block {block_authority}, ancestor {ancestor_authority}, position {position}")] + #[error( + "Ancestor is in wrong position: block {block_authority}, ancestor {ancestor_authority}, position {position}" + )] InvalidAncestorPosition { block_authority: AuthorityIndex, ancestor_authority: AuthorityIndex, diff --git a/consensus/core/src/leader_schedule.rs b/consensus/core/src/leader_schedule.rs index b9e59bafd69..08308c2a39e 100644 --- a/consensus/core/src/leader_schedule.rs +++ b/consensus/core/src/leader_schedule.rs @@ -3,9 +3,8 @@ use std::sync::Arc; -use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; - use consensus_config::AuthorityIndex; +use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; use crate::context::Context; @@ -98,7 +97,8 @@ mod tests { leader_schedule.elect_leader(5, 0), AuthorityIndex::new_for_test(1) ); - // ensure we elect different leaders for the same round for the multi-leader case + // ensure we elect different leaders for the same round for the multi-leader + // case assert_ne!( leader_schedule.elect_leader_stake_based(1, 1), leader_schedule.elect_leader_stake_based(1, 2) @@ -130,7 +130,8 @@ mod tests { leader_schedule.elect_leader_stake_based(5, 0), AuthorityIndex::new_for_test(3) ); - // ensure we elect different leaders for the same round for the multi-leader case + // ensure we elect different leaders for the same round for the multi-leader + // case assert_ne!( leader_schedule.elect_leader_stake_based(1, 1), leader_schedule.elect_leader_stake_based(1, 2) diff --git a/consensus/core/src/leader_timeout.rs b/consensus/core/src/leader_timeout.rs index e4ac5c0a49f..84f03c163a4 100644 --- a/consensus/core/src/leader_timeout.rs +++ b/consensus/core/src/leader_timeout.rs @@ -1,17 +1,21 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0
-use crate::block::Round;
-use crate::context::Context;
-use crate::core::CoreSignalsReceivers;
-use crate::core_thread::CoreThreadDispatcher;
-use std::sync::Arc;
-use std::time::Duration;
-use tokio::sync::oneshot::{Receiver, Sender};
-use tokio::sync::watch;
-use tokio::task::JoinHandle;
-use tokio::time::{sleep_until, Instant};
+use std::{sync::Arc, time::Duration};
+
+use tokio::{
+    sync::{
+        oneshot::{Receiver, Sender},
+        watch,
+    },
+    task::JoinHandle,
+    time::{sleep_until, Instant},
+};
 use tracing::{debug, warn};
+use crate::{
+    block::Round, context::Context, core::CoreSignalsReceivers, core_thread::CoreThreadDispatcher,
+};
+
 pub(crate) struct LeaderTimeoutTaskHandle {
     handle: JoinHandle<()>,
     stop: Sender<()>,
@@ -96,20 +100,20 @@ impl LeaderTimeoutTask {
 #[cfg(test)]
 mod tests {
-    use std::collections::BTreeSet;
-    use std::sync::Arc;
-    use std::time::Duration;
+    use std::{collections::BTreeSet, sync::Arc, time::Duration};
     use async_trait::async_trait;
     use consensus_config::Parameters;
     use parking_lot::Mutex;
     use tokio::time::{sleep, Instant};
-    use crate::block::{BlockRef, Round, VerifiedBlock};
-    use crate::context::Context;
-    use crate::core::CoreSignals;
-    use crate::core_thread::{CoreError, CoreThreadDispatcher};
-    use crate::leader_timeout::LeaderTimeoutTask;
+    use crate::{
+        block::{BlockRef, Round, VerifiedBlock},
+        context::Context,
+        core::CoreSignals,
+        core_thread::{CoreError, CoreThreadDispatcher},
+        leader_timeout::LeaderTimeoutTask,
+    };
     #[derive(Clone, Default)]
     struct MockCoreThreadDispatcher {
@@ -204,8 +208,8 @@ mod tests {
         // spawn the task
         let _handle = LeaderTimeoutTask::start(dispatcher.clone(), &signal_receivers, context);
-        // now send some signals with some small delay between them, but not enough so every round
-        // manages to timeout and call the force new block method.
+        // now send some signals with some small delay between them, but not enough so
+        // every round manages to timeout and call the force new block method.
         signals.new_round(13);
         sleep(leader_timeout / 2).await;
         signals.new_round(14);
diff --git a/consensus/core/src/linearizer.rs b/consensus/core/src/linearizer.rs
index a7455a50b7e..55a4a33a063 100644
--- a/consensus/core/src/linearizer.rs
+++ b/consensus/core/src/linearizer.rs
@@ -77,8 +77,8 @@ impl Linearizer {
     }
     // This function should be called whenever a new commit is observed. This will
-    // iterate over the sequence of committed leaders and produce a list of committed
-    // sub-dags.
+    // iterate over the sequence of committed leaders and produce a list of
+    // committed sub-dags.
     pub(crate) fn handle_commit(
         &mut self,
         committed_leaders: Vec<VerifiedBlock>,
@@ -124,11 +124,12 @@
             self.dag_state.write().add_commit(commit.clone());
             committed_sub_dags.push(sub_dag);
         }
-        // Committed blocks must be persisted to storage before sending them to Sui and executing
-        // their transactions.
-        // Commit metadata can be persisted more lazily because they are recoverable. Uncommitted
-        // blocks can wait to persist too.
-        // But for simplicity, all unpersisted blocks and commits are flushed to storage.
+        // Committed blocks must be persisted to storage before sending them to Sui and
+        // executing their transactions.
+        // Commit metadata can be persisted more lazily because they are recoverable.
+        // Uncommitted blocks can wait to persist too.
+        // But for simplicity, all unpersisted blocks and commits are flushed to
+        // storage.
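The flush-before-send invariant spelled out in this linearizer hunk can be captured in a few lines. A sketch with illustrative types, where `flush` stands in for `DagState::flush()`:

```rust
struct SubDag {
    leader_round: u32,
}

// Order matters: make the commits durable first, then hand the sub-dags to
// the consumer, so a crash after sending can never leave the consumer ahead
// of what the store can recover.
fn emit_commits(sub_dags: Vec<SubDag>, flush: impl FnOnce(), mut send: impl FnMut(SubDag)) {
    if !sub_dags.is_empty() {
        // One flush covers all pending blocks and commit metadata.
        flush();
    }
    for dag in sub_dags {
        // Safe to send: a restart can re-derive this commit from storage.
        send(dag);
    }
}
```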
if !committed_sub_dags.is_empty() { self.dag_state.write().flush(); } diff --git a/consensus/core/src/metrics.rs b/consensus/core/src/metrics.rs index aea537ca0aa..2a417e950f3 100644 --- a/consensus/core/src/metrics.rs +++ b/consensus/core/src/metrics.rs @@ -3,7 +3,6 @@ use std::sync::Arc; -use crate::network::metrics::{NetworkRouteMetrics, QuinnConnectionMetrics}; use prometheus::{ register_histogram_vec_with_registry, register_histogram_with_registry, register_int_counter_vec_with_registry, register_int_counter_with_registry, @@ -11,6 +10,8 @@ use prometheus::{ HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Registry, }; +use crate::network::metrics::{NetworkRouteMetrics, QuinnConnectionMetrics}; + // starts from 1μs, 50μs, 100μs... const FINE_GRAINED_LATENCY_SEC_BUCKETS: &[f64] = &[ 0.000_001, 0.000_050, 0.000_100, 0.000_500, 0.001, 0.005, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, diff --git a/consensus/core/src/network/anemo_network.rs b/consensus/core/src/network/anemo_network.rs index f42615beb02..40054853352 100644 --- a/consensus/core/src/network/anemo_network.rs +++ b/consensus/core/src/network/anemo_network.rs @@ -264,7 +264,8 @@ impl ConsensusRpc for AnemoServiceProxy { /// 2. Take `AnemoClient` from `AnemoManager::client()`. /// 3. Create consensus components. /// 4. Create `AnemoService` for consensus RPC handler. -/// 5. Install `AnemoService` to `AnemoManager` with `AnemoManager::install_service()`. +/// 5. Install `AnemoService` to `AnemoManager` with +/// `AnemoManager::install_service()`. pub(crate) struct AnemoManager { context: Arc, client: Arc, @@ -388,8 +389,8 @@ impl NetworkManager for AnemoManager { let mut config = anemo::Config::default(); config.quic = Some(quic_config); - // Set the max_frame_size to be 1 GB to work around the issue of there being too many - // delegation events in the epoch change txn. + // Set the max_frame_size to be 1 GB to work around the issue of there being too + // many delegation events in the epoch change txn. config.max_frame_size = Some(1 << 30); // Set a default timeout of 300s for all RPC requests config.inbound_request_timeout_ms = Some(300_000); @@ -491,7 +492,8 @@ impl NetworkManager for AnemoManager { #[derive(Clone)] pub(crate) struct MetricsMakeCallbackHandler { metrics: Arc, - /// Size in bytes above which a request or response message is considered excessively large + /// Size in bytes above which a request or response message is considered + /// excessively large excessive_message_size: usize, } @@ -716,8 +718,8 @@ mod test { test_block_0.serialized(), ); - // `Committee` is generated with the same random seed in Context::new_for_test(), - // so the first 4 authorities are the same. + // `Committee` is generated with the same random seed in + // Context::new_for_test(), so the first 4 authorities are the same. let (context_4, keys_4) = Context::new_for_test(5); let context_4 = Arc::new( context_4 @@ -734,22 +736,26 @@ mod test { // client_4 should not be able to reach service_0 or service_1, because of the // AllowedPeers filter. 
 let test_block_2 = VerifiedBlock::new_for_test(TestBlock::new(9, 2).build());
-        assert!(client_4
-            .send_block(
-                context.committee.to_authority_index(0).unwrap(),
-                &test_block_2,
-                Duration::from_secs(5),
-            )
-            .await
-            .is_err());
+        assert!(
+            client_4
+                .send_block(
+                    context.committee.to_authority_index(0).unwrap(),
+                    &test_block_2,
+                    Duration::from_secs(5),
+                )
+                .await
+                .is_err()
+        );
         let test_block_3 = VerifiedBlock::new_for_test(TestBlock::new(9, 3).build());
-        assert!(client_4
-            .send_block(
-                context.committee.to_authority_index(1).unwrap(),
-                &test_block_3,
-                Duration::from_secs(5),
-            )
-            .await
-            .is_err());
+        assert!(
+            client_4
+                .send_block(
+                    context.committee.to_authority_index(1).unwrap(),
+                    &test_block_3,
+                    Duration::from_secs(5),
+                )
+                .await
+                .is_err()
+        );
     }
 }
diff --git a/consensus/core/src/network/metrics.rs b/consensus/core/src/network/metrics.rs
index f9c9278fecc..f8750c901ec 100644
--- a/consensus/core/src/network/metrics.rs
+++ b/consensus/core/src/network/metrics.rs
@@ -9,7 +9,8 @@ use prometheus::{
 #[derive(Clone)]
 pub(crate) struct QuinnConnectionMetrics {
-    /// The connection status of known peers. 0 if not connected, 1 if connected.
+    /// The connection status of known peers. 0 if not connected, 1 if
+    /// connected.
     pub network_peer_connected: IntGaugeVec,
     /// The number of connected peers
     pub network_peers: IntGauge,
@@ -204,8 +205,8 @@
 const LATENCY_SEC_BUCKETS: &[f64] = &[
     0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1., 2.5, 5., 10., 20., 30., 60., 90.,
 ];
-// Arbitrarily chosen buckets for message size, with gradually-lowering exponent to give us
-// better resolution at high sizes.
+// Arbitrarily chosen buckets for message size, with gradually-lowering exponent
+// to give us better resolution at high sizes.
 const SIZE_BYTE_BUCKETS: &[f64] = &[
     2048., 8192., // *4
     16384., 32768., 65536., 131072., 262144., 524288., 1048576., // *2
diff --git a/consensus/core/src/network/mod.rs b/consensus/core/src/network/mod.rs
index d970315a18a..0769e3c2c8c 100644
--- a/consensus/core/src/network/mod.rs
+++ b/consensus/core/src/network/mod.rs
@@ -31,9 +31,10 @@ pub(crate) mod tonic_network;
 /// Network client for communicating with peers.
 ///
-/// NOTE: the timeout parameters help saving resources at client and potentially server.
-/// But it is up to the server implementation if the timeout is honored.
-/// - To bound server resources, server should implement own timeout for incoming requests.
+/// NOTE: the timeout parameters help save resources at the client and
+/// potentially the server. But it is up to the server implementation whether
+/// the timeout is honored.
+/// - To bound server resources, the server should implement its own timeout
+///   for incoming requests.
 #[async_trait]
 pub(crate) trait NetworkClient: Send + Sync + 'static {
     /// Sends a serialized SignedBlock to a peer.
@@ -54,8 +55,9 @@
 }
 /// Network service for handling requests from peers.
-/// NOTE: using `async_trait` macro because `NetworkService` methods are called in the trait impl
-/// of `anemo_gen::ConsensusRpc`, which itself is annotated with `async_trait`.
+/// NOTE: using `async_trait` macro because `NetworkService` methods are called
+/// in the trait impl of `anemo_gen::ConsensusRpc`, which itself is annotated
+/// with `async_trait`.
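The note on timeouts above leaves enforcement to the server side. One way a handler might bound its own work, sketched with tokio primitives and hypothetical function names:

```rust
use std::time::Duration;
use tokio::time::timeout;

// A client-supplied timeout is only a hint; the server bounds its own work.
async fn handle_send_block(serialized_block: Vec<u8>) -> Result<(), String> {
    const SERVER_SIDE_LIMIT: Duration = Duration::from_secs(10);
    timeout(SERVER_SIDE_LIMIT, verify_and_process(serialized_block))
        .await
        .map_err(|_elapsed| "handler exceeded server-side timeout".to_string())?
}

// Placeholder for block verification and dispatch to the core thread.
async fn verify_and_process(_bytes: Vec<u8>) -> Result<(), String> {
    Ok(())
}
```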
#[async_trait] pub(crate) trait NetworkService: Send + Sync + 'static { async fn handle_send_block(&self, peer: AuthorityIndex, block: Bytes) -> ConsensusResult<()>; diff --git a/consensus/core/src/network/tonic_network.rs b/consensus/core/src/network/tonic_network.rs index 4dc63fe4f02..10f31319433 100644 --- a/consensus/core/src/network/tonic_network.rs +++ b/consensus/core/src/network/tonic_network.rs @@ -258,12 +258,14 @@ impl ConsensusService for TonicServiceProxy { } } -/// Manages the lifecycle of Tonic network client and service. Typical usage during initialization: +/// Manages the lifecycle of Tonic network client and service. Typical usage +/// during initialization: /// 1. Create a new `TonicManager`. /// 2. Take `TonicClient` from `TonicManager::client()`. /// 3. Create consensus components. /// 4. Create `TonicService` for consensus service handler. -/// 5. Install `TonicService` to `TonicManager` with `TonicManager::install_service()`. +/// 5. Install `TonicService` to `TonicManager` with +/// `TonicManager::install_service()`. pub(crate) struct TonicManager { context: Arc, client: Arc, @@ -358,8 +360,8 @@ impl NetworkManager for TonicManager { } } -/// Attempts to convert a multiaddr of the form `/[ip4,ip6,dns]/{}/udp/{port}` into -/// a host:port string. +/// Attempts to convert a multiaddr of the form `/[ip4,ip6,dns]/{}/udp/{port}` +/// into a host:port string. fn to_host_port_str(addr: &Multiaddr) -> Result { let mut iter = addr.iter(); @@ -381,8 +383,8 @@ fn to_host_port_str(addr: &Multiaddr) -> Result { } } -/// Attempts to convert a multiaddr of the form `/[ip4,ip6]/{}/[udp,tcp]/{port}` into -/// a SocketAddr value. +/// Attempts to convert a multiaddr of the form `/[ip4,ip6]/{}/[udp,tcp]/{port}` +/// into a SocketAddr value. fn to_socket_addr(addr: &Multiaddr) -> Result { let mut iter = addr.iter(); @@ -404,7 +406,8 @@ fn to_socket_addr(addr: &Multiaddr) -> Result { } } -// TODO: after supporting peer authentication, using rtest to share the test case with anemo_network.rs +// TODO: after supporting peer authentication, using rtest to share the test +// case with anemo_network.rs #[cfg(test)] mod test { use std::{sync::Arc, time::Duration}; diff --git a/consensus/core/src/stake_aggregator.rs b/consensus/core/src/stake_aggregator.rs index db2c1e6166d..e7f462ef5ef 100644 --- a/consensus/core/src/stake_aggregator.rs +++ b/consensus/core/src/stake_aggregator.rs @@ -1,9 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{collections::HashSet, marker::PhantomData}; + use consensus_config::{AuthorityIndex, Committee, Stake}; -use std::collections::HashSet; -use std::marker::PhantomData; pub(crate) trait CommitteeThreshold { fn is_threshold(committee: &Committee, amount: Stake) -> bool; @@ -40,9 +40,9 @@ impl StakeAggregator { } } - /// Adds a vote for the specified authority index to the aggregator. It is guaranteed to count - /// the vote only once for an authority. The method returns true when the required threshold has - /// been reached. + /// Adds a vote for the specified authority index to the aggregator. It is + /// guaranteed to count the vote only once for an authority. The method + /// returns true when the required threshold has been reached. 
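The vote-counting contract in the `add` doc comment above is small enough to model directly. A simplified, self-contained version (illustrative names; the quorum is taken as strictly more than two thirds of total stake):

```rust
use std::collections::HashSet;

struct QuorumCounter {
    votes: HashSet<usize>, // authorities that already voted
    stake: u64,
}

impl QuorumCounter {
    // Counts each authority's stake at most once; returns whether the
    // threshold has been reached after this vote.
    fn add(&mut self, authority: usize, stakes: &[u64]) -> bool {
        if self.votes.insert(authority) {
            self.stake += stakes[authority];
        }
        let total: u64 = stakes.iter().sum();
        3 * self.stake > 2 * total
    }
}

fn main() {
    let stakes = [1u64, 1, 1, 1]; // equal-stake committee of 4
    let mut q = QuorumCounter { votes: HashSet::new(), stake: 0 };
    assert!(!q.add(0, &stakes));
    assert!(!q.add(0, &stakes)); // duplicate vote adds no stake
    assert!(!q.add(1, &stakes));
    assert!(q.add(2, &stakes)); // 3 of 4 is more than two thirds
}
```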
 pub(crate) fn add(&mut self, vote: AuthorityIndex, committee: &Committee) -> bool {
         if self.votes.insert(vote) {
             self.stake += committee.stake(vote);
diff --git a/consensus/core/src/storage/mem_store.rs b/consensus/core/src/storage/mem_store.rs
index 1f498aa669c..a4f40a8b638 100644
--- a/consensus/core/src/storage/mem_store.rs
+++ b/consensus/core/src/storage/mem_store.rs
@@ -1,22 +1,21 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
-use std::collections::VecDeque;
-use std::ops::Range;
 use std::{
-    collections::{BTreeMap, BTreeSet},
-    ops::Bound::{Excluded, Included},
+    collections::{BTreeMap, BTreeSet, VecDeque},
+    ops::{
+        Bound::{Excluded, Included},
+        Range,
+    },
 };
 use consensus_config::AuthorityIndex;
 use parking_lot::RwLock;
 use super::{CommitInfo, Store, WriteBatch};
-use crate::block::Slot;
-use crate::commit::{CommitAPI as _, TrustedCommit};
 use crate::{
-    block::{BlockAPI as _, BlockDigest, BlockRef, Round, VerifiedBlock},
-    commit::{CommitDigest, CommitIndex},
+    block::{BlockAPI as _, BlockDigest, BlockRef, Round, Slot, VerifiedBlock},
+    commit::{CommitAPI as _, CommitDigest, CommitIndex, TrustedCommit},
     error::ConsensusResult,
 };
diff --git a/consensus/core/src/storage/mod.rs b/consensus/core/src/storage/mod.rs
index e87085e268f..56412931271 100644
--- a/consensus/core/src/storage/mod.rs
+++ b/consensus/core/src/storage/mod.rs
@@ -12,9 +12,8 @@ use std::ops::Range;
 use consensus_config::AuthorityIndex;
 use serde::{Deserialize, Serialize};
-use crate::block::Slot;
 use crate::{
-    block::{BlockRef, Round, VerifiedBlock},
+    block::{BlockRef, Round, Slot, VerifiedBlock},
     commit::{CommitIndex, TrustedCommit},
     error::ConsensusResult,
 };
@@ -40,9 +39,10 @@ pub(crate) trait Store: Send + Sync {
         start_round: Round,
     ) -> ConsensusResult<Vec<VerifiedBlock>>;
-    // The method returns the last `num_of_rounds` rounds blocks by author in round ascending order.
-    // When a `before_round` is defined then the blocks of round `<=before_round` are returned. If not
-    // then the max value for round will be used as cut off.
+    // The method returns the blocks of the last `num_of_rounds` rounds by author
+    // in round ascending order. When a `before_round` is defined then the blocks
+    // of round `<=before_round` are returned. If not then the max value for round
+    // will be used as the cut-off.
     fn scan_last_blocks_by_author(
         &self,
         author: AuthorityIndex,
@@ -56,7 +56,8 @@
     /// Reads all commits from start (inclusive) until end (exclusive).
     fn scan_commits(&self, range: Range<CommitIndex>) -> ConsensusResult<Vec<TrustedCommit>>;
-    /// Reads the last commit info, including last committed round per authority.
+    /// Reads the last commit info, including last committed round per
+    /// authority.
     fn read_last_commit_info(&self) -> ConsensusResult<Option<CommitInfo>>;
 }
@@ -96,9 +97,9 @@ impl WriteBatch {
     }
 }
-/// Per-commit properties that can be derived and do not need to be part of the Commit struct.
-/// Only the latest version is needed for CommitInfo, but more versions are stored for
-/// debugging and potential recovery.
+/// Per-commit properties that can be derived and do not need to be part of the
+/// Commit struct. Only the latest version is needed for CommitInfo, but more
+/// versions are stored for debugging and potential recovery.
 // TODO: version this struct.
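The `scan_last_blocks_by_author` contract described in the Store trait above, reduced to a toy in-memory version (one block per round, illustrative types):

```rust
use std::collections::BTreeMap;

// Take the blocks of the last `num_of_rounds` rounds authored up to
// `before_round` (or up to the maximum round when no cutoff is given),
// returned in ascending round order.
fn scan_last_rounds(
    rounds: &BTreeMap<u32, String>, // round -> block
    before_round: Option<u32>,
    num_of_rounds: usize,
) -> Vec<String> {
    let cutoff = before_round.unwrap_or(u32::MAX);
    let mut picked: Vec<String> = rounds
        .range(..=cutoff)
        .rev() // newest rounds first, so `take` keeps the last N
        .take(num_of_rounds)
        .map(|(_, b)| b.clone())
        .collect();
    picked.reverse(); // back to ascending round order
    picked
}
```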
#[derive(Clone, Debug, Serialize, Deserialize)] pub(crate) struct CommitInfo { diff --git a/consensus/core/src/storage/rocksdb_store.rs b/consensus/core/src/storage/rocksdb_store.rs index 49fb4f829e1..6bc5f783105 100644 --- a/consensus/core/src/storage/rocksdb_store.rs +++ b/consensus/core/src/storage/rocksdb_store.rs @@ -1,10 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::collections::VecDeque; -use std::ops::Range; use std::{ - ops::Bound::{Excluded, Included}, + collections::VecDeque, + ops::{ + Bound::{Excluded, Included}, + Range, + }, time::Duration, }; @@ -18,11 +20,9 @@ use typed_store::{ }; use super::{CommitInfo, Store, WriteBatch}; -use crate::block::Slot; -use crate::commit::{CommitAPI as _, CommitDigest, TrustedCommit}; use crate::{ - block::{BlockAPI as _, BlockDigest, BlockRef, Round, SignedBlock, VerifiedBlock}, - commit::CommitIndex, + block::{BlockAPI as _, BlockDigest, BlockRef, Round, SignedBlock, Slot, VerifiedBlock}, + commit::{CommitAPI as _, CommitDigest, CommitIndex, TrustedCommit}, error::{ConsensusError, ConsensusResult}, }; @@ -50,8 +50,8 @@ impl RocksDBStore { /// Creates a new instance of RocksDB storage. pub(crate) fn new(path: &str) -> Self { - // Consensus data has high write throughput (all transactions) and is rarely read - // (only during recovery and when helping peers catch up). + // Consensus data has high write throughput (all transactions) and is rarely + // read (only during recovery and when helping peers catch up). let db_options = default_db_options().optimize_db_for_write_throughput(2); let mut metrics_conf = MetricConf::new("consensus"); metrics_conf.read_sample_interval = SamplingInterval::new(Duration::from_secs(60), 0); @@ -216,9 +216,10 @@ impl Store for RocksDBStore { Ok(blocks) } - // The method returns the last `num_of_rounds` rounds blocks by author in round ascending order. - // When a `before_round` is defined then the blocks of round `<=before_round` are returned. If not - // then the max value for round will be used as cut off. + // The method returns the last `num_of_rounds` rounds of blocks by author in + // ascending round order. When `before_round` is defined, the blocks of + // round `<=before_round` are returned; otherwise the max round value is + // used as the cutoff. fn scan_last_blocks_by_author( &self, author: AuthorityIndex, diff --git a/consensus/core/src/synchronizer.rs b/consensus/core/src/synchronizer.rs index fb4a35458cf..11b61c680fe 100644 --- a/consensus/core/src/synchronizer.rs +++ b/consensus/core/src/synchronizer.rs @@ -1,31 +1,38 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, + time::Duration, +}; + use bytes::Bytes; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use consensus_config::AuthorityIndex; +use futures::{stream::FuturesUnordered, StreamExt}; use mysten_metrics::{monitored_future, monitored_scope}; use parking_lot::Mutex; #[cfg(not(test))] use rand::{rngs::ThreadRng, seq::SliceRandom}; -use std::collections::{BTreeMap, BTreeSet}; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::mpsc::error::TrySendError; -use tokio::sync::mpsc::{channel, Receiver, Sender}; -use tokio::sync::oneshot; -use tokio::task::JoinSet; -use tokio::time::{error::Elapsed, sleep, sleep_until, timeout, Instant}; +use tokio::{ + sync::{ + mpsc::{channel, error::TrySendError, Receiver, Sender}, + oneshot, + }, + task::JoinSet, + time::{error::Elapsed, sleep, sleep_until, timeout, Instant}, +}; use tracing::{debug, info, warn}; -use crate::block::{BlockRef, SignedBlock, VerifiedBlock}; -use crate::block_verifier::BlockVerifier; -use crate::context::Context; -use crate::core_thread::CoreThreadDispatcher; -use crate::error::{ConsensusError, ConsensusResult}; -use crate::network::NetworkClient; -use crate::BlockAPI; -use consensus_config::AuthorityIndex; +use crate::{ + block::{BlockRef, SignedBlock, VerifiedBlock}, + block_verifier::BlockVerifier, + context::Context, + core_thread::CoreThreadDispatcher, + error::{ConsensusError, ConsensusResult}, + network::NetworkClient, + BlockAPI, +}; /// The number of concurrent fetch blocks requests per authority const FETCH_BLOCKS_CONCURRENCY: usize = 5; @@ -50,8 +57,8 @@ pub(crate) struct SynchronizerHandle { } impl SynchronizerHandle { - /// Explicitly asks from the synchronizer to fetch the blocks - provided the block_refs set - from - /// the peer authority. + /// Explicitly asks the synchronizer to fetch the blocks - provided the + /// block_refs set - from the peer authority. pub(crate) async fn fetch_blocks( &self, block_refs: BTreeSet, peer_index: AuthorityIndex, @@ -135,7 +142,8 @@ impl Synchronizer Synchronizer, peer_index: AuthorityIndex, @@ -268,8 +277,8 @@ impl Synchronizer Synchronizer Synchronizer Synchronizer Synchronizer, network_client: Arc, @@ -409,7 +453,8 @@ impl Synchronizer>(); - // TODO: probably inject the RNG to allow unit testing - this is a work around for now. + // TODO: probably inject the RNG to allow unit testing - this is a workaround + // for now. cfg_if::cfg_if! { if #[cfg(not(test))] { // Shuffle the peers @@ -482,22 +527,29 @@ impl Synchronizer>, @@ -668,8 +720,8 @@ mod tests { let peer = AuthorityIndex::new_for_test(1); let mut iter = expected_blocks.iter().peekable(); while let Some(block) = iter.next() { - // stub the fetch_blocks request from peer 1 and give some high response latency so requests - // can start blocking the peer task. + // stub the fetch_blocks request from peer 1 and give some high response latency + // so requests can start blocking the peer task.
network_client .stub_fetch_blocks( vec![block.clone()], @@ -681,8 +733,8 @@ mod tests { let mut missing_blocks = BTreeSet::new(); missing_blocks.insert(block.reference()); - // WHEN requesting to fetch the blocks, it should not succeed for the last request and get - // an error with "saturated" synchronizer + // WHEN requesting to fetch the blocks, it should not succeed for the last + // request and get an error with "saturated" synchronizer if iter.peek().is_none() { match handle.fetch_blocks(missing_blocks, peer).await { Err(ConsensusError::SynchronizerSaturated(index)) => { @@ -720,8 +772,8 @@ mod tests { .await; // AND stub the requests for authority 1 & 2 - // Make the first authority timeout, so the second will be called. "We" are authority = 0, so - // we are skipped anyways. + // Make the first authority timeout, so the second will be called. "We" are + // authority = 0, so we are skipped anyways. network_client .stub_fetch_blocks( expected_blocks.clone(), @@ -752,10 +804,12 @@ mod tests { assert_eq!(added_blocks, expected_blocks); // AND missing blocks should have been consumed by the stub - assert!(core_dispatcher - .get_missing_blocks() - .await - .unwrap() - .is_empty()); + assert!( + core_dispatcher + .get_missing_blocks() + .await + .unwrap() + .is_empty() + ); } } diff --git a/consensus/core/src/test_dag.rs b/consensus/core/src/test_dag.rs index 8d8cf43a1cd..60a58af9bbe 100644 --- a/consensus/core/src/test_dag.rs +++ b/consensus/core/src/test_dag.rs @@ -15,8 +15,8 @@ use crate::{ /// Build a fully interconnected dag up to the specified round. This function /// starts building the dag from the specified [`start`] parameter or from -/// genesis if none are specified up to and including the specified round [`stop`] -/// parameter. +/// genesis if none are specified up to and including the specified round +/// [`stop`] parameter. pub(crate) fn build_dag( context: Arc, dag_state: Arc>, diff --git a/consensus/core/src/tests/base_committer_tests.rs b/consensus/core/src/tests/base_committer_tests.rs index 369cdc587fc..439e516f5b7 100644 --- a/consensus/core/src/tests/base_committer_tests.rs +++ b/consensus/core/src/tests/base_committer_tests.rs @@ -281,7 +281,8 @@ fn indirect_commit() { .take(context.committee.quorum_threshold() as usize) .collect(); - // The validators not part of the f+1 above will not certify the leader of wave 1. + // The validators not part of the f+1 above will not certify the leader of wave + // 1. let connections_without_votes_for_leader_1 = context .committee .authorities() @@ -332,11 +333,11 @@ fn indirect_commit() { // Quick Summary: // Leader of wave 2 or C6 has the necessary votes/certs to be directly commited. - // Then, when we get to the leader of wave 1 or D3, we see that we cannot direct commit - // and it is marked as undecided. But this time we have a committed anchor so we - // check if there is a certified link from the anchor (c6) to the undecided leader - // (d3). There is a certified link through A5 with votes A4,B4,C4. So we can mark - // this leader as committed indirectly. + // Then, when we get to the leader of wave 1 or D3, we see that we cannot direct + // commit and it is marked as undecided. But this time we have a committed + // anchor so we check if there is a certified link from the anchor (c6) to + // the undecided leader (d3). There is a certified link through A5 with + // votes A4,B4,C4. So we can mark this leader as committed indirectly. 
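The `quorum_threshold()` counts these test setups rely on feed the `StakeAggregator::add` logic shown in the stake_aggregator.rs hunk earlier. A minimal standalone sketch of that add-once counting, assuming four equal-stake authorities and an illustrative 2f+1 quorum of 6_667 out of the documented 10_000 total voting power (simplified names, not the crate's actual types):

```rust
use std::collections::HashSet;

// Standalone sketch of the StakeAggregator voting flow: each authority's vote
// counts at most once, and `add` reports when accumulated stake reaches the
// threshold.
struct StakeAggregator {
    votes: HashSet<usize>,
    stake: u64,
    threshold: u64,
}

impl StakeAggregator {
    fn new(threshold: u64) -> Self {
        Self { votes: HashSet::new(), stake: 0, threshold }
    }

    fn add(&mut self, authority: usize, stakes: &[u64]) -> bool {
        // insert() returns false for duplicates, so repeated votes from the
        // same authority do not add stake twice.
        if self.votes.insert(authority) {
            self.stake += stakes[authority];
        }
        self.stake >= self.threshold
    }
}

fn main() {
    // Four equal authorities; total voting power 10_000, quorum (2f+1) = 6_667.
    let stakes = [2_500u64; 4];
    let mut agg = StakeAggregator::new(6_667);
    assert!(!agg.add(0, &stakes));
    assert!(!agg.add(0, &stakes)); // duplicate vote is ignored
    assert!(!agg.add(1, &stakes)); // 5_000 < 6_667
    assert!(agg.add(2, &stakes)); // 7_500 >= 6_667: quorum reached
}
```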
// Ensure we commit the leader of wave 1 indirectly with the committed leader // of wave 2 as the anchor. @@ -519,8 +520,8 @@ fn undecided() { references_leader_round_wave_1, )]; - // Also to ensure we have < 2f+1 blames, we take less then that for connections (votes) - // without the leader of wave 1. + // Also to ensure we have < 2f+1 blames, we take less than that for connections + // (votes) without the leader of wave 1. let connections_without_leader_wave_1: Vec<_> = authorities .take((context.committee.quorum_threshold() - 1) as usize) .map(|authority| (authority.0, references_without_leader_wave_1.clone())) .collect(); @@ -568,8 +569,9 @@ fn undecided() { } // This test scenario has one authority that is acting in a byzantine manner. It -// will be sending multiple different blocks to different validators for a round. -// The commit rule should handle this and correctly commit the expected blocks. +// will be sending multiple different blocks to different validators for a +// round. The commit rule should handle this and correctly commit the expected +// blocks. #[test] fn test_byzantine_direct_commit() { telemetry_subscribers::init_for_testing(); @@ -592,7 +594,8 @@ fn test_byzantine_direct_commit() { // Add blocks to reach voting round of wave 4 let voting_round_wave_4 = leader_round_wave_4 + 1; - // This includes a "good vote" from validator C which is acting as a byzantine validator + // This includes a "good vote" from validator C which is acting as a byzantine + // validator let good_references_voting_round_wave_4 = build_dag( context.clone(), dag_state.clone(), @@ -619,7 +622,8 @@ fn test_byzantine_direct_commit() { .filter(|x| x.author != leader_wave_4.authority) .collect(); - // Accept these references/blocks as ancestors from decision round blocks in dag state + // Accept these references/blocks as ancestors from decision round blocks in dag + // state let byzantine_block_c13_1 = VerifiedBlock::new_for_test( TestBlock::new(13, 2) .set_ancestors(references_without_leader_round_wave_4.clone()) @@ -650,9 +654,10 @@ fn test_byzantine_direct_commit() { .write() .accept_block(byzantine_block_c13_3.clone()); - // Ancestors of decision blocks in round 14 should include multiple byzantine non-votes C13 - // but there are enough good votes to prevent a skip. Additionally only one of the non-votes - // per authority should be counted so we should not skip leader A12. + // Ancestors of decision blocks in round 14 should include multiple byzantine + // non-votes C13 but there are enough good votes to prevent a skip. + // Additionally only one of the non-votes per authority should be counted so + // we should not skip leader A12. let decison_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) @@ -706,10 +711,11 @@ fn test_byzantine_direct_commit() { // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of wave 4 - // - We have 3 byzantine C13 nonvotes that we received as ancestors from decision - // round blocks from B, C, & D. - // - We have B14, C14 & D14 that include this byzantine nonvote and A14 from the - // decision round. But all of these blocks also have good votes from A, B, C & D. + // - We have 3 byzantine C13 nonvotes that we received as ancestors from + // decision round blocks from B, C, & D. + // - We have B14, C14 & D14 that include this byzantine nonvote and A14 from + // the decision round. But all of these blocks also have good votes from A, B, + // C & D.
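The dedup rule stated in that comment (count at most one non-vote per authority) can be sketched on its own. A hedged illustration with made-up authority indices and stake values, not the committer's actual code:

```rust
use std::collections::HashSet;

// When tallying non-votes gathered from round-14 ancestors, an equivocating
// authority's multiple round-13 blocks (the three C13 variants) must count as
// a single non-vote, so equivocation cannot amplify blame.
fn blame_stake(non_votes: &[(usize, u64)]) -> u64 {
    let mut seen = HashSet::new();
    non_votes
        .iter()
        .filter(|(authority, _)| seen.insert(*authority))
        .map(|(_, stake)| stake)
        .sum()
}

fn main() {
    // Three equivocating non-votes from authority 2 plus one from authority 3.
    let non_votes = [(2, 2_500), (2, 2_500), (2, 2_500), (3, 2_500)];
    // Counted once per authority: 5_000 stake of blame, below a 6_667 quorum,
    // so the leader is not skipped.
    assert_eq!(blame_stake(&non_votes), 5_000);
}
```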
// Expect a successful direct commit. tracing::info!("Try direct commit for leader {leader_wave_4}"); @@ -723,8 +729,9 @@ }; } -// TODO: Add test for indirect commit with a certified link through a byzantine validator. +// TODO: Add test for indirect commit with a certified link through a byzantine +// validator. -// TODO: add basic tests for multi leader & pipeline. More tests will be added to -// throughly test pipelining and multileader once universal committer lands so -// these tests may not be necessary here. +// TODO: add basic tests for multi leader & pipeline. More tests will be added +// to thoroughly test pipelining and multileader once universal committer lands +// so these tests may not be necessary here. diff --git a/consensus/core/src/tests/pipelined_committer_tests.rs b/consensus/core/src/tests/pipelined_committer_tests.rs index 326939afca1..43d02bd4d67 100644 --- a/consensus/core/src/tests/pipelined_committer_tests.rs +++ b/consensus/core/src/tests/pipelined_committer_tests.rs @@ -47,8 +47,8 @@ fn direct_commit() { fn idempotence() { let (context, dag_state, committer) = basic_test_setup(); - // Add enough blocks to reach decision round of pipeline 1 wave 0 which is round 4. - // note: pipelines, waves & rounds are zero-indexed. + // Add enough blocks to reach decision round of pipeline 1 wave 0 which is round + // 4. note: pipelines, waves & rounds are zero-indexed. let leader_round_pipeline_1_wave_0 = committer.committers[1].leader_round(0); let decision_round_pipeline_1_wave_0 = committer.committers[1].decision_round(0); build_dag( context.clone(), @@ -89,7 +89,8 @@ fn idempotence() { panic!("Expected a committed leader") }; - // Ensure we don't commit the same leader again once last decided has been updated. + // Ensure we don't commit the same leader again once last decided has been + // updated. let last_decided = Slot::new(first_sequence[0].round(), first_sequence[0].authority()); let sequence = committer.try_commit(last_decided); assert!(sequence.is_empty()); @@ -292,7 +293,8 @@ fn direct_skip_enough_blame() { .take(context.committee.quorum_threshold() as usize) .collect(); - // Add enough blocks to reach the decision round of the wave 0 leader for pipeline 1. + // Add enough blocks to reach the decision round of the wave 0 leader for + // pipeline 1. let decision_round_pipeline_1_wave_0 = committer.committers[1].decision_round(0); build_dag( context.clone(), @@ -394,9 +396,9 @@ fn indirect_commit() { dag_state.clone(), )); - // Add enough blocks to decide the leader of round 5. The leader of round 2 will be skipped - // (it was the vote for the first leader that we removed) so we add enough blocks - // to indirectly skip it. + // Add enough blocks to decide the leader of round 5. The leader of round 2 will + // be skipped (it was the vote for the first leader that we removed) so we + // add enough blocks to indirectly skip it. let leader_round_5 = 5; let pipeline_leader_5 = leader_round_5 % wave_length as usize; let wave_leader_5 = committer.committers[pipeline_leader_5].wave_number(leader_round_5 as u32); @@ -488,7 +490,8 @@ fn indirect_skip() { decision_round_7, ); - // Ensure we commit the first 3 leaders, skip the 4th, and commit the last 2 leaders. + // Ensure we commit the first 3 leaders, skip the 4th, and commit the last 2 + // leaders.
let last_decided = Slot::new_for_test(0, 0); let sequence = committer.try_commit(last_decided); tracing::info!("Commit sequence: {sequence:#?}"); @@ -573,11 +576,11 @@ fn undecided() { } // This test scenario has one authority that is acting in a byzantine manner. It -// will be sending multiple different blocks to different validators for a round. -// The commit rule should handle this and correctly commit the expected blocks. -// However when extra dag layers are added and the byzantine node is meant to be -// a leader, its block is skipped as there is not enough votes to directly -// decide it and not any certified links to indirectly commit it. +// will be sending multiple different blocks to different validators for a +// round. The commit rule should handle this and correctly commit the expected +// blocks. However, when extra dag layers are added and the byzantine node is +// meant to be a leader, its block is skipped as there are not enough votes to +// directly decide it and no certified links to indirectly commit it. #[test] fn test_byzantine_validator() { let (context, dag_state, committer) = basic_test_setup(); @@ -590,7 +593,8 @@ fn test_byzantine_validator() { // Add blocks to reach voting round for leader A12 let voting_round_12 = leader_round_12 + 1; - // This includes a "good vote" from validator B which is acting as a byzantine validator + // This includes a "good vote" from validator B which is acting as a byzantine + // validator let good_references_voting_round_wave_4 = build_dag( context.clone(), dag_state.clone(), @@ -613,7 +617,8 @@ fn test_byzantine_validator() { .filter(|x| x.author != leader_12) .collect(); - // Accept these references/blocks as ancestors from decision round blocks in dag state + // Accept these references/blocks as ancestors from decision round blocks in dag + // state let byzantine_block_b13_1 = VerifiedBlock::new_for_test( TestBlock::new(13, 1) .set_ancestors(references_without_leader_round_wave_4.clone()) @@ -644,9 +649,10 @@ fn test_byzantine_validator() { .write() .accept_block(byzantine_block_b13_3.clone()); - // Ancestors of decision blocks in round 14 should include multiple byzantine non-votes B13 - // but there are enough good votes to prevent a skip. Additionally only one of the non-votes - // per authority should be counted so we should not skip leader A12. + // Ancestors of decision blocks in round 14 should include multiple byzantine + // non-votes B13 but there are enough good votes to prevent a skip. + // Additionally only one of the non-votes per authority should be counted so + // we should not skip leader A12. let mut references_round_14 = vec![]; let decison_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) @@ -705,8 +711,8 @@ fn test_byzantine_validator() { // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of leader A12 - // - We have 3 byzantine B13 nonvotes that we received as ancestors from decision - // round blocks from B, C, & D. + // - We have 3 byzantine B13 nonvotes that we received as ancestors from + // decision round blocks from B, C, & D. // - We have B14, C14 & D14 that include this byzantine nonvote. But all of // these blocks also have good votes from A, C & D.
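Across these committer tests the per-leader outcome follows one pattern: 2f+1 stake of certificates yields a direct commit, 2f+1 stake of blame yields a direct skip, and anything else is left undecided for the indirect rule. A standalone sketch of that decision shape, using an illustrative 6_667 quorum on the 10_000 stake total (not the crate's implementation):

```rust
// Per-leader decision rule: direct commit, direct skip, or undecided.
#[derive(Debug, PartialEq)]
enum LeaderStatus {
    Commit,
    Skip,
    Undecided,
}

fn decide(certificate_stake: u64, blame_stake: u64, quorum: u64) -> LeaderStatus {
    if certificate_stake >= quorum {
        LeaderStatus::Commit // enough certified support for a direct commit
    } else if blame_stake >= quorum {
        LeaderStatus::Skip // enough blame for a direct skip
    } else {
        LeaderStatus::Undecided // fall through to the indirect decision rule
    }
}

fn main() {
    let quorum = 6_667; // illustrative 2f+1 of 10_000 total stake
    assert_eq!(decide(7_500, 0, quorum), LeaderStatus::Commit);
    assert_eq!(decide(0, 7_500, quorum), LeaderStatus::Skip);
    // <2f+1 certificates and <2f+1 blame: undecided, as in the tests above.
    assert_eq!(decide(5_000, 2_500, quorum), LeaderStatus::Undecided);
}
```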
diff --git a/consensus/core/src/tests/universal_committer_tests.rs b/consensus/core/src/tests/universal_committer_tests.rs index 695ddd12a12..3ec3508ccd5 100644 --- a/consensus/core/src/tests/universal_committer_tests.rs +++ b/consensus/core/src/tests/universal_committer_tests.rs @@ -28,12 +28,13 @@ fn direct_commit() { let voting_round_wave_2 = committer.committers[0].leader_round(2) + 1; build_dag(context, dag_state, None, voting_round_wave_2); - // Genesis cert will not be included in commit sequence, marking it as last decided + // Genesis cert will not be included in commit sequence, marking it as last + // decided let last_decided = Slot::new_for_test(0, 0); - // The universal committer should mark the potential leaders in leader round 6 as - // undecided because there is no way to get enough certificates for leaders of - // leader round 6 without completing wave 2. + // The universal committer should mark the potential leaders in leader round 6 + // as undecided because there is no way to get enough certificates for + // leaders of leader round 6 without completing wave 2. let sequence = committer.try_commit(last_decided); tracing::info!("Commit sequence: {sequence:#?}"); @@ -381,7 +382,8 @@ fn indirect_commit() { .take(context.committee.quorum_threshold() as usize) .collect(); - // The validators not part of the f+1 above will not certify the leader of wave 1. + // The validators not part of the f+1 above will not certify the leader of wave + // 1. let connections_without_votes_for_leader_1 = context .committee .authorities() @@ -446,9 +448,9 @@ fn indirect_skip() { .collect(); // Only f+1 validators connect to the leader of wave 2. This is setting up the - // scenario where we have <2f+1 blame & <2f+1 certificates for the leader of wave 2 - // which will mean we mark it as Undecided. Note there are not enough votes - // to form a certified link to the leader of wave 2 as well. + // scenario where we have <2f+1 blame & <2f+1 certificates for the leader of + // wave 2 which will mean we mark it as Undecided. Note there are not enough + // votes to form a certified link to the leader of wave 2 as well. let mut references = Vec::new(); let connections_with_leader_wave_2 = context @@ -500,10 +502,10 @@ fn indirect_skip() { panic!("Expected a committed leader") }; - // Ensure we skip the leader of wave 2 after it had been marked undecided directly. - // This happens because we do not have enough votes in voting round of wave 2 - // for the certificates of decision round wave 2 to form a certified link to - // the leader of wave 2. + // Ensure we skip the leader of wave 2 after it had been marked undecided + // directly. This happens because we do not have enough votes in voting + // round of wave 2 for the certificates of decision round wave 2 to form a + // certified link to the leader of wave 2. if let LeaderStatus::Skip(leader) = sequence[1] { assert_eq!(leader.authority, leader_wave_2); assert_eq!(leader.round, leader_round_wave_2); @@ -571,8 +573,8 @@ fn undecided() { decision_round_wave_1, ); - // Ensure outcome of direct & indirect rule is undecided. So not commit decisions - // should be returned. + // Ensure outcome of direct & indirect rule is undecided. So no commit + // decisions should be returned. let last_committed = Slot::new_for_test(0, 0); let sequence = committer.try_commit(last_committed); tracing::info!("Commit sequence: {sequence:#?}"); @@ -580,8 +582,9 @@ } // This test scenario has one authority that is acting in a byzantine manner.
It -// will be sending multiple different blocks to different validators for a round. -// The commit rule should handle this and correctly commit the expected blocks. +// will be sending multiple different blocks to different validators for a +// round. The commit rule should handle this and correctly commit the expected +// blocks. #[test] fn test_byzantine_direct_commit() { let (context, dag_state, committer) = basic_test_setup(); @@ -598,7 +601,8 @@ fn test_byzantine_direct_commit() { // Add blocks to reach voting round of wave 4 let voting_round_wave_4 = committer.committers[0].leader_round(4) + 1; - // This includes a "good vote" from validator C which is acting as a byzantine validator + // This includes a "good vote" from validator C which is acting as a byzantine + // validator let good_references_voting_round_wave_4 = build_dag( context.clone(), dag_state.clone(), @@ -623,7 +627,8 @@ fn test_byzantine_direct_commit() { .filter(|x| x.author != leader_wave_4) .collect(); - // Accept these references/blocks as ancestors from decision round blocks in dag state + // Accept these references/blocks as ancestors from decision round blocks in dag + // state let byzantine_block_c13_1 = VerifiedBlock::new_for_test( TestBlock::new(13, 2) .set_ancestors(references_without_leader_round_wave_4.clone()) @@ -654,9 +659,10 @@ fn test_byzantine_direct_commit() { .write() .accept_block(byzantine_block_c13_3.clone()); - // Ancestors of decision blocks in round 14 should include multiple byzantine non-votes C13 - // but there are enough good votes to prevent a skip. Additionally only one of the non-votes - // per authority should be counted so we should not skip leader A12. + // Ancestors of decision blocks in round 14 should include multiple byzantine + // non-votes C13 but there are enough good votes to prevent a skip. + // Additionally only one of the non-votes per authority should be counted so + // we should not skip leader A12. let decison_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) @@ -710,8 +716,8 @@ fn test_byzantine_direct_commit() { // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of wave 4 - // - We have 3 byzantine C13 nonvotes that we received as ancestors from decision - // round blocks from B, C, & D. + // - We have 3 byzantine C13 nonvotes that we received as ancestors from + // decision round blocks from B, C, & D. // - We have B14, C14 & D14 that include this byzantine nonvote from C13 but // all of these blocks also have good votes for leader A12 through A, B, D. @@ -731,7 +737,8 @@ fn test_byzantine_direct_commit() { }; } -// TODO: Add byzantine variant of tests for indirect/direct commit/skip/undecided decisions +// TODO: Add byzantine variant of tests for indirect/direct +// commit/skip/undecided decisions fn basic_test_setup() -> ( Arc, @@ -749,7 +756,8 @@ fn basic_test_setup() -> ( // Create committer without pipelining and only 1 leader per leader round let committer = UniversalCommitterBuilder::new(context.clone(), dag_state.clone()).build(); - // note: without pipelining or multi-leader enabled there should only be one committer. + // note: without pipelining or multi-leader enabled there should only be one + // committer. 
assert!(committer.committers.len() == 1); (context, dag_state, committer) diff --git a/consensus/core/src/threshold_clock.rs b/consensus/core/src/threshold_clock.rs index b99237b48af..613eb854add 100644 --- a/consensus/core/src/threshold_clock.rs +++ b/consensus/core/src/threshold_clock.rs @@ -26,8 +26,9 @@ impl ThresholdClock { } } - /// Add the block references that have been successfully processed and advance the round accordingly. If the round - /// has indeed advanced then the new round is returned, otherwise None is returned. + /// Add the block references that have been successfully processed and + /// advance the round accordingly. If the round has indeed advanced then + /// the new round is returned, otherwise None is returned. pub(crate) fn add_blocks(&mut self, blocks: Vec) -> Option { let previous_round = self.round; for block_ref in blocks { @@ -72,9 +73,10 @@ impl ThresholdClock { #[cfg(test)] mod tests { + use consensus_config::AuthorityIndex; + use super::*; use crate::block::BlockDigest; - use consensus_config::AuthorityIndex; #[test] fn test_threshold_clock_add_block() { diff --git a/consensus/core/src/transaction.rs b/consensus/core/src/transaction.rs index 7ec5e86ee9e..47464eb52cc 100644 --- a/consensus/core/src/transaction.rs +++ b/consensus/core/src/transaction.rs @@ -3,26 +3,27 @@ use std::sync::Arc; -use mysten_metrics::metered_channel; -use mysten_metrics::metered_channel::channel_with_total; +use mysten_metrics::{metered_channel, metered_channel::channel_with_total}; use sui_protocol_config::ProtocolConfig; use tap::tap::TapFallible; use thiserror::Error; use tokio::sync::oneshot; use tracing::error; -use crate::block::Transaction; -use crate::context::Context; +use crate::{block::Transaction, context::Context}; -/// The maximum number of transactions pending to the queue to be pulled for block proposal +/// The maximum number of transactions pending in the queue to be pulled for +/// block proposal const MAX_PENDING_TRANSACTIONS: usize = 2_000; const MAX_CONSUMED_TRANSACTIONS_PER_REQUEST: u64 = 5_000; -/// The guard acts as an acknowledgment mechanism for the inclusion of the transaction to a block. -/// When the transaction is included to a block then the inclusion should be explicitly acknowledged -/// by calling the `acknowledge` method. If the guard is dropped without getting acknowledged then -/// that means the transaction has not been included to a block and the consensus is shutting down. +/// The guard acts as an acknowledgment mechanism for the inclusion of the +/// transaction in a block. When the transaction is included in a block, the +/// inclusion should be explicitly acknowledged by calling the `acknowledge` +/// method. If the guard is dropped without getting acknowledged, that means +/// the transaction has not been included in a block and the consensus is +/// shutting down. pub(crate) struct TransactionGuard { pub transaction: Transaction, included_in_block_ack: oneshot::Sender<()>, } @@ -34,9 +35,10 @@ impl TransactionGuard { } } -/// The TransactionConsumer is responsible for fetching the next transactions to be included for the block proposals. -/// The transactions are submitted to a channel which is shared between the TransactionConsumer and the TransactionClient -/// and are pulled every time the `next` method is called. +/// The TransactionConsumer is responsible for fetching the next transactions to +/// be included in block proposals.
The transactions are submitted to a +/// channel which is shared between the TransactionConsumer and the +/// TransactionClient and are pulled every time the `next` method is called. pub(crate) struct TransactionConsumer { tx_receiver: metered_channel::Receiver, max_consumed_bytes_per_request: u64, @@ -61,14 +63,17 @@ impl TransactionConsumer { } } - // Attempts to fetch the next transactions that have been submitted for sequence. Also a `max_consumed_bytes_per_request` parameter - // is given in order to ensure up to `max_consumed_bytes_per_request` bytes of transactions are retrieved. + // Attempts to fetch the next transactions that have been submitted for + // sequencing. Also a `max_consumed_bytes_per_request` parameter is given in + // order to ensure up to `max_consumed_bytes_per_request` bytes of transactions + // are retrieved. pub(crate) fn next(&mut self) -> Vec { let mut transactions = Vec::new(); let mut total_size: usize = 0; if let Some(t) = self.pending_transaction.take() { - // Here we assume that a transaction can always fit in `max_fetched_bytes_per_request` + // Here we assume that a transaction can always fit in + // `max_consumed_bytes_per_request` total_size += t.transaction.data().len(); transactions.push(t); } @@ -76,7 +81,8 @@ impl TransactionConsumer { while let Ok(t) = self.tx_receiver.try_recv() { total_size += t.transaction.data().len(); - // If we went over the max size with this transaction, just cache it for the next pull. + // If we went over the max size with this transaction, just cache it for the + // next pull. if total_size as u64 > self.max_consumed_bytes_per_request { self.pending_transaction = Some(t); break; } @@ -128,8 +134,9 @@ impl TransactionClient { ) } - /// Submits a transaction to be sequenced. The method returns when the transaction has been successfully - /// included to the next proposed block. + /// Submits a transaction to be sequenced. The method returns when the + /// transaction has been successfully included in the next proposed + /// block. pub async fn submit(&self, transaction: Vec) -> Result<(), ClientError> { let included_in_block = self.submit_no_wait(transaction).await?; included_in_block .await .map_err(|e| ClientError::ConsensusShuttingDown(e.to_string())) } - /// Submits a transaction to be sequenced. The transaction length gets evaluated and rejected from consensus if too big. - /// That shouldn't be the common case as sizes should be aligned between consensus and client. The method returns - /// a receiver to wait on until the transactions has been included in the next block to get proposed. The consumer should - /// wait on it to consider as inclusion acknowledgement. If the receiver errors then consensus is shutting down and transaction - /// has not been included to any block. + /// Submits a transaction to be sequenced. The transaction length gets + /// evaluated and rejected from consensus if too big. That shouldn't be + /// the common case as sizes should be aligned between consensus and client. + /// The method returns a receiver to wait on until the transaction has + /// been included in the next block to get proposed. The consumer should + /// wait on it as the inclusion acknowledgement. If the receiver + /// errors, consensus is shutting down and the transaction has not been + /// included in any block.
pub(crate) async fn submit_no_wait( &self, transaction: Vec, @@ -168,8 +178,8 @@ impl TransactionClient { } } -/// `TransactionVerifier` implementation is supplied by Sui to validate transactions in a block, -/// before acceptance of the block. +/// `TransactionVerifier` implementation is supplied by Sui to validate +/// transactions in a block, before acceptance of the block. pub trait TransactionVerifier: Send + Sync + 'static { /// Determines if this batch can be voted on fn verify_batch( @@ -200,15 +210,17 @@ impl TransactionVerifier for NoopTransactionVerifier { #[cfg(test)] mod tests { - use crate::context::Context; - use crate::transaction::{TransactionClient, TransactionConsumer, TransactionGuard}; - use futures::stream::FuturesUnordered; - use futures::StreamExt; - use std::sync::Arc; - use std::time::Duration; + use std::{sync::Arc, time::Duration}; + + use futures::{stream::FuturesUnordered, StreamExt}; use sui_protocol_config::ProtocolConfig; use tokio::time::timeout; + use crate::{ + context::Context, + transaction::{TransactionClient, TransactionConsumer, TransactionGuard}, + }; + #[tokio::test(flavor = "current_thread", start_paused = true)] async fn basic_submit_and_consume() { let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { diff --git a/consensus/core/src/universal_committer.rs b/consensus/core/src/universal_committer.rs index c4f09aa3346..415a02da53b 100644 --- a/consensus/core/src/universal_committer.rs +++ b/consensus/core/src/universal_committer.rs @@ -22,9 +22,9 @@ mod universal_committer_tests; #[path = "tests/pipelined_committer_tests.rs"] mod pipelined_committer_tests; -/// A universal committer uses a collection of committers to commit a sequence of leaders. -/// It can be configured to use a combination of different commit strategies, including -/// multi-leaders, backup leaders, and pipelines. +/// A universal committer uses a collection of committers to commit a sequence +/// of leaders. It can be configured to use a combination of different commit +/// strategies, including multi-leaders, backup leaders, and pipelines. pub(crate) struct UniversalCommitter { /// The per-epoch configuration of this authority. context: Arc, @@ -35,8 +35,8 @@ pub(crate) struct UniversalCommitter { } impl UniversalCommitter { - /// Try to commit part of the dag. This function is idempotent and returns a list of - /// ordered decided leaders. + /// Try to commit part of the dag. This function is idempotent and returns a + /// list of ordered decided leaders. #[tracing::instrument(skip_all, fields(last_decided = %last_decided))] pub(crate) fn try_commit(&self, last_decided: Slot) -> Vec { let highest_accepted_round = self.dag_state.read().highest_accepted_round(); @@ -126,8 +126,8 @@ impl UniversalCommitter { } } -/// A builder for a universal committer. By default, the builder creates a single -/// base committer, that is, a single leader and no pipeline. +/// A builder for a universal committer. By default, the builder creates a +/// single base committer, that is, a single leader and no pipeline. pub(crate) mod universal_committer_builder { use super::*; use crate::{ diff --git a/crates/anemo-benchmark/src/server.rs b/crates/anemo-benchmark/src/server.rs index a13f8fceaa7..c41c25f0598 100644 --- a/crates/anemo-benchmark/src/server.rs +++ b/crates/anemo-benchmark/src/server.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::Benchmark; use rand::Rng; +use crate::Benchmark; + pub struct Server; #[anemo::async_trait] diff --git a/crates/data-transform/src/lib.rs b/crates/data-transform/src/lib.rs index 1f8f1724cac..66c3265055d 100644 --- a/crates/data-transform/src/lib.rs +++ b/crates/data-transform/src/lib.rs @@ -4,12 +4,11 @@ pub mod models; pub mod schema; -use diesel::pg::PgConnection; -use diesel::prelude::*; -use diesel::r2d2::ConnectionManager; -use dotenvy::dotenv; use std::env; +use diesel::{pg::PgConnection, prelude::*, r2d2::ConnectionManager}; +use dotenvy::dotenv; + pub type PgConnectionPool = diesel::r2d2::Pool>; pub type PgPoolConnection = diesel::r2d2::PooledConnection>; diff --git a/crates/data-transform/src/main.rs b/crates/data-transform/src/main.rs index 7d371791844..be8a14c60f1 100644 --- a/crates/data-transform/src/main.rs +++ b/crates/data-transform/src/main.rs @@ -1,35 +1,34 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{HashMap, HashSet}, + env, + process::exit, + str::FromStr, + sync::Arc, +}; + use anyhow::anyhow; use data_transform::*; -use diesel::prelude::*; -use diesel::RunQueryDsl; -use once_cell::sync::Lazy; -use std::process::exit; -use std::str::FromStr; -use std::sync::Arc; -use sui_types::object::bounded_visitor::BoundedVisitor; - +use diesel::{prelude::*, RunQueryDsl}; use move_bytecode_utils::module_cache::SyncModuleCache; -use sui_types::object::MoveObject; - -use self::models::*; -use std::env; -use sui_indexer::db::new_pg_connection_pool; -use sui_indexer::errors::IndexerError; -use sui_indexer::store::module_resolver::IndexerStorePackageModuleResolver; - -use move_core_types::language_storage::ModuleId; -use move_core_types::resolver::ModuleResolver; -use std::collections::{HashMap, HashSet}; +use move_core_types::{language_storage::ModuleId, resolver::ModuleResolver}; +use once_cell::sync::Lazy; +use sui_indexer::{ + db::new_pg_connection_pool, errors::IndexerError, + store::module_resolver::IndexerStorePackageModuleResolver, +}; use sui_json_rpc_types::SuiMoveStruct; -use sui_types::parse_sui_struct_tag; - +use sui_types::{ + object::{bounded_visitor::BoundedVisitor, MoveObject}, + parse_sui_struct_tag, +}; use tracing::debug; + +use self::models::*; extern crate base64; -use base64::engine::general_purpose::STANDARD as BASE64; -use base64::engine::Engine as _; +use base64::engine::{general_purpose::STANDARD as BASE64, Engine as _}; use move_core_types::account_address::AccountAddress; struct GrootModuleResolver { @@ -199,8 +198,7 @@ fn map_typus_address(address: &AccountAddress) -> AccountAddress { } fn main() { - use self::schema::events::dsl::*; - use self::schema::events_json::dsl::*; + use self::schema::{events::dsl::*, events_json::dsl::*}; // get the starting id from the arguments let args: Vec = env::args().collect(); @@ -220,14 +218,16 @@ fn main() { println!("start id = {}", start_id); - //let mut end_id: i64 = start_id +1; + // let mut end_id: i64 = start_id +1; let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set"); let connection = &mut establish_connection(); let blocking_cp = new_pg_connection_pool(&database_url, None) .map_err(|e| anyhow!("Unable to connect to Postgres, is it running? 
{e}")); - //let module_cache = Arc::new(SyncModuleCache::new(IndexerModuleResolver::new(blocking_cp.expect("REASON").clone()))); + // let module_cache = + // Arc::new(SyncModuleCache::new(IndexerModuleResolver::new(blocking_cp.expect(" + // REASON").clone()))); // let module_cache = Arc::new(SyncModuleCache::new(GrootModuleResolver::new( blocking_cp.expect("REASON"), @@ -252,12 +252,11 @@ fn main() { let text = String::from_utf8_lossy(&event.event_bcs); debug!("bcs in text = {:#?}", text); - /* - if event.package != "0x000000000000000000000000000000000000000000000000000000000000dee9" { - println!("not deepbook skipping..."); - continue; - } - */ + // if event.package != + // "0x000000000000000000000000000000000000000000000000000000000000dee9" { + // println!("not deepbook skipping..."); + // continue; + // } // check for the previous record in events_json let eventj = events_json diff --git a/crates/data-transform/src/models.rs b/crates/data-transform/src/models.rs index 5f1503164aa..8c6e589071a 100644 --- a/crates/data-transform/src/models.rs +++ b/crates/data-transform/src/models.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::schema::events_json; use diesel::prelude::*; +use crate::schema::events_json; + #[derive(Queryable, Selectable)] #[diesel(table_name = crate::schema::events)] #[diesel(check_for_backend(diesel::pg::Pg))] diff --git a/crates/mysten-common/src/sync/async_once_cell.rs b/crates/mysten-common/src/sync/async_once_cell.rs index 7a4d35962d5..4ca6c20d69a 100644 --- a/crates/mysten-common/src/sync/async_once_cell.rs +++ b/crates/mysten-common/src/sync/async_once_cell.rs @@ -1,16 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use parking_lot::Mutex; use std::sync::Arc; + +use parking_lot::Mutex; use tokio::sync::{OwnedRwLockWriteGuard, RwLock}; /// This structure contains a cell for a single value. /// The cell can be written only once, and can be read many times. /// Readers are provided with async API, that waits for write to happen. /// This is similar to tokio::sync::watch, except one difference: -/// * tokio::sync::watch requires existing receiver to work. If no subscriber is registered, and the value is sent to channel, the value is dropped -/// * Unlike with tokio::sync::watch, it is possible to write to AsyncOnceCell when no readers are registered, and value will be available later when AsyncOnceCell::get is called +/// * tokio::sync::watch requires an existing receiver to work. If no subscriber is +/// registered and the value is sent to the channel, the value is dropped +/// * Unlike with tokio::sync::watch, it is possible to write to AsyncOnceCell +/// when no readers are registered, and the value will be available later when +/// AsyncOnceCell::get is called pub struct AsyncOnceCell { value: Arc>>, writer: Mutex>>>, diff --git a/crates/mysten-common/src/sync/notify_once.rs b/crates/mysten-common/src/sync/notify_once.rs index d83f2a7da1f..8e52a478969 100644 --- a/crates/mysten-common/src/sync/notify_once.rs +++ b/crates/mysten-common/src/sync/notify_once.rs @@ -1,16 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use parking_lot::Mutex; use std::sync::Arc; -use tokio::sync::futures::Notified; -use tokio::sync::Notify; -/// Notify once allows waiter to register for certain conditions and unblocks waiter -/// when condition is signalled with `notify` method.
+use parking_lot::Mutex; +use tokio::sync::{futures::Notified, Notify}; + +/// Notify once allows a waiter to register for certain conditions and unblocks +/// the waiter when the condition is signalled with the `notify` method. /// -/// The functionality is somewhat similar to a tokio watch channel with subscribe method, -/// however it is much less error prone to use NotifyOnce rather then tokio watch. +/// The functionality is somewhat similar to a tokio watch channel with +/// subscribe method, however it is much less error prone to use NotifyOnce +/// rather than tokio watch. /// /// Specifically with tokio watch you may miss notification, /// if you subscribe to it after the value was changed @@ -49,11 +50,12 @@ impl NotifyOnce { /// This future is cancellation safe. pub async fn wait(&self) { // Note that we only hold lock briefly when registering for notification - // There is a bit of a trickery here with lock - we take a lock and if it is not empty, - // we register .notified() first and then release lock + // There is a bit of trickery here with the lock - we take the lock and if it is + // not empty, we register .notified() first and then release the lock // - // This is to make sure no notification is lost because Notify::notify_waiters do not - // notify waiters that register **after** notify_waiters was called + // This is to make sure no notification is lost because Notify::notify_waiters + // does not notify waiters that register **after** notify_waiters was + // called let mut notify = None; let notified = self.make_notified(&mut notify); @@ -83,18 +85,23 @@ impl Default for NotifyOnce { async fn notify_once_test() { let notify_once = NotifyOnce::new(); // Before notify() is called .wait() is not ready - assert!(futures::future::poll_immediate(notify_once.wait()) - .await - .is_none()); + assert!( + futures::future::poll_immediate(notify_once.wait()) + .await + .is_none() + ); let wait = notify_once.wait(); notify_once.notify().unwrap(); // Pending wait() call is ready now assert!(futures::future::poll_immediate(wait).await.is_some()); // Take wait future and don't resolve it. - // This makes sure lock is dropped properly and wait futures resolve independently of each other + // This makes sure lock is dropped properly and wait futures resolve + // independently of each other let _dangle_wait = notify_once.wait(); // Any new wait() is immediately ready - assert!(futures::future::poll_immediate(notify_once.wait()) - .await - .is_some()); + assert!( + futures::future::poll_immediate(notify_once.wait()) + .await + .is_some() + ); } diff --git a/crates/mysten-common/src/sync/notify_read.rs b/crates/mysten-common/src/sync/notify_read.rs index be95f3fab86..98e7594cf1e 100644 --- a/crates/mysten-common/src/sync/notify_read.rs +++ b/crates/mysten-common/src/sync/notify_read.rs @@ -1,17 +1,17 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use parking_lot::Mutex; -use parking_lot::MutexGuard; -use std::collections::hash_map::DefaultHasher; -use std::collections::HashMap; -use std::future::Future; -use std::hash::{Hash, Hasher}; -use std::mem; -use std::pin::Pin; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::task::{Context, Poll}; +use std::{ + collections::{hash_map::DefaultHasher, HashMap}, + future::Future, + hash::{Hash, Hasher}, + mem, + pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, Poll}, +}; + +use parking_lot::{Mutex, MutexGuard}; use tokio::sync::oneshot; type Registrations = Vec>; @@ -31,7 +31,8 @@ impl NotifyRead { } } - /// Asynchronously notifies waiters and return number of remaining pending registration + /// Asynchronously notifies waiters and returns the number of remaining + /// pending registrations pub fn notify(&self, key: &K, value: &V) -> usize { let registrations = self.pending(key).remove(key); let Some(registrations) = registrations else { @@ -116,7 +117,8 @@ impl NotifyRead { } /// Registration resolves to the value but also provides safe cancellation -/// When Registration is dropped before it is resolved, we de-register from the pending list +/// When Registration is dropped before it is resolved, we de-register from the +/// pending list pub struct Registration<'a, K: Eq + Hash + Clone, V: Clone> { this: &'a NotifyRead, registration: Option<(K, oneshot::Receiver)>, } @@ -157,9 +159,10 @@ impl Default for NotifyRead { #[cfg(test)] mod tests { - use super::*; use futures::future::join_all; + use super::*; + #[tokio::test] pub async fn test_notify_read() { let notify_read = NotifyRead::::new(); diff --git a/crates/mysten-metrics/src/guards.rs b/crates/mysten-metrics/src/guards.rs index 2eeb2e42dc1..d23c519a57c 100644 --- a/crates/mysten-metrics/src/guards.rs +++ b/crates/mysten-metrics/src/guards.rs @@ -1,10 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use prometheus::IntGauge; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; /// Increments gauge when acquired, decrements when guard drops pub struct GaugeGuard<'a>(&'a IntGauge); diff --git a/crates/mysten-metrics/src/histogram.rs b/crates/mysten-metrics/src/histogram.rs index 8e7d8bf4aa6..0bb007fdea7 100644 --- a/crates/mysten-metrics/src/histogram.rs +++ b/crates/mysten-metrics/src/histogram.rs @@ -1,24 +1,28 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::monitored_scope; +use std::{ + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::{Hash, Hasher}, + sync::Arc, + time::Duration, +}; + use futures::FutureExt; use parking_lot::Mutex; use prometheus::{ register_int_counter_vec_with_registry, register_int_gauge_vec_with_registry, IntCounterVec, IntGaugeVec, Registry, }; -use std::collections::hash_map::DefaultHasher; -use std::collections::{HashMap, HashSet}; -use std::hash::{Hash, Hasher}; -use std::sync::Arc; -use std::time::Duration; -use tokio::runtime::Handle; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::TrySendError; -use tokio::time::Instant; +use tokio::{ + runtime::Handle, + sync::{mpsc, mpsc::error::TrySendError}, + time::Instant, +}; use tracing::{debug, error}; +use crate::monitored_scope; + type Point = u64; type HistogramMessage = (HistogramLabels, Point); @@ -60,30 +64,37 @@ struct HistogramLabelsInner { } /// Reports the histogram to the given prometheus gauge. -/// Unlike the histogram from prometheus crate, this histogram does not require to specify buckets -/// It works by calculating 'true' histogram by aggregating and sorting values. +/// Unlike the histogram from the prometheus crate, this histogram does not +/// require specifying buckets. It works by calculating a 'true' histogram by +/// aggregating and sorting values. /// -/// The values are reported into prometheus gauge with requested labels and additional dimension -/// for the histogram percentile. +/// The values are reported into a prometheus gauge with the requested labels +/// and an additional dimension for the histogram percentile. /// -/// It worth pointing out that due to those more precise calculations, this Histogram usage -/// is somewhat more limited comparing to original prometheus Histogram. +/// It is worth pointing out that due to those more precise calculations, this +/// Histogram usage is somewhat more limited compared to the original prometheus +/// Histogram. /// -/// On the bright side, this histogram exports less data to Prometheus comparing to prometheus::Histogram, -/// it exports each requested percentile into separate prometheus gauge, while original implementation creates -/// gauge per bucket. +/// On the bright side, this histogram exports less data to Prometheus compared +/// to prometheus::Histogram: it exports each requested percentile into a separate +/// prometheus gauge, while the original implementation creates a gauge per bucket. /// It also exports _sum and _count aggregates same as original implementation. /// -/// It is ok to measure timings for things like network latencies and expensive crypto operations. -/// However as a rule of thumb this histogram should not be used in places that can produce very high data point count. +/// It is ok to measure timings for things like network latencies and expensive +/// crypto operations. However, as a rule of thumb this histogram should not be +/// used in places that can produce a very high data point count. /// -/// As a last round of defence this histogram emits error log when too much data is flowing in and drops data points. +/// As a last round of defence this histogram emits an error log when too much +/// data is flowing in and drops data points.
/// -/// This implementation puts great deal of effort to make sure the metric does not cause any harm to the code itself: +/// This implementation puts a great deal of effort into making sure the metric +/// does not cause any harm to the code itself: /// * Reporting data point is a non-blocking send to a channel /// * Data point collections tries to clear the channel as fast as possible -/// * Expensive histogram calculations are done in a separate blocking tokio thread pool to avoid effects on main scheduler -/// * If histogram data is produced too fast, the data is dropped and error! log is emitted +/// * Expensive histogram calculations are done in a separate blocking tokio +/// thread pool to avoid effects on the main scheduler +/// * If histogram data is produced too fast, the data is dropped and an error! +/// log is emitted impl HistogramVec { pub fn new_in_registry(name: &str, desc: &str, labels: &[&str], registry: &Registry) -> Self { Self::new_in_registry_with_percentiles( @@ -95,7 +106,8 @@ impl HistogramVec { ) } - /// Allows to specify percentiles in 1/1000th, e.g. 90pct is specified as 900 + /// Allows specifying percentiles in 1/1000th, e.g. 90pct is specified as + /// 900 pub fn new_in_registry_with_percentiles( name: &str, desc: &str, @@ -114,7 +126,8 @@ impl HistogramVec { Self::new(gauge, sum, count, percentiles, name) } - // Do not expose it to public interface because we need labels to have a specific format (e.g. add last label is "pct") + // Do not expose it to the public interface because we need labels to have a + // specific format (e.g. the last label added is "pct") fn new( gauge: IntGaugeVec, sum: IntCounterVec, @@ -205,7 +218,8 @@ impl HistogramCollector { pub async fn run(mut self) { let mut deadline = Instant::now(); loop { - // We calculate deadline here instead of just using sleep inside cycle to avoid accumulating error + // We calculate deadline here instead of just using sleep inside cycle to avoid + // accumulating error #[cfg(test)] const HISTOGRAM_WINDOW_SEC: u64 = 1; #[cfg(not(test))] @@ -248,7 +262,10 @@ impl HistogramCollector { } if Arc::strong_count(&self.reporter) != 1 { #[cfg(not(debug_assertions))] - error!("Histogram data overflow - we receive histogram data for {} faster then can process. Some histogram data is dropped", self._name); + error!( + "Histogram data overflow - we receive histogram data for {} faster than we can process. Some histogram data is dropped", + self._name + ); } else { let reporter = self.reporter.clone(); Handle::current().spawn_blocking(move || reporter.lock().report(labeled_data)); } @@ -318,9 +335,10 @@ impl<'a> Drop for HistogramTimerGuard<'a> { #[cfg(test)] mod tests { - use super::*; use prometheus::proto::MetricFamily; + use super::*; + #[test] fn pct_index_test() { assert_eq!(200, HistogramReporter::pct1000_index(1000, 200)); diff --git a/crates/mysten-metrics/src/lib.rs b/crates/mysten-metrics/src/lib.rs index 98aa7b83507..f445888a7a0 100644 --- a/crates/mysten-metrics/src/lib.rs +++ b/crates/mysten-metrics/src/lib.rs @@ -1,21 +1,22 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::{ + future::Future, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Instant, +}; + use axum::{extract::Extension, http::StatusCode, routing::get, Router}; use dashmap::DashMap; -use std::future::Future; -use std::net::SocketAddr; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Instant; - use once_cell::sync::OnceCell; use prometheus::{register_int_gauge_vec_with_registry, IntGaugeVec, Registry, TextEncoder}; +pub use scopeguard; use tap::TapFallible; use tracing::warn; - -pub use scopeguard; use uuid::Uuid; mod guards; @@ -100,9 +101,7 @@ pub fn get_metrics() -> Option<&'static Metrics> { #[macro_export] macro_rules! monitored_future { - ($fut: expr) => {{ - monitored_future!(futures, $fut, "", INFO, false) - }}; + ($fut: expr) => {{ monitored_future!(futures, $fut, "", INFO, false) }}; ($metric: ident, $fut: expr, $name: expr, $logging_level: ident, $logging_enabled: expr) => {{ let location: &str = if $name.is_empty() { @@ -201,13 +200,15 @@ impl Drop for MonitoredScopeGuard { } /// This function creates a named scoped object, that keeps track of -/// - the total iterations where the scope is called in the `monitored_scope_iterations` metric. -/// - and the total duration of the scope in the `monitored_scope_duration_ns` metric. +/// - the total iterations where the scope is called in the +/// `monitored_scope_iterations` metric. +/// - and the total duration of the scope in the `monitored_scope_duration_ns` +/// metric. /// -/// The monitored scope should be single threaded, e.g. the scoped object encompass the lifetime of -/// a select loop or guarded by mutex. -/// Then the rate of `monitored_scope_duration_ns`, converted to the unit of sec / sec, would be -/// how full the single threaded scope is running. +/// The monitored scope should be single threaded, e.g. the scoped object +/// should encompass the lifetime of a select loop or be guarded by a mutex. +/// Then the rate of `monitored_scope_duration_ns`, converted to the unit of +/// sec/sec, shows how busy the single threaded scope is. pub fn monitored_scope(name: &'static str) -> Option { let metrics = get_metrics(); if let Some(m) = metrics { @@ -251,9 +252,9 @@ impl Future for MonitoredScopeFuture { pub type RegistryID = Uuid; -/// A service to manage the prometheus registries. This service allow us to create -/// a new Registry on demand and keep it accessible for processing/polling. -/// The service can be freely cloned/shared across threads. +/// A service to manage the prometheus registries. This service allows us to +/// create a new Registry on demand and keep it accessible for +/// processing/polling. The service can be freely cloned/shared across threads. #[derive(Clone)] pub struct RegistryService { // Holds a Registry that is supposed to be used @@ -262,8 +263,8 @@ pub struct RegistryService { } impl RegistryService { - // Creates a new registry service and also adds the main/default registry that is supposed to - // be preserved and never get removed + // Creates a new registry service and also adds the main/default registry that + // is supposed to be preserved and never get removed pub fn new(default_registry: Registry) -> Self { Self { default_registry, @@ -277,10 +278,11 @@ impl RegistryService { self.default_registry.clone() } - // Adds a new registry to the service. The corresponding RegistryID is returned so can later be
-    // As this can be quite serious for the operation of the node we don't want to accidentally
-    // swap an existing registry - we expected a removal to happen explicitly.
+    // Adds a new registry to the service. The corresponding RegistryID is returned
+    // so it can later be used for removing the Registry. Method panics if we try
+    // to insert a registry with the same id. As this can be quite serious for
+    // the operation of the node we don't want to accidentally swap an existing
+    // registry - we expect a removal to happen explicitly.
     pub fn add(&self, registry: Registry) -> RegistryID {
         let registry_id = Uuid::new_v4();
         if self
@@ -294,8 +296,8 @@
         registry_id
     }

-    // Removes the registry from the service. If Registry existed then this method returns true,
-    // otherwise false is returned instead.
+    // Removes the registry from the service. If the Registry existed then this
+    // method returns true, otherwise false is returned instead.
     pub fn remove(&self, registry_id: RegistryID) -> bool {
         self.registries_by_id.remove(&registry_id).is_some()
     }
@@ -318,11 +320,14 @@
     }
 }

-/// Create a metric that measures the uptime from when this metric was constructed.
-/// The metric is labeled with:
-/// - 'process': the process type, differentiating between validator and fullnode
-/// - 'version': binary version, generally be of the format: 'semver-gitrevision'
-/// - 'chain_identifier': the identifier of the network which this process is part of
+/// Create a metric that measures the uptime from when this metric was
+/// constructed. The metric is labeled with:
+/// - 'process': the process type, differentiating between validator and
+///   fullnode
+/// - 'version': binary version, generally of the format:
+///   'semver-gitrevision'
+/// - 'chain_identifier': the identifier of the network which this process is
+///   part of
 pub fn uptime_metric(
     process: &str,
     version: &'static str,
@@ -350,15 +355,16 @@ pub const METRICS_ROUTE: &str = "/metrics";

 // Creates a new http server that has as a sole purpose to expose
 // and endpoint that prometheus agent can use to poll for the metrics.
-// A RegistryService is returned that can be used to get access in prometheus Registries.
+// A RegistryService is returned that can be used to get access to prometheus
+// Registries.
 pub fn start_prometheus_server(addr: SocketAddr) -> RegistryService {
     let registry = Registry::new();
     let registry_service = RegistryService::new(registry);
     if cfg!(msim) {
-        // prometheus uses difficult-to-support features such as TcpSocket::from_raw_fd(), so we
-        // can't yet run it in the simulator.
+        // prometheus uses difficult-to-support features such as
+        // TcpSocket::from_raw_fd(), so we can't yet run it in the simulator.
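To make the sec/sec reading in the `monitored_scope` docs above concrete, here is a sketch of guarding a single-threaded loop and deriving its utilization on the Prometheus side (only the metric names come from this file; the `name` label in the query is an assumption):

async fn run_main_loop() {
    loop {
        // One guard per iteration; its Drop records the elapsed nanoseconds.
        let _scope = monitored_scope("ConsensusMainLoop");
        // ... handle one event ...
    }
}

// Utilization in [0, 1], assuming the scope covers the whole loop body:
//   rate(monitored_scope_duration_ns{name="ConsensusMainLoop"}[1m]) / 1e9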
 warn!("not starting prometheus server in simulator");
         return registry_service;
     }
@@ -392,9 +398,9 @@ pub async fn metrics(

 #[cfg(test)]
 mod tests {
+    use prometheus::{IntCounter, Registry};
+
     use crate::RegistryService;
-    use prometheus::IntCounter;
-    use prometheus::Registry;

     #[test]
     fn registry_service() {
diff --git a/crates/mysten-metrics/src/metered_channel.rs b/crates/mysten-metrics/src/metered_channel.rs
index 8cc973848e7..1379ff3a560 100644
--- a/crates/mysten-metrics/src/metered_channel.rs
+++ b/crates/mysten-metrics/src/metered_channel.rs
@@ -2,12 +2,16 @@
 // SPDX-License-Identifier: Apache-2.0
 #![allow(dead_code)]

+use std::{
+    future::Future,
+    task::{Context, Poll},
+};
+
 use async_trait::async_trait;
-use std::future::Future;
-// TODO: complete tests - This kinda sorta facades the whole tokio::mpsc::{Sender, Receiver}: without tests, this will be fragile to maintain.
+// TODO: complete tests - This kinda sorta facades the whole tokio::mpsc::{Sender, Receiver}:
+// without tests, this will be fragile to maintain.
 use futures::{FutureExt, Stream, TryFutureExt};
 use prometheus::{IntCounter, IntGauge};
-use std::task::{Context, Poll};
 use tokio::sync::mpsc::{
     self,
     error::{SendError, TryRecvError, TrySendError},
@@ -166,7 +170,8 @@ impl<'a, T> Permit<'a, T> {
 impl<'a, T> Drop for Permit<'a, T> {
     fn drop(&mut self) {
-        // in the case the permit is dropped without sending, we still want to decrease the occupancy of the channel
+        // in case the permit is dropped without sending, we still want to decrease
+        // the occupancy of the channel
         self.gauge_ref.dec()
     }
 }
@@ -198,8 +203,10 @@ impl<T> Sender<T> {
         })
     }

-    // TODO: facade [`send_timeout`](tokio::mpsc::Sender::send_timeout) under the tokio feature flag "time"
-    // TODO: facade [`blocking_send`](tokio::mpsc::Sender::blocking_send) under the tokio feature flag "sync"
+    // TODO: facade [`send_timeout`](tokio::mpsc::Sender::send_timeout) under the
+    // tokio feature flag "time"
+    // TODO: facade [`blocking_send`](tokio::mpsc::Sender::blocking_send) under the
+    // tokio feature flag "sync"

     /// Checks if the channel has been closed. This happens when the
     /// [`Receiver`] is dropped, or when the [`Receiver::close`] method is
@@ -224,8 +231,8 @@ impl<T> Sender<T> {
         .await
     }

-    /// Tries to acquire a slot in the channel without waiting for the slot to become
-    /// available.
+    /// Tries to acquire a slot in the channel without waiting for the slot to
+    /// become available.
     /// Increments the gauge in case of a successful `try_reserve`.
     pub fn try_reserve(&self) -> Result<Permit<'_, T>, TrySendError<()>> {
         self.inner.try_reserve().map(|val| {
@@ -237,8 +244,8 @@ impl<T> Sender<T> {
     // TODO: consider exposing the _owned methods

-    // Note: not exposing `same_channel`, as it is hard to implement with callers able to
-    // break the coupling between channel and gauge using `gauge`.
+    // Note: not exposing `same_channel`, as it is hard to implement with callers
+    // able to break the coupling between channel and gauge using `gauge`.

     /// Returns the current capacity of the channel.
     pub fn capacity(&self) -> usize {
@@ -257,8 +264,8 @@ impl<T> Sender<T> {
 /// Stream API Wrappers!
 ////////////////////////////////

-/// A wrapper around [`crate::metered_channel::Receiver`] that implements [`Stream`].
-///
+/// A wrapper around [`crate::metered_channel::Receiver`] that implements
+/// [`Stream`].
#[derive(Debug)] pub struct ReceiverStream { inner: Receiver, @@ -311,13 +318,15 @@ impl From> for ReceiverStream { } // TODO: facade PollSender -// TODO: add prom metrics reporting for gauge and migrate all existing use cases. +// TODO: add prom metrics reporting for gauge and migrate all existing use +// cases. //////////////////////////////////////////////////////////////// /// Constructor //////////////////////////////////////////////////////////////// -/// Similar to `mpsc::channel`, `channel` creates a pair of `Sender` and `Receiver` +/// Similar to `mpsc::channel`, `channel` creates a pair of `Sender` and +/// `Receiver` #[track_caller] pub fn channel(size: usize, gauge: &IntGauge) -> (Sender, Receiver) { gauge.set(0); diff --git a/crates/mysten-metrics/src/tests/metered_channel_tests.rs b/crates/mysten-metrics/src/tests/metered_channel_tests.rs index 0d59317ec6e..53f756e8fc1 100644 --- a/crates/mysten-metrics/src/tests/metered_channel_tests.rs +++ b/crates/mysten-metrics/src/tests/metered_channel_tests.rs @@ -1,7 +1,6 @@ // Copyright (c) 2021, Facebook, Inc. and its affiliates // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::{channel, channel_with_total}; use futures::{ task::{noop_waker, Context, Poll}, FutureExt, @@ -9,6 +8,8 @@ use futures::{ use prometheus::{IntCounter, IntGauge}; use tokio::sync::mpsc::error::TrySendError; +use super::{channel, channel_with_total}; + #[tokio::test] async fn test_send() { let counter = IntGauge::new("TEST_COUNTER", "test").unwrap(); diff --git a/crates/mysten-network/src/anemo_ext.rs b/crates/mysten-network/src/anemo_ext.rs index 8c984bfa40d..eb81e6698c4 100644 --- a/crates/mysten-network/src/anemo_ext.rs +++ b/crates/mysten-network/src/anemo_ext.rs @@ -1,19 +1,16 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anemo::codegen::BoxError; -use anemo::codegen::BoxFuture; -use anemo::codegen::Service; -use anemo::types::PeerEvent; -use anemo::Network; -use anemo::PeerId; -use anemo::Request; -use anemo::Response; -use bytes::Bytes; -use futures::future::OptionFuture; -use futures::FutureExt; use std::time::Instant; +use anemo::{ + codegen::{BoxError, BoxFuture, Service}, + types::PeerEvent, + Network, PeerId, Request, Response, +}; +use bytes::Bytes; +use futures::{future::OptionFuture, FutureExt}; + pub trait NetworkExt { fn waiting_peer(&self, peer_id: PeerId) -> WaitingPeer; } diff --git a/crates/mysten-network/src/callback/future.rs b/crates/mysten-network/src/callback/future.rs index ef41436b619..36407fb29c6 100644 --- a/crates/mysten-network/src/callback/future.rs +++ b/crates/mysten-network/src/callback/future.rs @@ -1,15 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::ResponseHandler; -use http::Response; -use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; +use http::Response; +use pin_project_lite::pin_project; + +use super::ResponseHandler; + pin_project! { /// Response future for [`Callback`]. /// diff --git a/crates/mysten-network/src/callback/layer.rs b/crates/mysten-network/src/callback/layer.rs index 301818140a5..75f56bf7450 100644 --- a/crates/mysten-network/src/callback/layer.rs +++ b/crates/mysten-network/src/callback/layer.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. 
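A sketch of how the metered facade above is meant to be used. The send/recv calls mirror `tokio::sync::mpsc` (that mirroring is an assumption here), while the gauge movements follow the `Permit` drop behaviour shown earlier:

use prometheus::IntGauge;

async fn demo() {
    let inflight = IntGauge::new("channel_inflight", "items buffered in the channel").unwrap();
    let (tx, mut rx) = channel(128, &inflight);

    tx.send(42u64).await.unwrap(); // gauge: 0 -> 1
    let _item = rx.recv().await;   // gauge: 1 -> 0

    // Reserving bumps the gauge too; dropping the permit without sending
    // decrements it again, per the Drop impl above.
    let permit = tx.try_reserve().unwrap();
    drop(permit);
}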
// SPDX-License-Identifier: Apache-2.0 -use super::{Callback, MakeCallbackHandler}; use tower::Layer; +use super::{Callback, MakeCallbackHandler}; + /// [`Layer`] that adds callbacks to a [`Service`]. /// /// See the [module docs](crate::callback) for more details. diff --git a/crates/mysten-network/src/callback/service.rs b/crates/mysten-network/src/callback/service.rs index 8e7ffc08b17..447d46dcde0 100644 --- a/crates/mysten-network/src/callback/service.rs +++ b/crates/mysten-network/src/callback/service.rs @@ -1,11 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::{CallbackLayer, MakeCallbackHandler, ResponseFuture}; -use http::{Request, Response}; use std::task::{Context, Poll}; + +use http::{Request, Response}; use tower::Service; +use super::{CallbackLayer, MakeCallbackHandler, ResponseFuture}; + /// Middleware that adds callbacks to a [`Service`]. /// /// See the [module docs](crate::callback) for an example. @@ -26,7 +28,8 @@ impl Callback { } } - /// Returns a new [`Layer`] that wraps services with a [`CallbackLayer`] middleware. + /// Returns a new [`Layer`] that wraps services with a [`CallbackLayer`] + /// middleware. /// /// [`Layer`]: tower::layer::Layer pub fn layer(make_handler: M) -> CallbackLayer diff --git a/crates/mysten-network/src/client.rs b/crates/mysten-network/src/client.rs index 1d7a1fafed2..b3f6e66cdff 100644 --- a/crates/mysten-network/src/client.rs +++ b/crates/mysten-network/src/client.rs @@ -1,12 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use eyre::{eyre, Context, Result}; +use tonic::transport::{Channel, Endpoint, Uri}; + use crate::{ config::Config, multiaddr::{parse_dns, parse_ip4, parse_ip6, Multiaddr, Protocol}, }; -use eyre::{eyre, Context, Result}; -use tonic::transport::{Channel, Endpoint, Uri}; pub async fn connect(address: &Multiaddr) -> Result { let channel = endpoint_from_multiaddr(address)?.connect().await?; diff --git a/crates/mysten-network/src/codec.rs b/crates/mysten-network/src/codec.rs index 180cd2d9e11..219760f92a4 100644 --- a/crates/mysten-network/src/codec.rs +++ b/crates/mysten-network/src/codec.rs @@ -1,8 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use bytes::{Buf, BufMut}; use std::{io::Read, marker::PhantomData}; + +use bytes::{Buf, BufMut}; use tonic::{ codec::{Codec, DecodeBuf, Decoder, EncodeBuf, Encoder}, Status, @@ -105,8 +106,8 @@ impl Decoder for BcsSnappyDecoder { } } -/// A [`Codec`] that implements `bcs` encoding/decoding and snappy compression/decompression -/// via the serde library. +/// A [`Codec`] that implements `bcs` encoding/decoding and snappy +/// compression/decompression via the serde library. #[derive(Debug, Clone)] pub struct BcsSnappyCodec(PhantomData<(T, U)>); @@ -137,9 +138,10 @@ where // Anemo variant of BCS codec using Snappy for compression. pub mod anemo { + use std::{io::Read, marker::PhantomData}; + use ::anemo::rpc::codec::{Codec, Decoder, Encoder}; use bytes::Buf; - use std::{io::Read, marker::PhantomData}; #[derive(Debug)] pub struct BcsSnappyEncoder(PhantomData); @@ -173,7 +175,8 @@ pub mod anemo { } } - /// A [`Codec`] that implements `bcs` encoding/decoding via the serde library. + /// A [`Codec`] that implements `bcs` encoding/decoding via the serde + /// library. 
#[derive(Debug, Clone)] pub struct BcsSnappyCodec(PhantomData<(T, U)>); diff --git a/crates/mysten-network/src/config.rs b/crates/mysten-network/src/config.rs index 1e59dbe75bc..cafb340fcb9 100644 --- a/crates/mysten-network/src/config.rs +++ b/crates/mysten-network/src/config.rs @@ -1,15 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::metrics::{DefaultMetricsCallbackProvider, MetricsCallbackProvider}; +use std::time::Duration; + +use eyre::Result; +use serde::{Deserialize, Serialize}; +use tonic::transport::Channel; + use crate::{ client::{connect_lazy_with_config, connect_with_config}, + metrics::{DefaultMetricsCallbackProvider, MetricsCallbackProvider}, server::ServerBuilder, Multiaddr, }; -use eyre::Result; -use serde::{Deserialize, Serialize}; -use std::time::Duration; -use tonic::transport::Channel; #[derive(Debug, Default, Deserialize, Serialize)] pub struct Config { @@ -22,8 +24,8 @@ pub struct Config { /// Set a timeout for establishing an outbound connection. pub connect_timeout: Option, - /// Sets the SETTINGS_INITIAL_WINDOW_SIZE option for HTTP2 stream-level flow control. - /// Default is 65,535 + /// Sets the SETTINGS_INITIAL_WINDOW_SIZE option for HTTP2 stream-level flow + /// control. Default is 65,535 pub http2_initial_stream_window_size: Option, /// Sets the max connection-level flow control for HTTP2 @@ -38,19 +40,22 @@ pub struct Config { /// Set whether TCP keepalive messages are enabled on accepted connections. /// - /// If None is specified, keepalive is disabled, otherwise the duration specified will be the - /// time to remain idle before sending TCP keepalive probes. + /// If None is specified, keepalive is disabled, otherwise the duration + /// specified will be the time to remain idle before sending TCP + /// keepalive probes. /// /// Default is no keepalive (None) pub tcp_keepalive: Option, - /// Set the value of TCP_NODELAY option for accepted connections. Enabled by default. + /// Set the value of TCP_NODELAY option for accepted connections. Enabled by + /// default. pub tcp_nodelay: Option, /// Set whether HTTP2 Ping frames are enabled on accepted connections. /// - /// If None is specified, HTTP2 keepalive is disabled, otherwise the duration specified will be - /// the time interval between HTTP2 Ping frames. The timeout for receiving an acknowledgement + /// If None is specified, HTTP2 keepalive is disabled, otherwise the + /// duration specified will be the time interval between HTTP2 Ping + /// frames. The timeout for receiving an acknowledgement /// of the keepalive ping can be set with http2_keepalive_timeout. /// /// Default is no HTTP2 keepalive (None) @@ -58,8 +63,8 @@ pub struct Config { /// Sets a timeout for receiving an acknowledgement of the keepalive ping. /// - /// If the ping is not acknowledged within the timeout, the connection will be closed. Does nothing - /// if http2_keep_alive_interval is disabled. + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if http2_keep_alive_interval is disabled. /// /// Default is 20 seconds. pub http2_keepalive_timeout: Option, diff --git a/crates/mysten-network/src/metrics.rs b/crates/mysten-network/src/metrics.rs index 3112f439533..7610ac37a26 100644 --- a/crates/mysten-network/src/metrics.rs +++ b/crates/mysten-network/src/metrics.rs @@ -1,11 +1,15 @@ // Copyright (c) Mysten Labs, Inc. 
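Given the `Default` derive and the fields above, an operator-side construction might look like the following sketch (values are illustrative only; unspecified fields keep their defaults):

use std::time::Duration;

fn make_config() -> Config {
    Config {
        connect_timeout: Some(Duration::from_secs(5)),
        tcp_nodelay: Some(true),
        // Probe idle TCP connections after 60s of silence.
        tcp_keepalive: Some(Duration::from_secs(60)),
        // Close the connection if a keepalive ping is unacknowledged for 20s.
        http2_keepalive_timeout: Some(Duration::from_secs(20)),
        ..Default::default()
    }
}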
// SPDX-License-Identifier: Apache-2.0
 use std::time::Duration;
-use tonic::codegen::http::header::HeaderName;
-use tonic::codegen::http::{HeaderValue, Request, Response};
-use tonic::{Code, Status};
-use tower_http::classify::GrpcFailureClass;
-use tower_http::trace::{OnFailure, OnRequest, OnResponse};
+
+use tonic::{
+    codegen::http::{header::HeaderName, HeaderValue, Request, Response},
+    Code, Status,
+};
+use tower_http::{
+    classify::GrpcFailureClass,
+    trace::{OnFailure, OnRequest, OnResponse},
+};
 use tracing::Span;

 pub(crate) static GRPC_ENDPOINT_PATH_HEADER: HeaderName = HeaderName::from_static("grpc-path-req");
@@ -24,8 +28,8 @@ pub trait MetricsCallbackProvider: Send + Sync + Clone + 'static {
     /// Method to be called from the server when a request is performed.
     /// `path`: the endpoint uri path
-    /// `latency`: the time when the request was received and when the response was created
-    /// `status`: the http status code of the response
+    /// `latency`: the time between when the request was received and when the
+    /// response was created
+    /// `status`: the http status code of the response
     /// `grpc_status_code`: the grpc status code (see )
     fn on_response(&self, path: String, latency: Duration, status: u16, grpc_status_code: Code);

@@ -33,7 +37,8 @@
     fn on_start(&self, _path: &str) {}

     /// Called when request call is dropped.
-    /// It is guaranteed that for each on_start there will be corresponding on_drop
+    /// It is guaranteed that for each on_start there will be a corresponding
+    /// on_drop
     fn on_drop(&self, _path: &str) {}
 }

diff --git a/crates/mysten-network/src/multiaddr.rs b/crates/mysten-network/src/multiaddr.rs
index 76a3c202b4c..d5d147af3cf 100644
--- a/crates/mysten-network/src/multiaddr.rs
+++ b/crates/mysten-network/src/multiaddr.rs
@@ -1,15 +1,14 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0

-use eyre::{eyre, Result};
 use std::{
     borrow::Cow,
     net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
 };
-use tracing::error;

-pub use ::multiaddr::Error;
-pub use ::multiaddr::Protocol;
+pub use ::multiaddr::{Error, Protocol};
+use eyre::{eyre, Result};
+use tracing::error;

 #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Multiaddr(::multiaddr::Multiaddr);
@@ -51,8 +50,8 @@ impl Multiaddr {
         self.0.is_empty()
     }

-    /// Attempts to convert a multiaddr of the form `/[ip4,ip6,dns]/{}/udp/{port}` into an anemo
-    /// address
+    /// Attempts to convert a multiaddr of the form
+    /// `/[ip4,ip6,dns]/{}/udp/{port}` into an anemo address
     pub fn to_anemo_address(&self) -> Result {
         let mut iter = self.iter();

@@ -86,8 +85,9 @@ impl Multiaddr {
     }

     // Converts a /ip{4,6}/-/tcp/-[/-] Multiaddr to SocketAddr.
-    // Useful when an external library only accepts SocketAddr, e.g. to start a local server.
-    // See `client::endpoint_from_multiaddr()` for converting to Endpoint for clients.
+    // Useful when an external library only accepts SocketAddr, e.g. to start a
+    // local server. See `client::endpoint_from_multiaddr()` for converting to
+    // Endpoint for clients.
     pub fn to_socket_addr(&self) -> Result {
         let mut iter = self.iter();
         let ip = match iter.next().ok_or_else(|| {
@@ -101,9 +101,10 @@
         Ok(SocketAddr::new(ip, tcp_port))
     }

-    /// Set the ip address to `0.0.0.0`. For instance, it converts the following address
-    /// `/ip4/155.138.174.208/tcp/1500/http` into `/ip4/0.0.0.0/tcp/1500/http`.
-    /// This is useful when starting a server and you want to listen on all interfaces.
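A sketch of a provider implementation for the metrics trait above, assuming `on_response` is the only method without a default body (the bounds and the signatures are the ones shown in this diff):

use std::time::Duration;
use tonic::Code;

#[derive(Clone)]
struct LoggingMetrics;

impl MetricsCallbackProvider for LoggingMetrics {
    fn on_response(&self, path: String, latency: Duration, status: u16, grpc_status_code: Code) {
        // A real provider would update prometheus counters/histograms here.
        tracing::info!(%path, ?latency, status, ?grpc_status_code, "grpc response");
    }
    // on_start/on_drop keep their default no-op bodies; the trait guarantees
    // each on_start is matched by a corresponding on_drop.
}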
+ /// Set the ip address to `0.0.0.0`. For instance, it converts the following + /// address `/ip4/155.138.174.208/tcp/1500/http` into + /// `/ip4/0.0.0.0/tcp/1500/http`. This is useful when starting a server + /// and you want to listen on all interfaces. pub fn with_zero_ip(&self) -> Self { let mut new_address = self.0.clone(); let Some(protocol) = new_address.iter().next() else { @@ -130,8 +131,9 @@ impl Multiaddr { Self(new_address) } - /// Set the ip address to `127.0.0.1`. For instance, it converts the following address - /// `/ip4/155.138.174.208/tcp/1500/http` into `/ip4/127.0.0.1/tcp/1500/http`. + /// Set the ip address to `127.0.0.1`. For instance, it converts the + /// following address `/ip4/155.138.174.208/tcp/1500/http` into + /// `/ip4/127.0.0.1/tcp/1500/http`. pub fn with_localhost_ip(&self) -> Self { let mut new_address = self.0.clone(); let Some(protocol) = new_address.iter().next() else { @@ -339,9 +341,10 @@ pub(crate) fn parse_unix(address: &Multiaddr) -> Result<(Cow<'_, str>, &'static #[cfg(test)] mod test { - use super::Multiaddr; use multiaddr::multiaddr; + use super::Multiaddr; + #[test] fn test_to_socket_addr_basic() { let multi_addr_ipv4 = Multiaddr(multiaddr!(Ip4([127, 0, 0, 1]), Tcp(10500u16))); diff --git a/crates/mysten-network/src/server.rs b/crates/mysten-network/src/server.rs index 7842ef71008..7e6436e4194 100644 --- a/crates/mysten-network/src/server.rs +++ b/crates/mysten-network/src/server.rs @@ -1,24 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::metrics::{ - DefaultMetricsCallbackProvider, MetricsCallbackProvider, MetricsHandler, - GRPC_ENDPOINT_PATH_HEADER, -}; -use crate::{ - config::Config, - multiaddr::{parse_dns, parse_ip4, parse_ip6, Multiaddr, Protocol}, +use std::{ + convert::Infallible, + net::SocketAddr, + task::{Context, Poll}, }; + use eyre::{eyre, Result}; use futures::FutureExt; -use std::task::{Context, Poll}; -use std::{convert::Infallible, net::SocketAddr}; use tokio::net::{TcpListener, ToSocketAddrs}; use tokio_stream::wrappers::TcpListenerStream; -use tonic::codegen::http::HeaderValue; use tonic::{ body::BoxBody, codegen::{ - http::{Request, Response}, + http::{HeaderValue, Request, Response}, BoxFuture, }, server::NamedService, @@ -31,10 +26,21 @@ use tower::{ util::Either, Layer, Service, ServiceBuilder, }; -use tower_http::classify::{GrpcErrorsAsFailures, SharedClassifier}; -use tower_http::propagate_header::PropagateHeaderLayer; -use tower_http::set_header::SetRequestHeaderLayer; -use tower_http::trace::{DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, TraceLayer}; +use tower_http::{ + classify::{GrpcErrorsAsFailures, SharedClassifier}, + propagate_header::PropagateHeaderLayer, + set_header::SetRequestHeaderLayer, + trace::{DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, TraceLayer}, +}; + +use crate::{ + config::Config, + metrics::{ + DefaultMetricsCallbackProvider, MetricsCallbackProvider, MetricsHandler, + GRPC_ENDPOINT_PATH_HEADER, + }, + multiaddr::{parse_dns, parse_ip4, parse_ip6, Multiaddr, Protocol}, +}; pub struct ServerBuilder { router: Router>, @@ -272,15 +278,16 @@ fn update_tcp_port_in_multiaddr(addr: &Multiaddr, port: u16) -> Multiaddr { #[cfg(test)] mod test { - use crate::config::Config; - use crate::metrics::MetricsCallbackProvider; - use crate::Multiaddr; - use std::ops::Deref; - use std::sync::{Arc, Mutex}; - use std::time::Duration; + use std::{ + ops::Deref, + sync::{Arc, Mutex}, + time::Duration, + }; + use tonic::Code; - use 
tonic_health::pb::health_client::HealthClient; - use tonic_health::pb::HealthCheckRequest; + use tonic_health::pb::{health_client::HealthClient, HealthCheckRequest}; + + use crate::{config::Config, metrics::MetricsCallbackProvider, Multiaddr}; #[test] fn document_multiaddr_limitation_for_unix_protocol() { @@ -460,8 +467,8 @@ mod test { #[cfg(unix)] #[tokio::test] async fn unix() { - // Note that this only works when constructing a multiaddr by hand and not via the - // human-readable format + // Note that this only works when constructing a multiaddr by hand and not via + // the human-readable format let path = "unix-domain-socket"; let address = Multiaddr::new_internal(multiaddr::multiaddr!(Unix(path), Http)); test_multiaddr(address).await; diff --git a/crates/mysten-service-boilerplate/src/main.rs b/crates/mysten-service-boilerplate/src/main.rs index 56fb6cb3755..a39c0ff3934 100644 --- a/crates/mysten-service-boilerplate/src/main.rs +++ b/crates/mysten-service-boilerplate/src/main.rs @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use axum::extract::State; -use axum::routing::get; -use mysten_service::get_mysten_service; -use mysten_service::metrics::start_basic_prometheus_server; -use mysten_service::package_name; -use mysten_service::package_version; -use mysten_service::serve; +use axum::{extract::State, routing::get}; +use mysten_service::{ + get_mysten_service, metrics::start_basic_prometheus_server, package_name, package_version, + serve, +}; use prometheus::{register_int_counter_with_registry, IntCounter, Registry}; use tracing::debug; diff --git a/crates/mysten-service/src/health.rs b/crates/mysten-service/src/health.rs index 406302509ba..47e757dbd6f 100644 --- a/crates/mysten-service/src/health.rs +++ b/crates/mysten-service/src/health.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 /// service health related utilities -/// use serde::Serialize; #[derive(Debug, Clone, Serialize, PartialEq, Eq)] diff --git a/crates/mysten-service/src/lib.rs b/crates/mysten-service/src/lib.rs index 5af492b3ac4..0db49c4dfec 100644 --- a/crates/mysten-service/src/lib.rs +++ b/crates/mysten-service/src/lib.rs @@ -6,8 +6,7 @@ pub mod logging; pub mod metrics; mod service; -pub use service::get_mysten_service; -pub use service::serve; +pub use service::{get_mysten_service, serve}; pub const DEFAULT_PORT: u16 = 2024; diff --git a/crates/mysten-service/src/metrics.rs b/crates/mysten-service/src/metrics.rs index 109b60e1fb9..7ef0dd16ca0 100644 --- a/crates/mysten-service/src/metrics.rs +++ b/crates/mysten-service/src/metrics.rs @@ -1,9 +1,9 @@ // Copyright (c) Mysten Labs, Inc. 
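Putting the service pieces together, the boilerplate above reduces to roughly this sketch (`serve`'s exact signature is not visible in this diff, so taking the router and returning a `Result` is an assumption):

use anyhow::Result;
use mysten_service::{get_mysten_service, package_name, package_version, serve};

#[tokio::main]
async fn main() -> Result<()> {
    // Expose /metrics for the prometheus agent and keep the registry around
    // for registering application metrics.
    let _registry = mysten_service::metrics::start_basic_prometheus_server();

    // Router with the built-in health endpoint, served on DEFAULT_PORT.
    let app = get_mysten_service(package_name!(), package_version!());
    serve(app).await
}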
// SPDX-License-Identifier: Apache-2.0 +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use prometheus::Registry; -use std::net::SocketAddr; -use std::net::{IpAddr, Ipv4Addr}; pub const METRICS_HOST_PORT: u16 = 9184; @@ -38,8 +38,8 @@ pub use mysten_metrics::start_prometheus_server; /// /// #[tokio::main] /// async fn main() { -/// let prometheus_registry = mysten_service::metrics::start_basic_prometheus_server(); -/// let metrics = MyMetrics::new(&prometheus_registry); +/// let prometheus_registry = mysten_service::metrics::start_basic_prometheus_server(); +/// let metrics = MyMetrics::new(&prometheus_registry); /// } /// ``` pub fn start_basic_prometheus_server() -> Registry { diff --git a/crates/mysten-service/src/service.rs b/crates/mysten-service/src/service.rs index 599796b1f45..9d7e1c6b669 100644 --- a/crates/mysten-service/src/service.rs +++ b/crates/mysten-service/src/service.rs @@ -1,14 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::health::HealthResponse; -use crate::DEFAULT_PORT; use anyhow::Result; -use axum::routing::get; -use axum::Json; -use axum::Router; +use axum::{routing::get, Json, Router}; use tracing::debug; +use crate::{health::HealthResponse, DEFAULT_PORT}; + pub fn get_mysten_service(app_name: &str, app_version: &str) -> Router where S: Send + Clone + Sync + 'static, diff --git a/crates/mysten-service/tests/integration_test.rs b/crates/mysten-service/tests/integration_test.rs index f661be895e2..760a785b18d 100644 --- a/crates/mysten-service/tests/integration_test.rs +++ b/crates/mysten-service/tests/integration_test.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use axum::body::Body; -use axum::body::HttpBody; -use axum::http::Request; +use axum::{ + body::{Body, HttpBody}, + http::Request, +}; use tower::ServiceExt; #[tokio::test] diff --git a/crates/mysten-util-mem/src/allocators.rs b/crates/mysten-util-mem/src/allocators.rs index ca5822b8f58..91b5cddadca 100644 --- a/crates/mysten-util-mem/src/allocators.rs +++ b/crates/mysten-util-mem/src/allocators.rs @@ -9,14 +9,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[cfg(feature = "std")] -use crate::malloc_size::MallocUnconditionalSizeOf; -use crate::malloc_size::{MallocSizeOf, MallocSizeOfOps, VoidPtrToSizeFn}; #[cfg(not(feature = "std"))] use core::ffi::c_void; #[cfg(feature = "std")] use std::os::raw::c_void; +#[cfg(feature = "std")] +use crate::malloc_size::MallocUnconditionalSizeOf; +use crate::malloc_size::{MallocSizeOf, MallocSizeOfOps, VoidPtrToSizeFn}; + mod usable_size { use super::*; diff --git a/crates/mysten-util-mem/src/external_impls.rs b/crates/mysten-util-mem/src/external_impls.rs index 262fd82088c..d8168aba786 100644 --- a/crates/mysten-util-mem/src/external_impls.rs +++ b/crates/mysten-util-mem/src/external_impls.rs @@ -87,8 +87,8 @@ impl MallocShallowSizeOf for indexmap::Inde } } impl MallocSizeOf for indexmap::IndexMap { - // This only produces a rough estimate of IndexMap size, because we cannot access private - // fields to measure them precisely. + // This only produces a rough estimate of IndexMap size, because we cannot + // access private fields to measure them precisely. 
fn size_of(&self, ops: &mut crate::MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { @@ -104,8 +104,8 @@ impl MallocSizeOf for indexmap::IndexMap usize { self.serialized_size() } diff --git a/crates/mysten-util-mem/src/lib.rs b/crates/mysten-util-mem/src/lib.rs index 7491b9576b9..a07d3247654 100644 --- a/crates/mysten-util-mem/src/lib.rs +++ b/crates/mysten-util-mem/src/lib.rs @@ -32,10 +32,11 @@ pub mod allocators; pub mod sizeof; /// This is a copy of patched crate `malloc_size_of` as a module. -/// We need to have it as an inner module to be able to define our own traits implementation, -/// if at some point the trait become standard enough we could use the right way of doing it -/// by implementing it in our type traits crates. At this time moving this trait to the primitive -/// types level would impact too much of the dependencies to be easily manageable. +/// We need to have it as an inner module to be able to define our own traits +/// implementation, if at some point the trait become standard enough we could +/// use the right way of doing it by implementing it in our type traits crates. +/// At this time moving this trait to the primitive types level would impact too +/// much of the dependencies to be easily manageable. #[macro_use] mod malloc_size; @@ -43,7 +44,6 @@ pub mod external_impls; pub use allocators::MallocSizeOfExt; pub use malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps}; - pub use mysten_util_mem_derive::*; /// Heap size of structure. @@ -98,9 +98,10 @@ impl MemoryAllocationTracker { #[cfg(feature = "std")] #[cfg(test)] mod test { - use super::{malloc_size, MallocSizeOf, MallocSizeOfExt}; use std::sync::Arc; + use super::{malloc_size, MallocSizeOf, MallocSizeOfExt}; + #[test] fn test_arc() { let val = Arc::new("test".to_string()); diff --git a/crates/mysten-util-mem/src/malloc_size.rs b/crates/mysten-util-mem/src/malloc_size.rs index 2668bb13e71..261848ec042 100644 --- a/crates/mysten-util-mem/src/malloc_size.rs +++ b/crates/mysten-util-mem/src/malloc_size.rs @@ -24,30 +24,30 @@ //! are different to the ones for non-graph structures. //! //! Suggested uses are as follows. -//! - When possible, use the `MallocSizeOf` trait. (Deriving support is -//! provided by the `malloc_size_of_derive` crate.) +//! - When possible, use the `MallocSizeOf` trait. (Deriving support is provided +//! by the `malloc_size_of_derive` crate.) //! - If you need an additional synchronization argument, provide a function //! that is like the standard trait method, but with the extra argument. //! - If you need multiple measurements for a type, provide a function named -//! `add_size_of` that takes a mutable reference to a struct that contains -//! the multiple measurement fields. +//! `add_size_of` that takes a mutable reference to a struct that contains the +//! multiple measurement fields. //! - When deep measurement (via `MallocSizeOf`) cannot be implemented for a //! type, shallow measurement (via `MallocShallowSizeOf`) in combination with //! iteration can be a useful substitute. //! - `Rc` and `Arc` are always tricky, which is why `MallocSizeOf` is not (and //! should not be) implemented for them. -//! - If an `Rc` or `Arc` is known to be a "primary" reference and can always -//! be measured, it should be measured via the `MallocUnconditionalSizeOf` -//! trait. -//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen -//! 
before, it should be measured via the `MallocConditionalSizeOf` trait. +//! - If an `Rc` or `Arc` is known to be a "primary" reference and can always be +//! measured, it should be measured via the `MallocUnconditionalSizeOf` trait. +//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen before, +//! it should be measured via the `MallocConditionalSizeOf` trait. //! - Using universal function call syntax is a good idea when measuring boxed //! fields in structs, because it makes it clear that the Box is being -//! measured as well as the thing it points to. E.g. -//! ` as MallocSizeOf>::size_of(field, ops)`. +//! measured as well as the thing it points to. E.g. ` as +//! MallocSizeOf>::size_of(field, ops)`. //! This is an extended version of the Servo internal malloc_size crate. -//! We should occasionally track the upstream changes/fixes and reintroduce them here, whenever applicable. +//! We should occasionally track the upstream changes/fixes and reintroduce them +//! here, whenever applicable. #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -60,17 +60,22 @@ mod rstd { pub use core::*; pub mod collections { pub use alloc::collections::*; + pub use vec_deque::VecDeque; } } -#[cfg(feature = "std")] -use std::sync::Arc; - #[cfg(not(feature = "std"))] pub use alloc::boxed::Box; #[cfg(not(feature = "std"))] use core::ffi::c_void; +#[cfg(feature = "std")] +use std::hash::BuildHasher; +#[cfg(feature = "std")] +use std::os::raw::c_void; +#[cfg(feature = "std")] +use std::sync::Arc; + #[cfg(feature = "std")] use rstd::hash::Hash; use rstd::{ @@ -78,10 +83,6 @@ use rstd::{ mem::size_of, ops::{Deref, DerefMut, Range}, }; -#[cfg(feature = "std")] -use std::hash::BuildHasher; -#[cfg(feature = "std")] -use std::os::raw::c_void; /// A C function that takes a pointer to a heap allocation and returns its size. pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; @@ -169,7 +170,8 @@ impl MallocSizeOfOps { pub trait MallocSizeOf { /// Measure the heap usage of all descendant heap-allocated structures, but /// not the space taken up by the value itself. - /// If `T::size_of` is a constant, consider implementing `constant_size` as well. + /// If `T::size_of` is a constant, consider implementing `constant_size` as + /// well. fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; /// Used to optimize `MallocSizeOf` implementation for collections @@ -544,12 +546,13 @@ impl MallocConditionalSizeOf for Arc { } } -/// If a mutex is stored directly as a member of a data type that is being measured, -/// it is the unique owner of its contents and deserves to be measured. +/// If a mutex is stored directly as a member of a data type that is being +/// measured, it is the unique owner of its contents and deserves to be +/// measured. /// -/// If a mutex is stored inside of an Arc value as a member of a data type that is being measured, -/// the Arc will not be automatically measured so there is no risk of overcounting the mutex's -/// contents. +/// If a mutex is stored inside of an Arc value as a member of a data type that +/// is being measured, the Arc will not be automatically measured so there is no +/// risk of overcounting the mutex's contents. /// /// The same reasoning applies to RwLock. 
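A sketch of the suggested pattern: deep measurement via `MallocSizeOf`, deferring to the field implementations (derive support also exists via the re-exported `mysten_util_mem_derive` macros):

use mysten_util_mem::{MallocSizeOf, MallocSizeOfOps};

struct Cache {
    entries: Vec<String>,
}

impl MallocSizeOf for Cache {
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        // Vec's impl counts its own buffer plus each String's heap data;
        // the inline size of `Cache` itself is deliberately not included.
        self.entries.size_of(ops)
    }
}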
 #[cfg(feature = "std")]
@@ -672,8 +675,8 @@ malloc_size_of_is_0!(Range, Range, Range, Range, Range
 malloc_size_of_is_0!(Range, Range);
 malloc_size_of_is_0!(any: PhantomData);

-/// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a
-/// struct.
+/// Measurable that defers to the inner value and is used to verify a
+/// MallocSizeOf implementation in a struct.
 #[derive(Clone)]
 pub struct Measurable(pub T);

@@ -785,9 +788,11 @@ malloc_size_of_is_0!(std::time::Duration);

 #[cfg(all(test, feature = "std"))] // tests are using std implementations
 mod tests {
-    use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps};
-    use smallvec::SmallVec;
     use std::{collections::BTreeSet, mem};
+
+    use smallvec::SmallVec;
+
+    use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps};
     impl_smallvec!(3);

     #[test]
diff --git a/crates/mysten-util-mem/src/sizeof.rs b/crates/mysten-util-mem/src/sizeof.rs
index b9b237ab471..8d2dd06105a 100644
--- a/crates/mysten-util-mem/src/sizeof.rs
+++ b/crates/mysten-util-mem/src/sizeof.rs
@@ -9,12 +9,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-//! Estimation for heapsize calculation. Usable to replace call to allocator method (for some
-//! allocators or simply because we just need a deterministic cunsumption measurement).
+//! Estimation for heapsize calculation. Usable to replace a call to an
+//! allocator method (for some allocators, or simply because we just need a
+//! deterministic consumption measurement).

-use crate::malloc_size::{
-    MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps, MallocUnconditionalShallowSizeOf,
-};
 #[cfg(not(feature = "std"))]
 use alloc::boxed::Box;
 #[cfg(not(feature = "std"))]
@@ -25,12 +23,15 @@ use alloc::sync::Arc;
 #[cfg(not(feature = "std"))]
 use alloc::vec::Vec;
 #[cfg(not(feature = "std"))]
 use core::mem::{size_of, size_of_val};
-
 #[cfg(feature = "std")]
 use std::mem::{size_of, size_of_val};
 #[cfg(feature = "std")]
 use std::sync::Arc;

+use crate::malloc_size::{
+    MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps, MallocUnconditionalShallowSizeOf,
+};
+
 impl MallocShallowSizeOf for Box {
     fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
         size_of_val(&**self)
diff --git a/crates/prometheus-closure-metric/src/lib.rs b/crates/prometheus-closure-metric/src/lib.rs
index b0fd177be94..c978552af7f 100644
--- a/crates/prometheus-closure-metric/src/lib.rs
+++ b/crates/prometheus-closure-metric/src/lib.rs
@@ -4,21 +4,20 @@
 // Copyright 2014 The Prometheus Authors
 // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.

-//! This library implements a `ClosureMetric` for crate `prometheus` whose value is computed at
-//! the time of collection by a provided closure.
+//! This library implements a `ClosureMetric` for crate `prometheus` whose value
+//! is computed at the time of collection by a provided closure.

 // TODO: add example usage once constructor macros are implemented.
 // (For now, look at tests for an example.)

-use anyhow::anyhow;
-use anyhow::Result;
-use prometheus::core;
-use prometheus::proto;
+use anyhow::{anyhow, Result};
+use prometheus::{core, proto};

-/// A Prometheus metric whose value is computed at collection time by the provided closure.
+/// A Prometheus metric whose value is computed at collection time by the
+/// provided closure.
 ///
-/// WARNING: The provided closure must be fast (~milliseconds or faster), since it blocks
-/// metric collection.
+/// WARNING: The provided closure must be fast (~milliseconds or faster), since
+/// it blocks metric collection.
 #[derive(Debug)]
 pub struct ClosureMetric {
     desc: core::Desc,
diff --git a/crates/prometheus-closure-metric/tests/closure_metric.rs b/crates/prometheus-closure-metric/tests/closure_metric.rs
index 90155023502..039a9372227 100644
--- a/crates/prometheus-closure-metric/tests/closure_metric.rs
+++ b/crates/prometheus-closure-metric/tests/closure_metric.rs
@@ -17,9 +17,11 @@ fn closure_metric_basic() {
     )
     .unwrap();

-    assert!(prometheus::default_registry()
-        .register(Box::new(metric0))
-        .is_ok());
+    assert!(
+        prometheus::default_registry()
+            .register(Box::new(metric0))
+            .is_ok()
+    );

     // Gather the metrics.
     let metric_families = prometheus::default_registry().gather();
diff --git a/crates/shared-crypto/src/intent.rs b/crates/shared-crypto/src/intent.rs
index 15c28473f76..efdc149aedb 100644
--- a/crates/shared-crypto/src/intent.rs
+++ b/crates/shared-crypto/src/intent.rs
@@ -1,18 +1,18 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0

+use std::str::FromStr;
+
 use eyre::eyre;
 use fastcrypto::encoding::decode_bytes_hex;
 use serde::{Deserialize, Serialize};
-use serde_repr::Deserialize_repr;
-use serde_repr::Serialize_repr;
-use std::str::FromStr;
+use serde_repr::{Deserialize_repr, Serialize_repr};

 pub const INTENT_PREFIX_LENGTH: usize = 3;

-/// The version here is to distinguish between signing different versions of the struct
-/// or enum. Serialized output between two different versions of the same struct/enum
-/// might accidentally (or maliciously on purpose) match.
+/// The version here is to distinguish between signing different versions of the
+/// struct or enum. Serialized output between two different versions of the same
+/// struct/enum might accidentally (or maliciously on purpose) match.
 #[derive(Serialize_repr, Deserialize_repr, Copy, Clone, PartialEq, Eq, Debug, Hash)]
 #[repr(u8)]
 pub enum IntentVersion {
@@ -26,10 +26,11 @@ impl TryFrom for IntentVersion {
     }
 }

-/// This enums specifies the application ID. Two intents in two different applications
-/// (i.e., Narwhal, Sui, Ethereum etc) should never collide, so that even when a signing
-/// key is reused, nobody can take a signature designated for app_1 and present it as a
-/// valid signature for an (any) intent in app_2.
+/// This enum specifies the application ID. Two intents in two different
+/// applications (i.e., Narwhal, Sui, Ethereum, etc.) should never collide, so
+/// that even when a signing key is reused, nobody can take a signature
+/// designated for app_1 and present it as a valid signature for an (any) intent
+/// in app_2.
 #[derive(Serialize_repr, Deserialize_repr, Copy, Clone, PartialEq, Eq, Debug, Hash)]
 #[repr(u8)]
 pub enum AppId {
@@ -52,9 +53,9 @@ impl Default for AppId {
     }
 }

-/// This enums specifies the intent scope. Two intents for different scope should
-/// never collide, so no signature provided for one intent scope can be used for
-/// another, even when the serialized data itself may be the same.
+/// This enum specifies the intent scope. Two intents for different scopes
+/// should never collide, so no signature provided for one intent scope can be
+/// used for another, even when the serialized data itself may be the same.
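These domain-separation rules compose into the 3-byte intent prefix defined next. A sketch of what that looks like on the wire; the field names `version`/`app_id`, the variant names, and BCS as the serialization format are assumptions based on the fragments visible here:

// Hypothetical: each repr(u8) component contributes exactly one byte.
let intent = Intent {
    scope: IntentScope::PersonalMessage, // byte 0
    version: IntentVersion::V0,          // byte 1 (assumed field/variant)
    app_id: AppId::Sui,                  // byte 2 (assumed field/variant)
};
let bytes = bcs::to_bytes(&intent).unwrap();
assert_eq!(bytes.len(), INTENT_PREFIX_LENGTH); // == 3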
 #[derive(Serialize_repr, Deserialize_repr, Copy, Clone, PartialEq, Eq, Debug, Hash)]
 #[repr(u8)]
 pub enum IntentScope {
@@ -63,7 +64,8 @@ pub enum IntentScope {
     CheckpointSummary = 2, // Used for an authority signature on a checkpoint summary.
     PersonalMessage = 3,   // Used for a user signature on a personal message.
     SenderSignedTransaction = 4, // Used for an authority signature on a user signed transaction.
-    ProofOfPossession = 5, // Used as a signature representing an authority's proof of possession of its authority protocol key.
+    ProofOfPossession = 5, /* Used as a signature representing an authority's proof of
+                            * possession of its authority protocol key. */
     HeaderDigest = 6,      // Used for narwhal authority signature on header digest.
     BridgeEventUnused = 7, // for bridge purposes but it's currently not included in messages.
     ConsensusBlock = 8,    // Used for consensus authority signature on block's digest
@@ -76,11 +78,14 @@ impl TryFrom for IntentScope {
     }
 }

-/// An intent is a compact struct serves as the domain separator for a message that a signature commits to.
-/// It consists of three parts: [enum IntentScope] (what the type of the message is), [enum IntentVersion], [enum AppId] (what application that the signature refers to).
-/// It is used to construct [struct IntentMessage] that what a signature commits to.
+/// An intent is a compact struct that serves as the domain separator for a
+/// message that a signature commits to. It consists of three parts: [enum
+/// IntentScope] (what the type of the message is), [enum IntentVersion], and
+/// [enum AppId] (what application the signature refers to). It is used to
+/// construct the [struct IntentMessage] that a signature commits to.
 ///
-/// The serialization of an Intent is a 3-byte array where each field is represented by a byte.
+/// The serialization of an Intent is a 3-byte array where each field is
+/// represented by a byte.
 #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, Hash)]
 pub struct Intent {
     pub scope: IntentScope,
@@ -145,14 +150,14 @@ impl Intent {
     }
 }

-/// Intent Message is a wrapper around a message with its intent. The message can
-/// be any type that implements [trait Serialize]. *ALL* signatures in Sui must commits
-/// to the intent message, not the message itself. This guarantees any intent
-/// message signed in the system cannot collide with another since they are domain
-/// separated by intent.
+/// Intent Message is a wrapper around a message with its intent. The message
+/// can be any type that implements [trait Serialize]. *ALL* signatures in Sui
+/// must commit to the intent message, not the message itself. This guarantees
+/// any intent message signed in the system cannot collide with another since
+/// they are domain separated by intent.
 ///
-/// The serialization of an IntentMessage is compact: it only appends three bytes
-/// to the message itself.
+/// The serialization of an IntentMessage is compact: it only appends three
+/// bytes to the message itself.
 #[derive(Debug, PartialEq, Eq, Serialize, Clone, Hash, Deserialize)]
 pub struct IntentMessage {
     pub intent: Intent,
@@ -180,9 +185,10 @@ pub(crate) mod private {
     impl SealedIntent for IntentMessage {}
 }

-/// A 1-byte domain separator for hashing Object ID in Sui. It is starting from 0xf0
-/// to ensure no hashing collision for any ObjectID vs SuiAddress which is derived
-/// as the hash of `flag || pubkey`. See `sui_types::crypto::SignatureScheme::flag()`.
+/// A 1-byte domain separator for hashing Object ID in Sui. It starts from
+/// 0xf0 to ensure no hashing collision for any ObjectID vs SuiAddress, which is
+/// derived as the hash of `flag || pubkey`. See
+/// `sui_types::crypto::SignatureScheme::flag()`.
 #[derive(Serialize_repr, Deserialize_repr, Copy, Clone, PartialEq, Eq, Debug, Hash)]
 #[repr(u8)]
 pub enum HashingIntentScope {
diff --git a/crates/simulacrum/src/epoch_state.rs b/crates/simulacrum/src/epoch_state.rs
index 4ea65be5abb..a949710fb8b 100644
--- a/crates/simulacrum/src/epoch_state.rs
+++ b/crates/simulacrum/src/epoch_state.rs
@@ -12,8 +12,7 @@ use sui_types::{
     effects::TransactionEffects,
     gas::SuiGasStatus,
     inner_temporary_store::InnerTemporaryStore,
-    metrics::BytecodeVerifierMetrics,
-    metrics::LimitsMetrics,
+    metrics::{BytecodeVerifierMetrics, LimitsMetrics},
     sui_system_state::{
         epoch_start_sui_system_state::{EpochStartSystemState, EpochStartSystemStateTrait},
         SuiSystemState, SuiSystemStateTrait,
@@ -30,8 +29,9 @@ pub struct EpochState {
     limits_metrics: Arc,
     bytecode_verifier_metrics: Arc,
     executor: Arc,
-    /// A counter that advances each time we advance the clock in order to ensure that each update
-    /// txn has a unique digest. This is reset on epoch changes
+    /// A counter that advances each time we advance the clock in order to
+    /// ensure that each update txn has a unique digest. This is reset on
+    /// epoch changes.
     next_consensus_round: u64,
 }

@@ -118,8 +118,8 @@ impl EpochState {
             &receiving_object_refs,
         )?;

-        // Run the transaction input checks that would run when submitting the txn to a validator
-        // for signing
+        // Run the transaction input checks that would run when submitting the txn to a
+        // validator for signing
         let (gas_status, checked_input_objects) = sui_transaction_checks::check_transaction_input(
             &self.protocol_config,
             self.epoch_start_state.reference_gas_price(),
diff --git a/crates/simulacrum/src/lib.rs b/crates/simulacrum/src/lib.rs
index f12d9f5f307..cd759513746 100644
@@ -3,61 +3,57 @@

 //! A `Simulacrum` of Sui.
 //!
-//! The word simulacrum is latin for "likeness, semblance", it is also a spell in D&D which creates
-//! a copy of a creature which then follows the player's commands and wishes. As such this crate
-//! provides the [`Simulacrum`] type which is a implementation or instantiation of a sui
-//! blockchain, one which doesn't do anything unless acted upon.
+//! The word simulacrum is Latin for "likeness, semblance"; it is also a spell
+//! in D&D which creates a copy of a creature which then follows the player's
+//! commands and wishes. As such this crate provides the [`Simulacrum`] type,
+//! which is an implementation or instantiation of a Sui blockchain, one which
+//! doesn't do anything unless acted upon.
 //!
 //!
[`Simulacrum`]: crate::Simulacrum -use std::num::NonZeroUsize; -use std::sync::Arc; +use std::{num::NonZeroUsize, sync::Arc}; use anyhow::{anyhow, Result}; use fastcrypto::traits::Signer; use rand::rngs::OsRng; use sui_config::{genesis, transaction_deny_config::TransactionDenyConfig}; use sui_protocol_config::ProtocolVersion; -use sui_swarm_config::genesis_config::AccountConfig; -use sui_swarm_config::network_config::NetworkConfig; -use sui_swarm_config::network_config_builder::ConfigBuilder; -use sui_types::base_types::{AuthorityName, ObjectID, VersionNumber}; -use sui_types::crypto::AuthoritySignature; -use sui_types::digests::ConsensusCommitDigest; -use sui_types::object::Object; -use sui_types::storage::{ObjectStore, ReadStore}; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemState; -use sui_types::transaction::EndOfEpochTransactionKind; +use sui_swarm_config::{ + genesis_config::AccountConfig, network_config::NetworkConfig, + network_config_builder::ConfigBuilder, +}; use sui_types::{ - base_types::SuiAddress, + base_types::{AuthorityName, ObjectID, SuiAddress, VersionNumber}, committee::Committee, + crypto::AuthoritySignature, + digests::ConsensusCommitDigest, effects::TransactionEffects, error::ExecutionError, - gas_coin::MIST_PER_SUI, + gas_coin::{GasCoin, MIST_PER_SUI}, inner_temporary_store::InnerTemporaryStore, messages_checkpoint::{EndOfEpochData, VerifiedCheckpoint}, + mock_checkpoint_builder::{MockCheckpointBuilder, ValidatorKeypairProvider}, + object::Object, + programmable_transaction_builder::ProgrammableTransactionBuilder, signature::VerifyParams, - transaction::{Transaction, VerifiedTransaction}, + storage::{ObjectStore, ReadStore}, + sui_system_state::epoch_start_sui_system_state::EpochStartSystemState, + transaction::{ + EndOfEpochTransactionKind, GasData, Transaction, TransactionData, TransactionKind, + VerifiedTransaction, + }, }; -use self::epoch_state::EpochState; -pub use self::store::in_mem_store::InMemoryStore; -use self::store::in_mem_store::KeyStore; -pub use self::store::SimulatorStore; -use sui_types::mock_checkpoint_builder::{MockCheckpointBuilder, ValidatorKeypairProvider}; -use sui_types::{ - gas_coin::GasCoin, - programmable_transaction_builder::ProgrammableTransactionBuilder, - transaction::{GasData, TransactionData, TransactionKind}, -}; +pub use self::store::{in_mem_store::InMemoryStore, SimulatorStore}; +use self::{epoch_state::EpochState, store::in_mem_store::KeyStore}; mod epoch_state; pub mod store; /// A `Simulacrum` of Sui. /// -/// This type represents a simulated instantiation of a Sui blockchain that needs to be driven -/// manually, that is time doesn't advance and checkpoints are not formed unless explicitly -/// requested. +/// This type represents a simulated instantiation of a Sui blockchain that +/// needs to be driven manually, that is time doesn't advance and checkpoints +/// are not formed unless explicitly requested. /// /// See [module level][mod] documentation for more details. /// @@ -78,7 +74,8 @@ pub struct Simulacrum { } impl Simulacrum { - /// Create a new, random Simulacrum instance using an `OsRng` as the source of randomness. + /// Create a new, random Simulacrum instance using an `OsRng` as the source + /// of randomness. #[allow(clippy::new_without_default)] pub fn new() -> Self { Self::new_with_rng(OsRng) @@ -91,12 +88,12 @@ where { /// Create a new Simulacrum instance using the provided `rng`. 
/// - /// This allows you to create a fully deterministic initial chainstate when a seeded rng is - /// used. + /// This allows you to create a fully deterministic initial chainstate when + /// a seeded rng is used. /// /// ``` + /// use rand::{rngs::StdRng, SeedableRng}; /// use simulacrum::Simulacrum; - /// use rand::{SeedableRng, rngs::StdRng}; /// /// # fn main() { /// let mut rng = StdRng::seed_from_u64(1); @@ -155,15 +152,17 @@ impl Simulacrum { /// Attempts to execute the provided Transaction. /// - /// The provided Transaction undergoes the same types of checks that a Validator does prior to - /// signing and executing in the production system. Some of these checks are as follows: + /// The provided Transaction undergoes the same types of checks that a + /// Validator does prior to signing and executing in the production + /// system. Some of these checks are as follows: /// - User signature is valid /// - Sender owns all OwnedObject inputs /// - etc /// - /// If the above checks are successful then the transaction is immediately executed, enqueued - /// to be included in the next checkpoint (the next time `create_checkpoint` is called) and the - /// corresponding TransactionEffects are returned. + /// If the above checks are successful then the transaction is immediately + /// executed, enqueued to be included in the next checkpoint (the next + /// time `create_checkpoint` is called) and the corresponding + /// TransactionEffects are returned. pub fn execute_transaction( &mut self, transaction: Transaction, @@ -191,8 +190,8 @@ impl Simulacrum { Ok((effects, execution_error_opt.err())) } - /// Creates the next Checkpoint using the Transactions enqueued since the last checkpoint was - /// created. + /// Creates the next Checkpoint using the Transactions enqueued since the + /// last checkpoint was created. pub fn create_checkpoint(&mut self) -> VerifiedCheckpoint { let committee = CommitteeWithKeys::new(&self.keystore, self.epoch_state.committee()); let (checkpoint, contents, _) = self @@ -205,8 +204,8 @@ impl Simulacrum { /// Advances the clock by `duration`. /// - /// This creates and executes a ConsensusCommitPrologue transaction which advances the chain - /// Clock by the provided duration. + /// This creates and executes a ConsensusCommitPrologue transaction which + /// advances the chain Clock by the provided duration. pub fn advance_clock(&mut self, duration: std::time::Duration) -> TransactionEffects { let epoch = self.epoch_state.epoch(); let round = self.epoch_state.next_consensus_round(); @@ -226,16 +225,17 @@ impl Simulacrum { /// Advances the epoch. /// - /// This creates and executes an EndOfEpoch transaction which advances the chain into the next - /// epoch. Since it is required to be the final transaction in an epoch, the final checkpoint in - /// the epoch is also created. + /// This creates and executes an EndOfEpoch transaction which advances the + /// chain into the next epoch. Since it is required to be the final + /// transaction in an epoch, the final checkpoint in the epoch is also + /// created. /// - /// create_random_state controls whether a `RandomStateCreate` end of epoch transaction is - /// included as part of this epoch change (to initialise on-chain randomness for the first - /// time). + /// create_random_state controls whether a `RandomStateCreate` end of epoch + /// transaction is included as part of this epoch change (to initialise + /// on-chain randomness for the first time). 
/// - /// NOTE: This function does not currently support updating the protocol version or the system - /// packages + /// NOTE: This function does not currently support updating the protocol + /// version or the system packages pub fn advance_epoch(&mut self, create_random_state: bool) { let next_epoch = self.epoch_state.epoch() + 1; let next_epoch_protocol_version = self.epoch_state.protocol_version(); @@ -297,9 +297,10 @@ impl Simulacrum { /// Return a handle to the internally held RNG. /// - /// Returns a handle to the RNG used to create this Simulacrum for use as a source of - /// randomness. Using a seeded RNG to build a Simulacrum and then utilizing the stored RNG as a - /// source of randomness can lead to a fully deterministic chain evolution. + /// Returns a handle to the RNG used to create this Simulacrum for use as a + /// source of randomness. Using a seeded RNG to build a Simulacrum and + /// then utilizing the stored RNG as a source of randomness can lead to + /// a fully deterministic chain evolution. pub fn rng(&mut self) -> &mut R { &mut self.rng } @@ -313,8 +314,7 @@ impl Simulacrum { /// /// ``` /// use simulacrum::Simulacrum; - /// use sui_types::base_types::SuiAddress; - /// use sui_types::gas_coin::MIST_PER_SUI; + /// use sui_types::{base_types::SuiAddress, gas_coin::MIST_PER_SUI}; /// /// # fn main() { /// let mut simulacrum = Simulacrum::new(); @@ -326,8 +326,9 @@ impl Simulacrum { /// # } /// ``` pub fn request_gas(&mut self, address: SuiAddress, amount: u64) -> Result { - // For right now we'll just use the first account as the `faucet` account. We may want to - // explicitly cordon off the faucet account from the rest of the accounts though. + // For right now we'll just use the first account as the `faucet` account. We + // may want to explicitly cordon off the faucet account from the rest of + // the accounts though. let (sender, key) = self.keystore().accounts().next().unwrap(); let object = self .store() @@ -435,8 +436,8 @@ impl ReadStore for Simulacrum { &self, ) -> sui_types::storage::error::Result { - // TODO wire this up to the underlying sim store, for now this will work since we never - // prune the sim store + // TODO wire this up to the underlying sim store, for now this will work since + // we never prune the sim store Ok(0) } @@ -514,9 +515,11 @@ impl ReadStore for Simulacrum { impl Simulacrum { /// Generate a random transfer transaction. - /// TODO: This is here today to make it easier to write tests. But we should utilize all the - /// existing code for generating transactions in sui-test-transaction-builder by defining a trait - /// that both WalletContext and Simulacrum implement. Then we can remove this function. + /// TODO: This is here today to make it easier to write tests. But we should + /// utilize all the existing code for generating transactions in + /// sui-test-transaction-builder by defining a trait + /// that both WalletContext and Simulacrum implement. Then we can remove + /// this function. pub fn transfer_txn(&mut self, recipient: SuiAddress) -> (Transaction, u64) { let (sender, key) = self.keystore().accounts().next().unwrap(); let sender = *sender; diff --git a/crates/simulacrum/src/store/in_mem_store.rs b/crates/simulacrum/src/store/in_mem_store.rs index e61ce2a4316..fe9fc48dda0 100644 --- a/crates/simulacrum/src/store/in_mem_store.rs +++ b/crates/simulacrum/src/store/in_mem_store.rs @@ -1,12 +1,12 @@ // Copyright (c) Mysten Labs, Inc. 
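The manual-driving model above, end to end, as a sketch that uses only methods shown in this diff (`SuiAddress::ZERO` is a placeholder recipient):

use std::time::Duration;
use sui_types::base_types::SuiAddress;

fn drive(mut sim: Simulacrum) -> anyhow::Result<()> {
    // Nothing happens until we act: execute, then explicitly checkpoint.
    let (txn, _gas) = sim.transfer_txn(SuiAddress::ZERO);
    let (_effects, err) = sim.execute_transaction(txn)?;
    assert!(err.is_none());
    sim.create_checkpoint();

    // Time and epochs only advance on request.
    sim.advance_clock(Duration::from_secs(1));
    sim.advance_epoch(false);
    Ok(())
}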
// SPDX-License-Identifier: Apache-2.0 +use std::collections::{BTreeMap, HashMap}; + use move_binary_format::CompiledModule; use move_bytecode_utils::module_cache::GetModule; use move_core_types::{language_storage::ModuleId, resolver::ModuleResolver}; -use std::collections::{BTreeMap, HashMap}; use sui_config::genesis; -use sui_types::storage::{get_module, load_package_object_from_object_store, PackageObject}; use sui_types::{ base_types::{AuthorityName, ObjectID, SequenceNumber, SuiAddress}, committee::{Committee, EpochId}, @@ -19,7 +19,10 @@ use sui_types::{ VerifiedCheckpoint, }, object::{Object, Owner}, - storage::{BackingPackageStore, ChildObjectResolver, ObjectStore, ParentSync}, + storage::{ + get_module, load_package_object_from_object_store, BackingPackageStore, + ChildObjectResolver, ObjectStore, PackageObject, ParentSync, + }, transaction::VerifiedTransaction, }; diff --git a/crates/simulacrum/src/store/mod.rs b/crates/simulacrum/src/store/mod.rs index e91ad4aaec0..dfe91c89dfb 100644 --- a/crates/simulacrum/src/store/mod.rs +++ b/crates/simulacrum/src/store/mod.rs @@ -2,26 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 use std::collections::BTreeMap; + use sui_config::genesis; -use sui_types::base_types::ObjectRef; -use sui_types::error::UserInputError; -use sui_types::transaction::InputObjects; -use sui_types::transaction::ObjectReadResult; -use sui_types::transaction::ReceivingObjectReadResult; -use sui_types::transaction::ReceivingObjects; use sui_types::{ - base_types::{ObjectID, SequenceNumber, SuiAddress}, + base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}, committee::{Committee, EpochId}, digests::{ObjectDigest, TransactionDigest, TransactionEventsDigest}, effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, - error::SuiResult, + error::{SuiResult, UserInputError}, messages_checkpoint::{ CheckpointContents, CheckpointContentsDigest, CheckpointDigest, CheckpointSequenceNumber, VerifiedCheckpoint, }, object::Object, storage::{BackingStore, ChildObjectResolver, ParentSync}, - transaction::{InputObjectKind, VerifiedTransaction}, + transaction::{ + InputObjectKind, InputObjects, ObjectReadResult, ReceivingObjectReadResult, + ReceivingObjects, VerifiedTransaction, + }, }; pub mod in_mem_store; @@ -75,7 +73,7 @@ pub trait SimulatorStore: fn get_transaction_effects(&self, digest: &TransactionDigest) -> Option; fn get_transaction_events(&self, digest: &TransactionEventsDigest) - -> Option; + -> Option; fn get_transaction_events_by_tx_digest( &self, @@ -120,8 +118,9 @@ pub trait SimulatorStore: fn backing_store(&self) -> &dyn BackingStore; - // TODO: After we abstract object storage into the ExecutionCache trait, we can replace this with - // sui_core::TransactionInputLoad using an appropriate cache implementation. + // TODO: After we abstract object storage into the ExecutionCache trait, we can + // replace this with sui_core::TransactionInputLoad using an appropriate + // cache implementation. fn read_objects_for_synchronous_execution( &self, _tx_digest: &TransactionDigest, diff --git a/crates/sui-analytics-indexer/src/analytics_processor.rs b/crates/sui-analytics-indexer/src/analytics_processor.rs index 0c4ed9d0258..1d53e6a7709 100644 --- a/crates/sui-analytics-indexer/src/analytics_processor.rs +++ b/crates/sui-analytics-indexer/src/analytics_processor.rs @@ -1,32 +1,29 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::fs; -use std::ops::Range; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::{ + fs, + ops::Range, + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; -use anyhow::Context; -use anyhow::Result; -use object_store::path::Path; -use object_store::DynObjectStore; +use anyhow::{Context, Result}; +use object_store::{path::Path, DynObjectStore}; use serde::Serialize; -use tokio::sync::{mpsc, oneshot}; -use tracing::{error, info}; - use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; use sui_indexer::framework::Handler; use sui_rest_api::CheckpointData; use sui_storage::object_store::util::{copy_file, path_to_filesystem}; use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tokio::sync::{mpsc, oneshot}; +use tracing::{error, info}; -use crate::analytics_metrics::AnalyticsMetrics; -use crate::handlers::AnalyticsHandler; -use crate::writers::AnalyticsWriter; use crate::{ - join_paths, AnalyticsIndexerConfig, FileMetadata, MaxCheckpointReader, ParquetSchema, - EPOCH_DIR_PREFIX, + analytics_metrics::AnalyticsMetrics, handlers::AnalyticsHandler, join_paths, + writers::AnalyticsWriter, AnalyticsIndexerConfig, FileMetadata, MaxCheckpointReader, + ParquetSchema, EPOCH_DIR_PREFIX, }; pub struct AnalyticsProcessor { diff --git a/crates/sui-analytics-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-analytics-indexer/src/handlers/checkpoint_handler.rs index 12c6957d58a..6a20eb728d0 100644 --- a/crates/sui-analytics-indexer/src/handlers/checkpoint_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/checkpoint_handler.rs @@ -3,16 +3,15 @@ use anyhow::Result; use fastcrypto::traits::EncodeDecodeBase64; - use sui_indexer::framework::Handler; use sui_rest_api::{CheckpointData, CheckpointTransaction}; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::messages_checkpoint::{CertifiedCheckpointSummary, CheckpointSummary}; -use sui_types::transaction::TransactionDataAPI; +use sui_types::{ + effects::TransactionEffectsAPI, + messages_checkpoint::{CertifiedCheckpointSummary, CheckpointSummary}, + transaction::TransactionDataAPI, +}; -use crate::handlers::AnalyticsHandler; -use crate::tables::CheckpointEntry; -use crate::FileType; +use crate::{handlers::AnalyticsHandler, tables::CheckpointEntry, FileType}; pub struct CheckpointHandler { checkpoints: Vec, diff --git a/crates/sui-analytics-indexer/src/handlers/df_handler.rs b/crates/sui-analytics-indexer/src/handlers/df_handler.rs index 2d9a30059c4..6050ea22907 100644 --- a/crates/sui-analytics-indexer/src/handlers/df_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/df_handler.rs @@ -1,27 +1,28 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{collections::HashMap, path::Path}; + use anyhow::Result; use fastcrypto::encoding::{Base64, Encoding}; -use std::collections::HashMap; -use std::path::Path; -use sui_indexer::errors::IndexerError; -use tap::tap::TapFallible; -use tracing::warn; - -use sui_indexer::framework::Handler; -use sui_indexer::types::owner_to_owner_info; +use sui_indexer::{errors::IndexerError, framework::Handler, types::owner_to_owner_info}; use sui_json_rpc_types::SuiMoveValue; use sui_package_resolver::Resolver; use sui_rest_api::{CheckpointData, CheckpointTransaction}; -use sui_types::base_types::ObjectID; -use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType}; -use sui_types::object::Object; +use sui_types::{ + base_types::ObjectID, + dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType}, + object::Object, +}; +use tap::tap::TapFallible; +use tracing::warn; -use crate::handlers::{get_move_struct, AnalyticsHandler}; -use crate::package_store::{LocalDBPackageStore, PackageCache}; -use crate::tables::DynamicFieldEntry; -use crate::FileType; +use crate::{ + handlers::{get_move_struct, AnalyticsHandler}, + package_store::{LocalDBPackageStore, PackageCache}, + tables::DynamicFieldEntry, + FileType, +}; pub struct DynamicFieldHandler { dynamic_fields: Vec, @@ -142,13 +143,12 @@ impl DynamicFieldHandler { .to_canonical_string(/* with_prefix */ true), }, DynamicFieldType::DynamicObject => { - let object = - all_written_objects - .get(&object_id) - .ok_or(IndexerError::UncategorizedError(anyhow::anyhow!( - "Failed to find object_id {:?} when trying to create dynamic field info", - object_id - )))?; + let object = all_written_objects.get(&object_id).ok_or( + IndexerError::UncategorizedError(anyhow::anyhow!( + "Failed to find object_id {:?} when trying to create dynamic field info", + object_id + )), + )?; let version = object.version().value(); let digest = object.digest().to_string(); let object_type = object.data.type_().unwrap().clone(); diff --git a/crates/sui-analytics-indexer/src/handlers/event_handler.rs b/crates/sui-analytics-indexer/src/handlers/event_handler.rs index 0e240f42258..d31a710a931 100644 --- a/crates/sui-analytics-indexer/src/handlers/event_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/event_handler.rs @@ -1,22 +1,22 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; -use fastcrypto::encoding::{Base64, Encoding}; - use std::path::Path; -use crate::handlers::{get_move_struct, AnalyticsHandler}; -use crate::package_store::{LocalDBPackageStore, PackageCache}; -use crate::tables::EventEntry; -use crate::FileType; +use anyhow::Result; +use fastcrypto::encoding::{Base64, Encoding}; use sui_indexer::framework::Handler; use sui_json_rpc_types::SuiMoveStruct; use sui_package_resolver::Resolver; use sui_rest_api::CheckpointData; -use sui_types::digests::TransactionDigest; -use sui_types::effects::TransactionEvents; -use sui_types::event::Event; +use sui_types::{digests::TransactionDigest, effects::TransactionEvents, event::Event}; + +use crate::{ + handlers::{get_move_struct, AnalyticsHandler}, + package_store::{LocalDBPackageStore, PackageCache}, + tables::EventEntry, + FileType, +}; pub struct EventHandler { events: Vec, diff --git a/crates/sui-analytics-indexer/src/handlers/mod.rs b/crates/sui-analytics-indexer/src/handlers/mod.rs index 3c212db4985..4bccc5aabff 100644 --- a/crates/sui-analytics-indexer/src/handlers/mod.rs +++ b/crates/sui-analytics-indexer/src/handlers/mod.rs @@ -4,21 +4,23 @@ use std::collections::{BTreeMap, BTreeSet}; use anyhow::{anyhow, Result}; -use move_core_types::annotated_value::{MoveStruct, MoveTypeLayout, MoveValue}; -use move_core_types::language_storage::{StructTag, TypeTag}; - +use move_core_types::{ + annotated_value::{MoveStruct, MoveTypeLayout, MoveValue}, + language_storage::{StructTag, TypeTag}, +}; use sui_indexer::framework::Handler; use sui_package_resolver::{PackageStore, Resolver}; -use sui_types::base_types::ObjectID; -use sui_types::effects::TransactionEffects; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::object::bounded_visitor::BoundedVisitor; -use sui_types::object::{Object, Owner}; -use sui_types::transaction::TransactionData; -use sui_types::transaction::TransactionDataAPI; +use sui_types::{ + base_types::ObjectID, + effects::{TransactionEffects, TransactionEffectsAPI}, + object::{bounded_visitor::BoundedVisitor, Object, Owner}, + transaction::{TransactionData, TransactionDataAPI}, +}; -use crate::tables::{InputObjectKind, ObjectStatus, OwnerType}; -use crate::FileType; +use crate::{ + tables::{InputObjectKind, ObjectStatus, OwnerType}, + FileType, +}; pub mod checkpoint_handler; pub mod df_handler; @@ -43,7 +45,8 @@ pub trait AnalyticsHandler: Handler { /// will be invoked by the analytics processor after every call to /// process_checkpoint fn read(&mut self) -> Result>; - /// Type of data being written by this processor i.e. checkpoint, object, etc + /// Type of data being written by this processor i.e. checkpoint, object, + /// etc fn file_type(&self) -> Result; } @@ -75,8 +78,8 @@ fn get_owner_address(object: &Object) -> Option { } // Helper class to track input object kind. -// Build sets of object ids for input, shared input and gas coin objects as defined -// in the transaction data. +// Build sets of object ids for input, shared input and gas coin objects as +// defined in the transaction data. // Input objects include coins and shared. 
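// ---------------------------------------------------------------------------
// [Editor's sketch] The comment block above describes the classification that
// the InputObjectTracker just below performs. A minimal, self-contained
// illustration of that idea, assuming hypothetical u64 ids, a simplified
// Tracker type, and one plausible precedence (gas, then shared, then plain
// input); this is not the crate's actual API.
use std::collections::BTreeSet;

#[derive(Debug, PartialEq)]
enum ObjectKind {
    GasCoin,
    SharedInput,
    Input,
    Other,
}

struct Tracker {
    input: BTreeSet<u64>,  // ids named as transaction inputs
    shared: BTreeSet<u64>, // inputs that are shared objects
    gas: BTreeSet<u64>,    // gas coin ids
}

impl Tracker {
    fn classify(&self, id: &u64) -> ObjectKind {
        if self.gas.contains(id) {
            ObjectKind::GasCoin
        } else if self.shared.contains(id) {
            ObjectKind::SharedInput
        } else if self.input.contains(id) {
            ObjectKind::Input
        } else {
            ObjectKind::Other
        }
    }
}

fn main() {
    let t = Tracker {
        input: BTreeSet::from([1, 2]),
        shared: BTreeSet::from([2]),
        gas: BTreeSet::from([3]),
    };
    assert_eq!(t.classify(&2), ObjectKind::SharedInput);
    assert_eq!(t.classify(&3), ObjectKind::GasCoin);
}
// ---------------------------------------------------------------------------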
struct InputObjectTracker { shared: BTreeSet, @@ -273,15 +276,18 @@ fn parse_struct_field( #[cfg(test)] mod tests { - use crate::handlers::parse_struct; - use move_core_types::account_address::AccountAddress; - use move_core_types::annotated_value::{MoveStruct, MoveValue}; - use move_core_types::identifier::Identifier; - use move_core_types::language_storage::StructTag; - use std::collections::BTreeMap; - use std::str::FromStr; + use std::{collections::BTreeMap, str::FromStr}; + + use move_core_types::{ + account_address::AccountAddress, + annotated_value::{MoveStruct, MoveValue}, + identifier::Identifier, + language_storage::StructTag, + }; use sui_types::base_types::ObjectID; + use crate::handlers::parse_struct; + #[tokio::test] async fn test_wrapped_object_parsing() -> anyhow::Result<()> { let uid_field = MoveValue::Struct(MoveStruct { diff --git a/crates/sui-analytics-indexer/src/handlers/move_call_handler.rs b/crates/sui-analytics-indexer/src/handlers/move_call_handler.rs index 4dc164e4434..b11aec2609c 100644 --- a/crates/sui-analytics-indexer/src/handlers/move_call_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/move_call_handler.rs @@ -3,15 +3,11 @@ use anyhow::Result; use move_core_types::identifier::IdentStr; - use sui_indexer::framework::Handler; use sui_rest_api::CheckpointData; -use sui_types::base_types::ObjectID; -use sui_types::transaction::TransactionDataAPI; +use sui_types::{base_types::ObjectID, transaction::TransactionDataAPI}; -use crate::handlers::AnalyticsHandler; -use crate::tables::MoveCallEntry; -use crate::FileType; +use crate::{handlers::AnalyticsHandler, tables::MoveCallEntry, FileType}; pub struct MoveCallHandler { move_calls: Vec, diff --git a/crates/sui-analytics-indexer/src/handlers/object_handler.rs b/crates/sui-analytics-indexer/src/handlers/object_handler.rs index a7fca0f76dc..5a8afaea187 100644 --- a/crates/sui-analytics-indexer/src/handlers/object_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/object_handler.rs @@ -1,26 +1,26 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; -use fastcrypto::encoding::{Base64, Encoding}; use std::path::Path; +use anyhow::Result; +use fastcrypto::encoding::{Base64, Encoding}; use sui_indexer::framework::Handler; use sui_json_rpc_types::SuiMoveStruct; use sui_package_resolver::Resolver; use sui_rest_api::{CheckpointData, CheckpointTransaction}; -use sui_types::effects::TransactionEffects; -use sui_types::object::Object; +use sui_types::{effects::TransactionEffects, object::Object}; -use crate::handlers::{ - get_move_struct, get_owner_address, get_owner_type, initial_shared_version, AnalyticsHandler, - ObjectStatusTracker, +use crate::{ + handlers::{ + get_move_struct, get_owner_address, get_owner_type, initial_shared_version, + AnalyticsHandler, ObjectStatusTracker, + }, + package_store::{LocalDBPackageStore, PackageCache}, + tables::{ObjectEntry, ObjectStatus}, + FileType, }; -use crate::package_store::{LocalDBPackageStore, PackageCache}; -use crate::tables::{ObjectEntry, ObjectStatus}; -use crate::FileType; - pub struct ObjectHandler { objects: Vec, package_store: LocalDBPackageStore, diff --git a/crates/sui-analytics-indexer/src/handlers/package_handler.rs b/crates/sui-analytics-indexer/src/handlers/package_handler.rs index a872b708e03..fefe066578f 100644 --- a/crates/sui-analytics-indexer/src/handlers/package_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/package_handler.rs @@ -5,12 +5,9 @@ use anyhow::Result; use fastcrypto::encoding::{Base64, Encoding}; use sui_indexer::framework::Handler; use sui_rest_api::CheckpointData; -use sui_types::full_checkpoint_content::CheckpointTransaction; -use sui_types::object::Object; +use sui_types::{full_checkpoint_content::CheckpointTransaction, object::Object}; -use crate::handlers::AnalyticsHandler; -use crate::tables::MovePackageEntry; -use crate::FileType; +use crate::{handlers::AnalyticsHandler, tables::MovePackageEntry, FileType}; pub struct PackageHandler { packages: Vec, diff --git a/crates/sui-analytics-indexer/src/handlers/transaction_handler.rs b/crates/sui-analytics-indexer/src/handlers/transaction_handler.rs index 917fe5512c0..51da4289161 100644 --- a/crates/sui-analytics-indexer/src/handlers/transaction_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/transaction_handler.rs @@ -5,17 +5,15 @@ use std::collections::BTreeSet; use anyhow::Result; use fastcrypto::encoding::{Base64, Encoding}; -use tracing::error; - use sui_indexer::framework::Handler; use sui_rest_api::{CheckpointData, CheckpointTransaction}; -use sui_types::effects::TransactionEffects; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::transaction::{Command, TransactionDataAPI, TransactionKind}; +use sui_types::{ + effects::{TransactionEffects, TransactionEffectsAPI}, + transaction::{Command, TransactionDataAPI, TransactionKind}, +}; +use tracing::error; -use crate::handlers::AnalyticsHandler; -use crate::tables::TransactionEntry; -use crate::FileType; +use crate::{handlers::AnalyticsHandler, tables::TransactionEntry, FileType}; pub struct TransactionHandler { pub(crate) transactions: Vec, @@ -114,10 +112,14 @@ impl TransactionHandler { } } } else { - error!("Transaction kind [{kind}] is not programmable transaction and not a system transaction"); + error!( + "Transaction kind [{kind}] is not programmable transaction and not a system transaction" + ); } if move_calls_count != move_calls { - error!("Mismatch in move calls count: commands {move_calls_count} != {move_calls} calls"); + error!( + "Mismatch in move calls 
count: commands {move_calls_count} != {move_calls} calls" + ); } } let transaction_json = serde_json::to_string(&transaction)?; @@ -154,9 +156,9 @@ impl TransactionHandler { move_calls, packages, gas_owner: txn_data.gas_owner().to_string(), - gas_object_id: gas_object.0 .0.to_string(), - gas_object_sequence: gas_object.0 .1.value(), - gas_object_digest: gas_object.0 .2.to_string(), + gas_object_id: gas_object.0.0.to_string(), + gas_object_sequence: gas_object.0.1.value(), + gas_object_digest: gas_object.0.2.to_string(), gas_budget: txn_data.gas_budget(), total_gas_cost: gas_summary.net_gas_usage(), computation_cost: gas_summary.computation_cost, @@ -180,12 +182,12 @@ impl TransactionHandler { #[cfg(test)] mod tests { - use crate::handlers::transaction_handler::TransactionHandler; use fastcrypto::encoding::{Base64, Encoding}; use simulacrum::Simulacrum; use sui_indexer::framework::Handler; - use sui_types::base_types::SuiAddress; - use sui_types::storage::ReadStore; + use sui_types::{base_types::SuiAddress, storage::ReadStore}; + + use crate::handlers::transaction_handler::TransactionHandler; #[tokio::test] pub async fn test_transaction_handler() -> anyhow::Result<()> { diff --git a/crates/sui-analytics-indexer/src/handlers/transaction_objects_handler.rs b/crates/sui-analytics-indexer/src/handlers/transaction_objects_handler.rs index e16a8f21730..10807d856eb 100644 --- a/crates/sui-analytics-indexer/src/handlers/transaction_objects_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/transaction_objects_handler.rs @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; - use sui_indexer::framework::Handler; use sui_rest_api::{CheckpointData, CheckpointTransaction}; -use sui_types::base_types::ObjectID; -use sui_types::effects::TransactionEffects; -use sui_types::transaction::TransactionDataAPI; +use sui_types::{ + base_types::ObjectID, effects::TransactionEffects, transaction::TransactionDataAPI, +}; -use crate::handlers::{AnalyticsHandler, InputObjectTracker, ObjectStatusTracker}; -use crate::tables::TransactionObjectEntry; -use crate::FileType; +use crate::{ + handlers::{AnalyticsHandler, InputObjectTracker, ObjectStatusTracker}, + tables::TransactionObjectEntry, + FileType, +}; pub struct TransactionObjectsHandler { transaction_objects: Vec, diff --git a/crates/sui-analytics-indexer/src/handlers/wrapped_object_handler.rs b/crates/sui-analytics-indexer/src/handlers/wrapped_object_handler.rs index 92bb47b6660..1a3e708a8f9 100644 --- a/crates/sui-analytics-indexer/src/handlers/wrapped_object_handler.rs +++ b/crates/sui-analytics-indexer/src/handlers/wrapped_object_handler.rs @@ -1,20 +1,20 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; -use std::collections::BTreeMap; -use std::path::Path; +use std::{collections::BTreeMap, path::Path}; +use anyhow::Result; use sui_indexer::framework::Handler; use sui_package_resolver::Resolver; use sui_rest_api::{CheckpointData, CheckpointTransaction}; use sui_types::object::Object; -use crate::handlers::{get_move_struct, parse_struct, AnalyticsHandler}; - -use crate::package_store::{LocalDBPackageStore, PackageCache}; -use crate::tables::WrappedObjectEntry; -use crate::FileType; +use crate::{ + handlers::{get_move_struct, parse_struct, AnalyticsHandler}, + package_store::{LocalDBPackageStore, PackageCache}, + tables::WrappedObjectEntry, + FileType, +}; pub struct WrappedObjectHandler { wrapped_objects: Vec, diff --git a/crates/sui-analytics-indexer/src/lib.rs b/crates/sui-analytics-indexer/src/lib.rs index d9fdf0980c1..38395a2b5c2 100644 --- a/crates/sui-analytics-indexer/src/lib.rs +++ b/crates/sui-analytics-indexer/src/lib.rs @@ -1,52 +1,47 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::ops::Range; -use std::path::PathBuf; +use std::{ops::Range, path::PathBuf}; use anyhow::{anyhow, Result}; use arrow_array::{Array, Int32Array}; use clap::*; -use gcp_bigquery_client::model::query_request::QueryRequest; -use gcp_bigquery_client::Client; -use num_enum::IntoPrimitive; -use num_enum::TryFromPrimitive; +use gcp_bigquery_client::{model::query_request::QueryRequest, Client}; +use num_enum::{IntoPrimitive, TryFromPrimitive}; use object_store::path::Path; use serde::{Deserialize, Serialize}; use snowflake_api::{QueryResult, SnowflakeApi}; use strum_macros::EnumIter; -use tracing::info; - use sui_config::object_storage_config::ObjectStoreConfig; use sui_indexer::framework::Handler; use sui_rest_api::CheckpointData; use sui_storage::object_store::util::{ find_all_dirs_with_epoch_prefix, find_all_files_with_epoch_prefix, }; -use sui_types::base_types::EpochId; -use sui_types::dynamic_field::DynamicFieldType; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; - -use crate::analytics_metrics::AnalyticsMetrics; -use crate::analytics_processor::AnalyticsProcessor; -use crate::handlers::checkpoint_handler::CheckpointHandler; -use crate::handlers::df_handler::DynamicFieldHandler; -use crate::handlers::event_handler::EventHandler; -use crate::handlers::move_call_handler::MoveCallHandler; -use crate::handlers::object_handler::ObjectHandler; -use crate::handlers::package_handler::PackageHandler; -use crate::handlers::transaction_handler::TransactionHandler; -use crate::handlers::transaction_objects_handler::TransactionObjectsHandler; -use crate::handlers::wrapped_object_handler::WrappedObjectHandler; -use crate::handlers::AnalyticsHandler; -use crate::tables::{ - CheckpointEntry, DynamicFieldEntry, EventEntry, InputObjectKind, MoveCallEntry, - MovePackageEntry, ObjectEntry, ObjectStatus, OwnerType, TransactionEntry, - TransactionObjectEntry, WrappedObjectEntry, +use sui_types::{ + base_types::EpochId, dynamic_field::DynamicFieldType, + messages_checkpoint::CheckpointSequenceNumber, +}; +use tracing::info; + +use crate::{ + analytics_metrics::AnalyticsMetrics, + analytics_processor::AnalyticsProcessor, + handlers::{ + checkpoint_handler::CheckpointHandler, df_handler::DynamicFieldHandler, + event_handler::EventHandler, move_call_handler::MoveCallHandler, + object_handler::ObjectHandler, package_handler::PackageHandler, + transaction_handler::TransactionHandler, + 
transaction_objects_handler::TransactionObjectsHandler, + wrapped_object_handler::WrappedObjectHandler, AnalyticsHandler, + }, + tables::{ + CheckpointEntry, DynamicFieldEntry, EventEntry, InputObjectKind, MoveCallEntry, + MovePackageEntry, ObjectEntry, ObjectStatus, OwnerType, TransactionEntry, + TransactionObjectEntry, WrappedObjectEntry, + }, + writers::{csv_writer::CSVWriter, parquet_writer::ParquetWriter, AnalyticsWriter}, }; -use crate::writers::csv_writer::CSVWriter; -use crate::writers::parquet_writer::ParquetWriter; -use crate::writers::AnalyticsWriter; pub mod analytics_metrics; pub mod analytics_processor; diff --git a/crates/sui-analytics-indexer/src/main.rs b/crates/sui-analytics-indexer/src/main.rs index b8481f8ab25..23c36762a27 100644 --- a/crates/sui-analytics-indexer/src/main.rs +++ b/crates/sui-analytics-indexer/src/main.rs @@ -7,8 +7,7 @@ use sui_analytics_indexer::{ analytics_metrics::AnalyticsMetrics, errors::AnalyticsIndexerError, make_analytics_processor, AnalyticsIndexerConfig, }; -use sui_indexer::framework::IndexerBuilder; -use sui_indexer::metrics::IndexerMetrics; +use sui_indexer::{framework::IndexerBuilder, metrics::IndexerMetrics}; use tracing::info; #[tokio::main] diff --git a/crates/sui-analytics-indexer/src/package_store.rs b/crates/sui-analytics-indexer/src/package_store.rs index 13d37d33282..44404228da3 100644 --- a/crates/sui-analytics-indexer/src/package_store.rs +++ b/crates/sui-analytics-indexer/src/package_store.rs @@ -1,22 +1,24 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use async_trait::async_trait; -use std::path::Path; -use std::sync::Arc; +use std::{path::Path, sync::Arc}; +use async_trait::async_trait; use move_core_types::account_address::AccountAddress; use sui_package_resolver::{ error::Error as PackageResolverError, Package, PackageStore, PackageStoreWithLruCache, Result, }; use sui_rest_api::Client; -use sui_types::base_types::{ObjectID, SequenceNumber}; -use sui_types::object::Object; +use sui_types::{ + base_types::{ObjectID, SequenceNumber}, + object::Object, +}; use thiserror::Error; -use typed_store::rocks::{DBMap, MetricConf}; -use typed_store::traits::TableSummary; -use typed_store::traits::TypedStoreDebug; -use typed_store::{Map, TypedStoreError}; +use typed_store::{ + rocks::{DBMap, MetricConf}, + traits::{TableSummary, TypedStoreDebug}, + Map, TypedStoreError, +}; use typed_store_derive::DBMapUtils; const STORE: &str = "RocksDB"; @@ -62,10 +64,10 @@ impl PackageStoreTables { } } -/// Store which keeps package objects in a local rocksdb store. It is expected that this store is -/// kept updated with latest version of package objects while iterating over checkpoints. If the -/// local db is missing (or gets deleted), packages are fetched from a full node and local store is -/// updated +/// Store which keeps package objects in a local rocksdb store. It is expected +/// that this store is kept updated with latest version of package objects while +/// iterating over checkpoints. 
If the local db is missing (or gets deleted), +/// packages are fetched from a full node and local store is updated #[derive(Clone)] pub struct LocalDBPackageStore { package_store_tables: Arc, diff --git a/crates/sui-analytics-indexer/src/tables.rs b/crates/sui-analytics-indexer/src/tables.rs index 75a1be98033..5831546c877 100644 --- a/crates/sui-analytics-indexer/src/tables.rs +++ b/crates/sui-analytics-indexer/src/tables.rs @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 #![allow(dead_code)] -use crate::{ParquetSchema, ParquetValue}; use serde::Serialize; use strum_macros::Display; use sui_analytics_indexer_derive::SerializeParquet; use sui_types::dynamic_field::DynamicFieldType; -// +use crate::{ParquetSchema, ParquetValue}; + // Table entries for the analytics database. // Each entry is a row in the database. // @@ -132,8 +132,8 @@ pub enum InputObjectKind { GasCoin, } -// Used in the object table to identify the status of object, its result in the last transaction -// effect. +// Used in the object table to identify the status of object, its result in the +// last transaction effect. #[derive(Serialize, Clone, Display)] pub enum ObjectStatus { Created, diff --git a/crates/sui-analytics-indexer/src/writers/csv_writer.rs b/crates/sui-analytics-indexer/src/writers/csv_writer.rs index bbc2375cf6b..0f6c45a9b61 100644 --- a/crates/sui-analytics-indexer/src/writers/csv_writer.rs +++ b/crates/sui-analytics-indexer/src/writers/csv_writer.rs @@ -2,20 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 #![allow(dead_code)] -use std::fs::{create_dir_all, remove_file}; -use std::ops::Range; -use std::path::Path; -use std::{fs, fs::File, path::PathBuf}; +use std::{ + fs, + fs::{create_dir_all, remove_file, File}, + ops::Range, + path::{Path, PathBuf}, +}; use anyhow::{anyhow, Result}; use csv::{Writer, WriterBuilder}; use serde::Serialize; - use sui_storage::object_store::util::path_to_filesystem; use sui_types::base_types::EpochId; -use crate::writers::AnalyticsWriter; -use crate::{FileFormat, FileType, ParquetSchema}; +use crate::{writers::AnalyticsWriter, FileFormat, FileType, ParquetSchema}; // Save table entries to csv files. pub(crate) struct CSVWriter { diff --git a/crates/sui-analytics-indexer/src/writers/mod.rs b/crates/sui-analytics-indexer/src/writers/mod.rs index 3295693a856..9435715fc80 100644 --- a/crates/sui-analytics-indexer/src/writers/mod.rs +++ b/crates/sui-analytics-indexer/src/writers/mod.rs @@ -1,11 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{FileFormat, ParquetSchema}; use anyhow::Result; use serde::Serialize; use sui_types::base_types::EpochId; +use crate::{FileFormat, ParquetSchema}; + pub mod csv_writer; pub mod parquet_writer; diff --git a/crates/sui-analytics-indexer/src/writers/parquet_writer.rs b/crates/sui-analytics-indexer/src/writers/parquet_writer.rs index da5c5cd0e8f..bd719ecbf78 100644 --- a/crates/sui-analytics-indexer/src/writers/parquet_writer.rs +++ b/crates/sui-analytics-indexer/src/writers/parquet_writer.rs @@ -1,22 +1,21 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::{AnalyticsWriter, FileFormat, FileType}; -use crate::{ParquetSchema, ParquetValue}; +use std::{ + fs::{create_dir_all, remove_file, File}, + ops::Range, + path::{Path, PathBuf}, + sync::Arc, +}; + use anyhow::{anyhow, Result}; use arrow_array::{ArrayRef, BooleanArray, Int64Array, RecordBatch, StringArray, UInt64Array}; +use parquet::{arrow::ArrowWriter, basic::Compression, file::properties::WriterProperties}; use serde::Serialize; -use std::fs::File; -use std::fs::{create_dir_all, remove_file}; -use std::ops::Range; -use std::path::{Path, PathBuf}; -use std::sync::Arc; +use sui_storage::object_store::util::path_to_filesystem; use sui_types::base_types::EpochId; -use parquet::arrow::ArrowWriter; -use parquet::basic::Compression; -use parquet::file::properties::WriterProperties; -use sui_storage::object_store::util::path_to_filesystem; +use crate::{AnalyticsWriter, FileFormat, FileType, ParquetSchema, ParquetValue}; // Save table entries to parquet files. pub(crate) struct ParquetWriter { diff --git a/crates/sui-archival/src/lib.rs b/crates/sui-archival/src/lib.rs index 1dbcd632c25..8c3516cc0c2 100644 --- a/crates/sui-archival/src/lib.rs +++ b/crates/sui-archival/src/lib.rs @@ -8,42 +8,56 @@ pub mod writer; #[cfg(test)] mod tests; -use crate::reader::{ArchiveReader, ArchiveReaderMetrics}; +use std::{ + fs, + io::{BufWriter, Cursor, Read, Seek, SeekFrom, Write}, + num::NonZeroUsize, + ops::Range, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + use anyhow::{anyhow, Result}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use bytes::Bytes; use fastcrypto::hash::{HashFunction, Sha3_256}; use indicatif::{ProgressBar, ProgressStyle}; -use num_enum::IntoPrimitive; -use num_enum::TryFromPrimitive; +use num_enum::{IntoPrimitive, TryFromPrimitive}; use object_store::path::Path; use prometheus::Registry; use serde::{Deserialize, Serialize}; -use std::fs; -use std::io::{BufWriter, Cursor, Read, Seek, SeekFrom, Write}; -use std::num::NonZeroUsize; -use std::ops::Range; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use sui_config::genesis::Genesis; -use sui_config::node::ArchiveReaderConfig; -use sui_config::object_storage_config::ObjectStoreConfig; -use sui_storage::blob::{Blob, BlobEncoding}; -use sui_storage::object_store::util::{get, put}; -use sui_storage::object_store::{ObjectStoreGetExt, ObjectStorePutExt}; -use sui_storage::{compute_sha3_checksum, compute_sha3_checksum_for_bytes, SHA3_BYTES}; -use sui_types::base_types::ExecutionData; -use sui_types::messages_checkpoint::{FullCheckpointContents, VerifiedCheckpointContents}; -use sui_types::storage::{SingleCheckpointSharedInMemoryStore, WriteStore}; +use sui_config::{ + genesis::Genesis, node::ArchiveReaderConfig, object_storage_config::ObjectStoreConfig, +}; +use sui_storage::{ + blob::{Blob, BlobEncoding}, + compute_sha3_checksum, compute_sha3_checksum_for_bytes, + object_store::{ + util::{get, put}, + ObjectStoreGetExt, ObjectStorePutExt, + }, + SHA3_BYTES, +}; +use sui_types::{ + base_types::ExecutionData, + messages_checkpoint::{FullCheckpointContents, VerifiedCheckpointContents}, + storage::{SingleCheckpointSharedInMemoryStore, WriteStore}, +}; use tracing::{error, info}; +use crate::reader::{ArchiveReader, ArchiveReaderMetrics}; + #[allow(rustdoc::invalid_html_tags)] -/// Checkpoints and summaries are persisted as blob files. 
Files are committed to local store
-/// by duration or file size. Committed files are synced with the remote store continuously. Files are
-/// optionally compressed with the zstd compression format. Filenames follow the format
-/// <checkpoint_seq_num>.<suffix> where `checkpoint_seq_num` is the first checkpoint present in that
-/// file. MANIFEST is the index and source of truth for all files present in the archive.
+/// Checkpoints and summaries are persisted as blob files. Files are committed
+/// to local store by duration or file size. Committed files are synced with the
+/// remote store continuously. Files are optionally compressed with the zstd
+/// compression format. Filenames follow the format <checkpoint_seq_num>.<suffix>
+/// where `checkpoint_seq_num` is the first checkpoint present in that
+/// file. MANIFEST is the index and source of truth for all files present in the
+/// archive.
 ///
 /// State Archival Directory Layout
 ///  - archive/
@@ -62,34 +76,34 @@ use tracing::{error, info};
 ///         - 101000.chk
 ///         - ...
 /// Blob File Disk Format
-///┌──────────────────────────────┐
-///│ magic <4 byte> │
-///├──────────────────────────────┤
-///│ storage format <1 byte> │
+/// ┌──────────────────────────────┐
+/// │ magic <4 byte> │
+/// ├──────────────────────────────┤
+/// │ storage format <1 byte> │
 // ├──────────────────────────────┤
-///│ file compression <1 byte> │
+/// │ file compression <1 byte> │
 // ├──────────────────────────────┤
-///│ ┌──────────────────────────┐ │
-///│ │ Blob 1 │ │
-///│ ├──────────────────────────┤ │
-///│ │ ... │ │
-///│ ├──────────────────────────┤ │
-///│ │ Blob N │ │
-///│ └──────────────────────────┘ │
-///└──────────────────────────────┘
+/// │ ┌──────────────────────────┐ │
+/// │ │ Blob 1 │ │
+/// │ ├──────────────────────────┤ │
+/// │ │ ... │ │
+/// │ ├──────────────────────────┤ │
+/// │ │ Blob N │ │
+/// │ └──────────────────────────┘ │
+/// └──────────────────────────────┘
 /// Blob
-///┌───────────────┬───────────────────┬──────────────┐
-///│ len │ encoding <1 byte> │ data │
-///└───────────────┴───────────────────┴──────────────┘
+/// ┌───────────────┬───────────────────┬──────────────┐
+/// │ len │ encoding <1 byte> │ data │
+/// └───────────────┴───────────────────┴──────────────┘
 ///
 /// MANIFEST File Disk Format
-///┌──────────────────────────────┐
-///│ magic<4 byte> │
-///├──────────────────────────────┤
-///│ serialized manifest │
-///├──────────────────────────────┤
-///│ sha3 <32 bytes> │
-///└──────────────────────────────┘
+/// ┌──────────────────────────────┐
+/// │ magic<4 byte> │
+/// ├──────────────────────────────┤
+/// │ serialized manifest │
+/// ├──────────────────────────────┤
+/// │ sha3 <32 bytes> │
+/// └──────────────────────────────┘
 pub const CHECKPOINT_FILE_MAGIC: u32 = 0x0000DEAD;
 pub const SUMMARY_FILE_MAGIC: u32 = 0x0000CAFE;
 const MANIFEST_FILE_MAGIC: u32 = 0x00C0FFEE;
@@ -179,9 +193,11 @@ impl Manifest {
             .filter(|f| f.file_type == FileType::CheckpointSummary)
             .collect();
         summary_files.sort_by_key(|f| f.checkpoint_seq_range.start);
-        assert!(summary_files
-            .windows(2)
-            .all(|w| w[1].checkpoint_seq_range.start == w[0].checkpoint_seq_range.end));
+        assert!(
+            summary_files
+                .windows(2)
+                .all(|w| w[1].checkpoint_seq_range.start == w[0].checkpoint_seq_range.end)
+        );
         assert_eq!(summary_files.first().unwrap().checkpoint_seq_range.start, 0);
         summary_files
             .iter()
diff --git a/crates/sui-archival/src/reader.rs b/crates/sui-archival/src/reader.rs
index 42f838351f8..328451ec98c 100644
--- a/crates/sui-archival/src/reader.rs
+++ b/crates/sui-archival/src/reader.rs
@@
-1,35 +1,43 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - read_manifest, FileMetadata, FileType, Manifest, CHECKPOINT_FILE_MAGIC, SUMMARY_FILE_MAGIC, +use std::{ + borrow::Borrow, + future, + ops::Range, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, }; + use anyhow::{anyhow, Context, Result}; -use bytes::buf::Reader; -use bytes::{Buf, Bytes}; +use bytes::{buf::Reader, Buf, Bytes}; use futures::{StreamExt, TryStreamExt}; use prometheus::{register_int_counter_vec_with_registry, IntCounterVec, Registry}; use rand::seq::SliceRandom; -use std::borrow::Borrow; -use std::future; -use std::ops::Range; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; -use std::time::Duration; use sui_config::node::ArchiveReaderConfig; -use sui_storage::object_store::http::HttpDownloaderBuilder; -use sui_storage::object_store::util::get; -use sui_storage::object_store::ObjectStoreGetExt; -use sui_storage::{compute_sha3_checksum_for_bytes, make_iterator, verify_checkpoint}; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointSequenceNumber, - FullCheckpointContents as CheckpointContents, VerifiedCheckpoint, VerifiedCheckpointContents, +use sui_storage::{ + compute_sha3_checksum_for_bytes, make_iterator, + object_store::{http::HttpDownloaderBuilder, util::get, ObjectStoreGetExt}, + verify_checkpoint, }; -use sui_types::storage::WriteStore; -use tokio::sync::oneshot::Sender; -use tokio::sync::{oneshot, Mutex}; +use sui_types::{ + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointSequenceNumber, + FullCheckpointContents as CheckpointContents, VerifiedCheckpoint, + VerifiedCheckpointContents, + }, + storage::WriteStore, +}; +use tokio::sync::{oneshot, oneshot::Sender, Mutex}; use tracing::info; +use crate::{ + read_manifest, FileMetadata, FileType, Manifest, CHECKPOINT_FILE_MAGIC, SUMMARY_FILE_MAGIC, +}; + #[derive(Debug)] pub struct ArchiveReaderMetrics { pub archive_txns_read: IntCounterVec, @@ -58,7 +66,8 @@ impl ArchiveReaderMetrics { } } -// ArchiveReaderBalancer selects archives for reading based on whether they can fulfill a checkpoint request +// ArchiveReaderBalancer selects archives for reading based on whether they can +// fulfill a checkpoint request #[derive(Default, Clone)] pub struct ArchiveReaderBalancer { readers: Vec>, @@ -167,8 +176,9 @@ impl ArchiveReader { }) } - /// This function verifies that the files in archive cover the entire range of checkpoints from - /// sequence number 0 until the latest available checkpoint with no missing checkpoint + /// This function verifies that the files in archive cover the entire range + /// of checkpoints from sequence number 0 until the latest available + /// checkpoint with no missing checkpoint pub async fn verify_manifest( &self, manifest: Manifest, @@ -192,12 +202,16 @@ impl ArchiveReader { summary_files.sort_by_key(|f| f.checkpoint_seq_range.start); contents_files.sort_by_key(|f| f.checkpoint_seq_range.start); - assert!(summary_files - .windows(2) - .all(|w| w[1].checkpoint_seq_range.start == w[0].checkpoint_seq_range.end)); - assert!(contents_files - .windows(2) - .all(|w| w[1].checkpoint_seq_range.start == w[0].checkpoint_seq_range.end)); + assert!( + summary_files + .windows(2) + .all(|w| w[1].checkpoint_seq_range.start == w[0].checkpoint_seq_range.end) + ); + assert!( + contents_files + .windows(2) + .all(|w| w[1].checkpoint_seq_range.start == w[0].checkpoint_seq_range.end) + ); let files: Vec<(FileMetadata, FileMetadata)> 
= summary_files .into_iter() @@ -213,8 +227,8 @@ impl ArchiveReader { Ok(files) } - /// This function downloads summary and content files and ensures their computed checksum matches - /// the one in manifest + /// This function downloads summary and content files and ensures their + /// computed checksum matches the one in manifest pub async fn verify_file_consistency( &self, files: Vec<(FileMetadata, FileMetadata)>, @@ -263,8 +277,9 @@ impl ArchiveReader { .await } - /// Load checkpoints+txns+effects from archive into the input store `S` for the given - /// checkpoint range. Summaries are downloaded out of order and inserted without verification + /// Load checkpoints+txns+effects from archive into the input store `S` for + /// the given checkpoint range. Summaries are downloaded out of order + /// and inserted without verification pub async fn read_summaries( &self, store: S, @@ -294,15 +309,13 @@ impl ArchiveReader { stream .buffered(self.concurrency) .try_for_each(|summary_data| { - let result: Result<(), anyhow::Error> = make_iterator::< - CertifiedCheckpointSummary, - Reader, - >( - SUMMARY_FILE_MAGIC, - summary_data.reader(), - ) - .and_then(|summary_iter| { - summary_iter + let result: Result<(), anyhow::Error> = + make_iterator::>( + SUMMARY_FILE_MAGIC, + summary_data.reader(), + ) + .and_then(|summary_iter| { + summary_iter .filter(|s| { s.sequence_number >= checkpoint_range.start && s.sequence_number < checkpoint_range.end @@ -326,7 +339,7 @@ impl ArchiveReader { checkpoint_counter.fetch_add(1, Ordering::Relaxed); Ok::<(), anyhow::Error>(()) }) - }); + }); futures::future::ready(result) }) .await @@ -357,9 +370,10 @@ impl ArchiveReader { } } - /// Load checkpoints+txns+effects from archive into the input store `S` for the given - /// checkpoint range. If latest available checkpoint in archive is older than the start of the - /// input range then this call fails with an error otherwise we load as many checkpoints as + /// Load checkpoints+txns+effects from archive into the input store `S` for + /// the given checkpoint range. If latest available checkpoint in + /// archive is older than the start of the input range then this call + /// fails with an error otherwise we load as many checkpoints as /// possible until the end of the provided checkpoint range. pub async fn read( &self, diff --git a/crates/sui-archival/src/tests.rs b/crates/sui-archival/src/tests.rs index d0fa91b279f..b3de1713f0d 100644 --- a/crates/sui-archival/src/tests.rs +++ b/crates/sui-archival/src/tests.rs @@ -1,30 +1,40 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::reader::{ArchiveReader, ArchiveReaderMetrics}; -use crate::writer::ArchiveWriter; -use crate::{read_manifest, verify_archive_with_local_store, write_manifest, Manifest}; +use std::{ + fs, + fs::File, + io::Write, + num::NonZeroUsize, + path::PathBuf, + sync::{atomic::AtomicU64, Arc}, + time::Duration, +}; + use anyhow::{anyhow, Context, Result}; use more_asserts as ma; use object_store::DynObjectStore; use prometheus::Registry; -use std::fs; -use std::fs::File; -use std::io::Write; -use std::num::NonZeroUsize; -use std::path::PathBuf; -use std::sync::atomic::AtomicU64; -use std::sync::Arc; -use std::time::Duration; -use sui_config::node::ArchiveReaderConfig; -use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; -use sui_storage::object_store::util::path_to_filesystem; -use sui_storage::{FileCompression, StorageFormat}; +use sui_config::{ + node::ArchiveReaderConfig, + object_storage_config::{ObjectStoreConfig, ObjectStoreType}, +}; +use sui_storage::{object_store::util::path_to_filesystem, FileCompression, StorageFormat}; use sui_swarm_config::test_utils::{empty_contents, CommitteeFixture}; -use sui_types::messages_checkpoint::{VerifiedCheckpoint, VerifiedCheckpointContents}; -use sui_types::storage::{ReadStore, SharedInMemoryStore, SingleCheckpointSharedInMemoryStore}; +use sui_types::{ + messages_checkpoint::{VerifiedCheckpoint, VerifiedCheckpointContents}, + storage::{ReadStore, SharedInMemoryStore, SingleCheckpointSharedInMemoryStore}, +}; use tempfile::tempdir; +use crate::{ + read_manifest, + reader::{ArchiveReader, ArchiveReaderMetrics}, + verify_archive_with_local_store, write_manifest, + writer::ArchiveWriter, + Manifest, +}; + struct TestState { archive_writer: ArchiveWriter, archive_reader: ArchiveReader, @@ -286,14 +296,16 @@ async fn test_verify_archive_with_oneshot_store() -> Result<(), anyhow::Error> { ); // Verification should pass - assert!(verify_archive_with_local_store( - read_store, - test_state.remote_store_config.clone(), - 1, - false - ) - .await - .is_ok()); + assert!( + verify_archive_with_local_store( + read_store, + test_state.remote_store_config.clone(), + 1, + false + ) + .await + .is_ok() + ); kill.send(())?; Ok(()) } @@ -360,14 +372,16 @@ async fn test_verify_archive_with_oneshot_store_bad_data() -> Result<(), anyhow: ); // Verification should fail - assert!(verify_archive_with_local_store( - read_store, - test_state.remote_store_config.clone(), - 1, - false - ) - .await - .is_err()); + assert!( + verify_archive_with_local_store( + read_store, + test_state.remote_store_config.clone(), + 1, + false + ) + .await + .is_err() + ); kill.send(())?; Ok(()) diff --git a/crates/sui-archival/src/writer.rs b/crates/sui-archival/src/writer.rs index ecdf6839992..72b750d621e 100644 --- a/crates/sui-archival/src/writer.rs +++ b/crates/sui-archival/src/writer.rs @@ -2,38 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 #![allow(dead_code)] -use crate::{ - create_file_metadata, read_manifest, write_manifest, CheckpointUpdates, FileMetadata, FileType, - Manifest, CHECKPOINT_FILE_MAGIC, CHECKPOINT_FILE_SUFFIX, EPOCH_DIR_PREFIX, MAGIC_BYTES, - SUMMARY_FILE_MAGIC, SUMMARY_FILE_SUFFIX, +use std::{ + fs, + fs::{File, OpenOptions}, + io::{BufWriter, Seek, SeekFrom, Write}, + ops::Range, + path::{Path, PathBuf}, + sync::Arc, + thread::sleep, + time::Duration, }; -use anyhow::Result; -use anyhow::{anyhow, Context}; + +use anyhow::{anyhow, Context, Result}; use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; 
use object_store::DynObjectStore; use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; -use std::fs; -use std::fs::{File, OpenOptions}; -use std::io::{BufWriter, Seek, SeekFrom, Write}; -use std::ops::Range; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use std::thread::sleep; -use std::time::Duration; use sui_config::object_storage_config::ObjectStoreConfig; -use sui_storage::blob::{Blob, BlobEncoding}; -use sui_storage::object_store::util::{copy_file, path_to_filesystem}; -use sui_storage::{compress, FileCompression, StorageFormat}; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary as Checkpoint, CheckpointSequenceNumber, - FullCheckpointContents as CheckpointContents, +use sui_storage::{ + blob::{Blob, BlobEncoding}, + compress, + object_store::util::{copy_file, path_to_filesystem}, + FileCompression, StorageFormat, +}; +use sui_types::{ + messages_checkpoint::{ + CertifiedCheckpointSummary as Checkpoint, CheckpointSequenceNumber, + FullCheckpointContents as CheckpointContents, + }, + storage::WriteStore, +}; +use tokio::{ + sync::{ + mpsc, + mpsc::{Receiver, Sender}, + }, + time::Instant, }; -use sui_types::storage::WriteStore; -use tokio::sync::mpsc; -use tokio::sync::mpsc::{Receiver, Sender}; -use tokio::time::Instant; use tracing::{debug, info}; +use crate::{ + create_file_metadata, read_manifest, write_manifest, CheckpointUpdates, FileMetadata, FileType, + Manifest, CHECKPOINT_FILE_MAGIC, CHECKPOINT_FILE_SUFFIX, EPOCH_DIR_PREFIX, MAGIC_BYTES, + SUMMARY_FILE_MAGIC, SUMMARY_FILE_SUFFIX, +}; + pub struct ArchiveMetrics { pub latest_checkpoint_archived: IntGauge, } @@ -52,7 +64,8 @@ impl ArchiveMetrics { } } -/// CheckpointWriter writes checkpoints and summaries. It creates multiple *.chk and *.sum files +/// CheckpointWriter writes checkpoints and summaries. It creates multiple *.chk +/// and *.sum files struct CheckpointWriter { root_dir_path: PathBuf, epoch_num: u64, @@ -309,8 +322,8 @@ impl CheckpointWriter { } } -/// ArchiveWriter archives history by tailing checkpoints writing them to a local staging dir and -/// simultaneously uploading them to a remote object store +/// ArchiveWriter archives history by tailing checkpoints writing them to a +/// local staging dir and simultaneously uploading them to a remote object store pub struct ArchiveWriter { file_compression: FileCompression, storage_format: StorageFormat, @@ -425,8 +438,8 @@ impl ArchiveWriter { continue; } } - // Checkpoint with `checkpoint_sequence_number` is not available to read from store yet, - // sleep for sometime and then retry + // Checkpoint with `checkpoint_sequence_number` is not available to read from + // store yet, sleep for sometime and then retry sleep(Duration::from_secs(3)); } Ok(()) diff --git a/crates/sui-authority-aggregation/src/lib.rs b/crates/sui-authority-aggregation/src/lib.rs index 880a4352b5b..d2086ca733f 100644 --- a/crates/sui-authority-aggregation/src/lib.rs +++ b/crates/sui-authority-aggregation/src/lib.rs @@ -1,18 +1,20 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0

-use futures::Future;
-use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
-use mysten_metrics::monitored_future;
-use tracing::instrument::Instrument;
-
-use std::collections::{BTreeMap, BTreeSet};
-use std::sync::Arc;
-use std::time::Duration;
-use sui_types::base_types::ConciseableName;
-use sui_types::committee::{CommitteeTrait, StakeUnit};
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    sync::Arc,
+    time::Duration,
+};

+use futures::{future::BoxFuture, stream::FuturesUnordered, Future, StreamExt};
+use mysten_metrics::monitored_future;
+use sui_types::{
+    base_types::ConciseableName,
+    committee::{CommitteeTrait, StakeUnit},
+};
 use tokio::time::timeout;
+use tracing::instrument::Instrument;

 pub type AsyncResult<'a, T, E> = BoxFuture<'a, Result<T, E>>;

@@ -101,26 +103,31 @@ where
             }
         }
     }
-    // If we have exhausted all authorities and still have not returned a result, return
-    // error with the accumulated state.
+    // If we have exhausted all authorities and still have not returned a result,
+    // return an error with the accumulated state.
     Err(accumulated_state)
 }

-/// This function takes an initial state, than executes an asynchronous function (FMap) for each
-/// authority, and folds the results as they become available into the state using an async function (FReduce).
+/// This function takes an initial state, then executes an asynchronous function
+/// (FMap) for each authority, and folds the results as they become available
+/// into the state using an async function (FReduce).
 ///
-/// FMap can do io, and returns a result V. An error there may not be fatal, and could be consumed by the
-/// MReduce function to overall recover from it. This is necessary to ensure byzantine authorities cannot
-/// interrupt the logic of this function.
+/// FMap can do io, and returns a result V. An error there may not be fatal, and
+/// could be consumed by the FReduce function to recover from it overall. This
+/// is necessary to ensure byzantine authorities cannot interrupt the logic of
+/// this function.
 ///
-/// FReduce returns a result to a ReduceOutput. If the result is Err the function
-/// shortcuts and the Err is returned. An Ok ReduceOutput result can be used to shortcut and return
-/// the resulting state (ReduceOutput::End), continue the folding as new states arrive (ReduceOutput::Continue),
-/// or continue with a timeout maximum waiting time (ReduceOutput::ContinueWithTimeout).
+/// FReduce returns a Result containing a ReduceOutput. If the result is Err the
+/// function shortcuts and the Err is returned. An Ok ReduceOutput result can be
+/// used to shortcut and return the resulting state (ReduceOutput::End),
+/// continue the folding as new states arrive (ReduceOutput::Continue),
+/// or continue with a maximum waiting time before timing out
+/// (ReduceOutput::ContinueWithTimeout).
 ///
-/// This function provides a flexible way to communicate with a quorum of authorities, processing and
-/// processing their results into a safe overall result, and also safely allowing operations to continue
-/// past the quorum to ensure all authorities are up to date (up to a timeout).
+/// This function provides a flexible way to communicate with a quorum of
+/// authorities, processing their results into a safe overall
+/// result, and also safely allowing operations to continue past the quorum to
+/// ensure all authorities are up to date (up to a timeout).
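// ---------------------------------------------------------------------------
// [Editor's sketch] A toy, synchronous rendering of the control flow the doc
// comment above specifies: an FMap result per authority is folded into the
// state, and the reducer either ends, continues, or fails. The names
// ReduceOutput and run here are simplified stand-ins (no timeouts, no stake
// weighting), not the crate's actual signatures.
enum ReduceOutput<S> {
    End(S),
    Continue(S),
}

fn run<A, V, S>(
    authorities: Vec<A>,
    initial: S,
    fmap: impl Fn(&A) -> Result<V, String>,
    freduce: impl Fn(S, Result<V, String>) -> Result<ReduceOutput<S>, S>,
) -> Result<S, S> {
    let mut state = initial;
    for a in &authorities {
        match freduce(state, fmap(a)) {
            Ok(ReduceOutput::End(s)) => return Ok(s),   // enough responses: stop early
            Ok(ReduceOutput::Continue(s)) => state = s, // keep folding
            Err(s) => return Err(s),                    // fatal error: shortcut
        }
    }
    // All authorities exhausted without reaching a final state.
    Err(state)
}

fn main() {
    // Fold values until the accumulator reaches 3 (a toy "quorum").
    let total = run(
        vec![1u32, 2, 3],
        0u32,
        |a| Ok::<u32, String>(*a),
        |acc, r| match r {
            Ok(v) if acc + v >= 3 => Ok(ReduceOutput::End(acc + v)),
            Ok(v) => Ok(ReduceOutput::Continue(acc + v)),
            Err(_) => Ok(ReduceOutput::Continue(acc)), // tolerate a bad authority
        },
    );
    assert_eq!(total, Ok(3));
}
// ---------------------------------------------------------------------------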
pub async fn quorum_map_then_reduce_with_timeout< 'a, C, diff --git a/crates/sui-aws-orchestrator/src/benchmark.rs b/crates/sui-aws-orchestrator/src/benchmark.rs index d3fb9a1d182..07ecd959457 100644 --- a/crates/sui-aws-orchestrator/src/benchmark.rs +++ b/crates/sui-aws-orchestrator/src/benchmark.rs @@ -106,7 +106,8 @@ pub enum LoadType { Search { /// The initial load to test (and use a baseline). starting_load: usize, - /// The maximum number of iterations before converging on a breaking point. + /// The maximum number of iterations before converging on a breaking + /// point. max_iterations: usize, }, } @@ -198,13 +199,14 @@ impl BenchmarkParametersGenerator { self } - /// Detects whether the latest benchmark parameters run the system out of capacity. + /// Detects whether the latest benchmark parameters run the system out of + /// capacity. fn out_of_capacity( last_result: &MeasurementsCollection, new_result: &MeasurementsCollection, ) -> bool { - // We consider the system is out of capacity if the latency increased by over 5x with - // respect to the latest run. + // We consider the system is out of capacity if the latency increased by over 5x + // with respect to the latest run. let threshold = last_result.aggregate_average_latency() * 5; let high_latency = new_result.aggregate_average_latency() > threshold; @@ -215,8 +217,8 @@ impl BenchmarkParametersGenerator { high_latency || no_throughput_increase } - /// Register a new benchmark measurements collection. These results are used to determine - /// whether the system reached its breaking point. + /// Register a new benchmark measurements collection. These results are used + /// to determine whether the system reached its breaking point. pub fn register_result(&mut self, result: MeasurementsCollection) { self.next_load = match &mut self.load_type { LoadType::Fixed(loads) => { @@ -274,13 +276,12 @@ pub mod test { use serde::{Deserialize, Serialize}; + use super::{BenchmarkParametersGenerator, BenchmarkType, LoadType}; use crate::{ measurement::{Measurement, MeasurementsCollection}, settings::Settings, }; - use super::{BenchmarkParametersGenerator, BenchmarkType, LoadType}; - /// Mock benchmark type for unit tests. #[derive( Serialize, Deserialize, Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Default, diff --git a/crates/sui-aws-orchestrator/src/client/aws.rs b/crates/sui-aws-orchestrator/src/client/aws.rs index 1b6af312d8d..fecf5beb02f 100644 --- a/crates/sui-aws-orchestrator/src/client/aws.rs +++ b/crates/sui-aws-orchestrator/src/client/aws.rs @@ -7,9 +7,9 @@ use std::{ }; use aws_config::profile::profile_file::{ProfileFileKind, ProfileFiles}; -use aws_sdk_ec2::primitives::Blob; use aws_sdk_ec2::{ config::Region, + primitives::Blob, types::{ BlockDeviceMapping, EbsBlockDevice, EphemeralNvmeSupport, Filter, ResourceType, Tag, TagSpecification, VolumeType, @@ -18,13 +18,12 @@ use aws_sdk_ec2::{ use aws_smithy_http::result::SdkError; use serde::Serialize; +use super::{Instance, ServerProviderClient}; use crate::{ error::{CloudProviderError, CloudProviderResult}, settings::Settings, }; -use super::{Instance, ServerProviderClient}; - // Make a request error from an AWS error message. impl From> for CloudProviderError @@ -75,7 +74,8 @@ impl AwsClient { Self { settings, clients } } - /// Parse an AWS response and ignore errors if they mean a request is a duplicate. + /// Parse an AWS response and ignore errors if they mean a request is a + /// duplicate. 
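// ---------------------------------------------------------------------------
// [Editor's sketch] The "ignore duplicates" idea documented just above, reduced
// to its core: treat an error as success when it only means the request was
// already applied. The String error type, the function name, and the
// "Duplicate" marker are assumptions for illustration; the real method
// inspects an AWS SdkError.
fn ignore_duplicates<T>(response: Result<T, String>) -> Result<(), String> {
    match response {
        Ok(_) => Ok(()),
        // A duplicate-resource error means the resource already exists, so
        // the request is safe to ignore.
        Err(e) if e.contains("Duplicate") => Ok(()),
        Err(e) => Err(e),
    }
}

fn main() {
    assert_eq!(ignore_duplicates(Ok(42)), Ok(()));
    assert_eq!(
        ignore_duplicates::<u32>(Err("InvalidPermission.Duplicate".into())),
        Ok(())
    );
}
// ---------------------------------------------------------------------------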
fn check_but_ignore_duplicates( response: Result< T, @@ -94,7 +94,8 @@ impl AwsClient { Ok(()) } - /// Convert an AWS instance into an orchestrator instance (used in the rest of the codebase). + /// Convert an AWS instance into an orchestrator instance (used in the rest + /// of the codebase). fn make_instance( &self, region: String, @@ -155,7 +156,8 @@ impl AwsClient { }) } - /// Create a new security group for the instance (if it doesn't already exist). + /// Create a new security group for the instance (if it doesn't already + /// exist). async fn create_security_group(&self, client: &aws_sdk_ec2::Client) -> CloudProviderResult<()> { // Create a security group (if it doesn't already exist). let request = client @@ -196,10 +198,11 @@ impl AwsClient { ] } - /// Check whether the instance type specified in the settings supports NVMe drives. + /// Check whether the instance type specified in the settings supports NVMe + /// drives. async fn check_nvme_support(&self) -> CloudProviderResult { - // Get the client for the first region. A given instance type should either have NVMe support - // in all regions or in none. + // Get the client for the first region. A given instance type should either have + // NVMe support in all regions or in none. let client = match self .settings .regions diff --git a/crates/sui-aws-orchestrator/src/client/mod.rs b/crates/sui-aws-orchestrator/src/client/mod.rs index e2db67c7edc..ab685ce4a88 100644 --- a/crates/sui-aws-orchestrator/src/client/mod.rs +++ b/crates/sui-aws-orchestrator/src/client/mod.rs @@ -40,7 +40,8 @@ impl Instance { !self.is_active() } - /// Return whether the instance is terminated and in the process of being deleted. + /// Return whether the instance is terminated and in the process of being + /// deleted. pub fn is_terminated(&self) -> bool { self.status.to_lowercase() == "terminated" } @@ -76,7 +77,8 @@ pub trait ServerProviderClient: Display { where I: Iterator + Send; - /// Halt/Stop the specified instances. We may still be billed for stopped instances. + /// Halt/Stop the specified instances. We may still be billed for stopped + /// instances. async fn stop_instances<'a, I>(&self, instance_ids: I) -> CloudProviderResult<()> where I: Iterator + Send; @@ -86,8 +88,8 @@ pub trait ServerProviderClient: Display { where S: Into + Serialize + Send; - /// Delete a specific instance. Calling this function ensures we are no longer billed for - /// the specified instance. + /// Delete a specific instance. Calling this function ensures we are no + /// longer billed for the specified instance. async fn delete_instance(&self, instance: Instance) -> CloudProviderResult<()>; /// Authorize the provided ssh public key to access machines. 
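// ---------------------------------------------------------------------------
// [Editor's sketch] The status convention used by Instance above: provider
// statuses are compared case-insensitively, and "terminated" instances are the
// ones being deleted (and no longer billed). The struct here is a pared-down
// stand-in with only the `status` field.
struct Instance {
    status: String,
}

impl Instance {
    fn is_terminated(&self) -> bool {
        // Mirrors the is_terminated check shown in the diff above.
        self.status.to_lowercase() == "terminated"
    }
}

fn main() {
    let i = Instance { status: "TERMINATED".into() };
    assert!(i.is_terminated());
}
// ---------------------------------------------------------------------------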
@@ -103,9 +105,8 @@ pub mod test_client { use serde::Serialize; - use crate::{error::CloudProviderResult, settings::Settings}; - use super::{Instance, ServerProviderClient}; + use crate::{error::CloudProviderResult, settings::Settings}; pub struct TestClient { settings: Settings, diff --git a/crates/sui-aws-orchestrator/src/display.rs b/crates/sui-aws-orchestrator/src/display.rs index 4ab1be1dff0..4d8508ab3eb 100644 --- a/crates/sui-aws-orchestrator/src/display.rs +++ b/crates/sui-aws-orchestrator/src/display.rs @@ -93,9 +93,8 @@ mod test { use tokio::time::sleep; - use crate::display::status; - use super::{action, config, done, error, header, newline, warn}; + use crate::display::status; #[tokio::test] #[ignore = "only used to manually check if prints work correctly"] diff --git a/crates/sui-aws-orchestrator/src/faults.rs b/crates/sui-aws-orchestrator/src/faults.rs index 6b238430803..e1de1b285a4 100644 --- a/crates/sui-aws-orchestrator/src/faults.rs +++ b/crates/sui-aws-orchestrator/src/faults.rs @@ -57,7 +57,8 @@ impl Display for FaultsType { } } -/// The actions to apply to the testbed, i.e., which instances to crash and recover. +/// The actions to apply to the testbed, i.e., which instances to crash and +/// recover. #[derive(Default)] pub struct CrashRecoveryAction { /// The instances to boot. @@ -163,9 +164,8 @@ impl CrashRecoverySchedule { mod faults_tests { use std::time::Duration; - use crate::client::Instance; - use super::{CrashRecoverySchedule, FaultsType}; + use crate::client::Instance; #[test] fn crash_recovery_1_fault() { diff --git a/crates/sui-aws-orchestrator/src/logs.rs b/crates/sui-aws-orchestrator/src/logs.rs index 4a162bbb496..d0921ee0f3a 100644 --- a/crates/sui-aws-orchestrator/src/logs.rs +++ b/crates/sui-aws-orchestrator/src/logs.rs @@ -31,8 +31,8 @@ impl LogsAnalyzer { self.client_panic = log.contains("panic"); } - /// Aggregate multiple log analyzers into one, based on the analyzer that found the - /// most serious errors. + /// Aggregate multiple log analyzers into one, based on the analyzer that + /// found the most serious errors. pub fn aggregate(counters: Vec) -> Self { let mut highest = Self::default(); for counter in counters { diff --git a/crates/sui-aws-orchestrator/src/main.rs b/crates/sui-aws-orchestrator/src/main.rs index 0ee92fe16ad..7f39e582073 100644 --- a/crates/sui-aws-orchestrator/src/main.rs +++ b/crates/sui-aws-orchestrator/src/main.rs @@ -39,8 +39,9 @@ type BenchmarkType = NarwhalBenchmarkType; #[derive(Parser)] #[command(author, version, about = "Testbed orchestrator", long_about = None)] pub struct Opts { - /// The path to the settings file. This file contains basic information to deploy testbeds - /// and run benchmarks such as the url of the git repo, the commit to deploy, etc. + /// The path to the settings file. This file contains basic information to + /// deploy testbeds and run benchmarks such as the url of the git repo, + /// the commit to deploy, etc. #[clap( long, value_name = "FILE", @@ -64,8 +65,8 @@ pub enum Operation { /// Run a benchmark on the specified testbed. Benchmark { - /// Percentage of shared vs owned objects; 0 means only owned objects and 100 means - /// only shared objects. + /// Percentage of shared vs owned objects; 0 means only owned objects + /// and 100 means only shared objects. 
#[clap(long, default_value = "0", global = true)]
benchmark_type: String,
@@ -105,12 +106,14 @@ pub enum Operation {
#[clap(long, action, default_value = "false", global = true)]
log_processing: bool,
- /// The number of instances running exclusively load generators. If set to zero the
- /// orchestrator collocates one load generator with each node.
+ /// The number of instances running exclusively load generators. If set
+ /// to zero the orchestrator collocates one load generator with
+ /// each node.
#[clap(long, value_name = "INT", default_value = "0", global = true)]
dedicated_clients: usize,
- /// Whether to forgo a grafana and prometheus instance and leave the testbed unmonitored.
+ /// Whether to forgo a grafana and prometheus instance and leave the
+ /// testbed unmonitored.
#[clap(long, action, default_value = "false", global = true)]
skip_monitoring: bool,
@@ -141,20 +144,22 @@ pub enum TestbedAction {
/// Display the testbed status.
Status,
- /// Deploy the specified number of instances in all regions specified by in the setting file.
+ /// Deploy the specified number of instances in all regions specified in
+ /// the settings file.
Deploy {
/// Number of instances to deploy.
#[clap(long)]
instances: usize,
- /// The region where to deploy the instances. If this parameter is not specified, the
- /// command deploys the specified number of instances in all regions listed in the
- /// setting file.
+ /// The region in which to deploy the instances. If this parameter is not
+ /// specified, the command deploys the specified number of
+ /// instances in all regions listed in the settings file.
#[clap(long)]
region: Option,
},
- /// Start at most the specified number of instances per region on an existing testbed.
+ /// Start at most the specified number of instances per region on an
+ /// existing testbed.
Start {
/// Number of instances to deploy.
#[clap(long, default_value = "200")]
@@ -187,7 +192,8 @@ pub enum Load {
/// The initial load (in tx/s) to test and use a baseline.
#[clap(long, value_name = "INT", default_value = "250")]
starting_load: usize,
- /// The maximum number of iterations before converging on a breaking point.
+ /// The maximum number of iterations before converging on a breaking
+ /// point.
#[clap(long, value_name = "INT", default_value = "5")]
max_iterations: usize,
},
diff --git a/crates/sui-aws-orchestrator/src/measurement.rs b/crates/sui-aws-orchestrator/src/measurement.rs
index b9c94a67880..ebba07bf017 100644
--- a/crates/sui-aws-orchestrator/src/measurement.rs
+++ b/crates/sui-aws-orchestrator/src/measurement.rs
@@ -111,9 +111,10 @@ impl Measurement {
}
/// Compute the tps.
- /// NOTE: Do not use `self.timestamp` as benchmark duration because some clients may
- /// be unable to submit transactions passed the first few seconds of the benchmark. This
- /// may happen as a result of a bad control system withing the nodes.
+ /// NOTE: Do not use `self.timestamp` as benchmark duration because some
+ /// clients may be unable to submit transactions past the first few
+ /// seconds of the benchmark. This may happen as a result of a bad
+ /// control system within the nodes.
pub fn tps(&self, duration: &Duration) -> u64 {
let tps = self.count.checked_div(duration.as_secs() as usize);
tps.unwrap_or_default() as u64
@@ -207,7 +208,8 @@ impl MeasurementsCollection {
self.parameters.load
}
- /// Aggregate the benchmark duration of multiple data points by taking the max.
+ /// Aggregate the benchmark duration of multiple data points by taking the
+ /// max.
pub fn benchmark_duration(&self) -> Duration { self.scrapers .values() @@ -233,7 +235,8 @@ impl MeasurementsCollection { .sum() } - /// Aggregate the average latency of multiple data points by taking the average. + /// Aggregate the average latency of multiple data points by taking the + /// average. pub fn aggregate_average_latency(&self) -> Duration { let last_data_points: Vec<_> = self.scrapers.values().filter_map(|x| x.last()).collect(); last_data_points @@ -294,13 +297,12 @@ impl MeasurementsCollection { mod test { use std::{collections::HashMap, time::Duration}; + use super::{BenchmarkParameters, Measurement, MeasurementsCollection}; use crate::{ benchmark::test::TestBenchmarkType, protocol::test_protocol_metrics::TestProtocolMetrics, settings::Settings, }; - use super::{BenchmarkParameters, Measurement, MeasurementsCollection}; - #[test] fn average_latency() { let data = Measurement { diff --git a/crates/sui-aws-orchestrator/src/monitor.rs b/crates/sui-aws-orchestrator/src/monitor.rs index f3fc81c555b..cfa02acf6ee 100644 --- a/crates/sui-aws-orchestrator/src/monitor.rs +++ b/crates/sui-aws-orchestrator/src/monitor.rs @@ -91,7 +91,8 @@ impl Prometheus { ] } - /// Generate the commands to update the prometheus configuration and restart prometheus. + /// Generate the commands to update the prometheus configuration and restart + /// prometheus. pub fn setup_commands(clients: I, nodes: I, protocol: &P) -> String where I: IntoIterator, @@ -181,7 +182,8 @@ impl Grafana { ] } - /// Generate the commands to update the grafana datasource and restart grafana. + /// Generate the commands to update the grafana datasource and restart + /// grafana. pub fn setup_commands() -> String { [ &format!("(rm -r {} || true)", Self::DATASOURCES_PATH), @@ -219,9 +221,10 @@ impl Grafana { #[allow(dead_code)] /// Bootstrap the grafana with datasource to connect to the given instances. -/// NOTE: Only for macOS. Grafana must be installed through homebrew (and not from source). Deeper grafana -/// configuration can be done through the grafana.ini file (/opt/homebrew/etc/grafana/grafana.ini) or the -/// plist file (~/Library/LaunchAgents/homebrew.mxcl.grafana.plist). +/// NOTE: Only for macOS. Grafana must be installed through homebrew (and not +/// from source). Deeper grafana configuration can be done through the +/// grafana.ini file (/opt/homebrew/etc/grafana/grafana.ini) or the plist file +/// (~/Library/LaunchAgents/homebrew.mxcl.grafana.plist). pub struct LocalGrafana; #[allow(dead_code)] @@ -267,9 +270,10 @@ impl LocalGrafana { Ok(()) } - /// Generate the content of the datasource file for the given instance. This grafana instance takes - /// one datasource per instance and assumes one prometheus server runs per instance. - /// NOTE: The datasource file is a yaml file so spaces are important. + /// Generate the content of the datasource file for the given instance. This + /// grafana instance takes one datasource per instance and assumes one + /// prometheus server runs per instance. NOTE: The datasource file is a + /// yaml file so spaces are important. 
fn datasource(instance: &Instance, index: usize) -> String {
[
"apiVersion: 1",
diff --git a/crates/sui-aws-orchestrator/src/orchestrator.rs b/crates/sui-aws-orchestrator/src/orchestrator.rs
index 193786078c8..988e9c604f9 100644
--- a/crates/sui-aws-orchestrator/src/orchestrator.rs
+++ b/crates/sui-aws-orchestrator/src/orchestrator.rs
@@ -11,7 +11,6 @@ use std::{
};
use tokio::time::{self, Instant};
-use crate::monitor::Monitor;
use crate::{
benchmark::{BenchmarkParameters, BenchmarkParametersGenerator, BenchmarkType},
client::Instance,
@@ -20,6 +19,7 @@ use crate::{
faults::CrashRecoverySchedule,
logs::LogsAnalyzer,
measurement::{Measurement, MeasurementsCollection},
+ monitor::Monitor,
protocol::{ProtocolCommands, ProtocolMetrics},
settings::Settings,
ssh::{CommandContext, CommandStatus, SshConnectionManager},
@@ -29,14 +29,15 @@ pub struct Orchestrator {
/// The testbed's settings.
settings: Settings,
- /// The state of the testbed (reflecting accurately the state of the machines).
+ /// The state of the testbed (reflecting accurately the state of the
+ /// machines).
instances: Vec,
/// The type of the benchmark parameters.
benchmark_type: PhantomData,
/// Provider-specific commands to install on the instance.
instance_setup_commands: Vec,
- /// Protocol-specific commands generator to generate the protocol configuration files,
- /// boot clients and nodes, etc.
+ /// Protocol-specific commands generator to generate the protocol
+ /// configuration files, boot clients and nodes, etc.
protocol_commands: P,
/// The interval between measurements collection.
scrape_interval: Duration,
@@ -50,10 +51,12 @@ pub struct Orchestrator {
skip_testbed_configuration: bool,
/// Whether to downloading and analyze the client and node log files.
log_processing: bool,
- /// Number of instances running only load generators (not nodes). If this value is set
- /// to zero, the orchestrator runs a load generate collocated with each node.
+ /// Number of instances running only load generators (not nodes). If this
+ /// value is set to zero, the orchestrator runs a load generator
+ /// collocated with each node.
dedicated_clients: usize,
- /// Whether to forgo a grafana and prometheus instance and leave the testbed unmonitored.
+ /// Whether to forgo a grafana and prometheus instance and leave the testbed
+ /// unmonitored.
skip_monitoring: bool,
}
@@ -130,8 +133,9 @@ impl Orchestrator {
self
}
- /// Select on which instances of the testbed to run the benchmarks. This function returns two vector
- /// of instances; the first contains the instances on which to run the load generators and the second
+ /// Select on which instances of the testbed to run the benchmarks. This
+ /// function returns two vectors of instances; the first contains the
+ /// instances on which to run the load generators and the second
/// contains the instances on which to run the nodes.
pub fn select_instances(
&self,
@@ -197,8 +201,8 @@ impl Orchestrator {
}
}
- // Spawn a load generate collocated with each node if there are no instances dedicated
- // to excursively run load generators.
+ // Spawn a load generator collocated with each node if there are no instances
+ // dedicated to exclusively run load generators.
if client_instances.is_empty() {
client_instances = nodes_instances.clone();
}
@@ -307,13 +311,14 @@ impl + ProtocolMetrics, T: BenchmarkType> Orchestrator TestbedResult<()> {
display::action("Updating all instances");
- // Update all active instances.
This requires compiling the codebase in release (which - // may take a long time) so we run the command in the background to avoid keeping alive - // many ssh connections for too long. + // Update all active instances. This requires compiling the codebase in release + // (which may take a long time) so we run the command in the background + // to avoid keeping alive many ssh connections for too long. let commit = &self.settings.repository.commit; let command = [ "git fetch -f", @@ -351,7 +356,8 @@ impl + ProtocolMetrics, T: BenchmarkType> Orchestrator + ProtocolMetrics, T: BenchmarkType> Orchestrator + ProtocolMetrics, T: BenchmarkType> Orchestrator + ProtocolMetrics, T: BenchmarkType> Orchestrator { /// The list of dependencies to install (e.g., through apt-get). fn protocol_dependencies(&self) -> Vec<&'static str>; - /// The directories of all databases (that should be erased before each run). + /// The directories of all databases (that should be erased before each + /// run). fn db_directories(&self) -> Vec; - /// The command to generate the genesis and all configuration files. This command - /// is run on each remote machine. + /// The command to generate the genesis and all configuration files. This + /// command is run on each remote machine. fn genesis_command<'a, I>(&self, instances: I) -> String where I: Iterator; - /// The command to run a node. The function returns a vector of commands along with the - /// associated instance on which to run the command. + /// The command to run a node. The function returns a vector of commands + /// along with the associated instance on which to run the command. fn node_command( &self, instances: I, @@ -40,8 +41,8 @@ pub trait ProtocolCommands { where I: IntoIterator; - /// The command to run a client. The function returns a vector of commands along with the - /// associated instance on which to run the command. + /// The command to run a client. The function returns a vector of commands + /// along with the associated instance on which to run the command. fn client_command( &self, instances: I, @@ -51,20 +52,22 @@ pub trait ProtocolCommands { I: IntoIterator; } -/// The names of the minimum metrics exposed by the load generators that are required to -/// compute performance. +/// The names of the minimum metrics exposed by the load generators that are +/// required to compute performance. pub trait ProtocolMetrics { - /// The name of the metric reporting the total duration of the benchmark (in seconds). + /// The name of the metric reporting the total duration of the benchmark (in + /// seconds). const BENCHMARK_DURATION: &'static str; - /// The name of the metric reporting the total number of finalized transactions + /// The name of the metric reporting the total number of finalized + /// transactions const TOTAL_TRANSACTIONS: &'static str; /// The name of the metric reporting the latency buckets. const LATENCY_BUCKETS: &'static str; - /// The name of the metric reporting the sum of the end-to-end latency of all finalized - /// transactions. + /// The name of the metric reporting the sum of the end-to-end latency of + /// all finalized transactions. const LATENCY_SUM: &'static str; - /// The name of the metric reporting the square of the sum of the end-to-end latency of all - /// finalized transactions. + /// The name of the metric reporting the square of the sum of the end-to-end + /// latency of all finalized transactions. const LATENCY_SQUARED_SUM: &'static str; /// The network path where the nodes expose prometheus metrics. 
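The `ProtocolMetrics` constants above only name the scraped metrics; the arithmetic that turns them into the reported figures is implied. A sketch of that arithmetic under stated assumptions: the values are scraped into plain counters, and LATENCY_SQUARED_SUM is read as a sum of squared latencies. Every name below is illustrative, not the orchestrator's real API.

// Sketch under the assumptions stated above.
fn summarize_metrics(
    benchmark_duration_secs: u64, // value behind BENCHMARK_DURATION
    total_transactions: u64,      // value behind TOTAL_TRANSACTIONS
    latency_sum_secs: f64,        // value behind LATENCY_SUM
    latency_squared_sum: f64,     // value behind LATENCY_SQUARED_SUM
) -> (u64, f64, f64) {
    // Throughput: finalized transactions per second over the benchmark window.
    let tps = total_transactions
        .checked_div(benchmark_duration_secs)
        .unwrap_or_default();
    // Average end-to-end latency of all finalized transactions.
    let n = total_transactions.max(1) as f64;
    let avg_latency = latency_sum_secs / n;
    // Standard deviation via E[X^2] - E[X]^2, clamped against rounding error.
    let stdev = (latency_squared_sum / n - avg_latency * avg_latency)
        .max(0.0)
        .sqrt();
    (tps, avg_latency, stdev)
}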
@@ -100,9 +103,8 @@ pub trait ProtocolMetrics {
#[cfg(test)]
pub mod test_protocol_metrics {
- use crate::client::Instance;
- use super::ProtocolMetrics;
+ use super::ProtocolMetrics;
+ use crate::client::Instance;
pub struct TestProtocolMetrics;
diff --git a/crates/sui-aws-orchestrator/src/protocol/narwhal.rs b/crates/sui-aws-orchestrator/src/protocol/narwhal.rs
index 2463e9069d3..7439d0e966e 100644
--- a/crates/sui-aws-orchestrator/src/protocol/narwhal.rs
+++ b/crates/sui-aws-orchestrator/src/protocol/narwhal.rs
@@ -7,15 +7,15 @@ use std::{
str::FromStr,
};
+use narwhal_config::PrometheusMetricsParameters;
+use serde::{Deserialize, Serialize};
+
+use super::{ProtocolCommands, ProtocolMetrics};
use crate::{
benchmark::{BenchmarkParameters, BenchmarkType},
client::Instance,
settings::Settings,
};
-use narwhal_config::PrometheusMetricsParameters;
-use serde::{Deserialize, Serialize};
-
-use super::{ProtocolCommands, ProtocolMetrics};
const NUM_WORKERS: usize = 1;
const BASE_PORT: usize = 5000;
diff --git a/crates/sui-aws-orchestrator/src/protocol/sui.rs b/crates/sui-aws-orchestrator/src/protocol/sui.rs
index 576aa45480f..ea28120e41e 100644
--- a/crates/sui-aws-orchestrator/src/protocol/sui.rs
+++ b/crates/sui-aws-orchestrator/src/protocol/sui.rs
@@ -11,18 +11,17 @@ use serde::{Deserialize, Serialize};
use sui_swarm_config::genesis_config::GenesisConfig;
use sui_types::{base_types::SuiAddress, multiaddr::Multiaddr};
+use super::{ProtocolCommands, ProtocolMetrics};
use crate::{
benchmark::{BenchmarkParameters, BenchmarkType},
client::Instance,
settings::Settings,
};
-use super::{ProtocolCommands, ProtocolMetrics};
-
#[derive(Serialize, Deserialize, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SuiBenchmarkType {
- /// Percentage of shared vs owned objects; 0 means only owned objects and 100 means
- /// only shared objects.
+ /// Percentage of shared vs owned objects; 0 means only owned objects and
+ /// 100 means only shared objects.
shared_objects_ratio: u16,
}
@@ -108,8 +107,8 @@ impl ProtocolCommands for SuiProtocol {
// .map(|i| {
// (
// i,
- // "tail -f --pid=$(pidof sui) -f /dev/null; tail -100 node.log".to_string(),
- // )
+ // "tail -f --pid=$(pidof sui) -f /dev/null; tail -100
+ // node.log".to_string(), )
// })
// .collect()
vec![]
@@ -220,8 +219,8 @@ impl SuiProtocol {
}
}
- /// Creates the network addresses in multi address format for the instances. It returns the
- /// Instance and the corresponding address.
+ /// Creates the network addresses in multi address format for the instances.
+ /// It returns the Instance and the corresponding address.
pub fn resolve_network_addresses(
instances: impl IntoIterator,
) -> Vec<(Instance, Multiaddr)> {
diff --git a/crates/sui-aws-orchestrator/src/settings.rs b/crates/sui-aws-orchestrator/src/settings.rs
index 8eaca609aa3..50b8a70355c 100644
--- a/crates/sui-aws-orchestrator/src/settings.rs
+++ b/crates/sui-aws-orchestrator/src/settings.rs
@@ -49,8 +49,9 @@ pub enum CloudProvider {
/// The testbed settings. Those are topically specified in a file.
#[derive(Deserialize, Clone)]
pub struct Settings {
- /// The testbed unique id. This allows multiple users to run concurrent testbeds on the
- /// same cloud provider's account without interference with each others.
+ /// The testbed unique id. This allows multiple users to run concurrent
+ /// testbeds on the same cloud provider's account without interference
+ /// with each other.
pub testbed_id: String,
/// The cloud provider hosting the testbed.
pub cloud_provider: CloudProvider,
@@ -58,23 +59,28 @@ pub struct Settings {
pub token_file: PathBuf,
/// The ssh private key to access the instances.
pub ssh_private_key_file: PathBuf,
- /// The corresponding ssh public key registered on the instances. If not specified. the
- /// public key defaults the same path as the private key with an added extension 'pub'.
+ /// The corresponding ssh public key registered on the instances. If not
+ /// specified, the public key defaults to the same path as the private key
+ /// with an added extension 'pub'.
pub ssh_public_key_file: Option,
/// The list of cloud provider regions to deploy the testbed.
pub regions: Vec,
- /// The specs of the instances to deploy. Those are dependent on the cloud provider, e.g.,
- /// specifying 't3.medium' creates instances with 2 vCPU and 4GBo of ram on AWS.
+ /// The specs of the instances to deploy. Those are dependent on the cloud
+ /// provider, e.g., specifying 't3.medium' creates instances with 2 vCPU
+ /// and 4GB of RAM on AWS.
pub specs: String,
/// The details of the git reposit to deploy.
pub repository: Repository,
- /// The working directory on the remote instance (containing all configuration files).
+ /// The working directory on the remote instance (containing all
+ /// configuration files).
#[serde(default = "default_working_dir")]
pub working_dir: PathBuf,
- /// The directory (on the local machine) where to save benchmarks measurements.
+ /// The directory (on the local machine) where to save benchmark
+ /// measurements.
#[serde(default = "default_results_dir")]
pub results_dir: PathBuf,
- /// The directory (on the local machine) where to download logs files from the instances.
+ /// The directory (on the local machine) where to download log files from
+ /// the instances.
#[serde(default = "default_logs_dir")]
pub logs_dir: PathBuf,
}
@@ -155,7 +161,8 @@ impl Settings {
}
}
- /// Check whether the input instance matches the criteria described in the settings.
+ /// Check whether the input instance matches the criteria described in the
+ /// settings.
pub fn filter_instances(&self, instance: &Instance) -> bool {
self.regions.contains(&instance.region)
&& instance.specs.to_lowercase().replace('.', "")
diff --git a/crates/sui-aws-orchestrator/src/ssh.rs b/crates/sui-aws-orchestrator/src/ssh.rs
index 093b6d3fbbe..f8d07ec1fcb 100644
--- a/crates/sui-aws-orchestrator/src/ssh.rs
+++ b/crates/sui-aws-orchestrator/src/ssh.rs
@@ -1,21 +1,19 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
-use async_trait::async_trait;
-use std::io::Write;
-use std::sync::Arc;
use std::{
+ io::Write,
net::SocketAddr,
path::{Path, PathBuf},
+ sync::Arc,
time::Duration,
};
+use async_trait::async_trait;
use futures::future::try_join_all;
-use russh::client::Msg;
-use russh::{client, Channel};
+use russh::{client, client::Msg, Channel};
use russh_keys::key;
-use tokio::task::JoinHandle;
-use tokio::time::sleep;
+use tokio::{task::JoinHandle, time::sleep};
use crate::{
client::Instance,
@@ -31,8 +29,8 @@ pub enum CommandStatus {
}
impl CommandStatus {
- /// Return whether a background command is still running. Returns `Terminated` if the
- /// command is not running in the background.
+ /// Return whether a background command is still running. Returns
+ /// `Terminated` if the command is not running in the background.
pub fn status(command_id: &str, text: &str) -> Self {
if text.contains(command_id) {
Self::Running
@@ -45,8 +43,8 @@ impl CommandStatus {
/// The command to execute on all specified remote machines.
#[derive(Clone, Default)]
pub struct CommandContext {
- /// Whether to run the command in the background (and return immediately). Commands
- /// running in the background are identified by a unique id.
+ /// Whether to run the command in the background (and return immediately).
+ /// Commands running in the background are identified by a unique id.
pub background: Option,
/// The path from where to execute the command.
pub path: Option,
@@ -130,7 +128,8 @@ impl SshConnectionManager {
self
}
- /// Set the maximum number of times to retries to establish a connection and execute commands.
+ /// Set the maximum number of retries to establish a connection and
+ /// execute commands.
pub fn with_retries(mut self, retries: usize) -> Self {
self.retries = retries;
self
@@ -365,7 +364,8 @@ impl SshConnection {
Err(error.unwrap())
}
- /// Execute an ssh command on the remote machine and return both stdout and stderr.
+ /// Execute an ssh command on the remote machine and return both stdout and
+ /// stderr.
async fn execute_impl(
&self,
mut channel: Channel,
@@ -406,8 +406,9 @@ impl SshConnection {
Ok((output_str.clone(), output_str))
}
- /// Download a file from the remote machines by doing a silly cat on the file.
- /// TODO: if the files get too big then we should leverage a simple S3 bucket instead.
+ /// Download a file from the remote machines by doing a silly cat on the
+ /// file. TODO: if the files get too big then we should leverage a
+ /// simple S3 bucket instead.
pub async fn download>(&self, path: P) -> SshResult {
let mut error = None;
for _ in 0..self.retries + 1 {
diff --git a/crates/sui-aws-orchestrator/src/testbed.rs b/crates/sui-aws-orchestrator/src/testbed.rs
index 054c51254d8..15cefb5ead0 100644
--- a/crates/sui-aws-orchestrator/src/testbed.rs
+++ b/crates/sui-aws-orchestrator/src/testbed.rs
@@ -7,6 +7,7 @@ use futures::future::try_join_all;
use prettytable::{row, Table};
use tokio::time::{self, Instant};
+use super::client::Instance;
use crate::{
client::ServerProviderClient,
display,
@@ -15,15 +16,14 @@ use crate::{
ssh::SshConnection,
};
-use super::client::Instance;
-
/// Represents a testbed running on a cloud provider.
pub struct Testbed {
/// The testbed's settings.
settings: Settings,
/// The client interfacing with the cloud provider.
client: C,
- /// The state of the testbed (reflecting accurately the state of the machines).
+ /// The state of the testbed (reflecting accurately the state of the
+ /// machines).
instances: Vec,
}
@@ -123,8 +123,9 @@ impl Testbed {
display::newline();
}
- /// Populate the testbed by creating the specified amount of instances per region. The total
- /// number of instances created is thus the specified amount x the number of regions.
+ /// Populate the testbed by creating the specified amount of instances per
+ /// region. The total number of instances created is thus the specified
+ /// amount x the number of regions.
pub async fn deploy(&mut self, quantity: usize, region: Option) -> TestbedResult<()> {
display::action(format!("Deploying instances ({quantity} per region)"));
@@ -165,8 +166,8 @@ impl Testbed {
Ok(())
}
- /// Start the specified number of instances in each region. Returns an error if there are not
- /// enough available instances.
+ /// Start the specified number of instances in each region.
Returns an error + /// if there are not enough available instances. pub async fn start(&mut self, quantity: usize) -> TestbedResult<()> { display::action("Booting instances"); diff --git a/crates/sui-benchmark/src/bank.rs b/crates/sui-benchmark/src/bank.rs index 1861a35d93a..74d851f9e57 100644 --- a/crates/sui-benchmark/src/bank.rs +++ b/crates/sui-benchmark/src/bank.rs @@ -1,20 +1,27 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::util::UpdatedAndNewlyMintedGasCoins; -use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, WorkloadBuilder, MAX_BUDGET}; -use crate::workloads::{Gas, GasCoinConfig}; -use crate::ValidatorProxy; +use std::{ + collections::{HashMap, VecDeque}, + sync::Arc, +}; + use anyhow::{Error, Result}; use itertools::Itertools; -use std::collections::{HashMap, VecDeque}; -use std::sync::Arc; use sui_core::test_utils::{make_pay_sui_transaction, make_transfer_sui_transaction}; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::AccountKeyPair; +use sui_types::{base_types::SuiAddress, crypto::AccountKeyPair}; use tracing::info; +use crate::{ + util::UpdatedAndNewlyMintedGasCoins, + workloads::{ + payload::Payload, + workload::{Workload, WorkloadBuilder, MAX_BUDGET}, + Gas, GasCoinConfig, + }, + ValidatorProxy, +}; + /// Bank is used for generating gas for running the benchmark. #[derive(Clone)] pub struct BenchmarkBank { @@ -131,7 +138,7 @@ impl BenchmarkBank { let updated_gas = effects .mutated() .into_iter() - .find(|(k, _)| k.0 == init_coin.0 .0) + .find(|(k, _)| k.0 == init_coin.0.0) .ok_or("Input gas missing in the effects") .map_err(Error::msg)?; @@ -182,7 +189,7 @@ impl BenchmarkBank { let updated_gas = effects .mutated() .into_iter() - .find(|(k, _)| k.0 == self.primary_coin.0 .0) + .find(|(k, _)| k.0 == self.primary_coin.0.0) .ok_or("Input gas missing in the effects") .map_err(Error::msg)?; diff --git a/crates/sui-benchmark/src/benchmark_setup.rs b/crates/sui-benchmark/src/benchmark_setup.rs index 86338e7232a..2fc44d70da7 100644 --- a/crates/sui-benchmark/src/benchmark_setup.rs +++ b/crates/sui-benchmark/src/benchmark_setup.rs @@ -1,30 +1,31 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::bank::BenchmarkBank; -use crate::options::Opts; -use crate::util::get_ed25519_keypair_from_keystore; -use crate::{FullNodeProxy, LocalValidatorAggregatorProxy, ValidatorProxy}; +use std::{path::PathBuf, sync::Arc, thread::JoinHandle, time::Duration}; + use anyhow::{anyhow, bail, Context, Result}; use prometheus::Registry; use rand::seq::SliceRandom; -use std::path::PathBuf; -use std::sync::Arc; -use std::thread::JoinHandle; -use std::time::Duration; use sui_swarm_config::genesis_config::AccountConfig; -use sui_types::base_types::ConciseableName; -use sui_types::base_types::ObjectID; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::{deterministic_random_account_key, AccountKeyPair}; -use sui_types::gas_coin::TOTAL_SUPPLY_MIST; -use sui_types::object::Owner; +use sui_types::{ + base_types::{ConciseableName, ObjectID, SuiAddress}, + crypto::{deterministic_random_account_key, AccountKeyPair}, + gas_coin::TOTAL_SUPPLY_MIST, + object::Owner, +}; use test_cluster::TestClusterBuilder; -use tokio::runtime::Builder; -use tokio::sync::{oneshot, Barrier}; -use tokio::time::sleep; +use tokio::{ + runtime::Builder, + sync::{oneshot, Barrier}, + time::sleep, +}; use tracing::info; +use crate::{ + bank::BenchmarkBank, options::Opts, util::get_ed25519_keypair_from_keystore, FullNodeProxy, + LocalValidatorAggregatorProxy, ValidatorProxy, +}; + pub enum Env { // Mode where benchmark in run on a validator cluster that gets spun up locally Local, @@ -100,7 +101,8 @@ impl Env { let cluster = TestClusterBuilder::new() .with_accounts(vec![AccountConfig { address: Some(primary_gas_owner), - // We can't use TOTAL_SUPPLY_MIST because we need to account for validator stakes in genesis allocation. + // We can't use TOTAL_SUPPLY_MIST because we need to account for validator + // stakes in genesis allocation. gas_amounts: vec![TOTAL_SUPPLY_MIST / 2], }]) .with_num_validators(committee_size) @@ -224,7 +226,8 @@ impl Env { .await?; gas_objects.sort_by_key(|&(gas, _)| std::cmp::Reverse(gas)); - // TODO: Merge all owned gas objects into one and use that as the primary gas object. + // TODO: Merge all owned gas objects into one and use that as the primary gas + // object. let (balance, primary_gas_obj) = gas_objects .iter() .max_by_key(|(balance, _)| balance) diff --git a/crates/sui-benchmark/src/bin/stress.rs b/crates/sui-benchmark/src/bin/stress.rs index dfedddaa253..b9ac64a5f65 100644 --- a/crates/sui-benchmark/src/bin/stress.rs +++ b/crates/sui-benchmark/src/bin/stress.rs @@ -1,31 +1,21 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{sync::Arc, time::Duration}; + use anyhow::{anyhow, Context, Result}; use clap::*; - use prometheus::Registry; -use rand::seq::SliceRandom; -use rand::Rng; -use sui_protocol_config::Chain; -use tokio::time::sleep; - -use std::sync::Arc; -use std::time::Duration; -use sui_benchmark::drivers::bench_driver::BenchDriver; -use sui_benchmark::drivers::driver::Driver; -use sui_benchmark::drivers::BenchmarkCmp; -use sui_benchmark::drivers::BenchmarkStats; -use sui_protocol_config::{ProtocolConfig, ProtocolVersion}; - -use sui_benchmark::benchmark_setup::Env; -use sui_benchmark::options::Opts; - -use sui_benchmark::workloads::workload_configuration::WorkloadConfiguration; - -use sui_benchmark::system_state_observer::SystemStateObserver; -use tokio::runtime::Builder; -use tokio::sync::Barrier; +use rand::{seq::SliceRandom, Rng}; +use sui_benchmark::{ + benchmark_setup::Env, + drivers::{bench_driver::BenchDriver, driver::Driver, BenchmarkCmp, BenchmarkStats}, + options::Opts, + system_state_observer::SystemStateObserver, + workloads::workload_configuration::WorkloadConfiguration, +}; +use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; +use tokio::{runtime::Builder, sync::Barrier, time::sleep}; /// To spin up a local cluster and direct some load /// at it with 50/50 shared and owned traffic, use diff --git a/crates/sui-benchmark/src/drivers/bench_driver.rs b/crates/sui-benchmark/src/drivers/bench_driver.rs index 3d2906a4a92..c07c3cb2c42 100644 --- a/crates/sui-benchmark/src/drivers/bench_driver.rs +++ b/crates/sui-benchmark/src/drivers/bench_driver.rs @@ -1,50 +1,57 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anyhow::Context; -use anyhow::{anyhow, Result}; +use std::{ + collections::{BTreeMap, VecDeque}, + fmt::{Debug, Formatter}, + future::Future, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + +use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; -use futures::future::try_join_all; -use futures::future::BoxFuture; -use futures::FutureExt; -use futures::{stream::FuturesUnordered, StreamExt}; -use indicatif::ProgressBar; -use indicatif::ProgressStyle; -use prometheus::register_histogram_vec_with_registry; -use prometheus::IntCounterVec; -use prometheus::Registry; -use prometheus::{register_counter_vec_with_registry, register_gauge_vec_with_registry}; -use prometheus::{register_int_counter_vec_with_registry, CounterVec}; -use prometheus::{register_int_gauge_with_registry, GaugeVec}; -use prometheus::{HistogramVec, IntGauge}; +use futures::{ + future::{try_join_all, BoxFuture}, + stream::FuturesUnordered, + FutureExt, StreamExt, +}; +use indicatif::{ProgressBar, ProgressStyle}; +use prometheus::{ + register_counter_vec_with_registry, register_gauge_vec_with_registry, + register_histogram_vec_with_registry, register_int_counter_vec_with_registry, + register_int_gauge_with_registry, CounterVec, GaugeVec, HistogramVec, IntCounterVec, IntGauge, + Registry, +}; use rand::seq::SliceRandom; -use tokio::sync::mpsc::{channel, Sender}; -use tokio::sync::OnceCell; -use tokio_util::sync::CancellationToken; - -use crate::drivers::driver::Driver; -use crate::drivers::HistogramWrapper; -use crate::system_state_observer::SystemStateObserver; -use crate::workloads::payload::Payload; -use crate::workloads::{GroupID, WorkloadInfo}; -use crate::{ExecutionEffects, ValidatorProxy}; -use std::collections::{BTreeMap, VecDeque}; -use std::fmt::{Debug, Formatter}; -use 
std::future::Future;
-use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::Arc;
-use std::time::Duration;
-use sui_types::committee::Committee;
-use sui_types::quorum_driver_types::QuorumDriverError;
-use sui_types::transaction::{Transaction, TransactionDataAPI};
+use sui_types::{
+ committee::Committee,
+ quorum_driver_types::QuorumDriverError,
+ transaction::{Transaction, TransactionDataAPI},
+};
use sysinfo::{CpuExt, System, SystemExt};
-use tokio::sync::Barrier;
-use tokio::task::{JoinHandle, JoinSet};
-use tokio::{time, time::Instant};
+use tokio::{
+ sync::{
+ mpsc::{channel, Sender},
+ Barrier, OnceCell,
+ },
+ task::{JoinHandle, JoinSet},
+ time,
+ time::Instant,
+};
+use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
-use super::Interval;
-use super::{BenchmarkStats, StressStats};
+use super::{BenchmarkStats, Interval, StressStats};
+use crate::{
+ drivers::{driver::Driver, HistogramWrapper},
+ system_state_observer::SystemStateObserver,
+ workloads::{payload::Payload, GroupID, WorkloadInfo},
+ ExecutionEffects, ValidatorProxy,
+};
pub struct BenchMetrics {
pub benchmark_duration: IntGauge,
pub num_success: IntCounterVec,
@@ -293,7 +300,8 @@ async fn ctrl_c() -> std::io::Result<()> {
tokio::signal::ctrl_c().await
}
-// TODO: if more use is made of tokio::signal we should just add support for it to the sim.
+// TODO: if more use is made of tokio::signal we should just add support for it
+// to the sim.
#[cfg(msim)]
async fn ctrl_c() -> std::io::Result<()> {
futures::future::pending().await
@@ -316,8 +324,9 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver {
let (tx, mut rx) = channel(100);
let (stress_stat_tx, mut stress_stat_rx) = channel(100);
- // All the benchmark workers that are grouped by GroupID. The group is order in group id order
- // ascending. This is important as benchmark groups should be executed in order to follow the input settings.
+ // All the benchmark workers, grouped by GroupID. The groups are ordered by
+ // group id, ascending. This is important as benchmark groups
+ // should be executed in order to follow the input settings.
let mut bench_workers = VecDeque::new();
let mut worker_id = 0;
@@ -353,7 +362,8 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver {
let total_benchmark_progress = Arc::new(create_progress_bar(total_benchmark_run_interval));
let total_benchmark_gas_used = Arc::new(AtomicU64::new(0));
- // Spin up the scheduler task to orchestrate running the workers for each benchmark group.
+ // Spin up the scheduler task to orchestrate running the workers for each
+ // benchmark group.
let scheduler = spawn_workers_scheduler(
bench_workers,
self.token.clone(),
@@ -393,8 +403,8 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver {
},
) = rx.recv().await
{
- // We use the special id as signal to clear up the stat collection map since that means
- // that new benchmark group workers have spun up.
+ // We use the special id as a signal to clear the stat collection map since
+ // that means that new benchmark group workers have spun up.
if id == usize::MAX {
stat_collection.clear();
continue;
}
@@ -417,7 +427,8 @@
for (_, v) in stat_collection.iter() {
let duration = v.bench_stats.duration.as_secs() as f32;
- // no reason to do any measurements when duration is zero as this will output NaN
+ // no reason to do any measurements when duration is zero as this will output
+ // NaN
if duration == 0.0 {
continue;
}
@@ -442,7 +453,21 @@
};
counter += 1;
if counter % num_workers == 0 {
- stat = format!("TPS = {}, CPS = {}, latency_ms(min/p50/p99/max) = {}/{}/{}/{}, num_success_tx = {}, num_error_tx = {}, num_success_cmds = {}, no_gas = {}, submitted = {}, in_flight = {}", total_qps, total_cps, latency_histogram.min(), latency_histogram.value_at_quantile(0.5), latency_histogram.value_at_quantile(0.99), latency_histogram.max(), num_success_txes, num_error_txes, num_success_cmds, num_no_gas, num_submitted, num_in_flight);
+ stat = format!(
+ "TPS = {}, CPS = {}, latency_ms(min/p50/p99/max) = {}/{}/{}/{}, num_success_tx = {}, num_error_tx = {}, num_success_cmds = {}, no_gas = {}, submitted = {}, in_flight = {}",
+ total_qps,
+ total_cps,
+ latency_histogram.min(),
+ latency_histogram.value_at_quantile(0.5),
+ latency_histogram.value_at_quantile(0.99),
+ latency_histogram.max(),
+ num_success_txes,
+ num_error_txes,
+ num_success_cmds,
+ num_no_gas,
+ num_submitted,
+ num_in_flight
+ );
if show_progress {
eprintln!("{}", stat);
}
@@ -508,11 +533,12 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver {
}
}
-/// The workers scheduler is orchestrating the bench workers to run according to their group. Each
-/// group is running for a specific period/interval. Once finished then the next group of bench workers
-/// is picked up to run. The worker groups are cycled , so once the last group is run then we start
-/// again from the beginning. That allows running benchmarks with repeatable patterns across the whole
-/// benchmark duration.
+/// The workers scheduler orchestrates the bench workers to run according to
+/// their group. Each group runs for a specific period/interval. Once
+/// finished, the next group of bench workers is picked up to run. The
+/// worker groups are cycled, so once the last group has run we start again
+/// from the beginning. That allows running benchmarks with repeatable patterns
+/// across the whole benchmark duration.
async fn spawn_workers_scheduler(
mut bench_workers: VecDeque>,
cancellation_token: CancellationToken,
@@ -673,7 +699,8 @@ async fn run_bench_worker(
total_benchmark_start_time: Instant,
total_benchmark_gas_used: Arc,
) -> Option {
- // Waiting until all the tasks have been spawn , so we can coordinate the traffic and timing.
+ // Waiting until all the tasks have been spawned, so we can coordinate the
+ // traffic and timing.
barrier.wait().await;
debug!("Run {:?}", worker);
let group_benchmark_start_time = Instant::now();
@@ -786,7 +813,8 @@ async fn run_bench_worker(
}
};
- // Updates the progress bars. if any of the progress bars are finished then true is returned. False otherwise.
+ // Updates the progress bars. If any of the progress bars is finished then true
+ // is returned. False otherwise.
let update_progress = |increment_by_value: u64| {
let group_gas_used = group_gas_used.load(Ordering::SeqCst);
let total_benchmark_gas_used = total_benchmark_gas_used.load(Ordering::SeqCst);
@@ -996,8 +1024,9 @@ async fn run_bench_worker(
Some(worker)
}
-/// Creates a new progress bar based on the provided duration. The method is agnostic to the actual
-/// usage - weather we want to track the overall benchmark duration or an individual benchmark run.
+/// Creates a new progress bar based on the provided duration. The method is
+/// agnostic to the actual usage - whether we want to track the overall
+/// benchmark duration or an individual benchmark run.
fn create_progress_bar(duration: Interval) -> ProgressBar {
fn new_progress_bar(len: u64) -> ProgressBar {
if cfg!(msim) {
diff --git a/crates/sui-benchmark/src/drivers/driver.rs b/crates/sui-benchmark/src/drivers/driver.rs
index 82a74cfea0d..e138ab6b44d 100644
--- a/crates/sui-benchmark/src/drivers/driver.rs
+++ b/crates/sui-benchmark/src/drivers/driver.rs
@@ -1,16 +1,17 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
-use std::collections::BTreeMap;
-use std::sync::Arc;
+use std::{collections::BTreeMap, sync::Arc};
-use crate::drivers::Interval;
-use crate::system_state_observer::SystemStateObserver;
-use crate::ValidatorProxy;
use async_trait::async_trait;
use prometheus::Registry;
-use crate::workloads::{GroupID, WorkloadInfo};
+use crate::{
+ drivers::Interval,
+ system_state_observer::SystemStateObserver,
+ workloads::{GroupID, WorkloadInfo},
+ ValidatorProxy,
+};
#[async_trait]
pub trait Driver {
diff --git a/crates/sui-benchmark/src/drivers/mod.rs b/crates/sui-benchmark/src/drivers/mod.rs
index 9a245f57429..d8083b5b9cd 100644
--- a/crates/sui-benchmark/src/drivers/mod.rs
+++ b/crates/sui-benchmark/src/drivers/mod.rs
@@ -1,9 +1,9 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
+use std::{fmt::Formatter, str::FromStr, time::Duration};
+
use duration_str::parse;
-use std::fmt::Formatter;
-use std::{str::FromStr, time::Duration};
pub mod bench_driver;
pub mod driver;
@@ -401,8 +401,8 @@ impl BenchmarkCmp<'_> {
}
}
-/// Convert an unsigned number into a string separated by `delim` every `step_size` digits
-/// For example used to make 100000 more readable as 100,000
+/// Convert an unsigned number into a string separated by `delim` every
+/// `step_size` digits. For example, used to make 100000 more readable as 100,000
fn format_num_with_separators + std::fmt::Display>(
x: T,
step_size: u8,
diff --git a/crates/sui-benchmark/src/embedded_reconfig_observer.rs b/crates/sui-benchmark/src/embedded_reconfig_observer.rs
index 3634ac9bc6e..8667c83f905 100644
--- a/crates/sui-benchmark/src/embedded_reconfig_observer.rs
+++ b/crates/sui-benchmark/src/embedded_reconfig_observer.rs
@@ -1,11 +1,12 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
+use std::sync::Arc;
+
use anyhow::anyhow;
use async_trait::async_trait;
-use std::sync::Arc;
-use sui_core::authority_aggregator::AuthorityAggregator;
use sui_core::{
+ authority_aggregator::AuthorityAggregator,
authority_client::NetworkAuthorityClient,
quorum_driver::{reconfig_observer::ReconfigObserver, QuorumDriver},
};
@@ -16,13 +17,12 @@ use tracing::{error, info, trace};
/// A ReconfigObserver that polls validators periodically
/// to get new epoch information.
/// Caveat:
-/// 1. it does not guarantee to insert every committee into
-/// committee store.
This is fine in scenarios such as
-/// stress, but may not be suitable in some other cases.
-/// 2. because of 1, if it misses intermediate committee(s)
-/// and we happen to have a big committee rotation, it may
-/// fail to get quorum on the latest committee info from
-/// demissioned validators and then stop working.
+/// 1. it does not guarantee to insert every committee into the committee store.
+/// This is fine in scenarios such as stress, but may not be suitable in some
+/// other cases.
+/// 2. because of 1, if it misses intermediate committee(s) and we happen to
+/// have a big committee rotation, it may fail to get quorum on the latest
+/// committee info from decommissioned validators and then stop working.
/// Background: this is a temporary solution for stress before
/// we see fullnode reconfiguration stabilizes.
#[derive(Clone, Default)]
diff --git a/crates/sui-benchmark/src/fullnode_reconfig_observer.rs b/crates/sui-benchmark/src/fullnode_reconfig_observer.rs
index 1a44aff8db3..5f19f3ab667 100644
--- a/crates/sui-benchmark/src/fullnode_reconfig_observer.rs
+++ b/crates/sui-benchmark/src/fullnode_reconfig_observer.rs
@@ -1,8 +1,9 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
-use async_trait::async_trait;
use std::{collections::HashMap, sync::Arc};
+
+use async_trait::async_trait;
use sui_core::{
authority_aggregator::{AuthAggMetrics, AuthorityAggregator},
authority_client::NetworkAuthorityClient,
diff --git a/crates/sui-benchmark/src/in_memory_wallet.rs b/crates/sui-benchmark/src/in_memory_wallet.rs
index 07bd6fccd47..cc2f6a286f3 100644
--- a/crates/sui-benchmark/src/in_memory_wallet.rs
+++ b/crates/sui-benchmark/src/in_memory_wallet.rs
@@ -8,13 +8,14 @@ use sui_types::{
base_types::{ObjectID, ObjectRef, SuiAddress},
crypto::AccountKeyPair,
object::Owner,
- transaction::{CallArg, Transaction, TransactionData, TransactionDataAPI},
+ transaction::{CallArg, Command, Transaction, TransactionData, TransactionDataAPI},
utils::to_sender_signed_transaction,
};
-use crate::ProgrammableTransactionBuilder;
-use crate::{convert_move_call_args, workloads::Gas, BenchMoveCallArg, ExecutionEffects};
-use sui_types::transaction::Command;
+use crate::{
+ convert_move_call_args, workloads::Gas, BenchMoveCallArg, ExecutionEffects,
+ ProgrammableTransactionBuilder,
+};
/// A Sui account and all of the objects it owns
#[derive(Debug)]
@@ -57,7 +58,8 @@ impl SuiAccount {
}
}
-/// Utility struct tracking keys for known accounts, owned objects, shared objects, and immutable objects
+/// Utility struct tracking keys for known accounts, owned objects, shared
+/// objects, and immutable objects
#[derive(Debug, Default)]
pub struct InMemoryWallet {
accounts: BTreeMap, // TODO: track shared and immutable objects as well
@@ -95,10 +97,11 @@ impl InMemoryWallet {
for obj in effects.deleted() {
// by construction, every deleted object either
// 1. belongs to the tx sender directly (e.g., sender owned the object)
- // 2. belongs to the sender indirectly (e.g., deleted object was a dynamic field of a object the sender owned)
+ // 2. belongs to the sender indirectly (e.g., deleted object was a dynamic field
+ // of an object the sender owned)
// 3. is shared (though we do not yet support deletion of shared objects)
- // so, we just try to delete everything from the sender's account here, though it's
- // worth noting that (2) and (3) are possible.
+ // so, we just try to delete everything from the sender's account here, though
+ // it's worth noting that (2) and (3) are possible.
sender_account.delete(&obj.0); } } // else, tx sender is not an account we can spend from, we don't care diff --git a/crates/sui-benchmark/src/lib.rs b/crates/sui-benchmark/src/lib.rs index 3ac0b83c354..90c3f5a87b1 100644 --- a/crates/sui-benchmark/src/lib.rs +++ b/crates/sui-benchmark/src/lib.rs @@ -1,5 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, + time::Duration, +}; + use anyhow::bail; use async_trait::async_trait; use embedded_reconfig_observer::EmbeddedReconfigObserver; @@ -9,11 +15,6 @@ use mysten_metrics::GaugeGuard; use prometheus::Registry; use rand::Rng; use roaring::RoaringBitmap; -use std::{ - collections::BTreeMap, - sync::{Arc, Mutex}, - time::Duration, -}; use sui_config::genesis::Genesis; use sui_core::{ authority_aggregator::{AuthorityAggregator, AuthorityAggregatorBuilder}, @@ -30,32 +31,23 @@ use sui_json_rpc_types::{ }; use sui_network::{DEFAULT_CONNECT_TIMEOUT_SEC, DEFAULT_REQUEST_TIMEOUT_SEC}; use sui_sdk::{SuiClient, SuiClientBuilder}; -use sui_types::base_types::ConciseableName; -use sui_types::committee::CommitteeTrait; -use sui_types::effects::{CertifiedTransactionEffects, TransactionEffectsAPI, TransactionEvents}; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; -use sui_types::transaction::Argument; -use sui_types::transaction::CallArg; -use sui_types::transaction::ObjectArg; use sui_types::{ - base_types::ObjectID, - committee::{Committee, EpochId}, + base_types::{AuthorityName, ConciseableName, ObjectID, ObjectRef, SequenceNumber, SuiAddress}, + committee::{Committee, CommitteeTrait, EpochId}, crypto::{ AggregateAuthenticator, AggregateAuthoritySignature, AuthorityQuorumSignInfo, - AuthoritySignature, + AuthoritySignature, AuthorityStrongQuorumSignInfo, }, + effects::{CertifiedTransactionEffects, TransactionEffectsAPI, TransactionEvents}, + error::SuiError, + gas::GasCostSummary, + gas_coin::GasCoin, message_envelope::Envelope, - object::Object, - transaction::{CertifiedTransaction, Transaction}, + object::{Object, Owner}, + programmable_transaction_builder::ProgrammableTransactionBuilder, + sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, SuiSystemStateTrait}, + transaction::{Argument, CallArg, CertifiedTransaction, ObjectArg, Transaction}, }; -use sui_types::{base_types::ObjectRef, crypto::AuthorityStrongQuorumSignInfo, object::Owner}; -use sui_types::{base_types::SequenceNumber, gas_coin::GasCoin}; -use sui_types::{ - base_types::{AuthorityName, SuiAddress}, - sui_system_state::SuiSystemStateTrait, -}; -use sui_types::{error::SuiError, gas::GasCostSummary}; use tokio::{ task::JoinSet, time::{sleep, timeout}, @@ -73,8 +65,10 @@ pub mod system_state_observer; pub mod util; pub mod workloads; use futures::FutureExt; -use sui_types::messages_grpc::{HandleCertificateResponseV2, TransactionStatus}; -use sui_types::quorum_driver_types::{QuorumDriverError, QuorumDriverResponse}; +use sui_types::{ + messages_grpc::{HandleCertificateResponseV2, TransactionStatus}, + quorum_driver_types::{QuorumDriverError, QuorumDriverResponse}, +}; #[derive(Debug)] /// A wrapper on execution results to accommodate different types of @@ -149,7 +143,7 @@ impl ExecutionEffects { pub fn sender(&self) -> SuiAddress { match self.gas_object().1 { Owner::AddressOwner(a) => a, - Owner::ObjectOwner(_) | Owner::Shared { .. 
} | Owner::Immutable => unreachable!(), /* owner of gas object is always an address */
}
}
@@ -227,8 +221,8 @@ pub trait ValidatorProxy {
async fn execute_transaction_block(&self, tx: Transaction) -> anyhow::Result;
- /// This function is similar to `execute_transaction` but does not check any validator's
- /// signature. It should only be used for benchmarks.
+ /// This function is similar to `execute_transaction` but does not check any
+ /// validator's signature. It should only be used for benchmarks.
async fn execute_bench_transaction(&self, tx: Transaction) -> anyhow::Result;
fn clone_committee(&self) -> Arc;
@@ -240,7 +234,8 @@ pub trait ValidatorProxy {
async fn get_validators(&self) -> Result, anyhow::Error>;
}
-// TODO: Eventually remove this proxy because we shouldn't rely on validators to read objects.
+// TODO: Eventually remove this proxy because we shouldn't rely on validators to
+// read objects.
pub struct LocalValidatorAggregatorProxy {
_qd_handler: QuorumDriverHandler,
// Stress client does not verify individual validator signatures since this is very expensive
@@ -397,7 +392,8 @@ impl ValidatorProxy for LocalValidatorAggregatorProxy {
}
async fn execute_bench_transaction(&self, tx: Transaction) -> anyhow::Result {
- // Store the epoch number; we read it from the votes and use it later to create the certificate.
+ // Store the epoch number; we read it from the votes and use it later to create
+ // the certificate.
let mut epoch = 0;
let auth_agg = self.qd.authority_aggregator().load();
@@ -429,7 +425,8 @@ impl ValidatorProxy for LocalValidatorAggregatorProxy {
total_stake += self.committee.weight(&signature.authority);
votes.push(signature);
}
- // The transaction may be submitted again in case the certificate's submission failed.
+ // The transaction may be submitted again in case the certificate's submission
+ // failed.
TransactionStatus::Executed(cert, _effects, _) => {
tracing::warn!("Transaction already submitted: {tx:?}");
if let Some(cert) = cert {
@@ -562,8 +559,8 @@ impl ValidatorProxy for LocalValidatorAggregatorProxy {
}
}
- // Abort if we failed to submit the certificate to enough validators. This typically
- // happens when the validators are overloaded and the requests timed out.
+ // Abort if we failed to submit the certificate to enough validators. This
+ // typically happens when the validators are overloaded and the requests
+ // timed out.
if transaction_effects.is_none() || total_stake < self.committee.quorum_threshold() {
bail!("Failed to submit certificate to quorum of validators");
}
@@ -726,8 +724,8 @@ impl ValidatorProxy for FullNodeProxy {
let tx_digest = *tx.digest();
let mut retry_cnt = 0;
while retry_cnt < 10 {
- // Fullnode could time out after WAIT_FOR_FINALITY_TIMEOUT (30s) in TransactionOrchestrator
- // SuiClient times out after 60s
+ // Fullnode could time out after WAIT_FOR_FINALITY_TIMEOUT (30s) in
+ // TransactionOrchestrator; SuiClient times out after 60s
match self
.sui_client
.quorum_driver_api()
diff --git a/crates/sui-benchmark/src/options.rs b/crates/sui-benchmark/src/options.rs
index 1dc0f5c3ce5..bb6f895f070 100644
--- a/crates/sui-benchmark/src/options.rs
+++ b/crates/sui-benchmark/src/options.rs
@@ -1,12 +1,12 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
-use clap::*;
+use std::str::FromStr;
+use clap::*;
use strum_macros::EnumString;
use crate::drivers::Interval;
-use std::str::FromStr;
#[derive(Parser)]
#[clap(name = "Stress Testing Framework")]
pub struct Opts {
@@ -105,8 +105,8 @@ pub struct Opts {
#[clap(long, default_value = "0", global = true)]
pub staggered_start_max_multiplier: u32,
- /// Start the stress test at a given protocol version. (Usually unnecessary if stress test is
- /// built at the same commit as the validators.
+ /// Start the stress test at a given protocol version. (Usually unnecessary
+ /// if the stress test is built at the same commit as the validators.)
#[clap(long, global = true)]
pub protocol_version: Option,
}
@@ -126,13 +126,14 @@ pub enum RunSpec {
// will likely change in future to support
// more representative workloads.
//
- // The Bench command allow us to define multiple benchmark groups in order to simulate different
- // traffic characteristics across the whole benchmark duration. For that reason all arguments are
- // expressed as vectors. Each benchmark group runs for the specified duration - as defined on the
- // duration field - and for each group the parameters of the same vector position are considered.
- // For example, for benchmark group 0, the vector arguments on position 0 refer to the properties
- // of that benchmark group. The benchmark groups will run in a rotation fashion, unless the duration
- // of the last group is set as "unbounded" which will run of the rest of the whole benchmark.
+ // The Bench command allows us to define multiple benchmark groups in order to simulate
+ // different traffic characteristics across the whole benchmark duration. For that reason
+ // all arguments are expressed as vectors. Each benchmark group runs for the specified
+ // duration - as defined in the duration field - and for each group the parameters of the
+ // same vector position are considered. For example, for benchmark group 0, the vector
+ // arguments on position 0 refer to the properties of that benchmark group. The benchmark
+ // groups will run in a rotation fashion, unless the duration of the last group is set as
+ // "unbounded", which will run for the rest of the whole benchmark.
//
// Example: for Bench argument:
//
@@ -145,17 +146,22 @@ pub enum RunSpec {
// duration: vec!["10s", "30s"]
// }
//
- // It will run 2 "benchmarks" in a cycle . First the benchmark with parameters {shared_counter: 100, transfer_object: 50, target_qps: 1000, duration: "10s"...}
- // will run for 10 seconds. Once finished, then a second benchmark will run immediately with parameters {shared_counter: 200, transfer_object: 50, target_qps: 2000, duration: "30s"...}
- // for 30 seconds. Once finished, then again the fist benchmark will run. That will happen perpetually unless a `run_duration` is defined.
- // If the second benchmark group had as duration "unbounded" then this benchmark would run forever and no cycling would occur.
- // It has to be noted that all those benchmark groups are running under the same benchmark. The benchmark groups are essentially a way
- // for someone to define for example different traffic loads to simulate things like peaks, lows etc.
+ // It will run 2 "benchmarks" in a cycle. First the benchmark with parameters
+ // {shared_counter: 100, transfer_object: 50, target_qps: 1000, duration: "10s"...}
+ // will run for 10 seconds.
Once finished, then a second benchmark will run immediately with
+    // parameters {shared_counter: 200, transfer_object: 50, target_qps: 2000, duration: "30s"...}
+    // for 30 seconds. Once finished, then again the first benchmark will run. That will happen
+    // perpetually unless a `run_duration` is defined. If the second benchmark group had as
+    // duration "unbounded" then this benchmark would run forever and no cycling would occur.
+    // It has to be noted that all those benchmark groups are running under the same benchmark.
+    // The benchmark groups are essentially a way for someone to define for example different
+    // traffic loads to simulate things like peaks, lows etc.
     Bench {
         // ----- workloads ----
-        // the number of benchmarks that we are willing to run. For example, if `num_of_benchmark_groups = 2`,
-        // then we expect all the arguments under this subcommand to contain two values on their vectors - one for each
-        // benchmark set. If an argument doesn't contain the right number of values then it will panic.
+        // the number of benchmarks that we are willing to run. For example, if
+        // `num_of_benchmark_groups = 2`, then we expect all the arguments under this
+        // subcommand to contain two values on their vectors - one for each benchmark set.
+        // If an argument doesn't contain the right number of values then it will panic.
         #[clap(long, default_value = "1")]
         num_of_benchmark_groups: u32,
         // relative weight of shared counter
@@ -189,13 +195,14 @@
         #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [50])]
         shared_counter_hotness_factor: Vec,
         // The number of shared counters this stress client will create and use.
-        // This parameter takes precedence over `shared_counter_hotness_factor`, meaning that when this
-        // parameter is specified, `shared_counter_hotness_factor` is ignored when deciding the number of shared
-        // counters to create.
+        // This parameter takes precedence over `shared_counter_hotness_factor`, meaning that when
+        // this parameter is specified, `shared_counter_hotness_factor` is ignored when
+        // deciding the number of shared counters to create.
         #[clap(long, num_args(1..), value_delimiter = ',')]
         num_shared_counters: Option>,
         // Maximum gas price increment over the RGP for shared counter transactions.
-        // The actual increment for each transaction is chosen at random a value between 0 and this value.
+        // The actual increment for each transaction is a value chosen at random between 0 and
+        // this value.
         #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])]
         shared_counter_max_tip: Vec,
         // batch size use for batch payment workload
@@ -204,7 +211,8 @@
         // type and load % of adversarial transactions in the benchmark workload.
         // Format is "{adversarial_type}-{load_factor}".
         // `load_factor` is a number between 0.0 and 1.0 which dictates how much load per tx
-        // Default is (0-0.5) implying random load at 50% load. See `AdversarialPayloadType` enum for `adversarial_type`
+        // Default is (0-0.5) implying random load at 50% load. 
See `AdversarialPayloadType` enum + // for `adversarial_type` #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = ["0-1.0".to_string()])] adversarial_cfg: Vec, diff --git a/crates/sui-benchmark/src/system_state_observer.rs b/crates/sui-benchmark/src/system_state_observer.rs index 1b9fb08597d..fae2bf0bfac 100644 --- a/crates/sui-benchmark/src/system_state_observer.rs +++ b/crates/sui-benchmark/src/system_state_observer.rs @@ -1,17 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::ValidatorProxy; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; + use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; -use tokio::sync::oneshot::Sender; -use tokio::sync::watch; -use tokio::sync::watch::Receiver; -use tokio::time; -use tokio::time::Instant; +use tokio::{ + sync::{oneshot::Sender, watch, watch::Receiver}, + time, + time::Instant, +}; use tracing::{error, info}; +use crate::ValidatorProxy; + #[derive(Debug, Clone)] pub struct SystemState { pub reference_gas_price: u64, diff --git a/crates/sui-benchmark/src/util.rs b/crates/sui-benchmark/src/util.rs index 197e892d58c..b69de0eecd4 100644 --- a/crates/sui-benchmark/src/util.rs +++ b/crates/sui-benchmark/src/util.rs @@ -1,19 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::workloads::Gas; -use crate::ValidatorProxy; +use std::{path::PathBuf, sync::Arc}; + use anyhow::Result; -use std::path::PathBuf; -use std::sync::Arc; use sui_keys::keystore::{AccountKeystore, FileBasedKeystore}; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::ObjectRef; -use sui_types::crypto::{AccountKeyPair, KeypairTraits}; -use sui_types::object::Owner; -use sui_types::transaction::{Transaction, TransactionData, TEST_ONLY_GAS_UNIT_FOR_TRANSFER}; -use sui_types::utils::to_sender_signed_transaction; -use sui_types::{base_types::SuiAddress, crypto::SuiKeyPair}; +use sui_types::{ + base_types::{ObjectRef, SuiAddress}, + crypto::{AccountKeyPair, KeypairTraits, SuiKeyPair}, + object::Owner, + transaction::{Transaction, TransactionData, TEST_ONLY_GAS_UNIT_FOR_TRANSFER}, + utils::to_sender_signed_transaction, +}; + +use crate::{workloads::Gas, ValidatorProxy}; // This is the maximum gas we will transfer from primary coin into any gas coin // for running the benchmark diff --git a/crates/sui-benchmark/src/workloads/adversarial.rs b/crates/sui-benchmark/src/workloads/adversarial.rs index 970e98baf9d..d9a7060b151 100644 --- a/crates/sui-benchmark/src/workloads/adversarial.rs +++ b/crates/sui-benchmark/src/workloads/adversarial.rs @@ -1,45 +1,49 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::{ - workload::{Workload, WorkloadBuilder, MAX_GAS_FOR_TESTING}, - WorkloadBuilderInfo, WorkloadParams, -}; -use crate::drivers::Interval; -use crate::in_memory_wallet::move_call_pt_impl; -use crate::in_memory_wallet::InMemoryWallet; -use crate::system_state_observer::{SystemState, SystemStateObserver}; -use crate::workloads::payload::Payload; -use crate::workloads::{Gas, GasCoinConfig}; -use crate::ProgrammableTransactionBuilder; -use crate::{convert_move_call_args, BenchMoveCallArg, ExecutionEffects, ValidatorProxy}; +use std::{path::PathBuf, str::FromStr, sync::Arc}; + use anyhow::anyhow; use async_trait::async_trait; use itertools::Itertools; use move_core_types::identifier::Identifier; -use rand::distributions::{Distribution, Standard}; -use rand::Rng; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; use regex::Regex; -use std::path::PathBuf; -use std::str::FromStr; -use std::sync::Arc; use strum::{EnumCount, IntoEnumIterator}; use strum_macros::{EnumCount as EnumCountMacro, EnumIter}; use sui_protocol_config::ProtocolConfig; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::{random_object_ref, ObjectRef}; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::transaction::Command; -use sui_types::transaction::{CallArg, ObjectArg}; -use sui_types::{base_types::ObjectID, object::Owner}; -use sui_types::{base_types::SuiAddress, crypto::get_key_pair, transaction::Transaction}; -use sui_types::{transaction::TransactionData, utils::to_sender_signed_transaction}; +use sui_types::{ + base_types::{random_object_ref, ObjectID, ObjectRef, SuiAddress}, + crypto::get_key_pair, + effects::TransactionEffectsAPI, + object::Owner, + transaction::{CallArg, Command, ObjectArg, Transaction, TransactionData}, + utils::to_sender_signed_transaction, +}; use tracing::debug; +use super::{ + workload::{Workload, WorkloadBuilder, MAX_GAS_FOR_TESTING}, + WorkloadBuilderInfo, WorkloadParams, +}; +use crate::{ + convert_move_call_args, + drivers::Interval, + in_memory_wallet::{move_call_pt_impl, InMemoryWallet}, + system_state_observer::{SystemState, SystemStateObserver}, + workloads::{payload::Payload, Gas, GasCoinConfig}, + BenchMoveCallArg, ExecutionEffects, ProgrammableTransactionBuilder, ValidatorProxy, +}; + /// Number of vectors to create in LargeTransientRuntimeVectors workload const NUM_VECTORS: u64 = 1_000; -// TODO: Need to fix Large* workloads, which are currently failing due to InsufficientGas +// TODO: Need to fix Large* workloads, which are currently failing due to +// InsufficientGas #[derive(Debug, EnumCountMacro, EnumIter, Clone)] pub enum AdversarialPayloadType { Random = 0, @@ -48,12 +52,14 @@ pub enum AdversarialPayloadType { DynamicFieldReads, LargeTransientRuntimeVectors, LargePureFunctionArgs, - // Creates a bunch of shared objects in the module init for adversarial, then taking them all as input) + // Creates a bunch of shared objects in the module init for adversarial, then taking them all + // as input) MaxReads, // Creates a the largest package publish possible MaxPackagePublish, // TODO: - // - MaxReads (by creating a bunch of shared objects in the module init for adversarial, then taking them all as input) + // - MaxReads (by creating a bunch of shared objects in the module init for adversarial, then + // taking them all as input) // - MaxEffects (by creating a bunch of small objects) and mutating lots of objects // - MaxCommands (by created the maximum number of PT 
commands) // - MaxTxSize @@ -135,8 +141,9 @@ impl FromStr for AdversarialPayloadCfg { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - // Matches regex for two numbers delimited by a hyphen, where the left number must be positive - // and the right number must be a float between 0.0 inclusive and 1.0 inclusive + // Matches regex for two numbers delimited by a hyphen, where the left number + // must be positive and the right number must be a float between 0.0 + // inclusive and 1.0 inclusive let re = Regex::new( r"^(?:0|[1-9]\d*)-(?:0(?:\.\d+)?|1(?:\.0+)?|[1-9](?:\d*(?:\.\d+)?)?|\.\d+)$", ) @@ -471,7 +478,9 @@ impl Workload for AdversarialWorkload { .build_and_sign(gas.2.as_ref()); let effects = proxy.execute_transaction_block(transaction).await.unwrap(); let created = effects.created(); - // should only create the package object, upgrade cap, dynamic field top level obj, and NUM_DYNAMIC_FIELDS df objects. otherwise, there are some object initializers running and we will need to disambiguate + // should only create the package object, upgrade cap, dynamic field top level + // obj, and NUM_DYNAMIC_FIELDS df objects. otherwise, there are some object + // initializers running and we will need to disambiguate assert_eq!( created.len() as u64, 3 + protocol_config.object_runtime_max_num_store_entries() @@ -482,7 +491,7 @@ impl Workload for AdversarialWorkload { .unwrap(); for o in &created { - let obj = proxy.get_object(o.0 .0).await.unwrap(); + let obj = proxy.get_object(o.0.0).await.unwrap(); if let Some(tag) = obj.data.struct_tag() { if tag.to_string().contains("::adversarial::Obj") { self.df_parent_obj_ref = o.0; @@ -493,20 +502,21 @@ impl Workload for AdversarialWorkload { self.df_parent_obj_ref.0 != ObjectID::ZERO, "Dynamic field parent must be created" ); - self.package_id = package_obj.0 .0; + self.package_id = package_obj.0.0; let gas_ref = proxy - .get_object(gas.0 .0) + .get_object(gas.0.0) .await .unwrap() .compute_object_reference(); - // Pop off two to avoid hitting max input objs limit since gas and package count as two + // Pop off two to avoid hitting max input objs limit since gas and package count + // as two let num_shared_objs = protocol_config.max_input_objects() - 2; // Create a bunch of sharedobjects which we will use for MaxReads workload let transaction = move_call_pt_impl( gas.1, &gas.2, - package_obj.0 .0, + package_obj.0.0, "adversarial", "create_min_size_shared_objects", vec![], @@ -521,10 +531,11 @@ impl Workload for AdversarialWorkload { let created = effects.created(); assert_eq!(created.len() as u64, num_shared_objs); - // We've seen that the shared objects are indeed created,we store them so we can read them in MaxReads workload + // We've seen that the shared objects are indeed created,we store them so we can + // read them in MaxReads workload self.shared_objs = created .iter() - .map(|o| BenchMoveCallArg::Shared((o.0 .0, o.0 .1, false))) + .map(|o| BenchMoveCallArg::Shared((o.0.0, o.0.1, false))) .collect(); } diff --git a/crates/sui-benchmark/src/workloads/batch_payment.rs b/crates/sui-benchmark/src/workloads/batch_payment.rs index 71d678d77af..2b3cf0f19c9 100644 --- a/crates/sui-benchmark/src/workloads/batch_payment.rs +++ b/crates/sui-benchmark/src/workloads/batch_payment.rs @@ -1,29 +1,32 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::drivers::Interval; -use crate::in_memory_wallet::InMemoryWallet; -use crate::system_state_observer::SystemStateObserver; -use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, STORAGE_COST_PER_COIN}; -use crate::workloads::workload::{WorkloadBuilder, ESTIMATED_COMPUTATION_COST}; -use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; -use crate::{ExecutionEffects, ValidatorProxy}; +use std::{collections::HashMap, sync::Arc}; + use async_trait::async_trait; -use std::collections::HashMap; -use std::sync::Arc; use sui_core::test_utils::make_pay_sui_transaction; -use sui_types::base_types::{ObjectID, SequenceNumber}; -use sui_types::digests::ObjectDigest; -use sui_types::gas_coin::MIST_PER_SUI; -use sui_types::object::Owner; use sui_types::{ - base_types::{ObjectRef, SuiAddress}, + base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}, crypto::get_key_pair, + digests::ObjectDigest, + gas_coin::MIST_PER_SUI, + object::Owner, transaction::Transaction, }; use tracing::{debug, error}; +use crate::{ + drivers::Interval, + in_memory_wallet::InMemoryWallet, + system_state_observer::SystemStateObserver, + workloads::{ + payload::Payload, + workload::{Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, STORAGE_COST_PER_COIN}, + Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams, + }, + ExecutionEffects, ValidatorProxy, +}; + /// Value of each address's "primary coin" in mist. The first transaction gives /// each address a coin worth PRIMARY_COIN_VALUE, and all subsequent transfers /// send TRANSFER_AMOUNT coins each time @@ -39,8 +42,8 @@ pub struct BatchPaymentTestPayload { state: InMemoryWallet, /// total number of payments made, to be used in reporting batch TPS num_payments: usize, - /// address of the first sender. important because in the beginning, only one address has any coins. - /// after the first tx, any address can send + /// address of the first sender. important because in the beginning, only + /// one address has any coins. after the first tx, any address can send first_sender: SuiAddress, system_state_observer: Arc, } @@ -84,7 +87,8 @@ impl Payload for BatchPaymentTestPayload { debug!("New sender sending gas {}...", addr); addr }; - // we're only using gas objects in this benchmark, so safe to assume everything owned by an address is a gas object + // we're only using gas objects in this benchmark, so safe to assume everything + // owned by an address is a gas object let gas_obj = self.state.gas(&sender).unwrap(); debug!("Gas ID being used for tx {gas_obj:#?}"); let amount = if self.num_payments == 0 { diff --git a/crates/sui-benchmark/src/workloads/delegation.rs b/crates/sui-benchmark/src/workloads/delegation.rs index de274663b31..4f16782c327 100644 --- a/crates/sui-benchmark/src/workloads/delegation.rs +++ b/crates/sui-benchmark/src/workloads/delegation.rs @@ -1,26 +1,34 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::drivers::Interval; -use crate::system_state_observer::SystemStateObserver; -use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, WorkloadBuilder}; -use crate::workloads::workload::{ - ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COIN, -}; -use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; -use crate::{ExecutionEffects, ValidatorProxy}; +use std::sync::Arc; + use async_trait::async_trait; use rand::seq::IteratorRandom; -use std::sync::Arc; use sui_core::test_utils::make_transfer_sui_transaction; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::{ObjectRef, SuiAddress}; -use sui_types::crypto::{get_key_pair, AccountKeyPair}; -use sui_types::gas_coin::MIST_PER_SUI; -use sui_types::transaction::Transaction; +use sui_types::{ + base_types::{ObjectRef, SuiAddress}, + crypto::{get_key_pair, AccountKeyPair}, + gas_coin::MIST_PER_SUI, + transaction::Transaction, +}; use tracing::error; +use crate::{ + drivers::Interval, + system_state_observer::SystemStateObserver, + workloads::{ + payload::Payload, + workload::{ + Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COIN, + }, + Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams, + }, + ExecutionEffects, ValidatorProxy, +}; + #[derive(Debug)] pub struct DelegationTestPayload { coin: Option, @@ -53,8 +61,8 @@ impl Payload for DelegationTestPayload { } /// delegation flow is split into two phases - /// first `make_transaction` call creates separate coin object for future delegation - /// followup call creates delegation transaction itself + /// first `make_transaction` call creates separate coin object for future + /// delegation followup call creates delegation transaction itself fn make_transaction(&mut self) -> Transaction { match self.coin { Some(coin) => TestTransactionBuilder::new( diff --git a/crates/sui-benchmark/src/workloads/mod.rs b/crates/sui-benchmark/src/workloads/mod.rs index 3d2910fa552..e478e3d2529 100644 --- a/crates/sui-benchmark/src/workloads/mod.rs +++ b/crates/sui-benchmark/src/workloads/mod.rs @@ -13,12 +13,14 @@ pub mod workload_configuration; use std::sync::Arc; -use crate::drivers::Interval; -use crate::workloads::payload::Payload; -use sui_types::base_types::{ObjectRef, SuiAddress}; -use sui_types::crypto::AccountKeyPair; +use sui_types::{ + base_types::{ObjectRef, SuiAddress}, + crypto::AccountKeyPair, +}; use workload::*; +use crate::{drivers::Interval, workloads::payload::Payload}; + pub type GroupID = u32; #[derive(Debug, Clone)] diff --git a/crates/sui-benchmark/src/workloads/payload.rs b/crates/sui-benchmark/src/workloads/payload.rs index 567e7ad897e..5ff54638ab8 100644 --- a/crates/sui-benchmark/src/workloads/payload.rs +++ b/crates/sui-benchmark/src/workloads/payload.rs @@ -1,13 +1,16 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::ExecutionEffects; use std::fmt::Display; + use sui_types::transaction::Transaction; -/// A Payload is a transaction wrapper of a particular type (transfer object, shared counter, etc). -/// Calling `make_transaction()` on a payload produces the transaction it is wrapping. Once that -/// transaction is returned with effects (by quorum driver), a new payload can be generated with that +use crate::ExecutionEffects; + +/// A Payload is a transaction wrapper of a particular type (transfer object, +/// shared counter, etc). 
Calling `make_transaction()` on a payload produces the +/// transaction it is wrapping. Once that transaction is returned with effects +/// (by quorum driver), a new payload can be generated with that /// effect by invoking `make_new_payload(effects)` pub trait Payload: Send + Sync + std::fmt::Debug + Display { fn make_new_payload(&mut self, effects: &ExecutionEffects); diff --git a/crates/sui-benchmark/src/workloads/shared_counter.rs b/crates/sui-benchmark/src/workloads/shared_counter.rs index 465892026e4..ceea911f817 100644 --- a/crates/sui-benchmark/src/workloads/shared_counter.rs +++ b/crates/sui-benchmark/src/workloads/shared_counter.rs @@ -1,30 +1,34 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::drivers::Interval; -use crate::system_state_observer::SystemStateObserver; -use crate::util::publish_basics_package; -use crate::workloads::payload::Payload; -use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, - STORAGE_COST_PER_COUNTER, -}; -use crate::workloads::GasCoinConfig; -use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; -use crate::{ExecutionEffects, ValidatorProxy}; +use std::sync::Arc; + use async_trait::async_trait; use futures::future::join_all; -use rand::seq::SliceRandom; -use rand::Rng; -use std::sync::Arc; +use rand::{seq::SliceRandom, Rng}; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::crypto::get_key_pair; use sui_types::{ base_types::{ObjectDigest, ObjectID, SequenceNumber}, + crypto::get_key_pair, transaction::Transaction, }; use tracing::{debug, error, info}; +use crate::{ + drivers::Interval, + system_state_observer::SystemStateObserver, + util::publish_basics_package, + workloads::{ + payload::Payload, + workload::{ + Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COUNTER, + }, + Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams, + }, + ExecutionEffects, ValidatorProxy, +}; + /// The max amount of gas units needed for a payload. pub const MAX_GAS_IN_UNIT: u64 = 1_000_000_000; diff --git a/crates/sui-benchmark/src/workloads/shared_object_deletion.rs b/crates/sui-benchmark/src/workloads/shared_object_deletion.rs index 7892b1f18b4..1913a958c80 100644 --- a/crates/sui-benchmark/src/workloads/shared_object_deletion.rs +++ b/crates/sui-benchmark/src/workloads/shared_object_deletion.rs @@ -1,30 +1,34 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::drivers::Interval; -use crate::system_state_observer::SystemStateObserver; -use crate::util::publish_basics_package; -use crate::workloads::payload::Payload; -use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, - STORAGE_COST_PER_COUNTER, -}; -use crate::workloads::GasCoinConfig; -use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; -use crate::{ExecutionEffects, ValidatorProxy}; +use std::sync::Arc; + use async_trait::async_trait; use futures::future::join_all; -use rand::seq::SliceRandom; -use rand::Rng; -use std::sync::Arc; +use rand::{seq::SliceRandom, Rng}; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::crypto::get_key_pair; use sui_types::{ base_types::{ObjectDigest, ObjectID, SequenceNumber}, + crypto::get_key_pair, transaction::Transaction, }; use tracing::{debug, error, info}; +use crate::{ + drivers::Interval, + system_state_observer::SystemStateObserver, + util::publish_basics_package, + workloads::{ + payload::Payload, + workload::{ + Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COUNTER, + }, + Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams, + }, + ExecutionEffects, ValidatorProxy, +}; + /// The max amount of gas units needed for a payload. pub const MAX_GAS_IN_UNIT: u64 = 1_000_000_000; diff --git a/crates/sui-benchmark/src/workloads/transfer_object.rs b/crates/sui-benchmark/src/workloads/transfer_object.rs index ed9d5c2b587..47d32c97fc1 100644 --- a/crates/sui-benchmark/src/workloads/transfer_object.rs +++ b/crates/sui-benchmark/src/workloads/transfer_object.rs @@ -1,31 +1,34 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{collections::HashMap, sync::Arc}; + use async_trait::async_trait; use rand::seq::IteratorRandom; -use tracing::error; - -use std::collections::HashMap; -use std::sync::Arc; - -use crate::drivers::Interval; -use crate::system_state_observer::SystemStateObserver; -use crate::workloads::payload::Payload; -use crate::workloads::workload::WorkloadBuilder; -use crate::workloads::workload::{ - Workload, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COIN, -}; -use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; -use crate::{ExecutionEffects, ValidatorProxy}; use sui_core::test_utils::make_transfer_object_transaction; use sui_types::{ base_types::{ObjectRef, SuiAddress}, crypto::{get_key_pair, AccountKeyPair}, transaction::Transaction, }; +use tracing::error; + +use crate::{ + drivers::Interval, + system_state_observer::SystemStateObserver, + workloads::{ + payload::Payload, + workload::{ + Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COIN, + }, + Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams, + }, + ExecutionEffects, ValidatorProxy, +}; -/// TODO: This should be the amount that is being transferred instead of MAX_GAS. -/// Number of mist sent to each address on each batch transfer +/// TODO: This should be the amount that is being transferred instead of +/// MAX_GAS. 
Number of mist sent to each address on each batch transfer const _TRANSFER_AMOUNT: u64 = 1; #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/workload.rs b/crates/sui-benchmark/src/workloads/workload.rs index f6175834ea0..92109ac67dd 100644 --- a/crates/sui-benchmark/src/workloads/workload.rs +++ b/crates/sui-benchmark/src/workloads/workload.rs @@ -1,14 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::system_state_observer::SystemStateObserver; -use crate::workloads::payload::Payload; -use crate::workloads::{Gas, GasCoinConfig}; -use crate::ValidatorProxy; -use async_trait::async_trait; use std::sync::Arc; + +use async_trait::async_trait; use sui_types::gas_coin::MIST_PER_SUI; +use crate::{ + system_state_observer::SystemStateObserver, + workloads::{payload::Payload, Gas, GasCoinConfig}, + ValidatorProxy, +}; + // This is the maximum gas we will transfer from primary coin into any gas coin // for running the benchmark pub const MAX_GAS_FOR_TESTING: u64 = 1_000 * MIST_PER_SUI; @@ -30,14 +33,16 @@ pub trait WorkloadBuilder: Send + Sync + std::fmt::Debug { async fn build(&self, init_gas: Vec, payload_gas: Vec) -> Box>; } -/// A Workload is used to generate multiple payloads during setup phase with `make_test_payloads()` -/// which are added to a local queue. We execute transactions (the queue is drained based on the -/// target qps i.e. for 100 tps, the queue will be popped 100 times every second) with those payloads -/// and generate new payloads (which are enqueued back to the queue) with the returned effects. The -/// total number of payloads to generate depends on how much transaction throughput we want and the -/// maximum number of transactions we want to have in flight. For instance, for a 100 target_qps and -/// in_flight_ratio of 5, a maximum of 500 transactions is expected to be in flight and that many -/// payloads are created. +/// A Workload is used to generate multiple payloads during setup phase with +/// `make_test_payloads()` which are added to a local queue. We execute +/// transactions (the queue is drained based on the target qps i.e. for 100 tps, +/// the queue will be popped 100 times every second) with those payloads +/// and generate new payloads (which are enqueued back to the queue) with the +/// returned effects. The total number of payloads to generate depends on how +/// much transaction throughput we want and the maximum number of transactions +/// we want to have in flight. For instance, for a 100 target_qps and +/// in_flight_ratio of 5, a maximum of 500 transactions is expected to be in +/// flight and that many payloads are created. #[async_trait] pub trait Workload: Send + Sync + std::fmt::Debug { async fn init( diff --git a/crates/sui-benchmark/src/workloads/workload_configuration.rs b/crates/sui-benchmark/src/workloads/workload_configuration.rs index 38434414b62..73e77bad22b 100644 --- a/crates/sui-benchmark/src/workloads/workload_configuration.rs +++ b/crates/sui-benchmark/src/workloads/workload_configuration.rs @@ -1,23 +1,26 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0
 
-use crate::bank::BenchmarkBank;
-use crate::drivers::Interval;
-use crate::options::{Opts, RunSpec};
-use crate::system_state_observer::SystemStateObserver;
-use crate::workloads::batch_payment::BatchPaymentWorkloadBuilder;
-use crate::workloads::delegation::DelegationWorkloadBuilder;
-use crate::workloads::shared_counter::SharedCounterWorkloadBuilder;
-use crate::workloads::transfer_object::TransferObjectWorkloadBuilder;
-use crate::workloads::{GroupID, WorkloadBuilderInfo, WorkloadInfo};
+use std::{collections::BTreeMap, str::FromStr, sync::Arc};
+
 use anyhow::Result;
-use std::collections::BTreeMap;
-use std::str::FromStr;
-use std::sync::Arc;
 use tracing::info;
 
-use super::adversarial::{AdversarialPayloadCfg, AdversarialWorkloadBuilder};
-use super::shared_object_deletion::SharedCounterDeletionWorkloadBuilder;
+use super::{
+    adversarial::{AdversarialPayloadCfg, AdversarialWorkloadBuilder},
+    shared_object_deletion::SharedCounterDeletionWorkloadBuilder,
+};
+use crate::{
+    bank::BenchmarkBank,
+    drivers::Interval,
+    options::{Opts, RunSpec},
+    system_state_observer::SystemStateObserver,
+    workloads::{
+        batch_payment::BatchPaymentWorkloadBuilder, delegation::DelegationWorkloadBuilder,
+        shared_counter::SharedCounterWorkloadBuilder,
+        transfer_object::TransferObjectWorkloadBuilder, GroupID, WorkloadBuilderInfo, WorkloadInfo,
+    },
+};
 
 pub struct WorkloadConfiguration;
 
@@ -54,8 +57,9 @@ impl WorkloadConfiguration {
             num_of_benchmark_groups
         );
 
-        // Creating the workload builders for each benchmark group. The workloads for each
-        // benchmark group will run in the same time for the same duration.
+        // Creating the workload builders for each benchmark group. The workloads for
+        // each benchmark group will run at the same time for the same
+        // duration. 
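// A hedged sketch of the per-group parameter selection described above and in
// the `Bench` docs (illustrative only; `group_param` is a hypothetical helper,
// not part of this diff):
fn group_param<T: Copy>(values: &[T], group: usize) -> T {
    // Benchmark group `i` reads position `i` of every parameter vector
    // (target_qps, duration, ...), so each vector needs one entry per group.
    values[group]
}
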
for workload_group in 0..num_of_benchmark_groups { let i = workload_group as usize; let builders = Self::create_workload_builders( diff --git a/crates/sui-benchmark/tests/simtest.rs b/crates/sui-benchmark/tests/simtest.rs index 22d3cb6ed22..4ed3cceabf4 100644 --- a/crates/sui-benchmark/tests/simtest.rs +++ b/crates/sui-benchmark/tests/simtest.rs @@ -3,40 +3,48 @@ #[cfg(msim)] mod test { + use std::{ + collections::HashSet, + path::PathBuf, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, + }, + time::{Duration, Instant}, + }; + use rand::{distributions::uniform::SampleRange, thread_rng, Rng}; - use std::collections::HashSet; - use std::path::PathBuf; - use std::str::FromStr; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::sync::{Arc, Mutex}; - use std::time::{Duration, Instant}; - use sui_benchmark::bank::BenchmarkBank; - use sui_benchmark::system_state_observer::SystemStateObserver; - use sui_benchmark::workloads::adversarial::AdversarialPayloadCfg; - use sui_benchmark::workloads::workload_configuration::WorkloadConfiguration; use sui_benchmark::{ + bank::BenchmarkBank, drivers::{bench_driver::BenchDriver, driver::Driver, Interval}, + system_state_observer::SystemStateObserver, util::get_ed25519_keypair_from_keystore, + workloads::{ + adversarial::AdversarialPayloadCfg, workload_configuration::WorkloadConfiguration, + }, LocalValidatorAggregatorProxy, ValidatorProxy, }; - use sui_config::genesis::Genesis; - use sui_config::{AUTHORITIES_DB_NAME, SUI_KEYSTORE_FILENAME}; - use sui_core::authority::authority_store_tables::AuthorityPerpetualTables; - use sui_core::authority::framework_injection; - use sui_core::authority::AuthorityState; - use sui_core::checkpoints::{CheckpointStore, CheckpointWatermark}; + use sui_config::{genesis::Genesis, AUTHORITIES_DB_NAME, SUI_KEYSTORE_FILENAME}; + use sui_core::{ + authority::{ + authority_store_tables::AuthorityPerpetualTables, framework_injection, AuthorityState, + }, + checkpoints::{CheckpointStore, CheckpointWatermark}, + }; use sui_framework::BuiltInFramework; use sui_macros::{ clear_fail_point, nondeterministic, register_fail_point_async, register_fail_point_if, register_fail_points, sim_test, }; use sui_protocol_config::{ProtocolVersion, SupportedProtocolVersions}; - use sui_simulator::tempfile::TempDir; - use sui_simulator::{configs::*, SimConfig}; + use sui_simulator::{configs::*, tempfile::TempDir, SimConfig}; use sui_storage::blob::Blob; - use sui_types::base_types::{ObjectRef, SuiAddress}; - use sui_types::full_checkpoint_content::CheckpointData; - use sui_types::messages_checkpoint::VerifiedCheckpoint; + use sui_types::{ + base_types::{ObjectRef, SuiAddress}, + full_checkpoint_content::CheckpointData, + messages_checkpoint::VerifiedCheckpoint, + }; use test_cluster::{TestCluster, TestClusterBuilder}; use tracing::{error, info}; use typed_store::traits::Map; @@ -85,7 +93,7 @@ mod test { register_fail_point_if("correlated-crash-after-consensus-commit-boundary", || true); // TODO: enable this - right now it causes rocksdb errors when re-opening DBs - //register_fail_point_if("correlated-crash-process-certificate", || true); + // register_fail_point_if("correlated-crash-process-certificate", || true); let test_cluster = build_test_cluster(4, 10000).await; test_simulated_load(TestInitData::new(&test_cluster).await, 60).await; @@ -125,9 +133,10 @@ mod test { test_simulated_load(TestInitData::new(&test_cluster).await, 120).await; } - /// Get a list of nodes that we don't want to kill in the crash recovery 
tests.
-    /// This includes the client node which is the node that is running the test, as well as
-    /// rpc fullnode which are needed to run the benchmark.
+    /// Get a list of nodes that we don't want to kill in the crash recovery
+    /// tests. This includes the client node which is the node that is
+    /// running the test, as well as rpc fullnodes which are needed to run
+    /// the benchmark.
     fn get_keep_alive_nodes(cluster: &TestCluster) -> HashSet {
         let mut keep_alive_nodes = HashSet::new();
         // The first fullnode in the swarm ins the rpc fullnode.
@@ -183,7 +192,8 @@
         }
     }
 
-    // Runs object pruning and compaction for object table in `state` probabistically.
+    // Runs object pruning and compaction for object table in `state`
+    // probabilistically.
     async fn handle_failpoint_prune_and_compact(state: Arc, probability: f64) {
         {
             let mut rng = thread_rng();
@@ -224,7 +234,8 @@
         });
         test_simulated_load(TestInitData::new(&test_cluster).await, 60).await;
 
-        // The fail point holds a reference to `node_state`, which we need to release before the test ends.
+        // The fail point holds a reference to `node_state`, which we need to release
+        // before the test ends.
         clear_fail_point("prune-and-compact");
     }
 
@@ -380,10 +391,11 @@
     #[sim_test(config = "test_config()")]
     async fn test_upgrade_compatibility() {
-        // This test is intended to test the compatibility of the latest protocol version with
-        // the previous protocol version. It does this by starting a network with
-        // the previous protocol version that this binary supports, and then upgrading the network
-        // to the latest protocol version.
+        // This test is intended to test the compatibility of the latest protocol
+        // version with the previous protocol version. It does this by starting
+        // a network with the previous protocol version that this binary
+        // supports, and then upgrading the network to the latest protocol
+        // version.
         tokio::time::timeout(
             Duration::from_secs(1000),
             test_protocol_upgrade_compatibility_impl(),
@@ -441,8 +453,9 @@
             Some(BuiltInFramework::iter_system_packages().collect::>())
         } else {
             // Often we want to be able to create multiple protocol config versions
-            // on main that none have shipped to any production network. In this case,
-            // some of the protocol versions may not have a framework snapshot.
+            // on main, none of which have shipped to any production network. In this
+            // case, some of the protocol versions may
+            // not have a framework snapshot.
             None
         }
     }
@@ -548,13 +561,15 @@
         let system_state_observer = {
             let mut system_state_observer = SystemStateObserver::new(proxy.clone());
             if let Ok(_) = system_state_observer.state.changed().await {
-                info!("Got the new state (reference gas price and/or protocol config) from system state object");
+                info!(
+                    "Got the new state (reference gas price and/or protocol config) from system state object"
+                );
             }
             Arc::new(system_state_observer)
         };
 
-        // The default test parameters are somewhat conservative in order to keep the running time
-        // of the test reasonable in CI.
+        // The default test parameters are somewhat conservative in order to keep the
+        // running time of the test reasonable in CI. 
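// Hedged sketch (hypothetical helper, not in this diff) of the in-flight
// arithmetic from the Workload trait docs: payloads created is roughly
// target_qps * in_flight_ratio.
fn max_in_flight(target_qps: u64, in_flight_ratio: u64) -> u64 {
    // With the defaults below (10 QPS, in-flight ratio 2) at most ~20
    // transactions are in flight; the docs' example is 100 QPS * 5 = 500.
    target_qps * in_flight_ratio
}
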
let target_qps = get_var("SIM_STRESS_TEST_QPS", 10);
         let num_workers = get_var("SIM_STRESS_TEST_WORKERS", 10);
         let in_flight_ratio = get_var("SIM_STRESS_TEST_IFR", 2);
@@ -570,8 +585,8 @@
         let adversarial_cfg = AdversarialPayloadCfg::from_str("0-1.0").unwrap();
         let duration = Interval::from_str("unbounded").unwrap();
 
-        // TODO: re-enable this when we figure out why it is causing connection errors and making
-        // tests run for ever
+        // TODO: re-enable this when we figure out why it is causing connection errors
+        // and making tests run forever
         let adversarial_weight = 0;
 
         let shared_counter_hotness_factor = 50;
@@ -634,7 +649,8 @@
             .await
             .unwrap();
 
-        // TODO: make this stricter (== 0) when we have reliable error retrying on the client.
+        // TODO: make this stricter (== 0) when we have reliable error retrying on the
+        // client.
         tracing::info!("end of test {:?}", benchmark_stats);
         assert!(benchmark_stats.num_error_txes < 100);
     }
 }
diff --git a/crates/sui-bridge/build.rs b/crates/sui-bridge/build.rs
index d0b0d41262c..244572cd7b4 100644
--- a/crates/sui-bridge/build.rs
+++ b/crates/sui-bridge/build.rs
@@ -1,9 +1,11 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use std::fs;
-use std::path::{Path, PathBuf};
-use std::process::{exit, Command, ExitStatus};
+use std::{
+    fs,
+    path::{Path, PathBuf},
+    process::{exit, Command, ExitStatus},
+};
 
 fn main() -> Result<(), ExitStatus> {
     #[cfg(windows)]
@@ -102,7 +104,8 @@ fn should_install_dependencies(dir_path: &str) -> bool {
     if !missing_dependencies {
         return false;
     }
-    // if any dependencies are missing, recreate an empty directory and then reinstall
+    // if any dependencies are missing, recreate an empty directory and then
+    // reinstall
    eprintln!(
        "cargo:warning={:?} does not have all the dependnecies, re-creating",
        dir_path
diff --git a/crates/sui-bridge/src/abi.rs b/crates/sui-bridge/src/abi.rs
index f6801c84963..edc60fe85eb 100644
--- a/crates/sui-bridge/src/abi.rs
+++ b/crates/sui-bridge/src/abi.rs
@@ -1,9 +1,6 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::error::{BridgeError, BridgeResult};
-use crate::types::{BridgeAction, EthToSuiBridgeAction};
-use crate::types::{BridgeChainId, EthLog, TokenId};
 use ethers::{
     abi::RawLog,
     contract::{abigen, EthLogDecode},
@@ -12,6 +9,11 @@
 use serde::{Deserialize, Serialize};
 use sui_types::base_types::SuiAddress;
 
+use crate::{
+    error::{BridgeError, BridgeResult},
+    types::{BridgeAction, BridgeChainId, EthLog, EthToSuiBridgeAction, TokenId},
+};
+
 // TODO: write a macro to handle variants
 // TODO: Add other events
 
@@ -55,11 +57,17 @@ impl EthBridgeEvent {
                 let bridge_event = match EthToSuiTokenBridgeV1::try_from(&event) {
                     Ok(bridge_event) => bridge_event,
                     // This only happens when solidity code does not align with rust code.
-                    // When this happens in production, there is a risk of stuck bridge transfers.
-                    // We log error here.
+                    // When this happens in production, there is a risk of stuck bridge
+                    // transfers. We log error here.
                     // TODO: add metrics and alert
                     Err(e) => {
-                        tracing::error!(?eth_tx_hash, eth_event_index, "Failed to convert TokensBridgedToSui log to EthToSuiTokenBridgeV1. This indicates incorrect parameters or a bug in the code: {:?}. Err: {:?}", event, e);
+                        tracing::error!(
+                            ?eth_tx_hash,
+                            eth_event_index,
+                            "Failed to convert TokensBridgedToSui log to EthToSuiTokenBridgeV1. This indicates incorrect parameters or a bug in the code: {:?}. 
Err: {:?}", + event, + e + ); return None; } }; diff --git a/crates/sui-bridge/src/action_executor.rs b/crates/sui-bridge/src/action_executor.rs index 03d00426777..ac42224ad64 100644 --- a/crates/sui-bridge/src/action_executor.rs +++ b/crates/sui-bridge/src/action_executor.rs @@ -4,6 +4,8 @@ //! BridgeActionExecutor receives BridgeActions (from BridgeOrchestrator), //! collects bridge authority signatures and submit signatures on chain. +use std::sync::Arc; + use mysten_metrics::spawn_logged_monitored_task; use shared_crypto::intent::{Intent, IntentMessage}; use sui_json_rpc_types::{ @@ -18,6 +20,7 @@ use sui_types::{ object::Owner, transaction::Transaction, }; +use tracing::{error, info, warn}; use crate::{ client::bridge_authority_aggregator::BridgeAuthorityAggregator, @@ -27,13 +30,12 @@ use crate::{ sui_transaction_builder::build_transaction, types::{BridgeAction, BridgeActionStatus, VerifiedCertifiedBridgeAction}, }; -use std::sync::Arc; -use tracing::{error, info, warn}; pub const CHANNEL_SIZE: usize = 1000; // delay schedule: at most 16 times including the initial attempt -// 0.1s, 0.2s, 0.4s, 0.8s, 1.6s, 3.2s, 6.4s, 12.8s, 25.6s, 51.2s, 102.4s, 204.8s, 409.6s, 819.2s, 1638.4s +// 0.1s, 0.2s, 0.4s, 0.8s, 1.6s, 3.2s, 6.4s, 12.8s, 25.6s, 51.2s, 102.4s, +// 204.8s, 409.6s, 819.2s, 1638.4s pub const MAX_SIGNING_ATTEMPTS: u64 = 16; pub const MAX_EXECUTION_ATTEMPTS: u64 = 16; @@ -259,7 +261,10 @@ where warn!("Failed to collect sigs for bridge action: {:?}", e); if attempt_times >= MAX_SIGNING_ATTEMPTS { - error!("Manual intervention is required. Failed to collect sigs for bridge action after {MAX_SIGNING_ATTEMPTS} attempts: {:?}", e); + error!( + "Manual intervention is required. Failed to collect sigs for bridge action after {MAX_SIGNING_ATTEMPTS} attempts: {:?}", + e + ); return; } delay(attempt_times).await; @@ -352,7 +357,10 @@ where // TODO: metrics + alerts // If it fails for too many times, log and ask for manual intervention. if attempt_times >= MAX_EXECUTION_ATTEMPTS { - error!("Manual intervention is required. Failed to collect execute transaction for bridge action after {MAX_EXECUTION_ATTEMPTS} attempts: {:?}", err); + error!( + "Manual intervention is required. Failed to collect execute transaction for bridge action after {MAX_EXECUTION_ATTEMPTS} attempts: {:?}", + err + ); return; } delay(attempt_times).await; @@ -388,14 +396,18 @@ where }) } SuiExecutionStatus::Failure { error } => { - // In practice the transaction could fail because of running out of gas, but really - // should not be due to other reasons. + // In practice the transaction could fail because of running out of gas, but + // really should not be due to other reasons. // This means manual intervention is needed. So we do not push them back to // the execution queue because retries are mostly likely going to fail anyway. - // After human examination, the node should be restarted and fetch them from WAL. + // After human examination, the node should be restarted and fetch them from + // WAL. // TODO metrics + alerts - error!(?tx_digest, "Manual intervention is needed. Sui transaction executed and failed with error: {error:?}"); + error!( + ?tx_digest, + "Manual intervention is needed. 
Sui transaction executed and failed with error: {error:?}" + ); } } } @@ -439,10 +451,12 @@ mod tests { use fastcrypto::traits::KeyPair; use prometheus::Registry; use sui_json_rpc_types::SuiTransactionBlockResponse; - use sui_types::crypto::get_key_pair; - use sui_types::gas_coin::GasCoin; - use sui_types::{base_types::random_object_ref, transaction::TransactionData}; + use sui_types::{ + base_types::random_object_ref, crypto::get_key_pair, gas_coin::GasCoin, + transaction::TransactionData, + }; + use super::*; use crate::{ crypto::{ BridgeAuthorityKeyPair, BridgeAuthorityPublicKeyBytes, @@ -457,8 +471,6 @@ mod tests { types::{BridgeCommittee, BridgeCommitteeValiditySignInfo, CertifiedBridgeAction}, }; - use super::*; - #[tokio::test] async fn test_onchain_execution_loop() { let ( @@ -518,13 +530,16 @@ mod tests { .await .unwrap(); - // Expect to see the transaction to be requested and successfully executed hence removed from WAL + // Expect to see the transaction to be requested and successfully executed hence + // removed from WAL tx_subscription.recv().await.unwrap(); assert!(store.get_all_pending_actions().unwrap().is_empty()); ///////////////////////////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////// Test execution failure /////////////////////////////////// - ///////////////////////////////////////////////////////////////////////////////////////////////// + ////////////////////////////////////// Test execution failure + ////////////////////////////////////// /////////////////////////////////// + ////////////////////////////////////// ///////////////////////////////////////// + ////////////////////////////////////// ////////////////// let (action_certificate, _, _) = get_bridge_authority_approved_action( vec![&mock0, &mock1, &mock2, &mock3], @@ -566,8 +581,10 @@ mod tests { ); ///////////////////////////////////////////////////////////////////////////////////////////////// - //////////////////////////// Test transaction failed at signing stage /////////////////////////// - ///////////////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////// Test transaction failed at signing stage + //////////////////////////// /////////////////////////// /////////////// + //////////////////////////// /////////////////////////////////////////////////// + //////////////////////////// /// let (action_certificate, _, _) = get_bridge_authority_approved_action( vec![&mock0, &mock1, &mock2, &mock3], @@ -601,10 +618,12 @@ mod tests { assert_eq!(tx_subscription.recv().await.unwrap(), tx_digest); // The retry is still going on, action still in WAL - assert!(store - .get_all_pending_actions() - .unwrap() - .contains_key(&action.digest())); + assert!( + store + .get_all_pending_actions() + .unwrap() + .contains_key(&action.digest()) + ); // Now let it succeed mock_transaction_response( @@ -617,10 +636,12 @@ mod tests { // Give it 1 second to retry and succeed tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; // The action is successful and should be removed from WAL now - assert!(!store - .get_all_pending_actions() - .unwrap() - .contains_key(&action.digest())); + assert!( + !store + .get_all_pending_actions() + .unwrap() + .contains_key(&action.digest()) + ); } #[tokio::test] @@ -683,7 +704,8 @@ mod tests { .await .unwrap(); - // Wait until the transaction is retried at least once (instead of deing dropped) + // Wait until the transaction is retried at least once (instead of 
deing
+        // dropped)
         loop {
             let requested_times =
                 mock0.get_sui_token_events_requested(sui_tx_digest, sui_tx_event_index);
@@ -731,10 +753,12 @@
         // Expect to see the transaction to be requested and succeed
         assert_eq!(tx_subscription.recv().await.unwrap(), tx_digest);
         // The action is removed from WAL
-        assert!(!store
-            .get_all_pending_actions()
-            .unwrap()
-            .contains_key(&action.digest()));
+        assert!(
+            !store
+                .get_all_pending_actions()
+                .unwrap()
+                .contains_key(&action.digest())
+        );
     }
 
     #[tokio::test]
@@ -785,18 +809,22 @@
             .unwrap();
         let action_digest = action.digest();
 
-        // Wait for 1 second. It should still in the process of retrying requesting sigs becaues we mock errors above.
+        // Wait for 1 second. It should still be in the process of retrying requesting
+        // sigs because we mock errors above.
         tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
         tx_subscription.try_recv().unwrap_err();
         // And the action is still in WAL
-        assert!(store
-            .get_all_pending_actions()
-            .unwrap()
-            .contains_key(&action_digest));
+        assert!(
+            store
+                .get_all_pending_actions()
+                .unwrap()
+                .contains_key(&action_digest)
+        );
 
         sui_client_mock.set_action_onchain_status(&action, BridgeActionStatus::Approved);
 
-        // The next retry will see the action is already processed on chain and remove it from WAL
+        // The next retry will see the action is already processed on chain and remove
+        // it from WAL
         let now = std::time::Instant::now();
         while store
             .get_all_pending_actions()
@@ -878,7 +906,8 @@
         // Set the action to be already approved on chain
         sui_client_mock.set_action_onchain_status(&action, BridgeActionStatus::Approved);
 
-        // The next retry will see the action is already processed on chain and remove it from WAL
+        // The next retry will see the action is already processed on chain and remove
+        // it from WAL
         let now = std::time::Instant::now();
         let action_digest = action.digest();
         while store
@@ -1026,8 +1055,9 @@
         let tx_subscription = sui_client_mock.subscribe_to_requested_transactions();
         let sui_client = Arc::new(SuiClient::new_for_testing(sui_client_mock.clone()));
 
-        // The dummy key is used to sign transaction so we can get TransactionDigest easily.
-        // User signature is not part of the transaction so it does not matter which key it is.
+        // The dummy key is used to sign transactions so we can get TransactionDigest
+        // easily. User signature is not part of the transaction so it does not
+        // matter which key it is.
         let (_, dummy_kp): (_, fastcrypto::secp256k1::Secp256k1KeyPair) = get_key_pair();
         let dummy_sui_key = SuiKeyPair::from(dummy_kp);
diff --git a/crates/sui-bridge/src/client/bridge_authority_aggregator.rs b/crates/sui-bridge/src/client/bridge_authority_aggregator.rs
index 0d647f4f1d8..4567fc6a42b 100644
--- a/crates/sui-bridge/src/client/bridge_authority_aggregator.rs
+++ b/crates/sui-bridge/src/client/bridge_authority_aggregator.rs
@@ -3,26 +3,29 @@
 //! BridgeAuthorityAggregator aggregates signatures from BridgeCommittee. 
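// Hedged sketch of the stake-accumulation idea behind the aggregator
// (illustrative only; `try_certify` and its types are hypothetical, and the
// real logic lives in `request_sign_bridge_action_into_certification` below).
// The threshold is expressed in the same units as TOTAL_VOTING_POWER.
type Stake = u64;

fn try_certify(signer_stakes: &[Stake], threshold: Stake) -> Option<Stake> {
    let mut total: Stake = 0;
    for stake in signer_stakes {
        total += stake;
        if total >= threshold {
            // Enough stake collected: a certificate can be formed.
            return Some(total);
        }
    }
    // Below threshold: keep collecting signatures, or fail on too many errors.
    None
}
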
-use crate::client::bridge_client::BridgeClient; -use crate::crypto::BridgeAuthorityPublicKeyBytes; -use crate::crypto::BridgeAuthoritySignInfo; -use crate::error::{BridgeError, BridgeResult}; -use crate::types::BridgeCommitteeValiditySignInfo; -use crate::types::{ - BridgeAction, BridgeCommittee, CertifiedBridgeAction, VerifiedCertifiedBridgeAction, - VerifiedSignedBridgeAction, +use std::{ + collections::{btree_map::Entry, BTreeMap}, + sync::Arc, + time::Duration, +}; + +use sui_common::authority_aggregation::{quorum_map_then_reduce_with_timeout, ReduceOutput}; +use sui_types::{ + base_types::ConciseableName, + committee::{StakeUnit, TOTAL_VOTING_POWER}, }; -use std::collections::btree_map::Entry; -use std::collections::BTreeMap; -use std::sync::Arc; -use std::time::Duration; -use sui_common::authority_aggregation::quorum_map_then_reduce_with_timeout; -use sui_common::authority_aggregation::ReduceOutput; -use sui_types::base_types::ConciseableName; -use sui_types::committee::StakeUnit; -use sui_types::committee::TOTAL_VOTING_POWER; use tracing::{error, info, warn}; +use crate::{ + client::bridge_client::BridgeClient, + crypto::{BridgeAuthorityPublicKeyBytes, BridgeAuthoritySignInfo}, + error::{BridgeError, BridgeResult}, + types::{ + BridgeAction, BridgeCommittee, BridgeCommitteeValiditySignInfo, CertifiedBridgeAction, + VerifiedCertifiedBridgeAction, VerifiedSignedBridgeAction, + }, +}; + pub struct BridgeAuthorityAggregator { pub committee: Arc, pub clients: Arc>>, @@ -181,7 +184,7 @@ async fn request_sign_bridge_action_into_certification( verified_signed_action, ) { Ok(Some(certified_action)) => { - return ReduceOutput::Success(certified_action) + return ReduceOutput::Success(certified_action); } Ok(None) => (), Err(e) => { @@ -204,7 +207,8 @@ async fn request_sign_bridge_action_into_certification( } }; - // If bad stake (including blocklisted stake) is too high to reach validity threshold, return error + // If bad stake (including blocklisted stake) is too high to reach validity + // threshold, return error if state.is_too_many_error() { ReduceOutput::Failed(state) } else { @@ -238,18 +242,18 @@ mod tests { use std::collections::BTreeSet; use fastcrypto::traits::ToFromBytes; - use sui_types::committee::VALIDITY_THRESHOLD; - use sui_types::digests::TransactionDigest; - - use crate::crypto::BridgeAuthorityPublicKey; - use crate::server::mock_handler::BridgeRequestMockHandler; + use sui_types::{committee::VALIDITY_THRESHOLD, digests::TransactionDigest}; use super::*; - use crate::test_utils::{ - get_test_authorities_and_run_mock_bridge_server, get_test_authority_and_key, - get_test_sui_to_eth_bridge_action, sign_action_with_key, + use crate::{ + crypto::BridgeAuthorityPublicKey, + server::mock_handler::BridgeRequestMockHandler, + test_utils::{ + get_test_authorities_and_run_mock_bridge_server, get_test_authority_and_key, + get_test_sui_to_eth_bridge_action, sign_action_with_key, + }, + types::BridgeCommittee, }; - use crate::types::BridgeCommittee; #[tokio::test] async fn test_bridge_auth_agg_construction() { @@ -424,8 +428,9 @@ mod tests { Some(amount), ); - // Only mock authority 2 and 3 to return signatures, such that if BridgeAuthorityAggregator - // requests to authority 0 and 1 (which should not happen) it will panic. + // Only mock authority 2 and 3 to return signatures, such that if + // BridgeAuthorityAggregator requests to authority 0 and 1 (which should + // not happen) it will panic. 
mock2.add_sui_event_response( sui_tx_digest, sui_tx_event_index, @@ -469,7 +474,8 @@ mod tests { BridgeError::AuthoritySignatureAggregationTooManyError(_) )); - // if mock 3 returns duplicated signature (by authority 2), `BridgeClient` will catch this + // if mock 3 returns duplicated signature (by authority 2), `BridgeClient` will + // catch this mock3.add_sui_event_response( sui_tx_digest, sui_tx_event_index, @@ -558,14 +564,16 @@ mod tests { let sig_0 = sign_action_with_key(&action, &secrets[0]); // returns Ok(None) - assert!(state - .handle_verified_signed_action( - authorities[0].pubkey_bytes().clone(), - authorities[0].voting_power, - VerifiedSignedBridgeAction::new_from_verified(sig_0.clone()) - ) - .unwrap() - .is_none()); + assert!( + state + .handle_verified_signed_action( + authorities[0].pubkey_bytes().clone(), + authorities[0].voting_power, + VerifiedSignedBridgeAction::new_from_verified(sig_0.clone()) + ) + .unwrap() + .is_none() + ); assert_eq!(state.total_ok_stake, 2500); // Handling a sig from an already signed authority would fail @@ -611,14 +619,16 @@ mod tests { // Collect signtuare from authority 1 (voting power = 1) let sig_1 = sign_action_with_key(&action, &secrets[1]); // returns Ok(None) - assert!(state - .handle_verified_signed_action( - authorities[1].pubkey_bytes().clone(), - authorities[1].voting_power, - VerifiedSignedBridgeAction::new_from_verified(sig_1.clone()) - ) - .unwrap() - .is_none()); + assert!( + state + .handle_verified_signed_action( + authorities[1].pubkey_bytes().clone(), + authorities[1].voting_power, + VerifiedSignedBridgeAction::new_from_verified(sig_1.clone()) + ) + .unwrap() + .is_none() + ); assert_eq!(state.total_ok_stake, 2501); // Collect signtuare from authority 2 - reach validity threshold diff --git a/crates/sui-bridge/src/client/bridge_client.rs b/crates/sui-bridge/src/client/bridge_client.rs index 1180f38cd94..db32ce96d5c 100644 --- a/crates/sui-bridge/src/client/bridge_client.rs +++ b/crates/sui-bridge/src/client/bridge_client.rs @@ -3,21 +3,27 @@ //! `BridgeClient` talks to BridgeNode. -use crate::crypto::{verify_signed_bridge_action, BridgeAuthorityPublicKeyBytes}; -use crate::error::{BridgeError, BridgeResult}; -use crate::server::APPLICATION_JSON; -use crate::types::{BridgeAction, BridgeCommittee, VerifiedSignedBridgeAction}; -use fastcrypto::encoding::{Encoding, Hex}; -use fastcrypto::traits::ToFromBytes; -use std::str::FromStr; -use std::sync::Arc; +use std::{str::FromStr, sync::Arc}; + +use fastcrypto::{ + encoding::{Encoding, Hex}, + traits::ToFromBytes, +}; use url::Url; -// Note: `base_url` is `Option` because `quorum_map_then_reduce_with_timeout_and_prefs` -// uses `[]` to get Client based on key. Therefore even when the URL is invalid we need to -// create a Client instance. -// TODO: In the future we can consider change `quorum_map_then_reduce_with_timeout_and_prefs` -// and its callsites to use `get` instead of `[]`. +use crate::{ + crypto::{verify_signed_bridge_action, BridgeAuthorityPublicKeyBytes}, + error::{BridgeError, BridgeResult}, + server::APPLICATION_JSON, + types::{BridgeAction, BridgeCommittee, VerifiedSignedBridgeAction}, +}; + +// Note: `base_url` is `Option` because +// `quorum_map_then_reduce_with_timeout_and_prefs` uses `[]` to get Client based +// on key. Therefore even when the URL is invalid we need to create a Client +// instance. TODO: In the future we can consider change +// `quorum_map_then_reduce_with_timeout_and_prefs` and its callsites to use +// `get` instead of `[]`. 
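// Hedged sketch of the pattern that note describes (illustrative only;
// `LenientClient` is hypothetical, not the BridgeClient below): URL parsing may
// fail, but an instance must still exist, so the URL is stored as an Option and
// checked before use.
use url::Url;

struct LenientClient {
    base_url: Option<Url>,
}

impl LenientClient {
    fn new(url: &str) -> Self {
        // An invalid URL still yields a client; request paths check `base_url` first.
        Self {
            base_url: Url::parse(url).ok(),
        }
    }
}
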
#[derive(Clone, Debug)] pub struct BridgeClient { inner: reqwest::Client, @@ -167,23 +173,25 @@ impl BridgeClient { #[cfg(test)] mod tests { + use ethers::types::{Address as EthAddress, TxHash}; + use fastcrypto::{ + hash::{HashFunction, Keccak256}, + traits::KeyPair, + }; + use prometheus::Registry; + use sui_types::{base_types::SuiAddress, crypto::get_key_pair, digests::TransactionDigest}; + + use super::*; use crate::{ abi::EthToSuiTokenBridgeV1, crypto::BridgeAuthoritySignInfo, events::EmittedSuiToEthTokenBridgeV1, server::mock_handler::BridgeRequestMockHandler, - test_utils::{get_test_authority_and_key, get_test_sui_to_eth_bridge_action}, + test_utils::{ + get_test_authority_and_key, get_test_sui_to_eth_bridge_action, run_mock_bridge_server, + }, types::{BridgeChainId, SignedBridgeAction, TokenId}, }; - use fastcrypto::hash::{HashFunction, Keccak256}; - use fastcrypto::traits::KeyPair; - use prometheus::Registry; - - use super::*; - use crate::test_utils::run_mock_bridge_server; - use ethers::types::Address as EthAddress; - use ethers::types::TxHash; - use sui_types::{base_types::SuiAddress, crypto::get_key_pair, digests::TransactionDigest}; #[tokio::test] async fn test_bridge_client() { @@ -282,7 +290,8 @@ mod tests { .await .unwrap(); - // mismatched action would fail, this could happen when the authority fetched the wrong event + // mismatched action would fail, this could happen when the authority fetched + // the wrong event let action2 = get_test_sui_to_eth_bridge_action(Some(tx_digest), Some(event_idx), Some(2), Some(200)); let wrong_sig = BridgeAuthoritySignInfo::new(&action2, &secret); diff --git a/crates/sui-bridge/src/config.rs b/crates/sui-bridge/src/config.rs index 8118b068997..72a0637bd3f 100644 --- a/crates/sui-bridge/src/config.rs +++ b/crates/sui-bridge/src/config.rs @@ -1,30 +1,34 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::crypto::BridgeAuthorityKeyPair; -use crate::error::BridgeError; -use crate::eth_client::EthClient; -use crate::sui_client::SuiClient; -use crate::types::BridgeAction; +use std::{ + collections::{BTreeMap, HashSet}, + path::PathBuf, + str::FromStr, + sync::Arc, +}; + use anyhow::anyhow; use ethers::types::Address as EthAddress; use fastcrypto::traits::EncodeDecodeBase64; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::collections::{BTreeMap, HashSet}; -use std::path::PathBuf; -use std::str::FromStr; -use std::sync::Arc; use sui_config::Config; use sui_sdk::SuiClient as SuiSdkClient; -use sui_types::base_types::ObjectRef; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::crypto::SuiKeyPair; -use sui_types::event::EventID; -use sui_types::object::Owner; -use sui_types::Identifier; +use sui_types::{ + base_types::{ObjectID, ObjectRef, SuiAddress}, + crypto::SuiKeyPair, + event::EventID, + object::Owner, + Identifier, +}; use tracing::info; +use crate::{ + crypto::BridgeAuthorityKeyPair, error::BridgeError, eth_client::EthClient, + sui_client::SuiClient, types::BridgeAction, +}; + #[serde_as] #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] @@ -33,49 +37,59 @@ pub struct BridgeNodeConfig { pub server_listen_port: u16, /// The port for the metrics server. pub metrics_port: u16, - /// Path of the file where bridge authority key (Secp256k1) is stored as Base64 encoded `privkey`. + /// Path of the file where bridge authority key (Secp256k1) is stored as + /// Base64 encoded `privkey`.
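For orientation, the `privkey` file referenced here is a bare Base64 string and nothing else. A sketch of producing one (assumed fastcrypto usage, in line with the cli.rs changes later in this diff; the output path is illustrative):

```rust
use fastcrypto::{
    secp256k1::Secp256k1KeyPair,
    traits::{EncodeDecodeBase64, KeyPair},
};

fn main() -> std::io::Result<()> {
    // Fresh Secp256k1 authority keypair from a cryptographic RNG.
    let kp = Secp256k1KeyPair::generate(&mut rand::thread_rng());
    // The file body is just the Base64-encoded private key, with no framing.
    std::fs::write("bridge_authority_key", kp.encode_base64())
}
```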
pub bridge_authority_key_path_base64_raw: PathBuf, /// Rpc url for Sui fullnode, used for query stuff and submit transactions. pub sui_rpc_url: String, /// Rpc url for Eth fullnode, used for query stuff. pub eth_rpc_url: String, - /// The eth contract addresses (hex). It must not be empty. It serves two purposes: - /// 1. validator only signs bridge actions that are generated from these contracts. + /// The eth contract addresses (hex). It must not be empty. It serves two + /// purposes: + /// 1. validator only signs bridge actions that are generated from these + /// contracts. /// 2. for EthSyncer to watch for when `run_client` is true. pub eth_addresses: Vec<EthAddress>, - /// Path of the file where bridge client key (any SuiKeyPair) is stored as Base64 encoded `flag || privkey`. - /// If `run_client` is true, and this is None, then use `bridge_authority_key_path_base64_raw` as client key. + /// Path of the file where bridge client key (any SuiKeyPair) is stored as + /// Base64 encoded `flag || privkey`. If `run_client` is true, and this + /// is None, then use `bridge_authority_key_path_base64_raw` as client key. #[serde(skip_serializing_if = "Option::is_none")] pub bridge_client_key_path_base64_sui_key: Option<PathBuf>, /// Whether to run client. If true, `bridge_client_key_path_base64_sui_key`, /// `bridge_client_gas_object` and `db_path` need to be provided. pub run_client: bool, - /// The gas object to use for paying for gas fees for the client. It needs to - /// be owned by the address associated with bridge client key. + /// The gas object to use for paying for gas fees for the client. It needs + /// to be owned by the address associated with bridge client key. #[serde(skip_serializing_if = "Option::is_none")] pub bridge_client_gas_object: Option<ObjectID>, /// Path of the client storage. Required when `run_client` is true. #[serde(skip_serializing_if = "Option::is_none")] pub db_path: Option<PathBuf>, // TODO: this should be hardcoded and removed from config - /// The sui modules of bridge packages for client to watch for. Need to contain at least one item when `run_client` is true. + /// The sui modules of bridge packages for client to watch for. Need to + /// contain at least one item when `run_client` is true. pub sui_bridge_modules: Option<Vec<Identifier>>, // TODO: we need to hardcode the starting blocks for eth networks for cold start. - /// Override the start block number for each eth address. Key must be in `eth_addresses`. - /// When set, EthSyncer will start from this block number (inclusively) instead of the one in storage. + /// Override the start block number for each eth address. Key must be in + /// `eth_addresses`. When set, EthSyncer will start from this block + /// number (inclusively) instead of the one in storage. /// Key: eth address, Value: block number to start from - /// Note: This field should be rarely used. Only use it when you understand how to follow up. + /// Note: This field should be rarely used. Only use it when you understand + /// how to follow up. #[serde(skip_serializing_if = "Option::is_none")] pub eth_bridge_contracts_start_block_override: Option<BTreeMap<EthAddress, u64>>, - /// Override the last processed EventID for each bridge module. Key must be in `sui_bridge_modules`. - /// When set, SuiSyncer will start from this cursor (exclusively) instead of the one in storage. - /// Key: sui module, Value: last processed EventID (tx_digest, event_seq). - /// Note 1: This field should be rarely used. Only use it when you understand how to follow up.
- /// Note 2: the EventID needs to be valid, namely it must exist and match the filter. - /// Otherwise, it will miss one event because of fullnode Event query semantics. + /// Override the last processed EventID for each bridge module. Key must be + /// in `sui_bridge_modules`. When set, SuiSyncer will start from this + /// cursor (exclusively) instead of the one in storage. Key: sui module, + /// Value: last processed EventID (tx_digest, event_seq). Note 1: This + /// field should be rarely used. Only use it when you understand how to + /// follow up. Note 2: the EventID needs to be valid, namely it must + /// exist and match the filter. Otherwise, it will miss one event + /// because of fullnode Event query semantics. #[serde(skip_serializing_if = "Option::is_none")] pub sui_bridge_modules_last_processed_event_id_override: Option<BTreeMap<Identifier, EventID>>, - /// A list of approved governance actions. Action in this list will be signed when requested by client. + /// A list of approved governance actions. Actions in this list will be + /// signed when requested by the client. pub approved_governance_actions: Vec<BridgeAction>, } @@ -186,7 +200,7 @@ impl BridgeNodeConfig { None => { return Err(anyhow!( "`sui_bridge_modules` is required when `run_client` is true" - )) + )); } }; @@ -212,7 +226,12 @@ impl BridgeNodeConfig { .get_gas_data_panic_if_not_gas(gas_object_id) .await; if owner != Owner::AddressOwner(client_sui_address) { - return Err(anyhow!("Gas object {:?} is not owned by bridge client key's associated sui address {:?}, but {:?}", gas_object_id, client_sui_address, owner)); + return Err(anyhow!( + "Gas object {:?} is not owned by bridge client key's associated sui address {:?}, but {:?}", + gas_object_id, + client_sui_address, + owner + )); } info!( "Starting bridge client with gas object {:?}, balance: {}", @@ -243,7 +262,8 @@ pub struct BridgeServerConfig { pub metrics_port: u16, pub sui_client: Arc<SuiClient<SuiSdkClient>>, pub eth_client: Arc<EthClient<ethers::providers::Http>>, - /// A list of approved governance actions. Action in this list will be signed when requested by client. + /// A list of approved governance actions. Actions in this list will be + /// signed when requested by the client. pub approved_governance_actions: Vec<BridgeAction>, } diff --git a/crates/sui-bridge/src/crypto.rs b/crates/sui-bridge/src/crypto.rs index 2a2ff909a01..0a8b46bb7f3 100644 --- a/crates/sui-bridge/src/crypto.rs +++ b/crates/sui-bridge/src/crypto.rs @@ -1,29 +1,30 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::{ - error::{BridgeError, BridgeResult}, - types::{BridgeAction, BridgeCommittee, SignedBridgeAction, VerifiedSignedBridgeAction}, +use std::fmt::{Debug, Display, Formatter}; + +use ethers::{ + core::k256::{ecdsa::VerifyingKey, elliptic_curve::sec1::ToEncodedPoint}, + types::Address as EthAddress, }; -use ethers::core::k256::ecdsa::VerifyingKey; -use ethers::core::k256::elliptic_curve::sec1::ToEncodedPoint; -use ethers::types::Address as EthAddress; -use fastcrypto::hash::HashFunction; use fastcrypto::{ encoding::{Encoding, Hex}, error::FastCryptoError, + hash::{HashFunction, Keccak256}, secp256k1::{ recoverable::Secp256k1RecoverableSignature, Secp256k1KeyPair, Secp256k1PublicKey, Secp256k1PublicKeyAsBytes, }, - traits::{RecoverableSigner, ToFromBytes, VerifyRecoverable}, + traits::{KeyPair, RecoverableSigner, ToFromBytes, VerifyRecoverable}, }; -use fastcrypto::{hash::Keccak256, traits::KeyPair}; use serde::{Deserialize, Serialize}; -use std::fmt::Debug; -use std::fmt::{Display, Formatter}; use sui_types::{base_types::ConciseableName, message_envelope::VerifiedEnvelope}; use tap::TapFallible; + +use crate::{ + error::{BridgeError, BridgeResult}, + types::{BridgeAction, BridgeCommittee, SignedBridgeAction, VerifiedSignedBridgeAction}, +}; pub type BridgeAuthorityKeyPair = Secp256k1KeyPair; pub type BridgeAuthorityPublicKey = Secp256k1PublicKey; pub type BridgeAuthorityRecoverableSignature = Secp256k1RecoverableSignature; @@ -67,7 +68,7 @@ pub struct ConciseBridgeAuthorityPublicKeyBytesRef<'a>(&'a BridgeAuthorityPublic impl Debug for ConciseBridgeAuthorityPublicKeyBytesRef<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { - let s = Hex::encode(self.0 .0 .0.get(0..4).ok_or(std::fmt::Error)?); + let s = Hex::encode(self.0.0.0.get(0..4).ok_or(std::fmt::Error)?); write!(f, "k#{}..", s) } } @@ -80,7 +81,7 @@ impl Display for ConciseBridgeAuthorityPublicKeyBytesRef<'_> { impl AsRef<[u8]> for BridgeAuthorityPublicKeyBytes { fn as_ref(&self) -> &[u8] { - self.0 .0.as_ref() + self.0.0.as_ref() } } @@ -140,8 +141,9 @@ impl BridgeAuthoritySignInfo { } } -/// Verifies a SignedBridgeAction (response from bridge authority to bridge client) -/// represents the right BridgeAction, and is signed by the right authority. +/// Verifies a SignedBridgeAction (response from bridge authority to bridge +/// client) represents the right BridgeAction, and is signed by the right +/// authority. 
pub fn verify_signed_bridge_action( expected_action: &BridgeAction, signed_action: SignedBridgeAction, @@ -168,22 +170,22 @@ pub fn verify_signed_bridge_action( #[cfg(test)] mod tests { - use crate::events::EmittedSuiToEthTokenBridgeV1; - use crate::test_utils::{get_test_authority_and_key, get_test_sui_to_eth_bridge_action}; - use crate::types::SignedBridgeAction; - use crate::types::{ - BridgeAction, BridgeAuthority, BridgeChainId, SuiToEthBridgeAction, TokenId, - }; + use std::{str::FromStr, sync::Arc}; + use ethers::types::Address as EthAddress; use fastcrypto::traits::{KeyPair, ToFromBytes}; use prometheus::Registry; - use std::str::FromStr; - use std::sync::Arc; - use sui_types::base_types::SuiAddress; - use sui_types::crypto::get_key_pair; - use sui_types::digests::TransactionDigest; + use sui_types::{base_types::SuiAddress, crypto::get_key_pair, digests::TransactionDigest}; use super::*; + use crate::{ + events::EmittedSuiToEthTokenBridgeV1, + test_utils::{get_test_authority_and_key, get_test_sui_to_eth_bridge_action}, + types::{ + BridgeAction, BridgeAuthority, BridgeChainId, SignedBridgeAction, SuiToEthBridgeAction, + TokenId, + }, + }; #[test] fn test_sign_and_verify_bridge_event_basic() -> anyhow::Result<()> { @@ -232,7 +234,8 @@ mod tests { BridgeError::MismatchedAction, )); - // Signature is invalid (signed over different message), verification should fail + // Signature is invalid (signed over different message), verification should + // fail let action2: BridgeAction = get_test_sui_to_eth_bridge_action(None, Some(3), Some(5), Some(77)); diff --git a/crates/sui-bridge/src/eth_client.rs b/crates/sui-bridge/src/eth_client.rs index a97f236dfda..7a0adc1a4a1 100644 --- a/crates/sui-bridge/src/eth_client.rs +++ b/crates/sui-bridge/src/eth_client.rs @@ -3,17 +3,19 @@ use std::collections::HashSet; -use crate::abi::EthBridgeEvent; -use crate::error::{BridgeError, BridgeResult}; -use crate::types::{BridgeAction, EthLog}; -use ethers::providers::{Http, JsonRpcClient, Middleware, Provider}; -use ethers::types::TxHash; -use ethers::types::{Block, Filter}; +use ethers::{ + providers::{Http, JsonRpcClient, Middleware, Provider}, + types::{Address as EthAddress, Block, Filter, TxHash}, +}; use tap::TapFallible; #[cfg(test)] use crate::eth_mock_provider::EthMockProvider; -use ethers::types::Address as EthAddress; +use crate::{ + abi::EthBridgeEvent, + error::{BridgeError, BridgeResult}, + types::{BridgeAction, EthLog}, +}; pub struct EthClient<P> { provider: Provider<P>, contract_addresses: HashSet<EthAddress>, @@ -162,8 +164,9 @@ where Ok(results) } - /// This function converts a `Log` to `EthLog`, to make sure the `block_num`, `tx_hash` and `log_index_in_tx` - /// are available for downstream. + /// This function converts a `Log` to `EthLog`, to make sure the + /// `block_num`, `tx_hash` and `log_index_in_tx` are available for + /// downstream. // It's frustratingly ugly because of the nullability of many fields in `Log`. async fn get_log_tx_details(&self, log: ethers::types::Log) -> BridgeResult<EthLog> { let block_number = log @@ -180,8 +183,8 @@ where "Provider returns log without log_index".into(), ))?; - // Now get the log's index in the transaction. There is `transaction_log_index` field in - // `Log`, but I never saw it populated. + // Now get the log's index in the transaction. There is a `transaction_log_index` + // field in `Log`, but I never saw it populated. let receipt = self .provider @@ -197,7 +200,10 @@ where "Provider returns log without block_number".into(), ))?; if receipt_block_num.as_u64() != block_number { - return Err(BridgeError::ProviderError(format!("Provider returns receipt with different block number from log. Receipt: {:?}, Log: {:?}", receipt, log))); + return Err(BridgeError::ProviderError(format!( + "Provider returns receipt with different block number from log. Receipt: {:?}, Log: {:?}", + receipt, log + ))); } // Find the log index in the transaction @@ -207,7 +213,10 @@ where if receipt_log.log_index == Some(log_index) { // make sure the topics and data match if receipt_log.topics != log.topics || receipt_log.data != log.data { - return Err(BridgeError::ProviderError(format!("Provider returns receipt with different log from log. Receipt: {:?}, Log: {:?}", receipt, log))); + return Err(BridgeError::ProviderError(format!( + "Provider returns receipt with different log from log. Receipt: {:?}, Log: {:?}", + receipt, log + ))); } log_index_in_tx = Some(idx); } diff --git a/crates/sui-bridge/src/eth_mock_provider.rs b/crates/sui-bridge/src/eth_mock_provider.rs index a33586cf2eb..f835b73839a 100644 --- a/crates/sui-bridge/src/eth_mock_provider.rs +++ b/crates/sui-bridge/src/eth_mock_provider.rs @@ -1,23 +1,24 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! A mock implementation of Ethereum JSON-RPC client, based on `MockProvider` from `ethers-rs`. +//! A mock implementation of Ethereum JSON-RPC client, based on `MockProvider` +//! from `ethers-rs`. -use async_trait::async_trait; -use ethers::providers::JsonRpcClient; -use ethers::providers::MockError; -use serde::{de::DeserializeOwned, Serialize}; -use serde_json::Value; -use std::collections::HashMap; -use std::fmt::Debug; use std::{ borrow::Borrow, + collections::HashMap, + fmt::Debug, sync::{Arc, Mutex}, }; +use async_trait::async_trait; +use ethers::providers::{JsonRpcClient, MockError}; +use serde::{de::DeserializeOwned, Serialize}; +use serde_json::Value; + /// Helper type that can be used to pass through the `params` value.
-/// This is necessary because the wrapper provider is supposed to skip the `params` if it's of -/// size 0, see `crate::transports::common::Request` +/// This is necessary because the wrapper provider is supposed to skip the +/// `params` if it's of size 0, see `crate::transports::common::Request` #[derive(Debug, Eq, PartialEq, Clone, Hash)] enum MockParams { Value(String), @@ -97,9 +98,10 @@ impl EthMockProvider { #[cfg(test)] #[cfg(not(target_arch = "wasm32"))] mod tests { - use super::*; use ethers::{providers::Middleware, types::U64}; + use super::*; + #[tokio::test] async fn test_basic_responses_match() { let mock = EthMockProvider::new(); diff --git a/crates/sui-bridge/src/eth_syncer.rs b/crates/sui-bridge/src/eth_syncer.rs index 44ce0159b1e..7f189c17396 100644 --- a/crates/sui-bridge/src/eth_syncer.rs +++ b/crates/sui-bridge/src/eth_syncer.rs @@ -1,24 +1,27 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! The EthSyncer module is responsible for synchronizing Events emitted on Ethereum blockchain from -//! concerned contracts. Each contract is associated with a start block number, and the syncer will -//! only query from that block number onwards. The syncer also keeps track of the last finalized +//! The EthSyncer module is responsible for synchronizing Events emitted on +//! Ethereum blockchain from concerned contracts. Each contract is associated +//! with a start block number, and the syncer will only query from that block +//! number onwards. The syncer also keeps track of the last finalized //! block on Ethereum and will only query for events up to that block number. -use crate::error::BridgeResult; -use crate::eth_client::EthClient; -use crate::retry_with_max_elapsed_time; -use crate::types::EthLog; +use std::{collections::HashMap, sync::Arc}; + use ethers::types::Address as EthAddress; use mysten_metrics::spawn_logged_monitored_task; -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::watch; -use tokio::task::JoinHandle; -use tokio::time::{self, Duration}; +use tokio::{ + sync::watch, + task::JoinHandle, + time::{self, Duration}, +}; use tracing::error; +use crate::{ + error::BridgeResult, eth_client::EthClient, retry_with_max_elapsed_time, types::EthLog, +}; + const ETH_LOG_QUERY_MAX_BLOCK_RANGE: u64 = 1000; const ETH_EVENTS_CHANNEL_SIZE: usize = 1000; const FINALIZED_BLOCK_QUERY_INTERVAL: Duration = Duration::from_secs(2); @@ -156,12 +159,14 @@ where }; let len = events.len(); - // Note 1: we always send events to the channel even when it is empty. This is because of - // how `eth_getLogs` api is designed - we want cursor to move forward continuously. + // Note 1: we always send events to the channel even when it is empty. This is + // because of how `eth_getLogs` api is designed - we want cursor to + // move forward continuously. - // Note 2: it's extremely critical to make sure the Logs we send via this channel - // are complete per block height. Namely, we should never send a partial list - // of events for a block. Otherwise, we may end up missing events. + // Note 2: it's extremely critical to make sure the Logs we send via this + // channel are complete per block height. Namely, we should never + // send a partial list of events for a block. Otherwise, we may end + // up missing events.
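Note 1 above is a cursor-advancement invariant: every polled range produces a message, even an empty one, so the stored cursor never stalls waiting for logs. A stripped-down model with plain std types (illustrative, not the syncer's actual channel types):

```rust
use std::sync::mpsc;

fn main() {
    // (end_block, logs) pairs, mimicking what the syncer sends per range.
    let (tx, rx) = mpsc::channel::<(u64, Vec<&'static str>)>();
    for (start, end) in [(1u64, 100u64), (101, 200), (201, 300)] {
        // Pretend only the middle range contained any logs.
        let logs = if start == 101 { vec!["log-a", "log-b"] } else { vec![] };
        // Send unconditionally: an empty batch still advances the cursor.
        tx.send((end, logs)).unwrap();
    }
    drop(tx);

    let mut cursor = 0u64;
    for (end_block, logs) in rx {
        assert!(end_block > cursor, "cursor only moves forward");
        cursor = end_block;
        println!("cursor -> {cursor}, {} logs", logs.len());
    }
}
```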
events_sender .send((contract_address, end_block, events)) .await @@ -181,18 +186,16 @@ where mod tests { use std::{collections::HashSet, str::FromStr}; - use ethers::types::{Log, U256, U64}; + use ethers::types::{Log, TxHash, U256, U64}; use prometheus::Registry; use tokio::sync::mpsc::error::TryRecvError; + use super::*; use crate::{ eth_mock_provider::EthMockProvider, test_utils::{mock_get_logs, mock_last_finalized_block}, }; - use super::*; - use ethers::types::TxHash; - #[tokio::test] async fn test_last_finalized_block() -> anyhow::Result<()> { telemetry_subscribers::init_for_testing(); @@ -234,7 +237,8 @@ mod tests { .await .unwrap(); - // The latest finalized block stays at 777, event listener should not query again. + // The latest finalized block stays at 777, event listener should not query + // again. finalized_block_rx.changed().await.unwrap(); assert_eq!(*finalized_block_rx.borrow(), 777); let (contract_address, end_block, received_logs) = logs_rx.recv().await.unwrap(); @@ -250,7 +254,8 @@ mod tests { 888, vec![log.clone()], ); - // The latest finalized block is updated to 888, event listener should query again. + // The latest finalized block is updated to 888, event listener should query + // again. mock_last_finalized_block(&mock_provider, 888); finalized_block_rx.changed().await.unwrap(); assert_eq!(*finalized_block_rx.borrow(), 888); @@ -308,8 +313,8 @@ mod tests { log_index: Some(U256::from(6)), ..Default::default() }; - // Mock logs for another_address although it shouldn't be queried. We don't expect to - // see log2 in the logs channel later on. + // Mock logs for another_address although it shouldn't be queried. We don't + // expect to see log2 in the logs channel later on. mock_get_logs( &mock_provider, another_address, @@ -393,7 +398,8 @@ mod tests { Ok(()) } - /// Test that the syncer will query for logs in multiple queries if the range is too big. + /// Test that the syncer will query for logs in multiple queries if the + /// range is too big. 
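The pagination that doc comment refers to splits `[start_block, last_finalized]` into inclusive windows of at most `ETH_LOG_QUERY_MAX_BLOCK_RANGE` blocks; the test that follows then mocks one response per window. The arithmetic as a standalone sketch (the constant's value is the one defined earlier in eth_syncer.rs; the helper itself is illustrative):

```rust
const ETH_LOG_QUERY_MAX_BLOCK_RANGE: u64 = 1000;

// Split [start_block, last_finalized] into inclusive windows of at most
// ETH_LOG_QUERY_MAX_BLOCK_RANGE blocks, one eth_getLogs query per window.
fn paginate(start_block: u64, last_finalized: u64) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    let mut start = start_block;
    while start <= last_finalized {
        let end = (start + ETH_LOG_QUERY_MAX_BLOCK_RANGE - 1).min(last_finalized);
        ranges.push((start, end));
        start = end + 1;
    }
    ranges
}

fn main() {
    // Two queries: [100, 1099], then [1100, 1600].
    assert_eq!(paginate(100, 1600), vec![(100, 1099), (1100, 1600)]);
}
```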
#[tokio::test] async fn test_paginated_eth_log_query() -> anyhow::Result<()> { telemetry_subscribers::init_for_testing(); @@ -446,7 +452,8 @@ mod tests { start_block + ETH_LOG_QUERY_MAX_BLOCK_RANGE - 1, vec![log.clone()], ); - // Second query handles [start + ETH_LOG_QUERY_MAX_BLOCK_RANGE, last_finalized_block] + // Second query handles [start + ETH_LOG_QUERY_MAX_BLOCK_RANGE, + // last_finalized_block] mock_get_logs( &mock_provider, EthAddress::zero(), diff --git a/crates/sui-bridge/src/events.rs b/crates/sui-bridge/src/events.rs index ed4c3f447d0..c21c3d52de1 100644 --- a/crates/sui-bridge/src/events.rs +++ b/crates/sui-bridge/src/events.rs @@ -8,23 +8,19 @@ use std::str::FromStr; -use crate::error::BridgeError; -use crate::error::BridgeResult; -use crate::sui_transaction_builder::get_bridge_package_id; -use crate::types::BridgeAction; -use crate::types::BridgeActionType; -use crate::types::BridgeChainId; -use crate::types::SuiToEthBridgeAction; -use crate::types::TokenId; use ethers::types::Address as EthAddress; -use fastcrypto::encoding::Encoding; -use fastcrypto::encoding::Hex; +use fastcrypto::encoding::{Encoding, Hex}; use move_core_types::language_storage::StructTag; use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; use sui_json_rpc_types::SuiEvent; -use sui_types::base_types::SuiAddress; -use sui_types::digests::TransactionDigest; +use sui_types::{base_types::SuiAddress, digests::TransactionDigest}; + +use crate::{ + error::{BridgeError, BridgeResult}, + sui_transaction_builder::get_bridge_package_id, + types::{BridgeAction, BridgeActionType, BridgeChainId, SuiToEthBridgeAction, TokenId}, +}; // This is the event structure defined and emitted in Move #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] @@ -191,23 +187,22 @@ impl SuiBridgeEvent { #[cfg(test)] pub mod tests { - use super::get_bridge_event_struct_tag; - use super::EmittedSuiToEthTokenBridgeV1; - use super::MoveTokenBridgeEvent; - use crate::types::BridgeAction; - use crate::types::BridgeActionType; - use crate::types::BridgeChainId; - use crate::types::SuiToEthBridgeAction; - use crate::types::TokenId; + use std::str::FromStr; + use ethers::types::Address as EthAddress; use move_core_types::language_storage::StructTag; - use std::str::FromStr; use sui_json_rpc_types::SuiEvent; - use sui_types::base_types::ObjectID; - use sui_types::base_types::SuiAddress; - use sui_types::digests::TransactionDigest; - use sui_types::event::EventID; - use sui_types::Identifier; + use sui_types::{ + base_types::{ObjectID, SuiAddress}, + digests::TransactionDigest, + event::EventID, + Identifier, + }; + + use super::{get_bridge_event_struct_tag, EmittedSuiToEthTokenBridgeV1, MoveTokenBridgeEvent}; + use crate::types::{ + BridgeAction, BridgeActionType, BridgeChainId, SuiToEthBridgeAction, TokenId, + }; /// Returns a test SuiEvent and corresponding BridgeAction pub fn get_test_sui_event_and_action(identifier: Identifier) -> (SuiEvent, BridgeAction) { diff --git a/crates/sui-bridge/src/lib.rs b/crates/sui-bridge/src/lib.rs index 6d527f3ea36..58266c4e9f8 100644 --- a/crates/sui-bridge/src/lib.rs +++ b/crates/sui-bridge/src/lib.rs @@ -49,7 +49,8 @@ macro_rules! 
retry_with_max_elapsed_time { return Ok(result); } Err(e) => { - // For simplicity we treat every error as transient so we can retry until max_elapsed_time + // For simplicity we treat every error as transient so we can retry until + // max_elapsed_time tracing::debug!("Retrying due to error: {:?}", e); return Err(backoff::Error::transient(e)); } @@ -63,9 +64,10 @@ macro_rules! retry_with_max_elapsed_time { #[cfg(test)] mod tests { - use super::*; use std::time::Duration; + use super::*; + async fn example_func_ok() -> anyhow::Result<()> { Ok(()) } @@ -83,7 +85,8 @@ mod tests { .unwrap() .unwrap(); - // now call a function that always errors and expect it to return before max_elapsed_time runs out + // now call a function that always errors and expect it to return before + // max_elapsed_time runs out let max_elapsed_time = Duration::from_secs(4); let instant = std::time::Instant::now(); retry_with_max_elapsed_time!(example_func_err(), max_elapsed_time).unwrap_err(); diff --git a/crates/sui-bridge/src/main.rs b/crates/sui-bridge/src/main.rs index ed25477bde4..c024fd83055 100644 --- a/crates/sui-bridge/src/main.rs +++ b/crates/sui-bridge/src/main.rs @@ -1,14 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use clap::Parser; -use mysten_metrics::start_prometheus_server; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, }; -use sui_bridge::config::BridgeNodeConfig; -use sui_bridge::node::run_bridge_node; + +use clap::Parser; +use mysten_metrics::start_prometheus_server; +use sui_bridge::{config::BridgeNodeConfig, node::run_bridge_node}; use sui_config::Config; use tracing::info; diff --git a/crates/sui-bridge/src/node.rs b/crates/sui-bridge/src/node.rs index 1179dde2504..f09778cc32b 100644 --- a/crates/sui-bridge/src/node.rs +++ b/crates/sui-bridge/src/node.rs @@ -1,6 +1,16 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; + +use tokio::task::JoinHandle; +use tracing::info; + use crate::{ action_executor::BridgeActionExecutor, client::bridge_authority_aggregator::BridgeAuthorityAggregator, @@ -11,14 +21,6 @@ use crate::{ storage::BridgeOrchestratorTables, sui_syncer::SuiSyncer, }; -use std::{ - collections::HashMap, - net::{IpAddr, Ipv4Addr, SocketAddr}, - sync::Arc, - time::Duration, -}; -use tokio::task::JoinHandle; -use tracing::info; pub async fn run_bridge_node(config: BridgeNodeConfig) -> anyhow::Result<()> { let (server_config, client_config) = config.validate().await?; @@ -110,7 +112,8 @@ async fn start_client_components( contract, client_config.eth_bridge_contracts_start_block_override[contract], cursor ); } else if let Some(cursor) = cursor { - // +1: The stored value is the last block that was processed, so we start from the next block. + // +1: The stored value is the last block that was processed, so we start from + // the next block. eth_contracts_to_watch.insert(*contract, cursor + 1); } else { return Err(anyhow::anyhow!( diff --git a/crates/sui-bridge/src/orchestrator.rs b/crates/sui-bridge/src/orchestrator.rs index 451ecfb6d37..4c8197e94bd 100644 --- a/crates/sui-bridge/src/orchestrator.rs +++ b/crates/sui-bridge/src/orchestrator.rs @@ -2,27 +2,32 @@ // SPDX-License-Identifier: Apache-2.0 //! `BridgeOrchestrator` is the component that: -//! 1. monitors Sui and Ethereum events with the help of `SuiSyncer` and `EthSyncer` +//! 1. 
monitors Sui and Ethereum events with the help of `SuiSyncer` and +//! `EthSyncer` //! 2. updates WAL table and cursor tables //! 3. hands actions to `BridgeExecutor` for execution -use crate::abi::EthBridgeEvent; -use crate::action_executor::{ - submit_to_executor, BridgeActionExecutionWrapper, BridgeActionExecutorTrait, -}; -use crate::error::BridgeResult; -use crate::events::SuiBridgeEvent; -use crate::storage::BridgeOrchestratorTables; -use crate::sui_client::{SuiClient, SuiClientInner}; -use crate::types::EthLog; +use std::sync::Arc; + use ethers::types::Address as EthAddress; use mysten_metrics::spawn_logged_monitored_task; -use std::sync::Arc; use sui_json_rpc_types::SuiEvent; use sui_types::Identifier; use tokio::task::JoinHandle; use tracing::{info, warn}; +use crate::{ + abi::EthBridgeEvent, + action_executor::{ + submit_to_executor, BridgeActionExecutionWrapper, BridgeActionExecutorTrait, + }, + error::BridgeResult, + events::SuiBridgeEvent, + storage::BridgeOrchestratorTables, + sui_client::{SuiClient, SuiClientInner}, + types::EthLog, +}; + pub struct BridgeOrchestrator<C> { _sui_client: Arc<SuiClient<C>>, sui_events_rx: mysten_metrics::metered_channel::Receiver<(Identifier, Vec<SuiEvent>)>, @@ -179,7 +184,8 @@ where store .insert_pending_actions(&actions) .expect("Store operation should not fail"); - // Execution will remove the pending actions from DB when the action is completed. + // Execution will remove the pending actions from DB when the action is + // completed. for action in actions { submit_to_executor(&executor_tx, action) .await @@ -197,13 +203,16 @@ where #[cfg(test)] mod tests { - use crate::{test_utils::get_test_log_and_action, types::BridgeActionDigest}; + use std::str::FromStr; + use ethers::types::{Address as EthAddress, TxHash}; use prometheus::Registry; - use std::str::FromStr; use super::*; - use crate::{events::tests::get_test_sui_event_and_action, sui_mock_client::SuiMockClient}; + use crate::{ + events::tests::get_test_sui_event_and_action, sui_mock_client::SuiMockClient, + test_utils::get_test_log_and_action, types::BridgeActionDigest, + }; #[tokio::test] async fn test_sui_watcher_task() { @@ -259,8 +268,10 @@ mod tests { #[tokio::test] async fn test_eth_watcher_task() { // Note: this test may fail because of the following reasons: - // 1. Log and BridgeAction returned from `get_test_log_and_action` are not in sync - // 2. Log returned from `get_test_log_and_action` is not parseable log (not abigen!, check abi.rs) + // 1. Log and BridgeAction returned from `get_test_log_and_action` are not in + // sync + // 2. Log returned from `get_test_log_and_action` is not a parseable log (not + // abigen!, check abi.rs) let (_sui_events_tx, sui_events_rx, eth_events_tx, eth_events_rx, sui_client, store) = setup(); @@ -364,7 +375,8 @@ mod tests { ) } - /// A `BridgeActionExecutorTrait` implementation that only tracks the submitted actions. + /// A `BridgeActionExecutorTrait` implementation that only tracks the + /// submitted actions.
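The "Execution will remove the pending actions" hunk above encodes a small write-ahead-log protocol: persist first, execute second, delete last, so a crash between steps replays the action rather than losing it. A minimal model (not the real `BridgeOrchestratorTables` API):

```rust
use std::collections::HashSet;

// WAL of pending action digests; entries survive restarts in the real store.
struct Wal(HashSet<&'static str>);

impl Wal {
    // Step 1: persist before handing the action to the executor.
    fn insert_pending(&mut self, digest: &'static str) {
        self.0.insert(digest);
    }
    // Step 3: only the executor clears the entry, after execution completes.
    fn remove_completed(&mut self, digest: &'static str) {
        self.0.remove(digest);
    }
}

fn main() {
    let mut wal = Wal(HashSet::new());
    wal.insert_pending("action-1");
    // Step 2 (stubbed): submit_to_executor(...) and await completion.
    wal.remove_completed("action-1");
    assert!(wal.0.is_empty()); // a crash before this line would replay action-1
}
```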
struct MockExecutor { requested_transactions_tx: tokio::sync::broadcast::Sender<BridgeActionDigest>, } diff --git a/crates/sui-bridge/src/server/governance_verifier.rs b/crates/sui-bridge/src/server/governance_verifier.rs index 7be1323b0b3..180eb6b5c60 100644 --- a/crates/sui-bridge/src/server/governance_verifier.rs +++ b/crates/sui-bridge/src/server/governance_verifier.rs @@ -3,9 +3,11 @@ use std::collections::HashMap; -use crate::error::{BridgeError, BridgeResult}; -use crate::server::handler::ActionVerifier; -use crate::types::{BridgeAction, BridgeActionDigest}; +use crate::{ + error::{BridgeError, BridgeResult}, + server::handler::ActionVerifier, + types::{BridgeAction, BridgeActionDigest}, +}; #[derive(Debug)] pub struct GovernanceVerifier { @@ -31,7 +33,8 @@ impl GovernanceVerifier { #[async_trait::async_trait] impl ActionVerifier<BridgeAction> for GovernanceVerifier { async fn verify(&self, key: BridgeAction) -> BridgeResult<BridgeAction> { - // TODO: an optimization would be to check the current nonce on chain and err for older ones + // TODO: an optimization would be to check the current nonce on chain and err + // for older ones if !key.is_governace_action() { return Err(BridgeError::ActionIsNotGovernanceAction(key)); } diff --git a/crates/sui-bridge/src/server/handler.rs b/crates/sui-bridge/src/server/handler.rs index 6d201b0e254..ad298887e45 100644 --- a/crates/sui-bridge/src/server/handler.rs +++ b/crates/sui-bridge/src/server/handler.rs @@ -3,26 +3,25 @@ #![allow(clippy::type_complexity)] -use crate::crypto::{BridgeAuthorityKeyPair, BridgeAuthoritySignInfo}; -use crate::error::{BridgeError, BridgeResult}; -use crate::eth_client::EthClient; -use crate::sui_client::{SuiClient, SuiClientInner}; -use crate::types::{BridgeAction, SignedBridgeAction}; +use std::{num::NonZeroUsize, str::FromStr, sync::Arc}; + use async_trait::async_trait; use axum::Json; -use ethers::providers::JsonRpcClient; -use ethers::types::TxHash; +use ethers::{providers::JsonRpcClient, types::TxHash}; use lru::LruCache; -use std::num::NonZeroUsize; -use std::str::FromStr; -use std::sync::Arc; use sui_types::digests::TransactionDigest; use tap::TapFallible; use tokio::sync::{oneshot, Mutex}; -use tracing::info; -use tracing::instrument; +use tracing::{info, instrument}; use super::governance_verifier::GovernanceVerifier; +use crate::{ + crypto::{BridgeAuthorityKeyPair, BridgeAuthoritySignInfo}, + error::{BridgeError, BridgeResult}, + eth_client::EthClient, + sui_client::{SuiClient, SuiClientInner}, + types::{BridgeAction, SignedBridgeAction}, +}; #[async_trait] pub trait BridgeRequestHandlerTrait { @@ -128,8 +127,8 @@ where .await .unwrap_or_else(|| panic!("Server signer's channel is closed")); let result = self.sign(key).await; - // The receiver may be dropped before the sender (client connection was dropped for example), - // we ignore the error in that case. + // The receiver may be dropped before the sender (client connection was dropped + // for example), we ignore the error in that case.
let _ = tx.send(result); } }) @@ -139,7 +138,8 @@ where &mut self, key: K, ) -> Arc<Mutex<Option<BridgeResult<SignedBridgeAction>>>> { - // This mutex exists to make sure everyone gets the same entry, namely no double insert + // This mutex exists to make sure everyone gets the same entry, namely no double + // insert let _ = self.mutex.lock().await; self.cache .get_or_insert(key, || Arc::new(Mutex::new(None))) @@ -320,6 +320,10 @@ impl BridgeRequestHandlerTrait for BridgeRequestHandler { mod tests { use std::collections::HashSet; + use ethers::types::{Address as EthAddress, TransactionReceipt}; + use sui_json_rpc_types::SuiEvent; + use sui_types::{base_types::SuiAddress, crypto::get_key_pair}; + use super::*; use crate::{ eth_mock_provider::EthMockProvider, @@ -333,9 +337,6 @@ mod tests { LimitUpdateAction, TokenId, }, }; - use ethers::types::{Address as EthAddress, TransactionReceipt}; - use sui_json_rpc_types::SuiEvent; - use sui_types::{base_types::SuiAddress, crypto::get_key_pair}; #[tokio::test] async fn test_sui_signer_with_cache() { @@ -350,10 +351,12 @@ mod tests { // Test `get_cache_entry` creates a new entry if not exist let sui_tx_digest = TransactionDigest::random(); let sui_event_idx = 42; - assert!(sui_signer_with_cache - .get_testing_only((sui_tx_digest, sui_event_idx)) - .await - .is_none()); + assert!( + sui_signer_with_cache + .get_testing_only((sui_tx_digest, sui_event_idx)) + .await + .is_none() + ); let entry = sui_signer_with_cache .get_cache_entry((sui_tx_digest, sui_event_idx)) .await; @@ -387,7 +390,8 @@ mod tests { .await; assert!(entry_.unwrap().lock().await.is_none()); - // Mock a cacheable error such as no bridge events in tx position (empty event list) + // Mock a cacheable error such as no bridge events in tx position (empty event + // list) sui_client_mock.add_events_by_tx_digest(sui_tx_digest, vec![]); assert!(matches!( sui_signer_with_cache @@ -403,8 +407,9 @@ mod tests { BridgeError::NoBridgeEventsInTxPosition, ); - // TODO: test BridgeEventInUnrecognizedSuiPackage, SuiBridgeEvent::try_from_sui_event - // and BridgeEventNotActionable to be cached + // TODO: test BridgeEventInUnrecognizedSuiPackage, + // SuiBridgeEvent::try_from_sui_event and BridgeEventNotActionable to be + // cached // Test `sign` caches Ok result let emitted_event_1 = MoveTokenBridgeEvent { @@ -447,7 +452,8 @@ mod tests { .unwrap(); // Because the result is cached now, the verifier should not be called again. - // Even though we remove the `add_events_by_tx_digest` mock, we will still get the same result. + // Even though we remove the `add_events_by_tx_digest` mock, we will still get + // the same result.
sui_client_mock.add_events_by_tx_digest(sui_tx_digest, vec![]); assert_eq!( sui_signer_with_cache @@ -483,17 +489,20 @@ mod tests { // Test `get_cache_entry` creates a new entry if not exist let eth_tx_hash = TxHash::random(); let eth_event_idx = 42; - assert!(eth_signer_with_cache - .get_testing_only((eth_tx_hash, eth_event_idx)) - .await - .is_none()); + assert!( + eth_signer_with_cache + .get_testing_only((eth_tx_hash, eth_event_idx)) + .await + .is_none() + ); let entry = eth_signer_with_cache .get_cache_entry((eth_tx_hash, eth_event_idx)) .await; let entry_ = eth_signer_with_cache .get_testing_only((eth_tx_hash, eth_event_idx)) .await; - // first unwrap should not panic because the entry should have been inserted by `get_cache_entry` + // first unwrap should not panic because the entry should have been inserted by + // `get_cache_entry` assert!(entry_.unwrap().lock().await.is_none()); let (_, action) = get_test_log_and_action(contract_address, eth_tx_hash, eth_event_idx); diff --git a/crates/sui-bridge/src/server/mock_handler.rs b/crates/sui-bridge/src/server/mock_handler.rs index 87adce061ed..1179ce9e24d 100644 --- a/crates/sui-bridge/src/server/mock_handler.rs +++ b/crates/sui-bridge/src/server/mock_handler.rs @@ -4,23 +4,24 @@ //! A mock implementation for `BridgeRequestHandlerTrait` //! that handles requests according to preset behaviors. -use std::collections::HashMap; -use std::net::SocketAddr; -use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + net::SocketAddr, + str::FromStr, + sync::{Arc, Mutex}, +}; -use crate::crypto::BridgeAuthorityKeyPair; -use crate::crypto::BridgeAuthoritySignInfo; -use crate::error::BridgeError; -use crate::error::BridgeResult; -use crate::types::SignedBridgeAction; use arc_swap::ArcSwap; use async_trait::async_trait; use axum::Json; use sui_types::digests::TransactionDigest; -use super::handler::BridgeRequestHandlerTrait; -use super::make_router; +use super::{handler::BridgeRequestHandlerTrait, make_router}; +use crate::{ + crypto::{BridgeAuthorityKeyPair, BridgeAuthoritySignInfo}, + error::{BridgeError, BridgeResult}, + types::SignedBridgeAction, +}; #[allow(clippy::type_complexity)] #[derive(Clone)] diff --git a/crates/sui-bridge/src/server/mod.rs b/crates/sui-bridge/src/server/mod.rs index ee6c0052506..5f1a388efa0 100644 --- a/crates/sui-bridge/src/server/mod.rs +++ b/crates/sui-bridge/src/server/mod.rs @@ -3,6 +3,20 @@ #![allow(clippy::inconsistent_digit_grouping)] +use std::{net::SocketAddr, sync::Arc}; + +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::get, + Json, Router, +}; +use ethers::types::Address as EthAddress; +use fastcrypto::{ + encoding::{Encoding, Hex}, + traits::ToFromBytes, +}; + use crate::{ crypto::BridgeAuthorityPublicKeyBytes, error::BridgeError, @@ -13,18 +27,6 @@ use crate::{ LimitUpdateAction, SignedBridgeAction, TokenId, }, }; -use axum::{ - extract::{Path, State}, - Json, -}; -use axum::{http::StatusCode, routing::get, Router}; -use ethers::types::Address as EthAddress; -use fastcrypto::{ - encoding::{Encoding, Hex}, - traits::ToFromBytes, -}; -use std::net::SocketAddr; -use std::sync::Arc; pub mod governance_verifier; pub mod handler; @@ -265,10 +267,10 @@ async fn handle_evm_contract_upgrade( #[cfg(test)] mod tests { use super::*; - use crate::client::bridge_client::BridgeClient; - use crate::server::mock_handler::BridgeRequestMockHandler; - use crate::test_utils::get_test_authorities_and_run_mock_bridge_server; - use crate::types::BridgeCommittee; + use
crate::{ + client::bridge_client::BridgeClient, server::mock_handler::BridgeRequestMockHandler, + test_utils::get_test_authorities_and_run_mock_bridge_server, types::BridgeCommittee, + }; #[tokio::test] async fn test_bridge_server_handle_blocklist_update_action_path() { diff --git a/crates/sui-bridge/src/storage.rs b/crates/sui-bridge/src/storage.rs index 3a1ede1b920..94ed1c891b3 100644 --- a/crates/sui-bridge/src/storage.rs +++ b/crates/sui-bridge/src/storage.rs @@ -1,20 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::collections::HashMap; -use std::path::Path; -use std::sync::Arc; -use sui_types::Identifier; - -use sui_types::event::EventID; -use typed_store::rocks::{DBMap, MetricConf}; -use typed_store::traits::TableSummary; -use typed_store::traits::TypedStoreDebug; -use typed_store::Map; +use std::{collections::HashMap, path::Path, sync::Arc}; + +use sui_types::{event::EventID, Identifier}; +use typed_store::{ + rocks::{DBMap, MetricConf}, + traits::{TableSummary, TypedStoreDebug}, + Map, +}; use typed_store_derive::DBMapUtils; -use crate::error::{BridgeError, BridgeResult}; -use crate::types::{BridgeAction, BridgeActionDigest}; +use crate::{ + error::{BridgeError, BridgeResult}, + types::{BridgeAction, BridgeActionDigest}, +}; #[derive(DBMapUtils)] pub struct BridgeOrchestratorTables { @@ -141,9 +141,8 @@ mod tests { use sui_types::digests::TransactionDigest; - use crate::test_utils::get_test_sui_to_eth_bridge_action; - use super::*; + use crate::test_utils::get_test_sui_to_eth_bridge_action; // async: existing runtime is required with typed-store #[tokio::test] @@ -202,10 +201,12 @@ mod tests { // update eth event cursor let eth_contract_address = ethers::types::Address::random(); let eth_block_num = 199999u64; - assert!(store - .get_eth_event_cursors(&[eth_contract_address]) - .unwrap()[0] - .is_none()); + assert!( + store + .get_eth_event_cursors(&[eth_contract_address]) + .unwrap()[0] + .is_none() + ); store .update_eth_event_cursor(eth_contract_address, eth_block_num) .unwrap(); diff --git a/crates/sui-bridge/src/sui_client.rs b/crates/sui-bridge/src/sui_client.rs index cf2b753e35a..50bf2fadbfe 100644 --- a/crates/sui-bridge/src/sui_client.rs +++ b/crates/sui-bridge/src/sui_client.rs @@ -4,64 +4,59 @@ // TODO remove when integrated #![allow(unused)] -use std::str::from_utf8; -use std::str::FromStr; -use std::time::Duration; +use std::{ + str::{from_utf8, FromStr}, + time::Duration, +}; use anyhow::anyhow; use async_trait::async_trait; use axum::response::sse::Event; use ethers::types::{Address, U256}; -use fastcrypto::traits::KeyPair; -use fastcrypto::traits::ToFromBytes; +use fastcrypto::traits::{KeyPair, ToFromBytes}; use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; -use sui_json_rpc_types::{EventFilter, Page, SuiData, SuiEvent}; use sui_json_rpc_types::{ - EventPage, SuiObjectDataOptions, SuiTransactionBlockResponse, - SuiTransactionBlockResponseOptions, + EventFilter, EventPage, Page, SuiData, SuiEvent, SuiObjectDataOptions, + SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; use sui_sdk::{SuiClient as SuiSdkClient, SuiClientBuilder}; -use sui_types::base_types::ObjectRef; -use sui_types::collection_types::LinkedTableNode; -use sui_types::crypto::get_key_pair; -use sui_types::dynamic_field::DynamicFieldName; -use sui_types::dynamic_field::Field; -use sui_types::error::SuiObjectResponseError; -use sui_types::error::UserInputError; -use sui_types::event; -use sui_types::gas_coin::GasCoin; -use 
sui_types::object::{Object, Owner}; -use sui_types::transaction::Transaction; -use sui_types::TypeTag; use sui_types::{ - base_types::{ObjectID, SuiAddress}, + base_types::{ObjectID, ObjectRef, SuiAddress}, + collection_types::LinkedTableNode, + crypto::get_key_pair, digests::TransactionDigest, + dynamic_field::{DynamicFieldName, Field}, + error::{SuiObjectResponseError, UserInputError}, + event, event::EventID, - Identifier, + gas_coin::GasCoin, + object::{Object, Owner}, + transaction::Transaction, + Identifier, TypeTag, }; use tap::TapFallible; use tracing::{error, warn}; -use crate::crypto::BridgeAuthorityPublicKey; -use crate::error::{BridgeError, BridgeResult}; -use crate::events::SuiBridgeEvent; -use crate::retry_with_max_elapsed_time; -use crate::sui_transaction_builder::get_bridge_package_id; -use crate::types::BridgeActionStatus; -use crate::types::BridgeInnerDynamicField; -use crate::types::BridgeRecordDyanmicField; -use crate::types::MoveTypeBridgeMessageKey; -use crate::types::MoveTypeBridgeRecord; -use crate::types::{ - BridgeAction, BridgeAuthority, BridgeCommittee, MoveTypeBridgeCommittee, MoveTypeBridgeInner, - MoveTypeCommitteeMember, +use crate::{ + crypto::BridgeAuthorityPublicKey, + error::{BridgeError, BridgeResult}, + events::SuiBridgeEvent, + retry_with_max_elapsed_time, + sui_transaction_builder::get_bridge_package_id, + types::{ + BridgeAction, BridgeActionStatus, BridgeAuthority, BridgeCommittee, + BridgeInnerDynamicField, BridgeRecordDyanmicField, MoveTypeBridgeCommittee, + MoveTypeBridgeInner, MoveTypeBridgeMessageKey, MoveTypeBridgeRecord, + MoveTypeCommitteeMember, + }, }; -// TODO: once we have bridge package on sui framework, we can hardcode the actual -// bridge dynamic field object id (not 0x9 or dynamic field wrapper) and update -// along with software upgrades. -// Or do we always retrieve from 0x9? We can figure this out before the first upgrade. +// TODO: once we have bridge package on sui framework, we can hardcode the +// actual bridge dynamic field object id (not 0x9 or dynamic field wrapper) and +// update along with software upgrades. +// Or do we always retrieve from 0x9? We can figure this out before the first +// upgrade. fn get_bridge_object_id() -> &'static ObjectID { static BRIDGE_OBJ_ID: OnceCell<ObjectID> = OnceCell::new(); BRIDGE_OBJ_ID.get_or_init(|| { @@ -73,7 +68,8 @@ fn get_bridge_object_id() -> &'static ObjectID { } // object id of BridgeRecord, this is wrapped in the bridge inner object. -// TODO: once we have bridge package on sui framework, we can hardcode the actual id. +// TODO: once we have bridge package on sui framework, we can hardcode the +// actual id. fn get_bridge_record_id() -> &'static ObjectID { static BRIDGE_RECORD_ID: OnceCell<ObjectID> = OnceCell::new(); BRIDGE_RECORD_ID.get_or_init(|| { @@ -130,11 +126,13 @@ where let events = self.inner.query_events(filter.clone(), cursor).await?; // Safeguard check that all events are emitted from requested package and module - assert!(events - .data - .iter() - .all(|event| event.type_.address.as_ref() == package.as_ref() - && event.type_.module == module)); + assert!( + events + .data + .iter() + .all(|event| event.type_.address.as_ref() == package.as_ref() + && event.type_.module == module) + ); Ok(events) } @@ -340,17 +338,18 @@ impl SuiClientInner for SuiSdkClient { { Ok(object) => object.object_id, Err(SuiObjectResponseError::DynamicFieldNotFound { ..
}) => { - return Ok(BridgeActionStatus::RecordNotFound) + return Ok(BridgeActionStatus::RecordNotFound); } other => { return Err(BridgeError::Generic(format!( "Can't get bridge action record dynamic field {:?}: {:?}", key, other - ))) + ))); } }; - // get_dynamic_field_object does not return bcs, so we have to issue another query + // get_dynamic_field_object does not return bcs, so we have to issue another + // query let bcs_bytes = self .read_api() .get_move_object_bcs(status_object_id) .await @@ -413,15 +412,8 @@ impl SuiClientInner for SuiSdkClient { #[cfg(test)] mod tests { - use crate::{ - events::{EmittedSuiToEthTokenBridgeV1, MoveTokenBridgeEvent}, - sui_mock_client::SuiMockClient, - test_utils::{ - bridge_token, get_test_sui_to_eth_bridge_action, mint_tokens, publish_bridge_package, - transfer_treasury_cap, - }, - types::{BridgeActionType, BridgeChainId, SuiToEthBridgeAction, TokenId}, - }; + use std::{collections::HashSet, str::FromStr}; + use ethers::{ abi::Token, types::{ @@ -431,11 +423,21 }, }; use move_core_types::account_address::AccountAddress; use prometheus::Registry; - use std::{collections::HashSet, str::FromStr}; use test_cluster::TestClusterBuilder; use super::*; - use crate::events::{init_all_struct_tags, SuiToEthTokenBridgeV1}; + use crate::{ + events::{ + init_all_struct_tags, EmittedSuiToEthTokenBridgeV1, MoveTokenBridgeEvent, + SuiToEthTokenBridgeV1, + }, + sui_mock_client::SuiMockClient, + test_utils::{ + bridge_token, get_test_sui_to_eth_bridge_action, mint_tokens, publish_bridge_package, + transfer_treasury_cap, + }, + types::{BridgeActionType, BridgeChainId, SuiToEthBridgeAction, TokenId}, + }; #[tokio::test] async fn get_bridge_action_by_tx_digest_and_event_idx_maybe() { @@ -605,11 +607,13 @@ .unwrap(); assert_eq!(status, BridgeActionStatus::Pending); - // TODO: run bridge committee and approve the action, then assert status is Approved + // TODO: run bridge committee and approve the action, then assert status + // is Approved } #[tokio::test] async fn test_get_action_onchain_status_for_eth_to_sui_transfer() { - // TODO: init an eth -> sui transfer, run bridge committee, approve the action, then assert status is Approved/Claimed + // TODO: init an eth -> sui transfer, run bridge committee, approve the + // action, then assert status is Approved/Claimed } } diff --git a/crates/sui-bridge/src/sui_mock_client.rs b/crates/sui-bridge/src/sui_mock_client.rs index 43ef442ba64..ade88b4c490 100644 --- a/crates/sui-bridge/src/sui_mock_client.rs +++ b/crates/sui-bridge/src/sui_mock_client.rs @@ -3,23 +3,28 @@ //! A mock implementation of Sui JSON-RPC client.
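The pattern behind this module is the usual preset-response mock: tests register canned responses keyed by request parameters, and later assert on the recorded requests. A condensed, self-contained version of the idea (stand-in types; the real mock implements `SuiClientInner`):

```rust
use std::{collections::HashMap, sync::Mutex};

#[derive(Default)]
struct MockClient {
    responses: Mutex<HashMap<String, String>>, // canned response per request key
    requests: Mutex<Vec<String>>,              // every request seen, for assertions
}

impl MockClient {
    fn add_response(&self, key: &str, value: &str) {
        self.responses.lock().unwrap().insert(key.into(), value.into());
    }
    fn query(&self, key: &str) -> Option<String> {
        self.requests.lock().unwrap().push(key.into());
        self.responses.lock().unwrap().get(key).cloned()
    }
}

fn main() {
    let mock = MockClient::default();
    mock.add_response("digest-1", "event-payload");
    assert_eq!(mock.query("digest-1").as_deref(), Some("event-payload"));
    assert_eq!(mock.requests.lock().unwrap().len(), 1);
}
```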
-use crate::error::{BridgeError, BridgeResult}; +use std::{ + collections::{HashMap, VecDeque}, + sync::{Arc, Mutex}, +}; + use async_trait::async_trait; -use std::collections::{HashMap, VecDeque}; -use std::sync::{Arc, Mutex}; -use sui_json_rpc_types::SuiTransactionBlockResponse; -use sui_json_rpc_types::{EventFilter, EventPage, SuiEvent}; -use sui_types::base_types::ObjectID; -use sui_types::base_types::ObjectRef; -use sui_types::digests::TransactionDigest; -use sui_types::event::EventID; -use sui_types::gas_coin::GasCoin; -use sui_types::object::Owner; -use sui_types::transaction::Transaction; -use sui_types::Identifier; - -use crate::sui_client::SuiClientInner; -use crate::types::{BridgeAction, BridgeActionDigest, BridgeActionStatus, MoveTypeBridgeCommittee}; +use sui_json_rpc_types::{EventFilter, EventPage, SuiEvent, SuiTransactionBlockResponse}; +use sui_types::{ + base_types::{ObjectID, ObjectRef}, + digests::TransactionDigest, + event::EventID, + gas_coin::GasCoin, + object::Owner, + transaction::Transaction, + Identifier, +}; + +use crate::{ + error::{BridgeError, BridgeResult}, + sui_client::SuiClientInner, + types::{BridgeAction, BridgeActionDigest, BridgeActionStatus, MoveTypeBridgeCommittee}, +}; /// Mock client used in test environments. #[allow(clippy::type_complexity)] diff --git a/crates/sui-bridge/src/sui_syncer.rs b/crates/sui-bridge/src/sui_syncer.rs index ff214d156a9..3f03ef751dc 100644 --- a/crates/sui-bridge/src/sui_syncer.rs +++ b/crates/sui-bridge/src/sui_syncer.rs @@ -1,17 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! The SuiSyncer module is responsible for synchronizing Events emitted on Sui blockchain from -//! concerned bridge packages. +//! The SuiSyncer module is responsible for synchronizing Events emitted on Sui +//! blockchain from concerned bridge packages. -use crate::{ - error::BridgeResult, - retry_with_max_elapsed_time, - sui_client::{SuiClient, SuiClientInner}, - sui_transaction_builder::get_bridge_package_id, -}; -use mysten_metrics::spawn_logged_monitored_task; use std::{collections::HashMap, sync::Arc}; + +use mysten_metrics::spawn_logged_monitored_task; use sui_json_rpc_types::SuiEvent; use sui_types::{event::EventID, Identifier}; use tokio::{ @@ -19,6 +14,13 @@ use tokio::{ time::{self, Duration}, }; +use crate::{ + error::BridgeResult, + retry_with_max_elapsed_time, + sui_client::{SuiClient, SuiClientInner}, + sui_transaction_builder::get_bridge_package_id, +}; + // TODO: use the right package id // const PACKAGE_ID: ObjectID = SUI_SYSTEM_PACKAGE_ID; const SUI_EVENTS_CHANNEL_SIZE: usize = 1000; @@ -100,10 +102,11 @@ where let len = events.data.len(); if len != 0 { - // Note: it's extremely critical to make sure the SuiEvents we send via this channel - // are complete per transaction level. Namely, we should never send a partial list - // of events for a transaction. Otherwise, we may end up missing events. - // See `sui_client.query_events_by_module` for how this is implemented. + // Note: it's extremely critical to make sure the SuiEvents we send via this + // channel are complete per transaction level. Namely, we should + // never send a partial list of events for a transaction. + // Otherwise, we may end up missing events. See + // `sui_client.query_events_by_module` for how this is implemented.
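The invariant in that note reduces to: batches may only be cut on transaction boundaries. A small illustration of grouping events by tx digest before sending, so one transaction's events never straddle two messages (placeholder types, not the real `SuiEvent`):

```rust
use std::collections::BTreeMap;

// (tx_digest, event_seq) pairs, grouped so each batch holds *all* events of
// one transaction; the channel then only ever carries whole groups.
fn group_by_tx(events: Vec<(&'static str, u32)>) -> Vec<Vec<(&'static str, u32)>> {
    let mut by_tx: BTreeMap<&'static str, Vec<(&'static str, u32)>> = BTreeMap::new();
    for e in events {
        by_tx.entry(e.0).or_default().push(e);
    }
    by_tx.into_values().collect()
}

fn main() {
    let batches = group_by_tx(vec![("tx1", 0), ("tx1", 1), ("tx2", 0)]);
    assert_eq!(batches.len(), 2);
    assert_eq!(batches[0].len(), 2); // tx1's events stay together
}
```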
events_sender .send((module.clone(), events.data)) .await @@ -119,14 +122,14 @@ where #[cfg(test)] mod tests { - use super::*; - - use crate::{sui_client::SuiClient, sui_mock_client::SuiMockClient}; use prometheus::Registry; use sui_json_rpc_types::EventPage; use sui_types::{digests::TransactionDigest, event::EventID, Identifier}; use tokio::time::timeout; + use super::*; + use crate::{sui_client::SuiClient, sui_mock_client::SuiMockClient}; + #[tokio::test] async fn test_sui_syncer_basic() -> anyhow::Result<()> { telemetry_subscribers::init_for_testing(); diff --git a/crates/sui-bridge/src/sui_transaction_builder.rs b/crates/sui-bridge/src/sui_transaction_builder.rs index 3e173576c46..1ca31002c6a 100644 --- a/crates/sui-bridge/src/sui_transaction_builder.rs +++ b/crates/sui-bridge/src/sui_transaction_builder.rs @@ -6,9 +6,9 @@ use std::{collections::HashMap, str::FromStr}; use fastcrypto::traits::ToFromBytes; use move_core_types::ident_str; use once_cell::sync::OnceCell; -use sui_types::gas_coin::GAS; use sui_types::{ base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}, + gas_coin::GAS, programmable_transaction_builder::ProgrammableTransactionBuilder, transaction::{ObjectArg, TransactionData}, TypeTag, }; @@ -19,7 +19,8 @@ use crate::{ types::{BridgeAction, TokenId, VerifiedCertifiedBridgeAction}, }; -// TODO: once we have bridge package on sui framework, we can hardcode the actual package id. +// TODO: once we have bridge package on sui framework, we can hardcode the +// actual package id. pub fn get_bridge_package_id() -> &'static ObjectID { static BRIDGE_PACKAGE_ID: OnceCell<ObjectID> = OnceCell::new(); BRIDGE_PACKAGE_ID.get_or_init(|| match std::env::var("BRIDGE_PACKAGE_ID") { diff --git a/crates/sui-bridge/src/test_utils.rs b/crates/sui-bridge/src/test_utils.rs index 420356aaf88..09abab7492a 100644 --- a/crates/sui-bridge/src/test_utils.rs +++ b/crates/sui-bridge/src/test_utils.rs @@ -1,48 +1,53 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::abi::EthToSuiTokenBridgeV1; -use crate::eth_mock_provider::EthMockProvider; -use crate::events::SuiBridgeEvent; -use crate::server::mock_handler::run_mock_server; -use crate::sui_transaction_builder::{ - get_bridge_package_id, get_root_bridge_object_arg, get_sui_token_type_tag, +use std::{ + collections::BTreeMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, }; -use crate::types::BridgeInnerDynamicField; -use crate::{ - crypto::{BridgeAuthorityKeyPair, BridgeAuthorityPublicKey, BridgeAuthoritySignInfo}, - events::EmittedSuiToEthTokenBridgeV1, - server::mock_handler::BridgeRequestMockHandler, + +use ethers::{ + abi::{long_signature, ParamType}, types::{ - BridgeAction, BridgeAuthority, BridgeChainId, EthToSuiBridgeAction, SignedBridgeAction, - SuiToEthBridgeAction, TokenId, + Address as EthAddress, Block, BlockNumber, Filter, FilterBlockOption, Log, + TransactionReceipt, TxHash, ValueOrArray, U64, }, }; -use ethers::abi::{long_signature, ParamType}; -use ethers::types::Address as EthAddress; -use ethers::types::{ - Block, BlockNumber, Filter, FilterBlockOption, Log, TransactionReceipt, TxHash, ValueOrArray, - U64, +use fastcrypto::{ + encoding::{Encoding, Hex}, + traits::KeyPair, }; -use fastcrypto::encoding::{Encoding, Hex}; -use fastcrypto::traits::KeyPair; use hex_literal::hex; -use std::collections::BTreeMap; -use std::net::IpAddr; -use std::net::Ipv4Addr; -use std::net::SocketAddr; -use std::path::PathBuf; use sui_config::local_ip_utils; use sui_json_rpc_types::ObjectChange; use sui_sdk::wallet_context::WalletContext; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::ObjectRef; -use sui_types::object::Owner; -use sui_types::transaction::{CallArg, ObjectArg}; -use sui_types::SUI_FRAMEWORK_PACKAGE_ID; -use sui_types::{base_types::SuiAddress, crypto::get_key_pair, digests::TransactionDigest}; +use sui_types::{ + base_types::{ObjectRef, SuiAddress}, + crypto::get_key_pair, + digests::TransactionDigest, + object::Owner, + transaction::{CallArg, ObjectArg}, + SUI_FRAMEWORK_PACKAGE_ID, +}; use tokio::task::JoinHandle; +use crate::{ + abi::EthToSuiTokenBridgeV1, + crypto::{BridgeAuthorityKeyPair, BridgeAuthorityPublicKey, BridgeAuthoritySignInfo}, + eth_mock_provider::EthMockProvider, + events::{EmittedSuiToEthTokenBridgeV1, SuiBridgeEvent}, + server::mock_handler::{run_mock_server, BridgeRequestMockHandler}, + sui_transaction_builder::{ + get_bridge_package_id, get_root_bridge_object_arg, get_sui_token_type_tag, + }, + types::{ + BridgeAction, BridgeAuthority, BridgeChainId, BridgeInnerDynamicField, + EthToSuiBridgeAction, SignedBridgeAction, SuiToEthBridgeAction, TokenId, + }, +}; + pub fn get_test_authority_and_key( voting_power: u64, port: u16, @@ -142,8 +147,8 @@ pub fn mock_last_finalized_block(mock_provider: &EthMockProvider, block_number: .unwrap(); } -// Mocks eth_getLogs and eth_getTransactionReceipt for the given address and block range. -// The input log needs to have transaction_hash set. +// Mocks eth_getLogs and eth_getTransactionReceipt for the given address and +// block range. The input log needs to have transaction_hash set. 
pub fn mock_get_logs( mock_provider: &EthMockProvider, address: EthAddress, @@ -216,9 +221,9 @@ pub fn get_test_log_and_action( ParamType::Bytes, ], ), - hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), // chain id: sui testnet - hex!("0000000000000000000000000000000000000000000000000000000000000010").into(), // nonce: 16 - hex!("000000000000000000000000000000000000000000000000000000000000000b").into(), // chain id: sepolia + hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), /* chain id: sui testnet */ + hex!("0000000000000000000000000000000000000000000000000000000000000010").into(), /* nonce: 16 */ + hex!("000000000000000000000000000000000000000000000000000000000000000b").into(), /* chain id: sepolia */ ], data: encoded.into(), block_hash: Some(TxHash::random()), diff --git a/crates/sui-bridge/src/tools/cli.rs b/crates/sui-bridge/src/tools/cli.rs index 1fcc3c4cd9b..bb7fd006ee6 100644 --- a/crates/sui-bridge/src/tools/cli.rs +++ b/crates/sui-bridge/src/tools/cli.rs @@ -1,20 +1,22 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::path::PathBuf; + use anyhow::anyhow; use clap::*; -use fastcrypto::ed25519::Ed25519KeyPair; -use fastcrypto::secp256k1::Secp256k1KeyPair; -use fastcrypto::traits::EncodeDecodeBase64; -use std::path::PathBuf; -use sui_bridge::config::BridgeNodeConfig; -use sui_bridge::crypto::BridgeAuthorityKeyPair; -use sui_bridge::crypto::BridgeAuthorityPublicKeyBytes; +use fastcrypto::{ + ed25519::Ed25519KeyPair, secp256k1::Secp256k1KeyPair, traits::EncodeDecodeBase64, +}; +use sui_bridge::{ + config::BridgeNodeConfig, + crypto::{BridgeAuthorityKeyPair, BridgeAuthorityPublicKeyBytes}, +}; use sui_config::Config; -use sui_types::base_types::ObjectID; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::get_key_pair; -use sui_types::crypto::SuiKeyPair; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + crypto::{get_key_pair, SuiKeyPair}, +}; #[derive(Parser)] #[clap(rename_all = "kebab-case")] @@ -66,7 +68,8 @@ async fn main() -> anyhow::Result<()> { Ok(()) } -/// Generate Bridge Authority key (Secp256k1KeyPair) and write to a file as base64 encoded `privkey`. +/// Generate Bridge Authority key (Secp256k1KeyPair) and write to a file as +/// base64 encoded `privkey`. fn generate_bridge_authority_key_and_write_to_file(path: &PathBuf) -> Result<(), anyhow::Error> { let (_, kp): (_, BridgeAuthorityKeyPair) = get_key_pair(); let eth_address = BridgeAuthorityPublicKeyBytes::from(&kp.public).to_eth_address(); @@ -84,7 +87,8 @@ fn generate_bridge_authority_key_and_write_to_file(path: &PathBuf) -> Result<(), .map_err(|err| anyhow!("Failed to write encoded key to path: {:?}", err)) } -/// Generate Bridge Client key (Secp256k1KeyPair or Ed25519KeyPair) and write to a file as base64 encoded `flag || privkey`. +/// Generate Bridge Client key (Secp256k1KeyPair or Ed25519KeyPair) and write to +/// a file as base64 encoded `flag || privkey`. fn generate_bridge_client_key_and_write_to_file( path: &PathBuf, use_ecdsa: bool, diff --git a/crates/sui-bridge/src/types.rs b/crates/sui-bridge/src/types.rs index 8c6cbd3ec73..cdc182d557d 100644 --- a/crates/sui-bridge/src/types.rs +++ b/crates/sui-bridge/src/types.rs @@ -1,33 +1,34 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::abi::EthToSuiTokenBridgeV1; -use crate::crypto::BridgeAuthorityPublicKeyBytes; -use crate::crypto::{ - BridgeAuthorityPublicKey, BridgeAuthorityRecoverableSignature, BridgeAuthoritySignInfo, -}; -use crate::error::{BridgeError, BridgeResult}; -use crate::events::EmittedSuiToEthTokenBridgeV1; -use ethers::types::Address as EthAddress; -use ethers::types::Log; -use ethers::types::H256; +use std::collections::{BTreeMap, BTreeSet}; + pub use ethers::types::H256 as EthTransactionHash; +use ethers::types::{Address as EthAddress, Log, H256}; use fastcrypto::hash::{HashFunction, Keccak256}; use num_enum::TryFromPrimitive; -use rand::seq::SliceRandom; -use rand::Rng; +use rand::{seq::SliceRandom, Rng}; use serde::{Deserialize, Serialize}; use shared_crypto::intent::IntentScope; -use std::collections::{BTreeMap, BTreeSet}; -use sui_types::base_types::SuiAddress; -use sui_types::collection_types::{Bag, LinkedTable, LinkedTableNode, VecMap}; -use sui_types::committee::CommitteeTrait; -use sui_types::committee::StakeUnit; -use sui_types::digests::{Digest, TransactionDigest}; -use sui_types::dynamic_field::Field; -use sui_types::error::SuiResult; -use sui_types::message_envelope::{Envelope, Message, VerifiedEnvelope}; -use sui_types::{base_types::SUI_ADDRESS_LENGTH, committee::EpochId}; +use sui_types::{ + base_types::{SuiAddress, SUI_ADDRESS_LENGTH}, + collection_types::{Bag, LinkedTable, LinkedTableNode, VecMap}, + committee::{CommitteeTrait, EpochId, StakeUnit}, + digests::{Digest, TransactionDigest}, + dynamic_field::Field, + error::SuiResult, + message_envelope::{Envelope, Message, VerifiedEnvelope}, +}; + +use crate::{ + abi::EthToSuiTokenBridgeV1, + crypto::{ + BridgeAuthorityPublicKey, BridgeAuthorityPublicKeyBytes, + BridgeAuthorityRecoverableSignature, BridgeAuthoritySignInfo, + }, + error::{BridgeError, BridgeResult}, + events::EmittedSuiToEthTokenBridgeV1, +}; pub const BRIDGE_AUTHORITY_TOTAL_VOTING_POWER: u64 = 10000; @@ -382,8 +383,8 @@ impl EmergencyAction { pub struct LimitUpdateAction { pub nonce: u64, // The chain id that will receive this signed action. It's also the destination chain id - // for the limit update. For example, if chain_id is EthMainnet and sending_chain_id is SuiMainnet, - // it means we want to update the limit for the SuiMainnet to EthMainnet route. + // for the limit update. For example, if chain_id is EthMainnet and sending_chain_id is + // SuiMainnet, it means we want to update the limit for the SuiMainnet to EthMainnet route. pub chain_id: BridgeChainId, // The sending chain id for the limit update. 
pub sending_chain_id: BridgeChainId, @@ -704,21 +705,25 @@ pub struct MoveTypeBridgeRecord { #[cfg(test)] mod tests { - use crate::{test_utils::get_test_authority_and_key, types::TokenId}; - use ethers::abi::ParamType; - use ethers::types::{Address as EthAddress, TxHash}; - use fastcrypto::encoding::Hex; - use fastcrypto::hash::HashFunction; - use fastcrypto::traits::ToFromBytes; - use fastcrypto::{encoding::Encoding, traits::KeyPair}; - use prometheus::Registry; use std::{collections::HashSet, str::FromStr}; + + use ethers::{ + abi::ParamType, + types::{Address as EthAddress, TxHash}, + }; + use fastcrypto::{ + encoding::{Encoding, Hex}, + hash::HashFunction, + traits::{KeyPair, ToFromBytes}, + }; + use prometheus::Registry; use sui_types::{ base_types::{SuiAddress, TransactionDigest}, crypto::get_key_pair, }; use super::*; + use crate::{test_utils::get_test_authority_and_key, types::TokenId}; #[test] fn test_bridge_message_encoding() -> anyhow::Result<()> { @@ -794,8 +799,8 @@ mod tests { } #[test] - fn test_bridge_message_encoding_regression_emitted_sui_to_eth_token_bridge_v1( - ) -> anyhow::Result<()> { + fn test_bridge_message_encoding_regression_emitted_sui_to_eth_token_bridge_v1() + -> anyhow::Result<()> { telemetry_subscribers::init_for_testing(); let registry = Registry::new(); mysten_metrics::init_metrics(®istry); @@ -861,18 +866,16 @@ mod tests { blocklisted_members: vec![pub_key_bytes.clone()], }); let bytes = blocklist_action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 01: msg type - 01: msg version - 0000000000000081: nonce - 03: chain id - 00: blocklist type - 01: length of updated members - [ - 68b43fd906c0b8f024a18c56e06744f7c6157c65 - ]: blocklisted members abi-encoded - */ + // 5355495f4252494447455f4d455353414745: prefix + // 01: msg type + // 01: msg version + // 0000000000000081: nonce + // 03: chain id + // 00: blocklist type + // 01: length of updated members + // [ + // 68b43fd906c0b8f024a18c56e06744f7c6157c65 + // ]: blocklisted members abi-encoded assert_eq!(bytes, Hex::decode("5355495f4252494447455f4d4553534147450101000000000000008103000168b43fd906c0b8f024a18c56e06744f7c6157c65").unwrap()); let pub_key_bytes_2 = BridgeAuthorityPublicKeyBytes::from_bytes( @@ -888,19 +891,17 @@ mod tests { blocklisted_members: vec![pub_key_bytes.clone(), pub_key_bytes_2.clone()], }); let bytes = blocklist_action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 01: msg type - 01: msg version - 0000000000000044: nonce - 02: chain id - 01: blocklist type - 02: length of updated members - [ - 68b43fd906c0b8f024a18c56e06744f7c6157c65 - acaef39832cb995c4e049437a3e2ec6a7bad1ab5 - ]: blocklisted members abi-encoded - */ + // 5355495f4252494447455f4d455353414745: prefix + // 01: msg type + // 01: msg version + // 0000000000000044: nonce + // 02: chain id + // 01: blocklist type + // 02: length of updated members + // [ + // 68b43fd906c0b8f024a18c56e06744f7c6157c65 + // acaef39832cb995c4e049437a3e2ec6a7bad1ab5 + // ]: blocklisted members abi-encoded assert_eq!(bytes, Hex::decode("5355495f4252494447455f4d4553534147450101000000000000004402010268b43fd906c0b8f024a18c56e06744f7c6157c65acaef39832cb995c4e049437a3e2ec6a7bad1ab5").unwrap()); let blocklist_action = BridgeAction::BlocklistCommitteeAction(BlocklistCommitteeAction { @@ -910,18 +911,16 @@ mod tests { blocklisted_members: vec![pub_key_bytes.clone()], }); let bytes = blocklist_action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 01: msg type - 01: msg version - 0000000000000031: 
nonce - 0c: chain id - 00: blocklist type - 01: length of updated members - [ - 68b43fd906c0b8f024a18c56e06744f7c6157c65 - ]: blocklisted members abi-encoded - */ + // 5355495f4252494447455f4d455353414745: prefix + // 01: msg type + // 01: msg version + // 0000000000000031: nonce + // 0c: chain id + // 00: blocklist type + // 01: length of updated members + // [ + // 68b43fd906c0b8f024a18c56e06744f7c6157c65 + // ]: blocklisted members abi-encoded assert_eq!(bytes, Hex::decode("5355495f4252494447455f4d455353414745010100000000000000310c000168b43fd906c0b8f024a18c56e06744f7c6157c65").unwrap()); let blocklist_action = BridgeAction::BlocklistCommitteeAction(BlocklistCommitteeAction { @@ -931,19 +930,17 @@ mod tests { blocklisted_members: vec![pub_key_bytes.clone(), pub_key_bytes_2.clone()], }); let bytes = blocklist_action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 01: msg type - 01: msg version - 000000000000005e: nonce - 0b: chain id - 01: blocklist type - 02: length of updated members - [ - 00000000000000000000000068b43fd906c0b8f024a18c56e06744f7c6157c65 - 000000000000000000000000acaef39832cb995c4e049437a3e2ec6a7bad1ab5 - ]: blocklisted members abi-encoded - */ + // 5355495f4252494447455f4d455353414745: prefix + // 01: msg type + // 01: msg version + // 000000000000005e: nonce + // 0b: chain id + // 01: blocklist type + // 02: length of updated members + // [ + // 00000000000000000000000068b43fd906c0b8f024a18c56e06744f7c6157c65 + // 000000000000000000000000acaef39832cb995c4e049437a3e2ec6a7bad1ab5 + // ]: blocklisted members abi-encoded assert_eq!(bytes, Hex::decode("5355495f4252494447455f4d4553534147450101000000000000005e0b010268b43fd906c0b8f024a18c56e06744f7c6157c65acaef39832cb995c4e049437a3e2ec6a7bad1ab5").unwrap()); } @@ -955,14 +952,12 @@ mod tests { action_type: EmergencyActionType::Pause, }); let bytes = action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 02: msg type - 01: msg version - 0000000000000037: nonce - 03: chain id - 00: action type - */ + // 5355495f4252494447455f4d455353414745: prefix + // 02: msg type + // 01: msg version + // 0000000000000037: nonce + // 03: chain id + // 00: action type assert_eq!( bytes, Hex::decode("5355495f4252494447455f4d455353414745020100000000000000370300").unwrap() @@ -974,14 +969,12 @@ mod tests { action_type: EmergencyActionType::Unpause, }); let bytes = action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 02: msg type - 01: msg version - 0000000000000038: nonce - 0b: chain id - 01: action type - */ + // 5355495f4252494447455f4d455353414745: prefix + // 02: msg type + // 01: msg version + // 0000000000000038: nonce + // 0b: chain id + // 01: action type assert_eq!( bytes, Hex::decode("5355495f4252494447455f4d455353414745020100000000000000380b01").unwrap() @@ -997,15 +990,13 @@ mod tests { new_usd_limit: 1_000_000 * USD_MULTIPLIER, // $1M USD }); let bytes = action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 03: msg type - 01: msg version - 000000000000000f: nonce - 03: chain id - 0c: sending chain id - 00000002540be400: new usd limit - */ + // 5355495f4252494447455f4d455353414745: prefix + // 03: msg type + // 01: msg version + // 000000000000000f: nonce + // 03: chain id + // 0c: sending chain id + // 00000002540be400: new usd limit assert_eq!( bytes, Hex::decode( @@ -1024,15 +1015,13 @@ mod tests { new_usd_price: 100_000 * USD_MULTIPLIER, // $100k USD }); let bytes = action.to_bytes(); - /* - 5355495f4252494447455f4d455353414745: prefix - 04: msg type - 01: 
msg version - 000000000000010a: nonce - 03: chain id - 01: token id - 000000003b9aca00: new usd price - */ + // 5355495f4252494447455f4d455353414745: prefix + // 04: msg type + // 01: msg version + // 000000000000010a: nonce + // 03: chain id + // 01: token id + // 000000003b9aca00: new usd price assert_eq!( bytes, Hex::decode( @@ -1044,7 +1033,8 @@ mod tests { #[test] fn test_evm_contract_upgrade_action() { - // Calldata with only the function selector and no parameters: `function initializeV2()` + // Calldata with only the function selector and no parameters: `function + // initializeV2()` let function_signature = "initializeV2()"; let selector = &Keccak256::digest(function_signature).digest[0..4]; let call_data = selector.to_vec(); @@ -1057,20 +1047,23 @@ mod tests { new_impl_address: EthAddress::repeat_byte(9), call_data, }); - /* - 5355495f4252494447455f4d455353414745: prefix - 05: msg type - 01: msg version - 000000000000007b: nonce - 0c: chain id - 0000000000000000000000000606060606060606060606060606060606060606: proxy address - 0000000000000000000000000909090909090909090909090909090909090909: new impl address - - 0000000000000000000000000000000000000000000000000000000000000060 - 0000000000000000000000000000000000000000000000000000000000000004 - 5cd8a76b00000000000000000000000000000000000000000000000000000000: call data - */ - assert_eq!(Hex::encode(action.to_bytes().clone()), "5355495f4252494447455f4d4553534147450501000000000000007b0c00000000000000000000000006060606060606060606060606060606060606060000000000000000000000000909090909090909090909090909090909090909000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000045cd8a76b00000000000000000000000000000000000000000000000000000000"); + // 5355495f4252494447455f4d455353414745: prefix + // 05: msg type + // 01: msg version + // 000000000000007b: nonce + // 0c: chain id + // 0000000000000000000000000606060606060606060606060606060606060606: proxy + // address + // 0000000000000000000000000909090909090909090909090909090909090909: new impl + // address + // + // 0000000000000000000000000000000000000000000000000000000000000060 + // 0000000000000000000000000000000000000000000000000000000000000004 + // 5cd8a76b00000000000000000000000000000000000000000000000000000000: call data + assert_eq!( + Hex::encode(action.to_bytes().clone()), + "5355495f4252494447455f4d4553534147450501000000000000007b0c00000000000000000000000006060606060606060606060606060606060606060000000000000000000000000909090909090909090909090909090909090909000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000045cd8a76b00000000000000000000000000000000000000000000000000000000" + ); // Calldata with one parameter: `function newMockFunction(bool)` let function_signature = "newMockFunction(bool)"; @@ -1088,21 +1081,24 @@ mod tests { new_impl_address: EthAddress::repeat_byte(9), call_data, }); - /* - 5355495f4252494447455f4d455353414745: prefix - 05: msg type - 01: msg version - 000000000000007b: nonce - 0c: chain id - 0000000000000000000000000606060606060606060606060606060606060606: proxy address - 0000000000000000000000000909090909090909090909090909090909090909: new impl address - - 0000000000000000000000000000000000000000000000000000000000000060 - 0000000000000000000000000000000000000000000000000000000000000024 - 417795ef00000000000000000000000000000000000000000000000000000000 - 0000000100000000000000000000000000000000000000000000000000000000: 
call data - */ - assert_eq!(Hex::encode(action.to_bytes().clone()), "5355495f4252494447455f4d4553534147450501000000000000007b0c0000000000000000000000000606060606060606060606060606060606060606000000000000000000000000090909090909090909090909090909090909090900000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000024417795ef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000"); + // 5355495f4252494447455f4d455353414745: prefix + // 05: msg type + // 01: msg version + // 000000000000007b: nonce + // 0c: chain id + // 0000000000000000000000000606060606060606060606060606060606060606: proxy + // address + // 0000000000000000000000000909090909090909090909090909090909090909: new impl + // address + // + // 0000000000000000000000000000000000000000000000000000000000000060 + // 0000000000000000000000000000000000000000000000000000000000000024 + // 417795ef00000000000000000000000000000000000000000000000000000000 + // 0000000100000000000000000000000000000000000000000000000000000000: call data + assert_eq!( + Hex::encode(action.to_bytes().clone()), + "5355495f4252494447455f4d4553534147450501000000000000007b0c0000000000000000000000000606060606060606060606060606060606060606000000000000000000000000090909090909090909090909090909090909090900000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000024417795ef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000" + ); // Calldata with two parameters: `function newerMockFunction(bool, uint8)` let function_signature = "newMockFunction(bool,uint8)"; @@ -1123,22 +1119,25 @@ mod tests { new_impl_address: EthAddress::repeat_byte(9), call_data, }); - /* - 5355495f4252494447455f4d455353414745: prefix - 05: msg type - 01: msg version - 000000000000007b: nonce - 0c: chain id - 0000000000000000000000000606060606060606060606060606060606060606: proxy address - 0000000000000000000000000909090909090909090909090909090909090909: new impl address - - 0000000000000000000000000000000000000000000000000000000000000060 - 0000000000000000000000000000000000000000000000000000000000000044 - be8fc25d00000000000000000000000000000000000000000000000000000000 - 0000000100000000000000000000000000000000000000000000000000000000 - 0000002a00000000000000000000000000000000000000000000000000000000: call data - */ - assert_eq!(Hex::encode(action.to_bytes().clone()), "5355495f4252494447455f4d4553534147450501000000000000007b0c0000000000000000000000000606060606060606060606060606060606060606000000000000000000000000090909090909090909090909090909090909090900000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044be8fc25d0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002a00000000000000000000000000000000000000000000000000000000"); + // 5355495f4252494447455f4d455353414745: prefix + // 05: msg type + // 01: msg version + // 000000000000007b: nonce + // 0c: chain id + // 0000000000000000000000000606060606060606060606060606060606060606: proxy + // address + // 0000000000000000000000000909090909090909090909090909090909090909: new impl + // address + // + // 0000000000000000000000000000000000000000000000000000000000000060 + // 0000000000000000000000000000000000000000000000000000000000000044 + // 
be8fc25d00000000000000000000000000000000000000000000000000000000 + // 0000000100000000000000000000000000000000000000000000000000000000 + // 0000002a00000000000000000000000000000000000000000000000000000000: call data + assert_eq!( + Hex::encode(action.to_bytes().clone()), + "5355495f4252494447455f4d4553534147450501000000000000007b0c0000000000000000000000000606060606060606060606060606060606060606000000000000000000000000090909090909090909090909090909090909090900000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044be8fc25d0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002a00000000000000000000000000000000000000000000000000000000" + ); // Empty calldate let action = BridgeAction::EvmContractUpgradeAction(EvmContractUpgradeAction { @@ -1148,20 +1147,23 @@ mod tests { new_impl_address: EthAddress::repeat_byte(9), call_data: vec![], }); - /* - 5355495f4252494447455f4d455353414745: prefix - 05: msg type - 01: msg version - 000000000000007b: nonce - 0c: chain id - 0000000000000000000000000606060606060606060606060606060606060606: proxy address - 0000000000000000000000000909090909090909090909090909090909090909: new impl address - - 0000000000000000000000000000000000000000000000000000000000000060 - 0000000000000000000000000000000000000000000000000000000000000000: call data - */ + // 5355495f4252494447455f4d455353414745: prefix + // 05: msg type + // 01: msg version + // 000000000000007b: nonce + // 0c: chain id + // 0000000000000000000000000606060606060606060606060606060606060606: proxy + // address + // 0000000000000000000000000909090909090909090909090909090909090909: new impl + // address + // + // 0000000000000000000000000000000000000000000000000000000000000060 + // 0000000000000000000000000000000000000000000000000000000000000000: call data let data = action.to_bytes(); - assert_eq!(Hex::encode(data.clone()), "5355495f4252494447455f4d4553534147450501000000000000007b0c0000000000000000000000000606060606060606060606060606060606060606000000000000000000000000090909090909090909090909090909090909090900000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000"); + assert_eq!( + Hex::encode(data.clone()), + "5355495f4252494447455f4d4553534147450501000000000000007b0c0000000000000000000000000606060606060606060606060606060606060606000000000000000000000000090909090909090909090909090909090909090900000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000" + ); let types = vec![ParamType::Address, ParamType::Address, ParamType::Bytes]; // Ensure that the call data (start from bytes 29) can be decoded ethers::abi::decode(&types, &data[29..]).unwrap(); diff --git a/crates/sui-cluster-test/src/cluster.rs b/crates/sui-cluster-test/src/cluster.rs index 45cae6952f8..c0dc44e5b41 100644 --- a/crates/sui-cluster-test/src/cluster.rs +++ b/crates/sui-cluster-test/src/cluster.rs @@ -1,28 +1,30 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::config::{ClusterTestOpt, Env}; +use std::{net::SocketAddr, path::Path}; + use async_trait::async_trait; -use std::net::SocketAddr; -use std::path::Path; -use sui_config::Config; -use sui_config::{PersistedConfig, SUI_KEYSTORE_FILENAME, SUI_NETWORK_CONFIG}; -use sui_graphql_rpc::config::ConnectionConfig; -use sui_graphql_rpc::test_infra::cluster::start_graphql_server_with_fn_rpc; +use sui_config::{Config, PersistedConfig, SUI_KEYSTORE_FILENAME, SUI_NETWORK_CONFIG}; +use sui_graphql_rpc::{ + config::ConnectionConfig, test_infra::cluster::start_graphql_server_with_fn_rpc, +}; use sui_indexer::test_utils::{start_test_indexer, ReaderWriterConfig}; use sui_keys::keystore::{AccountKeystore, FileBasedKeystore, Keystore}; -use sui_sdk::sui_client_config::{SuiClientConfig, SuiEnv}; -use sui_sdk::wallet_context::WalletContext; +use sui_sdk::{ + sui_client_config::{SuiClientConfig, SuiEnv}, + wallet_context::WalletContext, +}; use sui_swarm::memory::Swarm; -use sui_swarm_config::genesis_config::GenesisConfig; -use sui_swarm_config::network_config::NetworkConfig; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::KeypairTraits; -use sui_types::crypto::SuiKeyPair; -use sui_types::crypto::{get_key_pair, AccountKeyPair}; +use sui_swarm_config::{genesis_config::GenesisConfig, network_config::NetworkConfig}; +use sui_types::{ + base_types::SuiAddress, + crypto::{get_key_pair, AccountKeyPair, KeypairTraits, SuiKeyPair}, +}; use test_cluster::{TestCluster, TestClusterBuilder}; use tracing::info; +use super::config::{ClusterTestOpt, Env}; + const DEVNET_FAUCET_ADDR: &str = "https://faucet.devnet.sui.io:443"; const STAGING_FAUCET_ADDR: &str = "https://faucet.staging.sui.io:443"; const CONTINUOUS_FAUCET_ADDR: &str = "https://faucet.ci.sui.io:443"; @@ -253,7 +255,8 @@ impl Cluster for LocalNewCluster { start_graphql_server_with_fn_rpc( graphql_connection_config.clone(), Some(fullnode_url.clone()), - /* cancellation_token */ None, + // cancellation_token + None, ) .await; } diff --git a/crates/sui-cluster-test/src/config.rs b/crates/sui-cluster-test/src/config.rs index a40c72ff2af..b846a367b77 100644 --- a/crates/sui-cluster-test/src/config.rs +++ b/crates/sui-cluster-test/src/config.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{fmt, path::PathBuf}; + use clap::*; use regex::Regex; -use std::{fmt, path::PathBuf}; #[derive(Parser, Clone, ValueEnum, Debug)] pub enum Env { diff --git a/crates/sui-cluster-test/src/faucet.rs b/crates/sui-cluster-test/src/faucet.rs index 5025aaef9c7..c9a06b32234 100644 --- a/crates/sui-cluster-test/src/faucet.rs +++ b/crates/sui-cluster-test/src/faucet.rs @@ -1,20 +1,19 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::cluster::{new_wallet_context_from_cluster, Cluster}; +use std::{collections::HashMap, env, sync::Arc}; + use async_trait::async_trait; use fastcrypto::encoding::{Encoding, Hex}; -use std::collections::HashMap; -use std::env; -use std::sync::Arc; use sui_faucet::{ BatchFaucetResponse, BatchStatusFaucetResponse, Faucet, FaucetConfig, FaucetResponse, SimpleFaucet, }; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::KeypairTraits; +use sui_types::{base_types::SuiAddress, crypto::KeypairTraits}; use tracing::{debug, info, info_span, Instrument}; use uuid::Uuid; +use super::cluster::{new_wallet_context_from_cluster, Cluster}; + pub struct FaucetClientFactory; impl FaucetClientFactory { diff --git a/crates/sui-cluster-test/src/helper.rs b/crates/sui-cluster-test/src/helper.rs index c462099dd1f..087e7992759 100644 --- a/crates/sui-cluster-test/src/helper.rs +++ b/crates/sui-cluster-test/src/helper.rs @@ -5,9 +5,10 @@ use anyhow::bail; use move_core_types::language_storage::TypeTag; use sui_json_rpc_types::{BalanceChange, SuiData, SuiObjectData, SuiObjectDataOptions}; use sui_sdk::SuiClient; -use sui_types::error::SuiObjectResponseError; -use sui_types::gas_coin::GasCoin; -use sui_types::{base_types::ObjectID, object::Owner, parse_sui_type_tag}; +use sui_types::{ + base_types::ObjectID, error::SuiObjectResponseError, gas_coin::GasCoin, object::Owner, + parse_sui_type_tag, +}; use tracing::{debug, trace}; /// A util struct that helps verify Sui Object. diff --git a/crates/sui-cluster-test/src/lib.rs b/crates/sui-cluster-test/src/lib.rs index 6c299f0d7b8..6180a6b7f44 100644 --- a/crates/sui-cluster-test/src/lib.rs +++ b/crates/sui-cluster-test/src/lib.rs @@ -1,30 +1,29 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::faucet::{FaucetClient, FaucetClientFactory}; +use std::sync::Arc; + use async_trait::async_trait; use cluster::{Cluster, ClusterFactory}; use config::ClusterTestOpt; use futures::{stream::FuturesUnordered, StreamExt}; use helper::ObjectChecker; -use jsonrpsee::core::params::ArrayParams; -use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder}; -use std::sync::Arc; +use jsonrpsee::{ + core::{client::ClientT, params::ArrayParams}, + http_client::HttpClientBuilder, +}; use sui_faucet::CoinInfo; use sui_json_rpc_types::{ SuiExecutionStatus, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, TransactionBlockBytes, }; -use sui_sdk::wallet_context::WalletContext; +use sui_sdk::{wallet_context::WalletContext, SuiClient}; use sui_test_transaction_builder::batch_make_transfer_transactions; -use sui_types::base_types::TransactionDigest; -use sui_types::object::Owner; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; - -use sui_sdk::SuiClient; -use sui_types::gas_coin::GasCoin; use sui_types::{ - base_types::SuiAddress, + base_types::{SuiAddress, TransactionDigest}, + gas_coin::GasCoin, + object::Owner, + quorum_driver_types::ExecuteTransactionRequestType, + sui_system_state::sui_system_state_summary::SuiSystemStateSummary, transaction::{Transaction, TransactionData}, }; use test_case::{ @@ -37,6 +36,8 @@ use tokio::time::{self, Duration}; use tracing::{error, info}; use wallet_client::WalletClient; +use crate::faucet::{FaucetClient, FaucetClientFactory}; + pub mod cluster; pub mod config; pub mod faucet; diff --git a/crates/sui-cluster-test/src/test_case/coin_index_test.rs b/crates/sui-cluster-test/src/test_case/coin_index_test.rs index e3977af2602..a95f60ea375 100644 --- a/crates/sui-cluster-test/src/test_case/coin_index_test.rs +++ b/crates/sui-cluster-test/src/test_case/coin_index_test.rs @@ -1,24 +1,28 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0
 
-use crate::{TestCaseImpl, TestContext};
+use std::collections::HashMap;
+
 use async_trait::async_trait;
 use jsonrpsee::rpc_params;
 use move_core_types::language_storage::StructTag;
 use serde_json::json;
-use std::collections::HashMap;
 use sui_core::test_utils::compile_managed_coin_package;
 use sui_json::SuiJsonValue;
-use sui_json_rpc_types::ObjectChange;
-use sui_json_rpc_types::SuiTransactionBlockResponse;
-use sui_json_rpc_types::{Balance, SuiTransactionBlockResponseOptions};
+use sui_json_rpc_types::{
+    Balance, ObjectChange, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions,
+};
 use sui_test_transaction_builder::make_staking_transaction;
-use sui_types::base_types::{ObjectID, ObjectRef};
-use sui_types::gas_coin::GAS;
-use sui_types::object::Owner;
-use sui_types::quorum_driver_types::ExecuteTransactionRequestType;
+use sui_types::{
+    base_types::{ObjectID, ObjectRef},
+    gas_coin::GAS,
+    object::Owner,
+    quorum_driver_types::ExecuteTransactionRequestType,
+};
 use tracing::info;
 
+use crate::{TestCaseImpl, TestContext};
+
 pub struct CoinIndexTest;
 
 #[async_trait]
@@ -613,10 +617,12 @@ impl TestCaseImpl for CoinIndexTest {
             managed_coins_12_39.data.last().unwrap().coin_object_id,
             last_managed_coin
         );
-        assert!(!managed_coins_12_39
-            .data
-            .iter()
-            .any(|coin| coin.coin_object_id == removed_coin_id));
+        assert!(
+            !managed_coins_12_39
+                .data
+                .iter()
+                .any(|coin| coin.coin_object_id == removed_coin_id)
+        );
         assert!(!managed_coins_12_39.has_next_page);
 
         // =========================== Test Get Coins Ends ===========================
diff --git a/crates/sui-cluster-test/src/test_case/coin_merge_split_test.rs b/crates/sui-cluster-test/src/test_case/coin_merge_split_test.rs
index a43d905b5d4..1dee0e9a787 100644
--- a/crates/sui-cluster-test/src/test_case/coin_merge_split_test.rs
+++ b/crates/sui-cluster-test/src/test_case/coin_merge_split_test.rs
@@ -1,15 +1,18 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::{helper::ObjectChecker, TestCaseImpl, TestContext};
 use async_trait::async_trait;
 use jsonrpsee::rpc_params;
 use sui_json_rpc_types::{SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse};
-use sui_types::base_types::{ObjectID, SuiAddress};
-use sui_types::object::Owner;
-use sui_types::sui_serde::BigInt;
+use sui_types::{
+    base_types::{ObjectID, SuiAddress},
+    object::Owner,
+    sui_serde::BigInt,
+};
 use tracing::{debug, info};
 
+use crate::{helper::ObjectChecker, TestCaseImpl, TestContext};
+
 pub struct CoinMergeSplitTest;
 
 #[async_trait]
@@ -61,7 +64,8 @@ impl TestCaseImpl for CoinMergeSplitTest {
         info!("Testing coin merge.");
         let mut coins_merged = Vec::new();
         let mut txes = Vec::new();
-        // We on purpose linearize the merge operations, otherwise the primary coin may be locked
+        // We deliberately linearize the merge operations; otherwise the primary coin
+        // may be locked.
         for new_coin in new_coins {
             let coin_to_merge = new_coin.reference.object_id;
             debug!(
diff --git a/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs b/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs
index a53c685c46c..bf37dacac4b 100644
--- a/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs
+++ b/crates/sui-cluster-test/src/test_case/fullnode_build_publish_transaction_test.rs
@@ -1,13 +1,14 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
 
-use crate::{TestCaseImpl, TestContext};
 use async_trait::async_trait;
 use jsonrpsee::rpc_params;
 use sui_core::test_utils::compile_basics_package;
 use sui_json_rpc_types::SuiTransactionBlockEffectsAPI;
 use sui_types::{base_types::ObjectID, object::Owner};
 
+use crate::{TestCaseImpl, TestContext};
+
 pub struct FullNodeBuildPublishTransactionTest;
 
 #[async_trait]
diff --git a/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs b/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs
index a12f836348e..71ae0698d1e 100644
--- a/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs
+++ b/crates/sui-cluster-test/src/test_case/fullnode_execute_transaction_test.rs
@@ -1,7 +1,6 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::{TestCaseImpl, TestContext};
 use async_trait::async_trait;
 use sui_json_rpc_types::{
     SuiExecutionStatus, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponseOptions,
@@ -12,6 +11,8 @@ use sui_types::{
 };
 use tracing::info;
 
+use crate::{TestCaseImpl, TestContext};
+
 pub struct FullNodeExecuteTransactionTest;
 
 impl FullNodeExecuteTransactionTest {
diff --git a/crates/sui-cluster-test/src/test_case/native_transfer_test.rs b/crates/sui-cluster-test/src/test_case/native_transfer_test.rs
index 0641863d45a..6ce1f865f74 100644
--- a/crates/sui-cluster-test/src/test_case/native_transfer_test.rs
+++ b/crates/sui-cluster-test/src/test_case/native_transfer_test.rs
@@ -3,14 +3,13 @@
 
 use async_trait::async_trait;
 use jsonrpsee::rpc_params;
-use tracing::info;
-
 use sui_json_rpc_types::SuiTransactionBlockResponse;
 use sui_types::{
     base_types::{ObjectID, SuiAddress},
     crypto::{get_key_pair, AccountKeyPair},
     object::Owner,
 };
+use tracing::info;
 
 use crate::{
     helper::{BalanceChangeChecker, ObjectChecker},
@@ -81,15 +80,16 @@ impl NativeTransferTest {
         obj_to_transfer_id: ObjectID,
     ) {
         let balance_changes = &mut response.balance_changes.as_mut().unwrap();
-        // for transfer we only expect 2 balance changes, one for sender and one for recipient.
+        // For a transfer we only expect 2 balance changes: one for the sender and one
+        // for the recipient.
         assert_eq!(
             balance_changes.len(),
             2,
             "Expect 2 balance changes emitted, but got {}",
             balance_changes.len()
         );
-        // Order of balance change is not fixed so need to check who's balance come first.
-        // this make sure recipient always come first
+        // The order of balance changes is not fixed, so we check whose balance comes
+        // first; this makes sure the recipient always comes first.
         if balance_changes[0].owner.get_owner_address().unwrap() == signer {
             balance_changes.reverse()
         }
diff --git a/crates/sui-cluster-test/src/test_case/shared_object_test.rs b/crates/sui-cluster-test/src/test_case/shared_object_test.rs
index 02bab303b79..d7beabdbb71 100644
--- a/crates/sui-cluster-test/src/test_case/shared_object_test.rs
+++ b/crates/sui-cluster-test/src/test_case/shared_object_test.rs
@@ -1,7 +1,6 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::{helper::ObjectChecker, TestCaseImpl, TestContext}; use async_trait::async_trait; use sui_json_rpc_types::{SuiExecutionStatus, SuiTransactionBlockEffectsAPI}; use sui_sdk::wallet_context::WalletContext; @@ -9,6 +8,8 @@ use sui_test_transaction_builder::{increment_counter, publish_basics_package_and use sui_types::object::Owner; use tracing::info; +use crate::{helper::ObjectChecker, TestCaseImpl, TestContext}; + pub struct SharedCounterTest; #[async_trait] diff --git a/crates/sui-cluster-test/src/wallet_client.rs b/crates/sui-cluster-test/src/wallet_client.rs index 5ef59f2e5b6..b15ea3717a9 100644 --- a/crates/sui-cluster-test/src/wallet_client.rs +++ b/crates/sui-cluster-test/src/wallet_client.rs @@ -1,18 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::cluster::new_wallet_context_from_cluster; - -use super::Cluster; use shared_crypto::intent::Intent; use sui_keys::keystore::AccountKeystore; -use sui_sdk::wallet_context::WalletContext; -use sui_sdk::{SuiClient, SuiClientBuilder}; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::{KeypairTraits, Signature}; -use sui_types::transaction::TransactionData; +use sui_sdk::{wallet_context::WalletContext, SuiClient, SuiClientBuilder}; +use sui_types::{ + base_types::SuiAddress, + crypto::{KeypairTraits, Signature}, + transaction::TransactionData, +}; use tracing::{info, info_span, Instrument}; +use super::Cluster; +use crate::cluster::new_wallet_context_from_cluster; + pub struct WalletClient { wallet_context: WalletContext, address: SuiAddress, diff --git a/crates/sui-cluster-test/tests/local_cluster_test.rs b/crates/sui-cluster-test/tests/local_cluster_test.rs index a017857904a..1cb15f2e499 100644 --- a/crates/sui-cluster-test/tests/local_cluster_test.rs +++ b/crates/sui-cluster-test/tests/local_cluster_test.rs @@ -14,9 +14,10 @@ async fn cluster_test() { #[tokio::test] async fn test_sui_cluster() { use reqwest::StatusCode; - use sui_cluster_test::cluster::Cluster; - use sui_cluster_test::cluster::LocalNewCluster; - use sui_cluster_test::config::Env; + use sui_cluster_test::{ + cluster::{Cluster, LocalNewCluster}, + config::Env, + }; use sui_graphql_rpc::client::simple_client::SimpleClient; use tokio::time::sleep; let fullnode_rpc_port: u16 = 9020; diff --git a/crates/sui-common/src/authority_aggregation.rs b/crates/sui-common/src/authority_aggregation.rs index a267c7cd792..ebcb8124c9e 100644 --- a/crates/sui-common/src/authority_aggregation.rs +++ b/crates/sui-common/src/authority_aggregation.rs @@ -1,18 +1,20 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0
 
-use futures::Future;
-use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
-use mysten_metrics::monitored_future;
-use tracing::instrument::Instrument;
-
-use std::collections::{BTreeMap, BTreeSet};
-use std::sync::Arc;
-use std::time::Duration;
-use sui_types::base_types::ConciseableName;
-use sui_types::committee::{CommitteeTrait, StakeUnit};
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    sync::Arc,
+    time::Duration,
+};
+
+use futures::{future::BoxFuture, stream::FuturesUnordered, Future, StreamExt};
+use mysten_metrics::monitored_future;
+use sui_types::{
+    base_types::ConciseableName,
+    committee::{CommitteeTrait, StakeUnit},
+};
 use tokio::time::timeout;
+use tracing::instrument::Instrument;
 
 pub type AsyncResult<'a, T, E> = BoxFuture<'a, Result<T, E>>;
 
@@ -101,26 +103,31 @@ where
             }
         }
     }
-    // If we have exhausted all authorities and still have not returned a result, return
-    // error with the accumulated state.
+    // If we have exhausted all authorities and still have not returned a result,
+    // return an error with the accumulated state.
     Err(accumulated_state)
 }
 
-/// This function takes an initial state, than executes an asynchronous function (FMap) for each
-/// authority, and folds the results as they become available into the state using an async function (FReduce).
+/// This function takes an initial state, then executes an asynchronous function
+/// (FMap) for each authority, and folds the results as they become available
+/// into the state using an async function (FReduce).
 ///
-/// FMap can do io, and returns a result V. An error there may not be fatal, and could be consumed by the
-/// MReduce function to overall recover from it. This is necessary to ensure byzantine authorities cannot
-/// interrupt the logic of this function.
+/// FMap can do I/O, and returns a result V. An error there may not be fatal,
+/// and could be consumed by the FReduce function to recover from it overall.
+/// This is necessary to ensure byzantine authorities cannot interrupt the
+/// logic of this function.
 ///
-/// FReduce returns a result to a ReduceOutput. If the result is Err the function
-/// shortcuts and the Err is returned. An Ok ReduceOutput result can be used to shortcut and return
-/// the resulting state (ReduceOutput::End), continue the folding as new states arrive (ReduceOutput::Continue),
-/// or continue with a timeout maximum waiting time (ReduceOutput::ContinueWithTimeout).
+/// FReduce returns a Result of a ReduceOutput. If the result is Err the
+/// function shortcuts and the Err is returned. An Ok ReduceOutput result can be
+/// used to shortcut and return the resulting state (ReduceOutput::End),
+/// continue the folding as new states arrive (ReduceOutput::Continue),
+/// or continue with a timeout maximum waiting time
+/// (ReduceOutput::ContinueWithTimeout).
 ///
-/// This function provides a flexible way to communicate with a quorum of authorities, processing and
-/// processing their results into a safe overall result, and also safely allowing operations to continue
-/// past the quorum to ensure all authorities are up to date (up to a timeout).
+/// This function provides a flexible way to communicate with a quorum of
+/// authorities, folding their results into a safe overall result, and also
+/// safely allowing operations to continue past the quorum to ensure all
+/// authorities are up to date (up to a timeout).
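To make the FMap/FReduce contract above concrete, here is a minimal sketch of a caller that accumulates responsive stake until it reaches a quorum. The argument order, the closure arities, and the helper names are assumptions drawn only from the doc comment, not from the (elided) signature:

// Hedged sketch under the doc comment's contract; names are illustrative only.
let quorum = committee.quorum_threshold(); // assumed helper on the committee
let result = quorum_map_then_reduce_with_timeout(
    committee.clone(),
    authority_clients.clone(),
    0u64, // initial state: total stake that has answered so far
    // FMap: per-authority I/O; an Err reply is input to FReduce, not fatal.
    |_name, client| Box::pin(async move { client.ping().await }),
    // FReduce: fold each reply into the state and decide how to proceed.
    |mut stake, _name, weight, reply| {
        Box::pin(async move {
            if reply.is_ok() {
                stake += weight;
            }
            if stake >= quorum {
                Ok(ReduceOutput::End(stake)) // quorum reached: shortcut with the state
            } else {
                Ok(ReduceOutput::Continue(stake)) // keep folding new replies
            }
        })
    },
    Duration::from_secs(60), // assumed overall timeout budget
)
.await;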
pub async fn quorum_map_then_reduce_with_timeout<
    'a,
    C,
diff --git a/crates/sui-config/src/certificate_deny_config.rs b/crates/sui-config/src/certificate_deny_config.rs
index 820e2196d4e..a9293ac08df 100644
--- a/crates/sui-config/src/certificate_deny_config.rs
+++ b/crates/sui-config/src/certificate_deny_config.rs
@@ -1,24 +1,28 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use std::collections::HashSet;
+
 use once_cell::sync::OnceCell;
 use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
 use sui_types::base_types::TransactionDigest;
 
 #[derive(Clone, Debug, Default, Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
 pub struct CertificateDenyConfig {
-    /// A list of certificate digests that are known to be either deterministically crashing
-    /// every validator, or causing every validator to hang forever, i.e. there is no way
-    /// for such transaction to execute successfully today.
-    /// Now with this config, a validator will decide that this transaction will always yield
-    /// ExecutionError and charge gas accordingly.
-    /// This config is meant for a fast temporary fix for a known issue, and should be removed
-    /// once the issue is fixed. However, since a certificate once executed will be included
-    /// in checkpoints, all future executions of this transaction through replay must also lead
-    /// to the same result (i.e. ExecutionError). So when we remove this config, we need to make
-    /// sure it's added to the constant certificate deny list in the Rust code (TODO: code link).
+    /// A list of certificate digests that are known to be either
+    /// deterministically crashing every validator, or causing every
+    /// validator to hang forever, i.e. there is no way for such a transaction
+    /// to execute successfully today. Now with this config, a validator
+    /// will decide that this transaction will always yield ExecutionError
+    /// and charge gas accordingly. This config is meant as a fast
+    /// temporary fix for a known issue, and should be removed
+    /// once the issue is fixed. However, since a certificate once executed will
+    /// be included in checkpoints, all future executions of this
+    /// transaction through replay must also lead to the same result (i.e.
+    /// ExecutionError). So when we remove this config, we need to make sure
+    /// it's added to the constant certificate deny list in the Rust code (TODO:
+    /// code link).
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     certificate_deny_list: Vec<TransactionDigest>,
diff --git a/crates/sui-config/src/genesis.rs b/crates/sui-config/src/genesis.rs
index 39ddf41fa3d..f0a6b3eca75 100644
--- a/crates/sui-config/src/genesis.rs
+++ b/crates/sui-config/src/genesis.rs
@@ -1,33 +1,35 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
 
-use anyhow::{Context, Result};
-use fastcrypto::encoding::{Base64, Encoding};
-use fastcrypto::hash::HashFunction;
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use std::{fs, path::Path};
-use sui_types::authenticator_state::{get_authenticator_state, AuthenticatorStateInner};
-use sui_types::base_types::{ObjectID, SuiAddress};
-use sui_types::clock::Clock;
-use sui_types::committee::CommitteeWithNetworkMetadata;
-use sui_types::crypto::DefaultHash;
-use sui_types::deny_list::{get_coin_deny_list, PerTypeDenyList};
-use sui_types::effects::{TransactionEffects, TransactionEvents};
-use sui_types::gas_coin::TOTAL_SUPPLY_MIST;
-use sui_types::messages_checkpoint::{
-    CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary, VerifiedCheckpoint,
-};
-use sui_types::storage::ObjectStore;
-use sui_types::sui_system_state::{
-    get_sui_system_state, get_sui_system_state_wrapper, SuiSystemState, SuiSystemStateTrait,
-    SuiSystemStateWrapper, SuiValidatorGenesis,
+
+use anyhow::{Context, Result};
+use fastcrypto::{
+    encoding::{Base64, Encoding},
+    hash::HashFunction,
 };
-use sui_types::transaction::Transaction;
-use sui_types::SUI_RANDOMNESS_STATE_OBJECT_ID;
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use sui_types::{
-    committee::{Committee, EpochId, ProtocolVersion},
+    authenticator_state::{get_authenticator_state, AuthenticatorStateInner},
+    base_types::{ObjectID, SuiAddress},
+    clock::Clock,
+    committee::{Committee, CommitteeWithNetworkMetadata, EpochId, ProtocolVersion},
+    crypto::DefaultHash,
+    deny_list::{get_coin_deny_list, PerTypeDenyList},
+    effects::{TransactionEffects, TransactionEvents},
     error::SuiResult,
+    gas_coin::TOTAL_SUPPLY_MIST,
+    messages_checkpoint::{
+        CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary, VerifiedCheckpoint,
+    },
     object::Object,
+    storage::ObjectStore,
+    sui_system_state::{
+        get_sui_system_state, get_sui_system_state_wrapper, SuiSystemState, SuiSystemStateTrait,
+        SuiSystemStateWrapper, SuiValidatorGenesis,
+    },
+    transaction::Transaction,
+    SUI_RANDOMNESS_STATE_OBJECT_ID,
 };
 use tracing::trace;
 
@@ -51,7 +53,8 @@ pub struct UnsignedGenesis {
     pub objects: Vec<Object>,
 }
 
-// Hand implement PartialEq in order to get around the fact that AuthSigs don't impl Eq
+// Hand implement PartialEq in order to get around the fact that AuthSigs don't
+// impl Eq
 impl PartialEq for Genesis {
     fn eq(&self, other: &Self) -> bool {
         self.checkpoint.data() == other.checkpoint.data()
@@ -488,7 +491,9 @@ impl TokenDistributionSchedule {
         }
 
         if total_mist != TOTAL_SUPPLY_MIST {
-            panic!("TokenDistributionSchedule adds up to {total_mist} and not expected {TOTAL_SUPPLY_MIST}");
+            panic!(
+                "TokenDistributionSchedule adds up to {total_mist} and not expected {TOTAL_SUPPLY_MIST}"
+            );
         }
     }
 
@@ -503,8 +508,8 @@
         let mut validators: HashMap<SuiAddress, u64> = validators.into_iter().map(|a| (a, 0)).collect();
 
-        // Check that all allocations are for valid validators, while summing up all allocations
-        // for each validator
+        // Check that all allocations are for valid validators, while summing up all
+        // allocations for each validator
         for allocation in &self.allocations {
             if let Some(staked_with_validator) = &allocation.staked_with_validator {
                 *validators
@@ -514,12 +519,14 @@
             }
         }
 
-        // Check that all validators have sufficient stake allocated to ensure they meet the
-        // minimum stake threshold
+        // Check that all validators have sufficient stake allocated to ensure they meet
+        // the minimum stake threshold
         let minimum_required_stake = sui_types::governance::VALIDATOR_LOW_STAKE_THRESHOLD_MIST;
         for (validator, stake) in validators {
             if stake < minimum_required_stake {
-                panic!("validator {validator} has '{stake}' stake and does not meet the minimum required stake threshold of '{minimum_required_stake}'");
+                panic!(
+                    "validator {validator} has '{stake}' stake and does not meet the minimum required stake threshold of '{minimum_required_stake}'"
+                );
             }
         }
     }
@@ -553,9 +560,11 @@
     /// Helper to read a TokenDistributionSchedule from a csv file.
     ///
-    /// The file is encoded such that the final entry in the CSV file is used to denote the
-    /// allocation to the stake subsidy fund. It must be in the following format:
-    /// `0x0000000000000000000000000000000000000000000000000000000000000000,<amount>,`
+    /// The file is encoded such that the final entry in the CSV file is used to
+    /// denote the allocation to the stake subsidy fund. It must be in the
+    /// following format:
+    /// `0x0000000000000000000000000000000000000000000000000000000000000000,
+    /// <amount>,`
     ///
     /// All entries in a token distribution schedule must add up to 10B Sui.
     pub fn from_csv<R: std::io::Read>(reader: R) -> Result<Self> {
@@ -612,7 +621,8 @@
 pub struct TokenAllocation {
     pub recipient_address: SuiAddress,
     pub amount_mist: u64,
 
-    /// Indicates if this allocation should be staked at genesis and with which validator
+    /// Indicates if this allocation should be staked at genesis and with which
+    /// validator
     pub staked_with_validator: Option<SuiAddress>,
 }
 
diff --git a/crates/sui-config/src/lib.rs b/crates/sui-config/src/lib.rs
index a5beb3c7623..df424e9ee39 100644
--- a/crates/sui-config/src/lib.rs
+++ b/crates/sui-config/src/lib.rs
@@ -1,12 +1,13 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use anyhow::Context;
-use anyhow::Result;
-use serde::de::DeserializeOwned;
-use serde::Serialize;
-use std::fs;
-use std::path::{Path, PathBuf};
+use std::{
+    fs,
+    path::{Path, PathBuf},
+};
+
+use anyhow::{Context, Result};
+use serde::{de::DeserializeOwned, Serialize};
 use tracing::trace;
 
 pub mod certificate_deny_config;
diff --git a/crates/sui-config/src/local_ip_utils.rs b/crates/sui-config/src/local_ip_utils.rs
index 5e7d1298f36..eb8ae104a6d 100644
--- a/crates/sui-config/src/local_ip_utils.rs
+++ b/crates/sui-config/src/local_ip_utils.rs
@@ -4,10 +4,12 @@
 use std::net::SocketAddr;
 #[cfg(msim)]
 use std::sync::{atomic::AtomicI16, Arc};
+
 use sui_types::multiaddr::Multiaddr;
 
 /// A singleton struct to manage IP addresses and ports for simtest.
-/// This allows us to generate unique IP addresses and ports for each node in simtest.
+/// This allows us to generate unique IP addresses and ports for each node in
+/// simtest.
 #[cfg(msim)]
 pub struct SimAddressManager {
     next_ip_offset: AtomicI16,
@@ -65,15 +67,17 @@ pub fn localhost_for_testing() -> String {
 }
 
 /// Returns an available port for the given host in simtest.
-/// We don't care about host because it's all managed by simulator. Just obtain a unique port.
+/// We don't care about host because it's all managed by simulator. Just obtain
+/// a unique port.
 #[cfg(msim)]
 pub fn get_available_port(_host: &str) -> u16 {
     get_sim_address_manager().get_next_available_port()
 }
 
-/// Return an ephemeral, available port. On unix systems, the port returned will be in the
-/// TIME_WAIT state ensuring that the OS won't hand out this port for some grace period.
-/// Callers should be able to bind to this port given they use SO_REUSEADDR.
+/// Return an ephemeral, available port. On unix systems, the port returned will
+/// be in the TIME_WAIT state ensuring that the OS won't hand out this port for
+/// some grace period. Callers should be able to bind to this port given they
+/// use SO_REUSEADDR.
 #[cfg(not(msim))]
 pub fn get_available_port(host: &str) -> u16 {
     const MAX_PORT_RETRIES: u32 = 1000;
@@ -99,30 +103,34 @@ fn get_ephemeral_port(host: &str) -> std::io::Result<u16> {
     let listener = TcpListener::bind((host, 0))?;
     let addr = listener.local_addr()?;
 
-    // Create and accept a connection (which we'll promptly drop) in order to force the port
-    // into the TIME_WAIT state, ensuring that the port will be reserved from some limited
-    // amount of time (roughly 60s on some Linux systems)
+    // Create and accept a connection (which we'll promptly drop) in order to force
+    // the port into the TIME_WAIT state, ensuring that the port will be
+    // reserved for some limited amount of time (roughly 60s on some Linux
+    // systems)
     let _sender = TcpStream::connect(addr)?;
     let _incoming = listener.accept()?;
 
     Ok(addr.port())
 }
 
-/// Returns a new unique TCP address for the given host, by finding a new available port.
+/// Returns a new unique TCP address for the given host, by finding a new
+/// available port.
 pub fn new_tcp_address_for_testing(host: &str) -> Multiaddr {
     format!("/ip4/{}/tcp/{}/http", host, get_available_port(host))
         .parse()
         .unwrap()
 }
 
-/// Returns a new unique UDP address for the given host, by finding a new available port.
+/// Returns a new unique UDP address for the given host, by finding a new
+/// available port.
 pub fn new_udp_address_for_testing(host: &str) -> Multiaddr {
     format!("/ip4/{}/udp/{}", host, get_available_port(host))
         .parse()
         .unwrap()
 }
 
-/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a new available port on localhost.
+/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a
+/// new available port on localhost.
 pub fn new_local_tcp_socket_for_testing() -> SocketAddr {
     format!(
         "{}:{}",
@@ -133,12 +141,14 @@
     .unwrap()
 }
 
-/// Returns a new unique TCP address (Multiaddr) for localhost, by finding a new available port on localhost.
+/// Returns a new unique TCP address (Multiaddr) for localhost, by finding a new
+/// available port on localhost.
 pub fn new_local_tcp_address_for_testing() -> Multiaddr {
     new_tcp_address_for_testing(&localhost_for_testing())
 }
 
-/// Returns a new unique UDP address for localhost, by finding a new available port.
+/// Returns a new unique UDP address for localhost, by finding a new available
+/// port.
 pub fn new_local_udp_address_for_testing() -> Multiaddr {
     new_udp_address_for_testing(&localhost_for_testing())
 }
diff --git a/crates/sui-config/src/node.rs b/crates/sui-config/src/node.rs
index 870ef414717..cf2ce247006 100644
--- a/crates/sui-config/src/node.rs
+++ b/crates/sui-config/src/node.rs
@@ -1,38 +1,41 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::certificate_deny_config::CertificateDenyConfig; -use crate::genesis; -use crate::object_storage_config::ObjectStoreConfig; -use crate::p2p::P2pConfig; -use crate::transaction_deny_config::TransactionDenyConfig; -use crate::Config; +use std::{ + collections::{BTreeMap, BTreeSet}, + net::SocketAddr, + num::NonZeroUsize, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, + usize, +}; + use anyhow::Result; use narwhal_config::Parameters as ConsensusParameters; use once_cell::sync::OnceCell; use rand::rngs::OsRng; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::collections::{BTreeMap, BTreeSet}; -use std::net::SocketAddr; -use std::num::NonZeroUsize; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use std::time::Duration; -use std::usize; use sui_keys::keypair_file::{read_authority_keypair_from_file, read_keypair_from_file}; use sui_protocol_config::{Chain, SupportedProtocolVersions}; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::committee::EpochId; -use sui_types::crypto::AuthorityPublicKeyBytes; -use sui_types::crypto::KeypairTraits; -use sui_types::crypto::NetworkKeyPair; -use sui_types::crypto::SuiKeyPair; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; - -use sui_types::crypto::{get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair}; -use sui_types::multiaddr::Multiaddr; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + committee::EpochId, + crypto::{ + get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, AuthorityPublicKeyBytes, + KeypairTraits, NetworkKeyPair, SuiKeyPair, + }, + messages_checkpoint::CheckpointSequenceNumber, + multiaddr::Multiaddr, +}; use tracing::info; +use crate::{ + certificate_deny_config::CertificateDenyConfig, genesis, + object_storage_config::ObjectStoreConfig, p2p::P2pConfig, + transaction_deny_config::TransactionDenyConfig, Config, +}; + // Default max number of concurrent requests served pub const DEFAULT_GRPC_CONCURRENCY_LIMIT: usize = 20000000000; @@ -97,7 +100,8 @@ pub struct NodeConfig { #[serde(default = "default_authority_store_pruning_config")] pub authority_store_pruning_config: AuthorityStorePruningConfig, - /// Size of the broadcast channel used for notifying other systems of end of epoch. + /// Size of the broadcast channel used for notifying other systems of end of + /// epoch. /// /// If unspecified, this will default to `128`. #[serde(default = "default_end_of_epoch_broadcast_channel_capacity")] @@ -109,8 +113,9 @@ pub struct NodeConfig { #[serde(skip_serializing_if = "Option::is_none")] pub metrics: Option, - /// In a `sui-node` binary, this is set to SupportedProtocolVersions::SYSTEM_DEFAULT - /// in sui-node/src/main.rs. It is present in the config so that it can be changed by tests in + /// In a `sui-node` binary, this is set to + /// SupportedProtocolVersions::SYSTEM_DEFAULT in sui-node/src/main.rs. + /// It is present in the config so that it can be changed by tests in /// order to test protocol upgrades. #[serde(skip)] pub supported_protocol_versions: Option, @@ -355,23 +360,26 @@ pub struct ConsensusConfig { pub address: Multiaddr, pub db_path: PathBuf, - /// Optional alternative address preferentially used by a primary to talk to its own worker. - /// For example, this could be used to connect to co-located workers over a private LAN address. + /// Optional alternative address preferentially used by a primary to talk to + /// its own worker. 
For example, this could be used to connect to
+    /// co-located workers over a private LAN address.
     pub internal_worker_address: Option,
 
-    /// Maximum number of pending transactions to submit to consensus, including those
-    /// in submission wait.
-    /// Assuming 10_000 txn tps * 10 sec consensus latency = 100_000 inflight consensus txns,
-    /// Default to 100_000.
+    /// Maximum number of pending transactions to submit to consensus, including
+    /// those in submission wait.
+    /// Assuming 10_000 txn tps * 10 sec consensus latency = 100_000 inflight
+    /// consensus txns. Defaults to 100_000.
     pub max_pending_transactions: Option,
 
-    /// When defined caps the calculated submission position to the max_submit_position. Even if the
-    /// is elected to submit from a higher position than this, it will "reset" to the max_submit_position.
+    /// When defined, caps the calculated submission position to the
+    /// max_submit_position. Even if the node is elected to submit from a higher
+    /// position than this, it will "reset" to the max_submit_position.
     pub max_submit_position: Option,
 
-    /// The submit delay step to consensus defined in milliseconds. When provided it will
-    /// override the current back off logic otherwise the default backoff logic will be applied based
-    /// on consensus latency estimates.
+    /// The submit delay step to consensus defined in milliseconds. When
+    /// provided, it will override the current backoff logic; otherwise the
+    /// default backoff logic will be applied based on consensus latency
+    /// estimates.
     pub submit_delay_step_override_millis: Option,
 
     pub narwhal_config: ConsensusParameters,
@@ -412,7 +420,8 @@ pub fn default_consensus_protocol() -> ConsensusProtocol {
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
 pub struct CheckpointExecutorConfig {
-    /// Upper bound on the number of checkpoints that can be concurrently executed
+    /// Upper bound on the number of checkpoints that can be concurrently
+    /// executed
     ///
     /// If unspecified, this will default to `200`
     #[serde(default = "default_checkpoint_execution_max_concurrency")]
@@ -427,7 +436,8 @@ pub struct CheckpointExecutorConfig {
     pub local_execution_timeout_sec: u64,
 
     /// Optional directory used for data ingestion pipeline
-    /// When specified, each executed checkpoint will be saved in a local directory for post processing
+    /// When specified, each executed checkpoint will be saved in a local
+    /// directory for post processing
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub data_ingestion_dir: Option,
 }
 
@@ -437,18 +447,19 @@ pub struct CheckpointExecutorConfig {
 pub struct ExpensiveSafetyCheckConfig {
     /// If enabled, at epoch boundary, we will check that the storage
     /// fund balance is always identical to the sum of the storage
-    /// rebate of all live objects, and that the total SUI in the network remains
-    /// the same.
+    /// rebate of all live objects, and that the total SUI in the network
+    /// remains the same.
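
// A minimal sketch of the conservation invariant described above, using
// hypothetical aggregate values rather than the real accounting code:
// the storage fund must equal the sum of live objects' storage rebates,
// and the network's total SUI must not change across the epoch.
fn conservation_holds(
    storage_fund: u128,
    live_storage_rebates_sum: u128,
    total_sui_before_epoch: u128,
    total_sui_after_epoch: u128,
) -> bool {
    storage_fund == live_storage_rebates_sum && total_sui_before_epoch == total_sui_after_epoch
}
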
#[serde(default)] enable_epoch_sui_conservation_check: bool, - /// If enabled, we will check that the total SUI in all input objects of a tx - /// (both the Move part and the storage rebate) matches the total SUI in all - /// output objects of the tx + gas fees + /// If enabled, we will check that the total SUI in all input objects of a + /// tx (both the Move part and the storage rebate) matches the total SUI + /// in all output objects of the tx + gas fees #[serde(default)] enable_deep_per_tx_sui_conservation_check: bool, - /// Disable epoch SUI conservation check even when we are running in debug mode. + /// Disable epoch SUI conservation check even when we are running in debug + /// mode. #[serde(default)] force_disable_epoch_sui_conservation_check: bool, @@ -540,7 +551,8 @@ pub struct AuthorityStorePruningConfig { /// number of the latest epoch dbs to retain #[serde(default = "default_num_latest_epoch_dbs_to_retain")] pub num_latest_epoch_dbs_to_retain: usize, - /// time interval used by the pruner to determine whether there are any epoch DBs to remove + /// time interval used by the pruner to determine whether there are any + /// epoch DBs to remove #[serde(default = "default_epoch_db_pruning_period_secs")] pub epoch_db_pruning_period_secs: u64, /// number of epochs to keep the latest version of objects for. @@ -552,21 +564,25 @@ pub struct AuthorityStorePruningConfig { /// pruner's runtime interval used for aggressive mode #[serde(skip_serializing_if = "Option::is_none")] pub pruning_run_delay_seconds: Option, - /// maximum number of checkpoints in the pruning batch. Can be adjusted to increase performance + /// maximum number of checkpoints in the pruning batch. Can be adjusted to + /// increase performance #[serde(default = "default_max_checkpoints_in_batch")] pub max_checkpoints_in_batch: usize, /// maximum number of transaction in the pruning batch #[serde(default = "default_max_transactions_in_batch")] pub max_transactions_in_batch: usize, - /// enables periodic background compaction for old SST files whose last modified time is - /// older than `periodic_compaction_threshold_days` days. - /// That ensures that all sst files eventually go through the compaction process + /// enables periodic background compaction for old SST files whose last + /// modified time is older than `periodic_compaction_threshold_days` + /// days. That ensures that all sst files eventually go through the + /// compaction process #[serde(skip_serializing_if = "Option::is_none")] pub periodic_compaction_threshold_days: Option, - /// number of epochs to keep the latest version of transactions and effects for + /// number of epochs to keep the latest version of transactions and effects + /// for #[serde(skip_serializing_if = "Option::is_none")] pub num_epochs_to_retain_for_checkpoints: Option, - /// disables object tombstone pruning. We don't serialize it if it is the default value, false. + /// disables object tombstone pruning. We don't serialize it if it is the + /// default value, false. #[serde(default, skip_serializing_if = "std::ops::Not::not")] pub killswitch_tombstone_pruning: bool, #[serde(default, skip_serializing_if = "std::ops::Not::not")] @@ -840,7 +856,8 @@ enum GenesisLocation { }, } -/// Wrapper struct for SuiKeyPair that can be deserialized from a file path. Used by network, worker, and account keypair. +/// Wrapper struct for SuiKeyPair that can be deserialized from a file path. +/// Used by network, worker, and account keypair. 
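
// A short usage sketch for the wrapper defined below, under the
// assumption that both constructors shown in this file (`new` and
// `new_from_path`) are available; the fallback logic and error message
// here are illustrative only.
fn keypair_from_node_config(
    inline: Option<SuiKeyPair>,
    path: Option<std::path::PathBuf>,
) -> KeyPairWithPath {
    match (inline, path) {
        // An in-place keypair is wrapped and cached immediately.
        (Some(kp), _) => KeyPairWithPath::new(kp),
        // A path defers reading the key file until first access.
        (None, Some(p)) => KeyPairWithPath::new_from_path(p),
        (None, None) => panic!("a keypair must be provided inline or by path"),
    }
}
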
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct KeyPairWithPath { #[serde(flatten)] @@ -868,7 +885,8 @@ impl KeyPairWithPath { pub fn new(kp: SuiKeyPair) -> Self { let cell: OnceCell> = OnceCell::new(); let arc_kp = Arc::new(kp); - // OK to unwrap panic because authority should not start without all keypairs loaded. + // OK to unwrap panic because authority should not start without all keypairs + // loaded. cell.set(arc_kp.clone()).expect("Failed to set keypair"); Self { location: KeyPairLocation::InPlace { value: arc_kp }, @@ -878,7 +896,8 @@ impl KeyPairWithPath { pub fn new_from_path(path: PathBuf) -> Self { let cell: OnceCell> = OnceCell::new(); - // OK to unwrap panic because authority should not start without all keypairs loaded. + // OK to unwrap panic because authority should not start without all keypairs + // loaded. cell.set(Arc::new(read_keypair_from_file(&path).unwrap_or_else( |e| panic!("Invalid keypair file at path {:?}: {e}", &path), ))) @@ -894,7 +913,8 @@ impl KeyPairWithPath { .get_or_init(|| match &self.location { KeyPairLocation::InPlace { value } => value.clone(), KeyPairLocation::File { path } => { - // OK to unwrap panic because authority should not start without all keypairs loaded. + // OK to unwrap panic because authority should not start without all keypairs + // loaded. Arc::new( read_keypair_from_file(path).unwrap_or_else(|e| { panic!("Invalid keypair file at path {:?}: {e}", path) @@ -906,7 +926,8 @@ impl KeyPairWithPath { } } -/// Wrapper struct for AuthorityKeyPair that can be deserialized from a file path. +/// Wrapper struct for AuthorityKeyPair that can be deserialized from a file +/// path. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct AuthorityKeyPairWithPath { #[serde(flatten)] @@ -928,7 +949,8 @@ impl AuthorityKeyPairWithPath { pub fn new(kp: AuthorityKeyPair) -> Self { let cell: OnceCell> = OnceCell::new(); let arc_kp = Arc::new(kp); - // OK to unwrap panic because authority should not start without all keypairs loaded. + // OK to unwrap panic because authority should not start without all keypairs + // loaded. cell.set(arc_kp.clone()) .expect("Failed to set authority keypair"); Self { @@ -939,7 +961,8 @@ impl AuthorityKeyPairWithPath { pub fn new_from_path(path: PathBuf) -> Self { let cell: OnceCell> = OnceCell::new(); - // OK to unwrap panic because authority should not start without all keypairs loaded. + // OK to unwrap panic because authority should not start without all keypairs + // loaded. cell.set(Arc::new( read_authority_keypair_from_file(&path) .unwrap_or_else(|_| panic!("Invalid authority keypair file at path {:?}", &path)), @@ -956,7 +979,8 @@ impl AuthorityKeyPairWithPath { .get_or_init(|| match &self.location { AuthorityKeyPairLocation::InPlace { value } => value.clone(), AuthorityKeyPairLocation::File { path } => { - // OK to unwrap panic because authority should not start without all keypairs loaded. + // OK to unwrap panic because authority should not start without all keypairs + // loaded. Arc::new( read_authority_keypair_from_file(path).unwrap_or_else(|_| { panic!("Invalid authority keypair file {:?}", &path) @@ -1045,7 +1069,8 @@ mod tests { } // RunWithRange is used to specify the ending epoch/checkpoint to process. 
-// this is intended for use with disaster recovery debugging and verification workflows, never in normal operations +// this is intended for use with disaster recovery debugging and verification +// workflows, never in normal operations #[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize)] pub enum RunWithRange { Epoch(EpochId), diff --git a/crates/sui-config/src/node_config_metrics.rs b/crates/sui-config/src/node_config_metrics.rs index 1b741dc0cfb..9b74b6b8923 100644 --- a/crates/sui-config/src/node_config_metrics.rs +++ b/crates/sui-config/src/node_config_metrics.rs @@ -1,10 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::NodeConfig; -use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; use std::sync::Arc; +use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; + +use crate::NodeConfig; + pub struct NodeConfigMetrics { tx_deny_config_user_transaction_disabled: IntGauge, tx_deny_config_shared_object_disabled: IntGauge, diff --git a/crates/sui-config/src/object_storage_config.rs b/crates/sui-config/src/object_storage_config.rs index 6a569e55c0f..5aea7e27bed 100644 --- a/crates/sui-config/src/object_storage_config.rs +++ b/crates/sui-config/src/object_storage_config.rs @@ -1,16 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anyhow::{anyhow, Context, Result}; +use std::{fs, path::PathBuf, sync::Arc}; +use anyhow::{anyhow, Context, Result}; use clap::*; -use object_store::aws::AmazonS3Builder; -use object_store::{ClientOptions, DynObjectStore}; +use object_store::{aws::AmazonS3Builder, ClientOptions, DynObjectStore}; use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; use serde::{Deserialize, Serialize}; -use std::fs; -use std::path::PathBuf; -use std::sync::Arc; use tracing::info; /// Object-store type. @@ -29,7 +26,8 @@ pub enum ObjectStoreType { #[derive(Default, Debug, Clone, Deserialize, Serialize, Args)] #[serde(rename_all = "kebab-case")] pub struct ObjectStoreConfig { - /// Which object storage to use. If not specified, defaults to local file system. + /// Which object storage to use. If not specified, defaults to local file + /// system. #[serde(skip_serializing_if = "Option::is_none")] #[arg(value_enum)] pub object_store: Option, @@ -157,8 +155,7 @@ impl ObjectStoreConfig { ))) } fn new_gcs(&self) -> Result, anyhow::Error> { - use object_store::gcp::GoogleCloudStorageBuilder; - use object_store::limit::LimitStore; + use object_store::{gcp::GoogleCloudStorageBuilder, limit::LimitStore}; info!(bucket=?self.bucket, object_store_type="GCS", "Object Store"); @@ -188,8 +185,7 @@ impl ObjectStoreConfig { ))) } fn new_azure(&self) -> Result, anyhow::Error> { - use object_store::azure::MicrosoftAzureBuilder; - use object_store::limit::LimitStore; + use object_store::{azure::MicrosoftAzureBuilder, limit::LimitStore}; info!(bucket=?self.bucket, account=?self.azure_storage_account, object_store_type="Azure", "Object Store"); diff --git a/crates/sui-config/src/p2p.rs b/crates/sui-config/src/p2p.rs index f85af958904..b6c8206bda0 100644 --- a/crates/sui-config/src/p2p.rs +++ b/crates/sui-config/src/p2p.rs @@ -31,8 +31,9 @@ pub struct P2pConfig { pub discovery: Option, #[serde(skip_serializing_if = "Option::is_none")] pub randomness: Option, - /// Size in bytes above which network messages are considered excessively large. Excessively - /// large messages will still be handled, but logged and reported in metrics for debugging. 
+ /// Size in bytes above which network messages are considered excessively + /// large. Excessively large messages will still be handled, but logged + /// and reported in metrics for debugging. /// /// If unspecified, this will default to 8 MiB. #[serde(skip_serializing_if = "Option::is_none")] @@ -91,14 +92,16 @@ pub struct AllowlistedPeer { #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] pub struct StateSyncConfig { - /// List of "known-good" checkpoints that state sync will be forced to use. State sync will - /// skip verification of pinned checkpoints, and reject checkpoints with digests that don't - /// match pinned values for a given sequence number. + /// List of "known-good" checkpoints that state sync will be forced to use. + /// State sync will skip verification of pinned checkpoints, and reject + /// checkpoints with digests that don't match pinned values for a given + /// sequence number. /// /// This can be used: - /// - in case of a fork, to prevent the node from syncing to the wrong chain. - /// - in case of a network stall, to force the node to proceed with a manually-injected - /// checkpoint. + /// - in case of a fork, to prevent the node from syncing to the wrong + /// chain. + /// - in case of a network stall, to force the node to proceed with a + /// manually-injected checkpoint. #[serde(skip_serializing_if = "Vec::is_empty", default)] pub pinned_checkpoints: Vec<(CheckpointSequenceNumber, CheckpointDigest)>, @@ -114,39 +117,45 @@ pub struct StateSyncConfig { #[serde(skip_serializing_if = "Option::is_none")] pub mailbox_capacity: Option, - /// Size of the broadcast channel use for notifying other systems of newly sync'ed checkpoints. + /// Size of the broadcast channel use for notifying other systems of newly + /// sync'ed checkpoints. /// /// If unspecified, this will default to `128`. #[serde(skip_serializing_if = "Option::is_none")] pub synced_checkpoint_broadcast_channel_capacity: Option, - /// Set the upper bound on the number of checkpoint headers to be downloaded concurrently. + /// Set the upper bound on the number of checkpoint headers to be downloaded + /// concurrently. /// /// If unspecified, this will default to `100`. #[serde(skip_serializing_if = "Option::is_none")] pub checkpoint_header_download_concurrency: Option, - /// Set the upper bound on the number of checkpoint contents to be downloaded concurrently. + /// Set the upper bound on the number of checkpoint contents to be + /// downloaded concurrently. /// /// If unspecified, this will default to `100`. #[serde(skip_serializing_if = "Option::is_none")] pub checkpoint_content_download_concurrency: Option, - /// Set the upper bound on the number of individual transactions contained in checkpoint - /// contents to be downloaded concurrently. If both this value and - /// `checkpoint_content_download_concurrency` are set, the lower of the two will apply. + /// Set the upper bound on the number of individual transactions contained + /// in checkpoint contents to be downloaded concurrently. If both this + /// value and `checkpoint_content_download_concurrency` are set, the + /// lower of the two will apply. /// /// If unspecified, this will default to `50,000`. #[serde(skip_serializing_if = "Option::is_none")] pub checkpoint_content_download_tx_concurrency: Option, - /// Set the timeout that should be used when sending most state-sync RPC requests. + /// Set the timeout that should be used when sending most state-sync RPC + /// requests. 
///
     /// If unspecified, this will default to `10,000` milliseconds.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub timeout_ms: Option,
 
-    /// Set the timeout that should be used when sending RPC requests to sync checkpoint contents.
+    /// Set the timeout that should be used when sending RPC requests to sync
+    /// checkpoint contents.
     ///
     /// If unspecified, this will default to `10,000` milliseconds.
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -176,8 +185,8 @@ pub struct StateSyncConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub get_checkpoint_contents_inflight_limit: Option,
 
-    /// Per-checkpoint inflight limit for the GetCheckpointContents RPC. This is enforced globally
-    /// across all peers.
+    /// Per-checkpoint inflight limit for the GetCheckpointContents RPC. This is
+    /// enforced globally across all peers.
     ///
     /// If unspecified, this will default to no limit.
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -245,11 +254,12 @@ impl StateSyncConfig {
 /// Access Type of a node.
 /// AccessType info is shared in the discovery process.
 /// * If the node marks itself as Public, other nodes may try to connect to it.
-/// * If the node marks itself as Private, only nodes that have it in
-/// their `allowlisted_peers` or `seed_peers` will try to connect to it.
+/// * If the node marks itself as Private, only nodes that have it in their
+/// `allowlisted_peers` or `seed_peers` will try to connect to it.
 /// * If not set, defaults to Public.
-/// AccessType is useful when a network of nodes want to stay private. To achieve this,
-/// mark every node in this network as `Private` and allowlist/seed them to each other.
+/// AccessType is useful when a network of nodes wants to stay private. To
+/// achieve this, mark every node in this network as `Private` and
+/// allowlist/seed them to each other.
 #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub enum AccessType {
     Public,
@@ -273,8 +283,8 @@ pub struct DiscoveryConfig {
     /// Number of peers to query each interval.
     ///
-    /// Sets the number of peers, to be randomly selected, that are queried for their known peers
-    /// each interval.
+    /// Sets the number of peers, to be randomly selected, that are queried for
+    /// their known peers each interval.
     ///
     /// If unspecified, this will default to `1`.
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -290,13 +300,15 @@
     #[serde(skip_serializing_if = "Option::is_none")]
     pub access_type: Option,
 
-    /// Like `seed_peers` in `P2pConfig`, allowlisted peers will awlays be allowed to establish
-    /// connection with this node regardless of the concurrency limit.
-    /// Unlike `seed_peers`, a node does not reach out to `allowlisted_peers` preferentially.
-    /// It is also used to determine if a peer is accessible when its AccessType is Private.
-    /// For example, a node will ignore a peer with Private AccessType if the peer is not in
-    /// its `allowlisted_peers`. Namely, the node will not try to establish connections
-    /// to this peer, nor advertise this peer's info to other peers in the network.
+    /// Like `seed_peers` in `P2pConfig`, allowlisted peers will always be
+    /// allowed to establish a connection with this node regardless of the
+    /// concurrency limit. Unlike `seed_peers`, a node does not reach out to
+    /// `allowlisted_peers` preferentially. It is also used to determine if
+    /// a peer is accessible when its AccessType is Private.
For example, a + /// node will ignore a peer with Private AccessType if the peer is not in + /// its `allowlisted_peers`. Namely, the node will not try to establish + /// connections to this peer, nor advertise this peer's info to other + /// peers in the network. #[serde(skip_serializing_if = "Vec::is_empty", default)] pub allowlisted_peers: Vec, } @@ -330,27 +342,29 @@ impl DiscoveryConfig { #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] pub struct RandomnessConfig { - /// Maximum number of rounds ahead of our most recent completed round for which we should - /// accept partial signatures from other validators. + /// Maximum number of rounds ahead of our most recent completed round for + /// which we should accept partial signatures from other validators. /// /// If unspecified, this will default to 10. #[serde(skip_serializing_if = "Option::is_none")] pub max_partial_sigs_rounds_ahead: Option, - /// Maximum number of rounds for which partial signatures should be concurrently sent. + /// Maximum number of rounds for which partial signatures should be + /// concurrently sent. /// /// If unspecified, this will default to 10. #[serde(skip_serializing_if = "Option::is_none")] pub max_partial_sigs_concurrent_sends: Option, - /// Interval at which to retry sending partial signatures until the round is complete. + /// Interval at which to retry sending partial signatures until the round is + /// complete. /// /// If unspecified, this will default to `5,000` milliseconds. #[serde(skip_serializing_if = "Option::is_none")] pub partial_signature_retry_interval_ms: Option, - /// Size of the Randomness actor's mailbox. This should be set large enough to never - /// overflow unless a bug is encountered. + /// Size of the Randomness actor's mailbox. This should be set large enough + /// to never overflow unless a bug is encountered. /// /// If unspecified, this will default to `1,000,000`. #[serde(skip_serializing_if = "Option::is_none")] diff --git a/crates/sui-config/src/transaction_deny_config.rs b/crates/sui-config/src/transaction_deny_config.rs index 927686d74ac..a94e43d367e 100644 --- a/crates/sui-config/src/transaction_deny_config.rs +++ b/crates/sui-config/src/transaction_deny_config.rs @@ -10,26 +10,30 @@ use sui_types::base_types::{ObjectID, SuiAddress}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] pub struct TransactionDenyConfig { - /// A list of object IDs that are not allowed to be accessed/used in transactions. - /// Note that since this is checked during transaction signing, only root object ids - /// are supported here (i.e. no child-objects). - /// Similarly this does not apply to wrapped objects as they are not directly accessible. + /// A list of object IDs that are not allowed to be accessed/used in + /// transactions. Note that since this is checked during transaction + /// signing, only root object ids are supported here (i.e. no + /// child-objects). Similarly this does not apply to wrapped objects as + /// they are not directly accessible. #[serde(default, skip_serializing_if = "Vec::is_empty")] object_deny_list: Vec, - /// A list of package object IDs that are not allowed to be called into in transactions, - /// either directly or indirectly through transitive dependencies. - /// Note that this does not apply to type arguments. 
-    /// Also since we only compare the deny list against the upgraded package ID of each dependency
-    /// in the used package, when a package ID is denied, newer versions of that package are
-    /// still allowed. If we want to deny the entire upgrade family of a package, we need to
-    /// explicitly specify all the package IDs in the deny list.
-    /// TODO: We could consider making this more flexible, e.g. whether to check in type args,
-    /// whether to block entire upgrade family, whether to allow upgrade and etc.
+    /// A list of package object IDs that are not allowed to be called into in
+    /// transactions, either directly or indirectly through transitive
+    /// dependencies. Note that this does not apply to type arguments.
+    /// Also since we only compare the deny list against the upgraded package ID
+    /// of each dependency in the used package, when a package ID is denied,
+    /// newer versions of that package are still allowed. If we want to deny
+    /// the entire upgrade family of a package, we need to explicitly
+    /// specify all the package IDs in the deny list. TODO: We could
+    /// consider making this more flexible, e.g. whether to check in type args,
+    /// whether to block the entire upgrade family, whether to allow upgrades,
+    /// etc.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     package_deny_list: Vec,
 
-    /// A list of sui addresses that are not allowed to be used as the sender or sponsor.
+    /// A list of Sui addresses that are not allowed to be used as the sender or
+    /// sponsor.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     address_deny_list: Vec,
 
@@ -45,8 +49,9 @@ pub struct TransactionDenyConfig {
     #[serde(default)]
     shared_object_disabled: bool,
 
-    /// Whether user transactions are disabled (i.e. only system transactions are allowed).
-    /// This is essentially a kill switch for transactions processing to a degree.
+    /// Whether user transactions are disabled (i.e. only system transactions
+    /// are allowed). This is essentially a kill switch for transaction
+    /// processing, to a degree.
     #[serde(default)]
     user_transaction_disabled: bool,
 
@@ -72,7 +77,8 @@ pub struct TransactionDenyConfig {
     #[serde(default)]
     zklogin_disabled_providers: HashSet,
     // TODO: We could consider add a deny list for types that we want to disable public transfer.
-    // TODO: We could also consider disable more types of commands, such as transfer, split and etc.
+    // TODO: We could also consider disabling more types of commands, such as transfer, split,
+    // etc.
 }
 
 impl TransactionDenyConfig {
diff --git a/crates/sui-core/benches/batch_verification_bench.rs b/crates/sui-core/benches/batch_verification_bench.rs
index 6648d2e2866..2cb8be5cb1f 100644
--- a/crates/sui-core/benches/batch_verification_bench.rs
+++ b/crates/sui-core/benches/batch_verification_bench.rs
@@ -1,21 +1,22 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use criterion::*; - -use rand::prelude::*; -use rand::seq::SliceRandom; - -use futures::future::join_all; -use prometheus::Registry; use std::sync::Arc; -use sui_core::test_utils::{make_cert_with_large_committee, make_dummy_tx}; -use sui_types::committee::Committee; -use sui_types::crypto::{get_key_pair, AccountKeyPair, AuthorityKeyPair}; -use sui_types::transaction::CertifiedTransaction; +use criterion::*; use fastcrypto_zkp::bn254::zk_login_api::ZkLoginEnv; -use sui_core::signature_verifier::*; +use futures::future::join_all; +use prometheus::Registry; +use rand::{prelude::*, seq::SliceRandom}; +use sui_core::{ + signature_verifier::*, + test_utils::{make_cert_with_large_committee, make_dummy_tx}, +}; +use sui_types::{ + committee::Committee, + crypto::{get_key_pair, AccountKeyPair, AuthorityKeyPair}, + transaction::CertifiedTransaction, +}; fn gen_certs( committee: &Committee, @@ -109,8 +110,8 @@ fn batch_verification_bench(c: &mut Criterion) { let (committee, key_pairs) = Committee::new_simple_test_committee_of_size(100); let mut group = c.benchmark_group("batch_verify"); - // throughput improvements mostly level off at a batch size of 32, and latency starts getting - // pretty significant at that point. + // throughput improvements mostly level off at a batch size of 32, and latency + // starts getting pretty significant at that point. for batch_size in [1, 4, 16, 32, 64] { for num_errors in [0, 1] { let mut certs = gen_certs(&committee, &key_pairs, batch_size); diff --git a/crates/sui-core/benches/verified_cert_cache_bench.rs b/crates/sui-core/benches/verified_cert_cache_bench.rs index 483d09d642a..b0ceb41da51 100644 --- a/crates/sui-core/benches/verified_cert_cache_bench.rs +++ b/crates/sui-core/benches/verified_cert_cache_bench.rs @@ -1,9 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use criterion::*; - -use criterion::Criterion; +use criterion::{Criterion, *}; use sui_core::signature_verifier::{SignatureVerifierMetrics, VerifiedDigestCache}; use sui_types::digests::CertificateDigest; diff --git a/crates/sui-core/src/authority.rs b/crates/sui-core/src/authority.rs index 7c61922096b..91883b73ef7 100644 --- a/crates/sui-core/src/authority.rs +++ b/crates/sui-core/src/authority.rs @@ -2,22 +2,35 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::execution_cache::NotifyReadWrapper; -use crate::transaction_outputs::TransactionOutputs; -use crate::verify_indexes::verify_indexes; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + fs, + fs::File, + io::Write, + path::{Path, PathBuf}, + pin::Pin, + sync::{atomic::Ordering, Arc}, + time::Duration, + vec, +}; + use anyhow::anyhow; use arc_swap::{ArcSwap, Guard}; use async_trait::async_trait; +pub use authority_notify_read::EffectsNotifyRead; +pub use authority_store::{AuthorityStore, ResolverWrapper, UpdateType}; use chrono::prelude::*; -use fastcrypto::encoding::Base58; -use fastcrypto::encoding::Encoding; -use fastcrypto::hash::MultisetHash; +use fastcrypto::{ + encoding::{Base58, Encoding}, + hash::MultisetHash, +}; use itertools::Itertools; -use move_binary_format::binary_config::BinaryConfig; -use move_binary_format::CompiledModule; -use move_core_types::annotated_value::MoveStructLayout; -use move_core_types::language_storage::ModuleId; -use mysten_metrics::{TX_TYPE_SHARED_OBJ_TX, TX_TYPE_SINGLE_WRITER_TX}; +use move_binary_format::{binary_config::BinaryConfig, CompiledModule}; +use move_core_types::{annotated_value::MoveStructLayout, language_storage::ModuleId}; +use mysten_metrics::{ + monitored_scope, spawn_monitored_task, TX_TYPE_SHARED_OBJ_TX, TX_TYPE_SINGLE_WRITER_TX, +}; +use once_cell::sync::OnceCell; use parking_lot::Mutex; use prometheus::{ register_histogram_vec_with_registry, register_histogram_with_registry, @@ -25,47 +38,19 @@ use prometheus::{ register_int_gauge_vec_with_registry, register_int_gauge_with_registry, Histogram, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Registry, }; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, BTreeSet}; -use std::fs::File; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::sync::atomic::Ordering; -use std::time::Duration; -use std::{ - collections::{HashMap, HashSet}, - fs, - pin::Pin, - sync::Arc, - vec, -}; -use sui_config::node::{AuthorityOverloadConfig, StateDebugDumpConfig}; -use sui_config::NodeConfig; -use sui_types::crypto::RandomnessRound; -use sui_types::execution_status::ExecutionStatus; -use sui_types::type_resolver::LayoutResolver; -use tap::{TapFallible, TapOptional}; -use tokio::sync::mpsc::unbounded_channel; -use tokio::sync::{mpsc, oneshot, RwLock}; -use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn, Instrument}; - -use self::authority_store::ExecutionLockWriteGuard; -use self::authority_store_pruner::AuthorityStorePruningMetrics; -pub use authority_notify_read::EffectsNotifyRead; -pub use authority_store::{AuthorityStore, ResolverWrapper, UpdateType}; -use mysten_metrics::{monitored_scope, spawn_monitored_task}; - -use once_cell::sync::OnceCell; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use shared_crypto::intent::{AppId, Intent, IntentMessage, IntentScope, IntentVersion}; use sui_archival::reader::ArchiveReaderBalancer; -use sui_config::certificate_deny_config::CertificateDenyConfig; -use sui_config::genesis::Genesis; -use sui_config::node::{ - AuthorityStorePruningConfig, DBCheckpointConfig, ExpensiveSafetyCheckConfig, +use sui_config::{ + certificate_deny_config::CertificateDenyConfig, + genesis::Genesis, + node::{ + AuthorityOverloadConfig, AuthorityStorePruningConfig, DBCheckpointConfig, + ExpensiveSafetyCheckConfig, StateDebugDumpConfig, + }, + transaction_deny_config::TransactionDenyConfig, + NodeConfig, }; -use 
sui_config::transaction_deny_config::TransactionDenyConfig; use sui_framework::{BuiltInFramework, SystemPackage}; use sui_json_rpc_types::{ DevInspectResults, DryRunTransactionBlockResponse, EventFilter, SuiEvent, SuiMoveValue, @@ -74,92 +59,103 @@ use sui_json_rpc_types::{ }; use sui_macros::{fail_point, fail_point_async, fail_point_if}; use sui_protocol_config::{ProtocolConfig, SupportedProtocolVersions}; -use sui_storage::indexes::{CoinInfo, ObjectIndexChanges}; -use sui_storage::key_value_store::{TransactionKeyValueStore, TransactionKeyValueStoreTrait}; -use sui_storage::key_value_store_metrics::KeyValueStoreMetrics; -use sui_storage::IndexStore; -use sui_types::authenticator_state::get_authenticator_state; -use sui_types::committee::{EpochId, ProtocolVersion}; -use sui_types::crypto::{default_hash, AuthoritySignInfo, Signer}; -use sui_types::deny_list::DenyList; -use sui_types::digests::ChainIdentifier; -use sui_types::digests::TransactionEventsDigest; -use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType}; -use sui_types::effects::{ - InputSharedObject, SignedTransactionEffects, TransactionEffects, TransactionEffectsAPI, - TransactionEvents, VerifiedCertifiedTransactionEffects, VerifiedSignedTransactionEffects, -}; -use sui_types::error::{ExecutionError, UserInputError}; -use sui_types::event::{Event, EventID}; -use sui_types::executable_transaction::VerifiedExecutableTransaction; -use sui_types::gas::{GasCostSummary, SuiGasStatus}; -use sui_types::inner_temporary_store::{ - InnerTemporaryStore, ObjectMap, TemporaryModuleResolver, TemporaryPackageStore, TxCoins, - WrittenObjects, -}; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointCommitment, CheckpointContents, CheckpointContentsDigest, - CheckpointDigest, CheckpointRequest, CheckpointRequestV2, CheckpointResponse, - CheckpointResponseV2, CheckpointSequenceNumber, CheckpointSummary, CheckpointSummaryResponse, - CheckpointTimestamp, ECMHLiveObjectSetDigest, VerifiedCheckpoint, +use sui_storage::{ + indexes::{CoinInfo, ObjectIndexChanges}, + key_value_store::{TransactionKeyValueStore, TransactionKeyValueStoreTrait}, + key_value_store_metrics::KeyValueStoreMetrics, + IndexStore, }; -use sui_types::messages_consensus::AuthorityCapabilities; -use sui_types::messages_grpc::{ - HandleTransactionResponse, LayoutGenerationOption, ObjectInfoRequest, ObjectInfoRequestKind, - ObjectInfoResponse, TransactionInfoRequest, TransactionInfoResponse, TransactionStatus, -}; -use sui_types::metrics::{BytecodeVerifierMetrics, LimitsMetrics}; -use sui_types::object::{MoveObject, Owner, PastObjectRead, OBJECT_START_VERSION}; -use sui_types::storage::{ - BackingPackageStore, BackingStore, ObjectKey, ObjectOrTombstone, ObjectStore, WriteKind, -}; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait; -use sui_types::sui_system_state::SuiSystemStateTrait; -use sui_types::sui_system_state::{get_sui_system_state, SuiSystemState}; +#[cfg(msim)] +use sui_types::committee::CommitteeTrait; use sui_types::{ + authenticator_state::get_authenticator_state, base_types::*, - committee::Committee, - crypto::AuthoritySignature, - error::{SuiError, SuiResult}, + committee::{Committee, EpochId, ProtocolVersion}, + crypto::{default_hash, AuthoritySignInfo, AuthoritySignature, RandomnessRound, Signer}, + deny_list::DenyList, + digests::{ChainIdentifier, TransactionEventsDigest}, + dynamic_field::{DynamicFieldInfo, DynamicFieldName, 
DynamicFieldType}, + effects::{ + InputSharedObject, SignedTransactionEffects, TransactionEffects, TransactionEffectsAPI, + TransactionEvents, VerifiedCertifiedTransactionEffects, VerifiedSignedTransactionEffects, + }, + error::{ExecutionError, SuiError, SuiResult, UserInputError}, + event::{Event, EventID}, + executable_transaction::VerifiedExecutableTransaction, + execution_config_utils::to_binary_config, + execution_status::ExecutionStatus, fp_ensure, - object::{Object, ObjectRead}, + gas::{GasCostSummary, SuiGasStatus}, + inner_temporary_store::{ + InnerTemporaryStore, ObjectMap, TemporaryModuleResolver, TemporaryPackageStore, TxCoins, + WrittenObjects, + }, + is_system_package, + message_envelope::Message, + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointCommitment, CheckpointContents, + CheckpointContentsDigest, CheckpointDigest, CheckpointRequest, CheckpointRequestV2, + CheckpointResponse, CheckpointResponseV2, CheckpointSequenceNumber, CheckpointSummary, + CheckpointSummaryResponse, CheckpointTimestamp, ECMHLiveObjectSetDigest, + VerifiedCheckpoint, + }, + messages_consensus::AuthorityCapabilities, + messages_grpc::{ + HandleTransactionResponse, LayoutGenerationOption, ObjectInfoRequest, + ObjectInfoRequestKind, ObjectInfoResponse, TransactionInfoRequest, TransactionInfoResponse, + TransactionStatus, + }, + metrics::{BytecodeVerifierMetrics, LimitsMetrics}, + object::{MoveObject, Object, ObjectRead, Owner, PastObjectRead, OBJECT_START_VERSION}, + storage::{ + BackingPackageStore, BackingStore, ObjectKey, ObjectOrTombstone, ObjectStore, WriteKind, + }, + sui_system_state::{ + epoch_start_sui_system_state::EpochStartSystemStateTrait, get_sui_system_state, + SuiSystemState, SuiSystemStateTrait, + }, transaction::*, - SUI_SYSTEM_ADDRESS, + type_resolver::LayoutResolver, + TypeTag, SUI_SYSTEM_ADDRESS, }; -use sui_types::{is_system_package, TypeTag}; +use tap::{TapFallible, TapOptional}; +use tokio::{ + sync::{mpsc, mpsc::unbounded_channel, oneshot, RwLock}, + task::JoinHandle, +}; +use tracing::{debug, error, info, instrument, warn, Instrument}; use typed_store::TypedStoreError; -use crate::authority::authority_per_epoch_store::{AuthorityPerEpochStore, CertTxGuard}; -use crate::authority::authority_per_epoch_store_pruner::AuthorityPerEpochStorePruner; -use crate::authority::authority_store::{ExecutionLockReadGuard, ObjectLockStatus}; -use crate::authority::authority_store_pruner::{ - AuthorityStorePruner, EPOCH_DURATION_MS_FOR_TESTING, +use self::{ + authority_store::ExecutionLockWriteGuard, authority_store_pruner::AuthorityStorePruningMetrics, }; -use crate::authority::epoch_start_configuration::EpochStartConfigTrait; -use crate::authority::epoch_start_configuration::EpochStartConfiguration; -use crate::checkpoints::checkpoint_executor::CheckpointExecutor; -use crate::checkpoints::CheckpointStore; -use crate::consensus_adapter::ConsensusAdapter; -use crate::epoch::committee_store::CommitteeStore; -use crate::execution_cache::{ - CheckpointCache, ExecutionCache, ExecutionCacheCommit, ExecutionCacheRead, - ExecutionCacheReconfigAPI, ExecutionCacheWrite, StateSyncAPI, +use crate::{ + authority::{ + authority_per_epoch_store::{AuthorityPerEpochStore, CertTxGuard}, + authority_per_epoch_store_pruner::AuthorityPerEpochStorePruner, + authority_store::{ExecutionLockReadGuard, ObjectLockStatus}, + authority_store_pruner::{AuthorityStorePruner, EPOCH_DURATION_MS_FOR_TESTING}, + epoch_start_configuration::{EpochStartConfigTrait, EpochStartConfiguration}, + }, + 
checkpoints::{checkpoint_executor::CheckpointExecutor, CheckpointStore}, + consensus_adapter::ConsensusAdapter, + epoch::committee_store::CommitteeStore, + execution_cache::{ + CheckpointCache, ExecutionCache, ExecutionCacheCommit, ExecutionCacheRead, + ExecutionCacheReconfigAPI, ExecutionCacheWrite, NotifyReadWrapper, StateSyncAPI, + }, + execution_driver::execution_process, + metrics::{LatencyObserver, RateTracker}, + module_cache_metrics::ResolverMetrics, + overload_monitor::{overload_monitor_accept_tx, AuthorityOverloadInfo}, + stake_aggregator::StakeAggregator, + state_accumulator::{AccumulatorStore, StateAccumulator, WrappedObject}, + subscription_handler::SubscriptionHandler, + transaction_input_loader::TransactionInputLoader, + transaction_manager::TransactionManager, + transaction_outputs::TransactionOutputs, + verify_indexes::verify_indexes, }; -use crate::execution_driver::execution_process; -use crate::metrics::LatencyObserver; -use crate::metrics::RateTracker; -use crate::module_cache_metrics::ResolverMetrics; -use crate::overload_monitor::{overload_monitor_accept_tx, AuthorityOverloadInfo}; -use crate::stake_aggregator::StakeAggregator; -use crate::state_accumulator::{AccumulatorStore, StateAccumulator, WrappedObject}; -use crate::subscription_handler::SubscriptionHandler; -use crate::transaction_input_loader::TransactionInputLoader; -use crate::transaction_manager::TransactionManager; - -#[cfg(msim)] -use sui_types::committee::CommitteeTrait; -use sui_types::execution_config_utils::to_binary_config; #[cfg(test)] #[path = "unit_tests/authority_tests.rs"] @@ -697,17 +693,18 @@ impl AuthorityMetrics { } /// a Trait object for `Signer` that is: -/// - Pin, i.e. confined to one place in memory (we don't want to copy private keys). +/// - Pin, i.e. confined to one place in memory (we don't want to copy private +/// keys). /// - Sync, i.e. can be safely shared between threads. /// /// Typically instantiated with Box::pin(keypair) where keypair is a `KeyPair` -/// pub type StableSyncAuthoritySigner = Pin + Send + Sync>>; // If you have Arc, you cannot return a reference to it as -// an &Arc (for example), because the trait object is a fat pointer. -// So, in order to be able to return &Arc, we create all the converted trait objects -// (aka fat pointers) up front and return references to them. +// an &Arc (for example), because the trait object is a +// fat pointer. So, in order to be able to return &Arc, we create all the +// converted trait objects (aka fat pointers) up front and return references to +// them. struct ExecutionCacheTraitPointers { cache_reader: Arc, backing_store: Arc, @@ -753,9 +750,10 @@ pub struct AuthorityState { epoch_store: ArcSwap, /// This lock denotes current 'execution epoch'. - /// Execution acquires read lock, checks certificate epoch and holds it until all writes are complete. - /// Reconfiguration acquires write lock, changes the epoch and revert all transactions - /// from previous epoch that are executed but did not make into checkpoint. + /// Execution acquires read lock, checks certificate epoch and holds it + /// until all writes are complete. Reconfiguration acquires write lock, + /// changes the epoch and revert all transactions from previous epoch + /// that are executed but did not make into checkpoint. execution_lock: RwLock, pub indexes: Option>, @@ -796,10 +794,12 @@ pub struct AuthorityState { pub overload_info: AuthorityOverloadInfo, } -/// The authority state encapsulates all state, drives execution, and ensures safety. 
+/// The authority state encapsulates all state, drives execution, and ensures +/// safety. /// -/// Note the authority operations can be accessed through a read ref (&) and do not -/// require &mut. Internally a database is synchronized through a mutex lock. +/// Note the authority operations can be accessed through a read ref (&) and do +/// not require &mut. Internally a database is synchronized through a mutex +/// lock. /// /// Repeating valid commands should produce no changes and return no error. impl AuthorityState { @@ -841,8 +841,9 @@ impl AuthorityState { Ok(commitments) } - /// This is a private method and should be kept that way. It doesn't check whether - /// the provided transaction is a system transaction, and hence can only be called internally. + /// This is a private method and should be kept that way. It doesn't check + /// whether the provided transaction is a system transaction, and hence + /// can only be called internally. #[instrument(level = "trace", skip_all)] async fn handle_transaction_impl( &self, @@ -905,8 +906,8 @@ impl AuthorityState { // Check and write locks, to signed transaction, into the database // The call to self.set_transaction_lock checks the lock is not conflicting, - // and returns ConflictingTransaction error in case there is a lock on a different - // existing transaction. + // and returns ConflictingTransaction error in case there is a lock on a + // different existing transaction. self.execution_cache .acquire_transaction_locks(epoch_store, &owned_objects, signed_transaction.clone()) .await?; @@ -930,8 +931,9 @@ impl AuthorityState { let tx_digest = *transaction.digest(); debug!("handle_transaction"); - // Ensure an idempotent answer. This is checked before the system_tx check so that - // a validator is able to return the signed system tx if it was already signed locally. + // Ensure an idempotent answer. This is checked before the system_tx check so + // that a validator is able to return the signed system tx if it was + // already signed locally. if let Some((_, status)) = self.get_transaction_status(&tx_digest, epoch_store)? { return Ok(HandleTransactionResponse { status }); } @@ -1015,9 +1017,10 @@ impl AuthorityState { } /// Executes a transaction that's known to have correct effects. - /// For such transaction, we don't have to wait for consensus to set shared object - /// locks because we already know the shared object versions based on the effects. - /// This function can be called by a fullnode only. + /// For such transaction, we don't have to wait for consensus to set shared + /// object locks because we already know the shared object versions + /// based on the effects. This function can be called by a fullnode + /// only. #[instrument(level = "trace", skip_all)] pub async fn fullnode_execute_certificate_with_effects( &self, @@ -1074,7 +1077,11 @@ impl AuthorityState { if &observed_effects_digest != expected_effects_digest { panic!( "Locally executed effects do not match canonical effects! 
expected_effects_digest={:?} observed_effects_digest={:?} expected_effects={:?} observed_effects={:?} input_objects={:?}", - expected_effects_digest, observed_effects_digest, effects.data(), observed_effects, transaction.data().transaction_data().input_objects() + expected_effects_digest, + observed_effects_digest, + effects.data(), + observed_effects, + transaction.data().transaction_data().input_objects() ); } Ok(()) @@ -1102,8 +1109,9 @@ impl AuthorityState { if !certificate.contains_shared_object() { // Shared object transactions need to be sequenced by Narwhal before enqueueing - // for execution, done in AuthorityPerEpochStore::handle_consensus_transaction(). - // For owned object transactions, they can be enqueued for execution immediately. + // for execution, done in + // AuthorityPerEpochStore::handle_consensus_transaction(). For owned + // object transactions, they can be enqueued for execution immediately. self.enqueue_certificates_for_execution(vec![certificate.clone()], epoch_store); } @@ -1115,11 +1123,14 @@ impl AuthorityState { /// /// Guarantees that /// - If input objects are available, return no permanent failure. - /// - Execution and output commit are atomic. i.e. outputs are only written to storage, - /// on successful execution; crashed execution has no observable effect and can be retried. + /// - Execution and output commit are atomic. i.e. outputs are only written + /// to storage, + /// on successful execution; crashed execution has no observable effect and + /// can be retried. /// - /// It is caller's responsibility to ensure input objects are available and locks are set. - /// If this cannot be satisfied by the caller, execute_certificate() should be called instead. + /// It is caller's responsibility to ensure input objects are available and + /// locks are set. If this cannot be satisfied by the caller, + /// execute_certificate() should be called instead. /// /// Should only be called within sui-core. #[instrument(level = "trace", skip_all)] @@ -1136,9 +1147,10 @@ impl AuthorityState { let tx_digest = certificate.digest(); let input_objects = self.read_objects(certificate, epoch_store).await?; - // This acquires a lock on the tx digest to prevent multiple concurrent executions of the - // same tx. While we don't need this for safety (tx sequencing is ultimately atomic), it is - // very common to receive the same tx multiple times simultaneously due to gossip, so we + // This acquires a lock on the tx digest to prevent multiple concurrent + // executions of the same tx. While we don't need this for safety (tx + // sequencing is ultimately atomic), it is very common to receive the + // same tx multiple times simultaneously due to gossip, so we // may as well hold the lock and save the cpu time for other requests. let tx_guard = epoch_store.acquire_tx_guard(certificate).await?; @@ -1192,8 +1204,9 @@ impl AuthorityState { self.read_objects(certificate, epoch_store).await } - /// Test only wrapper for `try_execute_immediately()` above, useful for checking errors if the - /// pre-conditions are not satisfied, and executing change epoch transactions. + /// Test only wrapper for `try_execute_immediately()` above, useful for + /// checking errors if the pre-conditions are not satisfied, and + /// executing change epoch transactions. pub async fn try_execute_for_test( &self, certificate: &VerifiedCertificate, @@ -1226,9 +1239,9 @@ impl AuthorityState { } /// This function captures the required state to debug a forked transaction. 
-    /// The dump is written to a file in dir `path`, with name prefixed by the transaction digest.
-    /// NOTE: Since this info escapes the validator context,
-    /// make sure not to leak any private info here
+    /// The dump is written to a file in dir `path`, with name prefixed by the
+    /// transaction digest. NOTE: Since this info escapes the validator
+    /// context, make sure not to leak any private info here
     pub(crate) fn debug_dump_transaction_state(
         &self,
         tx_digest: &TransactionDigest,
@@ -1276,8 +1289,8 @@ impl AuthorityState {
             }
         });
 
-        // The cert could have been processed by a concurrent attempt of the same cert, so check if
-        // the effects have already been written.
+        // The cert could have been processed by a concurrent attempt of the same cert,
+        // so check if the effects have already been written.
         if let Some(effects) = self.execution_cache.get_executed_effects(&digest)? {
             tx_guard.release();
             return Ok((effects, None));
         }
 
         let execution_guard = self
             .execution_lock_for_executable_transaction(certificate)
             .await;
-        // Any caller that verifies the signatures on the certificate will have already checked the
-        // epoch. But paths that don't verify sigs (e.g. execution from checkpoint, reading from db)
-        // present the possibility of an epoch mismatch. If this cert is not finalzied in previous
-        // epoch, then it's invalid.
+        // Any caller that verifies the signatures on the certificate will have already
+        // checked the epoch. But paths that don't verify sigs (e.g. execution
+        // from checkpoint, reading from db) present the possibility of an epoch
+        // mismatch. If this cert is not finalized in the previous epoch, then
+        // it's invalid.
         let execution_guard = match execution_guard {
             Ok(execution_guard) => execution_guard,
             Err(err) => {
@@ -1296,8 +1310,9 @@
                 tx_guard.release();
                 return Err(err);
             }
         };
-        // Since we obtain a reference to the epoch store before taking the execution lock, it's
-        // possible that reconfiguration has happened and they no longer match.
+        // Since we obtain a reference to the epoch store before taking the execution
+        // lock, it's possible that reconfiguration has happened and they no
+        // longer match.
         if *execution_guard != epoch_store.epoch() {
             tx_guard.release();
             info!("The epoch of the execution_guard doesn't match the epoch store");
@@ -1307,10 +1322,11 @@
             });
         }
 
-        // Errors originating from prepare_certificate may be transient (failure to read locks) or
-        // non-transient (transaction input is invalid, move vm errors). However, all errors from
-        // this function occur before we have written anything to the db, so we commit the tx
-        // guard and rely on the client to retry the tx (if it was transient).
+        // Errors originating from prepare_certificate may be transient (failure to read
+        // locks) or non-transient (transaction input is invalid, move vm
+        // errors). However, all errors from this function occur before we have
+        // written anything to the db, so we commit the tx guard and rely on the
+        // client to retry the tx (if it was transient).
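
// The two early-exit checks above, condensed into a hypothetical
// predicate for illustration only: execution proceeds when no effects
// were already written by a concurrent attempt and the execution
// guard's epoch still matches the epoch store.
fn guards_pass(effects_already_written: bool, guard_epoch: u64, store_epoch: u64) -> bool {
    !effects_already_written && guard_epoch == store_epoch
}
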
let (inner_temporary_store, effects, execution_error_opt) = match self.prepare_certificate( &execution_guard, certificate, @@ -1384,7 +1400,8 @@ impl AuthorityState { debug_assert!(execution_error_opt.is_none()); epoch_store.update_authenticator_state(auth_state); - // double check that the signature verifier always matches the authenticator state + // double check that the signature verifier always matches the authenticator + // state if cfg!(debug_assertions) { let authenticator_state = get_authenticator_state(&self.execution_cache) .expect("Read cannot fail") @@ -1459,8 +1476,9 @@ impl AuthorityState { error!(?tx_digest, "tx post processing failed: {e}"); }); - // The insertion to epoch_store is not atomic with the insertion to the perpetual store. This is OK because - // we insert to the epoch store first. And during lookups we always look up in the perpetual store first. + // The insertion to epoch_store is not atomic with the insertion to the + // perpetual store. This is OK because we insert to the epoch store + // first. And during lookups we always look up in the perpetual store first. epoch_store.insert_tx_cert_and_effects_signature( &tx_key, tx_digest, @@ -1541,15 +1559,17 @@ impl AuthorityState { ); } - /// prepare_certificate validates the transaction input, and executes the certificate, - /// returning effects, output objects, events, etc. + /// prepare_certificate validates the transaction input, and executes the + /// certificate, returning effects, output objects, events, etc. /// - /// It reads state from the db (both owned and shared locks), but it has no side effects. + /// It reads state from the db (both owned and shared locks), but it has no + /// side effects. /// - /// It can be generally understood that a failure of prepare_certificate indicates a - /// non-transient error, e.g. the transaction input is somehow invalid, the correct - /// locks are not held, etc. However, this is not entirely true, as a transient db read error - /// may also cause this function to fail. + /// It can be generally understood that a failure of prepare_certificate + /// indicates a non-transient error, e.g. the transaction input is + /// somehow invalid, the correct locks are not held, etc. However, this + /// is not entirely true, as a transient db read error may also cause + /// this function to fail. #[instrument(level = "trace", skip_all)] fn prepare_certificate( &self, @@ -1571,7 +1591,8 @@ impl AuthorityState { tx_data.check_version_supported(epoch_store.protocol_config())?; tx_data.validity_check(epoch_store.protocol_config())?; - // The cost of partially re-auditing a transaction before execution is tolerated. + // The cost of partially re-auditing a transaction before execution is + // tolerated. let (gas_status, input_objects) = sui_transaction_checks::check_certificate_input( certificate, input_objects, @@ -1862,7 +1883,8 @@ impl AuthorityState { let price = gas_price.unwrap_or(reference_gas_price); let budget = gas_budget.unwrap_or(max_tx_gas); let owner = gas_sponsor.unwrap_or(sender); - // Payment might be empty here, but it's fine we'll have to deal with it later after reading all the input objects. + // Payment might be empty here, but it's fine we'll have to deal with it later + // after reading all the input objects. 
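
// The gas-defaulting rules described above, restated as a hypothetical
// standalone helper: a missing gas price falls back to the epoch's
// reference gas price, and a missing budget falls back to the
// protocol's maximum transaction gas.
fn dev_inspect_gas_defaults(
    gas_price: Option<u64>,
    reference_gas_price: u64,
    gas_budget: Option<u64>,
    max_tx_gas: u64,
) -> (u64, u64) {
    (
        gas_price.unwrap_or(reference_gas_price),
        gas_budget.unwrap_or(max_tx_gas),
    )
}
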
let payment = gas_objects.unwrap_or_default(); let transaction = TransactionData::V1(TransactionDataV1 { kind: transaction_kind.clone(), @@ -1922,8 +1944,9 @@ impl AuthorityState { }; let (gas_status, checked_input_objects) = if skip_checks { - // If we are skipping checks, then we call the check_dev_inspect_input function which will perform - // only lightweight checks on the transaction input. And if the gas field is empty, that means we will + // If we are skipping checks, then we call the check_dev_inspect_input function + // which will perform only lightweight checks on the transaction + // input. And if the gas field is empty, that means we will // use the dummy gas object so we need to add it to the input objects vector. if transaction.gas().is_empty() { input_objects.push(ObjectReadResult::new( @@ -1946,8 +1969,9 @@ impl AuthorityState { (gas_status, checked_input_objects) } else { - // If we are not skipping checks, then we call the check_transaction_input function and its dummy gas - // variant which will perform full fledged checks just like a real transaction execution. + // If we are not skipping checks, then we call the check_transaction_input + // function and its dummy gas variant which will perform full + // fledged checks just like a real transaction execution. if transaction.gas().is_empty() { sui_transaction_checks::check_transaction_input_with_given_gas( epoch_store.protocol_config(), @@ -1985,7 +2009,8 @@ impl AuthorityState { self.get_backing_store().as_ref(), protocol_config, self.metrics.limits_metrics.clone(), - /* expensive checks */ false, + // expensive checks + false, self.certificate_deny_config.certificate_deny_set(), &epoch_store.epoch_start_config().epoch_data().epoch_id(), epoch_store @@ -2101,7 +2126,7 @@ impl AuthorityState { let cur_stake = (**committee).weight(&self.name); if cur_stake > 0 { FAIL_STATE.with_borrow_mut(|fail_state| { - //let (&mut failing_stake, &mut failing_validators) = fail_state; + // let (&mut failing_stake, &mut failing_validators) = fail_state; if fail_state.0 < committee.validity_threshold() { fail_state.0 += cur_stake; fail_state.1.insert(self.name); @@ -2158,16 +2183,23 @@ impl AuthorityState { for (oref, owner, kind) in effects.all_changed_objects() { let id = &oref.0; - // For mutated objects, retrieve old owner and delete old index if there is a owner change. + // For mutated objects, retrieve old owner and delete old index if there is a + // owner change. if let WriteKind::Mutate = kind { let Some(old_version) = modified_at_version.get(id) else { - panic!("tx_digest={:?}, error processing object owner index, cannot find modified at version for mutated object [{id}].", tx_digest); + panic!( + "tx_digest={:?}, error processing object owner index, cannot find modified at version for mutated object [{id}].", + tx_digest + ); }; // When we process the index, the latest object hasn't been written yet so // the old object must be present. let Some(old_object) = self.execution_cache.get_object_by_key(id, *old_version)? 
else { - panic!("tx_digest={:?}, error processing object owner index, cannot find owner for object {:?} at version {:?}", tx_digest, id, old_version); + panic!( + "tx_digest={:?}, error processing object owner index, cannot find owner for object {:?} at version {:?}", + tx_digest, id, old_version + ); }; if old_object.owner != owner { match old_object.owner { @@ -2184,11 +2216,20 @@ impl AuthorityState { match owner { Owner::AddressOwner(addr) => { - // TODO: We can remove the object fetching after we added ObjectType to TransactionEffects + // TODO: We can remove the object fetching after we added ObjectType to + // TransactionEffects let new_object = written.get(id).unwrap_or_else( || panic!("tx_digest={:?}, error processing object owner index, written does not contain object {:?}", tx_digest, id) ); - assert_eq!(new_object.version(), oref.1, "tx_digest={:?} error processing object owner index, object {:?} from written has mismatched version. Actual: {}, expected: {}", tx_digest, id, new_object.version(), oref.1); + assert_eq!( + new_object.version(), + oref.1, + "tx_digest={:?} error processing object owner index, object {:?} from written has mismatched version. Actual: {}, expected: {}", + tx_digest, + id, + new_object.version(), + oref.1 + ); let type_ = new_object .type_() @@ -2211,7 +2252,15 @@ impl AuthorityState { let new_object = written.get(id).unwrap_or_else( || panic!("tx_digest={:?}, error processing object owner index, written does not contain object {:?}", tx_digest, id) ); - assert_eq!(new_object.version(), oref.1, "tx_digest={:?} error processing object owner index, object {:?} from written has mismatched version. Actual: {}, expected: {}", tx_digest, id, new_object.version(), oref.1); + assert_eq!( + new_object.version(), + oref.1, + "tx_digest={:?} error processing object owner index, object {:?} from written has mismatched version. Actual: {}, expected: {}", + tx_digest, + id, + new_object.version(), + oref.1 + ); let Some(df_info) = self .try_create_dynamic_field_info(new_object, written, layout_resolver.as_mut()) @@ -2274,7 +2323,8 @@ impl AuthorityState { Ok(Some(match type_ { DynamicFieldType::DynamicObject => { - // Find the actual object from storage using the object id obtained from the wrapper. + // Find the actual object from storage using the object id obtained from the + // wrapper. // Try to find the object in the written objects first. let (version, digest, object_type) = if let Some(object) = written.get(&object_id) { @@ -2669,9 +2719,9 @@ impl AuthorityState { state } - // Use this method only if one of the trait-specific methods below does not work. - // (For instance if you need an implementation of more than one of these traits - // simultaneously). + // Use this method only if one of the trait-specific methods below does not + // work. (For instance if you need an implementation of more than one of + // these traits simultaneously). pub fn get_execution_cache(&self) -> Arc { self.execution_cache.clone() } @@ -2746,8 +2796,8 @@ impl AuthorityState { } /// Adds certificates to transaction manager for ordered execution. - /// It is unnecessary to persist the certificates into the pending_execution table, - /// because only Narwhal output needs to be persisted. + /// It is unnecessary to persist the certificates into the pending_execution + /// table, because only Narwhal output needs to be persisted. 
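Both the owner-index and dynamic-field paths above follow the same lookup rule: while the index is being built, the latest object has not been written to storage yet, so the transaction's freshly written set must take precedence over the backing store. A hedged sketch of that precedence (stand-in types, not the real sui-core ones):

```rust
use std::collections::HashMap;

type ObjectId = u64;

#[derive(Clone, Debug, PartialEq)]
struct Obj {
    version: u64,
}

// Objects written by the transaction being indexed win over whatever
// version the backing store currently holds.
fn resolve(
    written: &HashMap<ObjectId, Obj>,
    storage: &HashMap<ObjectId, Obj>,
    id: ObjectId,
) -> Option<Obj> {
    written.get(&id).or_else(|| storage.get(&id)).cloned()
}

fn main() {
    let written = HashMap::from([(7, Obj { version: 3 })]);
    let storage = HashMap::from([(7, Obj { version: 2 }), (8, Obj { version: 1 })]);
    // The freshly written version shadows the stored one.
    assert_eq!(resolve(&written, &storage, 7), Some(Obj { version: 3 }));
    // Objects untouched by the transaction come from storage.
    assert_eq!(resolve(&written, &storage, 8), Some(Obj { version: 1 }));
}
```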
pub fn enqueue_certificates_for_execution( &self, certs: Vec, @@ -2903,8 +2953,9 @@ impl AuthorityState { Ok(new_epoch_store) } - /// This is a temporary method to be used when we enable simplified_unwrap_then_delete. - /// It re-accumulates state hash for the new epoch if simplified_unwrap_then_delete is enabled. + /// This is a temporary method to be used when we enable + /// simplified_unwrap_then_delete. It re-accumulates state hash for the + /// new epoch if simplified_unwrap_then_delete is enabled. #[instrument(level = "error", skip_all)] fn maybe_reaccumulate_state_hash( &self, @@ -2936,7 +2987,8 @@ impl AuthorityState { panic!("{}", err); } else { // We cannot panic in production yet because it is known that there are some - // inconsistencies in testnet. We will enable this once we make it balanced again in testnet. + // inconsistencies in testnet. We will enable this once we make it balanced + // again in testnet. warn!("Sui conservation consistency check failed: {}", err); } } else { @@ -3063,11 +3115,12 @@ impl AuthorityState { Ok(()) } - /// Load the current epoch store. This can change during reconfiguration. To ensure that - /// we never end up accessing different epoch stores in a single task, we need to make sure - /// that this is called once per task. Each call needs to be carefully audited to ensure it is - /// the case. This also means we should minimize the number of call-sites. Only call it when - /// there is no way to obtain it from somewhere else. + /// Load the current epoch store. This can change during reconfiguration. To + /// ensure that we never end up accessing different epoch stores in a + /// single task, we need to make sure that this is called once per task. + /// Each call needs to be carefully audited to ensure it is + /// the case. This also means we should minimize the number of call-sites. + /// Only call it when there is no way to obtain it from somewhere else. pub fn load_epoch_store_one_call_per_task(&self) -> Guard> { self.epoch_store.load() } @@ -3675,7 +3728,7 @@ impl AuthorityState { ) -> SuiResult> { let index_store = self.get_indexes()?; - //Get the tx_num from tx_digest + // Get the tx_num from tx_digest let (tx_num, event_num) = if let Some(cursor) = cursor.as_ref() { let tx_seq = index_store.get_transaction_seq(&cursor.tx_digest)?.ok_or( SuiError::TransactionNotFound { @@ -3744,7 +3797,7 @@ impl AuthorityState { error: UserInputError::Unsupported( "This query type is not supported by the full node.".to_string(), ), - }) + }); } }; @@ -3848,9 +3901,10 @@ impl AuthorityState { TransactionStatus::Executed(cert_sig, effects.into_inner(), events), ))); } else { - // The read of effects and read of transaction are not atomic. It's possible that we reverted - // the transaction (during epoch change) in between the above two reads, and we end up - // having effects but not transaction. In this case, we just fall through. + // The read of effects and read of transaction are not atomic. It's possible + // that we reverted the transaction (during epoch change) in + // between the above two reads, and we end up having effects but + // not transaction. In this case, we just fall through. debug!(tx_digest=?transaction_digest, "Signed effects exist but no transaction found"); } } @@ -3863,9 +3917,9 @@ impl AuthorityState { } } - /// Get the signed effects of the given transaction. If the effects was signed in a previous - /// epoch, re-sign it so that the caller is able to form a cert of the effects in the current - /// epoch. 
+    /// Get the signed effects of the given transaction. If the effects were
+    /// signed in a previous epoch, re-sign them so that the caller is able to
+    /// form a cert of the effects in the current epoch.
     #[instrument(level = "trace", skip_all)]
     pub fn get_signed_effects_and_maybe_resign(
         &self,
@@ -3903,23 +3957,28 @@ impl AuthorityState {
         //   - The tx makes it into final checkpoint.
         //   - 2 validators go away and are replaced in the new epoch.
         //   - The new epoch begins.
-        //   - The quorum driver cannot complete the partial effects cert from the previous epoch,
-        //     because it may not be able to reach either of the 2 former validators.
-        //   - But, if the 2 validators that stayed are willing to re-sign the effects in the new
-        //     epoch, the QD can make a new effects cert and return it to the client.
+        //   - The quorum driver cannot complete the partial effects cert from the
+        //     previous epoch, because it may not be able to reach either of the 2 former
+        //     validators.
+        //   - But, if the 2 validators that stayed are willing to re-sign the effects in
+        //     the new epoch, the QD can make a new effects cert and return it to the
+        //     client.
         //
-        // This is a considered a short-term workaround. Eventually, Quorum Driver should be able
-        // to return either an effects certificate, -or- a proof of inclusion in a checkpoint. In
-        // the case above, the Quorum Driver would return a proof of inclusion in the final
+        // This is considered a short-term workaround. Eventually, Quorum Driver
+        // should be able to return either an effects certificate, -or-
+        // a proof of inclusion in a checkpoint. In the case above, the
+        // Quorum Driver would return a proof of inclusion in the final
         // checkpoint, and this code would no longer be necessary.
        //
        // Alternatively, some of the confusion around re-signing could be resolved if
-        // CertifiedTransactionEffects included both the epoch in which the transaction became
-        // final, as well as the epoch at which the effects were certified. In this case, there
-        // would be nothing terribly odd about the validators from epoch N certifying that a
-        // given TX became final in epoch N - 1. The confusion currently arises from the fact that
-        // the epoch field in AuthoritySignInfo is overloaded both to identify the provenance of
-        // the authority's signature, as well as to identify in which epoch the transaction was
+        // CertifiedTransactionEffects included both the epoch in which the transaction
+        // became final, as well as the epoch at which the effects were
+        // certified. In this case, there would be nothing terribly odd
+        // about the validators from epoch N certifying that a
+        // given TX became final in epoch N - 1. The confusion currently arises from the
+        // fact that the epoch field in AuthoritySignInfo is overloaded
+        // both to identify the provenance of the authority's signature,
+        // as well as to identify in which epoch the transaction was
         // executed.
        debug!(
            ?tx_digest,
@@ -3974,13 +4033,17 @@ impl AuthorityState {
         Some((input_coin_objects, written_coin_objects))
     }

-    /// Get the TransactionEnvelope that currently locks the given object, if any.
-    /// Since object locks are only valid for one epoch, we also need the epoch_id in the query.
-    /// Returns UserInputError::ObjectNotFound if no lock records for the given object can be found.
-    /// Returns UserInputError::ObjectVersionUnavailableForConsumption if the object record is at a different version.
-    /// Returns Some(VerifiedEnvelope) if the given ObjectRef is locked by a certain transaction.
-    /// Returns None if the a lock record is initialized for the given ObjectRef but not yet locked by any transaction,
-    /// or cannot find the transaction in transaction table, because of data race etc.
+    /// Get the TransactionEnvelope that currently locks the given object, if
+    /// any. Since object locks are only valid for one epoch, we also need
+    /// the epoch_id in the query. Returns UserInputError::ObjectNotFound if
+    /// no lock records for the given object can be found.
+    /// Returns UserInputError::ObjectVersionUnavailableForConsumption if the
+    /// object record is at a different version.
+    /// Returns Some(VerifiedEnvelope) if the given ObjectRef is locked by a
+    /// certain transaction. Returns None if a lock record is
+    /// initialized for the given ObjectRef but not yet locked by any
+    /// transaction, or the transaction cannot be found in the transaction
+    /// table (because of a data race, etc.).
    #[instrument(level = "trace", skip_all)]
    pub async fn get_transaction_lock(
        &self,
@@ -4021,13 +4084,14 @@ impl AuthorityState {
     }

     /// Ordinarily, protocol upgrades occur when 2f + 1 + (f *
-    /// ProtocolConfig::buffer_stake_for_protocol_upgrade_bps) vote for the upgrade.
+    /// ProtocolConfig::buffer_stake_for_protocol_upgrade_bps) vote for the
+    /// upgrade.
     ///
-    /// This method can be used to dynamic adjust the amount of buffer. If set to 0, the upgrade
-    /// will go through with only 2f+1 votes.
+    /// This method can be used to dynamically adjust the amount of buffer. If
+    /// set to 0, the upgrade will go through with only 2f+1 votes.
     ///
-    /// IMPORTANT: If this is used, it must be used on >=2f+1 validators (all should have the same
-    /// value), or you risk halting the chain.
+    /// IMPORTANT: If this is used, it must be used on >=2f+1 validators (all
+    /// should have the same value), or you risk halting the chain.
     pub fn set_override_protocol_upgrade_buffer_stake(
         &self,
         expected_epoch: EpochId,
@@ -4061,8 +4125,9 @@ impl AuthorityState {
         epoch_store.clear_override_protocol_upgrade_buffer_stake()
     }

-    /// Get the set of system packages that are compiled in to this build, if those packages are
-    /// compatible with the current versions of those packages on-chain.
+    /// Get the set of system packages that are compiled into this build, if
+    /// those packages are compatible with the current versions of those
+    /// packages on-chain.
     pub async fn get_available_system_packages(
         &self,
         binary_config: &BinaryConfig,
@@ -4101,19 +4166,22 @@ impl AuthorityState {
         results
     }

-    /// Return the new versions, module bytes, and dependencies for the packages that have been
-    /// committed to for a framework upgrade, in `system_packages`. Loads the module contents from
-    /// the binary, and performs the following checks:
+    /// Return the new versions, module bytes, and dependencies for the packages
+    /// that have been committed to for a framework upgrade, in
+    /// `system_packages`. Loads the module contents from the binary, and
+    /// performs the following checks:
     ///
-    /// - Whether its contents matches what is on-chain already, in which case no upgrade is
-    ///   required, and its contents are omitted from the output.
-    /// - Whether the contents in the binary can form a package whose digest matches the input,
-    ///   meaning the framework will be upgraded, and this authority can satisfy that upgrade, in
-    ///   which case the contents are included in the output.
+    /// - Whether its contents match what is on-chain already, in which case
+    ///   no upgrade is required, and its contents are omitted from the output.
+    /// - Whether the contents in the binary can form a package whose digest
+    ///   matches the input, meaning the framework will be upgraded, and this
+    ///   authority can satisfy that upgrade, in which case the contents are
+    ///   included in the output.
     ///
-    /// If the current version of the framework can't be loaded, the binary does not contain the
-    /// bytes for that framework ID, or the resulting package fails the digest check, `None` is
-    /// returned indicating that this authority cannot run the upgrade that the network voted on.
+    /// If the current version of the framework can't be loaded, the binary does
+    /// not contain the bytes for that framework ID, or the resulting
+    /// package fails the digest check, `None` is returned, indicating that
+    /// this authority cannot run the upgrade that the network voted on.
    async fn get_system_package_bytes(
        &self,
        system_packages: Vec,
@@ -4197,8 +4265,8 @@ impl AuthorityState {
             buffer_stake_bps = 10000;
         }

-        // For each validator, gather the protocol version and system packages that it would like
-        // to upgrade to in the next epoch.
+        // For each validator, gather the protocol version and system packages that it
+        // would like to upgrade to in the next epoch.
         let mut desired_upgrades: Vec<_> = capabilities
             .into_iter()
             .filter_map(|mut cap| {
@@ -4217,15 +4285,16 @@ impl AuthorityState {
                 );

                 // A validator that only supports the current protocol version is also voting
-                // against any change, because framework upgrades always require a protocol version
-                // bump.
+                // against any change, because framework upgrades always require a protocol
+                // version bump.
                 cap.supported_protocol_versions
                     .is_version_supported(proposed_protocol_version)
                     .then_some((cap.available_system_packages, cap.authority))
             })
             .collect();

-        // There can only be one set of votes that have a majority, find one if it exists.
+        // There can only be one set of votes that has a majority; find one if it
+        // exists.
        desired_upgrades.sort();
        desired_upgrades
            .into_iter()
@@ -4363,16 +4432,18 @@ impl AuthorityState {
         Some(tx)
     }

-    /// Creates and execute the advance epoch transaction to effects without committing it to the database.
-    /// The effects of the change epoch tx are only written to the database after a certified checkpoint has been
+    /// Creates and executes the advance epoch transaction to effects without
+    /// committing it to the database. The effects of the change epoch tx
+    /// are only written to the database after a certified checkpoint has been
     /// formed and executed by CheckpointExecutor.
     ///
-    /// When a framework upgraded has been decided on, but the validator does not have the new
-    /// versions of the packages locally, the validator cannot form the ChangeEpochTx. In this case
-    /// it returns Err, indicating that the checkpoint builder should give up trying to make the
-    /// final checkpoint. As long as the network is able to create a certified checkpoint (which
-    /// should be ensured by the capabilities vote), it will arrive via state sync and be executed
-    /// by CheckpointExecutor.
+    /// When a framework upgrade has been decided on, but the validator does
+    /// not have the new versions of the packages locally, the validator
+    /// cannot form the ChangeEpochTx. In this case it returns Err,
+    /// indicating that the checkpoint builder should give up trying to make the
+    /// final checkpoint.
As long as the network is able to create a certified + /// checkpoint (which should be ensured by the capabilities vote), it + /// will arrive via state sync and be executed by CheckpointExecutor. #[instrument(level = "error", skip_all)] pub async fn create_and_execute_advance_epoch_tx( &self, @@ -4408,8 +4479,9 @@ impl AuthorityState { buffer_stake_bps, ); - // since system packages are created during the current epoch, they should abide by the - // rules of the current epoch, including the current epoch's max Move binary format version + // since system packages are created during the current epoch, they should abide + // by the rules of the current epoch, including the current epoch's max + // Move binary format version let config = epoch_store.protocol_config(); let binary_config = to_binary_config(config); let Some(next_epoch_system_package_bytes) = self @@ -4423,12 +4495,13 @@ impl AuthorityState { ); // the checkpoint builder will keep retrying forever when it hits this error. // Eventually, one of two things will happen: - // - The operator will upgrade this binary to one that has the new packages locally, - // and this function will succeed. - // - The final checkpoint will be certified by other validators, we will receive it via - // state sync, and execute it. This will upgrade the framework packages, reconfigure, - // and most likely shut down in the new epoch (this validator likely doesn't support - // the new protocol version, or else it should have had the packages.) + // - The operator will upgrade this binary to one that has the new packages + // locally, and this function will succeed. + // - The final checkpoint will be certified by other validators, we will receive + // it via state sync, and execute it. This will upgrade the framework + // packages, reconfigure, and most likely shut down in the new epoch (this + // validator likely doesn't support the new protocol version, or else it + // should have had the packages.) return Err(anyhow!( "missing system packages: cannot form ChangeEpochTx" )); @@ -4486,8 +4559,9 @@ impl AuthorityState { fail_point_async!("change_epoch_tx_delay"); let _tx_lock = epoch_store.acquire_tx_lock(tx_digest).await; - // The tx could have been executed by state sync already - if so simply return an error. - // The checkpoint builder will shortly be terminated by reconfiguration anyway. + // The tx could have been executed by state sync already - if so simply return + // an error. The checkpoint builder will shortly be terminated by + // reconfiguration anyway. if self .execution_cache .is_tx_already_executed(tx_digest) @@ -4521,9 +4595,9 @@ impl AuthorityState { let system_obj = get_sui_system_state(&temporary_store.written) .expect("change epoch tx must write to system object"); - // We must write tx and effects to the state sync tables so that state sync is able to - // deliver to the transaction to CheckpointExecutor after it is included in a certified - // checkpoint. + // We must write tx and effects to the state sync tables so that state sync is + // able to deliver to the transaction to CheckpointExecutor after it is + // included in a certified checkpoint. self.execution_cache .insert_transaction_and_effects(&tx, &effects) .map_err(|err| { @@ -4542,7 +4616,8 @@ impl AuthorityState { } /// This function is called at the very end of the epoch. - /// This step is required before updating new epoch in the db and calling reopen_epoch_db. + /// This step is required before updating new epoch in the db and calling + /// reopen_epoch_db. 
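The upgrade-vote arithmetic referenced a few hunks above (2f + 1 plus a buffer fraction of f, with the buffer expressed in basis points) is compact enough to work through. A hedged sketch, assuming the usual bps denominator of 10,000 and the normalized total stake from the Committee docs; `upgrade_threshold` is an illustrative helper, not a real API:

```rust
// required = 2f + 1 + (f * buffer_stake_bps / 10_000)
fn upgrade_threshold(total_stake: u64, buffer_stake_bps: u64) -> u64 {
    let f = (total_stake - 1) / 3; // maximum faulty stake
    let quorum = 2 * f + 1; // the ordinary 2f + 1 quorum
    quorum + f * buffer_stake_bps / 10_000 // extra safety buffer
}

fn main() {
    // With the buffer overridden to 0, a bare quorum suffices.
    assert_eq!(upgrade_threshold(10_000, 0), 6_667);
    // A 5_000 bps buffer demands roughly half of f on top of the quorum.
    assert_eq!(upgrade_threshold(10_000, 5_000), 8_333);
}
```

Setting the buffer to 0 reproduces the "upgrade goes through with only 2f+1 votes" behavior that set_override_protocol_upgrade_buffer_stake documents.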
    #[instrument(level = "error", skip_all)]
    async fn revert_uncommitted_epoch_transactions(
        &self,
@@ -4551,12 +4626,14 @@ impl AuthorityState {
         {
             let state = epoch_store.get_reconfig_state_write_lock_guard();
             if state.should_accept_user_certs() {
-                // Need to change this so that consensus adapter do not accept certificates from user.
-                // This can happen if our local validator did not initiate epoch change locally,
-                // but 2f+1 nodes already concluded the epoch.
+                // Need to change this so that the consensus adapter does not accept
+                // certificates from users. This can happen if our local validator did not
+                // initiate epoch change locally, but 2f+1 nodes already concluded the
+                // epoch.
                 //
                 // This lock is essentially a barrier for
-                // `epoch_store.pending_consensus_certificates` table we are reading on the line after this block
+                // the `epoch_store.pending_consensus_certificates` table, which we read on
+                // the line after this block.
                 epoch_store.close_user_certs(state);
             }
             // lock is dropped here
@@ -4569,7 +4646,10 @@ impl AuthorityState {
         );
         for digest in pending_certificates {
             if epoch_store.is_transaction_executed_in_checkpoint(&digest)? {
-                info!("Not reverting pending consensus transaction {:?} - it was included in checkpoint", digest);
+                info!(
+                    "Not reverting pending consensus transaction {:?} - it was included in checkpoint",
+                    digest
+                );
                 continue;
             }
             info!("Reverting {:?} at the end of epoch", digest);
@@ -4659,7 +4739,8 @@ impl AuthorityState {
             .await
     }

-    /// NOTE: this function is only to be used for fuzzing and testing. Never use in prod
+    /// NOTE: this function is only to be used for fuzzing and testing. Never
+    /// use it in prod.
    pub async fn insert_objects_unsafe_for_testing_only(&self, objects: &[Object]) -> SuiResult {
        self.execution_cache.bulk_insert_genesis_objects(objects)?;
        self.execution_cache
@@ -4736,21 +4817,28 @@ impl RandomnessRoundReceiver {
         let authority_state = self.authority_state.clone();
         spawn_monitored_task!(async move {
-            // Wait for transaction execution in a separate task, to avoid deadlock in case of
-            // out-of-order randomness generation. (Each RandomnessStateUpdate depends on the
-            // output of the RandomnessStateUpdate from the previous round.)
+            // Wait for transaction execution in a separate task, to avoid deadlock in case
+            // of out-of-order randomness generation. (Each
+            // RandomnessStateUpdate depends on the output of the
+            // RandomnessStateUpdate from the previous round.)
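The deadlock-avoidance move in the comment above — park the wait for round N's effects in its own task so the receiver loop can keep accepting later rounds — can be modeled with a tokio watch channel. This is illustrative only; the real code waits via notify_read_executed_effects, and the sketch assumes tokio with the rt, macros, and sync features:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // Highest round whose effects have been "executed" so far.
    let (tx, rx) = watch::channel(0u64);

    let handles: Vec<_> = (1..=3u64)
        .map(|round| {
            let mut rx = rx.clone();
            tokio::spawn(async move {
                // Blocks only this task, never the round receiver itself,
                // even if rounds were handed to us out of order.
                while *rx.borrow_and_update() < round {
                    rx.changed().await.expect("sender alive");
                }
                println!("round {round} effects available");
            })
        })
        .collect();

    // Simulate execution completing in round order.
    for round in 1..=3u64 {
        tx.send(round).expect("receivers alive");
    }
    for handle in handles {
        handle.await.unwrap();
    }
}
```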
            let Ok(mut effects) = authority_state
                .execution_cache
                .notify_read_executed_effects(&[digest])
                .await
             else {
-                panic!("failed to get effects for randomness state update transaction at epoch {epoch}, round {round}");
+                panic!(
+                    "failed to get effects for randomness state update transaction at epoch {epoch}, round {round}"
+                );
             };
             let effects = effects.pop().expect("should return effects");
             if *effects.status() != ExecutionStatus::Success {
-                panic!("failed to execute randomness state update transaction at epoch {epoch}, round {round}: {effects:?}");
+                panic!(
+                    "failed to execute randomness state update transaction at epoch {epoch}, round {round}: {effects:?}"
+                );
             }
-            debug!("successfully executed randomness state update transaction at epoch {epoch}, round {round}");
+            debug!(
+                "successfully executed randomness state update transaction at epoch {epoch}, round {round}"
+            );
         });
     }
 }
@@ -4880,12 +4968,17 @@ impl TransactionKeyValueStoreTrait for AuthorityState {

 #[cfg(msim)]
 pub mod framework_injection {
+    use std::{
+        cell::RefCell,
+        collections::{BTreeMap, BTreeSet},
+    };
+
     use move_binary_format::CompiledModule;
-    use std::collections::BTreeMap;
-    use std::{cell::RefCell, collections::BTreeSet};
     use sui_framework::{BuiltInFramework, SystemPackage};
-    use sui_types::base_types::{AuthorityName, ObjectID};
-    use sui_types::is_system_package;
+    use sui_types::{
+        base_types::{AuthorityName, ObjectID},
+        is_system_package,
+    };

     type FrameworkOverrideConfig = BTreeMap;

@@ -4964,7 +5057,8 @@ pub mod framework_injection {
             .dependencies()
             .to_vec()
         } else {
-            // Assume that entirely new injected packages depend on all existing system packages.
+            // Assume that entirely new injected packages depend on all existing system
+            // packages.
             BuiltInFramework::all_package_ids()
         };
         Some(SystemPackage {
@@ -5087,8 +5181,9 @@ impl NodeStateDump {
             }
         }

-        // Packages read at runtime, which were not previously loaded into the temoorary store
-        // Some packages may be fetched at runtime and wont show up in input objects
+        // Packages read at runtime, which were not previously loaded into the temporary
+        // store. Some packages may be fetched at runtime and won't show up in
+        // input objects.
         let mut runtime_reads = Vec::new();
         for obj in inner_temporary_store
             .runtime_packages_loaded_from_db
diff --git a/crates/sui-core/src/authority/authority_notify_read.rs b/crates/sui-core/src/authority/authority_notify_read.rs
index 95ca3b57dca..4804593b6be 100644
--- a/crates/sui-core/src/authority/authority_notify_read.rs
+++ b/crates/sui-core/src/authority/authority_notify_read.rs
@@ -2,18 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0

 use async_trait::async_trait;
-use sui_types::base_types::{TransactionDigest, TransactionEffectsDigest};
-use sui_types::effects::TransactionEffects;
-use sui_types::error::SuiResult;
+use sui_types::{
+    base_types::{TransactionDigest, TransactionEffectsDigest},
+    effects::TransactionEffects,
+    error::SuiResult,
+};

 #[async_trait]
 pub trait EffectsNotifyRead: Send + Sync + 'static {
     /// This method reads executed transaction effects from database.
-    /// If effects are not available immediately (i.e. haven't been executed yet),
-    /// the method blocks until they are persisted in the database.
+    /// If effects are not available immediately (i.e. haven't been executed
+    /// yet), the method blocks until they are persisted in the database.
/// - /// This method **does not** schedule transactions for execution - it is responsibility of the caller - /// to schedule transactions for execution before calling this method. + /// This method **does not** schedule transactions for execution - it is + /// responsibility of the caller to schedule transactions for execution + /// before calling this method. async fn notify_read_executed_effects( &self, digests: Vec, diff --git a/crates/sui-core/src/authority/authority_per_epoch_store.rs b/crates/sui-core/src/authority/authority_per_epoch_store.rs index 523c61f06f3..1e3ef6a9f21 100644 --- a/crates/sui-core/src/authority/authority_per_epoch_store.rs +++ b/crates/sui-core/src/authority/authority_per_epoch_store.rs @@ -1,109 +1,122 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}, + future::Future, + path::{Path, PathBuf}, + str::FromStr, + sync::Arc, +}; + use arc_swap::ArcSwapOption; use enum_dispatch::enum_dispatch; use fastcrypto::groups::bls12381; -use fastcrypto_tbls::dkg; -use fastcrypto_tbls::nodes::PartyId; -use fastcrypto_zkp::bn254::zk_login::{JwkId, OIDCProvider, JWK}; -use fastcrypto_zkp::bn254::zk_login_api::ZkLoginEnv; -use futures::future::{join_all, select, Either}; -use futures::FutureExt; -use itertools::{izip, Itertools}; -use narwhal_executor::ExecutionIndices; -use parking_lot::RwLock; -use parking_lot::{Mutex, RwLockReadGuard, RwLockWriteGuard}; -use rocksdb::Options; -use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}; -use std::future::Future; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use sui_config::node::ExpensiveSafetyCheckConfig; -use sui_types::accumulator::Accumulator; -use sui_types::authenticator_state::{get_authenticator_state, ActiveJwk}; -use sui_types::base_types::{AuthorityName, EpochId, ObjectID, SequenceNumber, TransactionDigest}; -use sui_types::base_types::{ConciseableName, ObjectRef}; -use sui_types::committee::Committee; -use sui_types::committee::CommitteeTrait; -use sui_types::crypto::{AuthoritySignInfo, AuthorityStrongQuorumSignInfo, RandomnessRound}; -use sui_types::digests::ChainIdentifier; -use sui_types::error::{SuiError, SuiResult}; -use sui_types::signature::GenericSignature; -use sui_types::storage::InputKey; -use sui_types::transaction::{ - AuthenticatorStateUpdate, CertifiedTransaction, InputObjectKind, SenderSignedData, Transaction, - TransactionDataAPI, TransactionKey, TransactionKind, VerifiedCertificate, - VerifiedSignedTransaction, VerifiedTransaction, -}; -use tokio::sync::OnceCell; -use tracing::{debug, error, info, instrument, trace, warn}; -use typed_store::rocks::{read_size_from_env, ReadWriteOptions}; -use typed_store::{ - rocks::{default_db_options, DBBatch, DBMap, DBOptions, MetricConf}, - traits::{TableSummary, TypedStoreDebug}, - TypedStoreError, -}; - -use super::authority_store_tables::ENV_VAR_LOCKS_BLOCK_CACHE_SIZE; -use super::epoch_start_configuration::EpochStartConfigTrait; -use crate::authority::epoch_start_configuration::{EpochFlag, EpochStartConfiguration}; -use crate::authority::ResolverWrapper; -use crate::checkpoints::{ - BuilderCheckpointSummary, CheckpointHeight, CheckpointServiceNotify, EpochStats, - PendingCheckpoint, PendingCheckpointInfo, PendingCheckpointV2, PendingCheckpointV2Contents, -}; - -use crate::authority::shared_object_version_manager::{ - AssignedTxAndVersions, ConsensusSharedObjVerAssignment, SharedObjVerManager, +use 
fastcrypto_tbls::{dkg, nodes::PartyId}; +use fastcrypto_zkp::bn254::{ + zk_login::{JwkId, OIDCProvider, JWK}, + zk_login_api::ZkLoginEnv, }; -use crate::consensus_handler::{ - SequencedConsensusTransaction, SequencedConsensusTransactionKey, - SequencedConsensusTransactionKind, VerifiedSequencedConsensusTransaction, +use futures::{ + future::{join_all, select, Either}, + FutureExt, }; -use crate::epoch::epoch_metrics::EpochMetrics; -use crate::epoch::randomness::{RandomnessManager, RandomnessReporter}; -use crate::epoch::reconfiguration::ReconfigState; -use crate::execution_cache::{ExecutionCache, ExecutionCacheRead}; -use crate::module_cache_metrics::ResolverMetrics; -use crate::post_consensus_tx_reorder::PostConsensusTxReorder; -use crate::signature_verifier::*; -use crate::stake_aggregator::{GenericMultiStakeAggregator, StakeAggregator}; +use itertools::{izip, Itertools}; use move_bytecode_utils::module_cache::SyncModuleCache; -use mysten_common::sync::notify_once::NotifyOnce; -use mysten_common::sync::notify_read::NotifyRead; +use mysten_common::sync::{notify_once::NotifyOnce, notify_read::NotifyRead}; use mysten_metrics::monitored_scope; +use narwhal_executor::ExecutionIndices; use narwhal_types::{Round, TimestampMs}; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use prometheus::IntCounter; -use std::str::FromStr; +use rocksdb::Options; +use serde::{Deserialize, Serialize}; +use sui_config::node::ExpensiveSafetyCheckConfig; use sui_execution::{self, Executor}; use sui_macros::fail_point; use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; use sui_storage::mutex_table::{MutexGuard, MutexTable}; -use sui_types::effects::TransactionEffects; -use sui_types::executable_transaction::{ - TrustedExecutableTransaction, VerifiedExecutableTransaction, +use sui_types::{ + accumulator::Accumulator, + authenticator_state::{get_authenticator_state, ActiveJwk}, + base_types::{ + AuthorityName, ConciseableName, EpochId, ObjectID, ObjectRef, SequenceNumber, + TransactionDigest, + }, + committee::{Committee, CommitteeTrait}, + crypto::{AuthoritySignInfo, AuthorityStrongQuorumSignInfo, RandomnessRound}, + digests::ChainIdentifier, + effects::TransactionEffects, + error::{SuiError, SuiResult}, + executable_transaction::{TrustedExecutableTransaction, VerifiedExecutableTransaction}, + message_envelope::TrustedEnvelope, + messages_checkpoint::{ + CheckpointContents, CheckpointSequenceNumber, CheckpointSignatureMessage, CheckpointSummary, + }, + messages_consensus::{ + check_total_jwk_size, AuthorityCapabilities, ConsensusTransaction, ConsensusTransactionKey, + ConsensusTransactionKind, + }, + signature::GenericSignature, + storage::{GetSharedLocks, InputKey}, + sui_system_state::epoch_start_sui_system_state::{ + EpochStartSystemState, EpochStartSystemStateTrait, + }, + transaction::{ + AuthenticatorStateUpdate, CertifiedTransaction, InputObjectKind, SenderSignedData, + Transaction, TransactionDataAPI, TransactionKey, TransactionKind, VerifiedCertificate, + VerifiedSignedTransaction, VerifiedTransaction, + }, }; -use sui_types::message_envelope::TrustedEnvelope; -use sui_types::messages_checkpoint::{ - CheckpointContents, CheckpointSequenceNumber, CheckpointSignatureMessage, CheckpointSummary, +use tap::TapOptional; +use tokio::{sync::OnceCell, time::Instant}; +use tracing::{debug, error, info, instrument, trace, warn}; +use typed_store::{ + retry_transaction_forever, + rocks::{ + default_db_options, read_size_from_env, DBBatch, DBMap, DBOptions, MetricConf, + 
ReadWriteOptions, + }, + traits::{TableSummary, TypedStoreDebug}, + Map, TypedStoreError, }; -use sui_types::messages_consensus::{ - check_total_jwk_size, AuthorityCapabilities, ConsensusTransaction, ConsensusTransactionKey, - ConsensusTransactionKind, +use typed_store_derive::DBMapUtils; + +use super::{ + authority_store_tables::ENV_VAR_LOCKS_BLOCK_CACHE_SIZE, + epoch_start_configuration::EpochStartConfigTrait, }; -use sui_types::storage::GetSharedLocks; -use sui_types::sui_system_state::epoch_start_sui_system_state::{ - EpochStartSystemState, EpochStartSystemStateTrait, +use crate::{ + authority::{ + epoch_start_configuration::{EpochFlag, EpochStartConfiguration}, + shared_object_version_manager::{ + AssignedTxAndVersions, ConsensusSharedObjVerAssignment, SharedObjVerManager, + }, + ResolverWrapper, + }, + checkpoints::{ + BuilderCheckpointSummary, CheckpointHeight, CheckpointServiceNotify, EpochStats, + PendingCheckpoint, PendingCheckpointInfo, PendingCheckpointV2, PendingCheckpointV2Contents, + }, + consensus_handler::{ + SequencedConsensusTransaction, SequencedConsensusTransactionKey, + SequencedConsensusTransactionKind, VerifiedSequencedConsensusTransaction, + }, + epoch::{ + epoch_metrics::EpochMetrics, + randomness::{RandomnessManager, RandomnessReporter}, + reconfiguration::ReconfigState, + }, + execution_cache::{ExecutionCache, ExecutionCacheRead}, + module_cache_metrics::ResolverMetrics, + post_consensus_tx_reorder::PostConsensusTxReorder, + signature_verifier::*, + stake_aggregator::{GenericMultiStakeAggregator, StakeAggregator}, }; -use tap::TapOptional; -use tokio::time::Instant; -use typed_store::{retry_transaction_forever, Map}; -use typed_store_derive::DBMapUtils; /// The key where the latest consensus index is stored in the database. -// TODO: Make a single table (e.g., called `variables`) storing all our lonely variables in one place. +// TODO: Make a single table (e.g., called `variables`) storing all our lonely +// variables in one place. const LAST_CONSENSUS_STATS_ADDR: u64 = 0; const RECONFIG_STATE_INDEX: u64 = 0; const OVERRIDE_PROTOCOL_UPGRADE_BUFFER_STAKE_INDEX: u64 = 0; @@ -113,9 +126,10 @@ pub const EPOCH_DB_PREFIX: &str = "epoch_"; pub(crate) type PkG = bls12381::G2Element; pub(crate) type EncG = bls12381::G2Element; -// CertLockGuard and CertTxGuard are functionally identical right now, but we retain a distinction -// anyway. If we need to support distributed object storage, having this distinction will be -// useful, as we will most likely have to re-implement a retry / write-ahead-log at that point. +// CertLockGuard and CertTxGuard are functionally identical right now, but we +// retain a distinction anyway. If we need to support distributed object +// storage, having this distinction will be useful, as we will most likely have +// to re-implement a retry / write-ahead-log at that point. pub struct CertLockGuard(MutexGuard); pub struct CertTxGuard(CertLockGuard); @@ -127,17 +141,20 @@ impl CertTxGuard { type JwkAggregator = GenericMultiStakeAggregator<(JwkId, JWK), true>; pub enum ConsensusCertificateResult { - /// The consensus message was ignored (e.g. because it has already been processed). + /// The consensus message was ignored (e.g. because it has already been + /// processed). 
    Ignored,
    /// An executable transaction (can be a user tx or a system tx)
    SuiTransaction(VerifiedExecutableTransaction),
-    /// The transaction should be re-processed at a future commit, specified by the DeferralKey
+    /// The transaction should be re-processed at a future commit, specified by
+    /// the DeferralKey
    Deferred(DeferralKey),
    /// A message was processed which updates randomness state.
    RandomnessConsensusMessage,
    /// Everything else, e.g. AuthorityCapabilities, CheckpointSignatures, etc.
    ConsensusMessage,
-    /// A system message in consensus was ignored (e.g. because of end of epoch).
+    /// A system message in consensus was ignored (e.g. because of end of
+    /// epoch).
    IgnoredSystem,
}

@@ -248,9 +265,10 @@ pub struct AuthorityPerEpochStore {
     reconfig_state_mem: RwLock,
     consensus_notify_read: NotifyRead,

-    /// Batch verifier for certificates - also caches certificates and tx sigs that are known to have
-    /// valid signatures. Lives in per-epoch store because the caching/batching is only valid
-    /// within for certs within the current epoch.
+    /// Batch verifier for certificates - also caches certificates and tx sigs
+    /// that are known to have valid signatures. Lives in the per-epoch store
+    /// because the caching/batching is only valid for certs within
+    /// the current epoch.
    pub(crate) signature_verifier: SignatureVerifier,

    pub(crate) checkpoint_state_notify_read: NotifyRead,
@@ -262,31 +280,37 @@ pub struct AuthorityPerEpochStore {
     /// Used to notify all epoch specific tasks that user certs are closed.
     user_certs_closed_notify: NotifyOnce,

-    /// This lock acts as a barrier for tasks that should not be executed in parallel with reconfiguration
-    /// See comments in AuthorityPerEpochStore::epoch_terminated() on how this is used
-    /// Crash recovery note: we write next epoch in the database first, and then use this lock to
-    /// wait for in-memory tasks for the epoch to finish. If node crashes at this stage validator
-    /// will start with the new epoch(and will open instance of per-epoch store for a new epoch).
+    /// This lock acts as a barrier for tasks that should not be executed in
+    /// parallel with reconfiguration. See comments in
+    /// AuthorityPerEpochStore::epoch_terminated() on how this is used. Crash
+    /// recovery note: we write the next epoch in the database first, and then
+    /// use this lock to wait for in-memory tasks for the epoch to finish. If
+    /// the node crashes at this stage, the validator will start with the new
+    /// epoch (and will open an instance of the per-epoch store for the new epoch).
    epoch_alive: tokio::sync::RwLock,
    end_of_publish: Mutex>,
    /// Pending certificates that we are waiting to be sequenced by consensus.
-    /// This is an in-memory 'index' of a AuthorityPerEpochTables::pending_consensus_transactions.
-    /// We need to keep track of those in order to know when to send EndOfPublish message.
-    /// Lock ordering: this is a 'leaf' lock, no other locks should be acquired in the scope of this lock
-    /// In particular, this lock is always acquired after taking read or write lock on reconfig state
+    /// This is an in-memory 'index' of
+    /// AuthorityPerEpochTables::pending_consensus_transactions. We need to
+    /// keep track of them in order to know when to send the EndOfPublish message.
+    /// Lock ordering: this is a 'leaf' lock, no other locks should be acquired
+    /// in the scope of this lock. In particular, this lock is always
+    /// acquired after taking a read or write lock on the reconfig state.
    pending_consensus_certificates: Mutex>,

-    /// MutexTable for transaction locks (prevent concurrent execution of same transaction)
+    /// MutexTable for transaction locks (prevents concurrent execution of the
+    /// same transaction)
    mutex_table: MutexTable,

-    /// The moment when the current epoch started locally on this validator. Note that this
-    /// value could be skewed if the node crashed and restarted in the middle of the epoch. That's
-    /// ok because this is used for metric purposes and we could tolerate some skews occasionally.
+    /// The moment when the current epoch started locally on this validator.
+    /// Note that this value could be skewed if the node crashed and
+    /// restarted in the middle of the epoch. That's ok because this is used
+    /// for metric purposes and we can tolerate some skew occasionally.
    pub(crate) epoch_open_time: Instant,

-    /// The moment when epoch is closed. We don't care much about crash recovery because it's
-    /// a metric that doesn't have to be available for each epoch, and it's only used during
-    /// the last few seconds of an epoch.
+    /// The moment when the epoch is closed. We don't care much about crash
+    /// recovery because it's a metric that doesn't have to be available for
+    /// each epoch, and it's only used during the last few seconds of an epoch.
    epoch_close_time: RwLock>,
    pub(crate) metrics: Arc,
    epoch_start_configuration: Arc,
@@ -305,10 +329,12 @@ pub struct AuthorityPerEpochStore {
     randomness_reporter: OnceCell,
 }

-/// AuthorityEpochTables contains tables that contain data that is only valid within an epoch.
+/// AuthorityEpochTables contains tables that contain data that is only valid
+/// within an epoch.
 #[derive(DBMapUtils)]
 pub struct AuthorityEpochTables {
-    /// This is map between the transaction digest and transactions found in the `transaction_lock`.
+    /// This is a map between the transaction digest and transactions found in
+    /// the `transaction_lock`.
     #[default_options_override_fn = "signed_transactions_table_default_config"]
     signed_transactions: DBMap>,

@@ -317,54 +343,64 @@ pub struct AuthorityEpochTables {
     /// Map from ObjectRef to transaction locking that object
     #[default_options_override_fn = "owned_object_transaction_locks_table_default_config"]
     owned_object_locked_transactions: DBMap,

-    /// Signatures over transaction effects that were executed in the current epoch.
-    /// Store this to avoid re-signing the same effects twice.
+    /// Signatures over transaction effects that were executed in the current
+    /// epoch. Store this to avoid re-signing the same effects twice.
     effects_signatures: DBMap,

     /// Signatures of transaction certificates that are executed locally.
     pub(crate) transaction_cert_signatures: DBMap,

-    /// The tables below manage shared object locks / versions. There are three ways they can be
-    /// updated:
-    /// 1. (validators only): Upon receiving a certified transaction from consensus, the authority
-    ///    assigns the next version to each shared object of the transaction. The next versions of
-    ///    the shared objects are updated as well.
-    /// 2. (validators only): Upon receiving a new consensus commit, the authority assigns the
-    ///    next version of the randomness state object to an expected future transaction to be
-    ///    generated after the next random value is available. The next version of the randomness
-    ///    state object is updated as well.
-    /// 3.
(fullnodes + validators): Upon receiving a certified effect from state sync, or - /// transaction orchestrator fast execution path, the node assigns the shared object - /// versions from the transaction effect. Next object versions are not updated. + /// The tables below manage shared object locks / versions. There are three + /// ways they can be updated: + /// 1. (validators only): Upon receiving a certified transaction from + /// consensus, the authority + /// assigns the next version to each shared object of the transaction. The + /// next versions of the shared objects are updated as well. + /// 2. (validators only): Upon receiving a new consensus commit, the + /// authority assigns the + /// next version of the randomness state object to an expected future + /// transaction to be generated after the next random value is + /// available. The next version of the randomness state object is + /// updated as well. + /// 3. (fullnodes + validators): Upon receiving a certified effect from + /// state sync, or + /// transaction orchestrator fast execution path, the node assigns the + /// shared object versions from the transaction effect. Next object + /// versions are not updated. /// - /// REQUIRED: all authorities must assign the same shared object versions for each transaction. + /// REQUIRED: all authorities must assign the same shared object versions + /// for each transaction. assigned_shared_object_versions: DBMap>, assigned_shared_object_versions_v2: DBMap>, next_shared_object_versions: DBMap, - /// Certificates that have been received from clients or received from consensus, but not yet - /// executed. Entries are cleared after execution. - /// This table is critical for crash recovery, because usually the consensus output progress - /// is updated after a certificate is committed into this table. + /// Certificates that have been received from clients or received from + /// consensus, but not yet executed. Entries are cleared after + /// execution. This table is critical for crash recovery, because + /// usually the consensus output progress is updated after a certificate + /// is committed into this table. /// - /// In theory, this table may be superseded by storing consensus and checkpoint execution - /// progress. But it is more complex, because it would be necessary to track inflight - /// executions not ordered by indices. For now, tracking inflight certificates as a map + /// In theory, this table may be superseded by storing consensus and + /// checkpoint execution progress. But it is more complex, because it + /// would be necessary to track inflight executions not ordered by + /// indices. For now, tracking inflight certificates as a map /// seems easier. #[default_options_override_fn = "pending_execution_table_default_config"] pub(crate) pending_execution: DBMap, - /// Track which transactions have been processed in handle_consensus_transaction. We must be - /// sure to advance next_shared_object_versions exactly once for each transaction we receive from - /// consensus. But, we may also be processing transactions from checkpoints, so we need to - /// track this state separately. + /// Track which transactions have been processed in + /// handle_consensus_transaction. We must be sure to advance + /// next_shared_object_versions exactly once for each transaction we receive + /// from consensus. But, we may also be processing transactions from + /// checkpoints, so we need to track this state separately. 
     ///
-    /// Entries in this table can be garbage collected whenever we can prove that we won't receive
-    /// another handle_consensus_transaction call for the given digest. This probably means at
-    /// epoch change.
+    /// Entries in this table can be garbage collected whenever we can prove
+    /// that we won't receive another handle_consensus_transaction call for
+    /// the given digest. This probably means at epoch change.
     consensus_message_processed: DBMap,

-    /// Map stores pending transactions that this authority submitted to consensus
+    /// This map stores pending transactions that this authority submitted to
+    /// consensus.
     #[default_options_override_fn = "pending_consensus_transactions_table_default_config"]
     pending_consensus_transactions: DBMap,

@@ -372,14 +408,17 @@ pub struct AuthorityEpochTables {
     #[allow(dead_code)]
     consensus_message_order: DBMap,

-    /// The following table is used to store a single value (the corresponding key is a constant). The value
-    /// represents the index of the latest consensus message this authority processed. This field is written
-    /// by a single process acting as consensus (light) client. It is used to ensure the authority processes
-    /// every message output by consensus (and in the right order).
+    /// The following table is used to store a single value (the corresponding
+    /// key is a constant). The value represents the index of the latest
+    /// consensus message this authority processed. This field is written by
+    /// a single process acting as consensus (light) client. It is used to
+    /// ensure the authority processes every message output by consensus
+    /// (and in the right order).
     last_consensus_index: DBMap,

-    /// The following table is used to store a single value (the corresponding key is a constant). The value
-    /// represents the index of the latest consensus message this authority processed, running hash of
+    /// The following table is used to store a single value (the corresponding
+    /// key is a constant). The value represents the index of the latest
+    /// consensus message this authority processed, running hash of
     /// transactions, and accumulated stats of consensus output.
     /// This field is written by a single process (consensus handler).
     last_consensus_stats: DBMap,

@@ -388,7 +427,8 @@ pub struct AuthorityEpochTables {
     #[allow(dead_code)]
     checkpoint_boundary: DBMap,

-    /// This table contains current reconfiguration state for validator for current epoch
+    /// This table contains the current reconfiguration state for the validator
+    /// for the current epoch.
     reconfig_state: DBMap,

     /// Validators that have sent EndOfPublish message in this epoch
@@ -398,39 +438,46 @@ pub struct AuthorityEpochTables {
     #[allow(dead_code)]
     final_epoch_checkpoint: DBMap,

-    /// This table has information for the checkpoints for which we constructed all the data
-    /// from consensus, but not yet constructed actual checkpoint.
+    /// This table has information for the checkpoints for which we constructed
+    /// all the data from consensus, but have not yet constructed the actual
+    /// checkpoint.
     ///
-    /// Key in this table is the narwhal commit height and not a checkpoint sequence number.
+    /// The key in this table is the narwhal commit height and not a checkpoint
+    /// sequence number.
     ///
-    /// Non-empty list of transactions here might result in empty list when we are forming checkpoint.
-    /// Because we don't want to create checkpoints with empty content(see CheckpointBuilder::write_checkpoint),
+    /// A non-empty list of transactions here might result in an empty list
+    /// when we are forming the checkpoint. Because we don't want to create
+    /// checkpoints with empty content (see CheckpointBuilder::write_checkpoint),
     /// the sequence number of checkpoint does not match height here.
     #[default_options_override_fn = "pending_checkpoints_table_default_config"]
     pending_checkpoints: DBMap,
     #[default_options_override_fn = "pending_checkpoints_table_default_config"]
     pending_checkpoints_v2: DBMap,

-    /// Checkpoint builder maintains internal list of transactions it included in checkpoints here
+    /// Checkpoint builder maintains an internal list of transactions it
+    /// included in checkpoints here.
     builder_digest_to_checkpoint: DBMap,

-    /// Maps non-digest TransactionKeys to the corresponding digest after execution, for use
-    /// by checkpoint builder.
+    /// Maps non-digest TransactionKeys to the corresponding digest after
+    /// execution, for use by checkpoint builder.
     transaction_key_to_digest: DBMap,

     /// Stores pending signatures
-    /// The key in this table is checkpoint sequence number and an arbitrary integer
+    /// The key in this table is a checkpoint sequence number and an arbitrary
+    /// integer.
     pending_checkpoint_signatures:
         DBMap<(CheckpointSequenceNumber, u64), CheckpointSignatureMessage>,

     /// When we see certificate through consensus for the first time, we record
-    /// user signature for this transaction here. This will be included in the checkpoint later.
+    /// user signature for this transaction here. This will be included in the
+    /// checkpoint later.
     user_signatures_for_checkpoints: DBMap>,

     /// This table is not used
     #[allow(dead_code)]
     builder_checkpoint_summary: DBMap,

-    /// Maps sequence number to checkpoint summary, used by CheckpointBuilder to build checkpoint within epoch
+    /// Maps sequence number to checkpoint summary, used by CheckpointBuilder to
+    /// build checkpoints within the epoch.
     builder_checkpoint_summary_v2: DBMap,

     // Maps checkpoint sequence number to an accumulator with accumulated state
@@ -445,43 +492,47 @@ pub struct AuthorityEpochTables {
     /// ProtocolConfig::buffer_stake_for_protocol_upgrade_bps
     override_protocol_upgrade_buffer_stake: DBMap,

-    /// When transaction is executed via checkpoint executor, we store association here
+    /// When a transaction is executed via the checkpoint executor, we store
+    /// the association here.
     pub(crate) executed_transactions_to_checkpoint: DBMap,

-    /// This table is no longer used (can be removed when DBMap supports removing tables)
+    /// This table is no longer used (can be removed when DBMap supports
+    /// removing tables)
     #[allow(dead_code)]
     oauth_provider_jwk: DBMap,

-    /// JWKs that have been voted for by one or more authorities but are not yet active.
+    /// JWKs that have been voted for by one or more authorities but are not yet
+    /// active.
     pending_jwks: DBMap<(AuthorityName, JwkId, JWK), ()>,

-    /// JWKs that are currently available for zklogin authentication, and the round in which they
-    /// became active.
-    /// This would normally be stored as (JwkId, JWK) -> u64, but we need to be able to scan to
-    /// find all Jwks for a given round
+    /// JWKs that are currently available for zklogin authentication, and the
+    /// round in which they became active.
+ /// This would normally be stored as (JwkId, JWK) -> u64, but we need to be + /// able to scan to find all Jwks for a given round active_jwks: DBMap<(u64, (JwkId, JWK)), ()>, /// Transactions that are being deferred until some future time deferred_transactions: DBMap>, - /// This table is no longer used (can be removed when DBMap supports removing tables) + /// This table is no longer used (can be removed when DBMap supports + /// removing tables) #[allow(dead_code)] randomness_rounds_written: DBMap, /// Tables for recording state for RandomnessManager. - /// Records messages processed from other nodes. Updated when receiving a new dkg::Message - /// via consensus. + /// Records messages processed from other nodes. Updated when receiving a + /// new dkg::Message via consensus. pub(crate) dkg_processed_messages: DBMap>, - /// Records messages used to generate a DKG confirmation. Updated when enough DKG - /// messages are received to progress to the next phase. + /// Records messages used to generate a DKG confirmation. Updated when + /// enough DKG messages are received to progress to the next phase. pub(crate) dkg_used_messages: DBMap>, - /// Records confirmations received from other nodes. Updated when receiving a new - /// dkg::Confirmation via consensus. + /// Records confirmations received from other nodes. Updated when receiving + /// a new dkg::Confirmation via consensus. pub(crate) dkg_confirmations: DBMap>, - /// Records the final output of DKG after completion, including the public VSS key and - /// any local private shares. + /// Records the final output of DKG after completion, including the public + /// VSS key and any local private shares. pub(crate) dkg_output: DBMap>, /// RandomnessRound numbers that are still pending generation. pub(crate) randomness_rounds_pending: DBMap, @@ -495,9 +546,10 @@ pub enum DeferralKey { RandomnessDkg { deferred_from_round: Round, }, - // ConsensusRound deferral key requires both the round to which the tx should be deferred (so that - // we can efficiently load all txns that are now ready), and the round from which it has been - // deferred (so that multiple rounds can efficiently defer to the same future round). + // ConsensusRound deferral key requires both the round to which the tx should be deferred (so + // that we can efficiently load all txns that are now ready), and the round from which it + // has been deferred (so that multiple rounds can efficiently defer to the same future + // round). ConsensusRound { future_round: Round, deferred_from_round: Round, @@ -660,8 +712,9 @@ impl AuthorityEpochTables { Ok(()) } - /// WARNING: This method is very subtle and can corrupt the database if used incorrectly. - /// It should only be used in one-off cases or tests after fully understanding the risk. + /// WARNING: This method is very subtle and can corrupt the database if used + /// incorrectly. It should only be used in one-off cases or tests after + /// fully understanding the risk. 
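The ConsensusRound key layout described above — ordered by (future_round, deferred_from_round) — is exactly what turns "load every transaction that is now ready" into one ordered range scan. A toy model over a BTreeMap (stand-in types; the real table is a DBMap and the real keys carry more structure):

```rust
use std::{collections::BTreeMap, ops::Bound};

type Round = u64;

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct ConsensusRoundKey {
    future_round: Round,        // when the tx becomes ready again
    deferred_from_round: Round, // the round that deferred it
}

// Everything with future_round <= current_round sorts at or before the
// sentinel key, so a single range scan picks up all now-ready txns.
fn take_ready(
    deferred: &mut BTreeMap<ConsensusRoundKey, Vec<&'static str>>,
    current_round: Round,
) -> Vec<&'static str> {
    let max_key = ConsensusRoundKey {
        future_round: current_round,
        deferred_from_round: Round::MAX,
    };
    let ready: Vec<_> = deferred
        .range((Bound::Unbounded, Bound::Included(max_key)))
        .map(|(key, _)| *key)
        .collect();
    ready
        .into_iter()
        .flat_map(|key| deferred.remove(&key).unwrap())
        .collect()
}

fn main() {
    let mut deferred = BTreeMap::new();
    deferred.insert(
        ConsensusRoundKey { future_round: 5, deferred_from_round: 3 },
        vec!["tx-a"],
    );
    deferred.insert(
        ConsensusRoundKey { future_round: 7, deferred_from_round: 5 },
        vec!["tx-b"],
    );
    assert_eq!(take_ready(&mut deferred, 5), vec!["tx-a"]);
    assert_eq!(deferred.len(), 1); // tx-b stays parked until round 7
}
```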
pub fn remove_executed_tx_subtle(&self, digest: &TransactionDigest) -> SuiResult { self.executed_transactions_to_checkpoint.remove(digest)?; Ok(()) @@ -836,9 +889,11 @@ impl AuthorityPerEpochStore { let is_validator = committee.authority_index(&name).is_some(); if is_validator { - assert!(epoch_start_configuration - .flags() - .contains(&EpochFlag::InMemoryCheckpointRoots)); + assert!( + epoch_start_configuration + .flags() + .contains(&EpochFlag::InMemoryCheckpointRoots) + ); } let mut jwk_aggregator = JwkAggregator::new(committee.clone()); @@ -894,8 +949,8 @@ impl AuthorityPerEpochStore { self.tables.store(None); } - // Returns true if authenticator state is enabled in the protocol config *and* the - // authenticator state object already exists + // Returns true if authenticator state is enabled in the protocol config *and* + // the authenticator state object already exists pub fn authenticator_state_enabled(&self) -> bool { self.protocol_config().enable_jwk_consensus_updates() && self.authenticator_state_exists() } @@ -956,7 +1011,8 @@ impl AuthorityPerEpochStore { } /// Returns `&Arc` - /// User can treat this `Arc` as `&EpochStartConfiguration`, or clone the Arc to pass as owned object + /// User can treat this `Arc` as `&EpochStartConfiguration`, or clone the + /// Arc to pass as owned object pub fn epoch_start_config(&self) -> &Arc { &self.epoch_start_configuration } @@ -1171,8 +1227,8 @@ impl AuthorityPerEpochStore { Ok(self.tables()?.transaction_cert_signatures.get(tx_digest)?) } - /// Resolves InputObjectKinds into InputKeys, by consulting the shared object version - /// assignment table. + /// Resolves InputObjectKinds into InputKeys, by consulting the shared + /// object version assignment table. pub(crate) fn get_input_object_keys( &self, key: &TransactionKey, @@ -1286,7 +1342,8 @@ impl AuthorityPerEpochStore { Ok(join_all(results).await) } - /// `pending_certificates` table related methods. Should only be used from TransactionManager. + /// `pending_certificates` table related methods. Should only be used from + /// TransactionManager. /// Gets all pending certificates. Used during recovery. pub fn all_pending_execution(&self) -> SuiResult> { @@ -1396,32 +1453,34 @@ impl AuthorityPerEpochStore { .contains(&EpochFlag::ObjectLockSplitTables) } - // For each id in objects_to_init, return the next version for that id as recorded in the - // next_shared_object_versions table. + // For each id in objects_to_init, return the next version for that id as + // recorded in the next_shared_object_versions table. // - // If any ids are missing, then we need to initialize the table. We first check if a previous - // version of that object has been written. If so, then the object was written in a previous - // epoch, and we initialize next_shared_object_versions to that value. If no version of the - // object has yet been written, we initialize the object to the initial version recorded in the - // certificate (which is a function of the lamport version computation of the transaction that - // created the shared object originally - which transaction may not yet have been executed on - // this node). + // If any ids are missing, then we need to initialize the table. We first check + // if a previous version of that object has been written. If so, then the + // object was written in a previous epoch, and we initialize + // next_shared_object_versions to that value. 
If no version of the + // object has yet been written, we initialize the object to the initial version + // recorded in the certificate (which is a function of the lamport version + // computation of the transaction that created the shared object originally + // - which transaction may not yet have been executed on this node). // - // Because all paths that assign shared locks for a shared object transaction call this - // function, it is impossible for parent_sync to be updated before this function completes - // successfully for each affected object id. + // Because all paths that assign shared locks for a shared object transaction + // call this function, it is impossible for parent_sync to be updated before + // this function completes successfully for each affected object id. pub(crate) async fn get_or_init_next_object_versions( &self, objects_to_init: &[(ObjectID, SequenceNumber)], cache_reader: &dyn ExecutionCacheRead, ) -> SuiResult> { let mut ret: HashMap<_, _>; - // Since this can be called from consensus task, we must retry forever - the only other - // option is to panic. It is extremely unlikely that more than 2 retries will be needed, as - // the only two writers are the consensus task and checkpoint execution. + // Since this can be called from consensus task, we must retry forever - the + // only other option is to panic. It is extremely unlikely that more + // than 2 retries will be needed, as the only two writers are the + // consensus task and checkpoint execution. retry_transaction_forever!({ - // This code may still be correct without using a transaction snapshot, but I couldn't - // convince myself of that. + // This code may still be correct without using a transaction snapshot, but I + // couldn't convince myself of that. let tables = self.tables()?; let mut db_transaction = tables.next_shared_object_versions.transaction()?; @@ -1439,8 +1498,9 @@ impl AuthorityPerEpochStore { }) .collect(); - // The common case is that there are no uninitialized versions - this early return will - // happen every time except the first time an object is used in an epoch. + // The common case is that there are no uninitialized versions - this early + // return will happen every time except the first time an object is + // used in an epoch. if uninitialized_objects.is_empty() { // unwrap ok - we already verified that next_versions is not missing any keys. return Ok(izip!(ids, next_versions.into_iter().map(|v| v.unwrap())).collect()); @@ -1561,9 +1621,9 @@ impl AuthorityPerEpochStore { } } - // Transactional DBs do not support range deletes, so we have to delete keys one-by-one. - // This shouldn't be a problem, there should not usually be more than a small handful of - // keys loaded in each round. + // Transactional DBs do not support range deletes, so we have to delete keys + // one-by-one. This shouldn't be a problem, there should not usually be + // more than a small handful of keys loaded in each round. batch.delete_batch(&self.tables()?.deferred_transactions, keys)?; Ok(txns) @@ -1586,9 +1646,10 @@ impl AuthorityPerEpochStore { None } - /// Lock a sequence number for the shared objects of the input transaction based on the effects - /// of that transaction. - /// Used by full nodes who don't listen to consensus, and validators who catch up by state sync. + /// Lock a sequence number for the shared objects of the input transaction + /// based on the effects of that transaction. + /// Used by full nodes who don't listen to consensus, and validators who + /// catch up by state sync. 
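To make the get-or-init rule of get_or_init_next_object_versions above concrete, here is a minimal sketch with a plain map standing in for the next_shared_object_versions table; next_version_from_prior_epoch and the other names are illustrative assumptions, not the real API.

use std::collections::HashMap;

type ObjectIdSketch = u64;
type SequenceNumber = u64;

// For each requested id, return the next shared-object version, initializing
// missing entries as described above: prefer the version implied by a write
// in a previous epoch, otherwise fall back to the initial version recorded in
// the certificate (the lamport version of the creating transaction).
fn get_or_init_next_versions(
    next_shared_object_versions: &mut HashMap<ObjectIdSketch, SequenceNumber>,
    objects_to_init: &[(ObjectIdSketch, SequenceNumber)],
    next_version_from_prior_epoch: impl Fn(ObjectIdSketch) -> Option<SequenceNumber>,
) -> HashMap<ObjectIdSketch, SequenceNumber> {
    objects_to_init
        .iter()
        .map(|&(id, initial_version)| {
            let next = *next_shared_object_versions
                .entry(id)
                .or_insert_with(|| next_version_from_prior_epoch(id).unwrap_or(initial_version));
            (id, next)
        })
        .collect()
}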
// TODO: We should be able to pass in a vector of certs/effects and lock them all at once.
#[instrument(level = "trace", skip_all)]
pub async fn acquire_shared_locks_from_effects(
@@ -1610,8 +1671,8 @@ impl AuthorityPerEpochStore {
Ok(())
}
- /// When submitting a certificate caller **must** provide a ReconfigState lock guard
- /// and verify that it allows new user certificates
+ /// When submitting a certificate, the caller **must** provide a ReconfigState
+ /// lock guard and verify that it allows new user certificates
pub fn insert_pending_consensus_transactions(
&self,
transaction: &ConsensusTransaction,
@@ -1678,7 +1739,8 @@ impl AuthorityPerEpochStore {
}
/// Check whether certificate was processed by consensus.
- /// For shared lock certificates, if this function returns true means shared locks for this certificate are set
+ /// For shared lock certificates, if this function returns true it means the
+ /// shared locks for this certificate are set
pub fn is_tx_cert_consensus_message_processed(
&self,
certificate: &CertifiedTransaction,
@@ -1744,8 +1806,8 @@
.contains_key(authority))
}
- // Converts transaction keys to digests, waiting for digests to become available for any
- // non-digest keys.
+ // Converts transaction keys to digests, waiting for digests to become available
+ // for any non-digest keys.
pub async fn notify_read_executed_digests(
&self,
keys: &[TransactionKey],
@@ -1792,7 +1854,8 @@
.collect())
}
- /// Note: caller usually need to call consensus_message_processed_notify before this call
+ /// Note: callers usually need to call consensus_message_processed_notify
+ /// before this call
pub fn user_signatures_for_checkpoint(
&self,
transactions: &[VerifiedTransaction],
@@ -1881,7 +1944,8 @@
info!("received capabilities {:?}", capabilities);
let authority = &capabilities.authority;
- // Read-compare-write pattern assumes we are only called from the consensus handler task.
+ // Read-compare-write pattern assumes we are only called from the consensus
+ // handler task.
if let Some(cap) = self.tables()?.authority_capabilities.get(authority)? {
if cap.generation >= capabilities.generation {
debug!(
@@ -2060,12 +2124,15 @@
&self.tables()?.pending_execution,
[(*certificate.digest(), certificate.clone().serializable())],
)?;
- // User signatures are written in the same batch as consensus certificate processed flag,
- // which means we won't attempt to insert this twice for the same tx digest
+ // User signatures are written in the same batch as the consensus certificate
+ // processed flag, which means we won't attempt to insert this twice
+ // for the same tx digest
debug_assert!(
!self
.tables()?
.user_signatures_for_checkpoints
.contains_key(certificate.digest())?
+ );
batch.insert_batch(
&self.tables()?.user_signatures_for_checkpoints,
[(*certificate.digest(), certificate.tx_signatures().to_vec())],
)?;
@@ -2122,15 +2189,16 @@
}
/// This function executes given future until epoch_terminated is called
- /// If future finishes before epoch_terminated is called, future result is returned
- /// If epoch_terminated is called before future is resolved, error is returned
+ /// If the future finishes before epoch_terminated is called, the future's
+ /// result is returned. If epoch_terminated is called before the future is
+ /// resolved, an error is returned
///
- /// In addition to the early termination guarantee, this function also prevents epoch_terminated()
- /// if future is being executed.
+ /// In addition to the early termination guarantee, this function also
+ /// prevents epoch_terminated() while the future is being executed.
#[allow(clippy::result_unit_err)]
pub async fn within_alive_epoch(&self, f: F) -> Result {
- // This guard is kept in the future until it resolves, preventing `epoch_terminated` to
- // acquire a write lock
+ // This guard is kept in the future until it resolves, preventing
+ // `epoch_terminated` from acquiring a write lock
let guard = self.epoch_alive.read().await;
if !*guard {
return Err(());
}
@@ -2151,9 +2219,11 @@
}
/// Verifies transaction signatures and other data
- /// Important: This function can potentially be called in parallel and you can not rely on order of transactions to perform verification
- /// If this function return an error, transaction is skipped and is not passed to handle_consensus_transaction
- /// This function returns unit error and is responsible for emitting log messages for internal errors
+ /// Important: This function can potentially be called in parallel, and you
+ /// cannot rely on the order of transactions to perform verification.
+ /// If this function returns an error, the transaction is skipped and is not
+ /// passed to handle_consensus_transaction. This function returns a unit
+ /// error and is responsible for emitting log messages for internal errors
fn verify_consensus_transaction(
&self,
transaction: SequencedConsensusTransaction,
@@ -2172,7 +2242,8 @@
skipped_consensus_txns.inc();
return None;
}
- // Signatures are verified as part of narwhal payload verification in SuiTxValidator
+ // Signatures are verified as part of narwhal payload verification in
+ // SuiTxValidator
match &transaction.transaction {
SequencedConsensusTransactionKind::External(ConsensusTransaction {
kind: ConsensusTransactionKind::UserTransaction(_certificate),
..
@@ -2183,7 +2254,11 @@
..
}) => { if transaction.sender_authority() != data.summary.auth_sig().authority { - warn!("CheckpointSignature authority {} does not match narwhal certificate source {}", data.summary.auth_sig().authority, transaction.certificate_author_index ); + warn!( + "CheckpointSignature authority {} does not match narwhal certificate source {}", + data.summary.auth_sig().authority, + transaction.certificate_author_index + ); return None; } } @@ -2206,8 +2281,7 @@ impl AuthorityPerEpochStore { if transaction.sender_authority() != capabilities.authority { warn!( "CapabilityNotification authority {} does not match narwhal certificate source {}", - capabilities.authority, - transaction.certificate_author_index + capabilities.authority, transaction.certificate_author_index ); return None; } @@ -2242,8 +2316,7 @@ impl AuthorityPerEpochStore { if transaction.sender_authority() != *authority { warn!( "RandomnessDkgMessage authority {} does not match narwhal certificate source {}", - authority, - transaction.certificate_author_index + authority, transaction.certificate_author_index ); return None; } @@ -2255,8 +2328,7 @@ impl AuthorityPerEpochStore { if transaction.sender_authority() != *authority { warn!( "RandomnessDkgConfirmation authority {} does not match narwhal certificate source {}", - authority, - transaction.certificate_author_index + authority, transaction.certificate_author_index ); return None; } @@ -2344,9 +2416,9 @@ impl AuthorityPerEpochStore { } } - // If DKG is closed, we should now load any previously-deferred randomness-using tx - // so we can decide what to do with them (execute or ignore, depending on whether - // DKG was successful). + // If DKG is closed, we should now load any previously-deferred randomness-using + // tx so we can decide what to do with them (execute or ignore, + // depending on whether DKG was successful). let mut randomness_manager = match self.randomness_manager.get() { Some(rm) => Some(rm.lock().await), None => None, @@ -2372,7 +2444,8 @@ impl AuthorityPerEpochStore { sequenced_randomness_transactions.extend(deferred_randomness_txs); } - // Save roots for checkpoint generation. One set for most tx, one for randomness tx. + // Save roots for checkpoint generation. One set for most tx, one for randomness + // tx. let mut roots: BTreeSet<_> = system_transactions .iter() .chain(sequenced_transactions.iter()) @@ -2461,8 +2534,9 @@ impl AuthorityPerEpochStore { } else { // It is ok to just release lock here as functions called by this one are the // only place that transition reconfig state, and this function itself is always - // executed from consensus task. At this point if the lock was not already provided - // above, we know we won't be transitioning state for this commit. + // executed from consensus task. At this point if the lock was not already + // provided above, we know we won't be transitioning state for this + // commit. self.get_reconfig_state_read_lock_guard().should_accept_tx() }; let make_checkpoint = should_accept_tx || final_round; @@ -2510,8 +2584,8 @@ impl AuthorityPerEpochStore { batch.write()?; - // Only after batch is written, notify checkpoint service to start building any new - // pending checkpoints. + // Only after batch is written, notify checkpoint service to start building any + // new pending checkpoints. if make_checkpoint { debug!( ?commit_round, @@ -2597,8 +2671,8 @@ impl AuthorityPerEpochStore { // Caller is not required to set ExecutionIndices with the right semantics in // VerifiedSequencedConsensusTransaction. 
- // Also, ConsensusStats and hash will not be updated in the db with this function, unlike in
- // process_consensus_transactions_and_commit_boundary().
+ // Also, ConsensusStats and hash will not be updated in the db with this
+ // function, unlike in process_consensus_transactions_and_commit_boundary().
#[cfg(any(test, feature = "test-utils"))]
pub async fn process_consensus_transactions_for_tests(
self: &Arc,
@@ -2633,9 +2707,10 @@
}
}
- /// Depending on the type of the VerifiedSequencedConsensusTransaction wrappers,
- /// - Verify and initialize the state to execute the certificates.
- /// Return VerifiedCertificates for each executable certificate
+ /// Depending on the type of the VerifiedSequencedConsensusTransaction
+ /// wrappers,
+ /// - Verify and initialize the state to execute the certificates. Return
+ /// VerifiedCertificates for each executable certificate
/// - Or update the state for checkpoint or epoch change protocol.
#[instrument(level = "debug", skip_all)]
#[allow(clippy::type_complexity)]
@@ -2792,8 +2867,9 @@
authority.concise()
);
- // It is ok to just release lock here as this function is the only place that transition into RejectAllCerts state
- // And this function itself is always executed from consensus task
+ // It is ok to just release the lock here, as this function is the only place
+ // that transitions into the RejectAllCerts state, and this function itself
+ // is always executed from the consensus task
let collected_end_of_publish = if lock.is_none()
&& self
.get_reconfig_state_read_lock_guard()
@@ -2805,8 +2881,12 @@
.insert_generic(*authority, ()).is_quorum_reached()
// end_of_publish lock is released here.
} else {
- // If we past the stage where we are accepting consensus certificates we also don't record end of publish messages
- debug!("Ignoring end of publish message from validator {:?} as we already collected enough end of publish messages", authority.concise());
+ // If we are past the stage where we accept consensus certificates, we also
+ // don't record end of publish messages
+ debug!(
+ "Ignoring end of publish message from validator {:?} as we already collected enough end of publish messages",
+ authority.concise()
+ );
false
};
@@ -2820,13 +2900,16 @@
let mut l = self.get_reconfig_state_write_lock_guard();
l.close_all_certs();
self.store_reconfig_state_batch(&l, write_batch)?;
- // Holding this lock until end of process_consensus_transactions_and_commit_boundary() where we write batch to DB
+ // Holding this lock until end of
+ // process_consensus_transactions_and_commit_boundary() where we write batch to
+ // DB
lock = Some(l);
};
// Important: we actually rely here on fact that ConsensusHandler panics if its
- // operation returns error. If some day we won't panic in ConsensusHandler on error
- // we need to figure out here how to revert in-memory state of .end_of_publish
- // and .reconfig_state when write fails.
+ // operation returns an error. If one day we no longer panic in ConsensusHandler
+ // on error, we need to figure out here how to revert the in-memory
+ // state of .end_of_publish and .reconfig_state when the write
+ // fails.
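The end-of-publish collection above is essentially a stake-weighted vote counter. Here is a small sketch of the idea, assuming a fixed stake table; StakeAggregator plays this role in the real code base, with different types, and the exact threshold rule shown is an illustrative assumption.

use std::collections::HashSet;

// Accumulates EndOfPublish votes by stake. Returns true once accumulated
// stake strictly exceeds 2/3 of the total, the usual 2f+1 BFT quorum.
struct EndOfPublishQuorumSketch {
    stake_by_authority: Vec<u64>,
    voted: HashSet<usize>,
    accumulated: u64,
}

impl EndOfPublishQuorumSketch {
    fn insert(&mut self, authority: usize) -> bool {
        // Count each authority at most once, like insert_generic above.
        if self.voted.insert(authority) {
            self.accumulated += self.stake_by_authority[authority];
        }
        let total: u64 = self.stake_by_authority.iter().sum();
        3 * self.accumulated > 2 * total
    }
}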
self.record_consensus_message_processed(write_batch, transaction.key())?;
} else {
panic!(
@@ -2903,13 +2986,21 @@ impl AuthorityPerEpochStore {
&& !previously_deferred_tx_digests.contains(certificate.digest())
{
// This can not happen with valid authority
- // With some edge cases narwhal might sometimes resend previously seen certificate after EndOfPublish
- // However this certificate will be filtered out before this line by `consensus_message_processed` call in `verify_consensus_transaction`
- // If we see some new certificate here it means authority is byzantine and sent certificate after EndOfPublish (or we have some bug in ConsensusAdapter)
- warn!("[Byzantine authority] Authority {:?} sent a new, previously unseen certificate {:?} after it sent EndOfPublish message to consensus", certificate_author.concise(), certificate.digest());
+ // With some edge cases narwhal might sometimes resend a previously seen
+ // certificate after EndOfPublish. However, this certificate
+ // will be filtered out before this line by the `consensus_message_processed`
+ // call in `verify_consensus_transaction`. If we see some new
+ // certificate here, it means the authority is byzantine and sent a certificate
+ // after EndOfPublish (or we have some bug in ConsensusAdapter)
+ warn!(
+ "[Byzantine authority] Authority {:?} sent a new, previously unseen certificate {:?} after it sent EndOfPublish message to consensus",
+ certificate_author.concise(),
+ certificate.digest()
+ );
return Ok(ConsensusCertificateResult::Ignored);
}
- // Safe because signatures are verified when consensus called into SuiTxValidator::validate_batch.
+ // Safe because signatures are verified when consensus calls into
+ // SuiTxValidator::validate_batch.
let certificate = VerifiedCertificate::new_unchecked(*certificate.clone());
let certificate = VerifiedExecutableTransaction::new_from_certificate(certificate);
@@ -2924,8 +3015,10 @@
.should_accept_consensus_certs()
&& !previously_deferred_tx_digests.contains(certificate.digest())
{
- debug!("Ignoring consensus certificate for transaction {:?} because of end of epoch",
- certificate.digest());
+ debug!(
+ "Ignoring consensus certificate for transaction {:?} because of end of epoch",
+ certificate.digest()
+ );
return Ok(ConsensusCertificateResult::Ignored);
}
@@ -2958,9 +3051,9 @@
kind: ConsensusTransactionKind::CheckpointSignature(info),
..
}) => {
- // We usually call notify_checkpoint_signature in SuiTxValidator, but that step can
- // be skipped when a batch is already part of a certificate, so we must also
- // notify here.
+ // We usually call notify_checkpoint_signature in SuiTxValidator, but that step
+ // can be skipped when a batch is already part of a certificate,
+ // so we must also notify here.
checkpoint_service.notify_checkpoint_signature(self, info)?;
Ok(ConsensusCertificateResult::ConsensusMessage)
}
@@ -3119,7 +3212,12 @@
) -> SuiResult {
if let Some(pending) = self.get_pending_checkpoint(&checkpoint.height())? {
if pending.roots() != checkpoint.roots() {
- panic!("Received checkpoint at index {} that contradicts previously stored checkpoint. Old roots: {:?}, new roots: {:?}", checkpoint.height(), pending.roots(), checkpoint.roots());
+ panic!(
+ "Received checkpoint at index {} that contradicts previously stored checkpoint.
Old roots: {:?}, new roots: {:?}", + checkpoint.height(), + pending.roots(), + checkpoint.roots() + ); } debug!( checkpoint_commit_height = checkpoint.height(), @@ -3193,9 +3291,10 @@ impl AuthorityPerEpochStore { commit_height: CheckpointHeight, content_info: Vec<(CheckpointSummary, CheckpointContents)>, ) -> SuiResult<()> { - // All created checkpoints are inserted in builder_checkpoint_summary in a single batch. - // This means that upon restart we can use BuilderCheckpointSummary::commit_height - // from the last built summary to resume building checkpoints. + // All created checkpoints are inserted in builder_checkpoint_summary in a + // single batch. This means that upon restart we can use + // BuilderCheckpointSummary::commit_height from the last built summary + // to resume building checkpoints. let mut batch = self.tables()?.pending_checkpoints.batch(); for (position_in_commit, (summary, transactions)) in content_info.into_iter().enumerate() { let sequence_number = summary.sequence_number; @@ -3459,13 +3558,14 @@ pub enum LockDetailsWrapper { impl LockDetailsWrapper { pub fn migrate(self) -> Self { - // TODO: when there are multiple versions, we must iteratively migrate from version N to - // N+1 until we arrive at the latest version + // TODO: when there are multiple versions, we must iteratively migrate from + // version N to N+1 until we arrive at the latest version self } - // Always returns the most recent version. Older versions are migrated to the latest version at - // read time, so there is never a need to access older versions. + // Always returns the most recent version. Older versions are migrated to the + // latest version at read time, so there is never a need to access older + // versions. pub fn inner(&self) -> &LockDetails { match self { Self::V1(v1) => v1, diff --git a/crates/sui-core/src/authority/authority_per_epoch_store_pruner.rs b/crates/sui-core/src/authority/authority_per_epoch_store_pruner.rs index f1f793a0237..f7e2e81da6b 100644 --- a/crates/sui-core/src/authority/authority_per_epoch_store_pruner.rs +++ b/crates/sui-core/src/authority/authority_per_epoch_store_pruner.rs @@ -1,15 +1,15 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::EPOCH_DB_PREFIX; +use std::{fs, path::PathBuf, time::Duration}; + use itertools::Itertools; -use std::fs; -use std::path::PathBuf; -use std::time::Duration; use sui_config::node::AuthorityStorePruningConfig; use tokio::sync::oneshot; use tracing::log::{error, info}; use typed_store::rocks::safe_drop_db; +use crate::authority::authority_per_epoch_store::EPOCH_DB_PREFIX; + pub struct AuthorityPerEpochStorePruner { _cancel_handle: oneshot::Sender<()>, } @@ -73,9 +73,10 @@ impl AuthorityPerEpochStorePruner { #[cfg(test)] mod tests { - use crate::authority::authority_per_epoch_store_pruner::AuthorityPerEpochStorePruner; use std::fs; + use crate::authority::authority_per_epoch_store_pruner::AuthorityPerEpochStorePruner; + #[test] fn test_basic_epoch_pruner() { let parent_directory = tempfile::tempdir().unwrap().into_path(); diff --git a/crates/sui-core/src/authority/authority_store.rs b/crates/sui-core/src/authority/authority_store.rs index 9ecaaeeaafb..91cff85f1b5 100644 --- a/crates/sui-core/src/authority/authority_store.rs +++ b/crates/sui-core/src/authority/authority_store.rs @@ -1,51 +1,59 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::cmp::Ordering; -use std::ops::Not; -use std::sync::Arc; -use std::{iter, mem, thread}; - -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::authority_store_types::{ - get_store_object_pair, ObjectContentDigest, StoreObject, StoreObjectPair, StoreObjectWrapper, -}; -use crate::authority::epoch_start_configuration::{EpochFlag, EpochStartConfiguration}; -use crate::state_accumulator::AccumulatorStore; -use crate::transaction_outputs::TransactionOutputs; +use std::{cmp::Ordering, iter, mem, ops::Not, sync::Arc, thread}; + use either::Either; use fastcrypto::hash::{HashFunction, MultisetHash, Sha3_256}; use futures::stream::FuturesUnordered; use itertools::izip; use move_core_types::resolver::ModuleResolver; +use mysten_common::sync::notify_read::NotifyRead; use serde::{Deserialize, Serialize}; use sui_macros::fail_point_arg; use sui_storage::mutex_table::{MutexGuard, MutexTable, RwLockGuard, RwLockTable}; -use sui_types::accumulator::Accumulator; -use sui_types::digests::TransactionEventsDigest; -use sui_types::error::UserInputError; -use sui_types::execution::TypeLayoutStore; -use sui_types::message_envelope::Message; -use sui_types::storage::{ - get_module, BackingPackageStore, MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, +use sui_types::{ + accumulator::Accumulator, + base_types::SequenceNumber, + digests::TransactionEventsDigest, + effects::{TransactionEffects, TransactionEvents}, + error::UserInputError, + execution::TypeLayoutStore, + fp_bail, fp_ensure, + gas_coin::TOTAL_SUPPLY_MIST, + message_envelope::Message, + storage::{ + get_module, BackingPackageStore, MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, + }, + sui_system_state::get_sui_system_state, +}; +use tokio::{ + sync::{RwLockReadGuard, RwLockWriteGuard}, + time::Instant, }; -use sui_types::sui_system_state::get_sui_system_state; -use sui_types::{base_types::SequenceNumber, fp_bail, fp_ensure}; -use tokio::sync::{RwLockReadGuard, RwLockWriteGuard}; -use tokio::time::Instant; use tracing::{debug, info, trace}; -use typed_store::traits::Map; use typed_store::{ - rocks::{DBBatch, DBMap}, + rocks::{util::is_ref_count_value, DBBatch, DBMap}, + traits::Map, TypedStoreError, }; -use super::authority_store_tables::LiveObject; -use super::{authority_store_tables::AuthorityPerpetualTables, *}; -use mysten_common::sync::notify_read::NotifyRead; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::gas_coin::TOTAL_SUPPLY_MIST; -use typed_store::rocks::util::is_ref_count_value; +use super::{ + authority_store_tables::{AuthorityPerpetualTables, LiveObject}, + *, +}; +use crate::{ + authority::{ + authority_per_epoch_store::AuthorityPerEpochStore, + authority_store_types::{ + get_store_object_pair, ObjectContentDigest, StoreObject, StoreObjectPair, + StoreObjectWrapper, + }, + epoch_start_configuration::{EpochFlag, EpochStartConfiguration}, + }, + state_accumulator::AccumulatorStore, + transaction_outputs::TransactionOutputs, +}; const NUM_SHARDS: usize = 4096; @@ -105,9 +113,10 @@ impl AuthorityStoreMetrics { /// ALL_OBJ_VER determines whether we want to store all past /// versions of every object in the store. Authority doesn't store /// them, but other entities such as replicas will. -/// S is a template on Authority signature state. This allows SuiDataStore to be used on either -/// authorities or non-authorities. 
Specifically, when storing transactions and effects, -/// S allows SuiDataStore to either store the authority signed version or unsigned version. +/// S is a template on Authority signature state. This allows SuiDataStore to be +/// used on either authorities or non-authorities. Specifically, when storing +/// transactions and effects, S allows SuiDataStore to either store the +/// authority signed version or unsigned version. pub struct AuthorityStore { /// Internal vector of locks to manage concurrent writes to the database mutex_table: MutexTable, @@ -195,15 +204,17 @@ impl AuthorityStore { } } - // NB: This must only be called at time of reconfiguration. We take the execution lock write - // guard as an argument to ensure that this is the case. + // NB: This must only be called at time of reconfiguration. We take the + // execution lock write guard as an argument to ensure that this is the + // case. pub fn clear_object_per_epoch_marker_table( &self, _execution_guard: &ExecutionLockWriteGuard<'_>, ) -> SuiResult<()> { - // We can safely delete all entries in the per epoch marker table since this is only called - // at epoch boundaries (during reconfiguration). Therefore any entries that currently - // exist can be removed. Because of this we can use the `schedule_delete_all` method. + // We can safely delete all entries in the per epoch marker table since this is + // only called at epoch boundaries (during reconfiguration). Therefore + // any entries that currently exist can be removed. Because of this we + // can use the `schedule_delete_all` method. Ok(self .perpetual_tables .object_per_epoch_marker_table @@ -216,8 +227,8 @@ impl AuthorityStore { genesis: &Genesis, indirect_objects_threshold: usize, ) -> SuiResult> { - // TODO: Since we always start at genesis, the committee should be technically the same - // as the genesis committee. + // TODO: Since we always start at genesis, the committee should be technically + // the same as the genesis committee. assert_eq!(committee.epoch, 0); Self::open_inner( genesis, @@ -269,8 +280,9 @@ impl AuthorityStore { .effects .insert(&genesis.effects().digest(), genesis.effects()) .unwrap(); - // We don't insert the effects to executed_effects yet because the genesis tx hasn't but will be executed. - // This is important for fullnodes to be able to generate indexing data right now. + // We don't insert the effects to executed_effects yet because the genesis tx + // hasn't but will be executed. This is important for fullnodes to + // be able to generate indexing data right now. let event_digests = genesis.events().digest(); let events = genesis @@ -367,8 +379,9 @@ impl AuthorityStore { } } - /// Given a list of transaction digests, returns a list of the corresponding effects only if they have been - /// executed. For transactions that have not been executed, None is returned. + /// Given a list of transaction digests, returns a list of the corresponding + /// effects only if they have been executed. For transactions that have + /// not been executed, None is returned. pub fn multi_get_executed_effects_digests( &self, digests: &[TransactionDigest], @@ -376,8 +389,9 @@ impl AuthorityStore { Ok(self.perpetual_tables.executed_effects.multi_get(digests)?) } - /// Given a list of transaction digests, returns a list of the corresponding effects only if they have been - /// executed. For transactions that have not been executed, None is returned. 
+ /// Given a list of transaction digests, returns a list of the corresponding
+ /// effects only if they have been executed. For transactions that have
+ /// not been executed, None is returned.
pub fn multi_get_executed_effects(
&self,
digests: &[TransactionDigest],
@@ -447,7 +461,8 @@ impl AuthorityStore {
&self,
epoch: EpochId,
) -> SuiResult<(CheckpointSequenceNumber, Accumulator)> {
- // We need to register waiters _before_ reading from the database to avoid race conditions
+ // We need to register waiters _before_ reading from the database to avoid race
+ // conditions
let registration = self.root_state_notify_read.register_one(&epoch);
let hash = self.perpetual_tables.root_state_hash_by_epoch.get(&epoch)?;
@@ -510,7 +525,8 @@ impl AuthorityStore {
self.perpetual_tables.database_is_empty()
}
- /// A function that acquires all locks associated with the objects (in order to avoid deadlocks).
+ /// A function that acquires all locks associated with the objects (in order
+ /// to avoid deadlocks).
async fn acquire_locks(&self, input_objects: &[ObjectRef]) -> Vec {
self.mutex_table
.acquire_locks(input_objects.iter().map(|(_, _, digest)| *digest))
@@ -632,8 +648,8 @@ impl AuthorityStore {
self.insert_object_direct(object_ref, &object)
}
- /// Insert an object directly into the store, and also update relevant tables
- /// NOTE: does not handle transaction lock.
+ /// Insert an object directly into the store, and also update relevant
+ /// tables. NOTE: does not handle transaction lock.
/// This is used to insert genesis objects
fn insert_object_direct(&self, object_ref: ObjectRef, object: &Object) -> SuiResult {
let mut write_batch = self.perpetual_tables.objects.batch();
@@ -665,7 +681,8 @@ impl AuthorityStore {
Ok(())
}
- /// This function should only be used for initializing genesis and should remain private.
+ /// This function should only be used for initializing genesis and should
+ /// remain private.
pub(crate) fn bulk_insert_genesis_objects(&self, objects: &[Object]) -> SuiResult<()> {
let mut batch = self.perpetual_tables.objects.batch();
let ref_and_objects: Vec<_> = objects
@@ -792,9 +809,11 @@ impl AuthorityStore {
// - transaction execution branches to reference count increment
// - pruner decrements ref count to 0
// - compaction job compresses existing merge values to an empty vector
- // - tx executor commits ref count increment instead of the full value making object inaccessible
+ // - tx executor commits ref count increment instead of the full value making
+ // object inaccessible
// read locks are sufficient because ref count increments are safe,
- // concurrent transaction executions produce independent ref count increments and don't corrupt the state
+ // concurrent transaction executions produce independent ref count increments
+ // and don't corrupt the state
let digests = written
.values()
.filter_map(|object| {
@@ -809,7 +828,8 @@ impl AuthorityStore {
/// Updates the state resulting from the execution of a certificate.
///
/// Internally it checks that all locks for active inputs are at the correct
- /// version, and then writes objects, certificates, parents and clean up locks atomically.
+ /// version, and then writes objects, certificates, parents and cleans up
+ /// locks atomically.
#[instrument(level = "debug", skip_all)] pub async fn write_transaction_outputs( &self, @@ -881,7 +901,8 @@ impl AuthorityStore { .indirect_move_objects .multi_get_raw_bytes(indirect_objects.iter().map(|(digest, _)| digest))?; // split updates to existing and new indirect objects - // for new objects full merge needs to be triggered. For existing ref count increment is sufficient + // for new objects full merge needs to be triggered. For existing ref count + // increment is sufficient let (existing_indirect_objects, new_indirect_objects): (Vec<_>, Vec<_>) = indirect_objects .into_iter() .enumerate() @@ -912,21 +933,24 @@ impl AuthorityStore { write_batch.insert_batch(&self.perpetual_tables.events, events)?; - // NOTE: We just check here that locks exist, not that they are locked to a specific TX. Why? - // 1. Lock existence prevents re-execution of old certs when objects have been upgraded - // 2. Not all validators lock, just 2f+1, so transaction should proceed regardless - // (But the lock should exist which means previous transactions finished) - // 3. Equivocation possible (different TX) but as long as 2f+1 approves current TX its - // fine - // 4. Locks may have existed when we started processing this tx, but could have since - // been deleted by a concurrent tx that finished first. In that case, check if the - // tx effects exist. + // NOTE: We just check here that locks exist, not that they are locked to a + // specific TX. Why? + // 1. Lock existence prevents re-execution of old certs when objects have been + // upgraded + // 2. Not all validators lock, just 2f+1, so transaction should proceed + // regardless (But the lock should exist which means previous transactions + // finished) + // 3. Equivocation possible (different TX) but as long as 2f+1 approves current + // TX its fine + // 4. Locks may have existed when we started processing this tx, but could have + // since been deleted by a concurrent tx that finished first. In that case, + // check if the tx effects exist. self.check_owned_object_locks_exist(locks_to_delete)?; self.initialize_live_object_markers_impl(&mut write_batch, new_locks_to_init, false)?; - // Note: deletes locks for received objects as well (but not for objects that were in - // `Receiving` arguments which were not received) + // Note: deletes locks for received objects as well (but not for objects that + // were in `Receiving` arguments which were not received) self.delete_live_object_markers(&mut write_batch, locks_to_delete)?; write_batch @@ -969,7 +993,8 @@ impl AuthorityStore { } } - /// Acquires a lock for a transaction on the given objects if they have all been initialized previously + /// Acquires a lock for a transaction on the given objects if they have all + /// been initialized previously async fn acquire_transaction_locks_v1( &self, epoch_store: &AuthorityPerEpochStore, @@ -977,8 +1002,8 @@ impl AuthorityStore { tx_digest: TransactionDigest, ) -> SuiResult { let epoch = epoch_store.epoch(); - // Other writers may be attempting to acquire locks on the same objects, so a mutex is - // required. + // Other writers may be attempting to acquire locks on the same objects, so a + // mutex is required. // TODO: replace with optimistic db_transactions (i.e. set lock to tx if none) let _mutexes = self.acquire_locks(owned_input_objects).await; @@ -994,11 +1019,13 @@ impl AuthorityStore { // The object / version must exist, and therefore lock initialized. 
if lock.is_none() { let latest_lock = self.get_latest_live_version_for_object_id(obj_ref.0)?; - fp_bail!(UserInputError::ObjectVersionUnavailableForConsumption { - provided_obj_ref: *obj_ref, - current_version: latest_lock.1 - } - .into()); + fp_bail!( + UserInputError::ObjectVersionUnavailableForConsumption { + provided_obj_ref: *obj_ref, + current_version: latest_lock.1 + } + .into() + ); } // Safe to unwrap as it is checked above let lock = lock.unwrap().map(|l| l.migrate().into_inner()); @@ -1063,8 +1090,8 @@ impl AuthorityStore { ) -> SuiResult { let tx_digest = *transaction.digest(); let epoch = epoch_store.epoch(); - // Other writers may be attempting to acquire locks on the same objects, so a mutex is - // required. + // Other writers may be attempting to acquire locks on the same objects, so a + // mutex is required. // TODO: replace with optimistic db_transactions (i.e. set lock to tx if none) let _mutexes = self.acquire_locks(owned_input_objects).await; @@ -1089,11 +1116,13 @@ impl AuthorityStore { ) { let Some(live_marker) = live_marker else { let latest_lock = self.get_latest_live_version_for_object_id(obj_ref.0)?; - fp_bail!(UserInputError::ObjectVersionUnavailableForConsumption { - provided_obj_ref: *obj_ref, - current_version: latest_lock.1 - } - .into()); + fp_bail!( + UserInputError::ObjectVersionUnavailableForConsumption { + provided_obj_ref: *obj_ref, + current_version: latest_lock.1 + } + .into() + ); }; let live_marker = live_marker.map(|l| l.migrate().into_inner()); @@ -1140,7 +1169,8 @@ impl AuthorityStore { } /// Gets ObjectLockInfo that represents state of lock on an object. - /// Returns UserInputError::ObjectNotFound if cannot find lock record for this object + /// Returns UserInputError::ObjectNotFound if cannot find lock record for + /// this object pub(crate) fn get_lock( &self, obj_ref: ObjectRef, @@ -1221,7 +1251,8 @@ impl AuthorityStore { ) } - /// Returns UserInputError::ObjectNotFound if no lock records found for this object. + /// Returns UserInputError::ObjectNotFound if no lock records found for this + /// object. pub(crate) fn get_latest_live_version_for_object_id( &self, object_id: ObjectID, @@ -1235,7 +1266,7 @@ impl AuthorityStore { Ok(iterator .next() .and_then(|value| { - if value.0 .0 == object_id { + if value.0.0 == object_id { Some(value) } else { None @@ -1251,9 +1282,10 @@ impl AuthorityStore { } /// Checks multiple object locks exist. - /// Returns UserInputError::ObjectNotFound if cannot find lock record for at least one of the objects. - /// Returns UserInputError::ObjectVersionUnavailableForConsumption if at least one object lock is not initialized - /// at the given version. + /// Returns UserInputError::ObjectNotFound if cannot find lock record for at + /// least one of the objects. + /// Returns UserInputError::ObjectVersionUnavailableForConsumption if at + /// least one object lock is not initialized at the given version. 
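The multi-lock check documented above has a simple shape; it is sketched below with a plain set standing in for the lock table. The error variant name follows the doc comment, everything else is an illustrative stand-in.

use std::collections::HashSet;

type ObjectRefSketch = (u64, u64, u64); // (id, version, digest) stand-in

#[derive(Debug)]
enum LockCheckError {
    ObjectVersionUnavailableForConsumption {
        provided_obj_ref: ObjectRefSketch,
        current_version: u64,
    },
}

// Fail on the first object whose exact reference has no lock record, and
// report the latest live version so the caller can tell a stale reference
// from an unknown object.
fn check_locks_exist_sketch(
    locks: &HashSet<ObjectRefSketch>,
    objects: &[ObjectRefSketch],
    latest_live_version: impl Fn(u64) -> u64,
) -> Result<(), LockCheckError> {
    for obj_ref in objects {
        if !locks.contains(obj_ref) {
            return Err(LockCheckError::ObjectVersionUnavailableForConsumption {
                provided_obj_ref: *obj_ref,
                current_version: latest_live_version(obj_ref.0),
            });
        }
    }
    Ok(())
}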
pub fn check_owned_object_locks_exist(&self, objects: &[ObjectRef]) -> SuiResult {
let locks = self
.perpetual_tables
@@ -1262,18 +1294,21 @@
for (lock, obj_ref) in locks.into_iter().zip(objects) {
if lock.is_none() {
let latest_lock = self.get_latest_live_version_for_object_id(obj_ref.0)?;
- fp_bail!(UserInputError::ObjectVersionUnavailableForConsumption {
- provided_obj_ref: *obj_ref,
- current_version: latest_lock.1
- }
- .into());
+ fp_bail!(
+ UserInputError::ObjectVersionUnavailableForConsumption {
+ provided_obj_ref: *obj_ref,
+ current_version: latest_lock.1
+ }
+ .into()
+ );
}
}
Ok(())
}
/// Initialize a lock to None (but exists) for a given list of ObjectRefs.
- /// Returns SuiError::ObjectLockAlreadyInitialized if the lock already exists and is locked to a transaction
+ /// Returns SuiError::ObjectLockAlreadyInitialized if the lock already
+ /// exists and is locked to a transaction
fn initialize_live_object_markers_impl(
&self,
write_batch: &mut DBBatch,
@@ -1303,7 +1338,8 @@
// If any locks exist and are not None, return errors for them
// Note that if epoch_store.object_lock_split_tables_enabled() is true, we don't
// check if there is a pre-existing lock. this is because initializing the live
- // object marker will not overwrite the lock and cause the validator to equivocate.
+ // object marker will not overwrite the lock and cause the validator to
+ // equivocate.
let existing_locks: Vec = locks
.iter()
.zip(objects)
@@ -1371,8 +1407,8 @@
}
/// This function is called at the end of epoch for each transaction that's
- /// executed locally on the validator but didn't make to the last checkpoint.
- /// The effects of the execution is reverted here.
+ /// executed locally on the validator but didn't make it into the last
+ /// checkpoint. The effects of the execution are reverted here.
/// The following things are reverted:
/// 1. All new object states are deleted.
/// 2. owner_index table change is reverted.
@@ -1471,10 +1507,11 @@
Ok(())
}
- /// Return the object with version less then or eq to the provided seq number.
- /// This is used by indexer to find the correct version of dynamic field child object.
- /// We do not store the version of the child object, but because of lamport timestamp,
- /// we know the child must have version number less then or eq to the parent.
+ /// Return the object with version less than or equal to the provided seq
+ /// number. This is used by the indexer to find the correct version of a
+ /// dynamic field child object. We do not store the version of the child
+ /// object, but because of the lamport timestamp, we know the child must
+ /// have a version number less than or equal to the parent.
pub fn find_object_lt_or_eq_version(
&self,
object_id: ObjectID,
@@ -1484,13 +1521,14 @@
.find_object_lt_or_eq_version(object_id, version)
}
- /// Returns the latest object reference we have for this object_id in the objects table.
+ /// Returns the latest object reference we have for this object_id in the
+ /// objects table.
///
- /// The method may also return the reference to a deleted object with a digest of
- /// ObjectDigest::deleted() or ObjectDigest::wrapped() and lamport version
- /// of a transaction that deleted the object.
- /// Note that a deleted object may re-appear if the deletion was the result of the object
- /// being wrapped in another object.
+ /// The method may also return the reference to a deleted object with a + /// digest of ObjectDigest::deleted() or ObjectDigest::wrapped() and + /// lamport version of a transaction that deleted the object. + /// Note that a deleted object may re-appear if the deletion was the result + /// of the object being wrapped in another object. /// /// If no entry for the object_id is found, return None. pub fn get_latest_object_ref_or_tombstone( @@ -1501,8 +1539,8 @@ impl AuthorityStore { .get_latest_object_ref_or_tombstone(object_id) } - /// Returns the latest object reference if and only if the object is still live (i.e. it does - /// not return tombstones) + /// Returns the latest object reference if and only if the object is still + /// live (i.e. it does not return tombstones) pub fn get_latest_object_ref_if_alive( &self, object_id: ObjectID, @@ -1513,7 +1551,8 @@ impl AuthorityStore { } } - /// Returns the latest object we have for this object_id in the objects table. + /// Returns the latest object we have for this object_id in the objects + /// table. /// /// If no entry for the object_id is found, return None. pub fn get_latest_object_or_tombstone( @@ -1605,11 +1644,12 @@ impl AuthorityStore { } /// This function reads the DB directly to get the system state object. - /// If reconfiguration is happening at the same time, there is no guarantee whether we would be getting - /// the old or the new system state object. - /// Hence this function should only be called during RPC reads where data race is not a major concern. - /// In general we should avoid this as much as possible. - /// If the intent is for testing, you can use AuthorityState:: get_sui_system_state_object_for_testing. + /// If reconfiguration is happening at the same time, there is no guarantee + /// whether we would be getting the old or the new system state object. + /// Hence this function should only be called during RPC reads where data + /// race is not a major concern. In general we should avoid this as much + /// as possible. If the intent is for testing, you can use + /// AuthorityState:: get_sui_system_state_object_for_testing. pub fn get_sui_system_state_object_unsafe(&self) -> SuiResult { get_sui_system_state(self.perpetual_tables.as_ref()) } @@ -1650,8 +1690,9 @@ impl AuthorityStore { let mut total_sui = 0; for object in task_objects { total_storage_rebate += object.storage_rebate; - // get_total_sui includes storage rebate, however all storage rebate is - // also stored in the storage fund, so we need to subtract it here. + // get_total_sui includes storage rebate, however all storage + // rebate is also stored in + // the storage fund, so we need to subtract it here. total_sui += object.get_total_sui(layout_resolver.as_mut()).unwrap() - object.storage_rebate; @@ -1694,7 +1735,8 @@ impl AuthorityStore { .sui_conservation_check_latency .set(cur_time.elapsed().as_secs() as i64); - // It is safe to call this function because we are in the middle of reconfiguration. + // It is safe to call this function because we are in the middle of + // reconfiguration. let system_state = self .get_sui_system_state_object_unsafe() .expect("Reading sui system state object cannot fail") @@ -1764,8 +1806,9 @@ impl AuthorityStore { Ok(()) } - /// This is a temporary method to be used when we enable simplified_unwrap_then_delete. - /// It re-accumulates state hash for the new epoch if simplified_unwrap_then_delete is enabled. + /// This is a temporary method to be used when we enable + /// simplified_unwrap_then_delete. 
It re-accumulates state hash for the + /// new epoch if simplified_unwrap_then_delete is enabled. #[instrument(level = "error", skip_all)] pub fn maybe_reaccumulate_state_hash( &self, @@ -1780,23 +1823,25 @@ impl AuthorityStore { cur_epoch_store.get_chain_identifier().chain(), ) .simplified_unwrap_then_delete(); - // If in the new epoch the simplified_unwrap_then_delete is enabled for the first time, - // we re-accumulate state root. + // If in the new epoch the simplified_unwrap_then_delete is enabled for the + // first time, we re-accumulate state root. let should_reaccumulate = !old_simplified_unwrap_then_delete && new_simplified_unwrap_then_delete; if !should_reaccumulate { return; } - info!("[Re-accumulate] simplified_unwrap_then_delete is enabled in the new protocol version, re-accumulating state hash"); + info!( + "[Re-accumulate] simplified_unwrap_then_delete is enabled in the new protocol version, re-accumulating state hash" + ); let cur_time = Instant::now(); std::thread::scope(|s| { let pending_tasks = FuturesUnordered::new(); - // Shard the object IDs into different ranges so that we can process them in parallel. - // We divide the range into 2^BITS number of ranges. To do so we use the highest BITS bits - // to mark the starting/ending point of the range. For example, when BITS = 5, we - // divide the range into 32 ranges, and the first few ranges are: - // 00000000_.... to 00000111_.... - // 00001000_.... to 00001111_.... + // Shard the object IDs into different ranges so that we can process them in + // parallel. We divide the range into 2^BITS number of ranges. To do + // so we use the highest BITS bits to mark the starting/ending point + // of the range. For example, when BITS = 5, we divide the range + // into 32 ranges, and the first few ranges are: 00000000_.... to + // 00000111_.... 00001000_.... to 00001111_.... // 00010000_.... to 00010111_.... // and etc. const BITS: u8 = 5; @@ -1835,10 +1880,10 @@ impl AuthorityStore { ); } if matches!(prev.1.inner(), StoreObject::Wrapped) - && object_key.0 != prev.0 .0 + && object_key.0 != prev.0.0 { wrapped_objects_to_remove - .push(WrappedObject::new(prev.0 .0, prev.0 .1)); + .push(WrappedObject::new(prev.0.0, prev.0.1)); } prev = (object_key, object); @@ -1850,7 +1895,7 @@ impl AuthorityStore { } } if matches!(prev.1.inner(), StoreObject::Wrapped) { - wrapped_objects_to_remove.push(WrappedObject::new(prev.0 .0, prev.0 .1)); + wrapped_objects_to_remove.push(WrappedObject::new(prev.0.0, prev.0.1)); } info!( "[Re-accumulate] Task {}: object scanned: {}, wrapped objects: {}", @@ -1940,7 +1985,8 @@ impl AuthorityStore { self.perpetual_tables.objects.multi_remove(entries).unwrap(); } - // Counts the number of versions exist in object store for `object_id`. This includes tombstone. + // Counts the number of versions exist in object store for `object_id`. This + // includes tombstone. #[cfg(msim)] pub fn count_object_versions(&self, object_id: ObjectID) -> usize { self.perpetual_tables @@ -2078,13 +2124,14 @@ pub enum LockDetailsWrapperDeprecated { impl LockDetailsWrapperDeprecated { pub fn migrate(self) -> Self { - // TODO: when there are multiple versions, we must iteratively migrate from version N to - // N+1 until we arrive at the latest version + // TODO: when there are multiple versions, we must iteratively migrate from + // version N to N+1 until we arrive at the latest version self } - // Always returns the most recent version. 
Older versions are migrated to the latest version at - // read time, so there is never a need to access older versions. + // Always returns the most recent version. Older versions are migrated to the + // latest version at read time, so there is never a need to access older + // versions. pub fn inner(&self) -> &LockDetailsDeprecated { match self { Self::V1(v1) => v1, diff --git a/crates/sui-core/src/authority/authority_store_pruner.rs b/crates/sui-core/src/authority/authority_store_pruner.rs index 45490cc35a6..a27f6a159ea 100644 --- a/crates/sui-core/src/authority/authority_store_pruner.rs +++ b/crates/sui-core/src/authority/authority_store_pruner.rs @@ -1,8 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_store_types::{ObjectContentDigest, StoreData, StoreObject}; -use crate::checkpoints::{CheckpointStore, CheckpointWatermark}; +use std::{ + cmp::{max, min}, + collections::{BTreeSet, HashMap}, + sync::{Arc, Mutex}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + use anyhow::anyhow; use mysten_metrics::{monitored_scope, spawn_monitored_task}; use once_cell::sync::Lazy; @@ -11,31 +16,28 @@ use prometheus::{ Registry, }; use rocksdb::LiveFile; -use std::cmp::{max, min}; -use std::collections::{BTreeSet, HashMap}; -use std::sync::Mutex; -use std::time::{SystemTime, UNIX_EPOCH}; -use std::{sync::Arc, time::Duration}; use sui_archival::reader::ArchiveReaderBalancer; use sui_config::node::AuthorityStorePruningConfig; use sui_storage::mutex_table::RwLockTable; -use sui_types::base_types::SequenceNumber; -use sui_types::effects::TransactionEffects; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::{ - CheckpointContents, CheckpointDigest, CheckpointSequenceNumber, -}; use sui_types::{ - base_types::{ObjectID, VersionNumber}, + base_types::{ObjectID, SequenceNumber, VersionNumber}, + effects::{TransactionEffects, TransactionEffectsAPI}, + message_envelope::Message, + messages_checkpoint::{CheckpointContents, CheckpointDigest, CheckpointSequenceNumber}, storage::ObjectKey, }; -use tokio::sync::oneshot::{self, Sender}; -use tokio::time::Instant; +use tokio::{ + sync::oneshot::{self, Sender}, + time::Instant, +}; use tracing::{debug, error, info, warn}; use typed_store::{Map, TypedStoreError}; use super::authority_store_tables::AuthorityPerpetualTables; +use crate::{ + authority::authority_store_types::{ObjectContentDigest, StoreData, StoreObject}, + checkpoints::{CheckpointStore, CheckpointWatermark}, +}; static PERIODIC_PRUNING_TABLES: Lazy> = Lazy::new(|| { [ @@ -189,11 +191,14 @@ impl AuthorityStorePruner { wb.schedule_delete_range(&perpetual_db.objects, &start_range, &end_range)?; } - // When enable_pruning_tombstones is enabled, instead of using range deletes, we need to do a scan of all the keys - // for the deleted objects and then do point deletes to delete all the existing keys. This is because to improve read - // performance, we set `ignore_range_deletions` on all read options, and using range delete to delete tombstones - // may leak object (imagine a tombstone is compacted away, but earlier version is still not). Using point deletes - // guarantees that all earlier versions are deleted in the database. + // When enable_pruning_tombstones is enabled, instead of using range deletes, we + // need to do a scan of all the keys for the deleted objects and then do + // point deletes to delete all the existing keys. 
This is because to improve + // read performance, we set `ignore_range_deletions` on all read + // options, and using range delete to delete tombstones may leak object + // (imagine a tombstone is compacted away, but earlier version is still not). + // Using point deletes guarantees that all earlier versions are deleted + // in the database. if !object_tombstones_to_prune.is_empty() { let mut object_keys_to_delete = vec![]; for ObjectKey(object_id, seq_number) in object_tombstones_to_prune { @@ -300,7 +305,8 @@ impl AuthorityStorePruner { Ok(()) } - /// Prunes old data based on effects from all checkpoints from epochs eligible for pruning + /// Prunes old data based on effects from all checkpoints from epochs + /// eligible for pruning pub async fn prune_objects_for_eligible_epochs( perpetual_db: &Arc, checkpoint_store: &Arc, @@ -386,7 +392,8 @@ impl AuthorityStorePruner { .await } - /// Prunes old object versions based on effects from all checkpoints from epochs eligible for pruning + /// Prunes old object versions based on effects from all checkpoints from + /// epochs eligible for pruning pub async fn prune_for_eligible_epochs( perpetual_db: &Arc, checkpoint_store: &Arc, @@ -418,9 +425,10 @@ impl AuthorityStorePruner { }; let checkpoint = ckpt.into_inner(); // Skipping because checkpoint's epoch or checkpoint number is too new. - // We have to respect the highest executed checkpoint watermark (including the watermark itself) - // because there might be parts of the system that still require access to old object versions - // (i.e. state accumulator). + // We have to respect the highest executed checkpoint watermark (including the + // watermark itself) because there might be parts of the system that + // still require access to old object versions (i.e. state + // accumulator). 
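The skip condition that follows combines two independent gates: the checkpoint's epoch must be old enough under the retention policy, and the checkpoint must sit strictly below the highest-executed watermark. Negated into a standalone predicate (plain integers standing in for the real types), the rule reads:

// A checkpoint is prunable only when both gates pass; this is the negation
// of the skip condition in the loop below.
fn is_prunable_sketch(
    checkpoint_epoch: u64,
    checkpoint_seq: u64,
    current_epoch: u64,
    num_epochs_to_retain: u64,
    max_eligible_checkpoint: u64,
) -> bool {
    current_epoch >= checkpoint_epoch + num_epochs_to_retain
        && checkpoint_seq < max_eligible_checkpoint
}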
            if (current_epoch < checkpoint.epoch() + num_epochs_to_retain)
                || (*checkpoint.sequence_number() >= max_eligible_checkpoint)
            {
@@ -670,7 +678,10 @@ impl AuthorityStorePruner {
    ) -> Self {
        if pruning_config.num_epochs_to_retain > 0 && pruning_config.num_epochs_to_retain < u64::MAX
        {
-            warn!("Using objects pruner with num_epochs_to_retain = {} can lead to performance issues", pruning_config.num_epochs_to_retain);
+            warn!(
+                "Using objects pruner with num_epochs_to_retain = {} can lead to performance issues",
+                pruning_config.num_epochs_to_retain
+            );
            if is_validator {
                warn!("Resetting to aggressive pruner.");
                pruning_config.num_epochs_to_retain = 0;
@@ -702,33 +713,32 @@ impl AuthorityStorePruner {
#[cfg(test)]
mod tests {
-    use more_asserts as ma;
-    use std::path::Path;
-    use std::time::Duration;
-    use std::{collections::HashSet, sync::Arc};
-    use tracing::log::info;
+    use std::{collections::HashSet, path::Path, sync::Arc, time::Duration};

-    use crate::authority::authority_store_pruner::AuthorityStorePruningMetrics;
-    use crate::authority::authority_store_tables::AuthorityPerpetualTables;
-    use crate::authority::authority_store_types::{
-        get_store_object_pair, ObjectContentDigest, StoreData, StoreObject, StoreObjectPair,
-        StoreObjectWrapper,
-    };
+    use more_asserts as ma;
    use prometheus::Registry;
    use sui_storage::mutex_table::RwLockTable;
-    use sui_types::base_types::ObjectDigest;
-    use sui_types::effects::TransactionEffects;
-    use sui_types::effects::TransactionEffectsAPI;
    use sui_types::{
-        base_types::{ObjectID, SequenceNumber},
+        base_types::{ObjectDigest, ObjectID, SequenceNumber},
+        effects::{TransactionEffects, TransactionEffectsAPI},
        object::Object,
        storage::ObjectKey,
    };
-    use typed_store::rocks::util::reference_count_merge_operator;
-    use typed_store::rocks::{DBMap, MetricConf, ReadWriteOptions};
-    use typed_store::Map;
+    use tracing::log::info;
+    use typed_store::{
+        rocks::{util::reference_count_merge_operator, DBMap, MetricConf, ReadWriteOptions},
+        Map,
+    };

    use super::AuthorityStorePruner;
+    use crate::authority::{
+        authority_store_pruner::AuthorityStorePruningMetrics,
+        authority_store_tables::AuthorityPerpetualTables,
+        authority_store_types::{
+            get_store_object_pair, ObjectContentDigest, StoreData, StoreObject, StoreObjectPair,
+            StoreObjectWrapper,
+        },
+    };

    fn get_keys_after_pruning(path: &Path) -> anyhow::Result<HashSet<ObjectKey>> {
        let perpetual_db_path = path.join(Path::new("perpetual"));
@@ -1025,30 +1035,25 @@ mod tests {
#[cfg(not(target_os = "macos"))]
#[cfg(not(target_env = "msvc"))]
mod pprof_tests {
-    use crate::authority::authority_store_pruner::tests;
-    use std::sync::Arc;
-    use tracing::log::{error, info};
-    use crate::authority::authority_store_pruner::tests::lock_table;
-    use crate::authority::authority_store_pruner::AuthorityStorePruningMetrics;
-    use crate::authority::authority_store_tables::AuthorityPerpetualTables;
-    use crate::authority::authority_store_types::{get_store_object_pair, StoreObjectWrapper};
    use pprof::Symbol;
    use prometheus::Registry;
-    use sui_types::base_types::ObjectDigest;
-    use sui_types::base_types::VersionNumber;
-    use sui_types::effects::TransactionEffects;
-    use sui_types::effects::TransactionEffectsAPI;
    use sui_types::{
-        base_types::{ObjectID, SequenceNumber},
+        base_types::{ObjectDigest, ObjectID, SequenceNumber, VersionNumber},
+        effects::{TransactionEffects, TransactionEffectsAPI},
        object::Object,
        storage::ObjectKey,
    };
-    use typed_store::rocks::DBMap;
-    use typed_store::Map;
+    use tracing::log::{error, info};
+    use typed_store::{rocks::DBMap, Map};

    use super::AuthorityStorePruner;
+    use crate::authority::{
+        authority_store_pruner::{tests, tests::lock_table, AuthorityStorePruningMetrics},
+        authority_store_tables::AuthorityPerpetualTables,
+        authority_store_types::{get_store_object_pair, StoreObjectWrapper},
+    };

    fn insert_keys(
        objects: &DBMap<ObjectKey, StoreObjectWrapper>,
@@ -1105,12 +1110,13 @@ mod pprof_tests {
    }

    #[tokio::test]
-    async fn ensure_no_tombstone_fragmentation_in_stack_frame_with_ignore_tombstones(
-    ) -> Result<(), anyhow::Error> {
-        // This test writes a bunch of objects to objects table, invokes pruning on it and
-        // then does a bunch of get(). We open the db with `ignore_range_delete` set to true (default mode).
-        // We then record a cpu profile of the `get()` calls and do not find any range fragmentation stack frame
-        // in it.
+    async fn ensure_no_tombstone_fragmentation_in_stack_frame_with_ignore_tombstones()
+    -> Result<(), anyhow::Error> {
+        // This test writes a bunch of objects to objects table, invokes pruning on it
+        // and then does a bunch of get(). We open the db with
+        // `ignore_range_delete` set to true (default mode). We then record a
+        // cpu profile of the `get()` calls and do not find any range fragmentation
+        // stack frame in it.
        let registry = Registry::default();
        let metrics = AuthorityStorePruningMetrics::new(&registry);
        let primary_path = tempfile::tempdir()?.into_path();
@@ -1132,21 +1138,23 @@ mod pprof_tests {
            .unwrap();
        read_keys(&perpetual_db.objects, 1000)?;
        if let Ok(report) = guard.report().build() {
-            assert!(!report.data.keys().any(|f| f
-                .frames
-                .iter()
-                .any(|vs| is_rocksdb_range_tombstone_frame(vs))));
+            assert!(!report.data.keys().any(|f| {
+                f.frames
+                    .iter()
+                    .any(|vs| is_rocksdb_range_tombstone_frame(vs))
+            }));
        }
        Ok(())
    }

    #[tokio::test]
-    async fn ensure_no_tombstone_fragmentation_in_stack_frame_after_flush(
-    ) -> Result<(), anyhow::Error> {
-        // This test writes a bunch of objects to objects table, invokes pruning on it and
-        // then does a bunch of get(). We open the db with `ignore_range_delete` set to true (default mode).
-        // We then record a cpu profile of the `get()` calls and do not find any range fragmentation stack frame
-        // in it.
+    async fn ensure_no_tombstone_fragmentation_in_stack_frame_after_flush()
+    -> Result<(), anyhow::Error> {
+        // This test writes a bunch of objects to objects table, invokes pruning on it
+        // and then does a bunch of get(). We open the db with
+        // `ignore_range_delete` set to true (default mode). We then record a
+        // cpu profile of the `get()` calls and do not find any range fragmentation
+        // stack frame in it.
        let primary_path = tempfile::tempdir()?.into_path();
        let perpetual_db = Arc::new(AuthorityPerpetualTables::open(&primary_path, None));
        let effects = insert_keys(&perpetual_db.objects)?;
@@ -1173,10 +1181,11 @@
            .unwrap();
        read_keys(&perpetual_db.objects, 1000)?;
        if let Ok(report) = guard.report().build() {
-            assert!(!report.data.keys().any(|f| f
-                .frames
-                .iter()
-                .any(|vs| is_rocksdb_range_tombstone_frame(vs))));
+            assert!(!report.data.keys().any(|f| {
+                f.frames
+                    .iter()
+                    .any(|vs| is_rocksdb_range_tombstone_frame(vs))
+            }));
        }
        Ok(())
    }
diff --git a/crates/sui-core/src/authority/authority_store_tables.rs b/crates/sui-core/src/authority/authority_store_tables.rs
index 1367ed5f35b..cfe1a54d350 100644
--- a/crates/sui-core/src/authority/authority_store_tables.rs
+++ b/crates/sui-core/src/authority/authority_store_tables.rs
@@ -1,30 +1,35 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use super::*; -use crate::authority::authority_store::LockDetailsWrapperDeprecated; +use std::path::Path; + use rocksdb::Options; use serde::{Deserialize, Serialize}; -use std::path::Path; -use sui_types::accumulator::Accumulator; -use sui_types::base_types::SequenceNumber; -use sui_types::digests::TransactionEventsDigest; -use sui_types::effects::TransactionEffects; -use sui_types::storage::MarkerValue; -use typed_store::metrics::SamplingInterval; -use typed_store::rocks::util::{empty_compaction_filter, reference_count_merge_operator}; -use typed_store::rocks::{ - default_db_options, read_size_from_env, DBBatch, DBMap, DBOptions, MetricConf, ReadWriteOptions, +use sui_types::{ + accumulator::Accumulator, base_types::SequenceNumber, digests::TransactionEventsDigest, + effects::TransactionEffects, storage::MarkerValue, }; -use typed_store::traits::{Map, TableSummary, TypedStoreDebug}; - -use crate::authority::authority_store_types::{ - get_store_object_pair, try_construct_object, ObjectContentDigest, StoreData, - StoreMoveObjectWrapper, StoreObject, StoreObjectPair, StoreObjectValue, StoreObjectWrapper, +use typed_store::{ + metrics::SamplingInterval, + rocks::{ + default_db_options, read_size_from_env, + util::{empty_compaction_filter, reference_count_merge_operator}, + DBBatch, DBMap, DBOptions, MetricConf, ReadWriteOptions, + }, + traits::{Map, TableSummary, TypedStoreDebug}, }; -use crate::authority::epoch_start_configuration::EpochStartConfiguration; use typed_store_derive::DBMapUtils; +use super::*; +use crate::authority::{ + authority_store::LockDetailsWrapperDeprecated, + authority_store_types::{ + get_store_object_pair, try_construct_object, ObjectContentDigest, StoreData, + StoreMoveObjectWrapper, StoreObject, StoreObjectPair, StoreObjectValue, StoreObjectWrapper, + }, + epoch_start_configuration::EpochStartConfiguration, +}; + const ENV_VAR_OBJECTS_BLOCK_CACHE_SIZE: &str = "OBJECTS_BLOCK_CACHE_MB"; pub(crate) const ENV_VAR_LOCKS_BLOCK_CACHE_SIZE: &str = "LOCKS_BLOCK_CACHE_MB"; const ENV_VAR_TRANSACTIONS_BLOCK_CACHE_SIZE: &str = "TRANSACTIONS_BLOCK_CACHE_MB"; @@ -32,56 +37,68 @@ const ENV_VAR_EFFECTS_BLOCK_CACHE_SIZE: &str = "EFFECTS_BLOCK_CACHE_MB"; const ENV_VAR_EVENTS_BLOCK_CACHE_SIZE: &str = "EVENTS_BLOCK_CACHE_MB"; const ENV_VAR_INDIRECT_OBJECTS_BLOCK_CACHE_SIZE: &str = "INDIRECT_OBJECTS_BLOCK_CACHE_MB"; -/// AuthorityPerpetualTables contains data that must be preserved from one epoch to the next. +/// AuthorityPerpetualTables contains data that must be preserved from one epoch +/// to the next. #[derive(DBMapUtils)] pub struct AuthorityPerpetualTables { - /// This is a map between the object (ID, version) and the latest state of the object, namely the - /// state that is needed to process new transactions. - /// State is represented by `StoreObject` enum, which is either a move module, a move object, or - /// a pointer to an object stored in the `indirect_move_objects` table. + /// This is a map between the object (ID, version) and the latest state of + /// the object, namely the state that is needed to process new + /// transactions. State is represented by `StoreObject` enum, which is + /// either a move module, a move object, or a pointer to an object + /// stored in the `indirect_move_objects` table. /// - /// Note that while this map can store all versions of an object, we will eventually - /// prune old object versions from the db. 
+    /// Note that while this map can store all versions of an object, we will
+    /// eventually prune old object versions from the db.
    ///
-    /// IMPORTANT: object versions must *only* be pruned if they appear as inputs in some
-    /// TransactionEffects. Simply pruning all objects but the most recent is an error!
-    /// This is because there can be partially executed transactions whose effects have not yet
-    /// been written out, and which must be retried. But, they cannot be retried unless their input
-    /// objects are still accessible!
+    /// IMPORTANT: object versions must *only* be pruned if they appear as
+    /// inputs in some TransactionEffects. Simply pruning all objects but
+    /// the most recent is an error! This is because there can be partially
+    /// executed transactions whose effects have not yet been written out,
+    /// and which must be retried. But, they cannot be retried unless their
+    /// input objects are still accessible!
    #[default_options_override_fn = "objects_table_default_config"]
    pub(crate) objects: DBMap<ObjectKey, StoreObjectWrapper>,

    #[default_options_override_fn = "indirect_move_objects_table_default_config"]
    pub(crate) indirect_move_objects: DBMap<ObjectContentDigest, StoreMoveObjectWrapper>,

-    /// This is a map between object references of currently active objects that can be mutated.
+    /// This is a map between object references of currently active objects that
+    /// can be mutated.
    ///
-    /// For old epochs, it may also contain the transaction that they are lock on for use by this
-    /// specific validator. The transaction locks themselves are now in AuthorityPerEpochStore.
+    /// For old epochs, it may also contain the transaction that they are locked
+    /// on for use by this specific validator. The transaction locks
+    /// themselves are now in AuthorityPerEpochStore.
    #[default_options_override_fn = "owned_object_transaction_locks_table_default_config"]
    #[rename = "owned_object_transaction_locks"]
    pub(crate) live_owned_object_markers: DBMap<ObjectRef, Option<LockDetailsWrapperDeprecated>>,

-    /// This is a map between the transaction digest and the corresponding transaction that's known to be
-    /// executable. This means that it may have been executed locally, or it may have been synced through
+    /// This is a map between the transaction digest and the corresponding
+    /// transaction that's known to be executable. This means that it may
+    /// have been executed locally, or it may have been synced through
    /// state-sync but hasn't been executed yet.
    #[default_options_override_fn = "transactions_table_default_config"]
    pub(crate) transactions: DBMap<TransactionDigest, TrustedTransaction>,

-    /// A map between the transaction digest of a certificate to the effects of its execution.
-    /// We store effects into this table in two different cases:
-    /// 1. When a transaction is synced through state_sync, we store the effects here. These effects
-    /// are known to be final in the network, but may not have been executed locally yet.
-    /// 2. When the transaction is executed locally on this node, we store the effects here. This means that
-    /// it's possible to store the same effects twice (once for the synced transaction, and once for the executed).
-    /// It's also possible for the effects to be reverted if the transaction didn't make it into the epoch.
+    /// A map between the transaction digest of a certificate to the effects of
+    /// its execution. We store effects into this table in two different
+    /// cases:
+    /// 1. When a transaction is synced through state_sync, we store the effects
+    ///    here. These effects are known to be final in the network, but may not
+    ///    have been executed locally yet.
+    /// 2. When the transaction is executed locally on this node, we store the
+    ///    effects here. This means that it's possible to store the same effects
+    ///    twice (once for the synced transaction, and once for the executed).
+    ///    It's also possible for the effects to be reverted if the transaction
+    ///    didn't make it into the epoch.
    #[default_options_override_fn = "effects_table_default_config"]
    pub(crate) effects: DBMap<TransactionEffectsDigest, TransactionEffects>,

-    /// Transactions that have been executed locally on this node. We need this table since the `effects` table
-    /// doesn't say anything about the execution status of the transaction on this node. When we wait for transactions
-    /// to be executed, we wait for them to appear in this table. When we revert transactions, we remove them from both
-    /// tables.
+    /// Transactions that have been executed locally on this node. We need this
+    /// table since the `effects` table doesn't say anything about the
+    /// execution status of the transaction on this node. When we wait for
+    /// transactions to be executed, we wait for them to appear in this
+    /// table. When we revert transactions, we remove them from both tables.
    pub(crate) executed_effects: DBMap<TransactionDigest, TransactionEffectsDigest>,

    // Currently this is needed in the validator for returning events during process certificates.
@@ -91,9 +108,10 @@ pub struct AuthorityPerpetualTables {
    #[default_options_override_fn = "events_table_default_config"]
    pub(crate) events: DBMap<(TransactionEventsDigest, usize), Event>,

-    /// DEPRECATED in favor of the table of the same name in authority_per_epoch_store.
-    /// Please do not add new accessors/callsites.
-    /// When transaction is executed via checkpoint executor, we store association here
+    /// DEPRECATED in favor of the table of the same name in
+    /// authority_per_epoch_store. Please do not add new
+    /// accessors/callsites. When a transaction is executed via the checkpoint
+    /// executor, we store the association here.
    pub(crate) executed_transactions_to_checkpoint:
        DBMap<TransactionDigest, CheckpointSequenceNumber>,

@@ -105,25 +123,30 @@ pub struct AuthorityPerpetualTables {
    /// Parameters of the system fixed at the epoch start
    pub(crate) epoch_start_configuration: DBMap<(), EpochStartConfiguration>,

-    /// A singleton table that stores latest pruned checkpoint. Used to keep objects pruner progress
+    /// A singleton table that stores the latest pruned checkpoint. Used to keep
+    /// the objects pruner's progress
    pub(crate) pruned_checkpoint: DBMap<(), CheckpointSequenceNumber>,

-    /// Expected total amount of SUI in the network. This is expected to remain constant
-    /// throughout the lifetime of the network. We check it at the end of each epoch if
-    /// expensive checks are enabled. We cannot use 10B today because in tests we often
-    /// inject extra gas objects into genesis.
+    /// Expected total amount of SUI in the network. This is expected to remain
+    /// constant throughout the lifetime of the network. We check it at the
+    /// end of each epoch if expensive checks are enabled. We cannot use 10B
+    /// today because in tests we often inject extra gas objects into
+    /// genesis.
    pub(crate) expected_network_sui_amount: DBMap<(), u64>,

-    /// Expected imbalance between storage fund balance and the sum of storage rebate of all live objects.
-    /// This could be non-zero due to bugs in earlier protocol versions.
-    /// This number is the result of storage_fund_balance - sum(storage_rebate).
+    /// Expected imbalance between storage fund balance and the sum of storage
+    /// rebate of all live objects. This could be non-zero due to bugs in
+    /// earlier protocol versions. This number is the result of
+    /// storage_fund_balance - sum(storage_rebate).
    pub(crate) expected_storage_fund_imbalance: DBMap<(), i64>,
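// ---------------------------------------------------------------------------
// Illustrative aside, not part of the diff: the `effects` vs `executed_effects`
// split in miniature. HashMaps stand in for the DBMap tables and the digest
// types are simplified; only the lookup/revert logic is the point.

use std::collections::HashMap;

type TxDigest = u64;
type EffectsDigest = u64;

#[derive(Default)]
struct EffectsTables {
    // Effects keyed by their own digest; entries may have arrived via
    // state-sync rather than local execution.
    effects: HashMap<EffectsDigest, Vec<u8>>,
    // Marker that *this* node executed the transaction.
    executed_effects: HashMap<TxDigest, EffectsDigest>,
}

impl EffectsTables {
    /// `effects` alone cannot answer this: an entry there may only mean the
    /// effects were synced, not that they were executed locally.
    fn is_executed_locally(&self, tx: &TxDigest) -> bool {
        self.executed_effects.contains_key(tx)
    }

    /// Reverting a transaction that missed the epoch removes it from both
    /// tables, as the field comments above describe.
    fn revert(&mut self, tx: &TxDigest) {
        if let Some(fx) = self.executed_effects.remove(tx) {
            self.effects.remove(&fx);
        }
    }
}
// ---------------------------------------------------------------------------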
-    /// Table that stores the set of received objects and deleted objects and the version at
-    /// which they were received. This is used to prevent possible race conditions around receiving
-    /// objects (since they are not locked by the transaction manager) and for tracking shared
-    /// objects that have been deleted. This table is meant to be pruned per-epoch, and all
-    /// previous epochs other than the current epoch may be pruned safely.
+    /// Table that stores the set of received objects and deleted objects and
+    /// the version at which they were received. This is used to prevent
+    /// possible race conditions around receiving objects (since they are
+    /// not locked by the transaction manager) and for tracking shared
+    /// objects that have been deleted. This table is meant to be pruned
+    /// per-epoch, and all previous epochs other than the current epoch may
+    /// be pruned safely.
    pub(crate) object_per_epoch_marker_table: DBMap<(EpochId, ObjectKey), MarkerValue>,
}
@@ -151,9 +174,10 @@ impl AuthorityPerpetualTables {
        )
    }

-    // This is used by indexer to find the correct version of dynamic field child object.
-    // We do not store the version of the child object, but because of lamport timestamp,
-    // we know the child must have version number less then or eq to the parent.
+    // This is used by the indexer to find the correct version of a dynamic field
+    // child object. We do not store the version of the child object, but because
+    // of the Lamport timestamp, we know the child must have a version number less
+    // than or equal to the parent's.
    pub fn find_object_lt_or_eq_version(
        &self,
        object_id: ObjectID,
diff --git a/crates/sui-core/src/authority/authority_store_types.rs b/crates/sui-core/src/authority/authority_store_types.rs
index 46747b26cc4..66c03fc08c1 100644
--- a/crates/sui-core/src/authority/authority_store_types.rs
+++ b/crates/sui-core/src/authority/authority_store_types.rs
@@ -2,40 +2,43 @@
// SPDX-License-Identifier: Apache-2.0

use serde::{Deserialize, Serialize};
-use serde_with::serde_as;
-use serde_with::Bytes;
-use sui_types::base_types::MoveObjectType;
-use sui_types::base_types::{ObjectDigest, SequenceNumber, TransactionDigest};
-use sui_types::coin::Coin;
-use sui_types::crypto::{default_hash, Signable};
-use sui_types::error::SuiError;
-use sui_types::move_package::MovePackage;
-use sui_types::object::{Data, MoveObject, Object, ObjectInner, Owner};
-use sui_types::storage::ObjectKey;
+use serde_with::{serde_as, Bytes};
+use sui_types::{
+    base_types::{MoveObjectType, ObjectDigest, SequenceNumber, TransactionDigest},
+    coin::Coin,
+    crypto::{default_hash, Signable},
+    error::SuiError,
+    move_package::MovePackage,
+    object::{Data, MoveObject, Object, ObjectInner, Owner},
+    storage::ObjectKey,
+};

pub type ObjectContentDigest = ObjectDigest;

// Versioning process:
//
-// Object storage versioning is done lazily (at read time) - therefore we must always preserve the
-// code for reading the very first storage version. For all versions, a migration function
+// Object storage versioning is done lazily (at read time) - therefore we must
+// always preserve the code for reading the very first storage version. For all
+// versions, a migration function
//
//     f(V_n) -> V_(n+1)
//
-// must be defined. This way we can iteratively migrate the very oldest version to the very newest
-// version at any point in the future.
+// must be defined. This way we can iteratively migrate the very oldest version
+// to the very newest version at any point in the future.
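// ---------------------------------------------------------------------------
// Illustrative aside, not part of the diff: the f(V_n) -> V_(n+1) chain in
// code, with a hypothetical V2 added so the iterative shape is visible. The
// V1/V2 payloads are invented for the example; only the migrate-until-latest
// pattern reflects the scheme described in the comment.

struct StoreObjectV1 {
    bytes: Vec<u8>,
}

struct StoreObjectV2 {
    bytes: Vec<u8>,
    // Hypothetical field introduced by the format change.
    flags: u8,
}

enum VersionedStoreObject {
    V1(StoreObjectV1),
    V2(StoreObjectV2),
}

impl VersionedStoreObject {
    /// Apply one migration step at a time until the newest variant is reached,
    /// so only the very first reader plus each single-step migration must be
    /// kept around forever.
    fn migrate(mut self) -> Self {
        loop {
            self = match self {
                // f(V1) -> V2: the only code that ever reads V1.
                Self::V1(v1) => Self::V2(StoreObjectV2 { bytes: v1.bytes, flags: 0 }),
                // Already the latest version: done.
                latest @ Self::V2(_) => return latest,
            };
        }
    }
}
// ---------------------------------------------------------------------------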
//
-// To change the format of the object table value types (StoreObject and StoreMoveObject), use the
-// following process:
+// To change the format of the object table value types (StoreObject and
+// StoreMoveObject), use the following process:
// - Add a new variant to the enum to store the new version type.
-// - Extend the `migrate` functions to migrate from the previous version to the new version.
-// - Change `From<Object> for StoreObjectPair` to create the newest version only.
+// - Extend the `migrate` functions to migrate from the previous version to the
+//   new version.
+// - Change `From<Object> for StoreObjectPair` to create the newest version
+//   only.
//
// Additionally, the first time we version these formats, we will need to:
-// - Add a check in the `TryFrom<StoreObjectWrapper> for Object` to see if the object that was just
-//   read is the latest version.
-// - If it is not, use the migration function (as explained above) to migrate it to the next
-//   version.
+// - Add a check in the `TryFrom<StoreObjectWrapper> for Object` to see if the
+//   object that was just read is the latest version.
+// - If it is not, use the migration function (as explained above) to migrate it
+//   to the next version.
// - Repeat until we arrive at the current version.

/// Enum wrapper for versioning
@@ -49,13 +52,14 @@ pub type StoreObject = StoreObjectV1;

impl StoreObjectWrapper {
    pub fn migrate(self) -> Self {
-        // TODO: when there are multiple versions, we must iteratively migrate from version N to
-        // N+1 until we arrive at the latest version
+        // TODO: when there are multiple versions, we must iteratively migrate from
+        // version N to N+1 until we arrive at the latest version
        self
    }

-    // Always returns the most recent version. Older versions are migrated to the latest version at
-    // read time, so there is never a need to access older versions.
+    // Always returns the most recent version. Older versions are migrated to the
+    // latest version at read time, so there is never a need to access older
+    // versions.
    pub fn inner(&self) -> &StoreObject {
        match self {
            Self::V1(v1) => v1,
@@ -100,7 +104,8 @@ pub struct StoreObjectValue {
}

/// Forked version of [`sui_types::object::Data`]
-/// Adds extra enum value `IndirectObject`, which represents a reference to an object stored separately
+/// Adds extra enum value `IndirectObject`, which represents a reference to an
+/// object stored separately
#[derive(Eq, PartialEq, Debug, Clone, Deserialize, Serialize, Hash)]
pub enum StoreData {
    Move(MoveObject),
@@ -127,13 +132,14 @@ pub type StoreMoveObject = StoreMoveObjectV1;

impl StoreMoveObjectWrapper {
    pub fn migrate(self) -> Self {
-        // TODO: when there are multiple versions, we must iteratively migrate from version N to
-        // N+1 until we arrive at the latest version
+        // TODO: when there are multiple versions, we must iteratively migrate from
+        // version N to N+1 until we arrive at the latest version
        self
    }

-    // Always returns the most recent version. Older versions are migrated to the latest version at
-    // read time, so there is never a need to access older versions.
+    // Always returns the most recent version. Older versions are migrated to the
+    // latest version at read time, so there is never a need to access older
+    // versions.
pub fn inner(&self) -> &StoreMoveObject { match self { Self::V1(v1) => v1, @@ -270,7 +276,7 @@ pub(crate) fn try_construct_object( _ => { return Err(SuiError::Storage( "corrupted field: inconsistent object representation".to_string(), - )) + )); } }; diff --git a/crates/sui-core/src/authority/authority_test_utils.rs b/crates/sui-core/src/authority/authority_test_utils.rs index 0e8f875e138..e07b7aa6a1b 100644 --- a/crates/sui-core/src/authority/authority_test_utils.rs +++ b/crates/sui-core/src/authority/authority_test_utils.rs @@ -2,23 +2,22 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::checkpoints::CheckpointServiceNoop; -use crate::consensus_handler::SequencedConsensusTransaction; use core::default::Default; -use fastcrypto::hash::MultisetHash; -use fastcrypto::traits::KeyPair; + +use fastcrypto::{hash::MultisetHash, traits::KeyPair}; use move_core_types::account_address::AccountAddress; use move_symbol_pool::Symbol; use sui_move_build::{BuildConfig, CompiledPackage}; -use sui_types::crypto::Signature; -use sui_types::crypto::{AccountKeyPair, AuthorityKeyPair}; -use sui_types::messages_consensus::ConsensusTransaction; -use sui_types::move_package::UpgradePolicy; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::utils::to_sender_signed_transaction; +use sui_types::{ + crypto::{AccountKeyPair, AuthorityKeyPair, Signature}, + messages_consensus::ConsensusTransaction, + move_package::UpgradePolicy, + programmable_transaction_builder::ProgrammableTransactionBuilder, + utils::to_sender_signed_transaction, +}; -use super::test_authority_builder::TestAuthorityBuilder; -use super::*; +use super::{test_authority_builder::TestAuthorityBuilder, *}; +use crate::{checkpoints::CheckpointServiceNoop, consensus_handler::SequencedConsensusTransaction}; pub async fn send_and_confirm_transaction( authority: &AuthorityState, @@ -26,9 +25,9 @@ pub async fn send_and_confirm_transaction( ) -> Result<(CertifiedTransaction, SignedTransactionEffects), SuiError> { send_and_confirm_transaction_( authority, - None, /* no fullnode_key_pair */ + None, // no fullnode_key_pair transaction, - false, /* no shared objects */ + false, // no shared objects ) .await } @@ -84,8 +83,8 @@ pub async fn execute_certificate_with_execution_error( SuiError, > { let epoch_store = authority.load_epoch_store_one_call_per_task(); - // We also check the incremental effects of the transaction on the live object set against StateAccumulator - // for testing and regression detection. + // We also check the incremental effects of the transaction on the live object + // set against StateAccumulator for testing and regression detection. // We must do this before sending to consensus, otherwise consensus may already // lead to transaction execution and state change. let state_acc = StateAccumulator::new(authority.execution_cache.clone()); @@ -102,8 +101,10 @@ pub async fn execute_certificate_with_execution_error( } } - // Submit the confirmation. *Now* execution actually happens, and it should fail when we try to look up our dummy module. - // we unfortunately don't get a very descriptive error message, but we can at least see that something went wrong inside the VM + // Submit the confirmation. *Now* execution actually happens, and it should fail + // when we try to look up our dummy module. 
we unfortunately don't get a + // very descriptive error message, but we can at least see that something went + // wrong inside the VM let (result, execution_error_opt) = authority.try_execute_for_test(&certificate).await?; let state_after = state_acc.accumulate_live_object_set(include_wrapped_tombstone); let effects_acc = state_acc.accumulate_effects( @@ -387,7 +388,8 @@ pub async fn send_consensus_no_execution(authority: &AuthorityState, cert: &Veri ConsensusTransaction::new_certificate_message(&authority.name, cert.clone().into_inner()), ); - // Call process_consensus_transaction() instead of handle_consensus_transaction(), to avoid actually executing cert. + // Call process_consensus_transaction() instead of + // handle_consensus_transaction(), to avoid actually executing cert. // This allows testing cert execution independently. authority .epoch_store_for_testing() @@ -427,11 +429,13 @@ pub fn build_test_modules_with_dep_addr( for unpublished_dep in &package.dependency_ids.unpublished { let published_id = dep_id_mapping.get(unpublished_dep).unwrap(); // Make sure we aren't overriding a package - assert!(package - .dependency_ids - .published - .insert(*unpublished_dep, *published_id) - .is_none()) + assert!( + package + .dependency_ids + .published + .insert(*unpublished_dep, *published_id) + .is_none() + ) } // No unpublished deps @@ -440,9 +444,10 @@ pub fn build_test_modules_with_dep_addr( } /// Returns the new package's ID and the upgrade cap object ref. -/// `dep_original_addresses` allows us to fill out mappings in the addresses section of the package manifest. These IDs -/// must be the original IDs of names. -/// dep_ids are the IDs of the dependencies of the package, in the latest version (if there were upgrades). +/// `dep_original_addresses` allows us to fill out mappings in the addresses +/// section of the package manifest. These IDs must be the original IDs of +/// names. dep_ids are the IDs of the dependencies of the package, in the latest +/// version (if there were upgrades). pub async fn publish_package_on_single_authority( path: PathBuf, sender: SuiAddress, @@ -485,7 +490,7 @@ pub async fn publish_package_on_single_authority( .find(|c| c.1 == Owner::Immutable) .unwrap() .0 - .0; + .0; let cap_object = effects .data() .created() @@ -537,6 +542,6 @@ pub async fn upgrade_package_on_single_authority( .find(|c| c.1 == Owner::Immutable) .unwrap() .0 - .0; + .0; Ok(package_id) } diff --git a/crates/sui-core/src/authority/epoch_start_configuration.rs b/crates/sui-core/src/authority/epoch_start_configuration.rs index ec6105e9454..f46b7ab0b78 100644 --- a/crates/sui-core/src/authority/epoch_start_configuration.rs +++ b/crates/sui-core/src/authority/epoch_start_configuration.rs @@ -1,20 +1,22 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::fmt; + use enum_dispatch::enum_dispatch; use serde::{Deserialize, Serialize}; - -use std::fmt; -use sui_types::authenticator_state::get_authenticator_state_obj_initial_shared_version; -use sui_types::base_types::SequenceNumber; -use sui_types::deny_list::get_deny_list_obj_initial_shared_version; -use sui_types::epoch_data::EpochData; -use sui_types::error::SuiResult; -use sui_types::messages_checkpoint::{CheckpointDigest, CheckpointTimestamp}; -use sui_types::randomness_state::get_randomness_state_obj_initial_shared_version; -use sui_types::storage::ObjectStore; -use sui_types::sui_system_state::epoch_start_sui_system_state::{ - EpochStartSystemState, EpochStartSystemStateTrait, +use sui_types::{ + authenticator_state::get_authenticator_state_obj_initial_shared_version, + base_types::SequenceNumber, + deny_list::get_deny_list_obj_initial_shared_version, + epoch_data::EpochData, + error::SuiResult, + messages_checkpoint::{CheckpointDigest, CheckpointTimestamp}, + randomness_state::get_randomness_state_obj_initial_shared_version, + storage::ObjectStore, + sui_system_state::epoch_start_sui_system_state::{ + EpochStartSystemState, EpochStartSystemStateTrait, + }, }; #[enum_dispatch] @@ -86,8 +88,9 @@ pub struct EpochStartConfigurationV1 { system_state: EpochStartSystemState, /// epoch_digest is defined as following /// (1) For the genesis epoch it is set to 0 - /// (2) For all other epochs it is a digest of the last checkpoint of a previous epoch - /// Note that this is in line with how epoch start timestamp is defined + /// (2) For all other epochs it is a digest of the last checkpoint of a + /// previous epoch Note that this is in line with how epoch start + /// timestamp is defined epoch_digest: CheckpointDigest, } @@ -279,7 +282,8 @@ impl EpochFlag { impl fmt::Display for EpochFlag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Important - implementation should return low cardinality values because this is used as metric key + // Important - implementation should return low cardinality values because this + // is used as metric key match self { EpochFlag::InMemoryCheckpointRoots => write!(f, "InMemoryCheckpointRoots"), EpochFlag::PerEpochFinalizedTransactions => write!(f, "PerEpochFinalizedTransactions"), diff --git a/crates/sui-core/src/authority/shared_object_version_manager.rs b/crates/sui-core/src/authority/shared_object_version_manager.rs index 0279fbb9aff..c3f01f0ad8d 100644 --- a/crates/sui-core/src/authority/shared_object_version_manager.rs +++ b/crates/sui-core/src/authority/shared_object_version_manager.rs @@ -1,26 +1,25 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::authority::epoch_start_configuration::EpochStartConfigTrait; -use crate::authority::AuthorityPerEpochStore; -use crate::execution_cache::ExecutionCacheRead; use std::collections::HashMap; -use sui_types::crypto::RandomnessRound; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; -use sui_types::executable_transaction::VerifiedExecutableTransaction; -use sui_types::storage::{ - transaction_input_object_keys, transaction_receiving_object_keys, ObjectKey, -}; -use sui_types::transaction::{ - SenderSignedData, SharedInputObject, TransactionDataAPI, TransactionKey, -}; + use sui_types::{ base_types::{ObjectID, SequenceNumber}, + crypto::RandomnessRound, + effects::{TransactionEffects, TransactionEffectsAPI}, error::SuiResult, + executable_transaction::VerifiedExecutableTransaction, + storage::{transaction_input_object_keys, transaction_receiving_object_keys, ObjectKey}, + transaction::{SenderSignedData, SharedInputObject, TransactionDataAPI, TransactionKey}, SUI_RANDOMNESS_STATE_OBJECT_ID, }; use tracing::{debug, trace}; +use crate::{ + authority::{epoch_start_configuration::EpochStartConfigTrait, AuthorityPerEpochStore}, + execution_cache::ExecutionCacheRead, +}; + pub struct SharedObjVerManager {} pub type AssignedTxAndVersions = Vec<(TransactionKey, Vec<(ObjectID, SequenceNumber)>)>; @@ -47,15 +46,19 @@ impl SharedObjVerManager { ) .await?; let mut assigned_versions = Vec::new(); - // We must update randomness object version first before processing any transaction, - // so that all reads are using the next version. - // TODO: Add a test that actually check this, i.e. if we change the order, some test should fail. + // We must update randomness object version first before processing any + // transaction, so that all reads are using the next version. + // TODO: Add a test that actually check this, i.e. if we change the order, some + // test should fail. if let Some(round) = randomness_round { // If we're generating randomness, update the randomness state object version. let version = shared_input_next_versions .get_mut(&SUI_RANDOMNESS_STATE_OBJECT_ID) .expect("randomness state object must have been added in get_or_init_versions()"); - debug!("assigning shared object versions for randomness: epoch {}, round {round:?} -> version {version:?}", epoch_store.epoch()); + debug!( + "assigning shared object versions for randomness: epoch {}, round {round:?} -> version {version:?}", + epoch_store.epoch() + ); assigned_versions.push(( TransactionKey::RandomnessRound(epoch_store.epoch(), round), vec![(SUI_RANDOMNESS_STATE_OBJECT_ID, *version)], @@ -83,11 +86,12 @@ impl SharedObjVerManager { cache_reader: &dyn ExecutionCacheRead, ) -> SuiResult { // We don't care about the results since we can use effects to assign versions. - // But we must call it to make sure whenever a shared object is touched the first time - // during an epoch, either through consensus or through checkpoint executor, - // its next version must be initialized. This is because we initialize the next version - // of a shared object in an epoch by reading the current version from the object store. - // This must be done before we mutate it the first time, otherwise we would be initializing + // But we must call it to make sure whenever a shared object is touched the + // first time during an epoch, either through consensus or through + // checkpoint executor, its next version must be initialized. 
This is + // because we initialize the next version of a shared object in an epoch + // by reading the current version from the object store. This must be + // done before we mutate it the first time, otherwise we would be initializing // it with the wrong version. let _ = get_or_init_versions( certs_and_effects.iter().map(|(cert, _)| cert.data()), @@ -205,25 +209,29 @@ fn assign_versions_for_certificate( #[cfg(test)] mod tests { - use crate::authority::epoch_start_configuration::EpochStartConfigTrait; - use crate::authority::shared_object_version_manager::{ - ConsensusSharedObjVerAssignment, SharedObjVerManager, - }; - use crate::authority::test_authority_builder::TestAuthorityBuilder; - use shared_crypto::intent::Intent; use std::collections::{BTreeMap, HashMap}; + + use shared_crypto::intent::Intent; use sui_test_transaction_builder::TestTransactionBuilder; - use sui_types::base_types::{ObjectID, SequenceNumber, SuiAddress}; - use sui_types::crypto::RandomnessRound; - use sui_types::digests::ObjectDigest; - use sui_types::effects::TestEffectsBuilder; - use sui_types::executable_transaction::{ - CertificateProof, ExecutableTransaction, VerifiedExecutableTransaction, + use sui_types::{ + base_types::{ObjectID, SequenceNumber, SuiAddress}, + crypto::RandomnessRound, + digests::ObjectDigest, + effects::TestEffectsBuilder, + executable_transaction::{ + CertificateProof, ExecutableTransaction, VerifiedExecutableTransaction, + }, + object::{Object, Owner}, + programmable_transaction_builder::ProgrammableTransactionBuilder, + transaction::{ObjectArg, SenderSignedData, TransactionKey}, + SUI_RANDOMNESS_STATE_OBJECT_ID, + }; + + use crate::authority::{ + epoch_start_configuration::EpochStartConfigTrait, + shared_object_version_manager::{ConsensusSharedObjVerAssignment, SharedObjVerManager}, + test_authority_builder::TestAuthorityBuilder, }; - use sui_types::object::{Object, Owner}; - use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; - use sui_types::transaction::{ObjectArg, SenderSignedData, TransactionKey}; - use sui_types::SUI_RANDOMNESS_STATE_OBJECT_ID; #[tokio::test] async fn test_assign_versions_from_consensus_basic() { @@ -258,21 +266,23 @@ mod tests { ) .await .unwrap(); - // Check that the shared object's next version is always initialized in the epoch store. + // Check that the shared object's next version is always initialized in the + // epoch store. assert_eq!( epoch_store.get_next_object_version(&id).unwrap(), init_shared_version ); - // Check that the final version of the shared object is the lamport version of the last - // transaction. + // Check that the final version of the shared object is the lamport version of + // the last transaction. assert_eq!( shared_input_next_versions, HashMap::from([(id, SequenceNumber::from_u64(12))]) ); // Check that the version assignment for each transaction is correct. - // For a transaction that uses the shared object with mutable=false, it won't update the version - // using lamport version, hence the next transaction will use the same version number. - // In the following case, certs[2] has the same assignment as certs[1] for this reason. + // For a transaction that uses the shared object with mutable=false, it won't + // update the version using lamport version, hence the next transaction + // will use the same version number. In the following case, certs[2] has + // the same assignment as certs[1] for this reason. 
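// ---------------------------------------------------------------------------
// Illustrative aside, not part of the diff: the assignment rule the
// surrounding test exercises, reduced to plain data. Shared objects are read
// at their currently assigned version; only a mutable use advances the next
// version to the transaction's Lamport version (one past the highest input
// object version, which is why a gas object's version can steer it). All
// types here are simplified stand-ins.

use std::collections::HashMap;

type ObjectId = u64;
type Version = u64;

struct SharedUse {
    id: ObjectId,
    mutable: bool,
    lamport_version: Version, // the using transaction's Lamport version
}

fn assign_shared_versions(
    next_versions: &mut HashMap<ObjectId, Version>,
    certs: &[SharedUse],
) -> Vec<Version> {
    let mut assigned = Vec::new();
    for cert in certs {
        let v = next_versions
            .get_mut(&cert.id)
            .expect("initialized on first touch in the epoch");
        assigned.push(*v);
        if cert.mutable {
            // A mutation bumps the shared object to the tx's Lamport version.
            *v = cert.lamport_version;
        }
        // A mutable=false use leaves the version unchanged, so the next
        // transaction is assigned the same version (certs[2] vs certs[1]).
    }
    assigned
}
// ---------------------------------------------------------------------------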
assert_eq!( assigned_versions, vec![ @@ -296,7 +306,8 @@ mod tests { generate_shared_obj_tx_with_gas_version( SUI_RANDOMNESS_STATE_OBJECT_ID, randomness_obj_version, - // This can only be false since it's not allowed to use randomness object with mutable=true. + // This can only be false since it's not allowed to use randomness object with + // mutable=true. false, 3, ), @@ -340,12 +351,14 @@ mod tests { ), ( certs[0].key(), - // It is critical that the randomness object version is updated before the assignment. + // It is critical that the randomness object version is updated before the + // assignment. vec![(SUI_RANDOMNESS_STATE_OBJECT_ID, next_randomness_obj_version)] ), ( certs[1].key(), - // It is critical that the randomness object version is updated before the assignment. + // It is critical that the randomness object version is updated before the + // assignment. vec![(SUI_RANDOMNESS_STATE_OBJECT_ID, next_randomness_obj_version)] ), ] @@ -397,7 +410,8 @@ mod tests { ) .await .unwrap(); - // Check that the shared object's next version is always initialized in the epoch store. + // Check that the shared object's next version is always initialized in the + // epoch store. assert_eq!( epoch_store.get_next_object_version(&id).unwrap(), init_shared_version @@ -413,9 +427,10 @@ mod tests { ); } - /// Generate a transaction that uses a shared object as specified in the parameters. - /// Also uses a gas object with specified version. - /// The version of the gas object is used to manipulate the lamport version of this transaction. + /// Generate a transaction that uses a shared object as specified in the + /// parameters. Also uses a gas object with specified version. + /// The version of the gas object is used to manipulate the lamport version + /// of this transaction. fn generate_shared_obj_tx_with_gas_version( shared_object_id: ObjectID, shared_object_init_version: SequenceNumber, diff --git a/crates/sui-core/src/authority/test_authority_builder.rs b/crates/sui-core/src/authority/test_authority_builder.rs index ad38ea91ee8..570b5892f45 100644 --- a/crates/sui-core/src/authority/test_authority_builder.rs +++ b/crates/sui-core/src/authority/test_authority_builder.rs @@ -1,42 +1,48 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0

-use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
-use crate::authority::authority_store_tables::AuthorityPerpetualTables;
-use crate::authority::epoch_start_configuration::EpochStartConfiguration;
-use crate::authority::{AuthorityState, AuthorityStore};
-use crate::checkpoints::CheckpointStore;
-use crate::epoch::committee_store::CommitteeStore;
-use crate::epoch::epoch_metrics::EpochMetrics;
-use crate::execution_cache::ExecutionCache;
-use crate::module_cache_metrics::ResolverMetrics;
-use crate::signature_verifier::SignatureVerifierMetrics;
+use std::{path::PathBuf, sync::Arc};
+
use fastcrypto::traits::KeyPair;
use prometheus::Registry;
-use std::path::PathBuf;
-use std::sync::Arc;
use sui_archival::reader::ArchiveReaderBalancer;
-use sui_config::certificate_deny_config::CertificateDenyConfig;
-use sui_config::genesis::Genesis;
-use sui_config::node::{AuthorityOverloadConfig, StateDebugDumpConfig};
-use sui_config::node::{
-    AuthorityStorePruningConfig, DBCheckpointConfig, ExpensiveSafetyCheckConfig,
+use sui_config::{
+    certificate_deny_config::CertificateDenyConfig,
+    genesis::Genesis,
+    node::{
+        AuthorityOverloadConfig, AuthorityStorePruningConfig, DBCheckpointConfig,
+        ExpensiveSafetyCheckConfig, StateDebugDumpConfig,
+    },
+    transaction_deny_config::TransactionDenyConfig,
};
-use sui_config::transaction_deny_config::TransactionDenyConfig;
use sui_macros::nondeterministic;
use sui_protocol_config::{ProtocolConfig, SupportedProtocolVersions};
use sui_storage::IndexStore;
-use sui_swarm_config::genesis_config::AccountConfig;
-use sui_swarm_config::network_config::NetworkConfig;
-use sui_types::base_types::{AuthorityName, ObjectID};
-use sui_types::crypto::AuthorityKeyPair;
-use sui_types::digests::ChainIdentifier;
-use sui_types::executable_transaction::VerifiedExecutableTransaction;
-use sui_types::object::Object;
-use sui_types::sui_system_state::SuiSystemStateTrait;
-use sui_types::transaction::VerifiedTransaction;
+use sui_swarm_config::{genesis_config::AccountConfig, network_config::NetworkConfig};
+use sui_types::{
+    base_types::{AuthorityName, ObjectID},
+    crypto::AuthorityKeyPair,
+    digests::ChainIdentifier,
+    executable_transaction::VerifiedExecutableTransaction,
+    object::Object,
+    sui_system_state::SuiSystemStateTrait,
+    transaction::VerifiedTransaction,
+};
use tempfile::tempdir;

+use crate::{
+    authority::{
+        authority_per_epoch_store::AuthorityPerEpochStore,
+        authority_store_tables::AuthorityPerpetualTables,
+        epoch_start_configuration::EpochStartConfiguration, AuthorityState, AuthorityStore,
+    },
+    checkpoints::CheckpointStore,
+    epoch::{committee_store::CommitteeStore, epoch_metrics::EpochMetrics},
+    execution_cache::ExecutionCache,
+    module_cache_metrics::ResolverMetrics,
+    signature_verifier::SignatureVerifierMetrics,
+};
+
#[derive(Default, Clone)]
pub struct TestAuthorityBuilder<'a> {
    store_base_path: Option<PathBuf>,
@@ -51,7 +57,8 @@ pub struct TestAuthorityBuilder<'a> {
    expensive_safety_checks: Option<ExpensiveSafetyCheckConfig>,
    disable_indexer: bool,
    accounts: Vec<AccountConfig>,
-    /// By default, we don't insert the genesis checkpoint, which isn't needed by most tests.
+    /// By default, we don't insert the genesis checkpoint, which isn't needed
+    /// by most tests.
    insert_genesis_checkpoint: bool,
    authority_overload_config: Option<AuthorityOverloadConfig>,
}

@@ -92,12 +99,14 @@ impl<'a> TestAuthorityBuilder<'a> {
    }

    pub fn with_reference_gas_price(mut self, reference_gas_price: u64) -> Self {
-        // If genesis is already set then setting rgp is meaningless since it will be overwritten.
+        // If genesis is already set then setting rgp is meaningless since it will be
+        // overwritten.
        assert!(self.genesis.is_none());
-        assert!(self
-            .reference_gas_price
-            .replace(reference_gas_price)
-            .is_none());
+        assert!(
+            self.reference_gas_price
+                .replace(reference_gas_price)
+                .is_none()
+        );
        self
    }

@@ -195,8 +204,8 @@ impl<'a> TestAuthorityBuilder<'a> {
        let signature_verifier_metrics = SignatureVerifierMetrics::new(&registry);
        // `_guard` must be declared here so it is not dropped before
        // `AuthorityPerEpochStore::new` is called
-        // Force disable random beacon for tests using this builder, because it doesn't set up the
-        // RandomnessManager.
+        // Force disable random beacon for tests using this builder, because it doesn't
+        // set up the RandomnessManager.
        let _guard = if let Some(mut config) = self.protocol_config {
            config.set_random_beacon_for_testing(false);
            ProtocolConfig::apply_overrides_for_testing(move |_, _| config.clone())
@@ -295,9 +304,10 @@ impl<'a> TestAuthorityBuilder<'a> {
            ArchiveReaderBalancer::default(),
        )
        .await;
-        // For any type of local testing that does not actually spawn a node, the checkpoint executor
-        // won't be started, which means we won't actually execute the genesis transaction. In that case,
-        // the genesis objects (e.g. all the genesis test coins) won't be accessible. Executing it
+        // For any type of local testing that does not actually spawn a node, the
+        // checkpoint executor won't be started, which means we won't actually
+        // execute the genesis transaction. In that case, the genesis objects
+        // (e.g. all the genesis test coins) won't be accessible. Executing it
        // explicitly makes sure all genesis objects are ready for use.
        state
            .try_execute_immediately(
@@ -312,10 +322,10 @@
        .await
        .unwrap();

-        // We want to insert these objects directly instead of relying on genesis because
-        // genesis process would set the previous transaction field for these objects, which would
-        // change their object digest. This makes it difficult to write tests that want to use
-        // these objects directly.
+        // We want to insert these objects directly instead of relying on genesis
+        // because the genesis process would set the previous transaction field for
+        // these objects, which would change their object digest. This makes it
+        // difficult to write tests that want to use these objects directly.
        // TODO: we should probably have a better way to do this.
        if let Some(starting_objects) = self.starting_objects {
            state
diff --git a/crates/sui-core/src/authority_aggregator.rs b/crates/sui-core/src/authority_aggregator.rs
index 633f9cdce97..a7e86b8c1bd 100644
--- a/crates/sui-core/src/authority_aggregator.rs
+++ b/crates/sui-core/src/authority_aggregator.rs
@@ -2,62 +2,65 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::authority_client::{ - make_authority_clients_with_timeout_config, make_network_authority_clients_with_network_config, - AuthorityAPI, NetworkAuthorityClient, +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + convert::AsRef, + string::ToString, + sync::Arc, + time::Duration, }; -use crate::execution_cache::ExecutionCacheRead; -use crate::safe_client::{SafeClient, SafeClientMetrics, SafeClientMetricsBase}; + use fastcrypto::traits::ToFromBytes; use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt}; -use mysten_metrics::histogram::Histogram; -use mysten_metrics::{monitored_future, spawn_monitored_task, GaugeGuard}; +use mysten_metrics::{histogram::Histogram, monitored_future, spawn_monitored_task, GaugeGuard}; use mysten_network::config::Config; -use std::convert::AsRef; -use sui_authority_aggregation::ReduceOutput; -use sui_authority_aggregation::{quorum_map_then_reduce_with_timeout, AsyncResult}; +use prometheus::{ + register_int_counter_vec_with_registry, register_int_counter_with_registry, + register_int_gauge_with_registry, IntCounter, IntCounterVec, IntGauge, Registry, +}; +use sui_authority_aggregation::{quorum_map_then_reduce_with_timeout, AsyncResult, ReduceOutput}; use sui_config::genesis::Genesis; use sui_network::{ default_mysten_network_config, DEFAULT_CONNECT_TIMEOUT_SEC, DEFAULT_REQUEST_TIMEOUT_SEC, }; use sui_swarm_config::network_config::NetworkConfig; -use sui_types::crypto::{AuthorityPublicKeyBytes, AuthoritySignInfo}; -use sui_types::error::UserInputError; -use sui_types::fp_ensure; -use sui_types::message_envelope::Message; -use sui_types::object::Object; -use sui_types::quorum_driver_types::GroupedErrors; -use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait}; use sui_types::{ base_types::*, - committee::{Committee, ProtocolVersion}, - error::{SuiError, SuiResult}, + committee::{ + Committee, CommitteeTrait, CommitteeWithNetworkMetadata, ProtocolVersion, StakeUnit, + }, + crypto::{AuthorityPublicKeyBytes, AuthoritySignInfo}, + effects::{ + CertifiedTransactionEffects, SignedTransactionEffects, TransactionEffects, + TransactionEvents, VerifiedCertifiedTransactionEffects, + }, + error::{SuiError, SuiResult, UserInputError}, + fp_ensure, + message_envelope::Message, + messages_grpc::{ + HandleCertificateResponseV2, LayoutGenerationOption, ObjectInfoRequest, + TransactionInfoRequest, + }, + messages_safe_client::PlainTransactionInfoResponse, + object::Object, + quorum_driver_types::GroupedErrors, + sui_system_state::{SuiSystemState, SuiSystemStateTrait}, transaction::*, }; use thiserror::Error; +use tokio::time::{sleep, timeout}; use tracing::{debug, error, info, trace, warn, Instrument}; -use prometheus::{ - register_int_counter_vec_with_registry, register_int_counter_with_registry, - register_int_gauge_with_registry, IntCounter, IntCounterVec, IntGauge, Registry, -}; -use std::collections::{BTreeMap, BTreeSet, HashMap}; -use std::string::ToString; -use std::sync::Arc; -use std::time::Duration; -use sui_types::committee::{CommitteeTrait, CommitteeWithNetworkMetadata, StakeUnit}; -use sui_types::effects::{ - CertifiedTransactionEffects, SignedTransactionEffects, TransactionEffects, TransactionEvents, - VerifiedCertifiedTransactionEffects, -}; -use sui_types::messages_grpc::{ - HandleCertificateResponseV2, LayoutGenerationOption, ObjectInfoRequest, TransactionInfoRequest, +use crate::{ + authority_client::{ + make_authority_clients_with_timeout_config, + 
+        make_network_authority_clients_with_network_config, AuthorityAPI, NetworkAuthorityClient,
+    },
+    epoch::committee_store::CommitteeStore,
+    execution_cache::ExecutionCacheRead,
+    safe_client::{SafeClient, SafeClientMetrics, SafeClientMetricsBase},
+    stake_aggregator::{InsertResult, MultiStakeAggregator, StakeAggregator},
+};

pub const DEFAULT_RETRIES: usize = 4;

@@ -201,7 +204,7 @@ impl AuthAggMetrics {
pub enum AggregatorProcessTransactionError {
    #[error(
        "Failed to execute transaction on a quorum of validators due to non-retryable errors. Validator errors: {:?}",
-        errors,
+        errors
    )]
    FatalTransaction { errors: GroupedErrors },

@@ -214,7 +217,7 @@ pub enum AggregatorProcessTransactionError {
    #[error(
        "Failed to execute transaction on a quorum of validators due to conflicting transactions. Locked objects: {:?}. Validator errors: {:?}",
        conflicting_tx_digests,
-        errors,
+        errors
    )]
    FatalConflictingTransaction {
        errors: GroupedErrors,
@@ -225,7 +228,7 @@ pub enum AggregatorProcessTransactionError {
    #[error(
        "Validators returned conflicting transactions but it is potentially recoverable. Locked objects: {:?}. Validator errors: {:?}",
        conflicting_tx_digests,
-        errors,
+        errors
    )]
    RetryableConflictingTransaction {
        conflicting_tx_digest_to_retry: Option<TransactionDigest>,
@@ -370,13 +373,16 @@ impl ProcessTransactionState {
        &self,
        validity_threshold: StakeUnit,
    ) -> bool {
-        // In some edge cases, the client may send the same transaction multiple times but with different user signatures.
-        // When this happens, the "minority" tx will fail in safe_client because the certificate verification would fail
+        // In some edge cases, the client may send the same transaction multiple times
+        // but with different user signatures. When this happens, the "minority"
+        // tx will fail in safe_client because the certificate verification would fail
        // and return Sui::FailedToVerifyTxCertWithExecutedEffects.
-        // Here, we check if there are f+1 validators return this error. If so, the transaction is already finalized
-        // with a different set of user signatures. It's not trivial to return the results of that successful transaction
-        // because we don't want fullnode to store the transaction with non-canonical user signatures. Given that this is
-        // very rare, we simply return an error here.
+        // Here, we check whether f+1 validators returned this error. If so, the
+        // transaction is already finalized with a different set of user
+        // signatures. It's not trivial to return the results of that successful
+        // transaction because we don't want the fullnode to store the transaction
+        // with non-canonical user signatures. Given that this is very rare, we
+        // simply return an error here.
        let invalid_sig_stake: StakeUnit = self
            .errors
            .iter()
@@ -441,7 +447,8 @@ pub struct AuthorityAggregator<A: Clone> {
    pub authority_clients: Arc<BTreeMap<AuthorityName, Arc<SafeClient<A>>>>,
    /// Metrics
    pub metrics: Arc<AuthAggMetrics>,
-    /// Metric base for the purpose of creating new safe clients during reconfiguration.
+    /// Metric base for the purpose of creating new safe clients during
+    /// reconfiguration.
    pub safe_client_metrics_base: SafeClientMetricsBase,
    pub timeouts: TimeoutConfig,
    /// Store here for clone during re-config.
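// ---------------------------------------------------------------------------
// Illustrative aside, not part of the diff: the "f+1 validators returned this
// error" check from ProcessTransactionState in isolation. With total stake
// N = 3f + 1, any set of authorities whose stake reaches f + 1 (the validity
// threshold) must contain at least one honest authority, so the error can be
// trusted to reflect a genuinely finalized transaction. The error
// representation below is a simplified stand-in.

type StakeUnit = u64;

fn conflicting_signature_is_final(
    errors: &[(String /* error kind, stand-in */, StakeUnit)],
    validity_threshold: StakeUnit, // f + 1
) -> bool {
    // Sum the stake of every validator that reported the cert-verification
    // failure; crossing f + 1 proves at least one honest validator saw the
    // transaction finalized with different user signatures.
    let invalid_sig_stake: StakeUnit = errors
        .iter()
        .filter(|(kind, _)| kind == "FailedToVerifyTxCertWithExecutedEffects")
        .map(|(_, stake)| stake)
        .sum();
    invalid_sig_stake >= validity_threshold
}
// ---------------------------------------------------------------------------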
@@ -549,8 +556,9 @@ impl AuthorityAggregator {
            })
            .collect::<BTreeMap<_, _>>();

-        // TODO: It's likely safer to do the following operations atomically, in case this function
-        // gets called from different threads. It cannot happen today, but worth the caution.
+        // TODO: It's likely safer to do the following operations atomically, in case
+        // this function gets called from different threads. It cannot happen
+        // today, but worth the caution.
        let new_committee = committee.committee;
        if disallow_missing_intermediate_committees {
            fp_ensure!(
@@ -642,9 +650,10 @@ impl AuthorityAggregator {
        safe_client_metrics_base: SafeClientMetricsBase,
        auth_agg_metrics: AuthAggMetrics,
    ) -> anyhow::Result<Self> {
-        // TODO: We should get the committee from the epoch store instead to ensure consistency.
-        // Instead of this function use AuthorityEpochStore::epoch_start_configuration() to access this object everywhere
-        // besides when we are reading fields for the current epoch
+        // TODO: We should get the committee from the epoch store instead to ensure
+        // consistency. Instead of this function use
+        // AuthorityEpochStore::epoch_start_configuration() to access this object
+        // everywhere besides when we are reading fields for the current epoch
        let sui_system_state = store.get_sui_system_state_object_unsafe()?;
        let committee = sui_system_state.get_current_epoch_committee();
        let validator_display_names = sui_system_state
@@ -695,9 +704,10 @@ impl<A> AuthorityAggregator<A>
where
    A: AuthorityAPI + Send + Sync + 'static + Clone,
{
-    // Repeatedly calls the provided closure on a randomly selected validator until it succeeds.
-    // Once all validators have been attempted, starts over at the beginning. Intended for cases
-    // that must eventually succeed as long as the network is up (or comes back up) eventually.
+    // Repeatedly calls the provided closure on a randomly selected validator until
+    // it succeeds. Once all validators have been attempted, starts over at the
+    // beginning. Intended for cases that must eventually succeed as long as the
+    // network is up (or comes back up) eventually.
    async fn quorum_once_inner<'a, S, FMap>(
        &'a self,
        // try these authorities first
@@ -749,26 +759,27 @@ where
            }))
        };

-        // This process is intended to minimize latency in the face of unreliable authorities,
-        // without creating undue load on authorities.
+        // This process is intended to minimize latency in the face of unreliable
+        // authorities, without creating undue load on authorities.
        //
        // The fastest possible process from the
        // client's point of view would simply be to issue a concurrent request to every
        // authority and then take the winner - this would create unnecessary load on
        // authorities.
        //
-        // The most efficient process from the network's point of view is to do one request at
-        // a time, however if the first validator that the client contacts is unavailable or
-        // slow, the client must wait for the serial_authority_request_interval period to elapse
+        // The most efficient process from the network's point of view is to do one
+        // request at a time; however, if the first validator that the client
+        // contacts is unavailable or slow, the client must wait for the
+        // serial_authority_request_interval period to elapse
        // before starting its next request.
        //
        // So, this process is designed as a compromise between these two extremes.
        //  - We start one request, and schedule another request to begin after
        //    serial_authority_request_interval.
-        //  - Whenever a request finishes, if it succeeded, we return. if it failed, we start a
-        //    new request.
-        //  - If serial_authority_request_interval elapses, we begin a new request even if the
-        //    previous one is not finished, and schedule another future request.
+        //  - Whenever a request finishes, if it succeeded, we return. If it failed, we
+        //    start a new request.
+        //  - If serial_authority_request_interval elapses, we begin a new request even
+        //    if the previous one is not finished, and schedule another future request.
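// ---------------------------------------------------------------------------
// Illustrative aside, not part of the diff: the staggering compromise
// described above, reduced to its core. One request is added per elapsed
// interval; the first success wins. This is a sketch under simplifying
// assumptions (string-named authorities, unit errors, no per-request
// timeouts), not the real quorum_once_inner.

use std::time::Duration;

use futures::stream::{FuturesUnordered, StreamExt};

async fn staggered_first_success<F, Fut, T>(
    authorities: Vec<String>,
    interval: Duration,
    query: F,
) -> Option<T>
where
    F: Fn(String) -> Fut,
    Fut: std::future::Future<Output = Result<T, ()>>,
{
    let mut pending = FuturesUnordered::new();
    let mut remaining = authorities.into_iter();
    loop {
        if pending.is_empty() {
            // Nothing in flight: start the next authority immediately, or
            // give up once every authority has been attempted.
            match remaining.next() {
                Some(name) => pending.push(query(name)),
                None => return None,
            }
        }
        tokio::select! {
            // The stagger interval elapsed: add one more in-flight request
            // even though the previous one has not finished.
            _ = tokio::time::sleep(interval) => {
                if let Some(name) = remaining.next() {
                    pending.push(query(name));
                }
            }
            // A request finished: return on success, otherwise loop (which
            // starts the next request if nothing else is in flight).
            Some(result) = pending.next() => {
                if let Ok(value) = result {
                    return Some(value);
                }
            }
        }
    }
}
// ---------------------------------------------------------------------------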
if it failed, we start a - // new request. - // - If serial_authority_request_interval elapses, we begin a new request even if the - // previous one is not finished, and schedule another future request. + // - Whenever a request finishes, if it succeeded, we return. If it failed, we + // start a new request. + // - If serial_authority_request_interval elapses, we begin a new request even + // if the previous one is not finished, and schedule another future request. let name = authorities_shuffled.next().ok_or_else(|| { error!( @@ -826,10 +837,11 @@ where } } - /// Like quorum_map_then_reduce_with_timeout, but for things that need only a single - /// successful response, such as fetching a Transaction from some authority. - /// This is intended for cases in which byzantine authorities can time out or slow-loris, but - /// can't give a false answer, because e.g. the digest of the response is known, or a + /// Like quorum_map_then_reduce_with_timeout, but for things that need only + /// a single successful response, such as fetching a Transaction from + /// some authority. This is intended for cases in which byzantine + /// authorities can time out or slow-loris, but can't give a false + /// answer, because e.g. the digest of the response is known, or a /// quorum-signed object such as a checkpoint has been requested. pub(crate) async fn quorum_once_with_timeout<'a, S, FMap>( &'a self, @@ -884,9 +896,10 @@ where /// Query the object with highest version number from the authorities. /// We stop after receiving responses from 2f+1 validators. - /// This function is untrusted because we simply assume each response is valid and there are no - /// byzantine validators. - /// Because of this, this function should only be used for testing or benchmarking. + /// This function is untrusted because we simply assume each response is + /// valid and there are no byzantine validators. + /// Because of this, this function should only be used for testing or + /// benchmarking. pub async fn get_latest_object_version_for_testing( &self, object_id: ObjectID, @@ -1242,10 +1255,12 @@ where }; } - // When state is in a retryable state and process transaction was not successful, it indicates that - // we have heard from *all* validators. Check if any SystemOverloadRetryAfter error caused the txn - // to fail. If so, return explicit SystemOverloadRetryAfter error for continuous retry (since objects) - // are locked in validators. If not, retry regular RetryableTransaction error. + // When state is in a retryable state and process transaction was not + // successful, it indicates that we have heard from *all* validators. + // Check if any SystemOverloadRetryAfter error caused the txn + // to fail. If so, return explicit SystemOverloadRetryAfter error for continuous + // retry (since objects are locked in validators). If not, retry regular + // RetryableTransaction error. if state.tx_signatures.total_votes() + state.retryable_overloaded_stake >= quorum_threshold { // TODO: make use of retry_after_secs, which is currently not used. @@ -1256,7 +1271,8 @@ where }; } - // No conflicting transaction, the system is not overloaded and transaction state is still retryable. + // No conflicting transaction, the system is not overloaded and transaction + // state is still retryable. AggregatorProcessTransactionError::RetryableTransaction { errors: group_errors(state.errors), } @@ -1356,8 +1372,9 @@ where } _ => { // If we get 2f+1 effects, it's a proof that the transaction - // has already been finalized.
This works because validators would re-sign effects for transactions - // that were finalized in previous epochs. + // has already been finalized. This works because validators would re-sign + // effects for transactions that were finalized in previous + // epochs. let digest = plain_tx_effects.data().digest(); match state.effects_map.insert(digest, plain_tx_effects.clone()) { InsertResult::NotEnoughVotes { @@ -1437,8 +1454,9 @@ where involved_validators.extend_from_slice(validators); total_stake += stake; } - // TODO: Instead of pushing a new error, we should add more information about the non-quorum effects - // in the final error if state is no longer retryable + // TODO: Instead of pushing a new error, we should add more information about + // the non-quorum effects in the final error if state is no longer + // retryable state.errors.push(( SuiError::QuorumFailedToGetEffectsQuorumWhenProcessingTransaction { effects_map: non_quorum_effects, @@ -1710,8 +1728,8 @@ where Ok(response.0) } - /// This function tries to get SignedTransaction OR CertifiedTransaction from - /// an given list of validators who are supposed to know about it. + /// This function tries to get SignedTransaction OR CertifiedTransaction + /// from a given list of validators who are supposed to know about it. pub async fn handle_transaction_info_request_from_some_validators( &self, tx_digest: &TransactionDigest, diff --git a/crates/sui-core/src/authority_client.rs b/crates/sui-core/src/authority_client.rs index aef068dcc76..d9cb48a8fd2 100644 --- a/crates/sui-core/src/authority_client.rs +++ b/crates/sui-core/src/authority_client.rs @@ -2,25 +2,26 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{collections::BTreeMap, time::Duration}; + use anyhow::anyhow; use async_trait::async_trait; use mysten_network::config::Config; -use std::collections::BTreeMap; -use std::time::Duration; -use sui_network::{api::ValidatorClient, tonic}; -use sui_types::base_types::AuthorityName; -use sui_types::committee::CommitteeWithNetworkMetadata; -use sui_types::messages_checkpoint::{ CheckpointRequest, CheckpointRequestV2, CheckpointResponse, CheckpointResponseV2, }; -use sui_types::multiaddr::Multiaddr; -use sui_types::sui_system_state::SuiSystemState; -use sui_types::{error::SuiError, transaction::*}; - -use sui_network::tonic::transport::Channel; -use sui_types::messages_grpc::{ HandleCertificateResponseV2, HandleTransactionResponse, ObjectInfoRequest, ObjectInfoResponse, SystemStateRequest, TransactionInfoRequest, TransactionInfoResponse, +use sui_network::{api::ValidatorClient, tonic, tonic::transport::Channel}; +use sui_types::{ + base_types::AuthorityName, + committee::CommitteeWithNetworkMetadata, + error::SuiError, + messages_checkpoint::{ + CheckpointRequest, CheckpointRequestV2, CheckpointResponse, CheckpointResponseV2, + }, + messages_grpc::{ + HandleCertificateResponseV2, HandleTransactionResponse, ObjectInfoRequest, + ObjectInfoResponse, SystemStateRequest, TransactionInfoRequest, TransactionInfoResponse, + }, + multiaddr::Multiaddr, + sui_system_state::SuiSystemState, + transaction::*, };
// SPDX-License-Identifier: Apache-2.0 +use std::{io, sync::Arc}; + use anyhow::Result; use async_trait::async_trait; -use mysten_metrics::histogram::Histogram as MystenHistogram; -use mysten_metrics::spawn_monitored_task; +use mysten_metrics::{histogram::Histogram as MystenHistogram, spawn_monitored_task}; use narwhal_worker::LazyNarwhalClient; use prometheus::{ register_int_counter_vec_with_registry, register_int_counter_with_registry, IntCounter, IntCounterVec, Registry, }; -use std::{io, sync::Arc}; use sui_network::{ api::{Validator, ValidatorServer}, tonic, }; -use sui_types::effects::TransactionEvents; -use sui_types::messages_consensus::ConsensusTransaction; -use sui_types::messages_grpc::{ HandleCertificateResponseV2, HandleTransactionResponse, ObjectInfoRequest, ObjectInfoResponse, SubmitCertificateResponse, SystemStateRequest, TransactionInfoRequest, TransactionInfoResponse, }; -use sui_types::multiaddr::Multiaddr; -use sui_types::sui_system_state::SuiSystemState; -use sui_types::{effects::TransactionEffectsAPI, message_envelope::Message}; -use sui_types::{error::*, transaction::*}; use sui_types::{ + effects::{TransactionEffectsAPI, TransactionEvents}, + error::*, fp_ensure, + message_envelope::Message, messages_checkpoint::{ CheckpointRequest, CheckpointRequestV2, CheckpointResponse, CheckpointResponseV2, }, + messages_consensus::ConsensusTransaction, + messages_grpc::{ + HandleCertificateResponseV2, HandleTransactionResponse, ObjectInfoRequest, + ObjectInfoResponse, SubmitCertificateResponse, SystemStateRequest, TransactionInfoRequest, + TransactionInfoResponse, + }, + multiaddr::Multiaddr, + sui_system_state::SuiSystemState, + transaction::*, }; use tap::TapFallible; use tokio::task::JoinHandle; use tracing::{error_span, info, Instrument}; -use crate::consensus_adapter::ConnectionMonitorStatusForTests; use crate::{ authority::AuthorityState, - consensus_adapter::{ConsensusAdapter, ConsensusAdapterMetrics}, + consensus_adapter::{ + ConnectionMonitorStatusForTests, ConsensusAdapter, ConsensusAdapterMetrics, + }, }; #[cfg(test)] @@ -313,13 +316,14 @@ impl ValidatorService { .into()); } - // When authority is overloaded and decide to reject this tx, we still lock the object - // and ask the client to retry in the future. This is because without locking, the - // input objects can be locked by a different tx in the future, however, the input objects - // may already be locked by this tx in other validators. This can cause non of the txes - // to have enough quorum to form a certificate, causing the objects to be locked for - // the entire epoch. By doing locking but pushback, retrying transaction will have - // higher chance to succeed. + // When the authority is overloaded and decides to reject this tx, we still lock + // the object and ask the client to retry in the future. This is because + // without locking, the input objects can be locked by a different tx in + // the future, however, the input objects may already be locked by this + // tx in other validators. This can cause none of the txes to have enough + // quorum to form a certificate, causing the objects to be locked for + // the entire epoch. By doing locking but pushback, a retried transaction will + // have a higher chance to succeed. let mut validator_pushback_error = None; let overload_check_res = state.check_system_overload( &consensus_adapter, @@ -364,8 +368,8 @@ impl ValidatorService { })?; if let Some(error) = validator_pushback_error { - // TODO: right now, we still sign the txn, but just don't return it.
We can also skip signing - // to save more CPU. + // TODO: right now, we still sign the txn, but just don't return it. We can also + // skip signing to save more CPU. return Err(error.into()); } Ok(tonic::Response::new(info)) @@ -466,9 +470,10 @@ impl ValidatorService { } // 3) All certificates are sent to consensus (at least by some authorities) - // For shared objects this will wait until either timeout or we have heard back from consensus. - // For owned objects this will return without waiting for certificate to be sequenced - // First do quick dirty non-async check + // For shared objects this will wait until either timeout or we have heard back + // from consensus. For owned objects this will return without + // waiting for certificate to be sequenced. First do a quick, dirty + // non-async check if !epoch_store.is_tx_cert_consensus_message_processed(&certificate)? { let _metrics_guard = if shared_object_tx { Some(self.metrics.consensus_latency.start_timer()) @@ -484,8 +489,9 @@ impl ValidatorService { Some(&reconfiguration_lock), &epoch_store, )?; - // Do not wait for the result, because the transaction might have already executed. - // Instead, check or wait for the existence of certificate effects below. + // Do not wait for the result, because the transaction might + // have already executed. Instead, check or wait + // for the existence of certificate effects below. } drop(reconfiguration_lock); certificate @@ -501,7 +507,8 @@ impl ValidatorService { return Ok(None); } - // 4) Execute the certificate if it contains only owned object transactions, or wait for + // 4) Execute the certificate if it contains only owned object transactions, or + // wait for // the execution results if it contains shared objects. let effects = self .state @@ -528,8 +535,9 @@ impl Validator for ValidatorService { ) -> Result, tonic::Status> { let validator_service = self.clone(); - // Spawns a task which handles the transaction. The task will unconditionally continue - // processing in the event that the client connection is dropped. + // Spawns a task which handles the transaction. The task will unconditionally + // continue processing in the event that the client connection is + // dropped. spawn_monitored_task!(validator_service.handle_transaction(request)) .await .unwrap() @@ -539,7 +547,8 @@ impl Validator for ValidatorService { &self, request: tonic::Request, ) -> Result, tonic::Status> { - // The call to digest() assumes the transaction is valid, so we need to verify it first. + // The call to digest() assumes the transaction is valid, so we need to verify + // it first. request.get_ref().verify_user_input()?; let span = error_span!("submit_certificate", tx_digest = ?request.get_ref().digest()); @@ -553,7 +562,8 @@ impl Validator for ValidatorService { &self, request: tonic::Request, ) -> Result, tonic::Status> { - // The call to digest() assumes the transaction is valid, so we need to verify it first. + // The call to digest() assumes the transaction is valid, so we need to verify + // it first.
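The `spawn_monitored_task!` call in the hunk above exists so that work survives a dropped client connection: the handler runs in its own task, and the RPC future merely awaits its result. A minimal illustration of the same pattern using plain `tokio::spawn` (the handler body and names here are hypothetical stand-ins, not the sui-core implementation):

```rust
// "Spawn, then await the JoinHandle": if the caller's future is dropped
// (client disconnects), the spawned work still runs to completion.
async fn handle_transaction(request: String) -> Result<String, String> {
    // Stand-in for validation and signing work.
    Ok(format!("signed:{request}"))
}

async fn submit_transaction(request: String) -> Result<String, String> {
    // tokio::spawn detaches the work from this future's lifetime; awaiting
    // the JoinHandle only observes the result, it does not own the work.
    tokio::spawn(handle_transaction(request))
        .await
        .expect("handler task panicked")
}

#[tokio::main]
async fn main() {
    println!("{:?}", submit_transaction("tx-bytes".into()).await);
}
```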
request.get_ref().verify_user_input()?; let span = error_span!("handle_certificate", tx_digest = ?request.get_ref().digest()); diff --git a/crates/sui-core/src/checkpoints/causal_order.rs b/crates/sui-core/src/checkpoints/causal_order.rs index c7e3ce6365f..c6cbc36589c 100644 --- a/crates/sui-core/src/checkpoints/causal_order.rs +++ b/crates/sui-core/src/checkpoints/causal_order.rs @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use std::collections::{BTreeMap, BTreeSet, HashMap}; -use sui_types::base_types::TransactionDigest; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::effects::{InputSharedObject, TransactionEffects}; -use sui_types::storage::ObjectKey; + +use sui_types::{ + base_types::TransactionDigest, + effects::{InputSharedObject, TransactionEffects, TransactionEffectsAPI}, + storage::ObjectKey, +}; use tracing::trace; pub struct CausalOrder { @@ -19,9 +21,11 @@ impl CausalOrder { /// Returned list has effects that /// /// (a) Causally sorted - /// (b) Have deterministic order between transactions that are not causally dependent + /// (b) Have deterministic order between transactions that are not causally + /// dependent /// - /// The order of result list does not depend on order of effects in the supplied vector + /// The order of the result list does not depend on the order of effects in + /// the supplied vector pub fn causal_sort(effects: Vec) -> Vec { let mut this = Self::from_vec(effects); while let Some(item) = this.pop_first() { @@ -55,7 +59,8 @@ impl CausalOrder { while let Some(state) = states.last_mut() { if let Some(new_state) = state.process(self) { - // This is essentially a 'recursive call' but using heap instead of stack to store state + // This is essentially a 'recursive call' but using heap instead of stack to + // store state states.push(new_state); } else { // Done with current state, remove it @@ -87,29 +92,34@@ impl TransactionDependencies { } } -/// Supplies TransactionDependencies tree with additional edges from transactions -/// that write shared locks object to transactions that read previous version of this object. +/// Supplies TransactionDependencies tree with additional edges from +/// transactions that write a shared-locked object to transactions that read the +/// previous version of this object. /// -/// With RWLocks we can have multiple transaction that depend on shared object version N - many read -/// transactions and single write transaction. Those transactions depend on transaction that has written N, -/// but they do not depend on each other. And specifically, transaction that reads N and writes N+1 -/// does not depend on read-only transactions that also read N. +/// With RWLocks we can have multiple transactions that depend on shared object +/// version N - many read transactions and a single write transaction. Those +/// transactions depend on the transaction that has written N, but they do not +/// depend on each other. And specifically, a transaction that reads N and writes +/// N+1 does not depend on read-only transactions that also read N. /// -/// We do not add such read transactions to TransactionEffects of shared object write transactions -/// for next version to make sure TransactionEffects are not grow too large -/// (and because you do not need read transactions to replay write transaction for next version).
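The two properties that `causal_sort` documents above — parents before children, and a deterministic order among causally independent transactions — can be pictured with a much-simplified fixpoint sketch. The real implementation walks an explicit state stack and adds RWLock edges; here digests are plain `u64` and the input is assumed acyclic:

```rust
use std::collections::{BTreeMap, BTreeSet};

type Digest = u64;

// Emit parents before children; break ties between independent transactions
// by digest order, so the result is independent of input order.
fn causal_sort(deps: &BTreeMap<Digest, BTreeSet<Digest>>) -> Vec<Digest> {
    let mut result = Vec::new();
    let mut emitted = BTreeSet::new();
    // Fixpoint over an acyclic graph: each pass emits, in digest order, every
    // tx whose in-set dependencies have all been emitted already.
    while emitted.len() < deps.len() {
        for (digest, parents) in deps {
            if !emitted.contains(digest)
                && parents
                    .iter()
                    .all(|p| emitted.contains(p) || !deps.contains_key(p))
            {
                emitted.insert(*digest);
                result.push(*digest);
            }
        }
    }
    result
}

fn main() {
    let mut deps = BTreeMap::new();
    deps.insert(3, BTreeSet::from([5])); // 3 depends on 5
    deps.insert(5, BTreeSet::new());
    deps.insert(2, BTreeSet::new()); // independent of 3 and 5
    assert_eq!(causal_sort(&deps), vec![2, 5, 3]);
}
```

The unit test later in this file only checks that [3] comes last; this sketch additionally pins the tie-break between [2] and [5] to digest order for illustration.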
+/// We do not add such read transactions to TransactionEffects of shared object +/// write transactions for the next version to make sure TransactionEffects do +/// not grow too large (and because you do not need read transactions to replay +/// the write transaction for the next version). /// -/// However, when building checkpoints we supply transaction dependency tree with additional dependency edges to -/// make it look like write transaction for next version causally depends on transactions that read -/// previous versions, for two reasons: +/// However, when building checkpoints we supply the transaction dependency tree +/// with additional dependency edges to make it look like the write transaction +/// for the next version causally depends on transactions that read previous +/// versions, for two reasons: /// -/// (1) Without this addition we could have peculiar checkpoints where transaction reading -/// version N appears after transaction that overwritten this object with version N+1. -/// This does not affect how transaction is executed, but it is not something one would expect in -/// causally ordered list. +/// (1) Without this addition we could have peculiar checkpoints where a +/// transaction reading version N appears after the transaction that overwrote +/// this object with version N+1. This does not affect how the transaction is +/// executed, but it is not something one would expect in a causally ordered list. /// -/// (2) On the practical side it will allow to simplify pruner as it can now just tail checkpoints -/// and delete objects in order they appear in TransactionEffects::modified_at_versions in checkpoint. +/// (2) On the practical side it will allow simplifying the pruner, as it can now +/// just tail checkpoints and delete objects in the order they appear in +/// TransactionEffects::modified_at_versions in the checkpoint. struct RWLockDependencyBuilder { read_version: HashMap>, overwrite_versions: HashMap>, @@ -209,10 +219,12 @@ impl InsertState { #[cfg(test)] mod tests { + use sui_types::{ + base_types::{ObjectDigest, ObjectID, SequenceNumber}, + effects::TransactionEffects, + }; + use super::*; - use sui_types::base_types::ObjectDigest; - use sui_types::base_types::{ObjectID, SequenceNumber}; - use sui_types::effects::TransactionEffects; #[test] pub fn test_causal_order() { @@ -261,7 +273,7 @@ mod tests { let r = extract(CausalOrder::causal_sort(vec![e5, e2, e3])); assert_eq!(r.len(), 3); assert_eq!(*r.get(2).unwrap(), 3); // [3] is the last - // both [5] and [2] are present (but order is not fixed) + // both [5] and [2] are present (but order is not fixed) assert!(r.contains(&5)); assert!(r.contains(&2)); } diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/data_ingestion_handler.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/data_ingestion_handler.rs index d39df248d42..c244c96072f 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_executor/data_ingestion_handler.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_executor/data_ingestion_handler.rs @@ -1,18 +1,23 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::checkpoints::CheckpointStore; -use crate::execution_cache::ExecutionCacheRead; -use std::collections::{HashMap, HashSet}; -use std::path::PathBuf; -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, + sync::Arc, +}; + use sui_storage::blob::{Blob, BlobEncoding}; -use sui_types::digests::TransactionDigest; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::error::{SuiError, SuiResult, UserInputError}; -use sui_types::full_checkpoint_content::{CheckpointData, CheckpointTransaction}; -use sui_types::messages_checkpoint::VerifiedCheckpoint; -use sui_types::storage::ObjectKey; +use sui_types::{ + digests::TransactionDigest, + effects::TransactionEffectsAPI, + error::{SuiError, SuiResult, UserInputError}, + full_checkpoint_content::{CheckpointData, CheckpointTransaction}, + messages_checkpoint::VerifiedCheckpoint, + storage::ObjectKey, +}; + +use crate::{checkpoints::CheckpointStore, execution_cache::ExecutionCacheRead}; pub(crate) fn store_checkpoint_locally( path: PathBuf, @@ -81,7 +86,8 @@ pub(crate) fn store_checkpoint_locally( ) .collect::>() .into_iter() - // Unwrapped-then-deleted objects are not stored in state before the tx, so we have nothing to fetch. + // Unwrapped-then-deleted objects are not stored in state before the tx, so we have + // nothing to fetch. .filter(|key| !unwrapped_then_deleted_obj_ids.contains(&key.0)) .collect::>(); diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/metrics.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/metrics.rs index 33b82682416..f03330e3738 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_executor/metrics.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_executor/metrics.rs @@ -1,12 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + use mysten_metrics::histogram::Histogram; use prometheus::{ register_int_counter_with_registry, register_int_gauge_with_registry, IntCounter, IntGauge, Registry, }; -use std::sync::Arc; pub struct CheckpointExecutorMetrics { pub checkpoint_exec_sync_tps: IntGauge, diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs index 96864b7ba19..1eb8f0a2407 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs @@ -11,16 +11,18 @@ //! to saturate the CPU with executor tasks (one per checkpoint), each of which //! handle scheduling and awaiting checkpoint transaction execution. //! -//! CheckpointExecutor is made recoverable in the event of Node shutdown by way of a watermark, -//! highest_executed_checkpoint, which is guaranteed to be updated sequentially in order, -//! despite checkpoints themselves potentially being executed nonsequentially and in parallel. -//! CheckpointExecutor parallelizes checkpoints of the same epoch as much as possible. -//! CheckpointExecutor enforces the invariant that if `run` returns successfully, we have reached the -//! end of epoch. This allows us to use it as a signal for reconfig. - -use std::path::PathBuf; +//! CheckpointExecutor is made recoverable in the event of Node shutdown by way +//! of a watermark, highest_executed_checkpoint, which is guaranteed to be +//! updated sequentially in order, despite checkpoints themselves potentially +//! being executed nonsequentially and in parallel. CheckpointExecutor +//! 
parallelizes checkpoints of the same epoch as much as possible. +//! CheckpointExecutor enforces the invariant that if `run` returns +//! successfully, we have reached the end of epoch. This allows us to use it as +//! a signal for reconfig. + use std::{ collections::HashMap, + path::PathBuf, sync::Arc, time::{Duration, Instant}, }; @@ -31,17 +33,16 @@ use mysten_metrics::{spawn_monitored_task, MonitoredFutureExt}; use prometheus::Registry; use sui_config::node::{CheckpointExecutorConfig, RunWithRange}; use sui_macros::{fail_point, fail_point_async}; -use sui_types::crypto::RandomnessRound; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; -use sui_types::executable_transaction::VerifiedExecutableTransaction; -use sui_types::message_envelope::Message; -use sui_types::transaction::TransactionKind; use sui_types::{ base_types::{ExecutionDigests, TransactionDigest, TransactionEffectsDigest}, + crypto::RandomnessRound, + effects::{TransactionEffects, TransactionEffectsAPI}, + error::SuiResult, + executable_transaction::VerifiedExecutableTransaction, + message_envelope::Message, messages_checkpoint::{CheckpointSequenceNumber, VerifiedCheckpoint}, - transaction::VerifiedTransaction, + transaction::{TransactionDataAPI, TransactionKind, VerifiedTransaction}, }; -use sui_types::{error::SuiResult, transaction::TransactionDataAPI}; use tap::{TapFallible, TapOptional}; use tokio::{ sync::broadcast::{self, error::RecvError}, @@ -52,12 +53,15 @@ use tokio_stream::StreamExt; use tracing::{debug, error, info, instrument, trace, warn}; use self::metrics::CheckpointExecutorMetrics; -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::AuthorityState; -use crate::checkpoints::checkpoint_executor::data_ingestion_handler::store_checkpoint_locally; -use crate::state_accumulator::StateAccumulator; -use crate::transaction_manager::TransactionManager; -use crate::{checkpoints::CheckpointStore, execution_cache::ExecutionCacheRead}; +use crate::{ + authority::{authority_per_epoch_store::AuthorityPerEpochStore, AuthorityState}, + checkpoints::{ + checkpoint_executor::data_ingestion_handler::store_checkpoint_locally, CheckpointStore, + }, + execution_cache::ExecutionCacheRead, + state_accumulator::StateAccumulator, + transaction_manager::TransactionManager, +}; mod data_ingestion_handler; mod metrics; @@ -80,8 +84,9 @@ pub enum StopReason { pub struct CheckpointExecutor { mailbox: broadcast::Receiver, - // TODO: AuthorityState is only needed because we have to call deprecated_insert_finalized_transactions - // once that code is fully deprecated we can remove this + // TODO: AuthorityState is only needed because we have to call + // deprecated_insert_finalized_transactions; once that code is fully deprecated we can + // remove this state: Arc, checkpoint_store: Arc, cache_reader: Arc, @@ -131,8 +136,8 @@ impl CheckpointExecutor { } /// Ensure that all checkpoints in the current epoch will be executed. - /// We don't technically need &mut on self, but passing it to make sure only one instance is - /// running at one time. + /// We don't technically need &mut on self, but we pass it to make sure only + /// one instance is running at a time.
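The watermark property described in the module docs above (checkpoints may execute out of order, but `highest_executed_checkpoint` only advances sequentially) comes down to buffering completions that arrive past a gap. A schematic sketch with plain `u64` sequence numbers, not the actual store-backed watermark:

```rust
use std::collections::BTreeSet;

// Completions may arrive out of order; the watermark only ever advances to
// the next expected sequence number, so a crash can at worst re-execute
// checkpoints at or below the watermark.
struct Watermark {
    highest_executed: Option<u64>,
    finished_out_of_order: BTreeSet<u64>,
}

impl Watermark {
    fn on_checkpoint_finished(&mut self, seq: u64) {
        self.finished_out_of_order.insert(seq);
        // Drain the buffer only while it is contiguous with the watermark.
        while let Some(next) = self.finished_out_of_order.first().copied() {
            let expected = self.highest_executed.map_or(0, |h| h + 1);
            if next != expected {
                break;
            }
            self.finished_out_of_order.remove(&next);
            self.highest_executed = Some(next);
        }
    }
}

fn main() {
    let mut w = Watermark { highest_executed: None, finished_out_of_order: BTreeSet::new() };
    w.on_checkpoint_finished(1); // buffered: checkpoint 0 has not finished yet
    assert_eq!(w.highest_executed, None);
    w.on_checkpoint_finished(0); // now 0 and 1 both commit, in order
    assert_eq!(w.highest_executed, Some(1));
}
```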
pub async fn run_epoch( &mut self, epoch_store: Arc, @@ -170,8 +175,8 @@ impl CheckpointExecutor { if epoch_store.epoch() == highest_executed.epoch() && highest_executed.is_last_checkpoint_of_epoch() { - // We can arrive at this point if we bump the highest_executed_checkpoint watermark, and then - // crash before completing reconfiguration. + // We can arrive at this point if we bump the highest_executed_checkpoint + // watermark, and then crash before completing reconfiguration. info!(seq = ?highest_executed.sequence_number, "final checkpoint of epoch has already been executed"); return StopReason::EpochComplete; } @@ -196,9 +201,10 @@ impl CheckpointExecutor { loop { // If we have executed the last checkpoint of the current epoch, stop. - // Note: when we arrive here with highest_executed == the final checkpoint of the epoch, - // we are in an edge case where highest_executed does not actually correspond to the watermark. - // The watermark is only bumped past the epoch final checkpoint after execution of the change + // Note: when we arrive here with highest_executed == the final checkpoint of + // the epoch, we are in an edge case where highest_executed does not + // actually correspond to the watermark. The watermark is only + // bumped past the epoch final checkpoint after execution of the change // epoch tx, and state accumulation. if self .check_epoch_last_checkpoint(epoch_store.clone(), &highest_executed) @@ -314,8 +320,8 @@ impl CheckpointExecutor { fail_point!("highest-executed-checkpoint"); - // We store a fixed number of additional FullCheckpointContents after execution is complete - // for use in state sync. + // We store a fixed number of additional FullCheckpointContents after execution + // is complete for use in state sync. const NUM_SAVED_FULL_CHECKPOINT_CONTENTS: u64 = 5_000; if seq >= NUM_SAVED_FULL_CHECKPOINT_CONTENTS { let prune_seq = seq - NUM_SAVED_FULL_CHECKPOINT_CONTENTS; @@ -343,8 +349,9 @@ impl CheckpointExecutor { checkpoint.report_checkpoint_age_ms(&self.metrics.last_executed_checkpoint_age_ms); } - /// Post processing and plumbing after we executed a checkpoint. This function is guaranteed - /// to be called in the order of checkpoint sequence number. + /// Post processing and plumbing after we executed a checkpoint. This + /// function is guaranteed to be called in the order of checkpoint + /// sequence number. #[instrument(level = "debug", skip_all)] async fn process_executed_checkpoint( &self, @@ -523,9 +530,9 @@ impl CheckpointExecutor { .await; } - /// Check whether `checkpoint` is the last checkpoint of the current epoch. If so, - /// perform special case logic (execute change_epoch tx, accumulate epoch, - /// finalize transactions), then return true. + /// Check whether `checkpoint` is the last checkpoint of the current epoch. + /// If so, perform special case logic (execute change_epoch tx, + /// accumulate epoch, finalize transactions), then return true. pub async fn check_epoch_last_checkpoint( &self, epoch_store: Arc, @@ -622,8 +629,8 @@ impl CheckpointExecutor { } } -// Logs within the function are annotated with the checkpoint sequence number and epoch, -// from schedule_checkpoint(). +// Logs within the function are annotated with the checkpoint sequence number +// and epoch, from schedule_checkpoint(). 
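The `filter_map` in `handle_execution_effects` above pairs each digest with its possibly-missing effects and keeps only the stragglers that still need to be waited on. The same shape in isolation, with `u64` digests and `&str` standing in for effects:

```rust
// Zip the multi-get result (one Option per digest) with the digests and keep
// those that still have no effects recorded.
fn unexecuted(digests: &[u64], effects: &[Option<&str>]) -> Vec<u64> {
    effects
        .iter()
        .zip(digests.iter().copied())
        .filter_map(|(fx, digest)| if fx.is_none() { Some(digest) } else { None })
        .collect()
}

fn main() {
    let digests = [10, 11, 12];
    let effects = [Some("fx10"), None, Some("fx12")];
    assert_eq!(unexecuted(&digests, &effects), vec![11]);
}
```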
#[instrument(level = "debug", skip_all, fields(seq = ?checkpoint.sequence_number(), epoch = ?epoch_store.epoch()))] async fn execute_checkpoint( checkpoint: VerifiedCheckpoint, @@ -640,10 +647,10 @@ async fn execute_checkpoint( debug!("Preparing checkpoint for execution",); let prepare_start = Instant::now(); - // this function must guarantee that all transactions in the checkpoint are executed before it - // returns. This invariant is enforced in two phases: - // - First, we filter out any already executed transactions from the checkpoint in - // get_unexecuted_transactions() + // this function must guarantee that all transactions in the checkpoint are + // executed before it returns. This invariant is enforced in two phases: + // - First, we filter out any already executed transactions from the checkpoint + // in get_unexecuted_transactions() // - Second, we execute all remaining transactions. let (execution_digests, all_tx_digests, executable_txns, randomness_round) = @@ -676,8 +683,9 @@ async fn execute_checkpoint( ) .await?; - // Once execution is complete, we know that any randomness contained in this checkpoint has - // been successfully included in a checkpoint certified by quorum of validators. + // Once execution is complete, we know that any randomness contained in this + // checkpoint has been successfully included in a checkpoint certified by + // quorum of validators. if let Some(round) = randomness_round { // RandomnessManager is only present on validators. if let Some(randomness_reporter) = epoch_store.randomness_reporter() { @@ -754,11 +762,7 @@ async fn handle_execution_effects( .zip(all_tx_digests.clone()) .filter_map( |(fx, digest)| { - if fx.is_none() { - Some(digest) - } else { - None - } + if fx.is_none() { Some(digest) } else { None } }, ) .collect(); @@ -865,7 +869,8 @@ fn extract_end_of_epoch_tx( ) -> Option<(ExecutionDigests, VerifiedExecutableTransaction)> { checkpoint.end_of_epoch_data.as_ref()?; - // Last checkpoint must have the end of epoch transaction as the last transaction. + // Last checkpoint must have the end of epoch transaction as the last + // transaction. let checkpoint_sequence = checkpoint.sequence_number(); let execution_digests = checkpoint_store @@ -898,18 +903,20 @@ fn extract_end_of_epoch_tx( *checkpoint_sequence, ); - assert!(change_epoch_tx - .data() - .intent_message() - .value - .is_end_of_epoch_tx()); + assert!( + change_epoch_tx + .data() + .intent_message() + .value + .is_end_of_epoch_tx() + ); Some((*digests, change_epoch_tx)) } -// Given a checkpoint, filter out any already executed transactions, then return the remaining -// execution digests, transaction digests, transactions to be executed, and randomness round -// (if any) included in the checkpoint. +// Given a checkpoint, filter out any already executed transactions, then return +// the remaining execution digests, transaction digests, transactions to be +// executed, and randomness round (if any) included in the checkpoint. #[allow(clippy::type_complexity)] fn get_unexecuted_transactions( checkpoint: VerifiedCheckpoint, @@ -948,7 +955,8 @@ fn get_unexecuted_transactions( .collect::>() }); - // Remove the change epoch transaction so that we can special case its execution. + // Remove the change epoch transaction so that we can special case its + // execution. 
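The "remove the change epoch transaction" step above relies on an invariant asserted elsewhere in this file: an end-of-epoch checkpoint carries the change-epoch transaction as its final digest. Schematically, with `u64` digests and a boolean standing in for `end_of_epoch_data.is_some()`:

```rust
// Pop the change-epoch transaction off the end so the rest of the checkpoint
// can be executed normally and the final tx special-cased.
fn split_change_epoch_tx(
    mut digests: Vec<u64>,
    has_end_of_epoch_data: bool,
) -> (Vec<u64>, Option<u64>) {
    let change_epoch = if has_end_of_epoch_data {
        Some(digests.pop().expect("end-of-epoch checkpoint cannot be empty"))
    } else {
        None
    };
    (digests, change_epoch)
}

fn main() {
    assert_eq!(split_change_epoch_tx(vec![1, 2, 9], true), (vec![1, 2], Some(9)));
    assert_eq!(split_change_epoch_tx(vec![1, 2], false), (vec![1, 2], None));
}
```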
checkpoint.end_of_epoch_data.as_ref().tap_some(|_| { let change_epoch_tx_digest = execution_digests .pop() @@ -967,9 +975,9 @@ fn get_unexecuted_transactions( assert!(change_epoch_tx.data().intent_message().value.is_end_of_epoch_tx()); }); - // Look for a randomness state update tx. It must be first if it exists, because all other - // transactions in a checkpoint that includes a randomness state update are causally - // dependent on it. + // Look for a randomness state update tx. It must be first if it exists, because + // all other transactions in a checkpoint that includes a randomness state + // update are causally dependent on it. let randomness_round = if let Some(first_digest) = execution_digests.first() { let maybe_randomness_tx = cache_reader.get_transaction_block(&first_digest.transaction) .expect("read cannot fail") @@ -1073,8 +1081,8 @@ fn get_unexecuted_transactions( ) } -// Logs within the function are annotated with the checkpoint sequence number and epoch, -// from schedule_checkpoint(). +// Logs within the function are annotated with the checkpoint sequence number +// and epoch, from schedule_checkpoint(). #[instrument(level = "debug", skip_all)] async fn execute_transactions( execution_digests: Vec, diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs index d4511f28778..7c0d66c73c1 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_executor/tests.rs @@ -1,28 +1,31 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::*; -use sui_config::node::ExpensiveSafetyCheckConfig; -use sui_types::gas::GasCostSummary; -use tempfile::tempdir; - use std::{sync::Arc, time::Duration}; -use crate::authority::epoch_start_configuration::EpochStartConfiguration; -use crate::state_accumulator::AccumulatorStore; use broadcast::{Receiver, Sender}; +use sui_config::node::ExpensiveSafetyCheckConfig; use sui_protocol_config::SupportedProtocolVersions; -use sui_types::committee::ProtocolVersion; -use sui_types::messages_checkpoint::{ECMHLiveObjectSetDigest, EndOfEpochData, VerifiedCheckpoint}; +use sui_swarm_config::test_utils::{empty_contents, CommitteeFixture}; +use sui_types::{ + committee::ProtocolVersion, + gas::GasCostSummary, + messages_checkpoint::{ECMHLiveObjectSetDigest, EndOfEpochData, VerifiedCheckpoint}, + sui_system_state::epoch_start_sui_system_state::EpochStartSystemState, +}; +use tempfile::tempdir; use tokio::{sync::broadcast, time::timeout}; +use typed_store::Map; -use crate::authority::test_authority_builder::TestAuthorityBuilder; +use super::*; use crate::{ - authority::AuthorityState, checkpoints::CheckpointStore, state_accumulator::StateAccumulator, + authority::{ + epoch_start_configuration::EpochStartConfiguration, + test_authority_builder::TestAuthorityBuilder, AuthorityState, + }, + checkpoints::CheckpointStore, + state_accumulator::{AccumulatorStore, StateAccumulator}, }; -use sui_swarm_config::test_utils::{empty_contents, CommitteeFixture}; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemState; -use typed_store::Map; /// Test checkpoint executor happy path, test that checkpoint executor correctly /// picks up where it left off in the event of a mid-epoch node crash. 
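The randomness rule above is purely positional: only the first transaction of a checkpoint can be a randomness state update, since everything after it in such a checkpoint causally depends on it. A stand-in sketch (the enum is a simplified proxy for the real `TransactionKind`):

```rust
// Peek only at the first transaction; any other leading kind means the
// checkpoint carries no randomness update, so later positions need no check.
enum TxKind {
    RandomnessStateUpdate { randomness_round: u64 },
    Other,
}

fn randomness_round(transactions: &[TxKind]) -> Option<u64> {
    match transactions.first() {
        Some(TxKind::RandomnessStateUpdate { randomness_round }) => Some(*randomness_round),
        _ => None,
    }
}

fn main() {
    let txs = vec![TxKind::RandomnessStateUpdate { randomness_round: 7 }, TxKind::Other];
    assert_eq!(randomness_round(&txs), Some(7));
    assert_eq!(randomness_round(&[TxKind::Other]), None);
}
```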
@@ -40,10 +43,12 @@ pub async fn test_checkpoint_executor_crash_recovery() { CommitteeFixture, ) = init_executor_test(buffer_size, checkpoint_store.clone()).await; - assert!(checkpoint_store - .get_highest_executed_checkpoint_seq_number() - .unwrap() - .is_none()); + assert!( + checkpoint_store + .get_highest_executed_checkpoint_seq_number() + .unwrap() + .is_none() + ); let checkpoints = sync_new_checkpoints( &checkpoint_store, &checkpoint_sender, @@ -104,8 +109,8 @@ pub async fn test_checkpoint_executor_crash_recovery() { /// from the next epoch if called after reconfig /// /// TODO(william) disabling reconfig unit tests here for now until we can work -/// on correctly inserting transactions, especially the change_epoch tx. As it stands, this -/// is better tested in existing reconfig simtests +/// on correctly inserting transactions, especially the change_epoch tx. As it +/// stands, this is better tested in existing reconfig simtests #[tokio::test] #[ignore] pub async fn test_checkpoint_executor_cross_epoch() { @@ -126,10 +131,12 @@ pub async fn test_checkpoint_executor_cross_epoch() { let epoch = epoch_store.epoch(); assert_eq!(epoch, 0); - assert!(checkpoint_store - .get_highest_executed_checkpoint_seq_number() - .unwrap() - .is_none()); + assert!( + checkpoint_store + .get_highest_executed_checkpoint_seq_number() + .unwrap() + .is_none() + ); // sync 20 checkpoints let cold_start_checkpoints = sync_new_checkpoints( @@ -188,11 +195,13 @@ pub async fn test_checkpoint_executor_cross_epoch() { .await; // Ensure root state hash for epoch does not exist before we close epoch - assert!(authority_state - .get_execution_cache() - .get_root_state_accumulator_for_epoch(0) - .unwrap() - .is_none()); + assert!( + authority_state + .get_execution_cache() + .get_root_state_accumulator_for_epoch(0) + .unwrap() + .is_none() + ); // Ensure executor reaches end of epoch in a timely manner timeout(Duration::from_secs(5), async { @@ -266,12 +275,12 @@ pub async fn test_checkpoint_executor_cross_epoch() { .expect("root state hash for epoch should exist"); } -/// Test that if we crash at end of epoch / during reconfig, we recover on startup -/// by starting at the old epoch and immediately retrying reconfig +/// Test that if we crash at end of epoch / during reconfig, we recover on +/// startup by starting at the old epoch and immediately retrying reconfig /// /// TODO(william) disabling reconfig unit tests here for now until we can work -/// on correctly inserting transactions, especially the change_epoch tx. As it stands, this -/// is better tested in existing reconfig simtests +/// on correctly inserting transactions, especially the change_epoch tx. As it +/// stands, this is better tested in existing reconfig simtests #[tokio::test] #[ignore] pub async fn test_reconfig_crash_recovery() { @@ -286,15 +295,17 @@ pub async fn test_reconfig_crash_recovery() { Sender, CommitteeFixture, ) = init_executor_test( - 10, /* StateSync -> Executor channel buffer size */ + 10, // StateSync -> Executor channel buffer size checkpoint_store.clone(), ) .await; - assert!(checkpoint_store - .get_highest_executed_checkpoint_seq_number() - .unwrap() - .is_none()); + assert!( + checkpoint_store + .get_highest_executed_checkpoint_seq_number() + .unwrap() + .is_none() + ); // sync 1 checkpoint let checkpoint = sync_new_checkpoints( @@ -343,9 +354,10 @@ pub async fn test_reconfig_crash_recovery() { ); // Drop and re-istantiate checkpoint executor without performing reconfig. 
This - // is logically equivalent to reconfig crashing and the node restarting, in which - // case executor should be able to infer that, rather than beginning execution of - // the next epoch, we should immediately exit so that reconfig can be reattempted. + // is logically equivalent to reconfig crashing and the node restarting, in + // which case the executor should be able to infer that, rather than beginning + // execution of the next epoch, we should immediately exit so that reconfig + // can be reattempted. drop(executor); let mut executor = CheckpointExecutor::new_for_tests( checkpoint_sender.subscribe(), @@ -412,8 +424,8 @@ async fn init_executor_test( /// Creates and simulates syncing of a new checkpoint by StateSync, i.e. new /// checkpoint is persisted, along with its contents, highest synced checkpoint -/// watermark is updated, and message is broadcasted notifying of the newly synced -/// checkpoint. Returns created checkpoints +/// watermark is updated, and a message is broadcast notifying of the newly +/// synced checkpoint. Returns the created checkpoints. fn sync_new_checkpoints( checkpoint_store: &CheckpointStore, sender: &Sender, diff --git a/crates/sui-core/src/checkpoints/checkpoint_output.rs b/crates/sui-core/src/checkpoints/checkpoint_output.rs index 09d723c89c3..ab5a632af62 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_output.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_output.rs @@ -1,23 +1,27 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::StableSyncAuthoritySigner; -use crate::consensus_adapter::SubmitToConsensus; -use crate::epoch::reconfiguration::ReconfigurationInitiator; -use async_trait::async_trait; use std::sync::Arc; -use sui_types::base_types::AuthorityName; -use sui_types::error::SuiResult; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::{ CertifiedCheckpointSummary, CheckpointContents, CheckpointSignatureMessage, CheckpointSummary, SignedCheckpointSummary, VerifiedCheckpoint, + +use async_trait::async_trait; +use sui_types::{ + base_types::AuthorityName, + error::SuiResult, + message_envelope::Message, + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointContents, CheckpointSignatureMessage, + CheckpointSummary, SignedCheckpointSummary, VerifiedCheckpoint, + }, + messages_consensus::ConsensusTransaction, }; -use sui_types::messages_consensus::ConsensusTransaction; use tracing::{debug, info, instrument, trace}; use super::CheckpointMetrics; +use crate::{ + authority::{authority_per_epoch_store::AuthorityPerEpochStore, StableSyncAuthoritySigner}, + consensus_adapter::SubmitToConsensus, + epoch::reconfiguration::ReconfigurationInitiator, +}; #[async_trait] pub trait CheckpointOutput: Sync + Send + 'static { @@ -32,7 +36,7 @@ pub trait CheckpointOutput: Sync + Send + 'static { #[async_trait] pub trait CertifiedCheckpointOutput: Sync + Send + 'static { async fn certified_checkpoint_created(&self, summary: &CertifiedCheckpointSummary) - -> SuiResult; + -> SuiResult; } pub struct SubmitCheckpointToConsensus { diff --git a/crates/sui-core/src/checkpoints/metrics.rs b/crates/sui-core/src/checkpoints/metrics.rs index 62266d7c69e..a3a167c3ef3 100644 --- a/crates/sui-core/src/checkpoints/metrics.rs +++ b/crates/sui-core/src/checkpoints/metrics.rs @@ -1,13 +1,14 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + use mysten_metrics::histogram::Histogram; use prometheus::{ register_int_counter_vec_with_registry, register_int_counter_with_registry, register_int_gauge_vec_with_registry, register_int_gauge_with_registry, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Registry, }; -use std::sync::Arc; pub struct CheckpointMetrics { pub last_certified_checkpoint: IntGauge, @@ -44,12 +45,12 @@ impl CheckpointMetrics { last_created_checkpoint_age_ms: Histogram::new_in_registry( "last_created_checkpoint_age_ms", "Age of the last created checkpoint", - registry + registry, ), last_certified_checkpoint_age_ms: Histogram::new_in_registry( "last_certified_checkpoint_age_ms", "Age of the last certified checkpoint", - registry + registry, ), checkpoint_errors: register_int_counter_with_registry!( "checkpoint_errors", diff --git a/crates/sui-core/src/checkpoints/mod.rs b/crates/sui-core/src/checkpoints/mod.rs index df4e6530a4f..2e9874aa807 100644 --- a/crates/sui-core/src/checkpoints/mod.rs +++ b/crates/sui-core/src/checkpoints/mod.rs @@ -6,72 +6,84 @@ pub mod checkpoint_executor; mod checkpoint_output; mod metrics; -use crate::authority::{AuthorityState, EffectsNotifyRead}; -use crate::authority_client::{make_network_authority_clients_with_network_config, AuthorityAPI}; -use crate::checkpoints::causal_order::CausalOrder; -use crate::checkpoints::checkpoint_output::{CertifiedCheckpointOutput, CheckpointOutput}; -pub use crate::checkpoints::checkpoint_output::{ - LogCheckpointOutput, SendCheckpointToStateSync, SubmitCheckpointToConsensus, +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fs::File, + io::Write, + path::Path, + sync::Arc, + time::Duration, }; -pub use crate::checkpoints::metrics::CheckpointMetrics; -use crate::stake_aggregator::{InsertResult, MultiStakeAggregator}; -use crate::state_accumulator::StateAccumulator; + +use chrono::Utc; use diffy::create_patch; -use futures::future::{select, Either}; -use futures::FutureExt; +use futures::{ + future::{select, Either}, + FutureExt, +}; use itertools::Itertools; use mysten_metrics::{monitored_scope, spawn_monitored_task, MonitoredFutureExt}; use parking_lot::Mutex; +use rand::{rngs::OsRng, seq::SliceRandom}; use serde::{Deserialize, Serialize}; use sui_macros::fail_point; use sui_network::default_mysten_network_config; -use sui_types::base_types::ConciseableName; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait; - -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::consensus_handler::SequencedConsensusTransactionKey; -use chrono::Utc; -use rand::rngs::OsRng; -use rand::seq::SliceRandom; -use std::collections::{BTreeMap, HashMap, HashSet}; -use std::fs::File; -use std::io::Write; -use std::path::Path; -use std::sync::Arc; -use std::time::Duration; use sui_protocol_config::ProtocolVersion; -use sui_types::base_types::{AuthorityName, EpochId, TransactionDigest}; -use sui_types::committee::StakeUnit; -use sui_types::crypto::AuthorityStrongQuorumSignInfo; -use sui_types::digests::{CheckpointContentsDigest, CheckpointDigest}; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; -use sui_types::error::SuiResult; -use sui_types::gas::GasCostSummary; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointContents, CheckpointResponseV2, CheckpointSequenceNumber, - CheckpointSignatureMessage, CheckpointSummary, CheckpointSummaryResponse, 
CheckpointTimestamp, - EndOfEpochData, FullCheckpointContents, TrustedCheckpoint, VerifiedCheckpoint, - VerifiedCheckpointContents, +use sui_types::{ + base_types::{AuthorityName, ConciseableName, EpochId, TransactionDigest}, + committee::StakeUnit, + crypto::AuthorityStrongQuorumSignInfo, + digests::{CheckpointContentsDigest, CheckpointDigest}, + effects::{TransactionEffects, TransactionEffectsAPI}, + error::SuiResult, + gas::GasCostSummary, + message_envelope::Message, + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointContents, CheckpointRequestV2, CheckpointResponseV2, + CheckpointSequenceNumber, CheckpointSignatureMessage, CheckpointSummary, + CheckpointSummaryResponse, CheckpointTimestamp, EndOfEpochData, FullCheckpointContents, + SignedCheckpointSummary, TrustedCheckpoint, VerifiedCheckpoint, VerifiedCheckpointContents, + }, + messages_consensus::ConsensusTransactionKey, + signature::GenericSignature, + sui_system_state::{ + epoch_start_sui_system_state::EpochStartSystemStateTrait, SuiSystemState, + SuiSystemStateTrait, + }, + transaction::{TransactionDataAPI, TransactionKey, TransactionKind}, }; -use sui_types::messages_checkpoint::{CheckpointRequestV2, SignedCheckpointSummary}; -use sui_types::messages_consensus::ConsensusTransactionKey; -use sui_types::signature::GenericSignature; -use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait}; -use sui_types::transaction::{TransactionDataAPI, TransactionKey, TransactionKind}; use tokio::{ sync::{watch, Notify}, time::timeout, }; use tracing::{debug, error, info, instrument, warn}; -use typed_store::traits::{TableSummary, TypedStoreDebug}; -use typed_store::Map; use typed_store::{ rocks::{DBMap, MetricConf}, - TypedStoreError, + traits::{TableSummary, TypedStoreDebug}, + Map, TypedStoreError, }; use typed_store_derive::DBMapUtils; +pub use crate::checkpoints::{ + checkpoint_output::{ + LogCheckpointOutput, SendCheckpointToStateSync, SubmitCheckpointToConsensus, + }, + metrics::CheckpointMetrics, +}; +use crate::{ + authority::{ + authority_per_epoch_store::AuthorityPerEpochStore, AuthorityState, EffectsNotifyRead, + }, + authority_client::{make_network_authority_clients_with_network_config, AuthorityAPI}, + checkpoints::{ + causal_order::CausalOrder, + checkpoint_output::{CertifiedCheckpointOutput, CheckpointOutput}, + }, + consensus_handler::SequencedConsensusTransactionKey, + stake_aggregator::{InsertResult, MultiStakeAggregator}, + state_accumulator::StateAccumulator, +}; + pub type CheckpointHeight = u64; pub struct EpochStats { @@ -160,9 +172,9 @@ pub struct CheckpointStore { pub(crate) checkpoint_sequence_by_contents_digest: DBMap, - /// Stores entire checkpoint contents from state sync, indexed by sequence number, for - /// efficient reads of full checkpoints. Entries from this table are deleted after state - /// accumulation has completed. + /// Stores entire checkpoint contents from state sync, indexed by sequence + /// number, for efficient reads of full checkpoints. Entries from this + /// table are deleted after state accumulation has completed. full_checkpoint_content: DBMap, /// Stores certified checkpoints @@ -170,12 +182,13 @@ pub struct CheckpointStore { /// Map from checkpoint digest to certified checkpoint pub(crate) checkpoint_by_digest: DBMap, - /// Store locally computed checkpoint summaries so that we can detect forks and log useful - /// information. Can be pruned as soon as we verify that we are in agreement with the latest - /// certified checkpoint. 
+ /// Store locally computed checkpoint summaries so that we can detect forks + /// and log useful information. Can be pruned as soon as we verify that + /// we are in agreement with the latest certified checkpoint. pub(crate) locally_computed_checkpoints: DBMap, - /// A map from epoch ID to the sequence number of the last checkpoint in that epoch. + /// A map from epoch ID to the sequence number of the last checkpoint in + /// that epoch. epoch_last_checkpoint_map: DBMap, /// Watermarks used to determine the highest verified, fully synced, and @@ -220,7 +233,8 @@ impl CheckpointStore { "can't call insert_genesis_checkpoint with a checkpoint that doesn't have a sequence number of 0" ); - // Only insert the genesis checkpoint if the DB is empty and doesn't have it already + // Only insert the genesis checkpoint if the DB is empty and doesn't have it + // already if self .get_checkpoint_by_digest(checkpoint.digest()) .unwrap() @@ -570,10 +584,13 @@ impl CheckpointStore { if seq_number >= *checkpoint.sequence_number() { return Ok(()); } - assert_eq!(seq_number + 1, *checkpoint.sequence_number(), - "Cannot update highest executed checkpoint to {} when current highest executed checkpoint is {}", - checkpoint.sequence_number(), - seq_number); + assert_eq!( + seq_number + 1, + *checkpoint.sequence_number(), + "Cannot update highest executed checkpoint to {} when current highest executed checkpoint is {}", + checkpoint.sequence_number(), + seq_number + ); } debug!( checkpoint_seq = checkpoint.sequence_number(), @@ -597,8 +614,9 @@ impl CheckpointStore { /// Sets highest executed checkpoint to any value. /// - /// WARNING: This method is very subtle and can corrupt the database if used incorrectly. - /// It should only be used in one-off cases or tests after fully understanding the risk. + /// WARNING: This method is very subtle and can corrupt the database if used + /// incorrectly. It should only be used in one-off cases or tests after + /// fully understanding the risk. pub fn set_highest_executed_checkpoint_subtle( &self, checkpoint: &VerifiedCheckpoint, @@ -673,7 +691,8 @@ impl CheckpointStore { Ok(()) } - /// Given the epoch ID, and the last checkpoint of the epoch, derive a few statistics of the epoch. + /// Given the epoch ID, and the last checkpoint of the epoch, derive a few + /// statistics of the epoch. pub fn get_epoch_stats( &self, epoch: EpochId, @@ -959,7 +978,10 @@ impl CheckpointBuilder { { if chunk.is_empty() { // Always allow at least one tx in a checkpoint. - warn!("Size of single transaction ({size}) exceeds max checkpoint size ({}); allowing excessively large checkpoint to go through.", self.max_checkpoint_size_bytes); + warn!( + "Size of single transaction ({size}) exceeds max checkpoint size ({}); allowing excessively large checkpoint to go through.", + self.max_checkpoint_size_bytes + ); } else { chunks.push(chunk); chunk = Vec::new(); @@ -974,13 +996,14 @@ impl CheckpointBuilder { if !chunk.is_empty() || chunks.is_empty() { // We intentionally create an empty checkpoint if there is no content provided // to make a 'heartbeat' checkpoint. - // Important: if some conditions are added here later, we need to make sure we always - // have at least one chunk if last_pending_of_epoch is set + // Important: if some conditions are added here later, we need to make sure we + // always have at least one chunk if last_pending_of_epoch is set chunks.push(chunk); - // Note: empty checkpoints are ok - they shouldn't happen at all on a network with even - // modest load. 
Even if they do happen, it is still useful as it allows fullnodes to - // distinguish between "no transactions have happened" and "i am not receiving new - // checkpoints". + // Note: empty checkpoints are ok - they shouldn't happen at all on + // a network with even modest load. Even if they do + // happen, it is still useful as it allows fullnodes to + // distinguish between "no transactions have happened" and "I am not + // receiving new checkpoints". } Ok(chunks) } @@ -1001,9 +1024,12 @@ impl CheckpointBuilder { let last_verified = self.tables.get_epoch_last_checkpoint(previous_epoch)?; last_checkpoint = last_verified.map(VerifiedCheckpoint::into_summary_and_sequence); if let Some((ref seq, _)) = last_checkpoint { - debug!("No checkpoints in builder DB, taking checkpoint from previous epoch with sequence {seq}"); + debug!( + "No checkpoints in builder DB, taking checkpoint from previous epoch with sequence {seq}" + ); } else { - // This is some serious bug with when CheckpointBuilder started so surfacing it via panic + // This indicates a serious bug in how CheckpointBuilder was started, so + // surface it via panic panic!("Can not find last checkpoint for previous epoch {previous_epoch}"); } } @@ -1101,8 +1127,10 @@ impl CheckpointBuilder { let timestamp_ms = details.timestamp_ms; if let Some((_, last_checkpoint)) = &last_checkpoint { if last_checkpoint.timestamp_ms > timestamp_ms { - error!("Unexpected decrease of checkpoint timestamp, sequence: {}, previous: {}, current: {}", - sequence_number, last_checkpoint.timestamp_ms, timestamp_ms); + error!( + "Unexpected decrease of checkpoint timestamp, sequence: {}, previous: {}, current: {}", + sequence_number, last_checkpoint.timestamp_ms, timestamp_ms + ); } } @@ -1248,8 +1276,9 @@ impl CheckpointBuilder { Ok(system_state) } - /// For the given roots return complete list of effects to include in checkpoint - /// This list includes the roots and all their dependencies, which are not part of checkpoint already + /// For the given roots return the complete list of effects to include in the + /// checkpoint. This list includes the roots and all their dependencies + /// which are not already part of a checkpoint. #[instrument(level = "debug", skip_all)] fn complete_checkpoint_effects( &self, @@ -1269,7 +1298,8 @@ impl CheckpointBuilder { for (effect, tx_included) in roots.into_iter().zip(transactions_included.into_iter()) { let digest = effect.transaction_digest(); - // Unnecessary to read effects of a dependency if the effect is already processed. + // Unnecessary to read effects of a dependency if the effect is already + // processed. seen.insert(*digest); // Skip roots already included in checkpoints or roots from previous epochs @@ -1507,8 +1537,9 @@ impl CheckpointSignatureAggregator { Err(()) } InsertResult::QuorumReached(cert) => { - // It is not guaranteed that signature.authority == narwhal_cert.author, but we do verify - // the signature so we know that the author signed the message at some point. + // It is not guaranteed that signature.authority == narwhal_cert.author, but we + // do verify the signature so we know that the author signed the + // message at some point.
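The chunking logic above (and the 40K/100K expectation in the tests near the end of this file) follows a single rule: flush the current chunk when the next transaction would exceed the byte budget, unless the chunk is empty, so that one oversized transaction still ships alone. A self-contained sketch over transaction sizes only; the real code works on serialized transactions and emits the warning shown above for the oversized case:

```rust
// Pack transaction sizes into checkpoints under a byte budget, never
// producing an empty chunk for non-empty input.
fn chunk_by_size(sizes: &[usize], max_bytes: usize) -> Vec<Vec<usize>> {
    let mut chunks = Vec::new();
    let mut chunk: Vec<usize> = Vec::new();
    let mut chunk_bytes = 0;
    for &size in sizes {
        if chunk_bytes + size > max_bytes && !chunk.is_empty() {
            chunks.push(std::mem::take(&mut chunk));
            chunk_bytes = 0;
        }
        chunk.push(size);
        chunk_bytes += size;
    }
    if !chunk.is_empty() || chunks.is_empty() {
        // An empty trailing chunk survives only when there was no content at
        // all, mirroring the 'heartbeat' checkpoint case described above.
        chunks.push(chunk);
    }
    chunks
}

fn main() {
    // 100K budget, three 40K transactions: split into [40K, 40K] and [40K].
    assert_eq!(
        chunk_by_size(&[40_000, 40_000, 40_000], 100_000),
        vec![vec![40_000, 40_000], vec![40_000]]
    );
    // A single oversized transaction still forms its own checkpoint.
    assert_eq!(chunk_by_size(&[250_000], 100_000), vec![vec![250_000]]);
}
```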
if their_digest != self.digest { self.metrics.remote_checkpoint_forks.inc(); warn!( @@ -1532,9 +1563,10 @@ impl CheckpointSignatureAggregator { } } - /// Check if there is a split brain condition in checkpoint signature aggregation, defined - /// as any state wherein it is no longer possible to achieve quorum on a checkpoint proposal, - /// irrespective of the outcome of any outstanding votes. + /// Check if there is a split brain condition in checkpoint signature + /// aggregation, defined as any state wherein it is no longer possible + /// to achieve quorum on a checkpoint proposal, irrespective of the + /// outcome of any outstanding votes. fn check_for_split_brain(&self) { debug!( checkpoint_seq = self.summary.sequence_number, @@ -1576,10 +1608,10 @@ impl CheckpointSignatureAggregator { } /// Create data dump containing relevant data for diagnosing cause of the -/// split brain by querying one disagreeing validator for full checkpoint contents. -/// To minimize peer chatter, we only query one validator at random from each -/// disagreeing faction, as all honest validators that participated in this round may -/// inevitably run the same process. +/// split brain by querying one disagreeing validator for full checkpoint +/// contents. To minimize peer chatter, we only query one validator at random +/// from each disagreeing faction, as all honest validators that participated in +/// this round may inevitably run the same process. async fn diagnose_split_brain( all_unique_values: BTreeMap, StakeUnit)>, local_summary: CheckpointSummary, @@ -1767,7 +1799,8 @@ pub trait CheckpointServiceNotify { fn notify_checkpoint(&self) -> SuiResult; } -/// This is a service used to communicate with other pieces of sui(for ex. authority) +/// This is a service used to communicate with other pieces of sui(for ex. 
+/// authority) pub struct CheckpointService { tables: Arc, notify_builder: Arc, @@ -1889,8 +1922,9 @@ impl CheckpointServiceNotify for CheckpointService { .last_received_checkpoint_signatures .with_label_values(&[&signer.to_string()]) .set(sequence as i64); - // While it can be tempting to make last_signature_index into AtomicU64, this won't work - We need to make sure we write to `pending_signatures` and trigger `notify_aggregator` without race conditions + // While it can be tempting to make last_signature_index into AtomicU64, this + // won't work. We need to make sure we write to `pending_signatures` and + // trigger `notify_aggregator` without race conditions let mut index = self.last_signature_index.lock(); *index += 1; epoch_store.insert_checkpoint_signature(sequence, *index, info)?; @@ -1943,22 +1977,28 @@ impl From for PendingCheckpointV2 { #[cfg(test)] mod tests { - use super::*; - use crate::authority::test_authority_builder::TestAuthorityBuilder; + use std::{ + collections::{BTreeMap, HashMap}, + ops::Deref, + }; + use async_trait::async_trait; use shared_crypto::intent::{Intent, IntentScope}; - use std::collections::{BTreeMap, HashMap}; - use std::ops::Deref; use sui_macros::sim_test; - use sui_types::base_types::{ObjectID, SequenceNumber, TransactionEffectsDigest}; - use sui_types::crypto::{AuthoritySignInfo, Signature}; - use sui_types::effects::TransactionEffects; - use sui_types::messages_checkpoint::SignedCheckpointSummary; - use sui_types::move_package::MovePackage; - use sui_types::object; - use sui_types::transaction::{GenesisObject, VerifiedTransaction}; + use sui_types::{ + base_types::{ObjectID, SequenceNumber, TransactionEffectsDigest}, + crypto::{AuthoritySignInfo, Signature}, + effects::TransactionEffects, + messages_checkpoint::SignedCheckpointSummary, + move_package::MovePackage, + object, + transaction::{GenesisObject, VerifiedTransaction}, + }; use tokio::sync::mpsc; + use super::*; + use crate::authority::test_authority_builder::TestAuthorityBuilder; + #[sim_test] pub async fn checkpoint_builder_test() { telemetry_subscribers::init_for_testing(); @@ -1973,9 +2013,11 @@ mod tests { SequenceNumber::new(), BTreeMap::from([(format!("{:0>40000}", "1"), Vec::new())]), 100_000, - // no modules so empty type_origin_table as no types are defined in this package + // no modules so empty type_origin_table as no types are defined in this + // package Vec::new(), - // no modules so empty linkage_table as no dependencies of this package exist + // no modules so empty linkage_table as no dependencies of this package + // exist BTreeMap::new(), ) .unwrap(), @@ -2129,8 +2171,8 @@ mod tests { assert_eq!(c3t, vec![d(10), d(11), d(12)]); assert_eq!(c4t, vec![d(13)]); - // Pending at index 3 had 3 transactions of 40K size, and we configured 100K max. - // Verify that we split into 2 checkpoints. + // Pending at index 3 had 3 transactions of 40K size, and we configured 100K + // max. Verify that we split into 2 checkpoints. let (c5c, c5s) = result.recv().await.unwrap(); let c5t = c5c.iter().map(|d| d.transaction).collect::>(); let (c6c, c6s) = result.recv().await.unwrap(); diff --git a/crates/sui-core/src/consensus_adapter.rs b/crates/sui-core/src/consensus_adapter.rs index f7a690ce6bd..acde7800be0 100644 --- a/crates/sui-core/src/consensus_adapter.rs +++ b/crates/sui-core/src/consensus_adapter.rs @@ -1,62 +1,61 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::HashMap, + future::Future, + ops::Deref, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Instant, +}; + use arc_swap::{ArcSwap, ArcSwapOption}; use bytes::Bytes; -use dashmap::try_result::TryResult; -use dashmap::DashMap; -use futures::future::{select, Either}; -use futures::pin_mut; -use futures::FutureExt; +use dashmap::{try_result::TryResult, DashMap}; +use futures::{ + future::{select, Either}, + pin_mut, FutureExt, +}; use itertools::Itertools; +use mysten_metrics::{spawn_monitored_task, GaugeGuard, GaugeGuardFutureExt}; use narwhal_types::{TransactionProto, TransactionsClient}; use narwhal_worker::LazyNarwhalClient; use parking_lot::RwLockReadGuard; -use prometheus::Histogram; -use prometheus::HistogramVec; -use prometheus::IntCounterVec; -use prometheus::IntGauge; -use prometheus::IntGaugeVec; -use prometheus::Registry; use prometheus::{ register_histogram_vec_with_registry, register_histogram_with_registry, register_int_counter_vec_with_registry, register_int_gauge_vec_with_registry, - register_int_gauge_with_registry, + register_int_gauge_with_registry, Histogram, HistogramVec, IntCounterVec, IntGauge, + IntGaugeVec, Registry, }; -use rand::rngs::StdRng; -use rand::SeedableRng; -use std::collections::HashMap; -use std::future::Future; -use std::ops::Deref; -use std::sync::atomic::AtomicU64; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Instant; -use sui_types::base_types::TransactionDigest; -use sui_types::committee::{Committee, CommitteeTrait}; -use sui_types::error::{SuiError, SuiResult}; - -use tap::prelude::*; -use tokio::sync::{Semaphore, SemaphorePermit}; -use tokio::task::JoinHandle; -use tokio::time::{self}; - -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::consensus_handler::{classify, SequencedConsensusTransactionKey}; -use crate::consensus_throughput_calculator::{ConsensusThroughputProfiler, Level}; -use crate::epoch::reconfiguration::{ReconfigState, ReconfigurationInitiator}; -use crate::metrics::LatencyObserver; -use mysten_metrics::{spawn_monitored_task, GaugeGuard, GaugeGuardFutureExt}; +use rand::{rngs::StdRng, SeedableRng}; use sui_protocol_config::ProtocolConfig; -use sui_simulator::anemo::PeerId; -use sui_simulator::narwhal_network::connectivity::ConnectionStatus; -use sui_types::base_types::AuthorityName; -use sui_types::fp_ensure; -use sui_types::messages_consensus::ConsensusTransaction; -use sui_types::messages_consensus::ConsensusTransactionKind; -use tokio::time::Duration; +use sui_simulator::{anemo::PeerId, narwhal_network::connectivity::ConnectionStatus}; +use sui_types::{ + base_types::{AuthorityName, TransactionDigest}, + committee::{Committee, CommitteeTrait}, + error::{SuiError, SuiResult}, + fp_ensure, + messages_consensus::{ConsensusTransaction, ConsensusTransactionKind}, +}; +use tap::prelude::*; +use tokio::{ + sync::{Semaphore, SemaphorePermit}, + task::JoinHandle, + time::{self, Duration}, +}; use tracing::{debug, info, warn}; +use crate::{ + authority::authority_per_epoch_store::AuthorityPerEpochStore, + consensus_handler::{classify, SequencedConsensusTransactionKey}, + consensus_throughput_calculator::{ConsensusThroughputProfiler, Level}, + epoch::reconfiguration::{ReconfigState, ReconfigurationInitiator}, + metrics::LatencyObserver, +}; + #[cfg(test)] #[path = "unit_tests/consensus_tests.rs"] pub mod consensus_tests; @@ -221,8 +220,9 @@ impl SubmitToConsensus for LazyNarwhalClient { ) -> SuiResult { 
let transaction = bcs::to_bytes(transaction).expect("Serializing consensus transaction cannot fail"); - // The retrieved LocalNarwhalClient can be from the past epoch. Submit would fail after - // Narwhal shuts down, so there should be no correctness issue. + // The retrieved LocalNarwhalClient can be from the past epoch. Submit would + // fail after Narwhal shuts down, so there should be no correctness + // issue. let client = { let c = self.client.load(); if c.is_some() { @@ -255,17 +255,20 @@ pub struct ConsensusAdapter { max_pending_transactions: usize, /// Number of submitted transactions still inflight at this node. num_inflight_transactions: AtomicU64, - /// Dictates the maximum position from which will submit to consensus. Even if the is elected to - /// submit from a higher position than this, it will "reset" to the max_submit_position. + /// Dictates the maximum position from which this authority will submit to + /// consensus. Even if it is elected to submit from a higher position than + /// this, it will "reset" to the max_submit_position. max_submit_position: Option, - /// When provided it will override the current back off logic and will use this value instead - /// as delay step. + /// When provided it will override the current back off logic and will use + /// this value instead as the delay step. submit_delay_step_override: Option, - /// A structure to check the connection statuses populated by the Connection Monitor Listener + /// A structure to check the connection statuses populated by the Connection + /// Monitor Listener connection_monitor_status: Arc, /// A structure to check the reputation scores populated by Consensus low_scoring_authorities: ArcSwap>>>, - /// The throughput profiler to be used when making decisions to submit to consensus + /// The throughput profiler to be used when making decisions to submit to + /// consensus consensus_throughput_profiler: ArcSwapOption, /// A structure to register metrics metrics: ConsensusAdapterMetrics, @@ -337,11 +340,13 @@ impl ConsensusAdapter { self.consensus_throughput_profiler.store(Some(profiler)) } - // todo - this probably need to hold some kind of lock to make sure epoch does not change while we are recovering + // todo - this probably needs to hold some kind of lock to make sure epoch does + // not change while we are recovering pub fn submit_recovered(self: &Arc, epoch_store: &Arc) { - // Currently narwhal worker might lose transactions on restart, so we need to resend them - // todo - get_all_pending_consensus_transactions is called twice when - // initializing AuthorityPerEpochStore and here, should not be a big deal but can be optimized + // Currently narwhal worker might lose transactions on restart, so we need to + // resend them. todo - get_all_pending_consensus_transactions is called + // twice when initializing AuthorityPerEpochStore and here, should not + // be a big deal but can be optimized let mut recovered = epoch_store.get_all_pending_consensus_transactions(); #[allow(clippy::collapsible_if)] // This if can be collapsed but it will be ugly @@ -355,11 +360,13 @@ impl ConsensusAdapter { .any(ConsensusTransaction::is_end_of_publish) { // There are two cases when this is needed - // (1) We send EndOfPublish message after removing pending certificates in submit_and_wait_inner - // It is possible that node will crash between those two steps, in which case we might need to + // (1) We send EndOfPublish message after removing pending certificates in + // submit_and_wait_inner. It is possible that the node will crash + // between
those two steps, in which case we might need to // re-introduce EndOfPublish message on restart // (2) If node crashed inside ConsensusAdapter::close_epoch, - after reconfig lock state was written to DB and before we persisted EndOfPublish message + // after reconfig lock state was written to DB and before we persisted + // the EndOfPublish message recovered.push(ConsensusTransaction::new_end_of_publish(self.authority)); } } @@ -426,9 +433,11 @@ impl ConsensusAdapter { ) } - // According to the throughput profile we want to either allow some transaction duplication or not) - // When throughput profile is Low and the validator is in position = 1, then it will submit to consensus with much lower latency. - // When throughput profile is High then we go back to default operation and no-one co-submits. + // According to the throughput profile we want to either allow some transaction + // duplication or not. When the throughput profile is Low and the validator is + // in position = 1, then it will submit to consensus with much lower latency. + // When the throughput profile is High then we go back to default operation and + // no-one co-submits. fn override_by_throughput_profiler(&self, position: usize, latency: Duration) -> Duration { const LOW_THROUGHPUT_DELAY_BEFORE_SUBMIT_MS: u64 = 0; const MEDIUM_THROUGHPUT_DELAY_BEFORE_SUBMIT_MS: u64 = 2_500; @@ -439,8 +448,9 @@ impl ConsensusAdapter { if let Some(profiler) = p.as_ref() { let (level, _) = profiler.throughput_level(); - // we only run this for the position = 1 validator to co-submit with the validator of - // position = 0. We also enable this only when the feature is enabled on the protocol config. + // we only run this for the position = 1 validator to co-submit with the + // validator of position = 0. We also enable this only when the + // feature is enabled on the protocol config. if self.protocol_config.throughput_aware_consensus_submission() && position == 1 { return match level { Level::Low => Duration::from_millis(LOW_THROUGHPUT_DELAY_BEFORE_SUBMIT_MS), @@ -451,11 +461,7 @@ impl ConsensusAdapter { let l = Duration::from_millis(HIGH_THROUGHPUT_DELAY_BEFORE_SUBMIT_MS); // back off according to recorded latency if it's significantly higher - if latency >= 2 * l { - latency - } else { - l - } + if latency >= 2 * l { latency } else { l } } }; } @@ -463,9 +469,11 @@ impl ConsensusAdapter { latency } - /// Overrides the latency and the position if there are defined settings for `max_submit_position` and - /// `submit_delay_step_override`. If the `max_submit_position` has defined, then that will always be used - /// irrespective of any so far decision. Same for the `submit_delay_step_override`. + /// Overrides the latency and the position if there are defined settings for + /// `max_submit_position` and `submit_delay_step_override`. If the + /// `max_submit_position` has been defined, then that will always be used + /// irrespective of any decision so far. Same for the + /// `submit_delay_step_override`. fn override_by_max_submit_position_settings( &self, latency: Duration, @@ -481,12 +489,14 @@ impl ConsensusAdapter { } /// Check when this authority should submit the certificate to consensus. - /// This sorts all authorities based on pseudo-random distribution derived from transaction hash. + /// This sorts all authorities based on pseudo-random distribution derived + /// from transaction hash. /// - /// The function targets having 1 consensus transaction submitted per user transaction - /// when system operates normally.
+ /// The function targets having 1 consensus transaction submitted per user + /// transaction when system operates normally. /// - /// The function returns the position of this authority when it is their turn to submit the transaction to consensus. + /// The function returns the position of this authority when it is their + /// turn to submit the transaction to consensus. fn submission_position( &self, committee: &Committee, @@ -497,26 +507,32 @@ impl ConsensusAdapter { self.check_submission_wrt_connectivity_and_scores(positions) } - /// This function runs the following algorithm to decide whether or not to submit a transaction - /// to consensus. + /// This function runs the following algorithm to decide whether or not to + /// submit a transaction to consensus. /// - /// It takes in a deterministic list that represents positions of all the authorities. - /// The authority in the first position will be responsible for submitting to consensus, and - /// so we check if we are this validator, and if so, return true. + /// It takes in a deterministic list that represents positions of all the + /// authorities. The authority in the first position will be responsible + /// for submitting to consensus, and so we check if we are this + /// validator, and if so, return true. /// - /// If we are not in that position, we check our connectivity to the authority in that position. - /// If we are connected to them, we can assume that they are operational and will submit the transaction. - /// If we are not connected to them, we assume that they are not operational and we will not rely - /// on that authority to submit the transaction. So we shift them out of the first position, and - /// run this algorithm again on the new set of positions. + /// If we are not in that position, we check our connectivity to the + /// authority in that position. If we are connected to them, we can + /// assume that they are operational and will submit the transaction. If + /// we are not connected to them, we assume that they are not operational + /// and we will not rely on that authority to submit the transaction. So + /// we shift them out of the first position, and run this algorithm + /// again on the new set of positions. /// - /// This can possibly result in a transaction being submitted twice if an authority sees a false - /// negative in connectivity to another, such as in the case of a network partition. + /// This can possibly result in a transaction being submitted twice if an + /// authority sees a false negative in connectivity to another, such as + /// in the case of a network partition. /// - /// Recursively, if the authority further ahead of us in the positions is a low performing authority, we will - /// move our positions up one, and submit the transaction. This allows maintaining performance - /// overall. We will only do this part for authorities that are not low performers themselves to - /// prevent extra amplification in the case that the positions look like [low_scoring_a1, low_scoring_a2, a3] + /// Recursively, if the authority further ahead of us in the positions is a + /// low performing authority, we will move our positions up one, and + /// submit the transaction. This allows maintaining performance overall. 
+ /// We will only do this part for authorities that are not low performers + /// themselves to prevent extra amplification in the case that the + /// positions look like [low_scoring_a1, low_scoring_a2, a3] fn check_submission_wrt_connectivity_and_scores( &self, positions: Vec, @@ -564,12 +580,15 @@ } /// This method blocks until transaction is persisted in local database - /// It then returns handle to async task, user can join this handle to await while transaction is processed by consensus + /// It then returns a handle to an async task; the user can join this handle to + /// await while the transaction is processed by consensus /// - /// This method guarantees that once submit(but not returned async handle) returns, - /// transaction is persisted and will eventually be sent to consensus even after restart + /// This method guarantees that once submit (but not the returned async handle) + /// returns, the transaction is persisted and will eventually be sent to + /// consensus even after restart /// - /// When submitting a certificate caller **must** provide a ReconfigState lock guard + /// When submitting a certificate the caller **must** provide a ReconfigState + /// lock guard pub fn submit( self: &Arc, transaction: ConsensusTransaction, @@ -606,7 +625,8 @@ impl ConsensusAdapter { transaction: ConsensusTransaction, epoch_store: &Arc, ) -> JoinHandle<()> { - // Reconfiguration lock is dropped when pending_consensus_transactions is persisted, before it is handled by consensus + // Reconfiguration lock is dropped when pending_consensus_transactions is + // persisted, before it is handled by consensus let async_stage = self .clone() .submit_and_wait(transaction, epoch_store.clone()); @@ -621,17 +641,19 @@ impl ConsensusAdapter { transaction: ConsensusTransaction, epoch_store: Arc, ) { - // When epoch_terminated signal is received all pending submit_and_wait_inner are dropped. + // When epoch_terminated signal is received all pending submit_and_wait_inner + // are dropped. // - // This is needed because submit_and_wait_inner waits on read_notify for consensus message to be processed, - // which may never happen on epoch boundary. + // This is needed because submit_and_wait_inner waits on read_notify for + // consensus message to be processed, which may never happen on epoch + // boundary. // // In addition to that, within_alive_epoch ensures that all pending consensus // adapter tasks are stopped before reconfiguration can proceed. // - // This is essential because narwhal workers reuse same ports when narwhal restarts, - // this means we might be sending transactions from previous epochs to narwhal of - // new epoch if we have not had this barrier. + // This is essential because narwhal workers reuse the same ports when narwhal + // restarts; this means we might be sending transactions from previous + // epochs to narwhal of the new epoch if we have not had this barrier.
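// [Editor's example] An illustrative reading of the position-selection rules
// documented above, over simplified types (u32 ids and HashSets). This is a
// sketch, not the actual check_submission_wrt_connectivity_and_scores code,
// which consults the committee, ConnectionStatus, and reputation scores.
use std::collections::HashSet;

fn effective_position(
    positions: &[u32],          // deterministic, tx-digest derived ordering
    me: u32,
    connected: &HashSet<u32>,   // peers we currently consider reachable
    low_scoring: &HashSet<u32>, // reputation-based low performers
) -> Option<usize> {
    let mut position = 0;
    for &authority in positions {
        if authority == me {
            // Disconnected or low-scoring authorities ahead of us were
            // skipped, effectively moving us up so we (co-)submit sooner.
            return Some(position);
        }
        // Only an authority we are connected to and that is not a low
        // performer is relied upon to occupy its position.
        if connected.contains(&authority) && !low_scoring.contains(&authority) {
            position += 1;
        }
    }
    None // `me` is not part of this committee ordering
}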
epoch_store .within_alive_epoch(self.submit_and_wait_inner(transaction, &epoch_store)) .await @@ -722,8 +744,9 @@ impl ConsensusAdapter { let _in_flight_submission_guard = GaugeGuard::acquire(&self.metrics.sequencing_in_flight_submissions); - // We enter this branch when in select above await_submit completed and processed_waiter is pending - // This means it is time for us to submit transaction to consensus + // We enter this branch when in select above await_submit completed and + // processed_waiter is pending. This means it is time for us to + // submit transaction to consensus let submit_inner = async { let ack_start = Instant::now(); let mut retries: u32 = 0; @@ -755,9 +778,10 @@ impl ConsensusAdapter { }; } - // we want to record the num of retries when reporting latency but to avoid label - // cardinality we do some simple bucketing to give us a good enough idea of how - // many retries happened associated with the latency. + // we want to record the num of retries when reporting latency but to avoid + // label cardinality we do some simple bucketing to give us a + // good enough idea of how many retries happened associated with + // the latency. let bucket = match retries { 0..=10 => retries.to_string(), // just report the retry count as is 11..=20 => "between_10_and_20".to_string(), @@ -788,11 +812,13 @@ impl ConsensusAdapter { &transaction.kind { // If we are in RejectUserCerts state and we just drained the list we need to - // send EndOfPublish to signal other validators that we are not submitting more certificates to the epoch. - // Note that there could be a race condition here where we enter this check in RejectAllCerts state. + // send EndOfPublish to signal other validators that we are not submitting more + // certificates to the epoch. Note that there could be a race + // condition here where we enter this check in RejectAllCerts state. // In that case we don't need to send EndOfPublish because condition to enter - // RejectAllCerts is when 2f+1 other validators already sequenced their EndOfPublish message. - // Also note that we could sent multiple EndOfPublish due to that multiple tasks can enter here with + // RejectAllCerts is when 2f+1 other validators already sequenced their + // EndOfPublish message. Also note that we could send multiple + // EndOfPublish messages because multiple tasks can enter here with // pending_count == 0. This doesn't affect correctness. if epoch_store .get_reconfig_state_read_lock_guard() @@ -906,7 +932,8 @@ pub fn order_validators_for_submission( impl ReconfigurationInitiator for Arc { /// This method is called externally to begin reconfiguration /// It transition reconfig state to reject new certificates from user - /// ConsensusAdapter will send EndOfPublish message once pending certificate queue is drained. + /// ConsensusAdapter will send EndOfPublish message once pending certificate + /// queue is drained.
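// [Editor's example] The retry bucketing above, spelled out end to end. The
// first two arms are visible in the diff; the remaining labels are assumed
// for illustration only.
fn retry_bucket(retries: u32) -> String {
    match retries {
        0..=10 => retries.to_string(), // low counts reported exactly
        11..=20 => "between_10_and_20".to_string(),
        21..=50 => "between_20_and_50".to_string(), // assumed label
        51..=100 => "between_50_and_100".to_string(), // assumed label
        _ => "over_100".to_string(), // assumed label
    }
}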
fn close_epoch(&self, epoch_store: &Arc) { let send_end_of_publish = { let reconfig_guard = epoch_store.get_reconfig_state_write_lock_guard(); @@ -1057,22 +1084,22 @@ pub fn position_submit_certificate( #[cfg(test)] mod adapter_tests { - use super::position_submit_certificate; - use crate::consensus_adapter::{ - ConnectionMonitorStatusForTests, ConsensusAdapter, ConsensusAdapterMetrics, - LazyNarwhalClient, - }; + use std::{sync::Arc, time::Duration}; + use fastcrypto::traits::KeyPair; - use rand::Rng; - use rand::{rngs::StdRng, SeedableRng}; - use std::sync::Arc; - use std::time::Duration; + use rand::{rngs::StdRng, Rng, SeedableRng}; use sui_types::{ base_types::TransactionDigest, committee::Committee, crypto::{get_key_pair_from_rng, AuthorityKeyPair, AuthorityPublicKeyBytes}, }; + use super::position_submit_certificate; + use crate::consensus_adapter::{ + ConnectionMonitorStatusForTests, ConsensusAdapter, ConsensusAdapterMetrics, + LazyNarwhalClient, + }; + fn test_committee(rng: &mut StdRng, size: usize) -> Committee { let authorities = (0..size) .map(|_k| { diff --git a/crates/sui-core/src/consensus_handler.rs b/crates/sui-core/src/consensus_handler.rs index fe20f4c2474..3bf2db10d9e 100644 --- a/crates/sui-core/src/consensus_handler.rs +++ b/crates/sui-core/src/consensus_handler.rs @@ -104,19 +104,24 @@ impl ConsensusHandlerInitializer { } pub struct ConsensusHandler { - /// A store created for each epoch. ConsensusHandler is recreated each epoch, with the - /// corresponding store. This store is also used to get the current epoch ID. + /// A store created for each epoch. ConsensusHandler is recreated each + /// epoch, with the corresponding store. This store is also used to get + /// the current epoch ID. epoch_store: Arc, /// Holds the indices, hash and stats after the last consensus commit /// It is used for avoiding replaying already processed transactions, - /// checking chain consistency, and accumulating per-epoch consensus output stats. + /// checking chain consistency, and accumulating per-epoch consensus output + /// stats. last_consensus_stats: ExecutionIndicesWithStats, checkpoint_service: Arc, - /// cache reader is needed when determining the next version to assign for shared objects. + /// cache reader is needed when determining the next version to assign for + /// shared objects. cache_reader: Arc, - /// Reputation scores used by consensus adapter that we update, forwarded from consensus + /// Reputation scores used by consensus adapter that we update, forwarded + /// from consensus low_scoring_authorities: Arc>>, - /// The narwhal committee used to do stake computations for deciding set of low scoring authorities + /// The narwhal committee used to do stake computations for deciding set of + /// low scoring authorities committee: Committee, // TODO: ConsensusHandler doesn't really share metrics with AuthorityState. We could define // a new metrics type here if we want to. @@ -124,7 +129,8 @@ pub struct ConsensusHandler { /// Lru cache to quickly discard transactions processed by consensus processed_cache: LruCache, transaction_scheduler: AsyncTransactionScheduler, - /// Using the throughput calculator to record the current consensus throughput + /// Using the throughput calculator to record the current consensus + /// throughput throughput_calculator: Arc, } @@ -165,8 +171,9 @@ impl ConsensusHandler { } } - /// Updates the execution indexes based on the provided input. 
Some is returned when the indexes - /// are updated which means that the transaction has been seen for first time. None is returned + /// Updates the execution indexes based on the provided input. Some is + /// returned when the indexes are updated, which means that the + /// transaction has been seen for the first time. None is returned /// otherwise. fn update_index_and_hash(&mut self, index: ExecutionIndices, v: &[u8]) -> bool { update_index_and_hash(&mut self.last_consensus_stats, index, v) } @@ -202,7 +209,8 @@ fn update_index_and_hash( #[async_trait] impl ExecutionState for ConsensusHandler { - /// This function will be called by Narwhal, after Narwhal sequenced this certificate. + /// This function will be called by Narwhal, after Narwhal sequenced this + /// certificate. #[instrument(level = "debug", skip_all)] async fn handle_consensus_output(&mut self, consensus_output: ConsensusOutput) { let _scope = monitored_scope("HandleConsensusOutput"); @@ -226,10 +234,11 @@ impl ConsensusHandler { consensus_output: impl ConsensusOutputAPI, ) { // This code no longer supports old protocol versions. - assert!(self - .epoch_store - .protocol_config() - .consensus_order_end_of_epoch_last()); + assert!( + self.epoch_store + .protocol_config() + .consensus_order_end_of_epoch_last() + ); let last_committed_round = self.last_consensus_stats.index.last_committed_round; @@ -238,8 +247,9 @@ impl ConsensusHandler { assert!(round >= last_committed_round); if last_committed_round == round { // we can receive the same commit twice after restart - // It is critical that the writes done by this function are atomic - otherwise we can - // lose the later parts of a commit if we restart midway through processing it. + // It is critical that the writes done by this function are atomic - otherwise + // we can lose the later parts of a commit if we restart midway + // through processing it. info!( "Ignoring consensus output for round {} as it is already committed", round ); return; } - /* (serialized, transaction, output_cert) */ + // (serialized, transaction, output_cert) let mut transactions = vec![]; let timestamp = consensus_output.commit_timestamp_ms(); let leader_author = consensus_output.leader_author_index(); @@ -293,13 +303,15 @@ impl ConsensusHandler { consensus_output.leader_author_index(), )); - // Load all jwks that became active in the previous round, and commit them in this round. - // We want to delay one round because none of the transactions in the previous round could - // have been authenticated with the jwks that became active in that round. + // Load all jwks that became active in the previous round, and commit them in + // this round. We want to delay one round because none of the + // transactions in the previous round could have been authenticated with + // the jwks that became active in that round. // - // Because of this delay, jwks that become active in the last round of the epoch will - // never be committed. That is ok, because in the new epoch, the validators should - // immediately re-submit these jwks, and they can become active then. + // Because of this delay, jwks that become active in the last round of the epoch + // will never be committed. That is ok, because in the new epoch, the + // validators should immediately re-submit these jwks, and they can + // become active then.
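// [Editor's example] The replay guard described above, reduced to its core:
// after a restart the same commit can be delivered twice, so a commit for the
// already-committed round is skipped, and everything processed for a commit
// must be written atomically. Simplified types; not the actual handler.
struct CommitState {
    last_committed_round: u64,
}

impl CommitState {
    /// Returns false for a replayed commit, true when it must be processed.
    fn should_process(&mut self, round: u64) -> bool {
        assert!(round >= self.last_committed_round);
        if round == self.last_committed_round {
            // Duplicate delivery after restart: ignore, relying on the
            // atomicity of the earlier processing of this round.
            return false;
        }
        self.last_committed_round = round;
        true
    }
}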
let new_jwks = self .epoch_store .get_new_jwks(last_committed_round) @@ -362,8 +374,11 @@ impl ConsensusHandler { if let ConsensusTransactionKind::RandomnessStateUpdate(randomness_round, _) = &transaction.kind { - // These are deprecated and we should never see them. Log an error and eat the tx if one appears. - error!("BUG: saw deprecated RandomnessStateUpdate tx for commit round {round:?}, randomness round {randomness_round:?}") + // These are deprecated and we should never see them. Log an error and eat + // the tx if one appears. + error!( + "BUG: saw deprecated RandomnessStateUpdate tx for commit round {round:?}, randomness round {randomness_round:?}" + ) } else { let transaction = SequencedConsensusTransactionKind::External(transaction); transactions.push((serialized_transaction, transaction, authority_index)); @@ -392,8 +407,9 @@ impl ConsensusHandler { let mut all_transactions = Vec::new(); { - // We need a set here as well, since the processed_cache is a LRU cache and can drop - // entries while we're iterating over the sequenced transactions. + // We need a set here as well, since the processed_cache is a LRU cache and can + // drop entries while we're iterating over the sequenced + // transactions. let mut processed_set = HashSet::new(); for (seq, (serialized, transaction, cert_origin)) in @@ -506,10 +522,10 @@ impl AsyncTransactionScheduler { } } -/// Consensus handler used by Mysticeti. Since Mysticeti repo is not yet integrated, we use a -/// channel to receive the consensus output from Mysticeti. -/// During initialization, the sender is passed into Mysticeti which can send consensus output -/// to the channel. +/// Consensus handler used by Mysticeti. Since Mysticeti repo is not yet +/// integrated, we use a channel to receive the consensus output from Mysticeti. +/// During initialization, the sender is passed into Mysticeti which can send +/// consensus output to the channel. pub struct MysticetiConsensusHandler { handle: tokio::task::JoinHandle<()>, } @@ -639,9 +655,9 @@ impl<'de> Deserialize<'de> for SequencedConsensusTransactionKind { } } -// We can't serialize SequencedConsensusTransactionKind directly because it contains a -// VerifiedExecutableTransaction, which is not serializable (by design). This wrapper allows us to -// convert to a serializable format easily. +// We can't serialize SequencedConsensusTransactionKind directly because it +// contains a VerifiedExecutableTransaction, which is not serializable (by +// design). This wrapper allows us to convert to a serializable format easily. #[derive(Debug, Clone, Serialize, Deserialize)] enum SerializableSequencedConsensusTransactionKind { External(ConsensusTransaction), @@ -755,8 +771,9 @@ impl SequencedConsensusTransaction { pub fn is_user_tx_with_randomness(&self, randomness_state_enabled: bool) -> bool { if !randomness_state_enabled { - // If randomness is disabled, these should be processed same as a tx without randomness, - // which will eventually fail when the randomness state object is not found. + // If randomness is disabled, these should be processed same as a tx without + // randomness, which will eventually fail when the randomness state + // object is not found. 
return false; } let SequencedConsensusTransactionKind::External(ConsensusTransaction { diff --git a/crates/sui-core/src/consensus_manager/mod.rs b/crates/sui-core/src/consensus_manager/mod.rs index e6febe54aa6..afb293abf4b 100644 --- a/crates/sui-core/src/consensus_manager/mod.rs +++ b/crates/sui-core/src/consensus_manager/mod.rs @@ -1,24 +1,28 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::consensus_handler::ConsensusHandlerInitializer; -use crate::consensus_manager::mysticeti_manager::MysticetiManager; -use crate::consensus_manager::narwhal_manager::{NarwhalConfiguration, NarwhalManager}; -use crate::consensus_validator::SuiTxValidator; -use crate::mysticeti_adapter::LazyMysticetiClient; +use std::{path::PathBuf, sync::Arc, time::Instant}; + use async_trait::async_trait; use enum_dispatch::enum_dispatch; use fastcrypto::traits::KeyPair as _; use mysten_metrics::RegistryService; use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Instant; use sui_config::{ConsensusConfig, NodeConfig}; use sui_protocol_config::ProtocolVersion; use sui_types::committee::EpochId; use tokio::sync::{Mutex, MutexGuard}; +use crate::{ + authority::authority_per_epoch_store::AuthorityPerEpochStore, + consensus_handler::ConsensusHandlerInitializer, + consensus_manager::{ + mysticeti_manager::MysticetiManager, + narwhal_manager::{NarwhalConfiguration, NarwhalManager}, + }, + consensus_validator::SuiTxValidator, + mysticeti_adapter::LazyMysticetiClient, +}; + pub mod mysticeti_manager; pub mod narwhal_manager; @@ -194,7 +198,10 @@ impl Drop for RunningLockGuard<'_> { match *self.state_guard { // consensus was running and now will have to be marked as shutdown Running::True(epoch, version) => { - tracing::info!("Consensus shutdown for epoch {epoch:?} & protocol version {version:?} is complete - took {} seconds", self.start.elapsed().as_secs_f64()); + tracing::info!( + "Consensus shutdown for epoch {epoch:?} & protocol version {version:?} is complete - took {} seconds", + self.start.elapsed().as_secs_f64() + ); self.metrics .shutdown_latency @@ -205,10 +212,11 @@ impl Drop for RunningLockGuard<'_> { // consensus was not running and now will be marked as started Running::False => { tracing::info!( - "Starting up consensus for epoch {} & protocol version {:?} is complete - took {} seconds", - self.epoch.unwrap(), - self.protocol_version.unwrap(), - self.start.elapsed().as_secs_f64()); + "Starting up consensus for epoch {} & protocol version {:?} is complete - took {} seconds", + self.epoch.unwrap(), + self.protocol_version.unwrap(), + self.start.elapsed().as_secs_f64() + ); self.metrics .start_latency diff --git a/crates/sui-core/src/consensus_manager/mysticeti_manager.rs b/crates/sui-core/src/consensus_manager/mysticeti_manager.rs index d269400fbd5..e6a2cb816f3 100644 --- a/crates/sui-core/src/consensus_manager/mysticeti_manager.rs +++ b/crates/sui-core/src/consensus_manager/mysticeti_manager.rs @@ -46,7 +46,8 @@ pub struct MysticetiManager { impl MysticetiManager { /// NOTE: Mysticeti protocol key uses Ed25519 instead of BLS. - /// But for security, the protocol keypair must be different from the network keypair. + /// But for security, the protocol keypair must be different from the + /// network keypair. 
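// [Editor's example] A reduced sketch of the start/shutdown bookkeeping in
// RunningLockGuard above: the Running state remembers the epoch and protocol
// version while consensus is up, and the guard's Drop logs how long the
// transition took. Metrics, the async mutex, and error paths are omitted.
use std::time::Instant;

enum Running {
    True(u64, u64), // (epoch, protocol version)
    False,
}

struct TransitionGuard<'a> {
    state: &'a Running,
    start: Instant,
}

impl Drop for TransitionGuard<'_> {
    fn drop(&mut self) {
        match *self.state {
            // Consensus was running: this guard covered a shutdown.
            Running::True(epoch, version) => println!(
                "Consensus shutdown for epoch {epoch} & protocol version {version} took {}s",
                self.start.elapsed().as_secs_f64()
            ),
            // Consensus was not running: this guard covered a startup.
            Running::False => println!(
                "Consensus startup took {}s",
                self.start.elapsed().as_secs_f64()
            ),
        }
    }
}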
pub fn new( protocol_keypair: ed25519::Ed25519KeyPair, network_keypair: ed25519::Ed25519KeyPair, @@ -125,8 +126,8 @@ impl ConsensusManagerTrait for MysticetiManager { let registry = Registry::new_custom(Some("consensus".to_string()), None).unwrap(); - // TODO: that should be replaced by a metered channel. We can discuss if unbounded approach - // is the one we want to go with. + // TODO: that should be replaced by a metered channel. We can discuss if + // unbounded approach is the one we want to go with. #[allow(clippy::disallowed_methods)] let (commit_sender, commit_receiver) = unbounded_channel(); @@ -180,7 +181,8 @@ impl ConsensusManagerTrait for MysticetiManager { return; }; - // swap with empty to ensure there is no other reference to authority and we can safely do Arc unwrap + // swap with empty to ensure there is no other reference to authority and we can + // safely do Arc unwrap let r = self.authority.swap(None).unwrap(); let Ok((authority, registry_id)) = Arc::try_unwrap(r) else { panic!("Failed to retrieve the mysticeti authority"); diff --git a/crates/sui-core/src/consensus_manager/narwhal_manager.rs b/crates/sui-core/src/consensus_manager/narwhal_manager.rs index 98718743399..f521cabe194 100644 --- a/crates/sui-core/src/consensus_manager/narwhal_manager.rs +++ b/crates/sui-core/src/consensus_manager/narwhal_manager.rs @@ -1,27 +1,32 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::consensus_handler::ConsensusHandlerInitializer; -use crate::consensus_manager::{ - ConsensusManagerMetrics, ConsensusManagerTrait, Running, RunningLockGuard, -}; -use crate::consensus_validator::SuiTxValidator; +use std::{path::PathBuf, sync::Arc}; + use async_trait::async_trait; use fastcrypto::traits::KeyPair; use mysten_metrics::RegistryService; use narwhal_config::{Parameters, WorkerId}; use narwhal_network::client::NetworkClient; -use narwhal_node::primary_node::PrimaryNode; -use narwhal_node::worker_node::WorkerNodes; -use narwhal_node::{CertificateStoreCacheMetrics, NodeStorage}; -use std::path::PathBuf; -use std::sync::Arc; +use narwhal_node::{ + primary_node::PrimaryNode, worker_node::WorkerNodes, CertificateStoreCacheMetrics, NodeStorage, +}; use sui_config::NodeConfig; -use sui_types::committee::EpochId; -use sui_types::crypto::{AuthorityKeyPair, NetworkKeyPair}; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait; +use sui_types::{ + committee::EpochId, + crypto::{AuthorityKeyPair, NetworkKeyPair}, + sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait, +}; use tokio::sync::Mutex; +use crate::{ + authority::authority_per_epoch_store::AuthorityPerEpochStore, + consensus_handler::ConsensusHandlerInitializer, + consensus_manager::{ + ConsensusManagerMetrics, ConsensusManagerTrait, Running, RunningLockGuard, + }, + consensus_validator::SuiTxValidator, +}; + #[cfg(test)] #[path = "../unit_tests/narwhal_manager_tests.rs"] pub mod narwhal_manager_tests; @@ -87,10 +92,10 @@ impl ConsensusManagerTrait for NarwhalManager { // Note: After a binary is updated with the new protocol version and the node // is restarted, the protocol config does not take effect until we have a quorum // of validators have updated the binary. Because of this the protocol upgrade - // will happen in the following epoch after quorum is reached. 
In this case NarwhalManager - // is not recreated which is why we pass protocol config in at start and not at creation. - // To ensure correct behavior an updated protocol config must be passed in at the - // start of EACH epoch. + // will happen in the following epoch after quorum is reached. In this case + // NarwhalManager is not recreated which is why we pass protocol config in + // at start and not at creation. To ensure correct behavior an updated + // protocol config must be passed in at the start of EACH epoch. async fn start( &self, config: &NodeConfig, @@ -207,7 +212,8 @@ impl ConsensusManagerTrait for NarwhalManager { self.metrics.start_worker_retries.set(worker_retries as i64); } - // Shuts down whole Narwhal (primary & worker(s)) and waits until nodes have shutdown. + // Shuts down whole Narwhal (primary & worker(s)) and waits until nodes have + // shutdown. async fn shutdown(&self) { let Some(_guard) = RunningLockGuard::acquire_shutdown(&self.metrics, &self.running).await else { diff --git a/crates/sui-core/src/consensus_throughput_calculator.rs b/crates/sui-core/src/consensus_throughput_calculator.rs index ac1c4a3d9bf..9f7b33e00ec 100644 --- a/crates/sui-core/src/consensus_throughput_calculator.rs +++ b/crates/sui-core/src/consensus_throughput_calculator.rs @@ -1,16 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::AuthorityMetrics; +use std::{ + collections::{BTreeMap, VecDeque}, + num::NonZeroU64, + sync::Arc, +}; + use arc_swap::ArcSwap; use narwhal_types::TimestampMs; use parking_lot::Mutex; -use std::collections::{BTreeMap, VecDeque}; -use std::num::NonZeroU64; -use std::sync::Arc; use sui_protocol_config::Chain; use sui_types::digests::ChainIdentifier; use tracing::{debug, warn}; +use crate::authority::AuthorityMetrics; + const DEFAULT_OBSERVATIONS_WINDOW: u64 = 120; // number of observations to use to calculate the past throughput const DEFAULT_THROUGHPUT_PROFILE_UPDATE_INTERVAL_SECS: u64 = 60; // seconds that need to pass between two consecutive throughput profile updates const DEFAULT_THROUGHPUT_PROFILE_COOL_DOWN_THRESHOLD: u64 = 10; // 10% of throughput @@ -18,8 +22,9 @@ const DEFAULT_THROUGHPUT_PROFILE_COOL_DOWN_THRESHOLD: u64 = 10; // 10% of throug #[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)] pub struct ThroughputProfile { pub level: Level, - /// The lower range of the throughput that this profile is referring to. For example, if - /// `throughput = 1_000`, then for values >= 1_000 this throughput profile applies. + /// The lower range of the throughput that this profile is referring to. For + /// example, if `throughput = 1_000`, then for values >= 1_000 this + /// throughput profile applies. pub throughput: u64, } @@ -54,7 +59,8 @@ impl From for usize { #[derive(Debug)] pub struct ThroughputProfileRanges { - /// Holds the throughput profiles by the throughput range (upper_throughput, cool_down_threshold) + /// Holds the throughput profiles by the throughput range (upper_throughput, + /// cool_down_threshold) profiles: BTreeMap, } @@ -125,7 +131,8 @@ impl ThroughputProfileRanges { .expect("Should contain at least one throughput profile") .1 } - /// Resolves the throughput profile that corresponds to the provided throughput. + /// Resolves the throughput profile that corresponds to the provided + /// throughput. 
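// [Editor's example] The range resolution documented above, as a minimal
// sketch: profiles are keyed in a BTreeMap by their lower throughput bound,
// and a lookup picks the profile with the largest bound that does not exceed
// the current throughput. Simplified ProfileLevel type; assumed helper names.
use std::collections::BTreeMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum ProfileLevel {
    Low,
    High,
}

fn resolve_profile(profiles: &BTreeMap<u64, ProfileLevel>, throughput: u64) -> ProfileLevel {
    profiles
        .range(..=throughput)
        .next_back()
        .map(|(_, level)| *level)
        // Mirror the warn! + fallback branch when nothing matches.
        .unwrap_or(ProfileLevel::High)
}

// With profiles {0 => Low, 2_000 => High}: resolve_profile(.., 1_000) is Low
// and resolve_profile(.., 2_100) is High.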
pub fn resolve(&self, current_throughput: u64) -> ThroughputProfile { let mut iter = self.profiles.iter(); while let Some((threshold, profile)) = iter.next_back() { @@ -134,9 +141,13 @@ impl ThroughputProfileRanges { } } - warn!("Could not resolve throughput profile for throughput {} - we shouldn't end up here. Fallback to lowest profile as default.", current_throughput); + warn!( + "Could not resolve throughput profile for throughput {} - we shouldn't end up here. Fallback to lowest profile as default.", + current_throughput + ); - // If not found, then we should return the lowest possible profile as default to stay on safe side. + // If not found, then we should return the lowest possible profile as default to + // stay on safe side. self.highest_profile() } } @@ -173,24 +184,32 @@ pub struct ThroughputProfileEntry { struct ConsensusThroughputCalculatorInner { observations: VecDeque<(TimestampSecs, u64)>, total_transactions: u64, - /// The last timestamp that we considered as oldest to calculate the throughput over the observations window. + /// The last timestamp that we considered as oldest to calculate the + /// throughput over the observations window. last_oldest_timestamp: Option, } -/// The ConsensusThroughputProfiler is responsible for assigning the right throughput profile by polling -/// the measured consensus throughput. It is important to rely on the ConsensusThroughputCalculator to measure -/// throughput as we need to make sure that validators will see an as possible consistent view to assign -/// the right profile. +/// The ConsensusThroughputProfiler is responsible for assigning the right +/// throughput profile by polling the measured consensus throughput. It is +/// important to rely on the ConsensusThroughputCalculator to measure throughput +/// as we need to make sure that validators will see an as possible consistent +/// view to assign the right profile. pub struct ConsensusThroughputProfiler { - /// The throughput profile will be eligible for update every `throughput_profile_update_interval` seconds. - /// A bucketing approach is followed where the throughput timestamp is used in order to calculate on which - /// seconds bucket is assigned to. When we detect a change on that bucket then an update is triggered (if a different - /// profile is calculated). That allows validators to align on the update timing and ensure they will eventually - /// converge as the consensus timestamps are used. + /// The throughput profile will be eligible for update every + /// `throughput_profile_update_interval` seconds. A bucketing approach + /// is followed where the throughput timestamp is used in order to calculate + /// on which seconds bucket is assigned to. When we detect a change on + /// that bucket then an update is triggered (if a different profile is + /// calculated). That allows validators to align on the update timing and + /// ensure they will eventually converge as the consensus timestamps are + /// used. throughput_profile_update_interval: TimestampSecs, - /// When current calculated throughput (A) is lower than previous, and the assessed profile is now a lower than previous, - /// we'll change to the lower profile only when (A) <= (previous_profile.throughput) * (100 - throughput_profile_cool_down_threshold) / 100. - /// Otherwise we'll stick to the previous profile. We want to do that to avoid any jittery behaviour that alternates between two profiles. 
+ /// When current calculated throughput (A) is lower than previous, and the + /// assessed profile is now lower than the previous one, we'll change to the + /// lower profile only when (A) <= (previous_profile.throughput) * (100 - + /// throughput_profile_cool_down_threshold) / 100. Otherwise we'll stick + /// to the previous profile. We want to do that to avoid any jittery + /// behaviour that alternates between two profiles. throughput_profile_cool_down_threshold: u64, /// The profile ranges to use to profile the throughput profile_ranges: ThroughputProfileRanges, @@ -240,8 +259,9 @@ impl ConsensusThroughputProfiler { } } - // Return the current throughput level and the corresponding throughput when this was last updated. - // If that is not set yet then as default the High profile is returned and the throughput will be None. + // Return the current throughput level and the corresponding throughput when + // this was last updated. If that is not set yet then the High profile is + // returned by default and the throughput will be None. pub fn throughput_level(&self) -> (Level, u64) { // Update throughput profile if necessary time has passed let (throughput, timestamp) = self.calculator.current_throughput(); @@ -250,13 +270,15 @@ (profile.profile.level, profile.throughput) } - // Calculate and update the throughput profile based on the provided throughput. The throughput profile - // will only get updated when a different value has been calculated. For example, if the - // `last_throughput_profile` is `Low` , and again we calculate it as `Low` based on input, then we'll - // not update the profile or the timestamp. We do care to perform updates only when profiles differ. - // To ensure that we are protected against throughput profile change fluctuations, we update a - // throughput profile every `throughput_profile_update_interval` seconds based on the provided unix timestamps. - // The last throughput profile entry is returned. + // Calculate and update the throughput profile based on the provided throughput. + // The throughput profile will only get updated when a different value has + // been calculated. For example, if the `last_throughput_profile` is `Low`, + // and again we calculate it as `Low` based on input, then we'll not update + // the profile or the timestamp. We do care to perform updates only when + // profiles differ. To ensure that we are protected against throughput + // profile change fluctuations, we update a throughput profile every + // `throughput_profile_update_interval` seconds based on the provided unix + // timestamps. The last throughput profile entry is returned. fn update_and_fetch_throughput_profile( &self, throughput: u64, @@ -264,8 +286,9 @@ ) -> ThroughputProfileEntry { let last_profile = self.last_throughput_profile.load(); - // Skip any processing if provided timestamp is older than the last used one. Also return existing - // profile when provided timestamp is 0 - this avoids triggering an immediate update eventually overriding + // Skip any processing if provided timestamp is older than the last used one. + // Also return existing profile when provided timestamp is 0 - this + // avoids triggering an immediate update eventually overriding // the default value.
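// [Editor's example] The update cadence described above reduces to integer
// bucketing of consensus timestamps plus a cool-down on downgrades. A sketch
// under assumed plain-u64 inputs, not the actual method:
fn should_update_profile(
    timestamp: u64,      // consensus timestamp, seconds
    last_timestamp: u64, // timestamp of the last profile update
    interval: u64,       // throughput_profile_update_interval
    new_threshold: u64,  // lower bound of the newly resolved profile
    old_threshold: u64,  // lower bound of the current profile
    throughput: u64,
    cool_down_pct: u64,  // e.g. 10 (percent)
) -> bool {
    // All validators bucket the same consensus timestamps, so they converge
    // on the same update points.
    if timestamp / interval <= last_timestamp / interval {
        return false;
    }
    if new_threshold < old_threshold {
        // Downgrade only once throughput has dropped below the cool-down
        // margin, avoiding flapping between two adjacent profiles:
        // throughput <= old_threshold * (100 - cool_down_pct) / 100
        return throughput <= old_threshold * (100 - cool_down_pct) / 100;
    }
    true
}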
if timestamp == 0 || timestamp < last_profile.timestamp { return **last_profile; } @@ -278,13 +301,14 @@ last_profile.timestamp / self.throughput_profile_update_interval; // Update only when we minimum time has been passed since last update. - // We allow the edge case to update on the same bucket when a different profile has been - // computed for the exact same timestamp. + // We allow the edge case to update on the same bucket when a different profile + // has been computed for the exact same timestamp. let should_update_profile = if current_seconds_bucket > last_profile_seconds_bucket || (profile != last_profile.profile && last_profile.timestamp == timestamp) { if profile < last_profile.profile { - // If new profile is smaller than previous one, then make sure the cool down threshold is respected. + // If the new profile is smaller than the previous one, then make sure the cool down + // threshold is respected. let min_throughput = last_profile .profile .throughput @@ -318,14 +342,16 @@ } } -/// ConsensusThroughputCalculator is calculating the transaction throughput as this is coming out from -/// consensus. The throughput is calculated using a sliding window approach and leveraging the timestamps -/// provided by consensus. +/// ConsensusThroughputCalculator is calculating the transaction throughput as +/// this is coming out from consensus. The throughput is calculated using a +/// sliding window approach and leveraging the timestamps provided by consensus. pub struct ConsensusThroughputCalculator { - /// The number of transaction throughput observations that should be stored within the observations - /// vector in the ConsensusThroughputCalculatorInner. Those observations will be used to calculate - /// the current transactions throughput. We want to select a number that give us enough observations - /// so we better calculate the throughput and protected against spikes. A large enough value though - /// will make us less reactive to throughput changes. + /// The number of transaction throughput observations that should be stored + /// within the observations vector in the + /// ConsensusThroughputCalculatorInner. Those observations will be used to + /// calculate the current transactions throughput. We want to select a + /// number that gives us enough observations so we can better calculate the + /// throughput and protect against spikes. A large enough value though + /// will make us less reactive to throughput changes. observations_window: u64, inner: Mutex, @@ -347,22 +373,28 @@ } } - // Adds an observation of the number of transactions that have been sequenced after deduplication - // and the corresponding leader timestamp. The observation timestamps should be monotonically - // incremented otherwise observation will be ignored. + // Adds an observation of the number of transactions that have been sequenced + // after deduplication and the corresponding leader timestamp. The + // observation timestamps should be monotonically incremented, otherwise the + // observation will be ignored. pub fn add_transactions(&self, timestamp_ms: TimestampMs, num_of_transactions: u64) { let mut inner = self.inner.lock(); let timestamp_secs: TimestampSecs = timestamp_ms / 1_000; // lowest bucket we care is seconds if let Some((front_ts, transactions)) = inner.observations.front_mut() { - // First check that the timestamp is monotonically incremented - ignore any observation that is not - later from previous one (it shouldn't really happen). + // First check that the timestamp is monotonically incremented - ignore any + // observation that is not later than the previous one (it shouldn't + // really happen).
+ // First check that the timestamp is monotonically incremented - ignore any + // observation that is not later from previous one (it shouldn't + // really happen). if timestamp_secs < *front_ts { - warn!("Ignoring observation of transactions:{} as has earlier timestamp than last observation {}s < {}s", num_of_transactions, timestamp_secs, front_ts); + warn!( + "Ignoring observation of transactions:{} as has earlier timestamp than last observation {}s < {}s", + num_of_transactions, timestamp_secs, front_ts + ); return; } - // Not very likely, but if transactions refer to same second we add to the last element. + // Not very likely, but if transactions refer to same second we add to the last + // element. if timestamp_secs == *front_ts { *transactions = transactions.saturating_add(num_of_transactions); } else { @@ -379,15 +411,19 @@ impl ConsensusThroughputCalculator { // update total number of transactions in the observations list inner.total_transactions = inner.total_transactions.saturating_add(num_of_transactions); - // If we have more values on our window of max values, remove the last one, and calculate throughput. - // If we have the exact same values on our window of max values, then still calculate the throughput to ensure - // that we are taking into account the case where the last bucket gets updated because it falls into the same second. + // If we have more values on our window of max values, remove the last one, and + // calculate throughput. If we have the exact same values on our window + // of max values, then still calculate the throughput to ensure + // that we are taking into account the case where the last bucket gets updated + // because it falls into the same second. if inner.observations.len() as u64 >= self.observations_window { let last_element_ts = if inner.observations.len() as u64 == self.observations_window { if let Some(ts) = inner.last_oldest_timestamp { ts } else { - warn!("Skip calculation - we still don't have enough elements to pop the last observation"); + warn!( + "Skip calculation - we still don't have enough elements to pop the last observation" + ); return; } } else { @@ -417,12 +453,16 @@ impl ConsensusThroughputCalculator { self.current_throughput .store(Arc::new((current_throughput, timestamp_secs))); } else { - warn!("Skip calculating throughput as time period is {}. This is very unlikely to happen, should investigate.", period); + warn!( + "Skip calculating throughput as time period is {}. This is very unlikely to happen, should investigate.", + period + ); } } } - // Returns the current (live calculated) throughput and the corresponding timestamp of when this got updated. + // Returns the current (live calculated) throughput and the corresponding + // timestamp of when this got updated. 
    pub fn current_throughput(&self) -> (u64, TimestampSecs) {
        *self.current_throughput.load().as_ref()
    }
@@ -430,9 +470,10 @@ impl ConsensusThroughputCalculator {

#[cfg(test)]
mod tests {
+    use prometheus::Registry;
+
    use super::*;
    use crate::consensus_throughput_calculator::Level::{High, Low};
-    use prometheus::Registry;

    #[test]
    pub fn test_throughput_profile_ranges() {
@@ -483,7 +524,8 @@ mod tests {
        calculator.add_transactions(3000 as TimestampMs, 1_000);
        calculator.add_transactions(4000 as TimestampMs, 1_000);

-        // We expect to have a rate of 1K tx/sec with last update timestamp the 4th second
+        // We expect to have a rate of 1K tx/sec with last update timestamp the 4th
+        // second
        assert_eq!(calculator.current_throughput(), (1000, 4));

        // We are adding more transactions to get over 2K tx/sec
        calculator.add_transactions(5_000 as TimestampMs, 2_500);
        calculator.add_transactions(6_000 as TimestampMs, 2_800);
        assert_eq!(calculator.current_throughput(), (2100, 6));

-        // Let's now add 0 transactions after 5 seconds. Since 5 seconds have passed since the last
-        // update and now the transactions are 0 we expect the throughput to be calculate as:
-        // 2800 + 2500 + 0 = 5300 / (15sec - 4sec) = 5300 / 11sec = 481 tx/sec
+        // Let's now add 0 transactions after 5 seconds. Since 5 seconds have passed
+        // since the last update and now the transactions are 0 we expect the
+        // throughput to be calculated as: 2800 + 2500 + 0 = 5300 / (15sec -
+        // 4sec) = 5300 / 11sec = 481 tx/sec
        calculator.add_transactions(15_000 as TimestampMs, 0);
        assert_eq!(calculator.current_throughput(), (481, 15));
@@ -506,7 +549,8 @@ mod tests {
        calculator.add_transactions(20_000 as TimestampMs, 0);
        assert_eq!(calculator.current_throughput(), (0, 20));

-        // By adding now a few entries with lots of transactions increase again the throughput
+        // By now adding a few entries with lots of transactions we increase the
+        // throughput again
        calculator.add_transactions(21_000 as TimestampMs, 1_000);
        calculator.add_transactions(22_000 as TimestampMs, 2_000);
        calculator.add_transactions(23_000 as TimestampMs, 3_100);
@@ -524,19 +568,21 @@ mod tests {
        // adding one observation
        calculator.add_transactions(1_000, 0);

-        // Adding observations with same timestamp should fall under the same bucket and won't lead
-        // to throughput update.
+        // Adding observations with the same timestamp should fall under the same bucket
+        // and won't lead to a throughput update.
        for _ in 0..10 {
            calculator.add_transactions(2_340, 100);
        }
        assert_eq!(calculator.current_throughput(), (0, 0));

-        // Adding now one observation on a different second bucket will change throughput
+        // Adding now one observation on a different second bucket will change
+        // throughput
        calculator.add_transactions(5_000, 0);
        assert_eq!(calculator.current_throughput(), (250, 5));

-        // Updating further the last bucket with more transactions it keeps updating the throughput
+        // Updating the last bucket further with more transactions keeps updating the
+        // throughput
        calculator.add_transactions(5_000, 400);
        assert_eq!(calculator.current_throughput(), (350, 5));
@@ -566,26 +612,28 @@ mod tests {
            ranges,
        );

-        // When no transactions exists, the calculator will return by default "High" to err on the
-        // assumption that there is lots of load.
+        // When no transactions exist, the calculator will return by default "High" to
+        // err on the assumption that there is lots of load.
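The profiler semantics exercised by these tests can likewise be sketched independently. In the sketch below, `ProfileRange`, `profile_for`, and `apply_cooldown` are invented names, under the assumption that each level carries an inclusive throughput upper bound and that a downgrade is only accepted once throughput drops below the previous level's bound minus a cool-down offset (10% in the tests further down).

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Level {
    Low,
    High,
}

struct ProfileRange {
    upper_bound: u64, // inclusive max throughput for this level
    level: Level,
}

// Pick the first range whose upper bound covers the observed throughput.
fn profile_for(tput: u64, ranges: &[ProfileRange]) -> Level {
    ranges
        .iter()
        .find(|r| tput <= r.upper_bound)
        .map(|r| r.level)
        .unwrap_or(Level::High)
}

// Only downgrade once we are clearly below the previous threshold.
fn apply_cooldown(prev: (Level, u64), candidate: Level, tput: u64, cooldown: f64) -> Level {
    let (prev_level, prev_bound) = prev;
    if candidate < prev_level {
        let min = (prev_bound as f64 * (1.0 - cooldown)) as u64;
        if tput >= min {
            return prev_level; // inside the cool-down band: keep the old profile
        }
    }
    candidate
}

fn main() {
    let ranges = [
        ProfileRange { upper_bound: 2_000, level: Level::Low },
        ProfileRange { upper_bound: u64::MAX, level: Level::High },
    ];
    assert_eq!(profile_for(2_100, &ranges), Level::High);
    // 1_900 tx/sec is below the 2_000 bound but above the 10% offset (1_800),
    // so the profile stays High; 1_500 finally triggers the downgrade.
    assert_eq!(apply_cooldown((Level::High, 2_000), profile_for(1_900, &ranges), 1_900, 0.1), Level::High);
    assert_eq!(apply_cooldown((Level::High, 2_000), profile_for(1_500, &ranges), 1_500, 0.1), Level::Low);
}
```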
        assert_eq!(profiler.throughput_level(), (High, 0));

        calculator.add_transactions(1000 as TimestampMs, 1_000);
        calculator.add_transactions(2000 as TimestampMs, 1_000);
        calculator.add_transactions(3000 as TimestampMs, 1_000);

-        // We expect to have a rate of 1K tx/sec, that's < 2K limit , so throughput profile remains to "High" - nothing gets updated
+        // We expect to have a rate of 1K tx/sec, that's < the 2K limit, so throughput
+        // profile remains at "High" - nothing gets updated
        assert_eq!(profiler.throughput_level(), (High, 0));

-        // We are adding more transactions to get over 2K tx/sec, so throughput profile should now be categorised
-        // as "high"
+        // We are adding more transactions to get over 2K tx/sec, so throughput profile
+        // should now be categorised as "High"
        calculator.add_transactions(4000 as TimestampMs, 2_500);
        calculator.add_transactions(5000 as TimestampMs, 2_800);

        assert_eq!(profiler.throughput_level(), (High, 2100));

-        // Let's now add 0 transactions after at least 5 seconds. Since the update should happen every 5 seconds
-        // now the transactions are 0 we expect the throughput to be calculate as:
-        // 2800 + 2800 + 0 = 5300 / 15 - 4sec = 5600 / 11sec = 509 tx/sec
+        // Let's now add 0 transactions after at least 5 seconds. Since the update
+        // should happen every 5 seconds and now the transactions are 0 we expect
+        // the throughput to be calculated as: 2800 + 2800 + 0 = 5600 / (15sec -
+        // 4sec) = 5600 / 11sec = 509 tx/sec
        calculator.add_transactions(7_000 as TimestampMs, 2_800);
        calculator.add_transactions(15_000 as TimestampMs, 0);
@@ -599,8 +647,8 @@ mod tests {

        assert_eq!(profiler.throughput_level(), (Low, 0));

-        // By adding a few entries with lots of transactions for the exact same last timestamp it will
-        // trigger a throughput profile update.
+        // By adding a few entries with lots of transactions for the exact same last
+        // timestamp it will trigger a throughput profile update.
        calculator.add_transactions(20_000 as TimestampMs, 4_000);
        calculator.add_transactions(20_000 as TimestampMs, 4_000);
        calculator.add_transactions(20_000 as TimestampMs, 4_000);
@@ -633,8 +681,9 @@ mod tests {
            ranges,
        );

-        // Current setup is `throughput_profile_update_interval` = 5sec, which means that throughput profile
-        // should get updated every 5 seconds (based on the provided unix timestamp).
+        // Current setup is `throughput_profile_update_interval` = 5sec, which means
+        // that throughput profile should get updated every 5 seconds (based on
+        // the provided unix timestamp).
        calculator.add_transactions(3_000 as TimestampMs, 2_200);
        calculator.add_transactions(4_000 as TimestampMs, 4_200);
@@ -642,18 +691,21 @@ mod tests {

        assert_eq!(profiler.throughput_level(), (High, 2_100));

-        // When adding transactions at timestamp 10s the bucket changes and the profile should get updated
+        // When adding transactions at timestamp 10s the bucket changes and the profile
+        // should get updated
        calculator.add_transactions(10_000 as TimestampMs, 1_000);

        assert_eq!(profiler.throughput_level(), (Low, 866));

-        // Now adding transactions at timestamp 16s the bucket changes and profile should get updated
+        // Now adding transactions at timestamp 16s the bucket changes and profile
+        // should get updated
        calculator.add_transactions(16_000 as TimestampMs, 20_000);

        assert_eq!(profiler.throughput_level(), (High, 2333));

-        // Keep adding transactions that fall under the same timestamp as the previous one, even though
-        // traffic should be marked as low it doesn't until the bucket of 20s is updated.
+        // Keep adding transactions that fall under the same timestamp as the previous
+        // one; even though traffic should be marked as low, it doesn't change until
+        // the bucket of 20s is updated.
        calculator.add_transactions(17_000 as TimestampMs, 0);
        calculator.add_transactions(18_000 as TimestampMs, 0);
        calculator.add_transactions(19_000 as TimestampMs, 0);
@@ -687,14 +739,16 @@ mod tests {
            ranges,
        );

-        // Adding 4 observations of 3_000 tx/sec, so in the end throughput profile should be flagged as high
+        // Adding 4 observations of 3_000 tx/sec, so in the end throughput profile
+        // should be flagged as high
        for i in 1..=4 {
            calculator.add_transactions(i * 1_000, 3_000);
        }
        assert_eq!(profiler.throughput_level(), (High, 3_000));

-        // Now let's add some transactions to bring throughput little bit bellow the upper Low threshold (2000 tx/sec)
-        // but still above the 10% offset which is 1800 tx/sec.
+        // Now let's add some transactions to bring throughput a little bit below the
+        // upper Low threshold (2000 tx/sec) but still above the 10% offset
+        // which is 1800 tx/sec.
        calculator.add_transactions(5_000, 1_900);
        calculator.add_transactions(6_000, 1_900);
        calculator.add_transactions(7_000, 1_900);

        assert_eq!(calculator.current_throughput(), (1_900, 7));
        assert_eq!(profiler.throughput_level(), (High, 3_000));

-        // Let's bring down more throughput - now the throughput profile should get updated
+        // Let's bring throughput down more - now the throughput profile should get
+        // updated
        calculator.add_transactions(8_000, 1_500);
        calculator.add_transactions(9_000, 1_500);
        calculator.add_transactions(10_000, 1_500);
diff --git a/crates/sui-core/src/consensus_types/consensus_output_api.rs b/crates/sui-core/src/consensus_types/consensus_output_api.rs
index 85307ba4e67..067c71730f2 100644
--- a/crates/sui-core/src/consensus_types/consensus_output_api.rs
+++ b/crates/sui-core/src/consensus_types/consensus_output_api.rs
@@ -2,15 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Display;

-use crate::consensus_types::AuthorityIndex;
use consensus_core::BlockAPI;
use fastcrypto::hash::Hash;
use narwhal_types::{BatchAPI, CertificateAPI, ConsensusOutputDigest, HeaderAPI};
use sui_types::{digests::ConsensusCommitDigest, messages_consensus::ConsensusTransaction};

+use crate::consensus_types::AuthorityIndex;
+
/// A list of tuples of:
-/// (certificate origin authority index, all transactions corresponding to the certificate).
-/// For each transaction, returns the serialized transaction and the deserialized transaction.
+/// (certificate origin authority index, all transactions corresponding to the
+/// certificate). For each transaction, returns the serialized transaction and
+/// the deserialized transaction.
type ConsensusOutputTransactions<'a> = Vec<(AuthorityIndex, Vec<(&'a [u8], ConsensusTransaction)>)>;

pub(crate) trait ConsensusOutputAPI: Display {
@@ -95,8 +97,9 @@ impl ConsensusOutputAPI for narwhal_types::ConsensusOutput {
    }

    fn consensus_digest(&self) -> ConsensusCommitDigest {
-        // We port ConsensusOutputDigest, a narwhal space object, into ConsensusCommitDigest, a sui-core space object.
-        // We assume they always have the same format.
+        // We port ConsensusOutputDigest, a narwhal space object, into
+        // ConsensusCommitDigest, a sui-core space object. We assume they always
+        // have the same format.
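The digest-porting pattern referenced in this hunk relies on a compile-time size equality check before copying the inner bytes between two newtypes. A hedged sketch follows, with `DigestA`/`DigestB` as stand-ins for ConsensusOutputDigest and ConsensusCommitDigest, and a plain `const` assertion in place of the `static_assertions::assert_eq_size!` macro used by the real code:

```rust
struct DigestA([u8; 32]);
struct DigestB([u8; 32]);

impl DigestA {
    fn into_inner(self) -> [u8; 32] {
        self.0
    }
}

impl DigestB {
    fn new(bytes: [u8; 32]) -> Self {
        Self(bytes)
    }
}

fn port(a: DigestA) -> DigestB {
    // Compile-time guard: if the two digest types ever diverge in layout,
    // this fails to build instead of silently corrupting data.
    const _: () = assert!(std::mem::size_of::<DigestA>() == std::mem::size_of::<DigestB>());
    DigestB::new(a.into_inner())
}

fn main() {
    let b = port(DigestA([7u8; 32]));
    assert_eq!(b.0[0], 7);
}
```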
        static_assertions::assert_eq_size!(ConsensusCommitDigest, ConsensusOutputDigest);
        ConsensusCommitDigest::new(self.digest().into_inner())
    }
diff --git a/crates/sui-core/src/consensus_validator.rs b/crates/sui-core/src/consensus_validator.rs
index 7c4534b6cbe..20acb4556c0 100644
--- a/crates/sui-core/src/consensus_validator.rs
+++ b/crates/sui-core/src/consensus_validator.rs
@@ -60,9 +60,11 @@ impl SuiTxValidator {
                    cert_batch.push(*certificate);

                    // if !certificate.contains_shared_object() {
-                    //     // new_unchecked safety: we do not use the certs in this list until all
-                    //     // have had their signatures verified.
-                    //     owned_tx_certs.push(VerifiedCertificate::new_unchecked(*certificate));
+                    //     // new_unchecked safety: we do not use the certs in this
+                    //     // list until all have had their signatures verified.
+                    //     owned_tx_certs
+                    //         .push(VerifiedCertificate::new_unchecked(*certificate));
                    // }
                }
                ConsensusTransactionKind::CheckpointSignature(signature) => {
@@ -88,7 +90,8 @@ impl SuiTxValidator {
            .tap_err(|e| warn!("batch verification error: {}", e))
            .wrap_err("Malformed batch (failed to verify)")?;

-        // All checkpoint sigs have been verified, forward them to the checkpoint service
+        // All checkpoint sigs have been verified, forward them to the checkpoint
+        // service
        for ckpt in ckpt_messages {
            self.checkpoint_service
                .notify_checkpoint_signature(&self.epoch_store, &ckpt)?;
@@ -102,11 +105,12 @@ impl SuiTxValidator {
            .inc_by(ckpt_count as u64);

        Ok(())

-        // todo - we should un-comment line below once we have a way to revert those transactions at the end of epoch
-        // all certificates had valid signatures, schedule them for execution prior to sequencing
+        // todo - we should un-comment line below once we have a way to revert
+        // those transactions at the end of epoch. All certificates had
+        // valid signatures, schedule them for execution prior to sequencing
        // which is unnecessary for owned object transactions.
-        // It is unnecessary to write to pending_certificates table because the certs will be written
-        // via Narwhal output.
+        // It is unnecessary to write to pending_certificates table because the
+        // certs will be written via Narwhal output.
        // self.transaction_manager
        //     .enqueue_certificates(owned_tx_certs, &self.epoch_store)
        //     .wrap_err("Failed to schedule certificates for execution")
@@ -122,7 +126,8 @@ impl TransactionValidator for SuiTxValidator {
    type Error = eyre::Report;

    fn validate(&self, _tx: &[u8]) -> Result<(), Self::Error> {
-        // We only accept transactions from local sui instance so no need to re-verify it
+        // We only accept transactions from the local sui instance so there is no need
+        // to re-verify them
        Ok(())
    }
@@ -279,7 +284,8 @@ mod tests {
        // TODO: Remove once we have removed BatchV1 from the codebase.
        let batch_v1 = Batch::V1(BatchV1::new(vec![]));

-        // Case #1: Receive BatchV1 but network has upgraded past v11 so we fail because we expect BatchV2
+        // Case #1: Receive BatchV1 but network has upgraded past v11 so we fail because
+        // we expect BatchV2
        let res_batch = validator.validate_batch(&batch_v1, latest_protocol_config);
        assert!(res_batch.is_err());
diff --git a/crates/sui-core/src/db_checkpoint_handler.rs b/crates/sui-core/src/db_checkpoint_handler.rs
index 455cbaec1e2..674ea80d384 100644
--- a/crates/sui-core/src/db_checkpoint_handler.rs
+++ b/crates/sui-core/src/db_checkpoint_handler.rs
@@ -1,32 +1,37 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
-use crate::authority::authority_store_pruner::{
-    AuthorityStorePruner, AuthorityStorePruningMetrics, EPOCH_DURATION_MS_FOR_TESTING,
-};
-use crate::authority::authority_store_tables::AuthorityPerpetualTables;
-use crate::checkpoints::CheckpointStore;
+use std::{fs, num::NonZeroUsize, path::PathBuf, sync::Arc, time::Duration};
+
use anyhow::Result;
use bytes::Bytes;
use futures::future::try_join_all;
-use object_store::path::Path;
-use object_store::DynObjectStore;
+use object_store::{path::Path, DynObjectStore};
use prometheus::{register_int_gauge_with_registry, IntGauge, Registry};
-use std::fs;
-use std::num::NonZeroUsize;
-use std::path::PathBuf;
-use std::sync::Arc;
-use std::time::Duration;
-use sui_config::node::AuthorityStorePruningConfig;
-use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType};
-use sui_storage::mutex_table::RwLockTable;
-use sui_storage::object_store::util::{
-    copy_recursively, find_all_dirs_with_epoch_prefix, find_missing_epochs_dirs,
-    path_to_filesystem, put, run_manifest_update_loop, write_snapshot_manifest,
+use sui_config::{
+    node::AuthorityStorePruningConfig,
+    object_storage_config::{ObjectStoreConfig, ObjectStoreType},
+};
+use sui_storage::{
+    mutex_table::RwLockTable,
+    object_store::util::{
+        copy_recursively, find_all_dirs_with_epoch_prefix, find_missing_epochs_dirs,
+        path_to_filesystem, put, run_manifest_update_loop, write_snapshot_manifest,
+    },
};
use tracing::{debug, error, info};
use typed_store::rocks::MetricConf;

+use crate::{
+    authority::{
+        authority_store_pruner::{
+            AuthorityStorePruner, AuthorityStorePruningMetrics, EPOCH_DURATION_MS_FOR_TESTING,
+        },
+        authority_store_tables::AuthorityPerpetualTables,
+    },
+    checkpoints::CheckpointStore,
+};
+
pub const SUCCESS_MARKER: &str = "_SUCCESS";
pub const TEST_MARKER: &str = "_TEST";
pub const UPLOAD_COMPLETED_MARKER: &str = "_UPLOAD_COMPLETED";
@@ -59,9 +64,11 @@ pub struct DBCheckpointHandler {
    output_object_store: Option<Arc<DynObjectStore>>,
    /// Time interval to check for presence of new db checkpoint
    interval: Duration,
-    /// File markers which signal that local db checkpoint can be garbage collected
+    /// File markers which signal that local db checkpoint can be garbage
+    /// collected
    gc_markers: Vec<String>,
-    /// Boolean flag to enable/disable object pruning and manual compaction before upload
+    /// Boolean flag to enable/disable object pruning and manual compaction
+    /// before upload
    prune_and_compact_before_upload: bool,
    /// Indirect object config for pruner
    indirect_objects_threshold: usize,
@@ -289,14 +296,18 @@ impl DBCheckpointHandler {
            .expect("Expected object store to exist")
            .clone();
        for (epoch, db_path) in dirs {
-            // Convert `db_path` to the local filesystem path to where db checkpoint is stored
+            // Convert `db_path` to the local filesystem path to where db checkpoint is
+            // stored
            let local_db_path = path_to_filesystem(self.input_root_path.clone(), db_path)?;
            if missing_epochs.contains(epoch) || *epoch >= last_missing_epoch {
                if self.state_snapshot_enabled {
                    let snapshot_completed_marker =
                        local_db_path.join(STATE_SNAPSHOT_COMPLETED_MARKER);
                    if !snapshot_completed_marker.exists() {
-                        info!("DB checkpoint upload for epoch {} to wait until state snasphot uploaded", *epoch);
+                        info!(
+                            "DB checkpoint upload for epoch {} to wait until state snapshot uploaded",
+                            *epoch
+                        );
                        continue;
                    }
                }
@@ -316,7 +327,8 @@ impl DBCheckpointHandler {
            )
            .await?;

-            // This writes a single "MANIFEST" file which contains a list of all files that make up a db
snapshot + // This writes a single "MANIFEST" file which contains a list of all files that + // make up a db snapshot write_snapshot_manifest(db_path, &object_store, format!("epoch_{}/", epoch)) .await?; // Drop marker in the output directory that upload completed successfully @@ -372,17 +384,19 @@ impl DBCheckpointHandler { #[cfg(test)] mod tests { - use crate::db_checkpoint_handler::{ - DBCheckpointHandler, SUCCESS_MARKER, TEST_MARKER, UPLOAD_COMPLETED_MARKER, - }; - use itertools::Itertools; use std::fs; + + use itertools::Itertools; use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; use sui_storage::object_store::util::{ find_all_dirs_with_epoch_prefix, find_missing_epochs_dirs, path_to_filesystem, }; use tempfile::TempDir; + use crate::db_checkpoint_handler::{ + DBCheckpointHandler, SUCCESS_MARKER, TEST_MARKER, UPLOAD_COMPLETED_MARKER, + }; + #[tokio::test] async fn test_basic() -> anyhow::Result<()> { let checkpoint_dir = TempDir::new()?; @@ -445,9 +459,11 @@ mod tests { assert!(remote_epoch0_checkpoint.join("file2").exists()); assert!(remote_epoch0_checkpoint.join("data").join("file3").exists()); assert!(remote_epoch0_checkpoint.join(SUCCESS_MARKER).exists()); - assert!(local_epoch0_checkpoint - .join(UPLOAD_COMPLETED_MARKER) - .exists()); + assert!( + local_epoch0_checkpoint + .join(UPLOAD_COMPLETED_MARKER) + .exists() + ); // Drop an extra gc marker meant only for gc to trigger let test_marker = local_epoch0_checkpoint.join(TEST_MARKER); @@ -513,9 +529,11 @@ mod tests { assert!(remote_epoch0_checkpoint.join("file2").exists()); assert!(remote_epoch0_checkpoint.join("data").join("file3").exists()); assert!(remote_epoch0_checkpoint.join(SUCCESS_MARKER).exists()); - assert!(local_epoch0_checkpoint - .join(UPLOAD_COMPLETED_MARKER) - .exists()); + assert!( + local_epoch0_checkpoint + .join(UPLOAD_COMPLETED_MARKER) + .exists() + ); // Add a new db checkpoint to the local checkpoint directory let local_epoch1_checkpoint = checkpoint_dir_path.join("epoch_1"); @@ -547,18 +565,22 @@ mod tests { assert!(remote_epoch0_checkpoint.join("file2").exists()); assert!(remote_epoch0_checkpoint.join("data").join("file3").exists()); assert!(remote_epoch0_checkpoint.join(SUCCESS_MARKER).exists()); - assert!(local_epoch0_checkpoint - .join(UPLOAD_COMPLETED_MARKER) - .exists()); + assert!( + local_epoch0_checkpoint + .join(UPLOAD_COMPLETED_MARKER) + .exists() + ); let remote_epoch1_checkpoint = remote_checkpoint_dir_path.join("epoch_1"); assert!(remote_epoch1_checkpoint.join("file1").exists()); assert!(remote_epoch1_checkpoint.join("file2").exists()); assert!(remote_epoch1_checkpoint.join("data").join("file3").exists()); assert!(remote_epoch1_checkpoint.join(SUCCESS_MARKER).exists()); - assert!(local_epoch1_checkpoint - .join(UPLOAD_COMPLETED_MARKER) - .exists()); + assert!( + local_epoch1_checkpoint + .join(UPLOAD_COMPLETED_MARKER) + .exists() + ); // Drop an extra gc marker meant only for gc to trigger let test_marker = local_epoch0_checkpoint.join(TEST_MARKER); diff --git a/crates/sui-core/src/epoch/committee_store.rs b/crates/sui-core/src/epoch/committee_store.rs index 40a7e3d99d0..d6a42b18c36 100644 --- a/crates/sui-core/src/epoch/committee_store.rs +++ b/crates/sui-core/src/epoch/committee_store.rs @@ -1,21 +1,26 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0

+use std::{
+    collections::HashMap,
+    path::{Path, PathBuf},
+    sync::Arc,
+};
+
use parking_lot::RwLock;
use rocksdb::Options;
-use std::collections::HashMap;
-use std::path::{Path, PathBuf};
-use std::sync::Arc;
-use sui_types::base_types::ObjectID;
-use sui_types::committee::{Committee, EpochId};
-use sui_types::error::{SuiError, SuiResult};
-use typed_store::rocks::{default_db_options, DBMap, DBOptions, MetricConf};
-use typed_store::traits::{TableSummary, TypedStoreDebug};
-
-use typed_store::Map;
-use typed_store_derive::DBMapUtils;
-
use sui_macros::nondeterministic;
+use sui_types::{
+    base_types::ObjectID,
+    committee::{Committee, EpochId},
+    error::{SuiError, SuiResult},
+};
+use typed_store::{
+    rocks::{default_db_options, DBMap, DBOptions, MetricConf},
+    traits::{TableSummary, TypedStoreDebug},
+    Map,
+};
+use typed_store_derive::DBMapUtils;

pub struct CommitteeStore {
    tables: CommitteeStoreTables,
@@ -69,7 +74,8 @@ impl CommitteeStore {

    pub fn insert_new_committee(&self, new_committee: &Committee) -> SuiResult {
        if let Some(old_committee) = self.get_committee(&new_committee.epoch)? {
-            // If somehow we already have this committee in the store, they must be the same.
+            // If somehow we already have this committee in the store, they must be the
+            // same.
            assert_eq!(&*old_committee, new_committee);
        } else {
            self.tables
@@ -106,7 +112,8 @@ impl CommitteeStore {
            .unwrap()
            .1
    }
-    /// Return the committee specified by `epoch`. If `epoch` is `None`, return the latest committee.
+    /// Return the committee specified by `epoch`. If `epoch` is `None`, return
+    /// the latest committee.
    // todo - make use of cache or remove this method
    pub fn get_or_latest_committee(&self, epoch: Option<EpochId>) -> SuiResult<Committee> {
        Ok(match epoch {
diff --git a/crates/sui-core/src/epoch/data_removal.rs b/crates/sui-core/src/epoch/data_removal.rs
index 76455294e6c..88c18752450 100644
--- a/crates/sui-core/src/epoch/data_removal.rs
+++ b/crates/sui-core/src/epoch/data_removal.rs
@@ -5,10 +5,10 @@
#[path = "../unit_tests/epoch_data_tests.rs"]
pub mod epoch_data_tests;

+use std::{fs, path::PathBuf};
+
use mysten_metrics::spawn_monitored_task;
use narwhal_config::Epoch;
-use std::fs;
-use std::path::PathBuf;
use tokio::sync::mpsc;

pub struct EpochDataRemover {
@@ -61,7 +61,8 @@ pub(crate) fn remove_old_epoch_data(storage_base_path: PathBuf, epoch: Epoch) {
        return;
    }

-    // Keep previous epoch data as a safety buffer and remove starting from epoch - 1
+    // Keep previous epoch data as a safety buffer and remove starting from
+    // epoch - 1
    let drop_boundary = epoch - 1;

    tracing::info!(
@@ -73,7 +74,10 @@ pub(crate) fn remove_old_epoch_data(storage_base_path: PathBuf, epoch: Epoch) {
    let files = match fs::read_dir(storage_base_path) {
        Ok(f) => f,
        Err(e) => {
-            tracing::error!("Data Remover cannot read the files in the storage path directory for epoch cleanup: {:?}", e);
+            tracing::error!(
+                "Data Remover cannot read the files in the storage path directory for epoch cleanup: {:?}",
+                e
+            );
            return;
        }
    };
@@ -100,7 +104,10 @@ pub(crate) fn remove_old_epoch_data(storage_base_path: PathBuf, epoch: Epoch) {
        let file_epoch = match file_epoch_string.to_owned().parse::<u64>() {
            Ok(f) => f,
            Err(e) => {
-                tracing::error!("Data Remover could not parse file in storage path into epoch for cleanup: {:?}",e);
+                tracing::error!(
+                    "Data Remover could not parse file in storage path into epoch for cleanup: {:?}",
+                    e
+                );
                continue;
            }
        };
diff --git a/crates/sui-core/src/epoch/epoch_metrics.rs
b/crates/sui-core/src/epoch/epoch_metrics.rs
index 9a1850e534b..7ba5bee8176 100644
--- a/crates/sui-core/src/epoch/epoch_metrics.rs
+++ b/crates/sui-core/src/epoch/epoch_metrics.rs
@@ -1,18 +1,22 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

-use prometheus::{register_int_gauge_with_registry, IntGauge, Registry};
use std::sync::Arc;

+use prometheus::{register_int_gauge_with_registry, IntGauge, Registry};
+
pub struct EpochMetrics {
-    /// The current epoch ID. This is updated only when the AuthorityState finishes reconfiguration.
+    /// The current epoch ID. This is updated only when the AuthorityState
+    /// finishes reconfiguration.
    pub current_epoch: IntGauge,

-    /// Current voting right of the validator in the protocol. Updated at the start of epochs.
+    /// Current voting right of the validator in the protocol. Updated at the
+    /// start of epochs.
    pub current_voting_right: IntGauge,

-    /// Total duration of the epoch. This is measured from when the current epoch store is opened,
-    /// until the current epoch store is replaced with the next epoch store.
+    /// Total duration of the epoch. This is measured from when the current
+    /// epoch store is opened, until the current epoch store is replaced
+    /// with the next epoch store.
    pub epoch_total_duration: IntGauge,

    /// Number of checkpoints in the epoch.
@@ -32,71 +36,83 @@ pub struct EpochMetrics {
    // 5. CheckpointExecutor finishes executing the last checkpoint, and triggers reconfiguration.
    // 6. During reconfiguration, we tear down Narwhal, reconfigure state (at which point we open
    //    up user certs), and start Narwhal again.
-    // 7. After reconfiguration, and eventually Narwhal starts successfully, at some point the first
-    //    checkpoint of the new epoch will be created.
+    // 7. After reconfiguration, and eventually Narwhal starts successfully, at some point the
+    //    first checkpoint of the new epoch will be created.
    // We introduce various metrics to cover the latency of above steps.
-    /// The duration from when the epoch is closed (i.e. validator halted) to when all pending
-    /// certificates are processed (i.e. ready to send EndOfPublish message).
-    /// This is the duration of (1) through (2) above.
+    /// The duration from when the epoch is closed (i.e. validator halted) to
+    /// when all pending certificates are processed (i.e. ready to send
+    /// EndOfPublish message). This is the duration of (1) through (2)
+    /// above.
    pub epoch_pending_certs_processed_time_since_epoch_close_ms: IntGauge,

-    /// The interval from when the epoch is closed to when we receive 2f+1 EndOfPublish messages.
-    /// This is the duration of (1) through (3) above.
+    /// The interval from when the epoch is closed to when we receive 2f+1
+    /// EndOfPublish messages. This is the duration of (1) through (3)
+    /// above.
    pub epoch_end_of_publish_quorum_time_since_epoch_close_ms: IntGauge,

-    /// The interval from when the epoch is closed to when we created the last checkpoint of the
-    /// epoch.
+    /// The interval from when the epoch is closed to when we created the last
+    /// checkpoint of the epoch.
    /// This is the duration of (1) through (4) above.
    pub epoch_last_checkpoint_created_time_since_epoch_close_ms: IntGauge,

-    /// The interval from when the epoch is closed to when we finished executing the last transaction
-    /// of the checkpoint (and hence triggering reconfiguration process).
-    /// This is the duration of (1) through (5) above.
+ /// The interval from when the epoch is closed to when we finished executing + /// the last transaction of the checkpoint (and hence triggering + /// reconfiguration process). This is the duration of (1) through (5) + /// above. pub epoch_reconfig_start_time_since_epoch_close_ms: IntGauge, - /// The total duration when this validator is halted, and hence does not accept certs from users. - /// This is the duration of (1) through (6) above, and is the most important latency metric - /// reflecting reconfiguration delay for each validator. + /// The total duration when this validator is halted, and hence does not + /// accept certs from users. This is the duration of (1) through (6) + /// above, and is the most important latency metric reflecting + /// reconfiguration delay for each validator. pub epoch_validator_halt_duration_ms: IntGauge, - /// The interval from when the epoch begins (i.e. right after state reconfigure, when the new - /// epoch_store is created), to when the first checkpoint of the epoch is ready for creation locally. - /// This is (7) above, and is a good proxy to how long it takes for the validator - /// to become useful in the network after reconfiguration. + /// The interval from when the epoch begins (i.e. right after state + /// reconfigure, when the new epoch_store is created), to when the first + /// checkpoint of the epoch is ready for creation locally. This is (7) + /// above, and is a good proxy to how long it takes for the validator to + /// become useful in the network after reconfiguration. // TODO: This needs to be reported properly. pub epoch_first_checkpoint_ready_time_since_epoch_begin_ms: IntGauge, - /// Whether we are running in safe mode where reward distribution and tokenomics are disabled. + /// Whether we are running in safe mode where reward distribution and + /// tokenomics are disabled. pub is_safe_mode: IntGauge, - /// When building the last checkpoint of the epoch, we execute advance epoch transaction once - /// without committing results to the store. It's useful to know whether this execution leads - /// to safe_mode, since in theory the result could be different from checkpoint executor. + /// When building the last checkpoint of the epoch, we execute advance epoch + /// transaction once without committing results to the store. It's + /// useful to know whether this execution leads to safe_mode, since in + /// theory the result could be different from checkpoint executor. pub checkpoint_builder_advance_epoch_is_safe_mode: IntGauge, /// Buffer stake current in effect for this epoch pub effective_buffer_stake: IntGauge, - /// Set to 1 if the random beacon DKG protocol failed for the most recent epoch. + /// Set to 1 if the random beacon DKG protocol failed for the most recent + /// epoch. pub epoch_random_beacon_dkg_failed: IntGauge, - /// The number of shares held by this node after the random beacon DKG protocol completed. + /// The number of shares held by this node after the random beacon DKG + /// protocol completed. pub epoch_random_beacon_dkg_num_shares: IntGauge, - /// The amount of time taken from epoch start to completion of random beacon DKG protocol, - /// for the most recent epoch. + /// The amount of time taken from epoch start to completion of random beacon + /// DKG protocol, for the most recent epoch. 
    pub epoch_random_beacon_dkg_epoch_start_completion_time_ms: IntGauge,

-    /// The amount of time taken to complete random beacon DKG protocol from the time it was
-    /// started (which may be a bit after the epcoh began), for the most recent epoch.
+    /// The amount of time taken to complete random beacon DKG protocol from the
+    /// time it was started (which may be a bit after the epoch began), for
+    /// the most recent epoch.
    pub epoch_random_beacon_dkg_completion_time_ms: IntGauge,

-    /// The amount of time taken to start first phase of the random beacon DKG protocol,
-    /// at which point the node has submitted a DKG Message, for the most recent epoch.
+    /// The amount of time taken to start the first phase of the random beacon
+    /// DKG protocol, at which point the node has submitted a DKG Message,
+    /// for the most recent epoch.
    pub epoch_random_beacon_dkg_message_time_ms: IntGauge,

-    /// The amount of time taken to complete first phase of the random beacon DKG protocol,
-    /// at which point the node has submitted a DKG Confirmation, for the most recent epoch.
+    /// The amount of time taken to complete the first phase of the random
+    /// beacon DKG protocol, at which point the node has submitted a DKG
+    /// Confirmation, for the most recent epoch.
    pub epoch_random_beacon_dkg_confirmation_time_ms: IntGauge,
}
diff --git a/crates/sui-core/src/epoch/randomness.rs b/crates/sui-core/src/epoch/randomness.rs
index cdec42dcbc8..47bf1cc2061 100644
--- a/crates/sui-core/src/epoch/randomness.rs
+++ b/crates/sui-core/src/epoch/randomness.rs
@@ -1,38 +1,47 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

+use std::{
+    collections::{BTreeMap, HashMap},
+    sync::{Arc, Weak},
+    time::Instant,
+};
+
use anemo::PeerId;
-use fastcrypto::encoding::{Encoding, Hex};
-use fastcrypto::error::FastCryptoError;
-use fastcrypto::groups::bls12381;
-use fastcrypto::serde_helpers::ToFromByteArray;
-use fastcrypto::traits::{KeyPair, ToFromBytes};
-use fastcrypto_tbls::nodes::PartyId;
-use fastcrypto_tbls::{dkg, nodes};
-use futures::stream::FuturesUnordered;
-use futures::StreamExt;
+use fastcrypto::{
+    encoding::{Encoding, Hex},
+    error::FastCryptoError,
+    groups::bls12381,
+    serde_helpers::ToFromByteArray,
+    traits::{KeyPair, ToFromBytes},
+};
+use fastcrypto_tbls::{dkg, nodes, nodes::PartyId};
+use futures::{stream::FuturesUnordered, StreamExt};
use narwhal_types::Round;
-use rand::rngs::{OsRng, StdRng};
-use rand::SeedableRng;
-use std::collections::{BTreeMap, HashMap};
-use std::sync::{Arc, Weak};
-use std::time::Instant;
+use rand::{
+    rngs::{OsRng, StdRng},
+    SeedableRng,
+};
use sui_network::randomness;
-use sui_types::base_types::AuthorityName;
-use sui_types::committee::{Committee, EpochId, StakeUnit};
-use sui_types::crypto::{AuthorityKeyPair, RandomnessRound};
-use sui_types::error::{SuiError, SuiResult};
-use sui_types::messages_consensus::ConsensusTransaction;
-use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait;
-use tokio::sync::OnceCell;
-use tokio::task::JoinHandle;
+use sui_types::{
+    base_types::AuthorityName,
+    committee::{Committee, EpochId, StakeUnit},
+    crypto::{AuthorityKeyPair, RandomnessRound},
+    error::{SuiError, SuiResult},
+    messages_consensus::ConsensusTransaction,
+    sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait,
+};
+use tokio::{sync::OnceCell, task::JoinHandle};
use tracing::{debug, error, info, warn};
-use typed_store::rocks::DBBatch;
-use typed_store::Map;
+use typed_store::{rocks::DBBatch, Map};

-use
crate::authority::authority_per_epoch_store::{AuthorityEpochTables, AuthorityPerEpochStore};
-use crate::authority::epoch_start_configuration::EpochStartConfigTrait;
-use crate::consensus_adapter::ConsensusAdapter;
+use crate::{
+    authority::{
+        authority_per_epoch_store::{AuthorityEpochTables, AuthorityPerEpochStore},
+        epoch_start_configuration::EpochStartConfigTrait,
+    },
+    consensus_adapter::ConsensusAdapter,
+};

type PkG = bls12381::G2Element;
type EncG = bls12381::G2Element;
@@ -43,21 +52,25 @@ const SINGLETON_KEY: u64 = 0;
//
// DKG protocol:
// 1. This validator sends out a `Message` to all other validators.
-// 2. Once sufficient valid `Message`s are received from other validators via consensus and
-//    procesed, this validator sends out a `Confirmation` to all other validators.
-// 3. Once sufficient `Confirmation`s are received from other validators via consensus and
-//    processed, they are combined to form a public VSS key and local private key shares.
+// 2. Once sufficient valid `Message`s are received from other validators via
+//    consensus and processed, this validator sends out a `Confirmation` to all
+//    other validators.
+// 3. Once sufficient `Confirmation`s are received from other validators via
+//    consensus and processed, they are combined to form a public VSS key and
+//    local private key shares.
// 4. Randomness generation begins.
//
// Randomness generation:
-// 1. For each new round, AuthorityPerEpochStore eventually calls `generate_randomness`.
-// 2. This kicks off a process in RandomnessEventLoop to send partial signatures for the new
-//    round to all other validators.
-// 3. Once enough partial signautres for the round are collected, a RandomnessStateUpdate
-//    transaction is generated and injected into the TransactionManager.
-// 4. Once the RandomnessStateUpdate transaction is seen in a certified checkpoint,
-//    `notify_randomness_in_checkpoint` is called to complete the round and stop sending
-//    partial signatures for it.
+// 1. For each new round, AuthorityPerEpochStore eventually calls
+//    `generate_randomness`.
+// 2. This kicks off a process in RandomnessEventLoop to send partial signatures
+//    for the new round to all other validators.
+// 3. Once enough partial signatures for the round are collected, a
+//    RandomnessStateUpdate transaction is generated and injected into the
+//    TransactionManager.
+// 4. Once the RandomnessStateUpdate transaction is seen in a certified
+//    checkpoint, `notify_randomness_in_checkpoint` is called to complete the
+//    round and stop sending partial signatures for it.
pub struct RandomnessManager {
    epoch_store: Weak<AuthorityPerEpochStore>,
    consensus_adapter: Arc<ConsensusAdapter>,
@@ -97,7 +110,9 @@ impl RandomnessManager {
        let tables = match epoch_store.tables() {
            Ok(tables) => tables,
            Err(_) => {
-                error!("could not construct RandomnessManager: AuthorityPerEpochStore tables already gone");
+                error!(
+                    "could not construct RandomnessManager: AuthorityPerEpochStore tables already gone"
+                );
                return None;
            }
        };
@@ -110,7 +125,9 @@ impl RandomnessManager {
            // Log first few entries in DKG info for debugging.
            for (id, name, pk, stake) in info.iter().filter(|(id, _, _, _)| *id < 3) {
                let pk_bytes = pk.as_element().to_byte_array();
-                debug!("random beacon: DKG info: id={id}, stake={stake}, name={name}, pk={pk_bytes:x?}");
+                debug!(
+                    "random beacon: DKG info: id={id}, stake={stake}, name={name}, pk={pk_bytes:x?}"
+                );
            }
        }
        let authority_ids: HashMap<_, _> =
@@ -255,8 +272,8 @@ impl RandomnessManager {

        // Resume randomness generation from where we left off.
// This must be loaded regardless of whether DKG has finished yet, since the - // RandomnessEventLoop and commit-handling logic in AuthorityPerEpochStore both depend on - // this state. + // RandomnessEventLoop and commit-handling logic in AuthorityPerEpochStore both + // depend on this state. rm.next_randomness_round = tables .randomness_next_round .get(&SINGLETON_KEY) @@ -326,8 +343,9 @@ impl RandomnessManager { Ok(()) } - /// Processes all received messages and advances the randomness DKG state machine when possible, - /// sending out a dkg::Confirmation and generating final output. + /// Processes all received messages and advances the randomness DKG state + /// machine when possible, sending out a dkg::Confirmation and + /// generating final output. pub async fn advance_dkg(&mut self, batch: &mut DBBatch, round: Round) -> SuiResult { let epoch_store = self.epoch_store()?; @@ -384,7 +402,8 @@ impl RandomnessManager { .set(elapsed as i64); } } - Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => (), // wait for more input + Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => (), // wait for more + // input Err(e) => debug!("random beacon: error while merging DKG Messages: {e:?}"), } } @@ -402,7 +421,9 @@ impl RandomnessManager { let num_shares = output.shares.as_ref().map_or(0, |shares| shares.len()); let epoch_elapsed = epoch_store.epoch_open_time.elapsed().as_millis(); let elapsed = self.dkg_start_time.get().map(|t| t.elapsed().as_millis()); - info!("random beacon: DKG complete in {epoch_elapsed}ms since epoch start, {elapsed:?}ms since DKG start, with {num_shares} shares for this node"); + info!( + "random beacon: DKG complete in {epoch_elapsed}ms since epoch start, {elapsed:?}ms since DKG start, with {num_shares} shares for this node" + ); epoch_store .metrics .epoch_random_beacon_dkg_num_shares @@ -432,7 +453,8 @@ impl RandomnessManager { std::iter::once((SINGLETON_KEY, output)), )?; } - Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => (), // wait for more input + Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => (), // wait for more + // input Err(e) => error!("random beacon: error while processing DKG Confirmations: {e:?}"), } } @@ -445,7 +467,9 @@ impl RandomnessManager { .random_beacon_dkg_timeout_round() .into() { - error!("random beacon: DKG timed out. Randomness disabled for this epoch. All randomness-using transactions will fail."); + error!( + "random beacon: DKG timed out. Randomness disabled for this epoch. All randomness-using transactions will fail." + ); epoch_store.metrics.epoch_random_beacon_dkg_failed.set(1); self.dkg_output .set(None) @@ -470,7 +494,9 @@ impl RandomnessManager { return Ok(()); }; if *party_id != msg.sender { - warn!("ignoring equivocating DKG Message from authority {authority:?} pretending to be PartyId {party_id:?}"); + warn!( + "ignoring equivocating DKG Message from authority {authority:?} pretending to be PartyId {party_id:?}" + ); return Ok(()); } if self.enqueued_messages.contains_key(&msg.sender) @@ -481,7 +507,8 @@ impl RandomnessManager { } let party = self.party.clone(); - // TODO: Could save some CPU by not processing messages if we already have enough to merge. + // TODO: Could save some CPU by not processing messages if we already have + // enough to merge. 
        self.enqueued_messages.insert(
            msg.sender,
            tokio::task::spawn_blocking(move || {
@@ -515,7 +542,9 @@ impl RandomnessManager {
            return Ok(());
        };
        if *party_id != conf.sender {
-            warn!("ignoring equivocating DKG Confirmation from authority {authority:?} pretending to be PartyId {party_id:?}");
+            warn!(
+                "ignoring equivocating DKG Confirmation from authority {authority:?} pretending to be PartyId {party_id:?}"
+            );
            return Ok(());
        }
        self.confirmations.insert(conf.sender, conf.clone());
@@ -526,9 +555,10 @@ impl RandomnessManager {
        Ok(())
    }

-    /// Reserves the next available round number for randomness generation. Once the given
-    /// batch is written, `generate_randomness` must be called to start the process. On restart,
-    /// any reserved rounds for which the batch was written will automatically be resumed.
+    /// Reserves the next available round number for randomness generation. Once
+    /// the given batch is written, `generate_randomness` must be called to
+    /// start the process. On restart, any reserved rounds for which the
+    /// batch was written will automatically be resumed.
    pub fn reserve_next_randomness(&mut self, batch: &mut DBBatch) -> SuiResult {
        let tables = self.tables()?;
@@ -556,7 +586,8 @@ impl RandomnessManager {
            .send_partial_signatures(epoch, randomness_round);
    }

-    /// Returns true if DKG is over for this epoch, whether due to success or failure.
+    /// Returns true if DKG is over for this epoch, whether due to success or
+    /// failure.
    pub fn is_dkg_closed(&self) -> bool {
        self.dkg_output.initialized()
    }
@@ -566,7 +597,8 @@ impl RandomnessManager {
        self.dkg_output.get().and_then(|opt| opt.as_ref()).is_some()
    }

-    /// Generates a new RandomnessReporter for reporting observed rounds to this RandomnessManager.
+    /// Generates a new RandomnessReporter for reporting observed rounds to this
+    /// RandomnessManager.
    pub fn reporter(&self) -> RandomnessReporter {
        RandomnessReporter {
            epoch_store: self.epoch_store.clone(),
@@ -618,7 +650,8 @@ impl RandomnessManager {
    }
}

-// Used by other components to notify the randomness system of observed randomness.
+// Used by other components to notify the randomness system of observed
+// randomness.
#[derive(Clone)]
pub struct RandomnessReporter {
    epoch_store: Weak<AuthorityPerEpochStore>,
@@ -626,9 +659,9 @@ pub struct RandomnessReporter {
}

impl RandomnessReporter {
-    /// Notifies the associated randomness manager that randomness for the given round has been
-    /// durably committed in a checkpoint. This completes the process of generating randomness for
-    /// the round.
+    /// Notifies the associated randomness manager that randomness for the given
+    /// round has been durably committed in a checkpoint. This completes the
+    /// process of generating randomness for the round.
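A rough sketch of the reserve-then-generate bookkeeping described by these doc comments, under stated assumptions: `RandomnessRounds` is an invented name, the persisted table is modeled as an in-memory map, and the real code writes the counter atomically through a `DBBatch` rather than directly.

```rust
use std::collections::BTreeMap;

struct RandomnessRounds {
    next_round: u64,
    // Stand-in for the typed-store table; in the real code this write is
    // bundled into the caller's DBBatch so reservation is durable.
    store: BTreeMap<&'static str, u64>,
    highest_completed: Option<u64>,
}

impl RandomnessRounds {
    // Reserve the next round number; generation starts only after the
    // persisted counter is written.
    fn reserve_next(&mut self) -> u64 {
        let round = self.next_round;
        self.next_round += 1;
        self.store.insert("randomness_next_round", self.next_round);
        round
    }

    // Called once the round's output is durably committed in a checkpoint;
    // this is what stops partial-signature sending for the round.
    fn notify_randomness_in_checkpoint(&mut self, round: u64) {
        self.highest_completed = Some(self.highest_completed.map_or(round, |h| h.max(round)));
    }
}

fn main() {
    let mut rounds = RandomnessRounds {
        next_round: 0,
        store: BTreeMap::new(),
        highest_completed: None,
    };
    let r0 = rounds.reserve_next();
    assert_eq!(r0, 0);
    rounds.notify_randomness_in_checkpoint(r0);
    assert_eq!(rounds.highest_completed, Some(0));
    // On restart, generation resumes from the persisted next round.
    assert_eq!(rounds.store["randomness_next_round"], 1);
}
```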
pub fn notify_randomness_in_checkpoint(&self, round: RandomnessRound) -> SuiResult { let epoch_store = self.epoch_store.upgrade().ok_or(SuiError::EpochEnded)?; epoch_store @@ -643,6 +676,11 @@ impl RandomnessReporter { #[cfg(test)] mod tests { + use std::num::NonZeroUsize; + + use sui_types::messages_consensus::ConsensusTransactionKind; + use tokio::sync::mpsc; + use crate::{ authority::test_authority_builder::TestAuthorityBuilder, consensus_adapter::{ @@ -651,9 +689,6 @@ mod tests { }, epoch::randomness::*, }; - use std::num::NonZeroUsize; - use sui_types::messages_consensus::ConsensusTransactionKind; - use tokio::sync::mpsc; #[tokio::test] async fn test_dkg() { diff --git a/crates/sui-core/src/epoch/reconfiguration.rs b/crates/sui-core/src/epoch/reconfiguration.rs index 0b492ebe1d6..956938d7a1e 100644 --- a/crates/sui-core/src/epoch/reconfiguration.rs +++ b/crates/sui-core/src/epoch/reconfiguration.rs @@ -1,10 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use serde::{Deserialize, Serialize}; use std::sync::Arc; +use serde::{Deserialize, Serialize}; + +use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; + #[derive(Clone, Debug, Serialize, Deserialize)] pub enum ReconfigCertStatus { AcceptAllCerts, diff --git a/crates/sui-core/src/execution_cache.rs b/crates/sui-core/src/execution_cache.rs index 15fed352a90..61dd4919648 100644 --- a/crates/sui-core/src/execution_cache.rs +++ b/crates/sui-core/src/execution_cache.rs @@ -1,41 +1,39 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::authority_store::{ExecutionLockWriteGuard, SuiLockResult}; -use crate::authority::epoch_start_configuration::EpochFlag; -use crate::authority::{ - authority_notify_read::EffectsNotifyRead, epoch_start_configuration::EpochStartConfiguration, -}; -use crate::transaction_outputs::TransactionOutputs; -use async_trait::async_trait; +use std::{collections::HashSet, path::Path, sync::Arc}; +use async_trait::async_trait; use futures::{future::BoxFuture, FutureExt}; use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; -use std::collections::HashSet; -use std::path::Path; -use std::sync::Arc; use sui_protocol_config::ProtocolVersion; -use sui_types::base_types::VerifiedExecutionData; -use sui_types::digests::{TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest}; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::error::{SuiError, SuiResult, UserInputError}; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::object::Object; -use sui_types::storage::{ - error::{Error as StorageError, Result as StorageResult}, - BackingPackageStore, ChildObjectResolver, MarkerValue, ObjectKey, ObjectOrTombstone, - ObjectStore, PackageObject, ParentSync, -}; -use sui_types::sui_system_state::SuiSystemState; -use sui_types::transaction::{VerifiedSignedTransaction, VerifiedTransaction}; use sui_types::{ - base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber}, - object::Owner, - storage::InputKey, + base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, VerifiedExecutionData}, + digests::{TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest}, + effects::{TransactionEffects, TransactionEvents}, + error::{SuiError, SuiResult, UserInputError}, + 
messages_checkpoint::CheckpointSequenceNumber,
+    object::{Object, Owner},
+    storage::{
+        error::{Error as StorageError, Result as StorageResult},
+        BackingPackageStore, ChildObjectResolver, InputKey, MarkerValue, ObjectKey,
+        ObjectOrTombstone, ObjectStore, PackageObject, ParentSync,
+    },
+    sui_system_state::SuiSystemState,
+    transaction::{VerifiedSignedTransaction, VerifiedTransaction},
};
use tracing::instrument;

+use crate::{
+    authority::{
+        authority_notify_read::EffectsNotifyRead,
+        authority_per_epoch_store::AuthorityPerEpochStore,
+        authority_store::{ExecutionLockWriteGuard, SuiLockResult},
+        epoch_start_configuration::{EpochFlag, EpochStartConfiguration},
+    },
+    transaction_outputs::TransactionOutputs,
+};
+
pub(crate) mod cached_version_map;
pub mod passthrough_cache;
pub mod writeback_cache;
@@ -63,9 +61,10 @@ impl ExecutionCacheMetrics {

pub type ExecutionCache = PassthroughCache;

pub trait ExecutionCacheCommit: Send + Sync {
-    /// Durably commit the transaction outputs of the given transaction to the database.
-    /// Will be called by CheckpointExecutor to ensure that transaction outputs are
-    /// written durably before marking a checkpoint as finalized.
+    /// Durably commit the transaction outputs of the given transaction to the
+    /// database. Will be called by CheckpointExecutor to ensure that
+    /// transaction outputs are written durably before marking a checkpoint
+    /// as finalized.
    fn commit_transaction_outputs(
        &self,
        epoch: EpochId,
@@ -104,7 +103,7 @@ pub trait ExecutionCacheRead: Send + Sync {
    ) -> SuiResult>;

    fn multi_get_objects_by_key(&self, object_keys: &[ObjectKey])
-        -> SuiResult<Vec<Option<Object>>>;
+    -> SuiResult<Vec<Option<Object>>>;

    fn object_exists_by_key(
        &self,
@@ -117,10 +116,11 @@ pub trait ExecutionCacheRead: Send + Sync {
    /// Load a list of objects from the store by object reference.
    /// If they exist in the store, they are returned directly.
    /// If any object missing, we try to figure out the best error to return.
-    /// If the object we are asking is currently locked at a future version, we know this
-    /// transaction is out-of-date and we return a ObjectVersionUnavailableForConsumption,
-    /// which indicates this is not retriable.
-    /// Otherwise, we return a ObjectNotFound error, which indicates this is retriable.
+    /// If the object we are asking is currently locked at a future version, we
+    /// know this transaction is out-of-date and we return a
+    /// ObjectVersionUnavailableForConsumption, which indicates this is not
+    /// retriable. Otherwise, we return a ObjectNotFound error, which
+    /// indicates this is retriable.
    fn multi_get_objects_with_more_accurate_error_return(
        &self,
        object_refs: &[ObjectRef],
@@ -155,9 +155,11 @@ pub trait ExecutionCacheRead: Send + Sync {
        Ok(result)
    }

-    /// Used by transaction manager to determine if input objects are ready. Distinct from multi_get_object_by_key
-    /// because it also consults markers to handle the case where an object will never become available (e.g.
-    /// because it has been received by some other transaction already).
+    /// Used by transaction manager to determine if input objects are ready.
+    /// Distinct from multi_get_object_by_key because it also consults
+    /// markers to handle the case where an object will never become available
+    /// (e.g. because it has been received by some other transaction
+    /// already).
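The availability rule described above (an exact-version hit, plus a receiving-object fallback that accepts any live version at or above the requested one so the transaction can proceed and fail at execution instead of waiting forever) can be sketched as follows. All types here are simplified stand-ins, not the real `InputKey` and marker machinery.

```rust
use std::collections::{BTreeMap, HashSet};

type ObjectId = u32;
type Version = u64;

fn input_available(
    live: &BTreeMap<ObjectId, Version>,        // latest live version per object
    exact: &HashSet<(ObjectId, Version)>,      // objects present at the exact version
    receiving: &HashSet<(ObjectId, Version)>,  // keys that are receiving objects
    key: (ObjectId, Version),
) -> bool {
    if exact.contains(&key) {
        return true;
    }
    if receiving.contains(&key) {
        // A newer version may exist while the requested one was pruned, so a
        // receiving object counts as available if anything at or above the
        // requested version exists.
        return live.get(&key.0).is_some_and(|v| *v >= key.1);
    }
    false
}

fn main() {
    let live = BTreeMap::from([(1, 5)]);
    let exact = HashSet::from([(1, 5)]);
    let receiving = HashSet::from([(1, 3)]);
    assert!(input_available(&live, &exact, &receiving, (1, 5)));
    // Requested version pruned, but a newer live version exists.
    assert!(input_available(&live, &exact, &receiving, (1, 3)));
    assert!(!input_available(&live, &exact, &receiving, (2, 1)));
}
```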
    fn multi_input_objects_available(
        &self,
        keys: &[InputKey],
@@ -184,10 +186,11 @@ pub trait ExecutionCacheRead: Send + Sync {
                versioned_results.push((*idx, true))
            } else if receiving_objects.contains(input_key) {
                // There could be a more recent version of this object, and the object at the
-                // specified version could have already been pruned. In such a case `has_key` will
-                // be false, but since this is a receiving object we should mark it as available if
-                // we can determine that an object with a version greater than or equal to the
-                // specified version exists or was deleted. We will then let mark it as available
+                // specified version could have already been pruned. In such a case `has_key`
+                // will be false, but since this is a receiving object we should
+                // mark it as available if we can determine that an object with
+                // a version greater than or equal to the specified version
+                // exists or was deleted. We will then mark it as available
                // to let the transaction through so it can fail at execution.
                let is_available = self
                    .get_object(&input_key.id())?
@@ -207,8 +210,9 @@ pub trait ExecutionCacheRead: Send + Sync {
                )?
                .is_some()
            {
-                // If the object is an already deleted shared object, mark it as available if the
-                // version for that object is in the shared deleted marker table.
+                // If the object is an already deleted shared object, mark it as available if
+                // the version for that object is in the shared deleted marker
+                // table.
                versioned_results.push((*idx, true));
            } else {
                versioned_results.push((*idx, false));
@@ -236,10 +240,11 @@ pub trait ExecutionCacheRead: Send + Sync {
        Ok(results.into_iter().map(|(_, result)| result).collect())
    }

-    /// Return the object with version less then or eq to the provided seq number.
-    /// This is used by indexer to find the correct version of dynamic field child object.
-    /// We do not store the version of the child object, but because of lamport timestamp,
-    /// we know the child must have version number less then or eq to the parent.
+    /// Return the object with version less than or eq to the provided seq
+    /// number. This is used by the indexer to find the correct version of a
+    /// dynamic field child object. We do not store the version of the child
+    /// object, but because of the lamport timestamp, we know the child must
+    /// have a version number less than or eq to the parent.
    fn find_object_lt_or_eq_version(
        &self,
        object_id: ObjectID,
@@ -248,7 +253,8 @@ pub trait ExecutionCacheRead: Send + Sync {

    fn get_lock(&self, obj_ref: ObjectRef, epoch_store: &AuthorityPerEpochStore) -> SuiLockResult;

-    // This method is considered "private" - only used by multi_get_objects_with_more_accurate_error_return
+    // This method is considered "private" - only used by
+    // multi_get_objects_with_more_accurate_error_return
    fn _get_latest_lock_for_object_id(&self, object_id: ObjectID) -> SuiResult;

    fn check_owned_object_locks_exist(&self, owned_object_refs: &[ObjectRef]) -> SuiResult;
@@ -416,7 +422,8 @@ pub trait ExecutionCacheRead: Send + Sync {
        epoch_id: EpochId,
    ) -> SuiResult>;

-    /// If the shared object was deleted, return deletion info for the current live version
+    /// If the shared object was deleted, return deletion info for the current
+    /// live version
    fn get_last_shared_object_deletion_info(
        &self,
        object_id: &ObjectID,
@@ -428,7 +435,8 @@ pub trait ExecutionCacheRead: Send + Sync {
        }
    }

-    /// If the shared object was deleted, return deletion info for the specified version.
+ /// If the shared object was deleted, return deletion info for the specified + /// version. fn get_deleted_shared_object_previous_tx_digest( &self, object_id: &ObjectID, @@ -471,21 +479,24 @@ pub trait ExecutionCacheRead: Send + Sync { pub trait ExecutionCacheWrite: Send + Sync { /// Write the output of a transaction. /// - /// Because of the child object consistency rule (readers that observe parents must observe all - /// children of that parent, up to the parent's version bound), implementations of this method - /// must not write any top-level (address-owned or shared) objects before they have written all + /// Because of the child object consistency rule (readers that observe + /// parents must observe all children of that parent, up to the parent's + /// version bound), implementations of this method must not write any + /// top-level (address-owned or shared) objects before they have written all /// of the object-owned objects (i.e. child objects) in the `objects` list. /// - /// In the future, we may modify this method to expose finer-grained information about - /// parent/child relationships. (This may be especially necessary for distributed object - /// storage, but is unlikely to be an issue before we tackle that problem). + /// In the future, we may modify this method to expose finer-grained + /// information about parent/child relationships. (This may be + /// especially necessary for distributed object storage, but is unlikely + /// to be an issue before we tackle that problem). /// - /// This function may evict the mutable input objects (and successfully received objects) of - /// transaction from the cache, since they cannot be read by any other transaction. + /// This function may evict the mutable input objects (and successfully + /// received objects) of transaction from the cache, since they cannot + /// be read by any other transaction. /// - /// Any write performed by this method immediately notifies any waiter that has previously - /// called notify_read_objects_for_execution or notify_read_objects_for_signing for the object - /// in question. + /// Any write performed by this method immediately notifies any waiter that + /// has previously called notify_read_objects_for_execution or + /// notify_read_objects_for_signing for the object in question. fn write_transaction_outputs( &self, epoch_id: EpochId, @@ -502,8 +513,8 @@ pub trait ExecutionCacheWrite: Send + Sync { } pub trait CheckpointCache: Send + Sync { - // TODO: In addition to the deprecated methods below, this will eventually include access - // to the CheckpointStore + // TODO: In addition to the deprecated methods below, this will eventually + // include access to the CheckpointStore // DEPRECATED METHODS fn deprecated_get_transaction_checkpoint( @@ -545,8 +556,9 @@ pub trait ExecutionCacheReconfigAPI: Send + Sync { fn checkpoint_db(&self, path: &Path) -> SuiResult; - /// This is a temporary method to be used when we enable simplified_unwrap_then_delete. - /// It re-accumulates state hash for the new epoch if simplified_unwrap_then_delete is enabled. + /// This is a temporary method to be used when we enable + /// simplified_unwrap_then_delete. It re-accumulates state hash for the + /// new epoch if simplified_unwrap_then_delete is enabled. 
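For the child-object consistency rule spelled out in `ExecutionCacheWrite` above, here is a minimal illustration of the required write ordering, assuming invented `Owner`/`OutputObject` types and a `Vec` standing in for the durable store; the real implementation works over Sui object types and notifies waiting readers.

```rust
#[derive(Clone, Debug)]
enum Owner {
    Address(u64),
    Shared,
    Object(u64), // owned by the parent object with this id
}

#[derive(Clone, Debug)]
struct OutputObject {
    id: u64,
    owner: Owner,
}

fn write_transaction_outputs(outputs: Vec<OutputObject>, store: &mut Vec<OutputObject>) {
    // Phase 1: object-owned (child) outputs are flushed first.
    let (children, top_level): (Vec<_>, Vec<_>) = outputs
        .into_iter()
        .partition(|o| matches!(o.owner, Owner::Object(_)));
    store.extend(children);
    // Phase 2: top-level (address-owned or shared) objects become visible only
    // after all of their children are durable, so a reader that observes a
    // parent can always observe its children.
    store.extend(top_level);
}

fn main() {
    let mut store = Vec::new();
    let outputs = vec![
        OutputObject { id: 1, owner: Owner::Address(9) },
        OutputObject { id: 2, owner: Owner::Object(1) },
        OutputObject { id: 3, owner: Owner::Shared },
    ];
    write_transaction_outputs(outputs, &mut store);
    assert_eq!(store[0].id, 2); // the child is written before its parent
}
```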
fn maybe_reaccumulate_state_hash( &self, cur_epoch_store: &AuthorityPerEpochStore, @@ -554,9 +566,10 @@ pub trait ExecutionCacheReconfigAPI: Send + Sync { ); } -// StateSyncAPI is for writing any data that was not the result of transaction execution, -// but that arrived via state sync. The fact that it came via state sync implies that it -// is certified output, and can be immediately persisted to the store. +// StateSyncAPI is for writing any data that was not the result of transaction +// execution, but that arrived via state sync. The fact that it came via state +// sync implies that it is certified output, and can be immediately persisted to +// the store. pub trait StateSyncAPI: Send + Sync { fn insert_transaction_and_effects( &self, @@ -570,8 +583,10 @@ pub trait StateSyncAPI: Send + Sync { ) -> SuiResult; } -// TODO: Remove EffectsNotifyRead trait and just use ExecutionCacheRead directly everywhere. -/// This wrapper is used so that we don't have to disambiguate traits at every callsite. +// TODO: Remove EffectsNotifyRead trait and just use ExecutionCacheRead directly +// everywhere. +/// This wrapper is used so that we don't have to disambiguate traits at every +/// callsite. pub struct NotifyReadWrapper(Arc); impl Clone for NotifyReadWrapper { @@ -663,9 +678,11 @@ macro_rules! implement_storage_traits { // Check for: // * Invalid access -- treat as the object does not exist. Or; - // * If we've already received the object at the version -- then treat it as though it doesn't exist. - // These two cases must remain indisguishable to the caller otherwise we risk forks in - // transaction replay due to possible reordering of transactions during replay. + // * If we've already received the object at the version -- then treat it as + // though it doesn't exist. + // These two cases must remain indistinguishable to the caller, otherwise we risk + // forks in transaction replay due to possible reordering of + // transactions during replay. if recv_object.owner != Owner::AddressOwner((*owner).into()) || self.have_received_object_at_version( receiving_object_id, @@ -700,7 +717,8 @@ macro_rules! implement_storage_traits { }; } -// Implement traits for a cache implementation that always go directly to the store. +// Implement traits for a cache implementation that always goes directly to the +// store. macro_rules! implement_passthrough_traits { ($implementor: ident) => { impl CheckpointCache for $implementor { diff --git a/crates/sui-core/src/execution_cache/cached_version_map.rs b/crates/sui-core/src/execution_cache/cached_version_map.rs index 00d7e03db83..9a816aaa8e4 100644 --- a/crates/sui-core/src/execution_cache/cached_version_map.rs +++ b/crates/sui-core/src/execution_cache/cached_version_map.rs @@ -1,19 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::cmp::Ordering; -use std::collections::VecDeque; +use std::{cmp::Ordering, collections::VecDeque}; use sui_types::base_types::SequenceNumber; -/// CachedVersionMap is a map from version to value, with the additional contraints: -/// - The key (SequenceNumber) must be monotonically increasing for each insert. If -/// a key is inserted that is less than the previous key, it results in an assertion -/// failure. +/// CachedVersionMap is a map from version to value, with the additional +/// constraints: +/// - The key (SequenceNumber) must be monotonically increasing for each insert. +/// If a key is inserted that is less than the previous key, it results in an +/// assertion failure. 
/// - Similarly, only the item with the least key can be removed. -/// - The intent of these constraints is to ensure that there are never gaps in the collection, -/// so that membership in the map can be tested by comparing to both the highest and lowest -/// (first and last) entries. +/// - The intent of these constraints is to ensure that there are never gaps in +/// the collection, so that membership in the map can be tested by comparing +/// to both the highest and lowest (first and last) entries. #[derive(Debug)] pub struct CachedVersionMap { values: VecDeque<(SequenceNumber, V)>, @@ -103,9 +103,10 @@ impl CachedVersionMap { #[cfg(test)] mod tests { - use super::*; use sui_types::base_types::SequenceNumber; + use super::*; + // Helper function to create a SequenceNumber for simplicity fn seq(num: u64) -> SequenceNumber { SequenceNumber::from(num) diff --git a/crates/sui-core/src/execution_cache/passthrough_cache.rs b/crates/sui-core/src/execution_cache/passthrough_cache.rs index f5aff8b0b13..8fa4bcabde7 100644 --- a/crates/sui-core/src/execution_cache/passthrough_cache.rs +++ b/crates/sui-core/src/execution_cache/passthrough_cache.rs @@ -1,17 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::authority_store::{ExecutionLockWriteGuard, SuiLockResult}; -use crate::authority::authority_store_pruner::{ - AuthorityStorePruner, AuthorityStorePruningMetrics, EPOCH_DURATION_MS_FOR_TESTING, -}; -use crate::authority::epoch_start_configuration::EpochFlag; -use crate::authority::epoch_start_configuration::EpochStartConfiguration; -use crate::authority::AuthorityStore; -use crate::checkpoints::CheckpointStore; -use crate::state_accumulator::AccumulatorStore; -use crate::transaction_outputs::TransactionOutputs; +use std::sync::Arc; use either::Either; use futures::{ @@ -20,22 +10,22 @@ use futures::{ }; use mysten_common::sync::notify_read::NotifyRead; use prometheus::Registry; -use std::sync::Arc; use sui_config::node::AuthorityStorePruningConfig; use sui_protocol_config::ProtocolVersion; use sui_storage::package_object_cache::PackageObjectCache; -use sui_types::accumulator::Accumulator; -use sui_types::base_types::VerifiedExecutionData; -use sui_types::base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber}; -use sui_types::digests::{TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest}; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::error::{SuiError, SuiResult}; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::object::Object; -use sui_types::storage::{MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject}; -use sui_types::sui_system_state::{get_sui_system_state, SuiSystemState}; -use sui_types::transaction::{VerifiedSignedTransaction, VerifiedTransaction}; +use sui_types::{ + accumulator::Accumulator, + base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, VerifiedExecutionData}, + digests::{TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest}, + effects::{TransactionEffects, TransactionEvents}, + error::{SuiError, SuiResult}, + message_envelope::Message, + messages_checkpoint::CheckpointSequenceNumber, + object::Object, + storage::{MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject}, + sui_system_state::{get_sui_system_state, SuiSystemState}, + transaction::{VerifiedSignedTransaction, 
VerifiedTransaction}, +}; use tap::TapFallible; use tracing::instrument; use typed_store::Map; @@ -45,6 +35,20 @@ use super::{ ExecutionCacheRead, ExecutionCacheReconfigAPI, ExecutionCacheWrite, NotifyReadWrapper, StateSyncAPI, }; +use crate::{ + authority::{ + authority_per_epoch_store::AuthorityPerEpochStore, + authority_store::{ExecutionLockWriteGuard, SuiLockResult}, + authority_store_pruner::{ + AuthorityStorePruner, AuthorityStorePruningMetrics, EPOCH_DURATION_MS_FOR_TESTING, + }, + epoch_start_configuration::{EpochFlag, EpochStartConfiguration}, + AuthorityStore, + }, + checkpoints::CheckpointStore, + state_accumulator::AccumulatorStore, + transaction_outputs::TransactionOutputs, +}; pub struct PassthroughCache { store: Arc, @@ -352,7 +356,8 @@ impl ExecutionCacheCommit for PassthroughCache { _epoch: EpochId, _digest: &TransactionDigest, ) -> BoxFuture<'_, SuiResult> { - // Nothing needs to be done since they were already committed in write_transaction_outputs + // Nothing needs to be done since they were already committed in + // write_transaction_outputs async { Ok(()) }.boxed() } } diff --git a/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs b/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs index a5de01afc25..39885191b80 100644 --- a/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs +++ b/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs @@ -1,22 +1,25 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use prometheus::default_registry; -use rand::{rngs::StdRng, SeedableRng}; use std::{ collections::BTreeMap, future::Future, path::PathBuf, - sync::atomic::Ordering, - sync::{atomic::AtomicU32, Arc}, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, }; + +use prometheus::default_registry; +use rand::{rngs::StdRng, SeedableRng}; use sui_framework::BuiltInFramework; use sui_macros::{register_fail_point_async, sim_test}; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::effects::TestEffectsBuilder; use sui_types::{ base_types::{random_object_ref, SuiAddress}, crypto::{deterministic_random_account_key, get_key_pair_from_rng, AccountKeyPair}, + effects::TestEffectsBuilder, object::{MoveObject, Owner, OBJECT_START_VERSION}, storage::ChildObjectResolver, }; @@ -155,8 +158,8 @@ impl Scenario { let (sender, keypair): (SuiAddress, AccountKeyPair) = get_key_pair_from_rng(&mut rng); let (receiver, _): (SuiAddress, AccountKeyPair) = get_key_pair_from_rng(&mut rng); - // Tx is opaque to the cache, so we just build a dummy tx. The only requirement is - // that it has a unique digest every time. + // Tx is opaque to the cache, so we just build a dummy tx. The only requirement + // is that it has a unique digest every time. let tx = TestTransactionBuilder::new(sender, random_object_ref(), 100) .transfer(random_object_ref(), receiver) .build_and_sign(&keypair); @@ -331,8 +334,8 @@ impl Scenario { Arc::new(outputs) } - // Commit the current tx to the cache, return its digest, and reset the transaction - // outputs to a new empty one. + // Commit the current tx to the cache, return its digest, and reset the + // transaction outputs to a new empty one. async fn do_tx(&mut self) -> TransactionDigest { // Resets outputs, but not objects, so that subsequent runs must respect // the state so far. 
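The constraints on CachedVersionMap from the cached_version_map.rs hunk above are easiest to see in miniature. A sketch with u64 standing in for SequenceNumber, illustrative only and not the real implementation:

use std::collections::VecDeque;

struct VersionMap<V> {
    values: VecDeque<(u64, V)>,
}

impl<V> VersionMap<V> {
    fn new() -> Self {
        Self { values: VecDeque::new() }
    }

    // Keys must arrive in strictly increasing order, so the deque always
    // holds a contiguous, gap-free run of the newest versions.
    fn insert(&mut self, version: u64, value: V) {
        if let Some((last, _)) = self.values.back() {
            assert!(version > *last, "versions must be monotonically increasing");
        }
        self.values.push_back((version, value));
    }

    // Only the entry with the least key may be removed, preserving contiguity.
    fn pop_oldest(&mut self) -> Option<(u64, V)> {
        self.values.pop_front()
    }

    // Because there are no gaps, a version within [oldest, newest] that is
    // not found here is known not to exist at all.
    fn get(&self, version: u64) -> Option<&V> {
        self.values.iter().find(|(v, _)| *v == version).map(|(_, val)| val)
    }
}

fn main() {
    let mut map = VersionMap::new();
    map.insert(4, "a");
    map.insert(8, "b");
    map.insert(13, "c");
    assert_eq!(map.get(8), Some(&"b"));
    assert_eq!(map.get(9), None); // 9 can be reported as nonexistent
    assert_eq!(map.pop_oldest(), Some((4, "a")));
}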
@@ -488,10 +491,11 @@ impl Scenario { .unwrap(), *object ); - assert!(self - .cache() - .have_received_object_at_version(id, object.version(), 1) - .unwrap()); + assert!( + self.cache() + .have_received_object_at_version(id, object.version(), 1) + .unwrap() + ); } } @@ -679,8 +683,8 @@ async fn test_write_transaction_outputs_is_sync() { Scenario::iterate(|mut s| async move { s.with_created(&[1, 2]); let outputs = s.take_outputs(); - // assert that write_transaction_outputs is sync in non-simtest, which causes the - // fail_point_async! macros above to be elided + // assert that write_transaction_outputs is sync in non-simtest, which causes + // the fail_point_async! macros above to be elided s.cache .write_transaction_outputs(1, outputs) .now_or_never() @@ -778,11 +782,12 @@ async fn test_invalidate_package_cache_on_revert() { s.cache().revert_state_update(&tx1).unwrap(); s.clear_state_end_of_epoch().await; - assert!(s - .cache() - .get_package_object(&s.obj_id(2)) - .unwrap() - .is_none()); + assert!( + s.cache() + .get_package_object(&s.obj_id(2)) + .unwrap() + .is_none() + ); }) .await; } diff --git a/crates/sui-core/src/execution_cache/writeback_cache.rs b/crates/sui-core/src/execution_cache/writeback_cache.rs index 9de84a198df..99c9357483b 100644 --- a/crates/sui-core/src/execution_cache/writeback_cache.rs +++ b/crates/sui-core/src/execution_cache/writeback_cache.rs @@ -1,56 +1,54 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! MemoryCache is a cache for the transaction execution which delays writes to the database until -//! transaction results are certified (i.e. they appear in a certified checkpoint, or an effects cert -//! is observed by a fullnode). The cache also stores committed data in memory in order to serve -//! future reads without hitting the database. +//! MemoryCache is a cache for the transaction execution which delays writes to +//! the database until transaction results are certified (i.e. they appear in a +//! certified checkpoint, or an effects cert is observed by a fullnode). The +//! cache also stores committed data in memory in order to serve future reads +//! without hitting the database. //! -//! For storing uncommitted transaction outputs, we cannot evict the data at all until it is written -//! to disk. Committed data not only can be evicted, but it is also unbounded (imagine a stream of -//! transactions that keep splitting a coin into smaller coins). +//! For storing uncommitted transaction outputs, we cannot evict the data at all +//! until it is written to disk. Committed data not only can be evicted, but it +//! is also unbounded (imagine a stream of transactions that keep splitting a +//! coin into smaller coins). //! -//! We also want to be able to support negative cache hits (i.e. the case where we can determine an -//! object does not exist without hitting the database). +//! We also want to be able to support negative cache hits (i.e. the case where +//! we can determine an object does not exist without hitting the database). //! -//! To achieve both of these goals, we split the cache data into two pieces, a dirty set and a cached -//! set. The dirty set has no automatic evictions, data is only removed after being committed. The -//! cached set is in a bounded-sized cache with automatic evictions. In order to support negative -//! cache hits, we treat the two halves of the cache as FIFO queue. Newly written (dirty) versions are -//! inserted to one end of the dirty queue. 
As versions are committed to disk, they are -//! removed from the other end of the dirty queue and inserted into the cache queue. The cache queue -//! is truncated if it exceeds its maximum size, by removing all but the N newest versions. +//! To achieve both of these goals, we split the cache data into two pieces, a +//! dirty set and a cached set. The dirty set has no automatic evictions, data +//! is only removed after being committed. The cached set is in a bounded-sized +//! cache with automatic evictions. In order to support negative cache hits, we +//! treat the two halves of the cache as FIFO queue. Newly written (dirty) +//! versions are inserted to one end of the dirty queue. As versions are +//! committed to disk, they are removed from the other end of the dirty queue +//! and inserted into the cache queue. The cache queue is truncated if it +//! exceeds its maximum size, by removing all but the N newest versions. //! -//! This gives us the property that the sequence of versions in the dirty and cached queues are the -//! most recent versions of the object, i.e. there can be no "gaps". This allows for the following: +//! This gives us the property that the sequence of versions in the dirty and +//! cached queues are the most recent versions of the object, i.e. there can be +//! no "gaps". This allows for the following: //! -//! - Negative cache hits: If the queried version is not in memory, but is higher than the smallest -//! version in the cached queue, it does not exist in the db either. -//! - Bounded reads: When reading the most recent version that is <= some version bound, we can -//! correctly satisfy this query from the cache, or determine that we must go to the db. +//! - Negative cache hits: If the queried version is not in memory, but is +//! higher than the smallest version in the cached queue, it does not exist +//! in the db either. +//! - Bounded reads: When reading the most recent version that is <= some +//! version bound, we can correctly satisfy this query from the cache, or +//! determine that we must go to the db. //! -//! Note that at any time, either or both the dirty or the cached queue may be non-existent. There may be no -//! dirty versions of the objects, in which case there will be no dirty queue. And, the cached queue -//! may be evicted from the cache, in which case there will be no cached queue. Because only the cached -//! queue can be evicted (the dirty queue can only become empty by moving versions from it to the cached -//! queue), the "highest versions" property still holds in all cases. +//! Note that at any time, either or both the dirty or the cached queue may be +//! non-existent. There may be no dirty versions of the objects, in which case +//! there will be no dirty queue. And, the cached queue may be evicted from the +//! cache, in which case there will be no cached queue. Because only the cached +//! queue can be evicted (the dirty queue can only become empty by moving +//! versions from it to the cached queue), the "highest versions" property still +//! holds in all cases. //! //! The above design is used for both objects and markers. 
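Both bullet points follow from the gap-free property, and the lookup decision can be sketched in a few lines. Types are simplified, the versions 4, 8, 13 mirror the example used in UncommittedData's doc comment further down, and this is not the real WritebackCache logic:

// `cached` holds the gap-free run of the newest versions of one object.
enum Lookup<'a> {
    Hit(&'a str),
    KnownAbsent, // negative hit: no need to touch the db
    Miss,        // must fall back to the db
}

fn lookup<'a>(cached: &'a [(u64, &'a str)], version: u64) -> Lookup<'a> {
    if let Some(v) = cached.iter().find(|(ver, _)| *ver == version) {
        return Lookup::Hit(v.1);
    }
    match cached.first() {
        // Any absent version at or above the lowest cached one cannot exist
        // in the db either, since the cached run has no gaps.
        Some((lowest, _)) if version >= *lowest => Lookup::KnownAbsent,
        _ => Lookup::Miss,
    }
}

fn main() {
    let cached = [(4, "v4"), (8, "v8"), (13, "v13")];
    assert!(matches!(lookup(&cached, 8), Lookup::Hit("v8")));
    assert!(matches!(lookup(&cached, 9), Lookup::KnownAbsent));
    assert!(matches!(lookup(&cached, 2), Lookup::Miss));
}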
-use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::authority_store::{ExecutionLockWriteGuard, SuiLockResult}; -use crate::authority::authority_store_pruner::{ - AuthorityStorePruner, AuthorityStorePruningMetrics, EPOCH_DURATION_MS_FOR_TESTING, -}; -use crate::authority::authority_store_tables::LiveObject; -use crate::authority::epoch_start_configuration::{EpochFlag, EpochStartConfiguration}; -use crate::authority::AuthorityStore; -use crate::checkpoints::CheckpointStore; -use crate::state_accumulator::AccumulatorStore; -use crate::transaction_outputs::TransactionOutputs; - -use dashmap::mapref::entry::Entry as DashMapEntry; -use dashmap::DashMap; +use std::{collections::BTreeSet, hash::Hash, sync::Arc}; + +use dashmap::{mapref::entry::Entry as DashMapEntry, DashMap}; use either::Either; use futures::{ future::{join_all, BoxFuture}, @@ -60,32 +58,43 @@ use moka::sync::Cache as MokaCache; use mysten_common::sync::notify_read::NotifyRead; use parking_lot::Mutex; use prometheus::Registry; -use std::collections::BTreeSet; -use std::hash::Hash; -use std::sync::Arc; use sui_config::node::AuthorityStorePruningConfig; use sui_macros::fail_point_async; use sui_protocol_config::ProtocolVersion; -use sui_types::accumulator::Accumulator; -use sui_types::base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, VerifiedExecutionData}; -use sui_types::digests::{ - ObjectDigest, TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest, +use sui_types::{ + accumulator::Accumulator, + base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, VerifiedExecutionData}, + digests::{ObjectDigest, TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest}, + effects::{TransactionEffects, TransactionEvents}, + error::{SuiError, SuiResult, UserInputError}, + message_envelope::Message, + messages_checkpoint::CheckpointSequenceNumber, + object::Object, + storage::{MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject}, + sui_system_state::{get_sui_system_state, SuiSystemState}, + transaction::{VerifiedSignedTransaction, VerifiedTransaction}, }; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::error::{SuiError, SuiResult, UserInputError}; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::object::Object; -use sui_types::storage::{MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject}; -use sui_types::sui_system_state::{get_sui_system_state, SuiSystemState}; -use sui_types::transaction::{VerifiedSignedTransaction, VerifiedTransaction}; use tracing::{info, instrument}; -use super::ExecutionCacheAPI; use super::{ cached_version_map::CachedVersionMap, implement_passthrough_traits, CheckpointCache, - ExecutionCacheCommit, ExecutionCacheMetrics, ExecutionCacheRead, ExecutionCacheReconfigAPI, - ExecutionCacheWrite, NotifyReadWrapper, StateSyncAPI, + ExecutionCacheAPI, ExecutionCacheCommit, ExecutionCacheMetrics, ExecutionCacheRead, + ExecutionCacheReconfigAPI, ExecutionCacheWrite, NotifyReadWrapper, StateSyncAPI, +}; +use crate::{ + authority::{ + authority_per_epoch_store::AuthorityPerEpochStore, + authority_store::{ExecutionLockWriteGuard, SuiLockResult}, + authority_store_pruner::{ + AuthorityStorePruner, AuthorityStorePruningMetrics, EPOCH_DURATION_MS_FOR_TESTING, + }, + authority_store_tables::LiveObject, + epoch_start_configuration::{EpochFlag, EpochStartConfiguration}, + AuthorityStore, + }, + 
checkpoints::CheckpointStore, + state_accumulator::AccumulatorStore, + transaction_outputs::TransactionOutputs, }; #[cfg(test)] @@ -138,21 +147,25 @@ enum CacheResult { Miss, } -/// UncommitedData stores execution outputs that are not yet written to the db. Entries in this -/// struct can only be purged after they are committed. +/// UncommitedData stores execution outputs that are not yet written to the db. +/// Entries in this struct can only be purged after they are committed. struct UncommittedData { - /// The object dirty set. All writes go into this table first. After we flush the data to the - /// db, the data is removed from this table and inserted into the object_cache. + /// The object dirty set. All writes go into this table first. After we + /// flush the data to the db, the data is removed from this table and + /// inserted into the object_cache. /// - /// This table may contain both live and dead objects, since we flush both live and dead - /// objects to the db in order to support past object queries on fullnodes. + /// This table may contain both live and dead objects, since we flush both + /// live and dead objects to the db in order to support past object + /// queries on fullnodes. /// - /// Further, we only remove objects in FIFO order, which ensures that the cached - /// sequence of objects has no gaps. In other words, if we have versions 4, 8, 13 of - /// an object, we can deduce that version 9 does not exist. This also makes child object - /// reads efficient. `object_cache` cannot contain a more recent version of an object than - /// `objects`, and neither can have any gaps. Therefore if there is any object <= the version - /// bound for a child read in objects, it is the correct object to return. + /// Further, we only remove objects in FIFO order, which ensures that the + /// cached sequence of objects has no gaps. In other words, if we have + /// versions 4, 8, 13 of an object, we can deduce that version 9 does + /// not exist. This also makes child object reads efficient. + /// `object_cache` cannot contain a more recent version of an object than + /// `objects`, and neither can have any gaps. Therefore if there is any + /// object <= the version bound for a child read in objects, it is the + /// correct object to return. objects: DashMap>, // Markers for received objects and deleted shared objects. This contains all of the dirty @@ -199,7 +212,8 @@ impl UncommittedData { } } -/// CachedData stores data that has been committed to the db, but is likely to be read soon. +/// CachedData stores data that has been committed to the db, but is likely to +/// be read soon. struct CachedCommittedData { // See module level comment for an explanation of caching strategy. object_cache: MokaCache>>>, @@ -347,9 +361,9 @@ impl WritebackCache { .insert(object_key.1, marker_value); } - // lock both the dirty and committed sides of the cache, and then pass the entries to - // the callback. Written with the `with` pattern because any other way of doing this - // creates lifetime hell. + // lock both the dirty and committed sides of the cache, and then pass the + // entries to the callback. Written with the `with` pattern because any + // other way of doing this creates lifetime hell. fn with_locked_cache_entries( dirty_map: &DashMap>, cached_map: &MokaCache>>>, @@ -490,8 +504,9 @@ impl WritebackCache { .write_transaction_outputs(epoch, outputs.clone()) .await?; - // Now, remove each piece of committed data from the dirty state and insert it into the cache. 
- // TODO: outputs should have a strong count of 1 so we should be able to move out of it + // Now, remove each piece of committed data from the dirty state and insert it + // into the cache. TODO: outputs should have a strong count of 1 so we + // should be able to move out of it let TransactionOutputs { transaction, effects, @@ -587,8 +602,9 @@ impl WritebackCache { { static MAX_VERSIONS: usize = 3; - // IMPORTANT: lock both the dirty set entry and the cache entry before modifying either. - // this ensures that readers cannot see a value temporarily disappear. + // IMPORTANT: lock both the dirty set entry and the cache entry before modifying + // either. this ensures that readers cannot see a value temporarily + // disappear. let dirty_entry = dirty.entry(key); let cache_entry = cache.entry(key).or_default(); let mut cache_map = cache_entry.value().lock(); @@ -706,9 +722,9 @@ impl ExecutionCacheRead for WritebackCache { return Ok(Some(p)); } - // We try the dirty objects cache as well before going to the database. This is necessary - // because the package could be evicted from the package cache before it is committed - // to the database. + // We try the dirty objects cache as well before going to the database. This is + // necessary because the package could be evicted from the package cache + // before it is committed to the database. if let Some(p) = ExecutionCacheRead::get_object(self, package_id)? { if p.is_package() { let p = PackageObject::new(p); @@ -737,17 +753,18 @@ impl ExecutionCacheRead for WritebackCache { assert!(p.is_package()); self.packages.insert(*package_id, PackageObject::new(p)); } - // It's possible that a package is not found if it's newly added system package ID - // that hasn't got created yet. This should be very very rare though. + // It's possible that a package is not found if it's newly added + // system package ID that hasn't got created yet. This + // should be very very rare though. } } // get_object and variants. // - // TODO: We don't insert objects into the cache after misses because they are usually only - // read once. We might want to cache immutable reads (RO shared objects and immutable objects) - // If we do this, we must be VERY CAREFUL not to break the contiguous version property - // of the cache. + // TODO: We don't insert objects into the cache after misses because they are + // usually only read once. We might want to cache immutable reads (RO shared + // objects and immutable objects) If we do this, we must be VERY CAREFUL not + // to break the contiguous version property of the cache. fn get_object(&self, id: &ObjectID) -> SuiResult> { match self.get_object_by_id_cache_only(id) { @@ -1083,10 +1100,11 @@ impl ExecutionCacheWrite for WritebackCache { .. } = &*tx_outputs; - // Deletions and wraps must be written first. The reason is that one of the deletes - // may be a child object, and if we write the parent object first, a reader may or may - // not see the previous version of the child object, instead of the deleted/wrapped - // tombstone, which would cause an execution fork + // Deletions and wraps must be written first. 
The reason is that one of the + // deletes may be a child object, and if we write the parent object + // first, a reader may or may not see the previous version of the + // child object, instead of the deleted/wrapped tombstone, which + // would cause an execution fork for ObjectKey(id, version) in deleted.iter() { self.write_object_entry(id, *version, ObjectEntry::Deleted) .await; @@ -1103,8 +1121,8 @@ impl ExecutionCacheWrite for WritebackCache { .await; } - // Write children before parents to ensure that readers do not observe a parent object - // before its most recent children are visible. + // Write children before parents to ensure that readers do not observe a parent + // object before its most recent children are visible. for (object_id, object) in written.iter() { if object.is_child_object() { self.write_object_entry(object_id, object.version(), object.clone().into()) @@ -1163,11 +1181,12 @@ impl ExecutionCacheWrite for WritebackCache { /// do_fallback_lookup is a helper function for multi-get operations. /// It takes a list of keys and first attempts to look up each key in the cache. -/// The cache can return a hit, a miss, or a negative hit (if the object is known to not exist). -/// Any keys that result in a miss are then looked up in the store. +/// The cache can return a hit, a miss, or a negative hit (if the object is +/// known to not exist). Any keys that result in a miss are then looked up in +/// the store. /// -/// The "get from cache" and "get from store" behavior are implemented by the caller and provided -/// via the get_cached_key and multiget_fallback functions. +/// The "get from cache" and "get from store" behaviors are implemented by the +/// caller and provided via the get_cached_key and multiget_fallback functions. fn do_fallback_lookup( keys: &[K], get_cached_key: impl Fn(&K) -> SuiResult>, @@ -1211,9 +1230,10 @@ impl AccumulatorStore for WritebackCache { object_id: &ObjectID, version: SequenceNumber, ) -> SuiResult> { - // There is probably a more efficient way to implement this, but since this is only used by - // old protocol versions, it is better to do the simple thing that is obviously correct. - // In this case we previous version from all sources and choose the highest + // There is probably a more efficient way to implement this, but since this is + // only used by old protocol versions, it is better to do the simple + // thing that is obviously correct. In this case we get the previous version + // from all sources and choose the highest. let mut candidates = Vec::new(); let check_versions = @@ -1285,9 +1305,9 @@ impl AccumulatorStore for WritebackCache { &self, include_wrapped_tombstone: bool, ) -> Box + '_> { - // The only time it is safe to iterate the live object set is at an epoch boundary, - // at which point the db is consistent and the dirty cache is empty. So this does - // read the cache + // The only time it is safe to iterate the live object set is at an epoch + // boundary, at which point the db is consistent and the dirty cache is + // empty. 
So this does read the cache self.store.iter_live_object_set(include_wrapped_tombstone) } } diff --git a/crates/sui-core/src/execution_driver.rs b/crates/sui-core/src/execution_driver.rs index d43ee31adf1..4498e4aecaf 100644 --- a/crates/sui-core/src/execution_driver.rs +++ b/crates/sui-core/src/execution_driver.rs @@ -18,15 +18,14 @@ use tokio::{ }; use tracing::{error, error_span, info, trace, Instrument}; -use crate::authority::AuthorityState; -use crate::transaction_manager::PendingCertificate; +use crate::{authority::AuthorityState, transaction_manager::PendingCertificate}; #[cfg(test)] #[path = "unit_tests/execution_driver_tests.rs"] mod execution_driver_tests; -// Execution should not encounter permanent failures, so any failure can and needs -// to be retried. +// Execution should not encounter permanent failures, so any failure can and +// needs to be retried. pub const EXECUTION_MAX_ATTEMPTS: u32 = 10; const EXECUTION_FAILURE_RETRY_INTERVAL: Duration = Duration::from_secs(1); const QUEUEING_DELAY_SAMPLING_RATIO: f64 = 0.05; @@ -73,14 +72,15 @@ pub async fn execution_process( let authority = if let Some(authority) = authority_state.upgrade() { authority } else { - // Terminate the execution if authority has already shutdown, even if there can be more - // items in rx_ready_certificates. + // Terminate the execution if authority has already shutdown, even if there can + // be more items in rx_ready_certificates. info!("Authority state has shutdown. Exiting ..."); return; }; authority.metrics.execution_driver_dispatch_queue.dec(); - // TODO: Ideally execution_driver should own a copy of epoch store and recreate each epoch. + // TODO: Ideally execution_driver should own a copy of epoch store and recreate + // each epoch. let epoch_store = authority.load_epoch_store_one_call_per_task(); let digest = *certificate.digest(); @@ -106,7 +106,8 @@ pub async fn execution_process( authority.metrics.execution_rate_tracker.lock().record(); - // Certificate execution can take significant time, so run it in a separate task. + // Certificate execution can take significant time, so run it in a separate + // task. spawn_monitored_task!(async move { let _scope = monitored_scope("ExecutionDriver::task"); let _guard = permit; diff --git a/crates/sui-core/src/generate_format.rs b/crates/sui-core/src/generate_format.rs index 1ee51f633ff..86c982423e5 100644 --- a/crates/sui-core/src/generate_format.rs +++ b/crates/sui-core/src/generate_format.rs @@ -1,50 +1,43 @@ // Copyright (c) 2021, Facebook, Inc. and its affiliates // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{fs::File, io::Write, str::FromStr}; + use clap::*; -use fastcrypto_zkp::bn254::zk_login::OIDCProvider; -use fastcrypto_zkp::zk_login_utils::Bn254FrElement; +use fastcrypto_zkp::{bn254::zk_login::OIDCProvider, zk_login_utils::Bn254FrElement}; use move_core_types::language_storage::{StructTag, TypeTag}; use pretty_assertions::assert_str_eq; -use rand::rngs::StdRng; -use rand::SeedableRng; +use rand::{rngs::StdRng, SeedableRng}; use serde_reflection::{Registry, Result, Samples, Tracer, TracerConfig}; use shared_crypto::intent::{Intent, IntentMessage, PersonalMessage}; -use std::str::FromStr; -use std::{fs::File, io::Write}; -use sui_types::execution_status::{ - CommandArgumentError, ExecutionFailureStatus, ExecutionStatus, PackageUpgradeError, - TypeArgumentError, -}; -use sui_types::messages_grpc::ObjectInfoRequestKind; -use sui_types::{ - base_types::MoveObjectType_, - crypto::Signer, - messages_checkpoint::{ - CheckpointContents, CheckpointContentsDigest, CheckpointDigest, CheckpointSummary, - FullCheckpointContents, - }, - transaction::TransactionExpiration, -}; use sui_types::{ base_types::{ - self, MoveObjectType, ObjectDigest, ObjectID, TransactionDigest, TransactionEffectsDigest, + self, MoveObjectType, MoveObjectType_, ObjectDigest, ObjectID, TransactionDigest, + TransactionEffectsDigest, }, crypto::{ get_key_pair, get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, - AuthorityPublicKeyBytes, AuthoritySignature, KeypairTraits, Signature, SuiKeyPair, + AuthorityPublicKeyBytes, AuthoritySignature, KeypairTraits, PublicKey, Signature, Signer, + SuiKeyPair, ZkLoginPublicIdentifier, }, + effects::{IDOperation, ObjectIn, ObjectOut, TransactionEffects, UnchangedSharedKind}, + execution_status::{ + CommandArgumentError, ExecutionFailureStatus, ExecutionStatus, PackageUpgradeError, + TypeArgumentError, + }, + messages_checkpoint::{ + CheckpointContents, CheckpointContentsDigest, CheckpointDigest, CheckpointSummary, + FullCheckpointContents, + }, + messages_grpc::ObjectInfoRequestKind, multisig::{MultiSig, MultiSigPublicKey}, object::{Data, Owner}, signature::GenericSignature, storage::DeleteKind, transaction::{ - Argument, CallArg, Command, EndOfEpochTransactionKind, ObjectArg, TransactionKind, + Argument, CallArg, Command, EndOfEpochTransactionKind, ObjectArg, TransactionExpiration, + TransactionKind, }, -}; -use sui_types::{ - crypto::{PublicKey, ZkLoginPublicIdentifier}, - effects::{IDOperation, ObjectIn, ObjectOut, TransactionEffects, UnchangedSharedKind}, utils::DEFAULT_ADDRESS_SEED, }; use typed_store::TypedStoreError; @@ -57,8 +50,8 @@ fn get_registry() -> Result { // 1. Record samples for types with custom deserializers. // We want to call // tracer.trace_value(&mut samples, ...)?; - // with all the base types contained in messages, especially the ones with custom serializers; - // or involving generics (see [serde_reflection documentation](https://novifinancial.github.io/serde-reflection/serde_reflection/index.html)). + // with all the base types contained in messages, especially the ones with + // custom serializers; or involving generics (see [serde_reflection documentation](https://novifinancial.github.io/serde-reflection/serde_reflection/index.html)). 
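The tracing flow this comment describes, sample values first and container types afterwards, looks like the following on a toy type. A sketch only: the serde, serde_reflection, and serde_yaml crates are assumed, and Digest and Message are invented stand-ins for the Sui message types:

use serde::{Deserialize, Serialize};
use serde_reflection::{Registry, Samples, Tracer, TracerConfig};

#[derive(Serialize, Deserialize)]
struct Digest([u8; 4]);

#[derive(Serialize, Deserialize)]
struct Message {
    digest: Digest,
    gas_price: u64,
}

fn get_registry() -> serde_reflection::Result<Registry> {
    let mut tracer = Tracer::new(TracerConfig::default());
    let mut samples = Samples::new();
    // 1. Record a sample value for the type that needs real data to trace.
    tracer.trace_value(&mut samples, &Digest([1, 2, 3, 4]))?;
    // 2. Trace the container type; the sample above guides deserialization.
    tracer.trace_type::<Message>(&samples)?;
    tracer.registry()
}

fn main() {
    let registry = get_registry().unwrap();
    // The registry can be serialized and diffed in CI to catch accidental
    // wire-format changes, which is the point of generate_format.rs.
    println!("{}", serde_yaml::to_string(&registry).unwrap());
}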
let (addr, kp): (_, AuthorityKeyPair) = get_key_pair(); let (s_addr, s_kp): (_, AccountKeyPair) = get_key_pair(); let pk: AuthorityPublicKeyBytes = kp.public().into(); @@ -69,7 +62,8 @@ fn get_registry() -> Result { tracer.trace_value(&mut samples, &s_addr)?; tracer.trace_value(&mut samples, &s_kp)?; - // We have two signature types: one for Authority Signatures, which don't include the PubKey ... + // We have two signature types: one for Authority Signatures, which don't + // include the PubKey ... let sig: AuthoritySignature = Signer::sign(&kp, b"hello world"); tracer.trace_value(&mut samples, &sig)?; // ... and the user signature which does @@ -128,7 +122,8 @@ fn get_registry() -> Result { let oid: ObjectID = addr.into(); tracer.trace_value(&mut samples, &oid)?; - // ObjectDigest and Transaction digest use the `serde_as`speedup for ser/de => trace them + // ObjectDigest and Transaction digest use the `serde_as` speedup for ser/de => + // trace them let od = ObjectDigest::random(); let td = TransactionDigest::random(); tracer.trace_value(&mut samples, &od)?; diff --git a/crates/sui-core/src/metrics.rs b/crates/sui-core/src/metrics.rs index 54cc6fbf9de..47bc61b0036 100644 --- a/crates/sui-core/src/metrics.rs +++ b/crates/sui-core/src/metrics.rs @@ -1,13 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::VecDeque, + default::Default, + sync::atomic::{AtomicU64, Ordering}, +}; + use parking_lot::Mutex; -use std::collections::VecDeque; -use std::default::Default; -use std::sync::atomic::AtomicU64; -use std::sync::atomic::Ordering; -use tokio::time::Duration; -use tokio::time::Instant; +use tokio::time::{Duration, Instant}; pub struct LatencyObserver { data: Mutex, @@ -58,9 +59,10 @@ impl Default for LatencyObserver { } } -/// RateTracker tracks events in a rolling window, and calculates the rate of events. -/// Internally, the tracker divides the tracking window into multiple BIN_DURATION, -/// and counts events in each BIN_DURATION in a fixed sized buffer. +/// RateTracker tracks events in a rolling window, and calculates the rate of +/// events. Internally, the tracker divides the tracking window into multiple +/// BIN_DURATION, and counts events in each BIN_DURATION in a fixed-size +/// buffer. pub struct RateTracker { // Counts the number of events by bins. Each bin is BIN_DURATION long within window_duration. // The size of the buffer = window_duration / BIN_DURATION. @@ -81,7 +83,8 @@ pub struct RateTracker { const BIN_DURATION: Duration = Duration::from_millis(100); impl RateTracker { - /// Create a new RateTracker to track event rate (events/seconds) in `window_duration`. + /// Create a new RateTracker to track event rate (events/second) in + /// `window_duration`. pub fn new(window_duration: Duration) -> Self { assert!(window_duration > BIN_DURATION); let total_bins = (window_duration.as_millis() / BIN_DURATION.as_millis()) as usize; @@ -123,8 +126,9 @@ impl RateTracker { (now.duration_since(self.start_time).as_millis() / BIN_DURATION.as_millis()) as u64 } - // Updates the rolling window to accommodate the time of interests, `now`. That is, remove any - // event counts happened prior to (`now` - `window_duration`). + // Updates the rolling window to accommodate the time of interest, `now`. That + // is, remove any event counts that happened prior to (`now` - + // `window_duration`). 
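A worked sketch of the bin arithmetic described for RateTracker, with invented names and without the window-eviction step that update_window below performs: an event lands in bin elapsed_ms / 100, indexed modulo the buffer size, and the rate is the bin sum divided by the window length.

use std::time::Duration;

const BIN_DURATION: Duration = Duration::from_millis(100);

struct Bins {
    counts: Vec<u64>,
    window: Duration,
}

impl Bins {
    fn new(window: Duration) -> Self {
        assert!(window > BIN_DURATION);
        let total_bins = (window.as_millis() / BIN_DURATION.as_millis()) as usize;
        Self { counts: vec![0; total_bins], window }
    }

    // Global bin index for an event `elapsed` after tracker start; the buffer
    // index wraps modulo the bin count, as in a ring buffer.
    fn record(&mut self, elapsed: Duration) {
        let bin = (elapsed.as_millis() / BIN_DURATION.as_millis()) as usize;
        let idx = bin % self.counts.len();
        self.counts[idx] += 1;
    }

    // Rate in events/second over the whole window.
    fn rate(&self) -> f64 {
        self.counts.iter().sum::<u64>() as f64 / self.window.as_secs_f64()
    }
}

fn main() {
    // 1 s window => 10 bins of 100 ms each.
    let mut bins = Bins::new(Duration::from_secs(1));
    for ms in [50, 150, 150, 950] {
        bins.record(Duration::from_millis(ms));
    }
    assert_eq!(bins.rate(), 4.0); // 4 events in a 1 s window
}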
fn update_window(&mut self, now: Instant) { let current_bin_index = self.get_bin_index(now); if self.global_bin_index >= current_bin_index { @@ -133,8 +137,8 @@ impl RateTracker { } for bin_index in (self.global_bin_index + 1)..=current_bin_index { - // Time has elapsed from global_bin_index to current_bin_index. Clear all the buffer - // counter associated with them. + // Time has elapsed from global_bin_index to current_bin_index. Clear all the + // buffer counter associated with them. let index_in_buffer = bin_index as usize % self.total_bins; self.event_buffer[index_in_buffer] = 0; } @@ -144,13 +148,11 @@ impl RateTracker { #[cfg(test)] mod tests { - use super::*; - - use rand::rngs::StdRng; - use rand::Rng; - use rand::SeedableRng; + use rand::{rngs::StdRng, Rng, SeedableRng}; use tokio::time::advance; + use super::*; + #[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_rate_tracker_basic() { // 1 sec rolling window. @@ -219,7 +221,8 @@ mod tests { assert_eq!(tracker.rate(), 0.0); } - // Tests that events happened prior to tracking window shouldn't affect the rate. + // Tests that events happened prior to tracking window shouldn't affect the + // rate. #[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_rate_tracker_outside_of_window() { let mut tracker = RateTracker::new(Duration::from_secs(1)); diff --git a/crates/sui-core/src/mysticeti_adapter.rs b/crates/sui-core/src/mysticeti_adapter.rs index 699df2626f3..bb704c09249 100644 --- a/crates/sui-core/src/mysticeti_adapter.rs +++ b/crates/sui-core/src/mysticeti_adapter.rs @@ -18,10 +18,11 @@ use crate::{ consensus_adapter::SubmitToConsensus, }; -/// Basically a wrapper struct that reads from the LOCAL_MYSTICETI_CLIENT variable where the latest -/// MysticetiClient is stored in order to communicate with Mysticeti. The LazyMysticetiClient is considered -/// "lazy" only in the sense that we can't use it directly to submit to consensus unless the underlying -/// local client is set first. +/// Basically a wrapper struct that reads from the LOCAL_MYSTICETI_CLIENT +/// variable where the latest MysticetiClient is stored in order to communicate +/// with Mysticeti. The LazyMysticetiClient is considered "lazy" only in the +/// sense that we can't use it directly to submit to consensus unless the +/// underlying local client is set first. #[derive(Default, Clone)] pub struct LazyMysticetiClient { client: Arc>, @@ -40,8 +41,8 @@ impl LazyMysticetiClient { return client; } - // We expect this to get called during the SUI process start. After that at least one - // object will have initialised and won't need to call again. + // We expect this to get called during the SUI process start. After that at + // least one object will have initialised and won't need to call again. const MYSTICETI_START_TIMEOUT: Duration = Duration::from_secs(30); const LOAD_RETRY_TIMEOUT: Duration = Duration::from_millis(100); if let Ok(client) = timeout(MYSTICETI_START_TIMEOUT, async { @@ -78,8 +79,8 @@ impl SubmitToConsensus for LazyMysticetiClient { _epoch_store: &Arc, ) -> SuiResult { // TODO(mysticeti): confirm comment is still true - // The retrieved TransactionClient can be from the past epoch. Submit would fail after - // Mysticeti shuts down, so there should be no correctness issue. + // The retrieved TransactionClient can be from the past epoch. Submit would fail + // after Mysticeti shuts down, so there should be no correctness issue. 
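The lazy lookup described above reduces to polling a shared slot with a bounded timeout. A sketch under assumed types, with a Mutex-guarded Option standing in for the storage behind LOCAL_MYSTICETI_CLIENT and the tokio runtime assumed:

use std::{
    sync::{Arc, Mutex},
    time::Duration,
};
use tokio::time::{sleep, timeout};

type Slot = Arc<Mutex<Option<Arc<String>>>>;

// Poll the slot until some other component publishes the client, giving up
// after a start-up bound, as the adapter above does.
async fn get_client(slot: Slot) -> Option<Arc<String>> {
    const START_TIMEOUT: Duration = Duration::from_secs(30);
    const LOAD_RETRY: Duration = Duration::from_millis(100);
    timeout(START_TIMEOUT, async {
        loop {
            if let Some(client) = slot.lock().unwrap().clone() {
                return client;
            }
            sleep(LOAD_RETRY).await;
        }
    })
    .await
    .ok()
}

#[tokio::main]
async fn main() {
    let slot: Slot = Arc::new(Mutex::new(Some(Arc::new("client".to_string()))));
    assert!(get_client(slot).await.is_some());
}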
let client = self.get().await; let tx_bytes = bcs::to_bytes(&transaction).expect("Serialization should not fail."); client diff --git a/crates/sui-core/src/overload_monitor.rs b/crates/sui-core/src/overload_monitor.rs index 43c12528605..241d02f8fa9 100644 --- a/crates/sui-core/src/overload_monitor.rs +++ b/crates/sui-core/src/overload_monitor.rs @@ -1,22 +1,28 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::AuthorityState; -use std::cmp::{max, min}; -use std::hash::Hasher; -use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; -use std::sync::Weak; -use std::time::Duration; -use std::time::{SystemTime, UNIX_EPOCH}; +use std::{ + cmp::{max, min}, + hash::Hasher, + sync::{ + atomic::{AtomicBool, AtomicU32, Ordering}, + Weak, + }, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + use sui_config::node::AuthorityOverloadConfig; -use sui_types::digests::TransactionDigest; -use sui_types::error::SuiError; -use sui_types::error::SuiResult; -use sui_types::fp_bail; +use sui_types::{ + digests::TransactionDigest, + error::{SuiError, SuiResult}, + fp_bail, +}; use tokio::time::sleep; use tracing::{debug, info}; use twox_hash::XxHash64; +use crate::authority::AuthorityState; + #[cfg(test)] #[path = "unit_tests/overload_monitor_tests.rs"] pub mod overload_monitor_tests; @@ -47,11 +53,12 @@ const STEADY_OVERLOAD_REDUCTION_PERCENTAGE: u32 = 10; const EXECUTION_RATE_RATIO_FOR_COMPARISON: f64 = 0.95; const ADDITIONAL_LOAD_SHEDDING: f64 = 0.02; -// The update interval of the random seed used to determine whether a txn should be rejected. +// The update interval of the random seed used to determine whether a txn should +// be rejected. const SEED_UPDATE_DURATION_SECS: u64 = 30; -// Monitors the overload signals in `authority_state` periodically, and updates its `overload_info` -// when the signals indicates overload. +// Monitors the overload signals in `authority_state` periodically, and updates +// its `overload_info` when the signals indicates overload. pub async fn overload_monitor( authority_state: Weak, config: AuthorityOverloadConfig, @@ -126,24 +133,25 @@ fn check_authority_overload( true } -// Calculates the percentage of transactions to drop in order to reduce execution queue. -// Returns the integer percentage between 0 and 100. +// Calculates the percentage of transactions to drop in order to reduce +// execution queue. Returns the integer percentage between 0 and 100. fn calculate_load_shedding_percentage(txn_ready_rate: f64, execution_rate: f64) -> u32 { - // When transaction ready rate is practically 0, we aren't adding more load to the - // execution driver, so no shedding. + // When transaction ready rate is practically 0, we aren't adding more load to + // the execution driver, so no shedding. // TODO: consensus handler or transaction manager can also be overloaded. if txn_ready_rate < 1e-10 { return 0; } - // Deflate the execution rate to account for the case that execution_rate is close to - // txn_ready_rate. + // Deflate the execution rate to account for the case that execution_rate is + // close to txn_ready_rate. if execution_rate * EXECUTION_RATE_RATIO_FOR_COMPARISON > txn_ready_rate { return 0; } - // In order to maintain execution queue length, we need to drop at least (1 - executionRate / readyRate). - // To reduce the queue length, here we add 10% more transactions to drop. + // In order to maintain execution queue length, we need to drop at least (1 - + // executionRate / readyRate). 
To reduce the queue length, here we add 10% + // more transactions to drop. (((1.0 - execution_rate * EXECUTION_RATE_RATIO_FOR_COMPARISON / txn_ready_rate) + ADDITIONAL_LOAD_SHEDDING) .min(1.0) @@ -151,14 +159,15 @@ fn calculate_load_shedding_percentage(txn_ready_rate: f64, execution_rate: f64) .round() as u32 } -// Given overload signals (`queueing_latency`, `txn_ready_rate`, `execution_rate`), return whether -// the authority server should enter load shedding mode, and how much percentage of transactions to drop. -// Note that the final load shedding percentage should also take the current load shedding percentage -// into consideration. If we are already shedding 40% load, based on the current txn_ready_rate -// and execution_rate, we need to shed 10% more, the outcome is that we need to shed -// 40% + (1 - 40%) * 10% = 46%. -// When txn_ready_rate is less than execution_rate, we gradually reduce load shedding percentage until -// the queueing latency is back to normal. +// Given overload signals (`queueing_latency`, `txn_ready_rate`, +// `execution_rate`), return whether the authority server should enter load +// shedding mode, and how much percentage of transactions to drop. Note that the +// final load shedding percentage should also take the current load shedding +// percentage into consideration. If we are already shedding 40% load, based on +// the current txn_ready_rate and execution_rate, we need to shed 10% more, the +// outcome is that we need to shed 40% + (1 - 40%) * 10% = 46%. +// When txn_ready_rate is less than execution_rate, we gradually reduce load +// shedding percentage until the queueing latency is back to normal. fn check_overload_signals( config: &AuthorityOverloadConfig, current_load_shedding_percentage: u32, @@ -166,8 +175,9 @@ fn check_overload_signals( txn_ready_rate: f64, execution_rate: f64, ) -> (bool, u32) { - // First, we calculate based on the current `txn_ready_rate` and `execution_rate`, - // what's the percentage of traffic to shed from `txn_ready_rate`. + // First, we calculate based on the current `txn_ready_rate` and + // `execution_rate`, what's the percentage of traffic to shed from + // `txn_ready_rate`. let additional_load_shedding_percentage; if queueing_latency > config.execution_queue_latency_hard_limit { let calculated_load_shedding_percentage = @@ -192,21 +202,23 @@ fn check_overload_signals( // Next, we calculate the new load shedding percentage. let load_shedding_percentage = if additional_load_shedding_percentage > 0 { - // When we need to shed more load, since the `txn_ready_rate` is already influenced - // by `current_load_shedding_percentage`, we need to calculate the new load shedding - // percentage from `current_load_shedding_percentage` and + // When we need to shed more load, since the `txn_ready_rate` is already + // influenced by `current_load_shedding_percentage`, we need to + // calculate the new load shedding percentage from + // `current_load_shedding_percentage` and // `additional_load_shedding_percentage`. current_load_shedding_percentage + (100 - current_load_shedding_percentage) * additional_load_shedding_percentage / 100 } else if txn_ready_rate > config.safe_transaction_ready_rate as f64 && current_load_shedding_percentage > 10 { - // We don't need to shed more load. However, the enqueue rate is still not minimal. - // We gradually reduce load shedding percentage (10% at a time) to gracefully accept - // more load. + // We don't need to shed more load. However, the enqueue rate is still not + // minimal. 
We gradually reduce load shedding percentage (10% at a time) + // to gracefully accept more load. current_load_shedding_percentage - STEADY_OVERLOAD_REDUCTION_PERCENTAGE } else { - // The current transaction ready rate is considered very low. Turn off load shedding mode. + // The current transaction ready rate is considered very low. Turn off load + // shedding mode. 0 }; @@ -224,8 +236,8 @@ fn should_reject_tx( tx_digest: TransactionDigest, temporal_seed: u64, ) -> bool { - // TODO: we also need to add a secret salt (e.g. first consensus commit in the current epoch), - // to prevent gaming the system. + // TODO: we also need to add a secret salt (e.g. first consensus commit in the + // current epoch), to prevent gaming the system. let mut hasher = XxHash64::with_seed(temporal_seed); hasher.write(tx_digest.inner()); let value = hasher.finish(); @@ -237,11 +249,11 @@ pub fn overload_monitor_accept_tx( load_shedding_percentage: u32, tx_digest: TransactionDigest, ) -> SuiResult { - // Derive a random seed from the epoch time for transaction selection. Changing the seed every - // `SEED_UPDATE_DURATION_SECS` interval allows rejected transaction's retry to have a chance - // to go through in the future. - // Also, using the epoch time instead of randomly generating a seed allows that all validators - // makes the same decision. + // Derive a random seed from the epoch time for transaction selection. Changing + // the seed every `SEED_UPDATE_DURATION_SECS` interval allows a rejected + // transaction's retry to have a chance to go through in the future. + // Also, using the epoch time instead of randomly generating a seed allows + // all validators to make the same decision. let temporal_seed = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Sui did not exist prior to 1970") @@ -249,8 +261,9 @@ / SEED_UPDATE_DURATION_SECS; if should_reject_tx(load_shedding_percentage, tx_digest, temporal_seed) { - // TODO: using `SEED_UPDATE_DURATION_SECS` is a safe suggestion that the time based seed - // is definitely different by then. However, a shorter suggestion may be available. + // TODO: using `SEED_UPDATE_DURATION_SECS` is a safe suggestion that the time + // based seed is definitely different by then. However, a shorter + // suggestion may be available. fp_bail!(SuiError::ValidatorOverloadedRetryAfter { retry_after_secs: SEED_UPDATE_DURATION_SECS }); @@ -261,21 +274,24 @@ #[cfg(test)] #[allow(clippy::disallowed_methods)] // allow unbounded_channel() since tests are simulating txn manager execution driver interaction. 
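The two mechanisms described above, compounding an additional shedding percentage onto the current one and hashing digests against a temporal seed, can be checked with a small sketch. Function names are illustrative; only XxHash64 comes from the code above:

use std::hash::Hasher;

// If we already shed 40% and need 10% more of the remaining traffic,
// 40 + (100 - 40) * 10 / 100 = 46, matching the worked example above.
fn compose_shedding(current: u32, additional: u32) -> u32 {
    current + (100 - current) * additional / 100
}

// Deterministic, stateless rejection: hash the digest with a time-derived
// seed and reject the lowest `percentage`% of the hash space. A shared
// time-based seed means every validator makes the same decision, and a
// rotating seed gives a rejected transaction's retry a chance later.
fn should_reject(percentage: u32, digest: u64, temporal_seed: u64) -> bool {
    let mut hasher = twox_hash::XxHash64::with_seed(temporal_seed);
    hasher.write(&digest.to_le_bytes());
    hasher.finish() % 100 < percentage as u64
}

fn main() {
    assert_eq!(compose_shedding(40, 10), 46);
    // Roughly `percentage`% of random digests should be rejected.
    let rejected = (0..10_000u64).filter(|d| should_reject(25, *d, 7)).count();
    assert!(rejected > 2_000 && rejected < 3_000);
}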
mod tests { - use super::*; + use std::sync::Arc; - use crate::authority::test_authority_builder::TestAuthorityBuilder; use rand::{ rngs::{OsRng, StdRng}, Rng, SeedableRng, }; - use std::sync::Arc; use sui_macros::sim_test; - use tokio::sync::mpsc::unbounded_channel; - use tokio::sync::mpsc::UnboundedReceiver; - use tokio::sync::mpsc::UnboundedSender; - use tokio::sync::oneshot; - use tokio::task::JoinHandle; - use tokio::time::{interval, Instant, MissedTickBehavior}; + use tokio::{ + sync::{ + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot, + }, + task::JoinHandle, + time::{interval, Instant, MissedTickBehavior}, + }; + + use super::*; + use crate::authority::test_authority_builder::TestAuthorityBuilder; #[test] pub fn test_authority_overload_info() { @@ -343,35 +359,37 @@ mod tests { ..Default::default() }; - // When execution queueing latency is within soft limit, don't start overload protection. + // When execution queueing latency is within soft limit, don't start overload + // protection. assert_eq!( check_overload_signals(&config, 0, Duration::from_millis(500), 1000.0, 10.0), (false, 0) ); - // When execution queueing latency hits soft limit and execution rate is higher, don't - // start overload protection. + // When execution queueing latency hits soft limit and execution rate is higher, + // don't start overload protection. assert_eq!( check_overload_signals(&config, 0, Duration::from_secs(2), 100.0, 120.0), (false, 0) ); - // When execution queueing latency hits soft limit, but not hard limit, start overload - // protection. + // When execution queueing latency hits soft limit, but not hard limit, start + // overload protection. assert_eq!( check_overload_signals(&config, 0, Duration::from_secs(2), 100.0, 100.0), (true, 7) ); - // When execution queueing latency hits hard limit, start more aggressive overload - // protection. + // When execution queueing latency hits hard limit, start more aggressive + // overload protection. assert_eq!( check_overload_signals(&config, 0, Duration::from_secs(11), 100.0, 100.0), (true, 50) ); - // When execution queueing latency hits hard limit and calculated shedding percentage - // is higher than min_load_shedding_percentage_above_hard_limit. + // When execution queueing latency hits hard limit and calculated shedding + // percentage is higher than + // min_load_shedding_percentage_above_hard_limit. assert_eq!( check_overload_signals(&config, 0, Duration::from_secs(11), 240.0, 100.0), (true, 62) @@ -390,15 +408,16 @@ mod tests { (true, 90) ); - // When the system is already shedding 50% of load, and the current txn ready rate - // and execution rate require another 20%, the final shedding rate is 60%. + // When the system is already shedding 50% of load, and the current txn ready + // rate and execution rate require another 20%, the final shedding rate + // is 60%. assert_eq!( check_overload_signals(&config, 50, Duration::from_secs(2), 116.0, 100.0), (true, 60) ); - // Load shedding percentage is gradually reduced when txn ready rate is lower than - // execution rate. + // Load shedding percentage is gradually reduced when txn ready rate is lower + // than execution rate. assert_eq!( check_overload_signals(&config, 90, Duration::from_secs(2), 200.0, 300.0), (true, 80) @@ -446,7 +465,8 @@ mod tests { assert!(!check_authority_overload(&authority, &config)); } - // Creates an AuthorityState and starts an overload monitor that monitors its metrics. 
+ // Creates an AuthorityState and starts an overload monitor that monitors its + // metrics. async fn start_overload_monitor() -> (Arc, JoinHandle<()>) { let overload_config = AuthorityOverloadConfig::default(); let state = TestAuthorityBuilder::new() @@ -460,8 +480,8 @@ mod tests { (state, monitor_handle) } - // Starts a load generator that generates a steady workload, and also allow it to accept - // burst of request through `burst_rx`. + // Starts a load generator that generates a steady workload, and also allow it + // to accept burst of request through `burst_rx`. // Request tracking is done by the overload monitor inside `authority`. fn start_load_generator( steady_rate: f64, @@ -575,8 +595,9 @@ mod tests { } } - // Running a workload with consistent steady `generator_rate` and `executor_rate`. - // It checks that the dropped requests should in between min_dropping_rate and max_dropping_rate. + // Running a workload with consistent steady `generator_rate` and + // `executor_rate`. It checks that the dropped requests should in between + // min_dropping_rate and max_dropping_rate. async fn run_consistent_workload_test( generator_rate: f64, executor_rate: f64, @@ -616,15 +637,16 @@ mod tests { let _ = monitor_handle.await; } - // Tests that when request generation rate is slower than execution rate, no requests should be dropped. + // Tests that when request generation rate is slower than execution rate, no + // requests should be dropped. #[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_workload_consistent_no_overload() { telemetry_subscribers::init_for_testing(); run_consistent_workload_test(900.0, 1000.0, 0.0, 0.0).await; } - // Tests that when request generation rate is slightly above execution rate, a small portion of - // requests should be dropped. + // Tests that when request generation rate is slightly above execution rate, a + // small portion of requests should be dropped. #[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_workload_consistent_slightly_overload() { telemetry_subscribers::init_for_testing(); @@ -632,8 +654,8 @@ mod tests { run_consistent_workload_test(1100.0, 1000.0, 0.05, 0.25).await; } - // Tests that when request generation rate is much higher than execution rate, a large portion of - // requests should be dropped. + // Tests that when request generation rate is much higher than execution rate, a + // large portion of requests should be dropped. #[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_workload_consistent_overload() { telemetry_subscribers::init_for_testing(); @@ -641,7 +663,8 @@ mod tests { run_consistent_workload_test(3000.0, 1000.0, 0.6, 0.8).await; } - // Tests that when there is a very short single spike, no request should be dropped. + // Tests that when there is a very short single spike, no request should be + // dropped. #[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_workload_single_spike() { telemetry_subscribers::init_for_testing(); @@ -679,8 +702,8 @@ mod tests { let _ = monitor_handle.await; } - // Tests that when there are regular spikes that keep queueing latency consistently high, - // overload monitor should kick in and shed load. + // Tests that when there are regular spikes that keep queueing latency + // consistently high, overload monitor should kick in and shed load. 
#[tokio::test(flavor = "current_thread", start_paused = true)] pub async fn test_workload_consistent_short_spike() { telemetry_subscribers::init_for_testing(); @@ -715,8 +738,8 @@ mod tests { let dropped_ratio = dropped_requests.load(Ordering::SeqCst) as f64 / total_requests.load(Ordering::SeqCst) as f64; - // We should drop about 50% of request because the burst throughput is about 2x of - // execution rate. + // We should drop about 50% of requests because the burst throughput is about 2x + // the execution rate. assert!(0.4 < dropped_ratio); assert!(dropped_ratio < 0.6); @@ -724,8 +747,8 @@ mod tests { let _ = monitor_handle.await; } - // Tests that the ratio of rejected transactions created randomly matches load shedding percentage in - // the overload monitor. + // Tests that the ratio of rejected transactions created randomly matches load + // shedding percentage in the overload monitor. #[test] fn test_txn_rejection_rate() { for rejection_percentage in 0..=100 { @@ -747,7 +770,8 @@ mod tests { } } - // Tests that rejected transaction will have a chance to be accepted in the future. + // Tests that rejected transactions will have a chance to be accepted in the + // future. #[sim_test] async fn test_txn_rejection_over_time() { let start_time = Instant::now(); diff --git a/crates/sui-core/src/post_consensus_tx_reorder.rs b/crates/sui-core/src/post_consensus_tx_reorder.rs index 03a5dbe28e2..7bbfe047173 100644 --- a/crates/sui-core/src/post_consensus_tx_reorder.rs +++ b/crates/sui-core/src/post_consensus_tx_reorder.rs @@ -1,13 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::consensus_handler::{ SequencedConsensusTransactionKind, VerifiedSequencedConsensusTransaction, }; use mysten_metrics::monitored_scope; use sui_protocol_config::ConsensusTransactionOrdering; use sui_types::messages_consensus::{ConsensusTransaction, ConsensusTransactionKind}; +use crate::consensus_handler::{ + SequencedConsensusTransactionKind, VerifiedSequencedConsensusTransaction, +}; + pub struct PostConsensusTxReorder {} impl PostConsensusTxReorder { @@ -15,9 +16,10 @@ impl PostConsensusTxReorder { transactions: &mut [VerifiedSequencedConsensusTransaction], kind: ConsensusTransactionOrdering, ) { - // TODO: make the reordering algorithm richer and depend on object hotness as well. - // Order transactions based on their gas prices. System transactions without gas price - // are put to the beginning of the sequenced_transactions vector. + // TODO: make the reordering algorithm richer and depend on object hotness as + // well. Order transactions based on their gas prices. System + // transactions without gas price are put to the beginning of the + // sequenced_transactions vector. match kind { ConsensusTransactionOrdering::ByGasPrice => Self::order_by_gas_price(transactions), ConsensusTransactionOrdering::None => (), } } fn order_by_gas_price(transactions: &mut [VerifiedSequencedConsensusTransaction]) { let _scope = monitored_scope("HandleConsensusOutput::order_by_gas_price"); transactions.sort_by_key(|txn| { - // Reverse order, so that transactions with higher gas price are put to the beginning. + // Reverse order, so that transactions with higher gas price are put to the + // beginning.
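            // std::cmp::Reverse flips the Ord comparison, so sorting by
            // Reverse(gas_price) below is equivalent to sorting by gas_price
            // in descending order.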
std::cmp::Reverse({ match &txn.0.transaction { SequencedConsensusTransactionKind::External(ConsensusTransaction { diff --git a/crates/sui-core/src/quorum_driver/metrics.rs b/crates/sui-core/src/quorum_driver/metrics.rs index 0f08b7b1106..dc3a9cd46c2 100644 --- a/crates/sui-core/src/quorum_driver/metrics.rs +++ b/crates/sui-core/src/quorum_driver/metrics.rs @@ -2,14 +2,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use mysten_metrics::histogram::Histogram; use prometheus::{ register_histogram_vec_with_registry, register_int_counter_vec_with_registry, register_int_counter_with_registry, register_int_gauge_with_registry, HistogramVec, IntCounter, IntCounterVec, IntGauge, Registry, }; -use mysten_metrics::histogram::Histogram; - const FINALITY_LATENCY_SEC_BUCKETS: &[f64] = &[ 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, diff --git a/crates/sui-core/src/quorum_driver/mod.rs b/crates/sui-core/src/quorum_driver/mod.rs index e3685381919..e07ebd4eac0 100644 --- a/crates/sui-core/src/quorum_driver/mod.rs +++ b/crates/sui-core/src/quorum_driver/mod.rs @@ -6,40 +6,47 @@ pub use metrics::*; pub mod reconfig_observer; -use arc_swap::ArcSwap; -use std::collections::{BTreeMap, BTreeSet}; -use std::fmt::{Debug, Formatter}; -use std::sync::Arc; -use std::time::Duration; -use sui_types::base_types::{AuthorityName, ObjectRef, TransactionDigest}; -use sui_types::committee::{Committee, EpochId, StakeUnit}; -use sui_types::quorum_driver_types::{ - QuorumDriverEffectsQueueResult, QuorumDriverError, QuorumDriverResponse, QuorumDriverResult, +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt::{Debug, Formatter, Write}, + sync::Arc, + time::Duration, }; -use tap::TapFallible; -use tokio::sync::Semaphore; -use tokio::time::{sleep_until, Instant}; - -use tokio::sync::mpsc::{self, Receiver, Sender}; -use tokio::task::JoinHandle; -use tracing::Instrument; -use tracing::{debug, error, info, warn}; -use crate::authority_aggregator::{ - AggregatorProcessCertificateError, AggregatorProcessTransactionError, AuthorityAggregator, - ProcessTransactionResult, -}; -use crate::authority_client::AuthorityAPI; +use arc_swap::ArcSwap; use mysten_common::sync::notify_read::{NotifyRead, Registration}; use mysten_metrics::{ spawn_monitored_task, GaugeGuard, TX_TYPE_SHARED_OBJ_TX, TX_TYPE_SINGLE_WRITER_TX, }; -use std::fmt::Write; -use sui_types::error::{SuiError, SuiResult}; -use sui_types::messages_safe_client::PlainTransactionInfoResponse; -use sui_types::transaction::{CertifiedTransaction, Transaction}; +use sui_types::{ + base_types::{AuthorityName, ObjectRef, TransactionDigest}, + committee::{Committee, EpochId, StakeUnit}, + error::{SuiError, SuiResult}, + messages_safe_client::PlainTransactionInfoResponse, + quorum_driver_types::{ + QuorumDriverEffectsQueueResult, QuorumDriverError, QuorumDriverResponse, QuorumDriverResult, + }, + transaction::{CertifiedTransaction, Transaction}, +}; +use tap::TapFallible; +use tokio::{ + sync::{ + mpsc::{self, Receiver, Sender}, + Semaphore, + }, + task::JoinHandle, + time::{sleep_until, Instant}, +}; +use tracing::{debug, error, info, warn, Instrument}; use self::reconfig_observer::ReconfigObserver; +use crate::{ + authority_aggregator::{ + AggregatorProcessCertificateError, AggregatorProcessTransactionError, AuthorityAggregator, + ProcessTransactionResult, + }, + authority_client::AuthorityAPI, +}; #[cfg(test)] 
mod tests; @@ -151,7 +158,8 @@ impl QuorumDriver { .await } - /// Performs exponential backoff and enqueue the `transaction` to the execution queue. + /// Performs exponential backoff and enqueues the `transaction` to the + /// execution queue. async fn backoff_and_enqueue( &self, transaction: Transaction, @@ -205,8 +213,9 @@ impl QuorumDriver { if total_attempts > 1 { self.metrics.current_transactions_in_retry.dec(); } - // On fullnode we expect the send to always succeed because TransactionOrchestrator should be subscribing - // to this queue all the time. However the if QuorumDriver is used elsewhere log may be noisy. + // On fullnode we expect the send to always succeed because + // TransactionOrchestrator should be subscribing to this queue all the + // time. However, if QuorumDriver is used elsewhere, the log may be noisy. if let Err(err) = self.effects_subscribe_sender.send(effects_queue_result) { warn!(?tx_digest, "No subscriber found for effects: {}", err); } @@ -238,8 +247,9 @@ where Ok(ticket) } - // Used when the it is called in a component holding the notifier, and a ticket is - // already obtained prior to calling this function, for instance, TransactionOrchestrator + // Used when it is called in a component holding the notifier, and a ticket + // is already obtained prior to calling this function, for instance, + // TransactionOrchestrator pub async fn submit_transaction_no_ticket(&self, transaction: Transaction) -> SuiResult<()> { let tx_digest = transaction.digest(); debug!( @@ -302,8 +312,9 @@ where ) .await } else { - // If no retryable conflicting transaction was returned that means we have >= 2f+1 good stake for - // the original transaction + retryable stake. Will continue to retry the original transaction. + // If no retryable conflicting transaction was returned that means we have >= + // 2f+1 good stake for the original transaction + retryable + // stake. Will continue to retry the original transaction. debug!( ?errors, "Observed Tx {tx_digest:} is still in retryable state. Conflicting Txes: {conflicting_tx_digests:?}", ); @@ -387,7 +398,8 @@ where (Vec<(AuthorityName, ObjectRef)>, StakeUnit), >, ) -> Result> { - // Safe to unwrap because tx_digest_to_retry is generated from conflicting_tx_digests + // Safe to unwrap because tx_digest_to_retry is generated from + // conflicting_tx_digests // in ProcessTransactionState::conflicting_tx_digest_with_most_stake() let (validators, _) = conflicting_tx_digests.get(&conflicting_tx_digest).unwrap(); let attempt_result = self @@ -484,8 +496,8 @@ where self.validators.store(new_validators); } - /// Returns Some(true) if the conflicting transaction is executed successfully - /// (or already executed), or Some(false) if it did not. + /// Returns Some(true) if the conflicting transaction is executed + /// successfully (or already executed), or Some(false) if it was not. async fn attempt_conflicting_transaction( &self, tx_digest: &TransactionDigest, @@ -502,15 +514,16 @@ where ) .await?; - // If we are able to get a certificate right away, we use it and execute the cert; - // otherwise, we have to re-form a cert and execute it. + // If we are able to get a certificate right away, we use it and execute the + // cert; otherwise, we have to re-form a cert and execute it.
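        // The match below distinguishes the two outcomes just described:
        // ExecutedWithCert already carries a certificate that can be submitted
        // for execution directly, while any other response requires re-forming
        // a certificate before it can be executed.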
let transaction = match response { PlainTransactionInfoResponse::ExecutedWithCert(cert, _, _) => { self.metrics .total_times_conflicting_transaction_already_finalized_when_retrying .inc(); - // We still want to ask validators to execute this certificate in case this certificate is not - // known to the rest of them (e.g. when *this* validator is bad). + // We still want to ask validators to execute this certificate in case this + // certificate is not known to the rest of them (e.g. when + // *this* validator is bad). let result = self .validators .load() @@ -624,8 +637,9 @@ where } } - // Used when the it is called in a component holding the notifier, and a ticket is - // already obtained prior to calling this function, for instance, TransactionOrchestrator + // Used when it is called in a component holding the notifier, and a ticket + // is already obtained prior to calling this function, for instance, + // TransactionOrchestrator pub async fn submit_transaction_no_ticket(&self, transaction: Transaction) -> SuiResult<()> { self.quorum_driver .submit_transaction_no_ticket(transaction) .await } @@ -639,10 +653,11 @@ where self.quorum_driver.submit_transaction(transaction).await } - /// Create a new `QuorumDriverHandler` based on the same AuthorityAggregator. - /// Note: the new `QuorumDriverHandler` will have a new `ArcSwap` - /// that is NOT tied to the original one. So if there are multiple QuorumDriver(Handler) - /// then all of them need to do reconfigs on their own. + /// Create a new `QuorumDriverHandler` based on the same + /// AuthorityAggregator. Note: the new `QuorumDriverHandler` will have a + /// new `ArcSwap` that is NOT tied to the original + /// one. So if there are multiple QuorumDriver(Handler) then all of them + /// need to do reconfigs on their own. pub fn clone_new(&self) -> Self { let (task_sender, task_rx) = mpsc::channel::(TASK_QUEUE_SIZE); let (effects_subscribe_sender, subscriber_rx) = @@ -704,8 +719,8 @@ where } /// Process a QuorumDriverTask. - /// The function has no return value - the corresponding actions of task result - /// are performed in this call. + /// The function has no return value - the corresponding actions of task + /// result are performed in this call. async fn process_task(quorum_driver: Arc>, task: QuorumDriverTask) { debug!(?task, "Quorum Driver processing task"); let QuorumDriverTask { @@ -815,10 +830,12 @@ where )); } Some(QuorumDriverError::SystemOverloadRetryAfter { .. }) => { - // Special case for SystemOverloadRetryAfter error. In this case, due to that objects are already - // locked inside validators, we need to perform continuous retry and ignore `max_retry_times`. - // TODO: the txn can potentially be retried unlimited times, therefore, we need to bound the number - // of on going transactions in a quorum driver. When the limit is reached, the quorum driver should + // Special case for SystemOverloadRetryAfter error. In this case, because + // objects are already locked inside validators, we need to + // perform continuous retry and ignore `max_retry_times`. + // TODO: the txn can potentially be retried unlimited times, therefore, we need + // to bound the number of ongoing transactions in a quorum + // driver. When the limit is reached, the quorum driver should // reject any new transaction requests.
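The special casing above reduces to two small rules: backoff grows exponentially between attempts, and only non-overload transient errors count against `max_retry_times`. A minimal sketch of that policy with hypothetical names and constants (the actual schedule lives in `backoff_and_enqueue`):

```rust
use std::time::Duration;

// Exponential backoff with a cap, so the delay cannot grow without bound.
fn next_backoff(old_retry_times: u32) -> Duration {
    let exp = old_retry_times.min(10);
    Duration::from_millis(100u64.saturating_mul(1 << exp))
}

// SystemOverloadRetryAfter bypasses the attempt cap: the objects are already
// locked on validators, so giving up would strand them. Other transient
// failures stop once `max_retry_times` is exhausted.
fn should_retry(is_overload_error: bool, old_retry_times: u32, max_retry_times: u32) -> bool {
    is_overload_error || old_retry_times < max_retry_times
}
```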
debug!(?tx_digest, "Failed to {action} - Retrying"); spawn_monitored_task!(quorum_driver.backoff_and_enqueue( @@ -829,7 +846,8 @@ where } Some(qd_error) => { debug!(?tx_digest, "Failed to {action}: {}", qd_error); - // non-retryable failure, this task reaches terminal state for now, notify waiter. + // non-retryable failure, this task reaches terminal state for now, notify + // waiter. quorum_driver.notify(&transaction, &Err(qd_error), old_retry_times + 1); } } diff --git a/crates/sui-core/src/quorum_driver/reconfig_observer.rs b/crates/sui-core/src/quorum_driver/reconfig_observer.rs index 41eeee9810d..a68fd029a1c 100644 --- a/crates/sui-core/src/quorum_driver/reconfig_observer.rs +++ b/crates/sui-core/src/quorum_driver/reconfig_observer.rs @@ -1,12 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use async_trait::async_trait; use std::sync::Arc; + +use async_trait::async_trait; use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait}; use tokio::sync::broadcast::error::RecvError; use tracing::{info, warn}; +use super::QuorumDriver; use crate::{ authority_aggregator::{AuthAggMetrics, AuthorityAggregator}, authority_client::{AuthorityAPI, NetworkAuthorityClient}, @@ -15,8 +17,6 @@ use crate::{ safe_client::SafeClientMetricsBase, }; -use super::QuorumDriver; - #[async_trait] pub trait ReconfigObserver { async fn run(&mut self, quorum_driver: Arc>); diff --git a/crates/sui-core/src/quorum_driver/tests.rs b/crates/sui-core/src/quorum_driver/tests.rs index 169540c1427..c59edb9d205 100644 --- a/crates/sui-core/src/quorum_driver/tests.rs +++ b/crates/sui-core/src/quorum_driver/tests.rs @@ -1,24 +1,28 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::quorum_driver::reconfig_observer::DummyReconfigObserver; -use crate::quorum_driver::{AuthorityAggregator, QuorumDriverHandlerBuilder}; -use crate::test_authority_clients::LocalAuthorityClient; -use crate::test_authority_clients::LocalAuthorityClientFaultConfig; -use crate::test_utils::make_transfer_sui_transaction; -use crate::{quorum_driver::QuorumDriverMetrics, test_utils::init_local_authorities}; +use std::{sync::Arc, time::Duration}; + use mysten_common::sync::notify_read::{NotifyRead, Registration}; -use std::sync::Arc; -use std::time::Duration; -use sui_types::base_types::SuiAddress; -use sui_types::base_types::TransactionDigest; -use sui_types::crypto::{deterministic_random_account_key, get_key_pair, AccountKeyPair}; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::object::{generate_test_gas_objects, Object}; -use sui_types::quorum_driver_types::{QuorumDriverError, QuorumDriverResponse, QuorumDriverResult}; -use sui_types::transaction::Transaction; +use sui_types::{ + base_types::{SuiAddress, TransactionDigest}, + crypto::{deterministic_random_account_key, get_key_pair, AccountKeyPair}, + effects::TransactionEffectsAPI, + object::{generate_test_gas_objects, Object}, + quorum_driver_types::{QuorumDriverError, QuorumDriverResponse, QuorumDriverResult}, + transaction::Transaction, +}; use tokio::time::timeout; +use crate::{ + quorum_driver::{ + reconfig_observer::DummyReconfigObserver, AuthorityAggregator, QuorumDriverHandlerBuilder, + QuorumDriverMetrics, + }, + test_authority_clients::{LocalAuthorityClient, LocalAuthorityClientFaultConfig}, + test_utils::{init_local_authorities, make_transfer_sui_transaction}, +}; + async fn setup() -> (AuthorityAggregator, Transaction) { let (sender, keypair): (_, AccountKeyPair) = get_key_pair(); let 
gas_object = Object::with_owner_for_testing(sender); @@ -185,16 +189,22 @@ async fn test_quorum_driver_update_validators_and_max_retry_times() { // This now will fail due to server/client epoch mismatch: // server's epoch is 0 but client's is 10 - // This error should not happen in practice for benign validators and a working client + // This error should not happen in practice for benign validators and a working + // client let ticket = quorum_driver.submit_transaction(tx).await.unwrap(); // We have a timeout here to make the test fail fast if fails match tokio::time::timeout(Duration::from_secs(20), ticket).await { - Ok(Err(QuorumDriverError::FailedWithTransientErrorAfterMaximumAttempts { total_attempts })) => assert_eq!(total_attempts, 4), - _ => panic!("The transaction should err on SafeClient epoch check mismatch, be retried 3 times and raise QuorumDriverError::FailedWithTransientErrorAfterMaximumAttempts error"), + Ok(Err(QuorumDriverError::FailedWithTransientErrorAfterMaximumAttempts { + total_attempts, + })) => assert_eq!(total_attempts, 4), + _ => panic!( + "The transaction should err on SafeClient epoch check mismatch, be retried 3 times and raise QuorumDriverError::FailedWithTransientErrorAfterMaximumAttempts error" + ), }; }); - // Update authority aggregator with a new epoch number, and let quorum driver know. + // Update authority aggregator with a new epoch number, and let quorum driver + // know. let mut committee = aggregator.clone_inner_committee_test_only(); committee.epoch = 10; aggregator.committee = Arc::new(committee); @@ -263,9 +273,9 @@ async fn test_quorum_driver_object_locked() -> Result<(), anyhow::Error> { let res = quorum_driver.submit_transaction(tx2).await.unwrap().await; // Aggregator waits for all responses when it sees a conflicting tx and because - // there are not enough retryable errors to push the original tx or the most staked - // conflicting tx >= 2f+1 stake. Neither transaction can be retried due to client - // double spend and this is a fatal error. + // there are not enough retryable errors to push the original tx or the most + // staked conflicting tx >= 2f+1 stake. Neither transaction can be retried + // due to client double spend and this is a fatal error. if let Err(QuorumDriverError::ObjectsDoubleUsed { conflicting_txes, retried_tx, @@ -331,7 +341,9 @@ async fn test_quorum_driver_object_locked() -> Result<(), anyhow::Error> { let QuorumDriverResponse { effects_cert, .. } = res; assert_eq!(*effects_cert.transaction_digest(), tx2_digest); - println!("Case 3 - object is locked by 2 txes with weight 2 and 1 respectivefully. Then try to execute the third txn"); + println!( + "Case 3 - object is locked by 2 txes with weight 2 and 1 respectively. 
Then try to execute the third txn" + ); let gas = gas_objects.pop().unwrap(); let tx = make_tx(&gas, sender, &keypair, rgp); let tx2 = make_tx(&gas, sender, &keypair, rgp); @@ -363,7 +375,9 @@ async fn test_quorum_driver_object_locked() -> Result<(), anyhow::Error> { ) } - println!("Case 4 - object is locked by 2 txes with weight 2 and 1, try to execute the lighter stake tx"); + println!( + "Case 4 - object is locked by 2 txes with weight 2 and 1, try to execute the lighter stake tx" + ); let gas = gas_objects.pop().unwrap(); let tx = make_tx(&gas, sender, &keypair, rgp); let tx2 = make_tx(&gas, sender, &keypair, rgp); @@ -389,7 +403,9 @@ async fn test_quorum_driver_object_locked() -> Result<(), anyhow::Error> { ) } - println!("Case 5 - object is locked by 2 txes with weight 2 and 1, try to execute the heavier stake tx"); + println!( + "Case 5 - object is locked by 2 txes with weight 2 and 1, try to execute the heavier stake tx" + ); let gas = gas_objects.pop().unwrap(); let tx = make_tx(&gas, sender, &keypair, rgp); let tx_digest = *tx.digest(); @@ -434,10 +450,12 @@ async fn test_quorum_driver_object_locked() -> Result<(), anyhow::Error> { assert_eq!(retried_tx, None); assert_eq!(retried_tx_success, None); assert!(conflicting_txes.len() == 3 || conflicting_txes.len() == 2); - assert!(conflicting_txes - .iter() - .all(|(digest, (_objs, stake))| (*stake == 2500) - && (digest == tx.digest() || digest == tx2.digest() || digest == tx3.digest()))); + assert!( + conflicting_txes + .iter() + .all(|(digest, (_objs, stake))| (*stake == 2500) + && (digest == tx.digest() || digest == tx2.digest() || digest == tx3.digest())) + ); } else { panic!( "expect Err(QuorumDriverError::ObjectsDoubleUsed) but got {:?}", @@ -448,7 +466,8 @@ async fn test_quorum_driver_object_locked() -> Result<(), anyhow::Error> { Ok(()) } -// Tests that quorum driver can continuously retry txn with SystemOverloadedRetryAfter error. +// Tests that quorum driver can continuously retry txn with +// SystemOverloadedRetryAfter error. #[tokio::test(flavor = "current_thread", start_paused = true)] async fn test_quorum_driver_handling_overload_and_retry() { telemetry_subscribers::init_for_testing(); @@ -459,7 +478,8 @@ async fn test_quorum_driver_handling_overload_and_retry() { let (mut aggregator, authorities, genesis, _) = init_local_authorities(4, vec![gas_object.clone()]).await; - // Make local authority client to always return SystemOverloadedRetryAfter error. + // Make the local authority client always return SystemOverloadedRetryAfter + // error. let fault_config = LocalAuthorityClientFaultConfig { overload_retry_after_handle_transaction: true, ..Default::default() diff --git a/crates/sui-core/src/runtime.rs b/crates/sui-core/src/runtime.rs index 5393e6e28eb..56876044b25 100644 --- a/crates/sui-core/src/runtime.rs +++ b/crates/sui-core/src/runtime.rs @@ -1,8 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::env; -use std::str::FromStr; +use std::{env, str::FromStr}; + use sui_config::NodeConfig; use tap::TapFallible; use tokio::runtime::Runtime; diff --git a/crates/sui-core/src/safe_client.rs b/crates/sui-core/src/safe_client.rs index 94e7ff9ec22..2992307e8d5 100644 --- a/crates/sui-core/src/safe_client.rs +++ b/crates/sui-core/src/safe_client.rs @@ -2,31 +2,35 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::authority_client::AuthorityAPI; -use crate::epoch::committee_store::CommitteeStore; -use mysten_metrics::histogram::{Histogram, HistogramVec}; -use prometheus::core::GenericCounter; -use prometheus::{register_int_counter_vec_with_registry, IntCounterVec, Registry}; use std::sync::Arc; -use sui_types::crypto::AuthorityPublicKeyBytes; -use sui_types::effects::{SignedTransactionEffects, TransactionEffectsAPI}; -use sui_types::messages_checkpoint::{ CertifiedCheckpointSummary, CheckpointRequest, CheckpointResponse, CheckpointSequenceNumber, }; -use sui_types::messages_grpc::{ HandleCertificateResponseV2, ObjectInfoRequest, ObjectInfoResponse, SystemStateRequest, TransactionInfoRequest, TransactionStatus, VerifiedObjectInfoResponse, + +use mysten_metrics::histogram::{Histogram, HistogramVec}; +use prometheus::{ + core::GenericCounter, register_int_counter_vec_with_registry, IntCounterVec, Registry, }; -use sui_types::messages_safe_client::PlainTransactionInfoResponse; -use sui_types::sui_system_state::SuiSystemState; -use sui_types::{base_types::*, committee::*, fp_ensure}; use sui_types::{ + base_types::*, + committee::*, + crypto::AuthorityPublicKeyBytes, + effects::{SignedTransactionEffects, TransactionEffectsAPI}, error::{SuiError, SuiResult}, + fp_ensure, + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointRequest, CheckpointResponse, CheckpointSequenceNumber, + }, + messages_grpc::{ + HandleCertificateResponseV2, ObjectInfoRequest, ObjectInfoResponse, SystemStateRequest, + TransactionInfoRequest, TransactionStatus, VerifiedObjectInfoResponse, + }, + messages_safe_client::PlainTransactionInfoResponse, + sui_system_state::SuiSystemState, transaction::*, }; use tap::TapFallible; use tracing::{debug, error}; +use crate::{authority_client::AuthorityAPI, epoch::committee_store::CommitteeStore}; + macro_rules! check_error { ($address:expr, $cond:expr, $msg:expr) => { $cond.tap_err(|err| { diff --git a/crates/sui-core/src/scoring_decision.rs b/crates/sui-core/src/scoring_decision.rs index 5d1a5a667e9..0ad798880ff 100644 --- a/crates/sui-core/src/scoring_decision.rs +++ b/crates/sui-core/src/scoring_decision.rs @@ -12,12 +12,14 @@ use crate::{ consensus_types::{committee_api::CommitteeAPI, AuthorityIndex}, }; -/// Updates list of authorities that are deemed to have low reputation scores by consensus -/// these may be lagging behind the network, byzantine, or not reliably participating for any reason. -/// The algorithm is flagging as low scoring authorities all the validators that have the lowest scores -/// up to the defined protocol_config.consensus_bad_nodes_stake_threshold. This is done to align the -/// submission side with the Narwhal leader election schedule. Practically we don't want to submit -/// transactions for sequencing to validators that have low scores and are not part of the leader +/// Updates the list of authorities that are deemed by consensus to have low +/// reputation scores; these may be lagging behind the network, byzantine, or +/// not reliably participating for any reason. The algorithm flags as +/// low-scoring all the validators that have the lowest scores, up to the +/// defined protocol_config.consensus_bad_nodes_stake_threshold. This is done to +/// align the submission side with the Narwhal leader election schedule. 
+/// Practically we don't want to submit transactions for sequencing to +/// validators that have low scores and are not part of the leader /// schedule since the chances of getting them sequenced are lower. pub(crate) fn update_low_scoring_authorities( low_scoring_authorities: Arc>>, @@ -26,14 +28,19 @@ pub(crate) fn update_low_scoring_authorities( metrics: &Arc, consensus_bad_nodes_stake_threshold: u64, ) { - assert!((0..=33).contains(&consensus_bad_nodes_stake_threshold), "The bad_nodes_stake_threshold should be in range [0 - 33], out of bounds parameter detected {}", consensus_bad_nodes_stake_threshold); + assert!( + (0..=33).contains(&consensus_bad_nodes_stake_threshold), + "The bad_nodes_stake_threshold should be in range [0 - 33], out of bounds parameter detected {}", + consensus_bad_nodes_stake_threshold + ); let Some(reputation_scores) = reputation_score_sorted_desc else { return; }; - // We order the authorities by score ascending order in the exact same way as the reputation - // scores do - so we keep complete alignment between implementations + // We order the authorities by score ascending order in the exact same way as + // the reputation scores do - so we keep complete alignment between + // implementations let scores_per_authority_order_asc: Vec<_> = reputation_scores .into_iter() .rev() // we reverse so we get them in asc order @@ -121,7 +128,8 @@ mod tests { let low_scoring = Arc::new(ArcSwap::from_pointee(HashMap::new())); let metrics = Arc::new(AuthorityMetrics::new(&Registry::new())); - // there is a low outlier in the non zero scores, exclude it as well as down nodes + // there is a low outlier in the non zero scores, exclude it as well as down + // nodes let mut scores = HashMap::new(); scores.insert(a1.id(), 350_u64); scores.insert(a2.id(), 390_u64); @@ -157,7 +165,7 @@ mod tests { assert_eq!(low_scoring.load().len(), 2); println!("low scoring {:?}", low_scoring.load()); assert_eq!( *low_scoring.load().get(&a3.protocol_key().into()).unwrap(), /* Since a3 & a4 have equal scores, we resolve the decision with a3.id < a4.id */ 50 ); assert_eq!( @@ -189,8 +197,9 @@ mod tests { ); } - /// Generate a random committee for the given size. It's important to create the Authorities - /// via the committee to ensure than an AuthorityIdentifier will be assigned, as this is dynamically + /// Generate a random committee for the given size. It's important to create + /// the Authorities via the committee to ensure that an + /// AuthorityIdentifier will be assigned, as this is dynamically /// calculated during committee creation. fn generate_committee(committee_size: usize) -> Arc { let mut committee_builder = CommitteeBuilder::new(0); diff --git a/crates/sui-core/src/signature_verifier.rs b/crates/sui-core/src/signature_verifier.rs index e256f15a3ae..48cd6cb6c48 100644 --- a/crates/sui-core/src/signature_verifier.rs +++ b/crates/sui-core/src/signature_verifier.rs @@ -1,10 +1,13 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{hash::Hash, sync::Arc}; + use either::Either; -use fastcrypto_zkp::bn254::zk_login::JwkId; -use fastcrypto_zkp::bn254::zk_login::{OIDCProvider, JWK}; -use fastcrypto_zkp::bn254::zk_login_api::ZkLoginEnv; +use fastcrypto_zkp::bn254::{ + zk_login::{JwkId, OIDCProvider, JWK}, + zk_login_api::ZkLoginEnv, +}; use futures::pin_mut; use im::hashmap::HashMap as ImHashMap; use itertools::izip; @@ -13,35 +16,32 @@ use mysten_metrics::monitored_scope; use parking_lot::{Mutex, MutexGuard, RwLock}; use prometheus::{register_int_counter_with_registry, IntCounter, Registry}; use shared_crypto::intent::Intent; -use std::hash::Hash; -use std::sync::Arc; -use sui_types::digests::SenderSignedDataDigest; -use sui_types::digests::ZKLoginInputsDigest; -use sui_types::transaction::SenderSignedData; use sui_types::{ committee::Committee, crypto::{AuthoritySignInfoTrait, VerificationObligation}, - digests::CertificateDigest, + digests::{CertificateDigest, SenderSignedDataDigest, ZKLoginInputsDigest}, error::{SuiError, SuiResult}, message_envelope::{AuthenticatedMessage, Message}, messages_checkpoint::SignedCheckpointSummary, signature::VerifyParams, - transaction::{CertifiedTransaction, VerifiedCertificate}, + transaction::{CertifiedTransaction, SenderSignedData, VerifiedCertificate}, }; use tap::TapFallible; -use tokio::runtime::Handle; use tokio::{ + runtime::Handle, sync::oneshot, time::{timeout, Duration}, }; use tracing::debug; -// Maximum amount of time we wait for a batch to fill up before verifying a partial batch. +// Maximum amount of time we wait for a batch to fill up before verifying a +// partial batch. const BATCH_TIMEOUT_MS: Duration = Duration::from_millis(10); -// Maximum size of batch to verify. Increasing this value will slightly improve CPU utilization -// (batching starts to hit steeply diminishing marginal returns around batch sizes of 16), at the -// cost of slightly increasing latency (BATCH_TIMEOUT_MS will be hit more frequently if system is -// not heavily loaded). +// Maximum size of batch to verify. Increasing this value will slightly improve +// CPU utilization (batching starts to hit steeply diminishing marginal returns +// around batch sizes of 16), at the cost of slightly increasing latency +// (BATCH_TIMEOUT_MS will be hit more frequently if system is not heavily +// loaded). const MAX_BATCH_SIZE: usize = 8; type Sender = oneshot::Sender>; @@ -61,7 +61,8 @@ impl CertBuffer { } } - // Function consumes MutexGuard, therefore releasing the lock after mem swap is done + // Function consumes MutexGuard, therefore releasing the lock after mem swap is + // done fn take_and_replace(mut guard: MutexGuard<'_, Self>) -> Self { let this = &mut *guard; let mut new = CertBuffer::new(this.capacity()); @@ -86,7 +87,8 @@ impl CertBuffer { } } -/// Verifies signatures in ways that faster than verifying each signature individually. +/// Verifies signatures in ways that are faster than verifying each signature +/// individually. /// - BLS signatures - caching and batch verification. /// - User signed data - caching. pub struct SignatureVerifier { @@ -96,13 +98,15 @@ pub struct SignatureVerifier { zklogin_inputs_cache: VerifiedDigestCache, /// Map from JwkId (iss, kid) to the fetched JWK for that key. - /// We use an immutable data structure because verification of ZKLogins may be slow, so we - /// don't want to pass a reference to the map to the verify method, since that would lead to a - /// lengthy critical section. 
Instead, we use an immutable data structure which can be cloned - /// very cheaply. + /// We use an immutable data structure because verification of ZKLogins may + /// be slow, so we don't want to pass a reference to the map to the + /// verify method, since that would lead to a lengthy critical section. + /// Instead, we use an immutable data structure which can be cloned very + /// cheaply. jwks: RwLock>, - /// Params that contains a list of supported providers for ZKLogin and the environment (prod/test) the code runs in. + /// Params that contains a list of supported providers for ZKLogin and the + /// environment (prod/test) the code runs in. zk_login_params: ZkLoginParams, queue: Mutex, @@ -114,7 +118,8 @@ pub struct SignatureVerifier { struct ZkLoginParams { /// A list of supported OAuth providers for ZkLogin. pub supported_providers: Vec, - /// The environment (prod/test) the code runs in. It decides which verifying key to use in fastcrypto. + /// The environment (prod/test) the code runs in. It decides which verifying + /// key to use in fastcrypto. pub env: ZkLoginEnv, pub verify_legacy_zklogin_address: bool, pub accept_zklogin_in_multisig: bool, @@ -189,8 +194,9 @@ impl SignatureVerifier { .filter(|cert| !self.certificate_cache.is_cached(&cert.certificate_digest())) .collect(); - // Verify only the user sigs of certificates that were not cached already, since whenever we - // insert a certificate into the cache, it is already verified. + // Verify only the user sigs of certificates that were not cached already, since + // whenever we insert a certificate into the cache, it is already + // verified. for cert in &certs { self.verify_tx(cert.data())?; } @@ -217,8 +223,8 @@ impl SignatureVerifier { &self, cert: CertifiedTransaction, ) -> SuiResult { - // this is the only innocent error we are likely to encounter - filter it before we poison - // a whole batch. + // this is the only innocent error we are likely to encounter - filter it before + // we poison a whole batch. if cert.auth_sig().epoch != self.committee.epoch() { return Err(SuiError::WrongEpoch { expected_epoch: self.committee.epoch(), @@ -233,9 +239,10 @@ impl SignatureVerifier { &self, cert: CertifiedTransaction, ) -> SuiResult { - // Cancellation safety: we use parking_lot locks, which cannot be held across awaits. - // Therefore once the queue has been taken by a thread, it is guaranteed to process the - // queue and send all results before the future can be cancelled by the caller. + // Cancellation safety: we use parking_lot locks, which cannot be held across + // awaits. Therefore once the queue has been taken by a thread, it is + // guaranteed to process the queue and send all results before the + // future can be cancelled by the caller. let (tx, rx) = oneshot::channel(); pin_mut!(rx); @@ -282,8 +289,8 @@ impl SignatureVerifier { return rx.try_recv().unwrap(); } - // unwrap ok - another thread took the queue while we were re-acquiring the lock and is - // guaranteed to process the queue immediately. + // unwrap ok - another thread took the queue while we were re-acquiring the lock + // and is guaranteed to process the queue immediately. rx.await.unwrap() } @@ -324,8 +331,8 @@ impl SignatureVerifier { }); } - /// Insert a JWK into the verifier state. Pre-existing entries for a given JwkId will not be - /// overwritten. + /// Insert a JWK into the verifier state. Pre-existing entries for a given + /// JwkId will not be overwritten. 
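Given the contract stated above (pre-existing entries win), the body of `insert_jwk` reduces to an insert-if-absent on the map. A condensed illustration using std types as stand-ins for `JwkId`/`JWK` and the `im` map used by the real code:

```rust
use std::collections::HashMap;

// (iss, kid) stands in for JwkId; String stands in for the fetched JWK.
fn insert_jwk(jwks: &mut HashMap<(String, String), String>, id: (String, String), jwk: String) {
    // or_insert keeps whatever is already stored, so repeated fetches of the
    // same key id can never overwrite the first-seen JWK.
    jwks.entry(id).or_insert(jwk);
}
```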
pub(crate) fn insert_jwk(&self, jwk_id: &JwkId, jwk: &JWK) { let mut jwks = self.jwks.write(); match jwks.entry(jwk_id.clone()) { @@ -497,7 +504,8 @@ pub fn batch_verify_all_certificates_and_checkpoints( batch_verify(committee, certs, checkpoints) } -/// Verifies certificates in batch mode, but returns a separate result for each cert. +/// Verifies certificates in batch mode, but returns a separate result for each +/// cert. pub fn batch_verify_certificates( committee: &Committee, certs: &[CertifiedTransaction], @@ -547,9 +555,11 @@ fn batch_verify( obligation.verify_all() } -// Cache up to 20000 verified certs. We will need to tune this number in the future - a decent -// guess to start with is that it should be 10-20 times larger than peak transactions per second, -// on the assumption that we should see most certs twice within about 10-20 seconds at most: Once via RPC, once via consensus. +// Cache up to 20000 verified certs. We will need to tune this number in the +// future - a decent guess to start with is that it should be 10-20 times larger +// than peak transactions per second, on the assumption that we should see most +// certs twice within about 10-20 seconds at most: Once via RPC, once via +// consensus. const VERIFIED_CERTIFICATE_CACHE_SIZE: usize = 20000; pub struct VerifiedDigestCache { diff --git a/crates/sui-core/src/stake_aggregator.rs b/crates/sui-core/src/stake_aggregator.rs index 7b4910d23d5..c3025ca756c 100644 --- a/crates/sui-core/src/stake_aggregator.rs +++ b/crates/sui-core/src/stake_aggregator.rs @@ -1,22 +1,26 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + hash::Hash, + sync::Arc, +}; + use serde::Serialize; use shared_crypto::intent::Intent; -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, HashMap}; -use std::hash::Hash; -use std::sync::Arc; -use sui_types::base_types::AuthorityName; -use sui_types::base_types::ConciseableName; -use sui_types::committee::{Committee, CommitteeTrait, StakeUnit}; -use sui_types::crypto::{AuthorityQuorumSignInfo, AuthoritySignInfo, AuthoritySignInfoTrait}; -use sui_types::error::SuiError; -use sui_types::message_envelope::{Envelope, Message}; +use sui_types::{ + base_types::{AuthorityName, ConciseableName}, + committee::{Committee, CommitteeTrait, StakeUnit}, + crypto::{AuthorityQuorumSignInfo, AuthoritySignInfo, AuthoritySignInfoTrait}, + error::SuiError, + message_envelope::{Envelope, Message}, +}; use tracing::warn; -/// StakeAggregator allows us to keep track of the total stake of a set of validators. -/// STRENGTH indicates whether we want a strong quorum (2f+1) or a weak quorum (f+1). +/// StakeAggregator allows us to keep track of the total stake of a set of +/// validators. STRENGTH indicates whether we want a strong quorum (2f+1) or a +/// weak quorum (f+1). #[derive(Debug)] pub struct StakeAggregator { data: HashMap, @@ -24,10 +28,11 @@ pub struct StakeAggregator { committee: Arc, } -/// StakeAggregator is a utility data structure that allows us to aggregate a list of validator -/// signatures over time. A committee is used to determine whether we have reached sufficient -/// quorum (defined based on `STRENGTH`). The generic implementation does not require `S` to be -/// an actual signature, but just an indication that a specific validator has voted. A specialized +/// StakeAggregator is a utility data structure that allows us to aggregate a +/// list of validator signatures over time. 
A committee is used to determine +/// whether we have reached sufficient quorum (defined based on `STRENGTH`). The +/// generic implementation does not require `S` to be an actual signature, but +/// just an indication that a specific validator has voted. A specialized /// implementation for `AuthoritySignInfo` is followed below. impl StakeAggregator { pub fn new(committee: Arc) -> Self { @@ -50,10 +55,11 @@ } /// A generic version of inserting arbitrary type of V (e.g. void type). - /// If V is AuthoritySignInfo, the `insert` function should be used instead since it does extra - /// checks and aggregations in the end. + /// If V is AuthoritySignInfo, the `insert` function should be used instead + /// since it does extra checks and aggregations at the end. /// Returns Map authority -> S, without aggregating it. - /// If you want to get an aggregated signature instead, use `StakeAggregator::insert` + /// If you want to get an aggregated signature instead, use + /// `StakeAggregator::insert` pub fn insert_generic( &mut self, authority: AuthorityName, @@ -116,9 +122,10 @@ } impl StakeAggregator { - /// Insert an authority signature. This is the primary way to use the aggregator and a few - /// dedicated checks are performed to make sure things work. - /// If quorum is reached, we return AuthorityQuorumSignInfo directly. + /// Insert an authority signature. This is the primary way to use the + /// aggregator and a few dedicated checks are performed to make sure + /// things work. If quorum is reached, we return AuthorityQuorumSignInfo + /// directly. pub fn insert( &mut self, envelope: Envelope, @@ -144,18 +151,21 @@ Intent::sui_app(T::SCOPE), self.committee(), ) { - // In the happy path, the aggregated signature verifies ok and no need to verify - // individual. + // In the happy path, the aggregated signature verifies ok and there is + // no need to verify individual signatures. Ok(_) => InsertResult::QuorumReached(aggregated), Err(_) => { - // If the aggregated signature fails to verify, fallback to iterating through - // all signatures and verify individually. Decrement total votes and continue + // If the aggregated signature fails to verify, fallback to + // iterating through all signatures + // and verify individually. Decrement total votes and continue // to find new authority for signature to reach the quorum. // - // TODO(joyqvq): It is possible for the aggregated signature to fail every time - // when the latest one single signature fails to verify repeatedly, and trigger - // this for loop to run. This can be optimized by caching single sig verification - // result only verify the net new ones. + // TODO(joyqvq): It is possible for the aggregated signature to fail + // every time when the latest single signature repeatedly fails + // to verify, triggering this for loop to run. This can be + // optimized by caching single sig verification results and only + // verifying the net new ones. let mut bad_votes = 0; let mut bad_authorities = vec![]; for (name, sig) in &self.data.clone() { @@ -164,9 +174,12 @@ Intent::sui_app(T::SCOPE), self.committee(), ) { - // TODO(joyqvq): Currently, the aggregator cannot do much with an authority that - // always returns an invalid signature other than saving to errors in state. It - // is possible to add the authority to a denylist or punish the byzantine authority. 
+ // TODO(joyqvq): Currently, the aggregator cannot do much + // with an authority that + // always returns an invalid signature other than saving to + // errors in state. It + // is possible to add the authority to a denylist or punish + // the byzantine authority. warn!(name=?name.concise(), "Bad stake from validator: {:?}", err); self.data.remove(name); let votes = self.committee.weight(name); @@ -215,10 +228,11 @@ impl InsertResult { } } -/// MultiStakeAggregator is a utility data structure that tracks the stake accumulation of -/// potentially multiple different values (usually due to byzantine/corrupted responses). Each -/// value is tracked using a StakeAggregator and determine whether it has reached a quorum. -/// Once quorum is reached, the aggregated signature is returned. +/// MultiStakeAggregator is a utility data structure that tracks the stake +/// accumulation of potentially multiple different values (usually due to +/// byzantine/corrupted responses). Each value is tracked using a +/// StakeAggregator to determine whether it has reached a quorum. Once quorum +/// is reached, the aggregated signature is returned. #[derive(Debug)] pub struct MultiStakeAggregator { committee: Arc, @@ -261,8 +275,8 @@ where let mut new_entry = StakeAggregator::new(self.committee.clone()); let result = new_entry.insert(envelope.clone()); if !matches!(result, InsertResult::Failed { .. }) { - // This is very important: ensure that if the insert fails, we don't even add the - // new entry to the map. + // This is very important: ensure that if the insert fails, we don't even add + // the new entry to the map. self.stake_maps.insert(k, (envelope.into_data(), new_entry)); } result @@ -306,14 +320,16 @@ where .unwrap_or_default() } - /// If true, there isn't enough uncommitted stake to reach quorum for any value + /// If true, there isn't enough uncommitted stake to reach quorum for any + /// value pub fn quorum_unreachable(&self) -> bool { self.uncommitted_stake() + self.plurality_stake() < self.committee.threshold::() } } -/// Like MultiStakeAggregator, but for counting votes for a generic value instead of an envelope, in -/// scenarios where byzantine validators may submit multiple votes for different values. +/// Like MultiStakeAggregator, but for counting votes for a generic value +/// instead of an envelope, in scenarios where byzantine validators may submit +/// multiple votes for different values. pub struct GenericMultiStakeAggregator { committee: Arc, stake_maps: HashMap>, @@ -373,13 +389,15 @@ fn test_votes_per_authority() { let mut agg: GenericMultiStakeAggregator<&str, true> = GenericMultiStakeAggregator::new(Arc::new(committee)); - // 1. Inserting an `authority` and a `key`, and then checking the number of votes for that `authority`. + // 1. Inserting an `authority` and a `key`, and then checking the number of + // votes for that `authority`. let key1: &str = "key1"; let authority1 = authorities[0]; agg.insert(authority1, key1); assert_eq!(agg.votes_for_authority(authority1), 1); - // 2. Inserting the same `authority` and `key` pair multiple times to ensure votes aren't incremented incorrectly. + // 2. Inserting the same `authority` and `key` pair multiple times to ensure + // votes aren't incremented incorrectly. agg.insert(authority1, key1); agg.insert(authority1, key1); assert_eq!(agg.votes_for_authority(authority1), 1); @@ -394,7 +412,8 @@ fn test_votes_per_authority() { assert_eq!(agg.votes_for_authority(authority2), 1); assert_eq!(agg.votes_for_authority(authority1), 1); - // 5. 
Verifying that inserting different keys for the same authority increments the vote count. + // 5. Verifying that inserting different keys for the same authority increments + // the vote count. let key3: &str = "key3"; agg.insert(authority1, key3); assert_eq!(agg.votes_for_authority(authority1), 2); diff --git a/crates/sui-core/src/state_accumulator.rs b/crates/sui-core/src/state_accumulator.rs index 7d46b45ea39..858a6210b5b 100644 --- a/crates/sui-core/src/state_accumulator.rs +++ b/crates/sui-core/src/state_accumulator.rs @@ -1,37 +1,41 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use fastcrypto::hash::MultisetHash; use itertools::Itertools; use mysten_metrics::monitored_scope; use serde::Serialize; use sui_protocol_config::ProtocolConfig; -use sui_types::base_types::{ObjectID, ObjectRef, SequenceNumber, VersionNumber}; -use sui_types::committee::EpochId; -use sui_types::digests::{ObjectDigest, TransactionDigest}; -use sui_types::in_memory_storage::InMemoryStorage; -use sui_types::storage::{ObjectKey, ObjectStore}; +use sui_types::{ + accumulator::Accumulator, + base_types::{ObjectID, ObjectRef, SequenceNumber, VersionNumber}, + committee::EpochId, + digests::{ObjectDigest, TransactionDigest}, + effects::{TransactionEffects, TransactionEffectsAPI}, + error::SuiResult, + in_memory_storage::InMemoryStorage, + messages_checkpoint::{CheckpointSequenceNumber, ECMHLiveObjectSetDigest}, + storage::{ObjectKey, ObjectStore}, +}; use tracing::debug; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; - -use fastcrypto::hash::MultisetHash; -use sui_types::accumulator::Accumulator; -use sui_types::effects::TransactionEffects; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::error::SuiResult; -use sui_types::messages_checkpoint::{CheckpointSequenceNumber, ECMHLiveObjectSetDigest}; - -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use crate::authority::authority_store_tables::LiveObject; +use crate::authority::{ + authority_per_epoch_store::AuthorityPerEpochStore, authority_store_tables::LiveObject, +}; pub struct StateAccumulator { store: Arc, } pub trait AccumulatorStore: ObjectStore + Send + Sync { - /// This function is only called in older protocol versions, and should no longer be used. - /// It creates an explicit dependency to tombstones which is not desired. + /// This function is only called in older protocol versions, and should no + /// longer be used. It creates an explicit dependency to tombstones + /// which is not desired. fn get_object_ref_prior_to_key_deprecated( &self, object_id: &ObjectID, @@ -66,7 +70,9 @@ impl AccumulatorStore for InMemoryStorage { _object_id: &ObjectID, _version: VersionNumber, ) -> SuiResult> { - unreachable!("get_object_ref_prior_to_key is only called by accumulate_effects_v1, while InMemoryStorage is used by testing and genesis only, which always uses latest protocol ") + unreachable!( + "get_object_ref_prior_to_key is only called by accumulate_effects_v1, while InMemoryStorage is used by testing and genesis only, which always uses latest protocol " + ) } fn get_root_state_accumulator_for_epoch( @@ -160,8 +166,9 @@ where .collect::>(), ); - // insert wrapped tombstones. We use a custom struct in order to contain the tombstone - // against the object id and sequence number, as the tombstone by itself is not unique. + // insert wrapped tombstones. 
We use a custom struct in order to contain the + // tombstone against the object id and sequence number, as the tombstone by + // itself is not unique. acc.insert_all( effects .iter() @@ -201,9 +208,9 @@ where .collect(); // Collect keys from modified_at_versions to remove from the accumulator. - // Filter all unwrapped objects (from unwrapped or unwrapped_then_deleted effects) - // as these were inserted into the accumulator as a WrappedObject. Will handle these - // separately. + // Filter all unwrapped objects (from unwrapped or unwrapped_then_deleted + // effects) as these were inserted into the accumulator as a WrappedObject. + // Will handle these separately. let modified_at_version_keys: Vec = effects .iter() .flat_map(|fx| { @@ -240,9 +247,9 @@ where // removed as WrappedObject using the last sequence number it was tombstoned // against. Since this happened in a past transaction, and the child object may // have been modified since (and hence its sequence number incremented), we - // seek the version prior to the unwrapped version from the objects table directly. - // If the tombstone is not found, then assume this is a newly created wrapped object hence - // we don't expect to find it in the table. + // seek the version prior to the unwrapped version from the objects table + // directly. If the tombstone is not found, then assume this is a newly + // created wrapped object hence we don't expect to find it in the table. let wrapped_objects_to_remove: Vec = all_unwrapped .iter() .filter_map(|(_tx_digest, id, seq_num)| { @@ -351,7 +358,8 @@ impl StateAccumulator { Self { store } } - /// Accumulates the effects of a single checkpoint and persists the accumulator. + /// Accumulates the effects of a single checkpoint and persists the + /// accumulator. pub fn accumulate_checkpoint( &self, effects: Vec, @@ -375,7 +383,8 @@ impl StateAccumulator { Ok(acc) } - /// Accumulates given effects and returns the accumulator without side effects. + /// Accumulates given effects and returns the accumulator without side + /// effects. pub fn accumulate_effects( &self, effects: Vec, @@ -384,10 +393,10 @@ impl StateAccumulator { accumulate_effects(&*self.store, effects, protocol_config) } - /// Unions all checkpoint accumulators at the end of the epoch to generate the - /// root state hash and persists it to db. This function is idempotent. Can be called on - /// non-consecutive epochs, e.g. to accumulate epoch 3 after having last - /// accumulated epoch 1. + /// Unions all checkpoint accumulators at the end of the epoch to generate + /// the root state hash and persists it to db. This function is + /// idempotent. Can be called on non-consecutive epochs, e.g. to + /// accumulate epoch 3 after having last accumulated epoch 1. pub async fn accumulate_epoch( &self, epoch: &EpochId, @@ -459,7 +468,8 @@ impl StateAccumulator { Ok(root_state_accumulator) } - /// Returns the result of accumulating the live object set, without side effects + /// Returns the result of accumulating the live object set, without side + /// effects pub fn accumulate_live_object_set(&self, include_wrapped_tombstone: bool) -> Accumulator { let mut acc = Accumulator::default(); for live_object in self.store.iter_live_object_set(include_wrapped_tombstone) { diff --git a/crates/sui-core/src/storage.rs b/crates/sui-core/src/storage.rs index 9bfdc5db274..e866091cbb8 100644 --- a/crates/sui-core/src/storage.rs +++ b/crates/sui-core/src/storage.rs @@ -1,33 +1,29 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use parking_lot::Mutex; use std::sync::Arc; -use sui_types::storage::ObjectStore; -use sui_types::base_types::TransactionDigest; -use sui_types::committee::Committee; -use sui_types::committee::EpochId; -use sui_types::digests::TransactionEventsDigest; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::error::SuiError; -use sui_types::messages_checkpoint::CheckpointContentsDigest; -use sui_types::messages_checkpoint::CheckpointDigest; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::messages_checkpoint::EndOfEpochData; -use sui_types::messages_checkpoint::FullCheckpointContents; -use sui_types::messages_checkpoint::VerifiedCheckpoint; -use sui_types::messages_checkpoint::VerifiedCheckpointContents; -use sui_types::object::Object; -use sui_types::storage::error::Error as StorageError; -use sui_types::storage::WriteStore; -use sui_types::storage::{ObjectKey, ReadStore}; -use sui_types::transaction::VerifiedTransaction; +use parking_lot::Mutex; +use sui_types::{ + base_types::TransactionDigest, + committee::{Committee, EpochId}, + digests::TransactionEventsDigest, + effects::{TransactionEffects, TransactionEvents}, + error::SuiError, + messages_checkpoint::{ + CheckpointContentsDigest, CheckpointDigest, CheckpointSequenceNumber, EndOfEpochData, + FullCheckpointContents, VerifiedCheckpoint, VerifiedCheckpointContents, + }, + object::Object, + storage::{error::Error as StorageError, ObjectKey, ObjectStore, ReadStore, WriteStore}, + transaction::VerifiedTransaction, +}; -use crate::checkpoints::CheckpointStore; -use crate::epoch::committee_store::CommitteeStore; -use crate::execution_cache::ExecutionCacheRead; -use crate::execution_cache::StateSyncAPI; +use crate::{ + checkpoints::CheckpointStore, + epoch::committee_store::CommitteeStore, + execution_cache::{ExecutionCacheRead, StateSyncAPI}, +}; #[derive(Clone)] pub struct RocksDbStore { @@ -146,9 +142,10 @@ impl ReadStore for RocksDbStore { // Otherwise gather it from the individual components. // Note we can't insert the constructed contents into `full_checkpoint_content`, - // because it needs to be inserted along with `checkpoint_sequence_by_contents_digest` - // and `checkpoint_content`. However at this point it's likely we don't know the - // corresponding sequence number yet. + // because it needs to be inserted along with + // `checkpoint_sequence_by_contents_digest` and `checkpoint_content`. + // However at this point it's likely we don't know the corresponding + // sequence number yet. self.checkpoint_store .get_checkpoint_contents(digest) .map_err(sui_types::storage::error::Error::custom)? diff --git a/crates/sui-core/src/streamer.rs b/crates/sui-core/src/streamer.rs index 084c9836da5..844619187d7 100644 --- a/crates/sui-core/src/streamer.rs +++ b/crates/sui-core/src/streamer.rs @@ -1,26 +1,25 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::subscription_handler::{SubscriptionMetrics, EVENT_DISPATCH_BUFFER_SIZE}; +use std::{collections::BTreeMap, fmt::Debug, sync::Arc}; + use futures::Stream; -use mysten_metrics::metered_channel::Sender; -use mysten_metrics::spawn_monitored_task; +use mysten_metrics::{metered_channel::Sender, spawn_monitored_task}; use parking_lot::RwLock; use prometheus::Registry; -use std::collections::BTreeMap; -use std::fmt::Debug; -use std::sync::Arc; use sui_json_rpc_types::Filter; -use sui_types::base_types::ObjectID; -use sui_types::error::SuiError; +use sui_types::{base_types::ObjectID, error::SuiError}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tracing::{debug, warn}; +use crate::subscription_handler::{SubscriptionMetrics, EVENT_DISPATCH_BUFFER_SIZE}; + type Subscribers = Arc, F)>>>; -/// The Streamer splits a mpsc channel into multiple mpsc channels using the subscriber's `Filter` object. -/// Data will be sent to the subscribers in parallel and the subscription will be dropped if it received a send error. +/// The Streamer splits an mpsc channel into multiple mpsc channels using the +/// subscriber's `Filter` object. Data will be sent to the subscribers in +/// parallel and the subscription will be dropped if it receives a send error. pub struct Streamer> { streamer_queue: Sender, subscribers: Subscribers, @@ -41,8 +40,8 @@ where let gauge = if let Some(metrics) = mysten_metrics::get_metrics() { metrics.channels.with_label_values(&[&channel_label]) } else { - // We call init_metrics very early when starting a node. Therefore when this happens, - // it's probably in a test. + // We call init_metrics very early when starting a node. Therefore when this + // happens, it's probably in a test. mysten_metrics::init_metrics(&Registry::default()); mysten_metrics::get_metrics() .unwrap() @@ -107,8 +106,9 @@ where subscription_id = id, "Error when streaming data, removing subscriber. Error: {e}" ); - // It does not matter what the error is - channel full or closed, we remove the subscriber. - // In the case of a full channel, this nudges the subscriber to catch up separately and not + // It does not matter what the error is - channel full or closed, we remove + // the subscriber. In the case of a full channel, + // this nudges the subscriber to catch up separately and not // miss any data.
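The removal policy described in the comment above is easy to see in isolation. Below is a minimal, self-contained sketch of the same idea, assuming tokio's bounded mpsc channel; the `Subscriber` record and `dispatch` function are hypothetical stand-ins for the crate's actual types, with a plain predicate in place of the `Filter` object:

use tokio::sync::mpsc;

// Hypothetical subscriber record: an id for logging, a bounded channel, and a
// predicate standing in for the subscriber's `Filter`.
struct Subscriber<T> {
    id: String,
    sender: mpsc::Sender<T>,
    filter: Box<dyn Fn(&T) -> bool + Send>,
}

// Fan one event out to every matching subscriber. `try_send` fails on both a
// full and a closed channel; in either case the subscription is dropped,
// mirroring the policy in the comment above.
fn dispatch<T: Clone>(subscribers: &mut Vec<Subscriber<T>>, event: &T) {
    subscribers.retain(|s| {
        if !(s.filter)(event) {
            return true; // not interested in this event; keep the subscription
        }
        match s.sender.try_send(event.clone()) {
            Ok(()) => true,
            Err(e) => {
                eprintln!("removing subscriber {}: {e}", s.id);
                false
            }
        }
    });
}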
to_remove.push(id.clone()); failure_counter.inc(); diff --git a/crates/sui-core/src/subscription_handler.rs b/crates/sui-core/src/subscription_handler.rs index 7653a36d422..d29e3cdab2a 100644 --- a/crates/sui-core/src/subscription_handler.rs +++ b/crates/sui-core/src/subscription_handler.rs @@ -7,17 +7,15 @@ use prometheus::{ register_int_counter_vec_with_registry, register_int_gauge_vec_with_registry, IntCounterVec, IntGaugeVec, Registry, }; +use sui_json_rpc_types::{ + EffectsWithInput, EventFilter, SuiEvent, SuiTransactionBlockEffects, + SuiTransactionBlockEffectsAPI, SuiTransactionBlockEvents, TransactionFilter, +}; +use sui_types::{error::SuiResult, transaction::TransactionData}; use tokio_stream::Stream; use tracing::{error, instrument, trace}; use crate::streamer::Streamer; -use sui_json_rpc_types::{ - EffectsWithInput, EventFilter, SuiTransactionBlockEffects, SuiTransactionBlockEvents, - TransactionFilter, -}; -use sui_json_rpc_types::{SuiEvent, SuiTransactionBlockEffectsAPI}; -use sui_types::error::SuiResult; -use sui_types::transaction::TransactionData; #[cfg(test)] #[path = "unit_tests/subscription_handler_tests.rs"] diff --git a/crates/sui-core/src/test_authority_clients.rs b/crates/sui-core/src/test_authority_clients.rs index b206debb455..bff472f82ef 100644 --- a/crates/sui-core/src/test_authority_clients.rs +++ b/crates/sui-core/src/test_authority_clients.rs @@ -7,26 +7,27 @@ use std::{ time::Duration, }; -use crate::authority::test_authority_builder::TestAuthorityBuilder; -use crate::{authority::AuthorityState, authority_client::AuthorityAPI}; use async_trait::async_trait; use mysten_metrics::spawn_monitored_task; use sui_config::genesis::Genesis; -use sui_types::error::SuiResult; -use sui_types::messages_grpc::{ - HandleCertificateResponseV2, HandleTransactionResponse, ObjectInfoRequest, ObjectInfoResponse, - SystemStateRequest, TransactionInfoRequest, TransactionInfoResponse, -}; -use sui_types::sui_system_state::SuiSystemState; use sui_types::{ crypto::AuthorityKeyPair, - error::SuiError, - messages_checkpoint::{CheckpointRequest, CheckpointResponse}, + effects::{TransactionEffectsAPI, TransactionEvents}, + error::{SuiError, SuiResult}, + messages_checkpoint::{ + CheckpointRequest, CheckpointRequestV2, CheckpointResponse, CheckpointResponseV2, + }, + messages_grpc::{ + HandleCertificateResponseV2, HandleTransactionResponse, ObjectInfoRequest, + ObjectInfoResponse, SystemStateRequest, TransactionInfoRequest, TransactionInfoResponse, + }, + sui_system_state::SuiSystemState, transaction::{CertifiedTransaction, Transaction, VerifiedTransaction}, }; -use sui_types::{ - effects::{TransactionEffectsAPI, TransactionEvents}, - messages_checkpoint::{CheckpointRequestV2, CheckpointResponseV2}, + +use crate::{ + authority::{test_authority_builder::TestAuthorityBuilder, AuthorityState}, + authority_client::AuthorityAPI, }; #[derive(Clone, Copy, Default)] @@ -152,9 +153,10 @@ impl LocalAuthorityClient { } } - // One difference between this implementation and actual certificate execution, is that - // this assumes shared object locks have already been acquired and tries to execute shared - // object transactions as well as owned object transactions. + // One difference between this implementation and actual certificate execution, + // is that this assumes shared object locks have already been acquired and + // tries to execute shared object transactions as well as owned object + // transactions. 
async fn handle_certificate( state: Arc, certificate: CertifiedTransaction, @@ -165,8 +167,8 @@ impl LocalAuthorityClient { error: "Mock error before handle_confirmation_transaction".to_owned(), }); } - // Check existing effects before verifying the cert to allow querying certs finalized - // from previous epochs. + // Check existing effects before verifying the cert to allow querying certs + // finalized from previous epochs. let tx_digest = *certificate.digest(); let epoch_store = state.epoch_store_for_testing(); let signed_effects = match state @@ -178,7 +180,7 @@ impl LocalAuthorityClient { .signature_verifier .verify_cert(certificate) .await?; - //let certificate = certificate.verify(epoch_store.committee())?; + // let certificate = certificate.verify(epoch_store.committee())?; state.enqueue_certificates_for_execution(vec![certificate.clone()], &epoch_store); let effects = state.notify_read_effects(&certificate).await?; state.sign_effects(effects, &epoch_store)? diff --git a/crates/sui-core/src/test_utils.rs b/crates/sui-core/src/test_utils.rs index a98bee92cc4..47c8d6f00be 100644 --- a/crates/sui-core/src/test_utils.rs +++ b/crates/sui-core/src/test_utils.rs @@ -1,55 +1,55 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use fastcrypto::hash::MultisetHash; -use fastcrypto::traits::KeyPair; +use std::{ + collections::{BTreeMap, HashMap}, + path::PathBuf, + sync::Arc, + time::Duration, +}; + +use fastcrypto::{hash::MultisetHash, traits::KeyPair}; use futures::future::join_all; -use move_core_types::account_address::AccountAddress; -use move_core_types::ident_str; +use move_core_types::{account_address::AccountAddress, ident_str}; use prometheus::Registry; use shared_crypto::intent::{Intent, IntentScope}; -use std::collections::{BTreeMap, HashMap}; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Duration; -use sui_config::genesis::Genesis; -use sui_config::local_ip_utils; -use sui_config::node::AuthorityOverloadConfig; +use sui_config::{genesis::Genesis, local_ip_utils, node::AuthorityOverloadConfig}; use sui_framework::BuiltInFramework; use sui_genesis_builder::validator_info::ValidatorInfo; use sui_macros::nondeterministic; use sui_move_build::{BuildConfig, CompiledPackage, SuiPackageHooks}; use sui_protocol_config::ProtocolConfig; -use sui_types::base_types::{random_object_ref, ObjectID}; -use sui_types::crypto::{ - generate_proof_of_possession, get_key_pair, AccountKeyPair, AuthorityPublicKeyBytes, - NetworkKeyPair, SuiKeyPair, -}; -use sui_types::crypto::{AuthorityKeyPair, Signer}; -use sui_types::effects::{SignedTransactionEffects, TestEffectsBuilder}; -use sui_types::error::SuiError; -use sui_types::transaction::ObjectArg; -use sui_types::transaction::{ - CallArg, SignedTransaction, Transaction, TransactionData, TEST_ONLY_GAS_UNIT_FOR_TRANSFER, -}; -use sui_types::utils::create_fake_transaction; -use sui_types::utils::to_sender_signed_transaction; use sui_types::{ - base_types::{AuthorityName, ExecutionDigests, ObjectRef, SuiAddress, TransactionDigest}, + base_types::{ + random_object_ref, AuthorityName, ExecutionDigests, ObjectID, ObjectRef, SuiAddress, + TransactionDigest, + }, committee::Committee, - crypto::{AuthoritySignInfo, AuthoritySignature}, + crypto::{ + generate_proof_of_possession, get_key_pair, AccountKeyPair, AuthorityKeyPair, + AuthorityPublicKeyBytes, AuthoritySignInfo, AuthoritySignature, NetworkKeyPair, Signer, + SuiKeyPair, + }, + effects::{SignedTransactionEffects, TestEffectsBuilder}, + error::SuiError, 
message_envelope::Message, object::Object, - transaction::CertifiedTransaction, + transaction::{ + CallArg, CertifiedTransaction, ObjectArg, SignedTransaction, Transaction, TransactionData, + TEST_ONLY_GAS_UNIT_FOR_TRANSFER, + }, + utils::{create_fake_transaction, to_sender_signed_transaction}, }; use tokio::time::timeout; use tracing::{info, warn}; -use crate::authority::{test_authority_builder::TestAuthorityBuilder, AuthorityState}; -use crate::authority_aggregator::{AuthorityAggregator, TimeoutConfig}; -use crate::epoch::committee_store::CommitteeStore; -use crate::state_accumulator::StateAccumulator; -use crate::test_authority_clients::LocalAuthorityClient; +use crate::{ + authority::{test_authority_builder::TestAuthorityBuilder, AuthorityState}, + authority_aggregator::{AuthorityAggregator, TimeoutConfig}, + epoch::committee_store::CommitteeStore, + state_accumulator::StateAccumulator, + test_authority_clients::LocalAuthorityClient, +}; const WAIT_FOR_TX_TIMEOUT: Duration = Duration::from_secs(15); @@ -74,11 +74,13 @@ pub async fn send_and_confirm_transaction( .verify_authenticated(&committee, &Default::default()) .unwrap(); - // Submit the confirmation. *Now* execution actually happens, and it should fail when we try to look up our dummy module. - // we unfortunately don't get a very descriptive error message, but we can at least see that something went wrong inside the VM + // Submit the confirmation. *Now* execution actually happens, and it should fail + // when we try to look up our dummy module. We unfortunately don't get a + // very descriptive error message, but we can at least see that something went + // wrong inside the VM // - // We also check the incremental effects of the transaction on the live object set against StateAccumulator - // for testing and regression detection + // We also check the incremental effects of the transaction on the live object + // set against StateAccumulator for testing and regression detection let state_acc = StateAccumulator::new(authority.get_execution_cache().clone()); let include_wrapped_tombstone = !authority .epoch_store_for_testing() @@ -101,8 +103,8 @@ pub async fn send_and_confirm_transaction( Ok((certificate.into_inner(), result.into_inner())) } -// note: clippy is confused about this being dead - it appears to only be used in cfg(test), but -// adding #[cfg(test)] causes other targets to fail +// note: clippy is confused about this being dead - it appears to only be used +// in cfg(test), but adding #[cfg(test)] causes other targets to fail #[allow(dead_code)] pub(crate) fn init_state_parameters_from_rng(rng: &mut R) -> (Genesis, AuthorityKeyPair) where diff --git a/crates/sui-core/src/transaction_input_loader.rs b/crates/sui-core/src/transaction_input_loader.rs index 518208e55df..4e183b17029 100644 --- a/crates/sui-core/src/transaction_input_loader.rs +++ b/crates/sui-core/src/transaction_input_loader.rs @@ -1,11 +1,10 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::execution_cache::ExecutionCacheRead; +use std::{collections::HashMap, sync::Arc}; + use itertools::izip; use once_cell::unsync::OnceCell; -use std::collections::HashMap; -use std::sync::Arc; use sui_protocol_config::ProtocolConfig; use sui_types::{ base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, TransactionDigest}, @@ -18,6 +17,8 @@ use sui_types::{ }; use tracing::instrument; +use crate::execution_cache::ExecutionCacheRead; + pub(crate) struct TransactionInputLoader { cache: Arc, } @@ -31,9 +32,10 @@ impl TransactionInputLoader { impl TransactionInputLoader { /// Read the inputs for a transaction that the validator was asked to sign. /// - /// tx_digest is provided so that the inputs can be cached with the tx_digest and returned with - /// a single hash map lookup when notify_read_objects_for_execution is called later. - /// TODO: implement this caching + /// tx_digest is provided so that the inputs can be cached with the + /// tx_digest and returned with a single hash map lookup when + /// notify_read_objects_for_execution is called later. TODO: implement + /// this caching #[instrument(level = "trace", skip_all)] pub async fn read_objects_for_signing( &self, @@ -42,7 +44,8 @@ impl TransactionInputLoader { receiving_objects: &[ObjectRef], epoch_id: EpochId, ) -> SuiResult<(InputObjects, ReceivingObjects)> { - // Length of input_object_kinds have beeen checked via validity_check() for ProgrammableTransaction. + // Length of input_object_kinds has been checked via validity_check() for + // ProgrammableTransaction. let mut input_results = vec![None; input_object_kinds.len()]; let mut object_refs = Vec::with_capacity(input_object_kinds.len()); let mut fetch_indices = Vec::with_capacity(input_object_kinds.len()); @@ -107,9 +110,10 @@ impl TransactionInputLoader { )) } - /// Reads input objects assuming a synchronous context such as the end of epoch transaction. - /// By "synchronous" we mean that it is safe to read the latest version of all shared objects, - /// as opposed to relying on the shared input version assignment. + /// Reads input objects assuming a synchronous context such as the end of + /// epoch transaction. By "synchronous" we mean that it is safe to read + /// the latest version of all shared objects, as opposed to relying on + /// the shared input version assignment. #[instrument(level = "trace", skip_all)] pub async fn read_objects_for_synchronous_execution( &self, @@ -164,17 +168,21 @@ impl TransactionInputLoader { /// Read the inputs for a transaction that is ready to be executed. /// - /// shared_lock_store is used to resolve the versions of any shared input objects. + /// shared_lock_store is used to resolve the versions of any shared input + /// objects. /// - /// This function panics if any inputs are not available, as TransactionManager should already - /// have verified that the transaction is ready to be executed. + /// This function panics if any inputs are not available, as + /// TransactionManager should already have verified that the transaction + /// is ready to be executed. /// - /// The tx_digest is provided here to support the following optimization (not yet implemented): - /// All the owned input objects will likely have been loaded during transaction signing, and - /// can be stored as a group with the transaction_digest as the key, allowing the lookup to - /// proceed with only a single hash map lookup.
(additional lookups may be necessary for shared - /// inputs, since the versions are not known at signing time). Receiving objects could be - /// cached, but only with appropriate invalidation logic for when an object is received by a + /// The tx_digest is provided here to support the following optimization + /// (not yet implemented): All the owned input objects will likely have + /// been loaded during transaction signing, and can be stored as a group + /// with the transaction_digest as the key, allowing the lookup to + /// proceed with only a single hash map lookup. (additional lookups may be + /// necessary for shared inputs, since the versions are not known at + /// signing time). Receiving objects could be cached, but only with + /// appropriate invalidation logic for when an object is received by a /// different tx first. #[instrument(level = "trace", skip_all)] pub async fn read_objects_for_execution( @@ -245,16 +253,24 @@ impl TransactionInputLoader { (None, InputObjectKind::SharedMoveObject { id, .. }) => { // Check if the object was deleted by a concurrently certified tx let version = key.1; - if let Some(dependency) = self.cache.get_deleted_shared_object_previous_tx_digest(id, version, epoch_id)? { + if let Some(dependency) = self + .cache + .get_deleted_shared_object_previous_tx_digest(id, version, epoch_id)? + { ObjectReadResult { input_object_kind: *input, object: ObjectReadResultKind::DeletedSharedObject(version, dependency), } } else { - panic!("All dependencies of tx {tx_key:?} should have been executed now, but Shared Object id: {}, version: {version} is absent in epoch {epoch_id}", *id); + panic!( + "All dependencies of tx {tx_key:?} should have been executed now, but Shared Object id: {}, version: {version} is absent in epoch {epoch_id}", + *id + ); } - }, - _ => panic!("All dependencies of tx {tx_key:?} should have been executed now, but obj {key:?} is absent"), + } + _ => panic!( + "All dependencies of tx {tx_key:?} should have been executed now, but obj {key:?} is absent" + ), }); } @@ -276,7 +292,8 @@ impl TransactionInputLoader { _protocol_config: &ProtocolConfig, ) -> SuiResult<(InputObjects, ReceivingObjects)> { let mut results = Vec::with_capacity(input_object_kinds.len()); - // Length of input_object_kinds have beeen checked via validity_check() for ProgrammableTransaction. + // Length of input_object_kinds has been checked via validity_check() for + // ProgrammableTransaction.
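The TODO above describes a caching optimization that is not implemented here: park the objects loaded at signing time under the transaction digest, then recover the whole input set with one map lookup at execution time. A rough, hypothetical shape of that cache, with deliberately simplified key types, might be:

use std::collections::HashMap;

type TxDigest = [u8; 32];
type ObjectKeyLite = (u64, u64); // simplified stand-in for (ObjectID, SequenceNumber)

// Hypothetical signing-time input cache, keyed by transaction digest.
#[derive(Default)]
struct SigningInputCache {
    by_tx: HashMap<TxDigest, Vec<ObjectKeyLite>>,
}

impl SigningInputCache {
    fn store(&mut self, tx: TxDigest, inputs: Vec<ObjectKeyLite>) {
        self.by_tx.insert(tx, inputs);
    }

    // Consume the entry on execution so the cache cannot grow without bound.
    // As the comment above notes, shared inputs would still need a separate
    // read, since their versions are only assigned after consensus.
    fn take(&mut self, tx: &TxDigest) -> Option<Vec<ObjectKeyLite>> {
        self.by_tx.remove(tx)
    }
}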
for kind in input_object_kinds { let obj = match kind { InputObjectKind::MovePackage(id) => self diff --git a/crates/sui-core/src/transaction_manager.rs b/crates/sui-core/src/transaction_manager.rs index dec876c51d7..795317f66ce 100644 --- a/crates/sui-core/src/transaction_manager.rs +++ b/crates/sui-core/src/transaction_manager.rs @@ -17,23 +17,20 @@ use sui_types::{ committee::EpochId, digests::TransactionEffectsDigest, error::{SuiError, SuiResult}, - fp_ensure, + executable_transaction::VerifiedExecutableTransaction, + fp_bail, fp_ensure, message_envelope::Message, storage::InputKey, - transaction::{TransactionDataAPI, VerifiedCertificate}, + transaction::{SenderSignedData, TransactionDataAPI, VerifiedCertificate}, }; -use sui_types::{executable_transaction::VerifiedExecutableTransaction, fp_bail}; -use tokio::sync::mpsc::UnboundedSender; -use tokio::time::Instant; +use tap::TapOptional; +use tokio::{sync::mpsc::UnboundedSender, time::Instant}; use tracing::{error, info, instrument, trace, warn}; -use crate::authority::AuthorityMetrics; use crate::{ - authority::authority_per_epoch_store::AuthorityPerEpochStore, + authority::{authority_per_epoch_store::AuthorityPerEpochStore, AuthorityMetrics}, execution_cache::ExecutionCacheRead, }; -use sui_types::transaction::SenderSignedData; -use tap::TapOptional; #[cfg(test)] #[path = "unit_tests/transaction_manager_tests.rs"] @@ -42,21 +39,24 @@ mod transaction_manager_tests; /// Minimum capacity of HashMaps used in TransactionManager. const MIN_HASHMAP_CAPACITY: usize = 1000; -// Reject a transaction if transaction manager queue length is above this threshold. -// 100_000 = 10k TPS * 5s resident time in transaction manager (pending + executing) * 2. +// Reject a transaction if transaction manager queue length is above this +// threshold. 100_000 = 10k TPS * 5s resident time in transaction manager +// (pending + executing) * 2. pub(crate) const MAX_TM_QUEUE_LENGTH: usize = 100_000; -// Reject a transaction if the number of pending transactions depending on the object -// is above the threshold. +// Reject a transaction if the number of pending transactions depending on the +// object is above the threshold. pub(crate) const MAX_PER_OBJECT_QUEUE_LENGTH: usize = 200; -/// TransactionManager is responsible for managing object dependencies of pending transactions, -/// and publishing a stream of certified transactions (certificates) ready to execute. -/// It receives certificates from Narwhal, validator RPC handlers, and checkpoint executor. -/// Execution driver subscribes to the stream of ready certificates from TransactionManager, and +/// TransactionManager is responsible for managing object dependencies of +/// pending transactions, and publishing a stream of certified transactions +/// (certificates) ready to execute. It receives certificates from Narwhal, +/// validator RPC handlers, and checkpoint executor. Execution driver subscribes +/// to the stream of ready certificates from TransactionManager, and /// executes them in parallel. -/// The actual execution logic is inside AuthorityState. After a transaction commits and updates -/// storage, committed objects and certificates are notified back to TransactionManager. +/// The actual execution logic is inside AuthorityState. After a transaction +/// commits and updates storage, committed objects and certificates are notified +/// back to TransactionManager. 
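The doc comment above compresses the manager's core bookkeeping into a few sentences. As a toy model of it (simplified key types and hypothetical names, not the real `Inner` state): each pending transaction waits on a set of missing input keys, and committing an object drains those sets, releasing any transaction whose set becomes empty:

use std::collections::{BTreeSet, HashMap};

type InputKeyLite = (u64, u64); // simplified (object id, version)
type TxDigestLite = u64;

#[derive(Default)]
struct DependencyTracker {
    // input key -> transactions waiting on it
    missing_inputs: HashMap<InputKeyLite, BTreeSet<TxDigestLite>>,
    // transaction -> inputs it still waits for
    waiting: HashMap<TxDigestLite, BTreeSet<InputKeyLite>>,
}

impl DependencyTracker {
    fn enqueue(&mut self, tx: TxDigestLite, missing: impl IntoIterator<Item = InputKeyLite>) {
        for key in missing {
            self.missing_inputs.entry(key).or_default().insert(tx);
            self.waiting.entry(tx).or_default().insert(key);
        }
    }

    // Called when an object commits; returns the transactions that became
    // ready and can be handed to the execution driver.
    fn object_available(&mut self, key: InputKeyLite) -> Vec<TxDigestLite> {
        let mut ready = Vec::new();
        for tx in self.missing_inputs.remove(&key).unwrap_or_default() {
            let waits = self.waiting.get_mut(&tx).expect("tracked in enqueue");
            waits.remove(&key);
            if waits.is_empty() {
                self.waiting.remove(&tx);
                ready.push(tx);
            }
        }
        ready
    }
}

A transaction whose inputs are all available at enqueue time would bypass this bookkeeping and go straight to the ready channel.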
pub struct TransactionManager { cache_read: Arc, tx_ready_certificates: UnboundedSender, @@ -79,7 +79,8 @@ pub struct PendingCertificate { // When executing from checkpoint, the certified effects digest is provided, so that forks can // be detected prior to committing the transaction. pub expected_effects_digest: Option, - // The input object this certificate is waiting for to become available in order to be executed. + // The input object this certificate is waiting for to become available in order to be + // executed. pub waiting_input_objects: BTreeSet, // Stores stats about this transaction. pub stats: PendingCertificateStats, @@ -148,9 +149,9 @@ impl CacheInner { .transaction_manager_object_cache_size .set(self.versioned_cache.len() as i64); } else if let Some((previous_id, _)) = self.unversioned_cache.push(object.id(), ()) { - // lru_cache will does not check if the value being evicted is the same as the value - // being inserted, so we do need to check if the id is different before counting this - // as an eviction. + // lru_cache does not check if the value being evicted is the same as the + // value being inserted, so we do need to check if the id is + // different before counting this as an eviction. if previous_id != object.id() { self.metrics .transaction_manager_package_cache_evictions @@ -162,8 +163,8 @@ impl CacheInner { } } - // Returns Some(true/false) for a definitive result. Returns None if the caller must defer to - // the db. + // Returns Some(true/false) for a definitive result. Returns None if the caller + // must defer to the db. fn is_object_available(&mut self, object: &InputKey) -> Option { if let Some(version) = object.version() { if let Some(current) = self.versioned_cache.get(&object.id()) { @@ -260,8 +261,8 @@ impl Inner { } } - // Checks if there is any transaction waiting on `input_key`. Returns all the pending - // transactions that are ready to be executed. + // Checks if there is any transaction waiting on `input_key`. Returns all the + // pending transactions that are ready to be executed. // Must ensure input_key is available in storage before calling this function. fn find_ready_transactions( &mut self, @@ -327,7 +328,8 @@ impl Inner { self.executing_certificates.maybe_reserve_capacity(); } - /// After reaching 1/4 load in hashmaps, decrease capacity to increase load to 1/2. + /// After reaching 1/4 load in hashmaps, decrease capacity to increase load + /// to 1/2. fn maybe_shrink_capacity(&mut self) { self.missing_inputs.maybe_shrink_capacity(); self.input_objects.maybe_shrink_capacity(); @@ -337,9 +339,10 @@ impl Inner { } impl TransactionManager { - /// If a node restarts, transaction manager recovers in-memory data from pending_certificates, - /// which contains certificates not yet executed from Narwhal output and RPC. - /// Transactions from other sources, e.g. checkpoint executor, have own persistent storage to + /// If a node restarts, transaction manager recovers in-memory data from + /// pending_certificates, which contains certificates not yet executed + /// from Narwhal output and RPC. Transactions from other sources, e.g. + /// checkpoint executor, have their own persistent storage to /// retry transactions. pub(crate) fn new( cache_read: Arc, @@ -357,11 +360,12 @@ impl TransactionManager { transaction_manager } - /// Enqueues certificates / verified transactions into TransactionManager. Once all of the input objects are available - /// locally for a certificate, the certified transaction will be sent to execution driver.
+ /// Enqueues certificates / verified transactions into TransactionManager. + /// Once all of the input objects are available locally for a + /// certificate, the certified transaction will be sent to execution driver. /// - /// REQUIRED: Shared object locks must be taken before calling enqueueing transactions - /// with shared objects! + /// REQUIRED: Shared object locks must be taken before enqueueing + /// transactions with shared objects! #[instrument(level = "trace", skip_all)] pub(crate) fn enqueue_certificates( &self, @@ -503,9 +507,9 @@ impl TransactionManager { .into_iter() .zip(input_object_cache_misses); - // After this point, the function cannot return early and must run to the end. Otherwise, - // it can lead to data inconsistencies and potentially some transactions will never get - // executed. + // After this point, the function cannot return early and must run to the end. + // Otherwise, it can lead to data inconsistencies and potentially some + // transactions will never get executed. // Internal lock is held only for updating the internal state. let mut inner = self.inner.write(); @@ -514,10 +518,11 @@ impl TransactionManager { for (available, key) in cache_miss_availability { if available && key.version().is_none() { - // Mutable objects obtained from cache_miss_availability usually will not be read - // again, so we do not want to evict other objects in order to insert them into the - // cache. However, packages will likely be read often, so we do want to insert them - // even if they cause evictions. + // Mutable objects obtained from cache_miss_availability usually will not be + // read again, so we do not want to evict other objects in order + // to insert them into the cache. However, packages will likely + // be read often, so we do want to insert them even if they + // cause evictions. inner.available_objects_cache.insert(&key); } object_availability .expect("entry must already exist"); } - // Now recheck the cache for anything that became available (via notify_commit) since we - // read cache_miss_availability - because the cache is unbounded mode it is guaranteed to - // contain all notifications that arrived since we released the lock on self.inner. + // Now recheck the cache for anything that became available (via notify_commit) + // since we read cache_miss_availability - because the cache is in + // unbounded mode, it is guaranteed to contain all notifications that + // arrived since we released the lock on self.inner. for (key, value) in object_availability.iter_mut() { if !value.expect("all objects must have been checked by now") { if let Some(true) = inner.available_objects_cache.is_object_available(key) { @@ -554,10 +560,11 @@ impl TransactionManager { } for mut pending_cert in pending { - // Tx lock is not held here, which makes it possible to send duplicated transactions to - // the execution driver after crash-recovery, when the same transaction is recovered - // from recovery log and pending certificates table. The transaction will still only - // execute once, because tx lock is acquired in execution driver and executed effects + // Tx lock is not held here, which makes it possible to send duplicated + // transactions to the execution driver after crash-recovery, when + // the same transaction is recovered from recovery log and pending + // certificates table.
The transaction will still only execute once, + // because tx lock is acquired in execution driver and executed effects // table is consulted. So this behavior is benign. let digest = *pending_cert.certificate.digest(); @@ -733,7 +740,12 @@ impl TransactionManager { let _scope = monitored_scope("TransactionManager::notify_commit::wlock"); if inner.epoch != epoch_store.epoch() { - warn!("Ignoring committed certificate from wrong epoch. Expected={} Actual={} CertificateDigest={:?}", inner.epoch, epoch_store.epoch(), digest); + warn!( + "Ignoring committed certificate from wrong epoch. Expected={} Actual={} CertificateDigest={:?}", + inner.epoch, + epoch_store.epoch(), + digest + ); return; } @@ -746,7 +758,10 @@ impl TransactionManager { ); if !inner.executing_certificates.remove(digest) { - trace!("{:?} not found in executing certificates, likely because it is a system transaction", digest); + trace!( + "{:?} not found in executing certificates, likely because it is a system transaction", + digest + ); return; } @@ -765,9 +780,11 @@ impl TransactionManager { trace!(tx_digest = ?pending_certificate.certificate.digest(), "certificate ready"); assert_eq!(pending_certificate.waiting_input_objects.len(), 0); // Record as an executing certificate. - assert!(inner .executing_certificates .insert(*pending_certificate.certificate.digest())); + assert!( + inner + .executing_certificates + .insert(*pending_certificate.certificate.digest()) + ); self.metrics.txn_ready_rate_tracker.lock().record(); let _ = self.tx_ready_certificates.send(pending_certificate); self.metrics.transaction_manager_num_ready.inc(); @@ -783,7 +800,8 @@ impl TransactionManager { .map(|cert| cert.waiting_input_objects.clone().into_iter().collect()) } - // Returns the number of transactions waiting on each object ID, as well as the age of the oldest transaction in the queue. + // Returns the number of transactions waiting on each object ID, as well as the + // age of the oldest transaction in the queue. pub(crate) fn objects_queue_len_and_age( &self, keys: Vec, @@ -808,8 +826,9 @@ impl TransactionManager { inner.pending_certificates.len() + inner.executing_certificates.len() } - // Reconfigures the TransactionManager for a new epoch. Existing transactions will be dropped - // because they are no longer relevant and may be incorrect in the new epoch. + // Reconfigures the TransactionManager for a new epoch. Existing transactions + // will be dropped because they are no longer relevant and may be incorrect + // in the new epoch. pub(crate) fn reconfigure(&self, new_epoch: EpochId) { let mut inner = self.inner.write(); *inner = Inner::new(new_epoch, self.metrics.clone()); @@ -852,9 +871,14 @@ impl TransactionManager { }); } if let Some(age) = txn_age { - // Check that we don't have a txn that has been waiting for a long time in the queue. + // Check that we don't have a txn that has been waiting for a long time in the + // queue. if age >= txn_age_threshold { - info!("Overload detected on object {:?} with oldest transaction pending for {} secs", object_id, age.as_secs()); + info!( + "Overload detected on object {:?} with oldest transaction pending for {} secs", + object_id, + age.as_secs() + ); fp_bail!(SuiError::TooOldTransactionPendingOnObject { object_id, txn_age_sec: age.as_secs(), @@ -902,14 +926,16 @@ impl ResizableHashMap for HashMap where K: std::cmp::Eq + std::hash::Hash, { - /// After reaching 3/4 load in hashmaps, increase capacity to decrease load to 1/2.
+ /// After reaching 3/4 load in hashmaps, increase capacity to decrease load + /// to 1/2. fn maybe_reserve_capacity(&mut self) { if self.len() > self.capacity() * 3 / 4 { self.reserve(self.capacity() / 2); } } - /// After reaching 1/4 load in hashmaps, decrease capacity to increase load to 1/2. + /// After reaching 1/4 load in hashmaps, decrease capacity to increase load + /// to 1/2. fn maybe_shrink_capacity(&mut self) { if self.len() > MIN_HASHMAP_CAPACITY && self.len() < self.capacity() / 4 { self.shrink_to(max(self.capacity() / 2, MIN_HASHMAP_CAPACITY)) @@ -926,14 +952,16 @@ impl ResizableHashSet for HashSet where K: std::cmp::Eq + std::hash::Hash, { - /// After reaching 3/4 load in hashset, increase capacity to decrease load to 1/2. + /// After reaching 3/4 load in hashset, increase capacity to decrease load + /// to 1/2. fn maybe_reserve_capacity(&mut self) { if self.len() > self.capacity() * 3 / 4 { self.reserve(self.capacity() / 2); } } - /// After reaching 1/4 load in hashset, decrease capacity to increase load to 1/2. + /// After reaching 1/4 load in hashset, decrease capacity to increase load + /// to 1/2. fn maybe_shrink_capacity(&mut self) { if self.len() > MIN_HASHMAP_CAPACITY && self.len() < self.capacity() / 4 { self.shrink_to(max(self.capacity() / 2, MIN_HASHMAP_CAPACITY)) @@ -943,9 +971,10 @@ where #[cfg(test)] mod test { - use super::*; use prometheus::Registry; + use super::*; + #[test] #[cfg_attr(msim, ignore)] fn test_available_objects_cache() { @@ -1010,7 +1039,8 @@ mod test { version: 10.into(), }; assert_eq!(cache.is_object_available(&input_key), Some(false)); - // it is available at version 8 (this case can be used by readonly shared objects) + // it is available at version 8 (this case can be used by readonly shared + // objects) let input_key = InputKey::VersionedObject { id: object, version: 8.into(), diff --git a/crates/sui-core/src/transaction_orchestrator.rs b/crates/sui-core/src/transaction_orchestrator.rs index e6b1de5d109..f8b125b98b0 100644 --- a/crates/sui-core/src/transaction_orchestrator.rs +++ b/crates/sui-core/src/transaction_orchestrator.rs @@ -1,50 +1,58 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore; -/* -Transaction Orchestrator is a Node component that utilizes Quorum Driver to -submit transactions to validators for finality, and proactively executes -finalized transactions locally, when possible. 
-*/ -use crate::authority::AuthorityState; -use crate::authority_aggregator::{AuthAggMetrics, AuthorityAggregator}; -use crate::authority_client::{AuthorityAPI, NetworkAuthorityClient}; -use crate::quorum_driver::reconfig_observer::{OnsiteReconfigObserver, ReconfigObserver}; -use crate::quorum_driver::{QuorumDriverHandler, QuorumDriverHandlerBuilder, QuorumDriverMetrics}; -use crate::safe_client::SafeClientMetricsBase; -use futures::future::{select, Either, Future}; -use futures::FutureExt; +use std::{path::Path, sync::Arc, time::Duration}; + +use futures::{ + future::{select, Either, Future}, + FutureExt, +}; use mysten_common::sync::notify_read::NotifyRead; -use mysten_metrics::histogram::{Histogram, HistogramVec}; -use mysten_metrics::{spawn_logged_monitored_task, spawn_monitored_task}; -use mysten_metrics::{TX_TYPE_SHARED_OBJ_TX, TX_TYPE_SINGLE_WRITER_TX}; -use prometheus::core::{AtomicI64, AtomicU64, GenericCounter, GenericGauge}; +use mysten_metrics::{ + histogram::{Histogram, HistogramVec}, + spawn_logged_monitored_task, spawn_monitored_task, TX_TYPE_SHARED_OBJ_TX, + TX_TYPE_SINGLE_WRITER_TX, +}; use prometheus::{ + core::{AtomicI64, AtomicU64, GenericCounter, GenericGauge}, register_int_counter_vec_with_registry, register_int_counter_with_registry, register_int_gauge_vec_with_registry, register_int_gauge_with_registry, Registry, }; -use std::path::Path; -use std::sync::Arc; -use std::time::Duration; use sui_storage::write_path_pending_tx_log::WritePathPendingTransactionLog; -use sui_types::base_types::TransactionDigest; -use sui_types::effects::{TransactionEffectsAPI, VerifiedCertifiedTransactionEffects}; -use sui_types::error::{SuiError, SuiResult}; -use sui_types::executable_transaction::VerifiedExecutableTransaction; -use sui_types::quorum_driver_types::{ - ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, - FinalizedEffects, QuorumDriverEffectsQueueResult, QuorumDriverError, QuorumDriverResponse, - QuorumDriverResult, +use sui_types::{ + base_types::TransactionDigest, + effects::{TransactionEffectsAPI, VerifiedCertifiedTransactionEffects}, + error::{SuiError, SuiResult}, + executable_transaction::VerifiedExecutableTransaction, + quorum_driver_types::{ + ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, + FinalizedEffects, QuorumDriverEffectsQueueResult, QuorumDriverError, QuorumDriverResponse, + QuorumDriverResult, + }, + sui_system_state::SuiSystemState, + transaction::VerifiedTransaction, +}; +use tokio::{ + sync::broadcast::{error::RecvError, Receiver}, + task::JoinHandle, + time::timeout, }; -use sui_types::sui_system_state::SuiSystemState; -use tokio::sync::broadcast::error::RecvError; -use tokio::sync::broadcast::Receiver; -use tokio::task::JoinHandle; -use tokio::time::timeout; use tracing::{debug, error, error_span, info, instrument, warn, Instrument}; -use sui_types::transaction::VerifiedTransaction; +// Transaction Orchestrator is a Node component that utilizes Quorum Driver to +// submit transactions to validators for finality, and proactively executes +// finalized transactions locally, when possible. 
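The module comment above names the component's two jobs: finality through the quorum driver, plus proactive local execution. The local side is best-effort and bounded by a timeout before the client gets a response; a stripped-down sketch of that shape, with hypothetical names rather than the orchestrator's real signatures:

use std::time::Duration;
use tokio::time::timeout;

// Stand-in for waiting until the transaction's effects are observed locally;
// the real component roughly waits on a notify-read against the store.
async fn wait_for_local_effects(_tx_digest: [u8; 32]) {
    // ...
}

// Finality was already obtained from the quorum driver; local execution is
// opportunistic and must not hold the client past the deadline.
async fn wait_local_execution_or_timeout(tx_digest: [u8; 32], deadline: Duration) -> bool {
    timeout(deadline, wait_for_local_effects(tx_digest)).await.is_ok()
}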
+use crate::authority::AuthorityState; +use crate::{ + authority::authority_per_epoch_store::AuthorityPerEpochStore, + authority_aggregator::{AuthAggMetrics, AuthorityAggregator}, + authority_client::{AuthorityAPI, NetworkAuthorityClient}, + quorum_driver::{ + reconfig_observer::{OnsiteReconfigObserver, ReconfigObserver}, + QuorumDriverHandler, QuorumDriverHandlerBuilder, QuorumDriverMetrics, + }, + safe_client::SafeClientMetricsBase, +}; // How long to wait for local execution (including parents) before a timeout // is returned to client. @@ -158,8 +166,8 @@ where request: ExecuteTransactionRequest, ) -> Result { // TODO check if tx is already executed on this node. - // Note: since EffectsCert is not stored today, we need to gather that from validators - // (and maybe store it for caching purposes) + // Note: since EffectsCert is not stored today, we need to gather that from + // validators (and maybe store it for caching purposes) let epoch_store = self.validator_state.load_epoch_store_one_call_per_task(); let transaction = epoch_store @@ -276,10 +284,11 @@ where .submit_transaction_no_ticket(transaction.clone().into()) .await?; } - // It's possible that the transaction effects is already stored in DB at this point. - // So we also subscribe to that. If we hear from `effects_await` first, it means - // the ticket misses the previous notification, and we want to ask quorum driver - // to form a certificate for us again, to serve this request. + // It's possible that the transaction effects are already stored in DB at this + // point. So we also subscribe to that. If we hear from `effects_await` + // first, it means the ticket misses the previous notification, and we + // want to ask quorum driver to form a certificate for us again, to + // serve this request. let cache_reader = self.validator_state.get_cache_reader().clone(); let qd = self.clone_quorum_driver(); Ok(async move { @@ -314,10 +323,10 @@ where // Every WaitForLocalExecution request will be attempted to execute twice, // one from the subscriber queue, one from the proactive execution before // returning results to clients. This is not insanely bad because: - // 1. it's possible that one attempt finishes before the other, so there's - // zero extra work except DB checks - // 2. an up-to-date fullnode should have minimal overhead to sync parents - // (for one extra time) + // 1. it's possible that one attempt finishes before the other, so there's zero + // extra work except DB checks + // 2. an up-to-date fullnode should have minimal overhead to sync parents (for + // one extra time) // 3. at the end of day, the tx will be executed at most once per lock guard. let tx_digest = transaction.digest(); if validator_state.is_tx_already_executed(tx_digest)? { @@ -407,9 +416,9 @@ where // This should be impossible, since we verified the transaction // before sending it to quorum driver. error!( - ?err, - "Transaction signature failed to verify after quorum driver execution." - ); + ?err, + "Transaction signature failed to verify after quorum driver execution." + ); continue; } }; @@ -508,7 +517,8 @@ where // requires a migration. let tx = tx.into_inner(); let tx_digest = *tx.digest(); - // It's not impossible we fail to enqueue a task but that's not the end of world. + // It's not impossible we fail to enqueue a task but that's not the end of the + // world.
if let Err(err) = quorum_driver.submit_transaction_no_ticket(tx).await { warn!( ?tx_digest, @@ -521,7 +531,8 @@ where } } } - // Transactions will be cleaned up in loop_execute_finalized_tx_locally() after they + // Transactions will be cleaned up in + // loop_execute_finalized_tx_locally() after they // produce effects. }); } diff --git a/crates/sui-core/src/transaction_outputs.rs b/crates/sui-core/src/transaction_outputs.rs index 7b0709bd4bc..b5c7d6d79f8 100644 --- a/crates/sui-core/src/transaction_outputs.rs +++ b/crates/sui-core/src/transaction_outputs.rs @@ -1,13 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use sui_types::base_types::ObjectRef; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}; -use sui_types::inner_temporary_store::{InnerTemporaryStore, WrittenObjects}; -use sui_types::storage::{MarkerValue, ObjectKey}; -use sui_types::transaction::{TransactionDataAPI, VerifiedTransaction}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use sui_types::{ + base_types::ObjectRef, + effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + inner_temporary_store::{InnerTemporaryStore, WrittenObjects}, + storage::{MarkerValue, ObjectKey}, + transaction::{TransactionDataAPI, VerifiedTransaction}, +}; /// TransactionOutputs pub struct TransactionOutputs { @@ -24,7 +29,8 @@ pub struct TransactionOutputs { } impl TransactionOutputs { - // Convert InnerTemporaryStore + Effects into the exact set of updates to the store + // Convert InnerTemporaryStore + Effects into the exact set of updates to the + // store pub fn build_transaction_outputs( transaction: VerifiedTransaction, effects: TransactionEffects, @@ -54,9 +60,10 @@ impl TransactionOutputs { .cloned() .filter(|obj_ref| modified_at.contains(&(obj_ref.0, obj_ref.1))); - // We record any received or deleted objects since they could be pruned, and smear shared - // object deletions in the marker table. For deleted entries in the marker table we need to - // make sure we don't accidentally overwrite entries. + // We record any received or deleted objects since they could be pruned, and + // smear shared object deletions in the marker table. For deleted + // entries in the marker table we need to make sure we don't + // accidentally overwrite entries. let markers: Vec<_> = { let received = received_objects .clone() @@ -74,10 +81,10 @@ impl TransactionOutputs { } }); - // We "smear" shared deleted objects in the marker table to allow for proper sequencing - // of transactions that are submitted after the deletion of the shared object. - // NB: that we do _not_ smear shared objects that were taken immutably in the - // transaction. + // We "smear" shared deleted objects in the marker table to allow for proper + // sequencing of transactions that are submitted after the deletion + // of the shared object. NB: we do _not_ smear shared objects + // that were taken immutably in the transaction.
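A toy model of the marker bookkeeping described in the two comments above (simplified types; the real keys and values are `ObjectKey` and `MarkerValue`): received and deleted objects are recorded per version, and writes go through `entry().or_insert` so an existing entry is never clobbered:

use std::collections::HashMap;

type ObjectKeyLite = (u64, u64); // simplified (object id, version)

#[derive(Clone, Copy, Debug)]
enum MarkerLite {
    Received,
    SharedDeleted { prev_tx: u64 },
}

fn build_markers(
    received: &[ObjectKeyLite],
    shared_deleted: &[ObjectKeyLite],
    tx: u64,
) -> HashMap<ObjectKeyLite, MarkerLite> {
    let mut markers = HashMap::new();
    for &key in received {
        markers.insert(key, MarkerLite::Received);
    }
    for &key in shared_deleted {
        // The "smear" lets transactions sequenced after the deletion observe
        // it instead of finding a missing object; or_insert avoids
        // accidentally overwriting an entry written above.
        markers
            .entry(key)
            .or_insert(MarkerLite::SharedDeleted { prev_tx: tx });
    }
    markers
}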
let smeared_objects = effects.deleted_mutably_accessed_shared_objects(); let shared_smears = smeared_objects.into_iter().map(move |object_id| { ( diff --git a/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs b/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs index 7e33a63c21b..dd0c6053b85 100644 --- a/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_aggregator_tests.rs @@ -1,46 +1,45 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::test_utils::make_transfer_object_transaction; -use crate::test_utils::make_transfer_sui_transaction; +use std::{ + collections::{BTreeMap, HashSet}, + path::PathBuf, + sync::{Arc, Mutex}, +}; + use move_core_types::{account_address::AccountAddress, ident_str}; -use rand::rngs::StdRng; -use rand::SeedableRng; +use rand::{rngs::StdRng, SeedableRng}; use shared_crypto::intent::{Intent, IntentScope}; -use std::collections::BTreeMap; -use std::collections::HashSet; -use std::path::PathBuf; -use std::sync::{Arc, Mutex}; use sui_authority_aggregation::quorum_map_then_reduce_with_timeout; +use sui_framework::BuiltInFramework; use sui_macros::sim_test; use sui_move_build::BuildConfig; -use sui_types::crypto::get_key_pair_from_rng; -use sui_types::crypto::{get_key_pair, AccountKeyPair, AuthorityKeyPair}; -use sui_types::crypto::{AuthoritySignature, Signer}; -use sui_types::crypto::{KeypairTraits, Signature}; -use sui_types::object::Object; -use sui_types::transaction::*; -use sui_types::utils::create_fake_transaction; - -use super::*; -use crate::authority_client::AuthorityAPI; -use crate::test_authority_clients::{ - HandleTransactionTestAuthorityClient, LocalAuthorityClient, LocalAuthorityClientFaultConfig, - MockAuthorityApi, -}; -use crate::test_utils::init_local_authorities; -use sui_framework::BuiltInFramework; -use sui_types::utils::to_sender_signed_transaction; -use tokio::time::Instant; - #[cfg(msim)] use sui_simulator::configs::constant_latency_ms; -use sui_types::effects::{ - TestEffectsBuilder, TransactionEffects, TransactionEffectsAPI, TransactionEvents, +use sui_types::{ + crypto::{ + get_key_pair, get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, AuthoritySignature, + KeypairTraits, Signature, Signer, + }, + effects::{TestEffectsBuilder, TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + execution_status::{ExecutionFailureStatus, ExecutionStatus}, + messages_grpc::{HandleTransactionResponse, TransactionStatus, VerifiedObjectInfoResponse}, + object::Object, + transaction::*, + utils::{create_fake_transaction, to_sender_signed_transaction}, }; -use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus}; -use sui_types::messages_grpc::{ - HandleTransactionResponse, TransactionStatus, VerifiedObjectInfoResponse, +use tokio::time::Instant; + +use super::*; +use crate::{ + authority_client::AuthorityAPI, + test_authority_clients::{ + HandleTransactionTestAuthorityClient, LocalAuthorityClient, + LocalAuthorityClientFaultConfig, MockAuthorityApi, + }, + test_utils::{ + init_local_authorities, make_transfer_object_transaction, make_transfer_sui_transaction, + }, }; macro_rules! 
assert_matches { @@ -87,7 +86,8 @@ pub fn create_object_move_transaction( gas_object_ref: ObjectRef, gas_price: u64, ) -> Transaction { - // When creating an object_basics object, we provide the value (u64) and address which will own the object + // When creating an object_basics object, we provide the value (u64) and address + // which will own the object let arguments = vec![ CallArg::Pure(value.to_le_bytes().to_vec()), CallArg::Pure(bcs::to_bytes(&AccountAddress::from(dest)).unwrap()), @@ -494,10 +494,12 @@ async fn test_map_reducer() { #[sim_test] async fn test_process_transaction_fault_success() { - // This test exercises the 4 different possible failing case when one authority is faulty. - // A transaction is sent to all authories, however one of them will error out either before or after processing the transaction. - // A cert should still be created, and sent out to all authorities again. This time - // a different authority errors out either before or after processing the cert. + // This test exercises the 4 different possible failing cases when one authority + // is faulty. A transaction is sent to all authorities; however, one of them + // will error out either before or after processing the transaction. + // A cert should still be created, and sent out to all authorities again. This + // time a different authority errors out either before or after processing + // the cert. for i in 0..4 { let mut config_before_process_transaction = LocalAuthorityClientFaultConfig::default(); if i % 2 == 0 { @@ -544,7 +546,8 @@ async fn test_process_transaction_fault_fail() { #[sim_test] async fn test_process_certificate_fault_fail() { - // Similar to test_process_transaction_fault_fail but tested on the process_certificate phase. + // Similar to test_process_transaction_fault_fail but tested on the + // process_certificate phase. let fail_before_process_certificate_config = LocalAuthorityClientFaultConfig { fail_before_handle_confirmation: true, ..Default::default() }; @@ -603,8 +606,9 @@ async fn test_quorum_once_with_timeout() { Arc::try_unwrap(log).unwrap().into_inner().unwrap() }; - // New requests are started every 50ms even though each request hangs for 1000ms. - // The 15th request succeeds, and we exit before processing the remaining authorities. + // New requests are started every 50ms even though each request hangs for + // 1000ms. The 15th request succeeds, and we exit before processing the + // remaining authorities.
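The timing asserted by the test above (a new request starts every 50ms, each request may hang for up to 1000ms, and the first success wins) comes from a staggered fan-out. A self-contained sketch of that pattern, with a hypothetical `query` closure standing in for the real per-authority call:

use std::{future::Future, time::Duration};

use futures::{stream::FuturesUnordered, StreamExt};

// Start a new request every `stagger` without waiting for earlier ones to
// finish; return the first success, or None once everything has failed.
async fn staggered_first_success<F, Fut, T>(
    authorities: Vec<u64>,
    stagger: Duration,
    query: F,
) -> Option<T>
where
    F: Fn(u64) -> Fut,
    Fut: Future<Output = Result<T, ()>>,
{
    let mut in_flight = FuturesUnordered::new();
    let mut remaining = authorities.into_iter();
    let mut ticker = tokio::time::interval(stagger); // first tick fires immediately
    loop {
        tokio::select! {
            _ = ticker.tick() => {
                if let Some(authority) = remaining.next() {
                    in_flight.push(query(authority));
                } else if in_flight.is_empty() {
                    return None; // nothing left to start or await
                }
            }
            Some(result) = in_flight.next() => {
                if let Ok(value) = result {
                    return Some(value);
                }
            }
        }
    }
}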
assert_eq!( case(agg.clone(), 1000).await, (0..15) @@ -613,14 +617,16 @@ async fn test_quorum_once_with_timeout() { ); *count.lock().unwrap() = 0; - // Here individual requests time out relatively quickly (100ms), but we continue increasing - // the parallelism every 50ms + // Here individual requests time out relatively quickly (100ms), but we continue + // increasing the parallelism every 50ms assert_eq!( case(agg.clone(), 100).await, - [0, 50, 100, 100, 150, 150, 200, 200, 200, 250, 250, 250, 300, 300, 300] - .iter() - .map(|d| Duration::from_millis(*d)) - .collect::>() + [ + 0, 50, 100, 100, 150, 150, 200, 200, 200, 250, 250, 250, 300, 300, 300 + ] + .iter() + .map(|d| Duration::from_millis(*d)) + .collect::>() ); } @@ -731,8 +737,8 @@ async fn test_handle_transaction_fork() { 666, // this is a dummy value which does not matter ); - // Non-quorum of effects without a retryable majority indicating a safety violation - // or a fork + // Non-quorum of effects without a retryable majority indicating a safety + // violation or a fork // All Validators gives signed-tx set_tx_info_response_with_signed_tx( @@ -898,7 +904,8 @@ async fn test_handle_transaction_response() { }; println!("Case 0 - Non-retryable Transaction (Unknown Error)"); - // Validators give invalid response because of the initial value set for their responses. + // Validators give invalid response because of the initial value set for their + // responses. let agg = get_genesis_agg(authorities.clone(), clients.clone()); assert_resp_err( @@ -962,7 +969,8 @@ async fn test_handle_transaction_response() { clients.get_mut(name).unwrap().reset_tx_info_response(); } let agg = get_genesis_agg(authorities.clone(), clients.clone()); - // We have a valid cert because val-0 has it. Note we can't form a cert based on what val-1 and val-2 give + // We have a valid cert because val-0 has it. Note we can't form a cert based on + // what val-1 and val-2 give agg.process_transaction(tx.clone().into()).await.unwrap(); println!("Case 4 - Retryable Transaction (MissingCommitteeAtEpoch Error)"); @@ -1027,7 +1035,9 @@ async fn test_handle_transaction_response() { // We have 2f+1 signed effects on epoch 1, so we are good. 
agg.process_transaction(tx.clone().into()).await.unwrap(); - println!("Case 6 - Retryable Transaction (most staked effects stake + retryable stake >= 2f+1 with QuorumFailedToGetEffectsQuorumWhenProcessingTransaction Error)"); + println!( + "Case 6 - Retryable Transaction (most staked effects stake + retryable stake >= 2f+1 with QuorumFailedToGetEffectsQuorumWhenProcessingTransaction Error)" + ); // Val 0, 1 & 2 returns retryable error set_retryable_tx_info_response_error(&mut clients, &authority_keys); // Validator 3 returns tx-cert with epoch 1 @@ -1095,7 +1105,8 @@ async fn test_handle_transaction_response() { .unwrap() .set_tx_info_response(resp); - // Validators 3 returns different tx-effects without cert for epoch 1 (simulating byzantine behavior) + // Validator 3 returns different tx-effects without cert for epoch 1 + // (simulating byzantine behavior) let effects = TestEffectsBuilder::new(cert_epoch_0.data()) .with_status(ExecutionStatus::Failure { error: ExecutionFailureStatus::InvalidGasObject, }) @@ -1181,7 +1192,8 @@ async fn test_handle_transaction_response() { .unwrap() .set_tx_info_response(resp); - // Validators 3 returns tx2-effects without cert for epoch 1 (simulating byzantine behavior) + // Validator 3 returns tx2-effects without cert for epoch 1 (simulating + // byzantine behavior) let effects = TestEffectsBuilder::new(cert_epoch_0_2.data()) .with_status(ExecutionStatus::Failure { error: ExecutionFailureStatus::InsufficientGas, }) @@ -1295,7 +1307,8 @@ async fn test_handle_transaction_response() { ) .await; - // TODO: change to use a move transaction which makes package error more realistic + // TODO: change to use a move transaction which makes package error more + // realistic println!("Case 8.1 - Retryable Transaction (PackageNotFound Error)"); // < 2f+1 package not found errors for (name, _) in authority_keys.iter().skip(2) { @@ -1638,7 +1651,9 @@ async fn test_handle_conflicting_transaction_response() { ) .await; - println!("Case 3.1 - Non-retryable Tx (Mixed Response - 1 conflict, 1 signed, 1 non-retryable, 1 retryable)"); + println!( + "Case 3.1 - Non-retryable Tx (Mixed Response - 1 conflict, 1 signed, 1 non-retryable, 1 retryable)" + ); // Validator 1 returns a signed tx1 set_tx_info_response_with_signed_tx(&mut clients, &authority_keys, &tx1, 0); // Validator 2 returns a conflicting tx2 @@ -1784,7 +1799,8 @@ async fn test_handle_conflicting_transaction_response() { .unwrap() .into_cert_for_testing(); - // Validators have moved to epoch 2 and return tx-effects with epoch 2, client expects 1 + // Validators have moved to epoch 2 and return tx-effects with epoch 2, client + // expects 1 let effects = TestEffectsBuilder::new(cert_epoch_1.data()).build(); set_tx_info_response_with_cert_and_effects( &mut clients, @@ -1881,7 +1897,8 @@ async fn test_handle_overload_response() { }; let rpc_error = SuiError::RpcError("RPC".into(), "Error".into()); - // Have 2f + 1 validators return the overload error and we should get the `SystemOverload` error. + // Have 2f + 1 validators return the overload error and we should get the + // `SystemOverload` error. set_retryable_tx_info_response_error(&mut clients, &authority_keys); set_tx_info_response_with_error(&mut clients, authority_keys.iter().skip(1), overload_error); @@ -1907,8 +1924,8 @@ async fn test_handle_overload_response() { ) .await; - // Change one of the valdiators' errors to RPC error so the system is considered not overloaded now and a `RetryableTransaction` - // should be returned.
+ // Change one of the validators' errors to RPC error so the system is considered + // not overloaded now and a `RetryableTransaction` should be returned. clients .get_mut(&authority_keys[1].0) .unwrap() @@ -1935,7 +1952,8 @@ .await; } -// Tests that authority aggregator can aggregate SuiError::ValidatorOverloadedRetryAfter into +// Tests that authority aggregator can aggregate +// SuiError::ValidatorOverloadedRetryAfter into // AggregatorProcessTransactionError::SystemOverloadRetryAfter. #[tokio::test] async fn test_handle_overload_retry_response() { @@ -1966,7 +1984,8 @@ }; let rpc_error = SuiError::RpcError("RPC".into(), "Error".into()); - // Have 2f + 1 validators return the overload error and we should get the `SystemOverload` error. + // Have 2f + 1 validators return the overload error and we should get the + // `SystemOverload` error. set_retryable_tx_info_response_error(&mut clients, &authority_keys); set_tx_info_response_with_error(&mut clients, authority_keys.iter().skip(1), overload_error); @@ -1989,8 +2008,8 @@ ) .await; - // Change one of the valdiators' errors to RPC error so the system is considered not overloaded now and a `RetryableTransaction` - // should be returned. + // Change one of the validators' errors to RPC error so the system is considered + // not overloaded now and a `RetryableTransaction` should be returned. clients .get_mut(&authority_keys[1].0) .unwrap() @@ -2040,8 +2059,9 @@ async fn test_early_exit_with_too_many_conflicts() { 666, // this is a dummy value which does not matter ); - // Now we have 3 conflicting transactions each with 1 stake. There is no hope to get quorum for any of them. - // So we expect to exit early before getting the final response (from whom is still sleeping). + // Now we have 3 conflicting transactions each with 1 stake. There is no hope to + // get quorum for any of them. So we expect to exit early before getting the + // final response (from the one that is still sleeping). set_tx_info_response_with_error( &mut clients, authority_keys.iter().take(1), @@ -2113,13 +2133,16 @@ async fn test_byzantine_authority_sig_aggregation() { assert!(run_aggregator(2, 6).await.is_ok()); assert!(run_aggregator(3, 6).await.is_err()); - // For 4 validators, we need 2f+1 = 3 for quorum for signing transaction effects. + // For 4 validators, we need 2f+1 = 3 for quorum for signing transaction + // effects. assert!(process_with_cert(1, 4).await.is_ok()); - // For 6 validators, we need 2f+1 = 5 for quorum for signing transaction effects. + // For 6 validators, we need 2f+1 = 5 for quorum for signing transaction + // effects. assert!(process_with_cert(1, 6).await.is_ok()); - // For 12 validators, we need 2f+1 = 9 for quorum for signing transaction effects. + // For 12 validators, we need 2f+1 = 9 for quorum for signing transaction + // effects. assert!(process_with_cert(1, 12).await.is_ok()); assert!(process_with_cert(2, 12).await.is_ok()); assert!(process_with_cert(3, 12).await.is_ok()); @@ -2145,8 +2168,8 @@ async fn test_fork_panic_process_cert_4_auths() { )); } -// Aggregator aggregate signatures from authorities and process the transaction as signed. -// Test [fn handle_transaction_response_with_signed]. +// Aggregator aggregates signatures from authorities and processes the transaction +// as signed. Test [fn handle_transaction_response_with_signed].
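The thresholds quoted in the comments above (3 of 4, 5 of 6, 9 of 12) all follow from the same stake arithmetic: a quorum is any set holding strictly more than two thirds of the total stake. With one unit of stake per validator, that reduces to the helper below; this is an illustration of the arithmetic, not the aggregator's actual code:

// Smallest stake strictly greater than two thirds of `total_stake`.
fn quorum_threshold(total_stake: u64) -> u64 {
    2 * total_stake / 3 + 1
}

fn main() {
    // Reproduces the figures in the surrounding test comments.
    assert_eq!(quorum_threshold(4), 3); // 3 of 4
    assert_eq!(quorum_threshold(6), 5); // 5 of 6
    assert_eq!(quorum_threshold(12), 9); // 9 of 12
}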
async fn run_aggregator( num_byzantines: u8, num_authorities: u8, @@ -2197,7 +2220,8 @@ async fn run_aggregator( secret, ) }; - // For each client, set the response with the correspond good/bad auth signatures. + // For each client, set the response with the corresponding good/bad auth + // signatures. let resp = HandleTransactionResponse { status: TransactionStatus::Signed(auth_signature), }; @@ -2208,8 +2232,8 @@ async fn run_aggregator( agg.process_transaction(tx.clone()).await } -// Aggregator aggregate signatures from authorities and process the transaction as executed. -// Test [fn handle_transaction_response_with_executed]. +// Aggregator aggregates signatures from authorities and processes the transaction +// as executed. Test [fn handle_transaction_response_with_executed]. async fn process_with_cert( num_byzantines: u8, num_authorities: u8, diff --git a/crates/sui-core/src/unit_tests/authority_tests.rs b/crates/sui-core/src/unit_tests/authority_tests.rs index 49b0f02a46d..ff735d522f5 100644 --- a/crates/sui-core/src/unit_tests/authority_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_tests.rs @@ -2,19 +2,22 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{collections::HashSet, convert::TryInto, env, fs}; + use bcs; use fastcrypto::traits::KeyPair; use futures::{stream::FuturesUnordered, StreamExt}; -use move_binary_format::access::ModuleAccess; use move_binary_format::{ + access::ModuleAccess, file_format::{self, AddressIdentifierIndex, IdentifierIndex, ModuleHandle}, CompiledModule, }; -use move_core_types::identifier::IdentStr; -use move_core_types::language_storage::StructTag; -use move_core_types::parser::parse_type_tag; use move_core_types::{ - account_address::AccountAddress, ident_str, identifier::Identifier, language_storage::TypeTag, + account_address::AccountAddress, + ident_str, + identifier::{IdentStr, Identifier}, + language_storage::{StructTag, TypeTag}, + parser::parse_type_tag, }; use rand::{ distributions::{Distribution, Uniform}, @@ -22,53 +25,45 @@ use rand::{ Rng, SeedableRng, }; use serde_json::json; -use std::collections::HashSet; -use std::fs; -use std::{convert::TryInto, env}; - use sui_json_rpc_types::{ SuiArgument, SuiExecutionResult, SuiExecutionStatus, SuiTransactionBlockEffectsAPI, SuiTypeTag, }; use sui_macros::sim_test; use sui_protocol_config::{ProtocolConfig, SupportedProtocolVersions}; -use sui_types::digests::ConsensusCommitDigest; -use sui_types::dynamic_field::DynamicFieldType; -use sui_types::effects::TransactionEffects; -use sui_types::epoch_data::EpochData; -use sui_types::error::UserInputError; -use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus}; -use sui_types::gas_coin::GasCoin; -use sui_types::messages_consensus::{ConsensusCommitPrologue, ConsensusCommitPrologueV2}; -use sui_types::object::Data; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::randomness_state::get_randomness_state_obj_initial_shared_version; -use sui_types::storage::GetSharedLocks; -use sui_types::sui_system_state::SuiSystemStateWrapper; -use sui_types::utils::{ - to_sender_signed_transaction, to_sender_signed_transaction_with_multi_signers, -}; use sui_types::{ base_types::dbg_addr, - crypto::{get_key_pair, Signature}, - crypto::{AccountKeyPair, AuthorityKeyPair}, - object::{Owner, GAS_VALUE_FOR_TESTING, OBJECT_START_VERSION}, + crypto::{get_key_pair, AccountKeyPair, AuthorityKeyPair, Signature}, + digests::ConsensusCommitDigest, +
dynamic_field::DynamicFieldType, + effects::TransactionEffects, + epoch_data::EpochData, + error::UserInputError, + execution_status::{ExecutionFailureStatus, ExecutionStatus}, + gas_coin::GasCoin, + messages_consensus::{ConsensusCommitPrologue, ConsensusCommitPrologueV2}, + object::{Data, Owner, GAS_VALUE_FOR_TESTING, OBJECT_START_VERSION}, + programmable_transaction_builder::ProgrammableTransactionBuilder, + randomness_state::get_randomness_state_obj_initial_shared_version, + storage::GetSharedLocks, + sui_system_state::SuiSystemStateWrapper, + utils::{to_sender_signed_transaction, to_sender_signed_transaction_with_multi_signers}, MOVE_STDLIB_PACKAGE_ID, SUI_AUTHENTICATOR_STATE_OBJECT_ID, SUI_CLOCK_OBJECT_ID, SUI_FRAMEWORK_PACKAGE_ID, SUI_RANDOMNESS_STATE_OBJECT_ID, SUI_SYSTEM_STATE_OBJECT_ID, }; -use crate::authority::authority_store_tables::AuthorityPerpetualTables; -use crate::authority::move_integration_tests::build_and_publish_test_package_with_upgrade_cap; -use crate::authority::test_authority_builder::TestAuthorityBuilder; +use super::*; +pub use crate::authority::authority_test_utils::*; use crate::{ + authority::{ + authority_store_tables::AuthorityPerpetualTables, + move_integration_tests::build_and_publish_test_package_with_upgrade_cap, + test_authority_builder::TestAuthorityBuilder, + }, authority_client::{AuthorityAPI, NetworkAuthorityClient}, authority_server::AuthorityServer, test_utils::init_state_parameters_from_rng, }; -use super::*; - -pub use crate::authority::authority_test_utils::*; - pub enum TestCallArg { Pure(Vec<u8>), Object(ObjectID), @@ -149,7 +144,7 @@ async fn construct_shared_object_transaction_with_sequence_number( .await .unwrap(); effects.status().unwrap(); - let shared_object_id = effects.created()[0].0 .0; + let shared_object_id = effects.created()[0].0.0; let mut shared_object = authority .get_object(&shared_object_id) .await @@ -183,9 +178,10 @@ async fn construct_shared_object_transaction_with_sequence_number( package.0, ident_str!("object_basics").to_owned(), ident_str!("set_value").to_owned(), - /* type_args */ vec![], + // type_args + vec![], gas_object_ref, - /* args */ + // args vec![ CallArg::Object(ObjectArg::SharedObject { id: shared_object_id, @@ -354,7 +350,7 @@ async fn test_dev_inspect_object_by_bytes() { ) .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let created_object = validator .get_object(&created_object_id) .await @@ -465,7 +461,7 @@ async fn test_dev_inspect_unowned_object() { ) .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let created_object = validator .get_object(&created_object_id) .await @@ -537,7 +533,7 @@ async fn test_dev_inspect_dynamic_field() { .await .unwrap(); assert!(effects.status().is_ok(), "{:#?}", effects.status()); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let created_object = validator .get_object(&created_object_id) .await @@ -647,7 +643,7 @@ async fn test_dev_inspect_return_values() { ) .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let created_object = validator .get_object(&created_object_id) .await @@ -987,8 +983,8 @@ async fn test_dry_run_on_validator() { assert!(response.is_err()); } -// Tests using a dynamic field that is newer than the parent in dev inspect/dry run results -// in not being able to access the dynamic
field object +// Tests using a dynamic field that is newer than the parent in dev inspect/dry +// run results in not being able to access the dynamic field object #[tokio::test] async fn test_dry_run_dev_inspect_dynamic_field_too_new() { let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); @@ -1206,19 +1202,19 @@ async fn test_handle_transfer_transaction_bad_signature() { let mut bad_signature_transfer_transaction = transfer_transaction.clone().into_inner(); *bad_signature_transfer_transaction .data_mut_for_testing() - .tx_signatures_mut_for_testing() = - vec![ - Signature::new_secure(transfer_transaction.data().intent_message(), &unknown_key) - .into(), - ]; + .tx_signatures_mut_for_testing() = vec![ + Signature::new_secure(transfer_transaction.data().intent_message(), &unknown_key).into(), + ]; - assert!(client - .handle_transaction(bad_signature_transfer_transaction) - .await - .is_err()); + assert!( + client + .handle_transaction(bad_signature_transfer_transaction) + .await + .is_err() + ); - // This metric does not increment because of the early check for correct sender address in - // verify_user_input (transaction.rs) + // This metric does not increment because of the early check for correct sender + // address in verify_user_input (transaction.rs) // assert_eq!(metrics.signature_errors.get(), 1); let object = authority_state @@ -1226,23 +1222,27 @@ async fn test_handle_transfer_transaction_bad_signature() { .await .unwrap() .unwrap(); - assert!(authority_state - .get_transaction_lock( - &object.compute_object_reference(), - &authority_state.epoch_store_for_testing() - ) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_transaction_lock( + &object.compute_object_reference(), + &authority_state.epoch_store_for_testing() + ) + .await + .unwrap() + .is_none() + ); - assert!(authority_state - .get_transaction_lock( - &object.compute_object_reference(), - &authority_state.epoch_store_for_testing() - ) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_transaction_lock( + &object.compute_object_reference(), + &authority_state.epoch_store_for_testing() + ) + .await + .unwrap() + .is_none() + ); } #[tokio::test] @@ -1337,33 +1337,39 @@ async fn test_handle_transfer_transaction_unknown_sender() { rgp, ); - assert!(authority_state - .handle_transaction(&epoch_store, unknown_sender_transfer_transaction) - .await - .is_err()); + assert!( + authority_state + .handle_transaction(&epoch_store, unknown_sender_transfer_transaction) + .await + .is_err() + ); let object = authority_state .get_object(&object_id) .await .unwrap() .unwrap(); - assert!(authority_state - .get_transaction_lock( - &object.compute_object_reference(), - &authority_state.epoch_store_for_testing() - ) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_transaction_lock( + &object.compute_object_reference(), + &authority_state.epoch_store_for_testing() + ) + .await + .unwrap() + .is_none() + ); - assert!(authority_state - .get_transaction_lock( - &object.compute_object_reference(), - &authority_state.epoch_store_for_testing() - ) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_transaction_lock( + &object.compute_object_reference(), + &authority_state.epoch_store_for_testing() + ) + .await + .unwrap() + .is_none() + ); } #[tokio::test] @@ -1407,21 +1413,25 @@ async fn test_handle_transfer_transaction_ok() { ); // Check the initial state of the locks - assert!(authority_state - .get_transaction_lock( - &(object_id, 
before_object_version, object.digest()), - &authority_state.epoch_store_for_testing() - ) - .await - .unwrap() - .is_none()); - assert!(authority_state - .get_transaction_lock( - &(object_id, after_object_version, object.digest()), - &authority_state.epoch_store_for_testing() - ) - .await - .is_err()); + assert!( + authority_state + .get_transaction_lock( + &(object_id, before_object_version, object.digest()), + &authority_state.epoch_store_for_testing() + ) + .await + .unwrap() + .is_none() + ); + assert!( + authority_state + .get_transaction_lock( + &(object_id, after_object_version, object.digest()), + &authority_state.epoch_store_for_testing() + ) + .await + .is_err() + ); let account_info = authority_state .handle_transaction(&epoch_store, transfer_transaction.clone()) @@ -1767,11 +1777,13 @@ async fn test_publish_dependent_module_ok() { TxContext::new(&sender, transaction.digest(), &EpochData::new_test()).fresh_id(); // Object does not exist - assert!(authority - .get_object(&dependent_module_id) - .await - .unwrap() - .is_none()); + assert!( + authority + .get_object(&dependent_module_id) + .await + .unwrap() + .is_none() + ); let signed_effects = send_and_confirm_transaction(&authority, transaction) .await .unwrap() @@ -1844,7 +1856,8 @@ async fn test_publish_non_existing_dependent_module() { }; // create a module that depends on a genesis module let mut dependent_module = make_dependent_module(&genesis_module); - // Add another dependent module that points to a random address, hence does not exist on-chain. + // Add another dependent module that points to a random address, hence does not + // exist on-chain. let not_on_chain = ObjectID::random(); dependent_module .address_identifiers @@ -1875,8 +1888,10 @@ async fn test_publish_non_existing_dependent_module() { let response = authority .handle_transaction(&epoch_store, transaction) .await; - assert!(std::string::ToString::to_string(&response.unwrap_err()) - .contains("DependentPackageNotFound")); + assert!( + std::string::ToString::to_string(&response.unwrap_err()) + .contains("DependentPackageNotFound") + ); // Check that gas was not charged. 
assert_eq!( authority @@ -1899,9 +1914,10 @@ async fn test_package_size_limit() { let gas_payment_object_ref = gas_payment_object.compute_object_reference(); let mut package = Vec::new(); let mut modules_size = 0; - // create a package larger than the max size; serialized modules is the largest contributor and - // while other metadata is also contributing to the size it's easiest to construct object that's - // too large by adding more module bytes + // create a package larger than the max size; serialized modules are the largest + // contributor, and while other metadata also contributes to the size, + // it's easiest to construct an object that's too large by adding more module + // bytes let max_move_package_size = ProtocolConfig::get_for_min_version().max_move_package_size(); while modules_size <= max_move_package_size { let mut module = file_format::empty_module(); @@ -1962,7 +1978,7 @@ async fn test_handle_move_transaction() { assert_eq!(effects.created().len(), 1); assert_eq!(effects.mutated().len(), 1); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; // check that transaction actually created an object with the expected ID, owner let created_obj = authority_state .get_object(&created_object_id) @@ -2018,11 +2034,12 @@ async fn test_conflicting_transactions() { rgp, ); - // repeatedly attempt to submit conflicting transactions at the same time, and verify that - // exactly one succeeds in every case. + // repeatedly attempt to submit conflicting transactions at the same time, and + // verify that exactly one succeeds in every case. // - // Note: I verified that this test fails immediately if we remove the acquire_locks() call in - // acquire_transaction_locks() and then add a sleep after we read the locks. + // Note: I verified that this test fails immediately if we remove the + // acquire_locks() call in acquire_transaction_locks() and then add a sleep + // after we read the locks.
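Conceptually, the lock check exercised here is a first-writer-wins table keyed by object reference. A hypothetical sketch of that behavior, with the caveat that the real logic lives in acquire_transaction_locks() and is considerably more involved:

use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Default)]
struct OwnedObjectLocks {
    // object reference -> digest of the transaction holding the lock
    locks: Mutex<HashMap<String, String>>,
}

impl OwnedObjectLocks {
    fn acquire(&self, obj_ref: &str, tx_digest: &str) -> Result<(), String> {
        let mut locks = self.locks.lock().unwrap();
        match locks.get(obj_ref) {
            // First writer wins; re-acquiring for the same transaction is idempotent.
            None => {
                locks.insert(obj_ref.to_string(), tx_digest.to_string());
                Ok(())
            }
            Some(prev) if prev == tx_digest => Ok(()),
            Some(prev) => Err(format!("object already locked by {prev}")),
        }
    }
}

Holding the map's mutex across the read and the insert is what the "remove acquire_locks() and add a sleep" note is about: without it, two conflicting transactions can both observe the lock as free.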
for _ in 0..100 { let mut futures = FuturesUnordered::new(); futures.push(authority_state.handle_transaction(&epoch_store, tx1.clone())); @@ -2126,7 +2143,8 @@ async fn test_handle_transfer_transaction_double_spend() { .handle_transaction(&epoch_store, transfer_transaction) .await .unwrap(); - // this is valid because our test authority should not change its certified transaction + // this is valid because our test authority should not change its certified + // transaction assert_eq!(signed_transaction, double_spend_signed_transaction); } @@ -2395,21 +2413,25 @@ async fn test_handle_confirmation_transaction_ok() { assert_eq!(next_sequence_number, new_account.version()); // Check locks are set and archived correctly - assert!(authority_state - .get_transaction_lock( - &(object_id, 1.into(), old_account.digest()), - &authority_state.epoch_store_for_testing() - ) - .await - .is_err()); - assert!(authority_state - .get_transaction_lock( - &(object_id, 2.into(), new_account.digest()), - &authority_state.epoch_store_for_testing() - ) - .await - .expect("Exists") - .is_none()); + assert!( + authority_state + .get_transaction_lock( + &(object_id, 1.into(), old_account.digest()), + &authority_state.epoch_store_for_testing() + ) + .await + .is_err() + ); + assert!( + authority_state + .get_transaction_lock( + &(object_id, 2.into(), new_account.digest()), + &authority_state.epoch_store_for_testing() + ) + .await + .expect("Exists") + .is_none() + ); } #[tokio::test] @@ -2458,7 +2480,8 @@ async fn test_handle_confirmation_transaction_idempotent() { .unwrap(); assert_eq!(signed_effects2.data().status(), &ExecutionStatus::Success); - // this is valid because we're checking the authority state does not change the certificate + // this is valid because we're checking the authority state does not change the + // certificate assert_eq!(signed_effects, signed_effects2); // Now check the transaction info request is also the same @@ -2535,7 +2558,8 @@ async fn test_move_call_mutable_object_not_mutated() { .unwrap(); assert!(effects.status().is_ok()); assert_eq!((effects.created().len(), effects.mutated().len()), (0, 3)); - // Verify that both objects' version increased, even though only one object was updated. + // Verify that both objects' version increased, even though only one object was + // updated. assert_eq!( authority_state .get_object(&new_object_id1) @@ -2558,9 +2582,9 @@ async fn test_move_call_mutable_object_not_mutated() { #[tokio::test] async fn test_move_call_insufficient_gas() { - // This test attempts to trigger a transaction execution that would fail due to insufficient gas. - // We want to ensure that even though the transaction failed to execute, all objects - // are mutated properly. + // This test attempts to trigger a transaction execution that would fail due to + // insufficient gas. We want to ensure that even though the transaction + // failed to execute, all objects are mutated properly. let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let (recipient, recipient_key): (_, AccountKeyPair) = get_key_pair(); let object_id = ObjectID::random(); @@ -2574,8 +2598,8 @@ async fn test_move_call_insufficient_gas() { .await; let rgp = authority_state.reference_gas_price_for_testing().unwrap(); - // First execute a transaction successfully to obtain the amount of gas needed for this - // type of transaction. + // First execute a transaction successfully to obtain the amount of gas needed + // for this type of transaction. 
// After this transaction, object_id will be owned by recipient. let certified_transfer_transaction = init_certified_transfer_transaction( sender, @@ -2631,7 +2655,8 @@ async fn test_move_call_insufficient_gas() { } else { 2000 }; - // Now we try to construct a transaction with a smaller gas budget than required. + // Now we try to construct a transaction with a smaller gas budget than + // required. let data = TransactionData::new_transfer(sender, obj_ref, recipient, gas_ref, gas_used - 5, rgp); @@ -2728,11 +2753,13 @@ async fn test_move_call_delete() { async fn test_get_latest_parent_entry_genesis() { let authority_state = TestAuthorityBuilder::new().build().await; // There should not be any object with ID zero - assert!(authority_state - .get_object_or_tombstone(ObjectID::ZERO) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_object_or_tombstone(ObjectID::ZERO) + .await + .unwrap() + .is_none() + ); } #[tokio::test] @@ -2764,7 +2791,7 @@ async fn test_get_latest_parent_entry() { .unwrap(); let (new_object_id2, seq2, _) = effects.created()[0].0; - let update_version = SequenceNumber::lamport_increment([seq1, seq2, effects.gas_object().0 .1]); + let update_version = SequenceNumber::lamport_increment([seq1, seq2, effects.gas_object().0.1]); let effects = call_move( &authority_state, @@ -2792,7 +2819,7 @@ async fn test_get_latest_parent_entry() { assert_eq!(obj_ref.0, new_object_id1); assert_eq!(obj_ref.1, update_version); - let delete_version = SequenceNumber::lamport_increment([obj_ref.1, effects.gas_object().0 .1]); + let delete_version = SequenceNumber::lamport_increment([obj_ref.1, effects.gas_object().0.1]); let _effects = call_move( &authority_state, @@ -2816,11 +2843,13 @@ async fn test_get_latest_parent_entry() { // Prevent overflow x[last_index] = u8::MAX - x[last_index]; let unknown_object_id: ObjectID = x.try_into().unwrap(); - assert!(authority_state - .get_object_or_tombstone(unknown_object_id) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_object_or_tombstone(unknown_object_id) + .await + .unwrap() + .is_none() + ); // Check gas object is returned. let obj_ref = authority_state @@ -2860,11 +2889,13 @@ async fn test_account_state_unknown_account() { let sender = dbg_addr(1); let unknown_address = dbg_object_id(99); let authority_state = init_state_with_object_id(sender, ObjectID::random()).await; - assert!(authority_state - .get_object(&unknown_address) - .await - .unwrap() - .is_none()); + assert!( + authority_state + .get_object(&unknown_address) + .await + .unwrap() + .is_none() + ); } #[tokio::test] @@ -2909,8 +2940,9 @@ async fn test_authority_persist() { // Close the authority drop(authority); - // TODO: The right fix is to invoke some function on DBMap and release the rocksdb arc references - // being held in the background thread but this will suffice for now + // TODO: The right fix is to invoke some function on DBMap and release the + // rocksdb arc references being held in the background thread but this will + // suffice for now tokio::time::sleep(std::time::Duration::from_secs(1)).await; // Reopen the same authority with the same path @@ -2932,9 +2964,9 @@ async fn test_authority_persist() { #[tokio::test] async fn test_idempotent_reversed_confirmation() { - // In this test we exercise the case where an authority first receive the certificate, - // and then receive the raw transaction latter. We should still ensure idempotent - // response and be able to get back the same result. 
+ // In this test we exercise the case where an authority first receives the + // certificate, and then receives the raw transaction later. We should still + // ensure idempotent response and be able to get back the same result. let recipient = dbg_addr(2); let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); @@ -3049,8 +3081,8 @@ async fn test_refusal_to_sign_consensus_commit_prologue_v2() { #[tokio::test] async fn test_invalid_mutable_clock_parameter() { - // User transactions that take the singleton Clock object at `0x6` by mutable reference will - // fail to sign, to prevent transactions bottlenecking on it. + // User transactions that take the singleton Clock object at `0x6` by mutable + // reference will fail to sign, to prevent transactions bottlenecking on it. let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let gas_object_id = ObjectID::random(); let (authority_state, package_object_ref) = @@ -3065,7 +3097,8 @@ async fn test_invalid_mutable_clock_parameter() { package_object_ref.0, ident_str!("object_basics").to_owned(), ident_str!("use_clock").to_owned(), - /* type_args */ vec![], + // type_args + vec![], gas_ref, vec![CallArg::CLOCK_MUT], TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS * rgp, @@ -3093,8 +3126,9 @@ async fn test_invalid_mutable_clock_parameter() { #[tokio::test] async fn test_invalid_authenticator_state_parameter() { - // User transactions that take the singleton AuthenticatorState object at `0x7` by mutable - // reference will fail to sign, to prevent transactions bottlenecking on it. + // User transactions that take the singleton AuthenticatorState object at `0x7` + // by mutable reference will fail to sign, to prevent transactions + // bottlenecking on it. let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let gas_object_id = ObjectID::random(); let (authority_state, package_object_ref) = @@ -3109,7 +3143,8 @@ async fn test_invalid_authenticator_state_parameter() { package_object_ref.0, ident_str!("object_basics").to_owned(), ident_str!("use_auth_state").to_owned(), - /* type_args */ vec![], + // type_args + vec![], gas_ref, vec![CallArg::AUTHENTICATOR_MUT], TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS * rgp, @@ -3137,8 +3172,9 @@ async fn test_invalid_authenticator_state_parameter() { #[tokio::test] async fn test_invalid_randomness_parameter() { - // User transactions that take the singleton Randomness object at `0x8` by mutable - // reference will fail to sign, to prevent transactions bottlenecking on it. + // User transactions that take the singleton Randomness object at `0x8` by + // mutable reference will fail to sign, to prevent transactions + // bottlenecking on it. let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let gas_object_id = ObjectID::random(); let (authority_state, package_object_ref) = @@ -3164,7 +3200,8 @@ async fn test_invalid_randomness_parameter() { package_object_ref.0, ident_str!("object_basics").to_owned(), ident_str!("use_random").to_owned(), - /* type_args */ vec![], + // type_args + vec![], gas_ref, vec![random_mut], TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS * rgp, @@ -3190,7 +3227,8 @@ async fn test_invalid_randomness_parameter() { #[tokio::test] async fn test_invalid_object_ownership() { - // User transaction that attempts to mutate an object it does not own will fail to sign. + // User transaction that attempts to mutate an object it does not own will fail + // to sign.
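The failure asserted in this test reduces to an owner/sender comparison at signing time. A hypothetical sketch that produces the same error message the test matches on (the real check sits in the transaction input validation path):

fn check_owned_object(object_id: &str, object_owner: &str, signer: &str) -> Result<(), String> {
    // Signing an owned-object mutation requires the tx sender to be the owner.
    if object_owner != signer {
        return Err(format!(
            "Object {object_id} is owned by account address {object_owner}, \
             but given owner/signer address is {signer}"
        ));
    }
    Ok(())
}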
let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let (invalid_owner, _): (_, AccountKeyPair) = get_key_pair(); @@ -3229,7 +3267,12 @@ async fn test_invalid_object_ownership() { }; assert_eq!( UserInputError::try_from(e).unwrap(), - UserInputError::IncorrectUserSignature { error: format!("Object {:?} is owned by account address {:?}, but given owner/signer address is {:?}", invalid_ownership_object_id, invalid_owner, sender)} + UserInputError::IncorrectUserSignature { + error: format!( + "Object {:?} is owned by account address {:?}, but given owner/signer address is {:?}", + invalid_ownership_object_id, invalid_owner, sender + ) + } ); } @@ -3250,7 +3293,8 @@ async fn test_valid_immutable_clock_parameter() { package_object_ref.0, ident_str!("object_basics").to_owned(), ident_str!("use_clock").to_owned(), - /* type_args */ vec![], + // type_args + vec![], gas_ref, vec![CallArg::CLOCK_IMM], TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS * rgp, @@ -3269,7 +3313,8 @@ #[tokio::test] async fn test_genesis_sui_system_state_object() { // This test verifies that we can read the genesis SuiSystemState object. - // And its Move layout matches the definition in Rust (so that we can deserialize it). + // And its Move layout matches the definition in Rust (so that we can + // deserialize it). let authority_state = TestAuthorityBuilder::new().build().await; let wrapper = authority_state .get_object(&SUI_SYSTEM_STATE_OBJECT_ID) @@ -3329,11 +3374,12 @@ async fn test_transfer_sui_no_amount() { .await .unwrap(); let effects = signed_effects.into_message(); - // Check that the transaction was successful, and the gas object is the only mutated object, - // and got transferred. Also check on its version and new balance. + // Check that the transaction was successful, and the gas object is the only + // mutated object, and got transferred. Also check its version and new + // balance. assert!(effects.status().is_ok()); assert!(effects.mutated_excluding_gas().is_empty()); - assert!(gas_ref.1 < effects.gas_object().0 .1); + assert!(gas_ref.1 < effects.gas_object().0.1); assert_eq!(effects.gas_object().1, Owner::AddressOwner(recipient)); let new_balance = sui_types::gas::get_gas_balance( &authority_state @@ -3375,19 +3421,19 @@ async fn test_transfer_sui_with_amount() { .await .unwrap(); let effects = signed_effects.into_message(); - // Check that the transaction was successful, the gas object remains in the original owner, - // and an amount is split out and send to the recipient. + // Check that the transaction was successful, the gas object remains with the + // original owner, and an amount is split out and sent to the recipient.
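The assertions that follow rest on simple balance arithmetic: the recipient's new coin holds exactly the requested amount, and the gas coin keeps the remainder net of fees. A sketch under those assumptions (names hypothetical; assumes the balance covers amount plus fee):

// Splitting `amount` out of a gas coin of `balance`, paying `gas_fee` from it.
fn transfer_sui_with_amount(balance: u64, amount: u64, gas_fee: u64) -> (u64, u64) {
    let recipient_coin = amount; // newly created coin, owned by the recipient
    let sender_remainder = balance - amount - gas_fee; // gas coin stays with the sender
    (recipient_coin, sender_remainder)
}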
assert!(effects.status().is_ok()); assert!(effects.mutated_excluding_gas().is_empty()); assert_eq!(effects.created().len(), 1); assert_eq!(effects.created()[0].1, Owner::AddressOwner(recipient)); let new_gas = authority_state - .get_object(&effects.created()[0].0 .0) + .get_object(&effects.created()[0].0.0) .await .unwrap() .unwrap(); assert_eq!(sui_types::gas::get_gas_balance(&new_gas).unwrap(), 500); - assert!(gas_ref.1 < effects.gas_object().0 .1); + assert!(gas_ref.1 < effects.gas_object().0.1); assert_eq!(effects.gas_object().1, Owner::AddressOwner(sender)); let new_balance = sui_types::gas::get_gas_balance( &authority_state @@ -3517,7 +3563,7 @@ async fn test_store_revert_wrap_move_call() { // The gas is uncharged let gas = db.get_object(&gas_object_id).unwrap().unwrap(); - assert_eq!(gas.version(), create_effects.gas_object().0 .1); + assert_eq!(gas.version(), create_effects.gas_object().0.1); } #[tokio::test] @@ -3590,7 +3636,7 @@ async fn test_store_revert_unwrap_move_call() { assert_eq!(unwrap_effects.deleted().len(), 1); assert_eq!(unwrap_effects.deleted()[0].0, wrapper_v0.0); assert_eq!(unwrap_effects.unwrapped().len(), 1); - assert_eq!(unwrap_effects.unwrapped()[0].0 .0, object_v0.0); + assert_eq!(unwrap_effects.unwrapped()[0].0.0, object_v0.0); let db = &authority_state.database_for_testing(); @@ -3605,7 +3651,7 @@ async fn test_store_revert_unwrap_move_call() { // The gas is uncharged let gas = db.get_object(&gas_object_id).unwrap().unwrap(); - assert_eq!(gas.version(), wrap_effects.gas_object().0 .1); + assert_eq!(gas.version(), wrap_effects.gas_object().0.1); } #[tokio::test] async fn test_store_get_dynamic_object() { @@ -4027,7 +4073,8 @@ async fn test_iter_live_object_set() { &sender_key, &gas, "object_wrapping", - /* with_unpublished_deps */ false, + // with_unpublished_deps + false, ) .await; @@ -4365,7 +4412,8 @@ pub async fn execute_programmable_transaction( sender, sender_key, pt, - /* with_shared */ false, + // with_shared + false, gas_unit, ) .await @@ -4386,7 +4434,8 @@ pub async fn execute_programmable_transaction_with_shared( sender, sender_key, pt, - /* with_shared */ true, + // with_shared + true, gas_unit, ) .await @@ -4611,10 +4660,12 @@ pub async fn call_dev_inspect( .await } -/// This function creates a transaction that calls a 0x02::object_basics::set_value function. -/// Usually we need to publish this package first, but in these test files we often don't do that. -/// Then the tx would fail with `VMVerificationOrDeserializationError` (Linker error, module not found), -/// but gas is still charged. Depending on what we want to test, this may be fine. +/// This function creates a transaction that calls a +/// 0x02::object_basics::set_value function. Usually we need to publish this +/// package first, but in these test files we often don't do that. Then the tx +/// would fail with `VMVerificationOrDeserializationError` (Linker error, module +/// not found), but gas is still charged. Depending on what we want to test, +/// this may be fine. 
#[cfg(test)] async fn make_test_transaction( sender: &SuiAddress, @@ -4639,9 +4690,10 @@ async fn make_test_transaction( SUI_FRAMEWORK_PACKAGE_ID, ident_str!(module).to_owned(), ident_str!(function).to_owned(), - /* type_args */ vec![], + // type_args + vec![], *gas_object_ref, - /* args */ + // args vec![ CallArg::Object(ObjectArg::SharedObject { id: shared_object_id, @@ -4682,8 +4734,8 @@ async fn make_test_transaction( unreachable!("couldn't form cert") } -async fn prepare_authority_and_shared_object_cert( -) -> (Arc<AuthorityState>, VerifiedCertificate, ObjectID) { +async fn prepare_authority_and_shared_object_cert() +-> (Arc<AuthorityState>, VerifiedCertificate, ObjectID) { let (sender, keypair): (_, AccountKeyPair) = get_key_pair(); // Initialize an authority with a (owned) gas object and a shared object. @@ -4721,7 +4773,8 @@ async fn prepare_authority_and_shared_object_cert( async fn test_shared_object_transaction_shared_locks_not_set() { let (authority, certificate, _) = prepare_authority_and_shared_object_cert().await; - // Executing the certificate now panics since it was not sequenced and shared locks are not set + // Executing the certificate now panics since it was not sequenced and shared + // locks are not set let _ = authority.try_execute_for_test(&certificate).await; } @@ -4833,8 +4886,9 @@ async fn test_consensus_message_processed() { let (effects1, _execution_error_opt) = authority1.try_execute_for_test(&certificate).await.unwrap(); - // now, on authority2, we send 0 or 1 consensus messages, then we either sequence and execute via - // effects or via handle_certificate_v2, then send 0 or 1 consensus messages. + // now, on authority2, we send 0 or 1 consensus messages, then we either + // sequence and execute via effects or via handle_certificate_v2, then + // send 0 or 1 consensus messages. let send_first = rng.gen_bool(0.5); if send_first { send_consensus(&authority2, &certificate).await; @@ -4867,7 +4921,8 @@ async fn test_consensus_message_processed() { assert_eq!(effects1.data(), &effects2); - // If we didn't send consensus before handle_node_sync_certificate, we need to do it now. + // If we didn't send consensus before handle_node_sync_certificate, we need to + // do it now. if !send_first { send_consensus(&authority2, &certificate).await; } @@ -4930,8 +4985,8 @@ fn test_choose_next_system_packages() { protocol_config.set_advance_to_highest_supported_protocol_version_for_testing(false); protocol_config.set_buffer_stake_for_protocol_upgrade_bps_for_testing(7500); - // all validators agree on new system packages, but without a new protocol version, so no - // upgrade. + // all validators agree on new system packages, but without a new protocol + // version, so no upgrade.
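The cases in this test all reduce to one stake tally. A deliberately simplified sketch (the real AuthorityState::choose_protocol_version_and_system_packages has more branches, e.g. the advance-to-highest setting): an upgrade candidate wins only when the stake voting for exactly that (version, packages) pair reaches the quorum.

fn stake_for_exact_vote(
    votes: &[(u64, Vec<u64>, u64)], // (version, sorted package ids, stake)
    version: u64,
    packages: &[u64],
) -> u64 {
    votes
        .iter()
        .filter(|(v, pkgs, _)| *v == version && pkgs.as_slice() == packages)
        .map(|(_, _, stake)| *stake)
        .sum()
}

fn should_upgrade(votes: &[(u64, Vec<u64>, u64)], version: u64, packages: &[u64], quorum: u64) -> bool {
    // No quorum on an exact (version, packages) pair means no upgrade, which is
    // why disagreement on packages alone is enough to block a version bump.
    stake_for_exact_vote(votes, version, packages) >= quorum
}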
let capabilities = vec![ make_capabilities!(3, v[0].0, vec![o1, o2]), make_capabilities!(3, v[1].0, vec![o1, o2]), @@ -5107,8 +5162,8 @@ fn test_choose_next_system_packages() { make_capabilities!(5, v[3].0, vec![o1, o2]), ]; - // packages are identical between all currently supported versions, so we can upgrade to - // 3 which is the highest supported version + // packages are identical between all currently supported versions, so we can + // upgrade to 3 which is the highest supported version assert_eq!( (ver(3), sort(vec![o1, o2])), AuthorityState::choose_protocol_version_and_system_packages( @@ -5127,9 +5182,10 @@ fn test_choose_next_system_packages() { make_capabilities!(3, v[3].0, vec![o1, o3]), ]; - // Even though 2f+1 validators agree on version 2, we don't have an agreement about the - // packages. In this situation it is likely that (v2, []) is a valid upgrade, but we don't have - // a way to detect that. The upgrade simply won't happen until everyone moves to 3. + // Even though 2f+1 validators agree on version 2, we don't have an agreement + // about the packages. In this situation it is likely that (v2, []) is a + // valid upgrade, but we don't have a way to detect that. The upgrade simply + // won't happen until everyone moves to 3. assert_eq!( (ver(1), sort(vec![])), AuthorityState::choose_protocol_version_and_system_packages( @@ -5144,7 +5200,8 @@ fn test_choose_next_system_packages() { #[tokio::test] async fn test_gas_smashing() { - // run a create move object transaction with a given set o gas coins and a budget + // run a create move object transaction with a given set o gas coins and a + // budget async fn create_obj( sender: SuiAddress, sender_key: AccountKeyPair, @@ -5207,7 +5264,7 @@ async fn test_gas_smashing() { assert!(effects.status().is_err()); } // gas object in effects is first coin in vector of coins - assert_eq!(gas_coin_ids[0], effects.gas_object().0 .0); + assert_eq!(gas_coin_ids[0], effects.gas_object().0.0); // object is created on success and gas at position 0 mutated let created = usize::from(success); assert_eq!( @@ -5217,10 +5274,12 @@ async fn test_gas_smashing() { // extra coin are deleted assert_eq!(effects.deleted().len() as u64, coin_num - 1); for gas_coin_id in &gas_coin_ids[1..] { - assert!(effects - .deleted() - .iter() - .any(|deleted| deleted.0 == *gas_coin_id)); + assert!( + effects + .deleted() + .iter() + .any(|deleted| deleted.0 == *gas_coin_id) + ); } // balance on first coin is correct let balance = sui_types::gas::get_gas_balance( @@ -5237,7 +5296,8 @@ async fn test_gas_smashing() { // 100,000 should be enough money for that transaction. let gas_used = run_and_check(100_000_000, 1, 100_000_000, true).await; - // add something to the gas used to account for multiple gas coins being charged for + // add something to the gas used to account for multiple gas coins being charged + // for let reference_gas_used = gas_used + 1_000; let three_coin_gas = run_and_check(reference_gas_used, 3, reference_gas_used, true).await; run_and_check(reference_gas_used, 10, reference_gas_used - 100, true).await; diff --git a/crates/sui-core/src/unit_tests/batch_transaction_tests.rs b/crates/sui-core/src/unit_tests/batch_transaction_tests.rs index 2f132187293..2539f56e07a 100644 --- a/crates/sui-core/src/unit_tests/batch_transaction_tests.rs +++ b/crates/sui-core/src/unit_tests/batch_transaction_tests.rs @@ -1,21 +1,19 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::*; -use crate::authority::authority_tests::init_state_with_ids_and_object_basics; +use authority_tests::send_and_confirm_transaction; use bcs; +use move_core_types::{account_address::AccountAddress, ident_str}; use sui_types::{ + crypto::{get_key_pair, AccountKeyPair}, execution_status::ExecutionStatus, + object::Owner, programmable_transaction_builder::ProgrammableTransactionBuilder, utils::to_sender_signed_transaction, }; -use authority_tests::send_and_confirm_transaction; -use move_core_types::{account_address::AccountAddress, ident_str}; -use sui_types::{ - crypto::{get_key_pair, AccountKeyPair}, - object::Owner, -}; +use super::*; +use crate::authority::authority_tests::init_state_with_ids_and_object_basics; #[tokio::test] async fn test_batch_transaction_ok() -> anyhow::Result<()> { @@ -60,11 +58,13 @@ async fn test_batch_transaction_ok() -> anyhow::Result<()> { } let data = TransactionData::new_programmable( sender, - vec![authority_state - .get_object(&all_ids[N]) - .await? - .unwrap() - .compute_object_reference()], + vec![ + authority_state + .get_object(&all_ids[N]) + .await? + .unwrap() + .compute_object_reference(), + ], builder.finish(), rgp * TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS * (N as u64), rgp, @@ -78,10 +78,12 @@ async fn test_batch_transaction_ok() -> anyhow::Result<()> { (effects.created().len(), effects.mutated().len()), (N, N + 1), ); - assert!(effects - .created() - .iter() - .all(|(_, owner)| owner == &Owner::AddressOwner(sender))); + assert!( + effects + .created() + .iter() + .all(|(_, owner)| owner == &Owner::AddressOwner(sender)) + ); // N of the objects should now be owned by recipient. assert_eq!( effects @@ -97,8 +99,9 @@ async fn test_batch_transaction_ok() -> anyhow::Result<()> { #[tokio::test] async fn test_batch_transaction_last_one_fail() -> anyhow::Result<()> { - // This test tests the case where the last transaction in a batch transaction would fail to execute. - // We make sure that the entire batch is rolled back, and only gas is charged. + // This test tests the case where the last transaction in a batch transaction + // would fail to execute. We make sure that the entire batch is rolled back, + // and only gas is charged. let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let (recipient, _): (_, AccountKeyPair) = get_key_pair(); const N: usize = 5; @@ -133,11 +136,13 @@ async fn test_batch_transaction_last_one_fail() -> anyhow::Result<()> { .unwrap(); let data = TransactionData::new_programmable( sender, - vec![authority_state - .get_object(&all_ids[N]) - .await? - .unwrap() - .compute_object_reference()], + vec![ + authority_state + .get_object(&all_ids[N]) + .await? + .unwrap() + .compute_object_reference(), + ], builder.finish(), rgp * TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS, rgp, @@ -158,8 +163,8 @@ async fn test_batch_transaction_last_one_fail() -> anyhow::Result<()> { #[tokio::test] async fn test_batch_insufficient_gas_balance() -> anyhow::Result<()> { - // This test creates 10 Move call transactions batch, each with a budget of 5000. - // However we provide a gas coin with only 49999 balance. + // This test creates 10 Move call transactions batch, each with a budget of + // 5000. However we provide a gas coin with only 49999 balance. 
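Spelled out, the arithmetic behind this failure case (values taken from the comment above):

fn main() {
    let (calls, budget_per_call, coin_balance) = (10u64, 5_000u64, 49_999u64);
    // 10 * 5_000 = 50_000, one MIST more than the coin can cover, so the
    // batch cannot even pass the gas balance check at signing.
    assert!(calls * budget_per_call > coin_balance);
}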
let (sender, sender_key): (_, AccountKeyPair) = get_key_pair(); let (authority_state, package) = init_state_with_ids_and_object_basics([]).await; let rgp = authority_state.reference_gas_price_for_testing()?; diff --git a/crates/sui-core/src/unit_tests/batch_verification_tests.rs b/crates/sui-core/src/unit_tests/batch_verification_tests.rs index 2d7fcc43716..50f5a01adcf 100644 --- a/crates/sui-core/src/unit_tests/batch_verification_tests.rs +++ b/crates/sui-core/src/unit_tests/batch_verification_tests.rs @@ -1,21 +1,25 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::signature_verifier::*; -use crate::test_utils::{make_cert_with_large_committee, make_dummy_tx}; +use std::sync::Arc; + use fastcrypto::traits::KeyPair; use futures::future::join_all; use prometheus::Registry; use rand::{thread_rng, Rng}; -use std::sync::Arc; use sui_macros::sim_test; -use sui_types::committee::Committee; -use sui_types::crypto::{get_key_pair, AccountKeyPair, AuthorityKeyPair}; -use sui_types::gas::GasCostSummary; -use sui_types::messages_checkpoint::{ - CheckpointContents, CheckpointSummary, SignedCheckpointSummary, +use sui_types::{ + committee::Committee, + crypto::{get_key_pair, AccountKeyPair, AuthorityKeyPair}, + gas::GasCostSummary, + messages_checkpoint::{CheckpointContents, CheckpointSummary, SignedCheckpointSummary}, + transaction::CertifiedTransaction, +}; + +use crate::{ + signature_verifier::*, + test_utils::{make_cert_with_large_committee, make_dummy_tx}, }; -use sui_types::transaction::CertifiedTransaction; // TODO consolidate with `gen_certs` in batch_verification_bench.rs fn gen_certs( @@ -86,8 +90,8 @@ async fn test_batch_verify() { } let (other_sender, other_sender_sec): (_, AccountKeyPair) = get_key_pair(); - // this test is a bit much for the current implementation - it was originally written to verify - // a bisecting fall back approach. + // this test is a bit much for the current implementation - it was originally + // written to verify a bisecting fall back approach. for i in 0..16 { let (receiver, _): (_, AccountKeyPair) = get_key_pair(); let mut certs = certs.clone(); diff --git a/crates/sui-core/src/unit_tests/consensus_tests.rs b/crates/sui-core/src/unit_tests/consensus_tests.rs index a027d3f9344..086ee98554a 100644 --- a/crates/sui-core/src/unit_tests/consensus_tests.rs +++ b/crates/sui-core/src/unit_tests/consensus_tests.rs @@ -1,27 +1,29 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::*; -use crate::authority::{authority_tests::init_state_with_objects, AuthorityState}; -use crate::checkpoints::CheckpointServiceNoop; -use crate::consensus_handler::SequencedConsensusTransaction; use move_core_types::{account_address::AccountAddress, ident_str}; -use narwhal_types::Transactions; -use narwhal_types::TransactionsServer; -use narwhal_types::{Empty, TransactionProto}; +use narwhal_types::{Empty, TransactionProto, Transactions, TransactionsServer}; use sui_network::tonic; -use sui_types::crypto::deterministic_random_account_key; -use sui_types::multiaddr::Multiaddr; -use sui_types::transaction::TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS; -use sui_types::utils::to_sender_signed_transaction; -use sui_types::SUI_FRAMEWORK_PACKAGE_ID; use sui_types::{ base_types::ObjectID, + crypto::deterministic_random_account_key, + multiaddr::Multiaddr, object::Object, - transaction::{CallArg, CertifiedTransaction, ObjectArg, TransactionData}, + transaction::{ + CallArg, CertifiedTransaction, ObjectArg, TransactionData, + TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS, + }, + utils::to_sender_signed_transaction, + SUI_FRAMEWORK_PACKAGE_ID, +}; +use tokio::sync::mpsc::{channel, Receiver, Sender}; + +use super::*; +use crate::{ + authority::{authority_tests::init_state_with_objects, AuthorityState}, + checkpoints::CheckpointServiceNoop, + consensus_handler::SequencedConsensusTransaction, }; -use tokio::sync::mpsc::channel; -use tokio::sync::mpsc::{Receiver, Sender}; /// Fixture: a few test gas objects. pub fn test_gas_objects() -> Vec<Object> { @@ -67,9 +69,10 @@ pub async fn test_certificates(authority: &AuthorityState) -> Vec<CertifiedTransaction> @@ -337,7 +340,8 @@ async fn test_execution_with_dependencies() { executed_owned_certs.push(cert); let mut owned_object_ref = effects1.created()[0].0; - // Initialize a shared counter, re-using gas_ref_0 so it has to execute after tx1. + // Initialize a shared counter, re-using gas_ref_0 so it has to execute after + // tx1. let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_objects[0][0].id()).await; let tx2 = TestTransactionBuilder::new(*addr1, gas_ref, rgp) .call_counter_create(package) .build_and_sign(key1); @@ -351,18 +355,19 @@ async fn test_execution_with_dependencies() { initial_shared_version, } = owner { - // Because the gas object used has version 2, the initial lamport timestamp of the shared - // counter is 3. + // Because the gas object used has version 2, the initial lamport timestamp of + // the shared counter is 3. assert_eq!(initial_shared_version.value(), 3); initial_shared_version } else { panic!("Not a shared object! {:?} {:?}", shared_counter_ref, owner); }; - // ---- Execute transactions with dependencies on first 3 nodes in the dependency order. + // ---- Execute transactions with dependencies on first 3 nodes in the + // dependency order. - // In each iteration, creates an owned and a shared transaction that depends on previous input - // and gas objects. + // In each iteration, creates an owned and a shared transaction that depends on + // previous input and gas objects. for i in 0..100 { let source_index = i % NUM_ACCOUNTS; let (source_addr, source_key) = &accounts[source_index]; @@ -534,8 +539,9 @@ async fn test_per_object_overload() { // Make sure execution driver has exited. sleep(Duration::from_secs(1)).await; - // Sign and try execute 1000 txns on the first three authorities. And enqueue them on the last authority. - // First shared counter txn has input object available on authority 3.
So to overload authority 3, 1 more + // Sign and try execute 1000 txns on the first three authorities. And enqueue + // them on the last authority. First shared counter txn has input object + // available on authority 3. So to overload authority 3, 1 more // txn is needed. let num_txns = MAX_PER_OBJECT_QUEUE_LENGTH + 1; for gas_object in gas_objects.iter().take(num_txns) { @@ -660,8 +666,9 @@ async fn test_txn_age_overload() { // Make sure execution driver has exited. sleep(Duration::from_secs(1)).await; - // Sign and try execute 2 txns on the first three authorities. And enqueue them on the last authority. - // First shared counter txn has input object available on authority 3. So to put a txn in the queue, we + // Sign and try execute 2 txns on the first three authorities. And enqueue them + // on the last authority. First shared counter txn has input object + // available on authority 3. So to put a txn in the queue, we // will need another txn. for gas_object in gas_objects.iter().take(2) { let gas_ref = get_latest_ref(authority_clients[0].clone(), gas_object.id()).await; @@ -685,7 +692,8 @@ async fn test_txn_age_overload() { send_consensus(&authorities[3], &shared_cert).await; } - // Sleep for 6 seconds to make sure the transaction is old enough since our threshold is 5. + // Sleep for 6 seconds to make sure the transaction is old enough since our + // threshold is 5. tokio::time::sleep(Duration::from_secs(6)).await; // Trying to sign a new transaction would now fail. @@ -708,7 +716,8 @@ async fn test_txn_age_overload() { ); } -// Tests that when validator is in load shedding mode, it can pushback txn signing correctly. +// Tests that when validator is in load shedding mode, it can push back txn +// signing correctly. #[tokio::test(flavor = "current_thread", start_paused = true)] async fn test_authority_txn_signing_pushback() { telemetry_subscribers::init_for_testing(); @@ -720,8 +729,9 @@ async fn test_authority_txn_signing_pushback() { let gas_object1 = Object::with_owner_for_testing(sender); let gas_object2 = Object::with_owner_for_testing(sender); - // Initialize an AuthorityState. Disable overload monitor by setting max_load_shedding_percentage to 0; - // Set check_system_overload_at_signing to true. + // Initialize an AuthorityState. Disable overload monitor by setting + // max_load_shedding_percentage to 0; Set check_system_overload_at_signing + // to true. let overload_config = AuthorityOverloadConfig { check_system_overload_at_signing: true, max_load_shedding_percentage: 0, @@ -785,8 +795,8 @@ async fn test_authority_txn_signing_pushback() { .unwrap(); assert_eq!(tx.digest(), lock_tx.digest()); - // Send the same txn again. Although objects are locked, since authority is in load shedding mode, - // it should still pushback the transaction. + // Send the same txn again. Although objects are locked, since authority is in + // load shedding mode, it should still push back the transaction. assert!(matches!( validator_service .handle_transaction_for_testing(tx.clone()) @@ -798,8 +808,8 @@ async fn test_authority_txn_signing_pushback() { )); // Send another transaction, that send the same object to a different recipient. - // Transaction signing should failed with ObjectLockConflict error, since the object - // is already locked by the previous transaction. + // Transaction signing should fail with ObjectLockConflict error, since the + // object is already locked by the previous transaction.
let tx2 = make_transfer_object_transaction( gas_object1.compute_object_reference(), gas_object2.compute_object_reference(), @@ -821,7 +831,8 @@ async fn test_authority_txn_signing_pushback() { // Clear the authority overload status. authority_state.overload_info.clear_overload(); - // Re-send the first transaction, now the transaction can be successfully signed. + // Re-send the first transaction, now the transaction can be successfully + // signed. let response = validator_service .handle_transaction_for_testing(tx.clone()) .await; @@ -836,7 +847,8 @@ async fn test_authority_txn_signing_pushback() { ); } -// Tests that when validator is in load shedding mode, it can pushback txn execution correctly. +// Tests that when validator is in load shedding mode, it can push back txn +// execution correctly. #[tokio::test(flavor = "current_thread", start_paused = true)] async fn test_authority_txn_execution_pushback() { telemetry_subscribers::init_for_testing(); @@ -847,9 +859,10 @@ async fn test_authority_txn_execution_pushback() { let gas_object1 = Object::with_owner_for_testing(sender); let gas_object2 = Object::with_owner_for_testing(sender); - // Initialize an AuthorityState. Disable overload monitor by setting max_load_shedding_percentage to 0; - // Set check_system_overload_at_signing to false to disable load shedding at signing, this we are testing load shedding at execution. - // Set check_system_overload_at_execution to true. + // Initialize an AuthorityState. Disable overload monitor by setting + // max_load_shedding_percentage to 0; Set check_system_overload_at_signing + // to false to disable load shedding at signing, since we are testing load + // shedding at execution. Set check_system_overload_at_execution to true. let overload_config = AuthorityOverloadConfig { check_system_overload_at_signing: false, check_system_overload_at_execution: true, @@ -911,7 +924,8 @@ async fn test_authority_txn_execution_pushback() { ) .unwrap(); - // Ask the validator to execute the certificate, it should fail with ValidatorOverloadedRetryAfter error. + // Ask the validator to execute the certificate; it should fail with + // ValidatorOverloadedRetryAfter error. assert!(matches!( validator_service .execute_certificate_for_testing(cert.clone()) .await .err() .unwrap(), SuiError::ValidatorOverloadedRetryAfter { .. } )); - // Clear the validator overload status and retry the certificate. It should succeed. + // Clear the validator overload status and retry the certificate. It should + // succeed. authority_state.overload_info.clear_overload(); - assert!(validator_service - .execute_certificate_for_testing(cert) - .await - .is_ok()); + assert!( + validator_service + .execute_certificate_for_testing(cert) + .await + .is_ok() + ); } diff --git a/crates/sui-core/src/unit_tests/gas_tests.rs b/crates/sui-core/src/unit_tests/gas_tests.rs index 5cc3847e277..54ffe1dd2d4 100644 --- a/crates/sui-core/src/unit_tests/gas_tests.rs +++ b/crates/sui-core/src/unit_tests/gas_tests.rs @@ -1,30 +1,36 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use super::*; - -use super::authority_tests::{init_state_with_ids, send_and_confirm_transaction}; -use super::move_integration_tests::build_and_try_publish_test_package; -use crate::authority::authority_tests::init_state_with_ids_and_object_basics; -use crate::authority::test_authority_builder::TestAuthorityBuilder; -use move_core_types::account_address::AccountAddress; -use move_core_types::ident_str; +use move_core_types::{account_address::AccountAddress, ident_str}; use once_cell::sync::Lazy; use sui_protocol_config::ProtocolConfig; -use sui_types::crypto::AccountKeyPair; -use sui_types::effects::TransactionEvents; -use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus}; -use sui_types::gas_coin::GasCoin; -use sui_types::object::GAS_VALUE_FOR_TESTING; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::utils::to_sender_signed_transaction; -use sui_types::{base_types::dbg_addr, crypto::get_key_pair}; - -// The cost table is used only to get the max budget available which is not dependent on -// the gas price +use sui_types::{ + base_types::dbg_addr, + crypto::{get_key_pair, AccountKeyPair}, + effects::TransactionEvents, + execution_status::{ExecutionFailureStatus, ExecutionStatus}, + gas_coin::GasCoin, + object::GAS_VALUE_FOR_TESTING, + programmable_transaction_builder::ProgrammableTransactionBuilder, + utils::to_sender_signed_transaction, +}; + +use super::{ + authority_tests::{init_state_with_ids, send_and_confirm_transaction}, + move_integration_tests::build_and_try_publish_test_package, + *, +}; +use crate::authority::{ + authority_tests::init_state_with_ids_and_object_basics, + test_authority_builder::TestAuthorityBuilder, +}; + +// The cost table is used only to get the max budget available which is not +// dependent on the gas price static MAX_GAS_BUDGET: Lazy = Lazy::new(|| ProtocolConfig::get_for_max_version_UNSAFE().max_tx_gas()); -// MIN_GAS_BUDGET_PRE_RGP has to be multiplied by the RGP to get the proper minimum +// MIN_GAS_BUDGET_PRE_RGP has to be multiplied by the RGP to get the proper +// minimum static MIN_GAS_BUDGET_PRE_RGP: Lazy = Lazy::new(|| ProtocolConfig::get_for_max_version_UNSAFE().base_tx_cost_fixed()); @@ -48,8 +54,8 @@ async fn test_tx_less_than_minimum_gas_budget() { #[tokio::test] async fn test_tx_more_than_maximum_gas_budget() { // This test creates a transaction that sets a gas_budget more than the maximum - // budget (which could lead to overflow). It's expected to fail early during transaction - // handling phase. + // budget (which could lead to overflow). It's expected to fail early during + // transaction handling phase. let budget = *MAX_GAS_BUDGET + 1; let result = execute_transfer(*MAX_GAS_BUDGET, budget, false, false).await; @@ -62,26 +68,27 @@ async fn test_tx_more_than_maximum_gas_budget() { ); } -// // Out Of Gas Scenarios // "minimal storage" is storage for input objects after reset. Operations for // "minimal storage" can only happen if storage charges fail. // Single gas coin: // - OOG computation, storage (minimal storage) ok -// - OOG for computation, OOG for minimal storage (e.g. computation is entire budget) +// - OOG for computation, OOG for minimal storage (e.g. computation is entire +// budget) // - computation ok, OOG for storage, minimal storage ok -// - computation ok, OOG for storage, OOG for minimal storage (e.g. computation is entire budget) +// - computation ok, OOG for storage, OOG for minimal storage (e.g. 
computation +// is entire budget) // -// With multiple gas coins is practically impossible to fail storage cost because we -// get a significant among of MIST back from smashing. So we try: +// With multiple gas coins it is practically impossible to fail storage cost +// because we get a significant amount of MIST back from smashing. So we try: // - OOG computation, storage ok // // impossible scenarios: -// - OOG for computation, OOG for storage, minimal storage ok - OOG for computation implies +// - OOG for computation, OOG for storage, minimal storage ok - OOG for +// computation implies // minimal storage is the only extra charge, so storage == minimal storage // -// // Helpers for OOG scenarios // @@ -102,7 +109,8 @@ async fn publish_move_random_package( "move_random", PUBLISH_BUDGET, rgp, - /* with_unpublished_deps */ false, + // with_unpublished_deps + false, ) .await; let effects = response.1.into_data(); @@ -113,7 +121,7 @@ async fn publish_move_random_package( .find(|(_, owner)| matches!(owner, Owner::Immutable)) .unwrap() .0 - .0 + .0 } async fn check_oog_transaction( @@ -199,16 +207,18 @@ where ExecutionFailureStatus::InsufficientGas ); // gas object in effects is first coin in vector of coins - assert_eq!(gas_coin_ids[0], effects.gas_object().0 .0); + assert_eq!(gas_coin_ids[0], effects.gas_object().0.0); // gas at position 0 mutated assert_eq!(effects.mutated().len(), 1); // extra coins are deleted assert_eq!(effects.deleted().len() as u64, coin_num - 1); for gas_coin_id in &gas_coin_ids[1..] { - assert!(effects - .deleted() - .iter() - .any(|deleted| deleted.0 == *gas_coin_id)); + assert!( + effects + .deleted() + .iter() + .any(|deleted| deleted.0 == *gas_coin_id) + ); } let gas_ref = effects.gas_object().0; let gas_object = authority_state @@ -339,7 +349,8 @@ async fn test_oog_computation_storage_ok_multi_coins() -> SuiResult { .await } -// OOG for computation, OOG for minimal storage (e.g. computation is entire budget) +// OOG for computation, OOG for minimal storage (e.g. computation is entire +// budget) #[tokio::test] async fn test_oog_computation_oog_storage_final_one_coin() -> SuiResult { const GAS_PRICE: u64 = 1000; @@ -357,7 +368,8 @@ async fn test_oog_computation_oog_storage_final_one_coin() -> SuiResult { |summary, initial_value, final_value| { let gas_used = summary.net_gas_usage() as u64; assert!(summary.computation_cost > 0); - // currently when storage charges go out of gas, the storage data in the summary is zero + // currently when storage charges go out of gas, the storage data in the summary + // is zero assert_eq!(summary.storage_cost, 0); assert_eq!(summary.storage_rebate, 0); assert_eq!(summary.non_refundable_storage_fee, 0); @@ -432,7 +444,8 @@ async fn test_computation_ok_oog_storage_minimal_ok_multi_coins() -> SuiResult { .await } -// - computation ok, OOG for storage, OOG for minimal storage (e.g.
+// is entire budget)
 #[tokio::test]
 async fn test_computation_ok_oog_storage_final_one_coin() -> SuiResult {
     const GAS_PRICE: u64 = 1001;
@@ -452,7 +465,8 @@ async fn test_computation_ok_oog_storage_final_one_coin() -> SuiResult {
         |summary, initial_value, final_value| {
             let gas_used = summary.net_gas_usage() as u64;
             assert!(summary.computation_cost > 0);
-            // currently when storage charges go out of gas, the storage data in the summary is zero
+            // currently, when storage charges go out of gas, the storage data in the
+            // summary is zero
             assert_eq!(summary.storage_cost, 0);
             assert_eq!(summary.storage_rebate, 0);
             assert_eq!(summary.non_refundable_storage_fee, 0);
@@ -693,11 +707,13 @@ async fn test_invalid_gas_owners() {
 #[tokio::test]
 async fn test_native_transfer_insufficient_gas_reading_objects() {
     // This test creates a transfer transaction with a gas budget, that's more than
-    // the minimum budget requirement, but not enough to even read the objects from db.
-    // This will lead to failure in lock check step during handle transaction phase.
+    // the minimum budget requirement, but not enough to even read the objects from
+    // db. This will lead to failure in the lock check step during the handle
+    // transaction phase.
     let balance = *MIN_GAS_BUDGET_PRE_RGP + 1;
     let result = execute_transfer(*MAX_GAS_BUDGET, balance, true, true).await;
-    // The transaction should still execute to effects, but with execution status as failure.
+    // The transaction should still execute to effects, but with execution status as
+    // failure.
     let effects = result
         .response
         .unwrap()
@@ -711,10 +727,10 @@ async fn test_native_transfer_insufficient_gas_reading_objects() {
 
 #[tokio::test]
 async fn test_native_transfer_insufficient_gas_execution() {
-    // This test creates a transfer transaction with a gas budget that's insufficient
-    // to finalize the transfer object mutation effects. It will fail during
-    // execution phase, and hence gas object will still be mutated and all budget
-    // will be charged.
+    // This test creates a transfer transaction with a gas budget that's
+    // insufficient to finalize the transfer object mutation effects. It will
+    // fail during the execution phase, and hence the gas object will still be
+    // mutated and the whole budget will be charged.
     let result = execute_transfer(*MAX_GAS_BUDGET, *MAX_GAS_BUDGET, true, false).await;
     let total_gas = result
         .response
@@ -768,7 +784,8 @@ async fn test_publish_gas() -> anyhow::Result<()> {
         "object_wrapping",
         TEST_ONLY_GAS_UNIT_FOR_PUBLISH * rgp * 2,
         rgp,
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
     let effects = response.1.into_data();
@@ -802,7 +819,8 @@ async fn test_publish_gas() -> anyhow::Result<()> {
         "object_wrapping",
         budget,
         rgp,
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
     let effects = response.1.into_data();
@@ -894,7 +912,8 @@ async fn test_move_call_gas() -> SuiResult {
     // storage_cost should be less than rebate because for object deletion, we only
     // rebate without charging.
     assert!(gas_cost.storage_cost > 0 && gas_cost.storage_cost < gas_cost.storage_rebate);
-    // Check that we have storage rebate is less or equal to the previous one + non refundable
+    // Check that the storage rebate is less than or equal to the previous one +
+    // non refundable
     assert_eq!(
         gas_cost.storage_rebate + gas_cost.non_refundable_storage_fee,
         prev_storage_cost
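// A sketch of the rebate identity asserted above: the storage fee paid by the
// previous transaction comes back split between a refund and a non-refundable
// remainder, so the two parts always recompose the prior charge. The rate
// parameter below is an illustrative assumption; the real rate comes from the
// protocol config.
fn split_rebate_sketch(prev_storage_cost: u64, rebate_rate_bps: u64) -> (u64, u64) {
    let rebate = prev_storage_cost * rebate_rate_bps / 10_000; // refunded to the sender
    let non_refundable = prev_storage_cost - rebate; // retained by the system
    // Invariant mirrored by the assert_eq! above:
    // rebate + non_refundable == prev_storage_cost
    (rebate, non_refundable)
}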
diff --git a/crates/sui-core/src/unit_tests/move_integration_tests.rs b/crates/sui-core/src/unit_tests/move_integration_tests.rs
index cc82e63a857..6aed096d34f 100644
--- a/crates/sui-core/src/unit_tests/move_integration_tests.rs
+++ b/crates/sui-core/src/unit_tests/move_integration_tests.rs
@@ -2,39 +2,32 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use super::*;
-use crate::authority::authority_tests::{
-    call_move, call_move_, execute_programmable_transaction, init_state_with_ids,
-    send_and_confirm_transaction, TestCallArg,
-};
+use std::{collections::HashSet, env, path::PathBuf, str::FromStr};
+
 use move_core_types::{
     account_address::AccountAddress,
     identifier::{IdentStr, Identifier},
-    language_storage::StructTag,
+    language_storage::{StructTag, TypeTag},
     u256::U256,
 };
-
+use sui_move_build::{BuildConfig, SuiPackageHooks};
 use sui_types::{
     base_types::{RESOLVED_ASCII_STR, RESOLVED_STD_OPTION, RESOLVED_UTF8_STR},
-    error::ExecutionErrorKind,
+    crypto::{get_key_pair, AccountKeyPair},
+    error::{ExecutionErrorKind, SuiError},
+    execution_status::{CommandArgumentError, ExecutionFailureStatus, ExecutionStatus},
+    move_package::UpgradeCap,
     programmable_transaction_builder::ProgrammableTransactionBuilder,
     utils::to_sender_signed_transaction,
     SUI_FRAMEWORK_PACKAGE_ID,
 };
-use move_core_types::language_storage::TypeTag;
-
-use sui_move_build::{BuildConfig, SuiPackageHooks};
-use sui_types::{
-    crypto::{get_key_pair, AccountKeyPair},
-    error::SuiError,
+use super::*;
+use crate::authority::authority_tests::{
+    call_move, call_move_, execute_programmable_transaction, init_state_with_ids,
+    send_and_confirm_transaction, TestCallArg,
 };
-use std::{collections::HashSet, path::PathBuf};
-use std::{env, str::FromStr};
-use sui_types::execution_status::{CommandArgumentError, ExecutionFailureStatus, ExecutionStatus};
-use sui_types::move_package::UpgradeCap;
-
 #[tokio::test]
 #[cfg_attr(msim, ignore)]
 async fn test_object_wrapping_unwrapping() {
@@ -49,7 +42,8 @@ async fn test_object_wrapping_unwrapping() {
         &sender_key,
         &gas,
         "object_wrapping",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -79,7 +73,7 @@ async fn test_object_wrapping_unwrapping() {
     assert_eq!(child_object_ref.1, create_child_version);
 
     let wrapped_version =
-        SequenceNumber::lamport_increment([child_object_ref.1, effects.gas_object().0 .1]);
+        SequenceNumber::lamport_increment([child_object_ref.1, effects.gas_object().0.1]);
 
     // Create a Parent object, by wrapping the child object.
     let effects = call_move(
@@ -124,7 +118,7 @@ async fn test_object_wrapping_unwrapping() {
     assert_eq!(parent_object_ref.1, wrapped_version);
 
     let unwrapped_version =
-        SequenceNumber::lamport_increment([parent_object_ref.1, effects.gas_object().0 .1]);
+        SequenceNumber::lamport_increment([parent_object_ref.1, effects.gas_object().0.1]);
 
     // Extract the child out of the parent.
     let effects = call_move(
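// `SequenceNumber::lamport_increment`, used in the version assertions of this
// test, chooses a version strictly greater than every input version. A minimal
// sketch of the rule (not the production implementation):
fn lamport_increment_sketch(inputs: &[u64]) -> u64 {
    inputs.iter().copied().max().unwrap_or(0) + 1
}
// e.g. wrapping a child at version 3 while the gas object is at version 5
// bumps every object touched by the transaction to version 6:
// lamport_increment_sketch(&[3, 5]) == 6.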
@@ -156,14 +150,14 @@ async fn test_object_wrapping_unwrapping() {
         (2, 0, 1)
     );
     // Make sure that version increments again when unwrapped.
-    assert_eq!(effects.unwrapped()[0].0 .1, unwrapped_version);
+    assert_eq!(effects.unwrapped()[0].0.1, unwrapped_version);
     check_latest_object_ref(&authority, &effects.unwrapped()[0].0, false).await;
     let child_object_ref = effects.unwrapped()[0].0;
 
     let rewrap_version = SequenceNumber::lamport_increment([
         parent_object_ref.1,
         child_object_ref.1,
-        effects.gas_object().0 .1,
+        effects.gas_object().0.1,
     ]);
 
     // Wrap the child to the parent again.
@@ -202,7 +196,7 @@ async fn test_object_wrapping_unwrapping() {
     let parent_object_ref = effects.mutated_excluding_gas().first().unwrap().0;
 
     let deleted_version =
-        SequenceNumber::lamport_increment([parent_object_ref.1, effects.gas_object().0 .1]);
+        SequenceNumber::lamport_increment([parent_object_ref.1, effects.gas_object().0.1]);
 
     // Now delete the parent object, which will in turn delete the child object.
     let effects = call_move(
@@ -231,9 +225,11 @@ async fn test_object_wrapping_unwrapping() {
         deleted_version,
         ObjectDigest::OBJECT_DIGEST_DELETED,
     );
-    assert!(effects
-        .unwrapped_then_deleted()
-        .contains(&expected_child_object_ref));
+    assert!(
+        effects
+            .unwrapped_then_deleted()
+            .contains(&expected_child_object_ref)
+    );
     check_latest_object_ref(&authority, &expected_child_object_ref, true).await;
     let expected_parent_object_ref = (
         parent_object_ref.0,
@@ -257,7 +253,8 @@ async fn test_object_owning_another_object() {
         &sender_key,
         &gas,
         "object_owner",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -340,7 +337,8 @@ async fn test_object_owning_another_object() {
     let field_object = authority.get_object(&field_id).await.unwrap().unwrap();
     assert_eq!(field_object.owner, parent.0);
 
-    // Mutate the child directly will now fail because we need the parent to authenticate.
+    // Mutating the child directly will now fail because we need the parent to
+    // authenticate.
     let result = call_move(
         &authority,
         &gas,
@@ -409,7 +407,8 @@ async fn test_object_owning_another_object() {
 
     assert!(effects.status().is_ok());
 
-    // Delete the child. This should fail as the child cannot be used as a transaction argument
+    // Delete the child. This should fail as the child cannot be used as a
+    // transaction argument
    let effects = call_move(
         &authority,
         &gas,
@@ -438,7 +437,8 @@ async fn test_create_then_delete_parent_child() {
         &sender_key,
         &gas,
         "object_owner",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -499,7 +499,8 @@ async fn test_create_then_delete_parent_child_wrap() {
         &sender_key,
         &gas,
         "object_owner",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -559,10 +560,11 @@ async fn test_create_then_delete_parent_child_wrap() {
 
     assert!(effects.status().is_ok());
 
-    // The parent and field are considered deleted, the child doesn't count because it wasn't
-    // considered created in the first place.
+    // The parent and field are considered deleted; the child doesn't count because
+    // it wasn't considered created in the first place.
     assert_eq!(effects.deleted().len(), 2);
-    // The child is considered as unwrapped and deleted, even though it was wrapped since creation.
+    // The child is considered as unwrapped and deleted, even though it was wrapped
+    // since creation.
     assert_eq!(effects.unwrapped_then_deleted().len(), 1);
 
     assert_eq!(
@@ -578,8 +580,9 @@ async fn test_create_then_delete_parent_child_wrap() {
     );
 }
 
-/// We are explicitly testing the case where a parent and child object are created together - where
-/// no prior child version exists - and then we remove the child successfully.
+/// We are explicitly testing the case where a parent and child object are
+/// created together - where no prior child version exists - and then we remove
+/// the child successfully.
 #[tokio::test]
 #[cfg_attr(msim, ignore)]
 async fn test_remove_child_when_no_prior_version_exists() {
@@ -593,7 +596,8 @@ async fn test_remove_child_when_no_prior_version_exists() {
         &sender_key,
         &gas,
         "object_owner",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -685,7 +689,8 @@ async fn test_create_then_delete_parent_child_wrap_separate() {
         &sender_key,
         &gas,
         "object_owner",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -780,7 +785,8 @@ async fn test_entry_point_vector_empty() {
         &sender_key,
         &gas,
         "entry_point_vector",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -919,11 +925,13 @@ async fn test_entry_point_vector_primitive() {
         &sender_key,
         &gas,
         "entry_point_vector",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
-    // just a test call with vector of 2 primitive values and check its length in the entry function
+    // just a test call with a vector of 2 primitive values, checking its length in
+    // the entry function
     let effects = call_move(
         &authority,
         &gas,
@@ -959,7 +967,8 @@ async fn test_entry_point_vector() {
         &sender_key,
         &gas,
         "entry_point_vector",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -1003,8 +1012,8 @@ async fn test_entry_point_vector() {
         effects.status()
     );
 
-    // mint a parent object and a child object and make sure that parent stored in the vector
-    // authenticates the child passed by-value
+    // mint a parent object and a child object and make sure that the parent stored
+    // in the vector authenticates the child passed by-value
     let effects = call_move(
         &authority,
         &gas,
@@ -1046,8 +1055,8 @@ async fn test_entry_point_vector() {
         effects.status()
     );
     let (child_id, _, _) = effects.created()[0].0;
-    // call a function with a vector containing the same owned object as another one passed as
-    // a reference argument
+    // call a function with a vector containing the same owned object as another one
+    // passed as a reference argument
     let effects = call_move(
         &authority,
         &gas,
@@ -1080,7 +1089,8 @@ async fn test_entry_point_vector_error() {
         &sender_key,
         &gas,
         "entry_point_vector",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -1178,7 +1188,8 @@ async fn test_entry_point_vector_error() {
     )
     .await
     .unwrap();
-    // should fail as we passed object of the wrong type as the first element of the vector
+    // should fail as we passed an object of the wrong type as the first element of
+    // the vector
     assert!(
         matches!(effects.status(), ExecutionStatus::Failure { .. }),
}), "{:?}", @@ -1248,8 +1259,8 @@ async fn test_entry_point_vector_error() { effects.status() ); let (obj_id, _, _) = effects.created()[0].0; - // call a function with a vector containing the same owned object as another one passed as - // argument + // call a function with a vector containing the same owned object as another one + // passed as argument let result = call_move( &authority, &gas, @@ -1265,7 +1276,8 @@ async fn test_entry_point_vector_error() { ], ) .await; - // should fail as we have the same object passed in vector and as a separate by-value argument + // should fail as we have the same object passed in vector and as a separate + // by-value argument assert_eq!( result.unwrap().status(), &ExecutionStatus::Failure { @@ -1297,8 +1309,8 @@ async fn test_entry_point_vector_error() { effects.status() ); let (obj_id, _, _) = effects.created()[0].0; - // call a function with a vector containing the same owned object as another one passed as - // a reference argument + // call a function with a vector containing the same owned object as another one + // passed as a reference argument let result = call_move( &authority, &gas, @@ -1314,7 +1326,8 @@ async fn test_entry_point_vector_error() { ], ) .await; - // should fail as we have the same object passed in vector and as a separate by-reference argument + // should fail as we have the same object passed in vector and as a separate + // by-reference argument assert_eq!( result.unwrap().status(), &ExecutionStatus::Failure { @@ -1340,7 +1353,8 @@ async fn test_entry_point_vector_any() { &sender_key, &gas, "entry_point_vector", - /* with_unpublished_deps */ false, + // with_unpublished_deps + false, ) .await; @@ -1387,8 +1401,8 @@ async fn test_entry_point_vector_any() { effects.status() ); - // mint a parent object and a child object and make sure that parent stored in the vector - // authenticates the child passed by-value + // mint a parent object and a child object and make sure that parent stored in + // the vector authenticates the child passed by-value let effects = call_move( &authority, &gas, @@ -1430,8 +1444,8 @@ async fn test_entry_point_vector_any() { effects.status() ); let (child_id, _, _) = effects.created()[0].0; - // call a function with a vector containing the same owned object as another one passed as - // a reference argument + // call a function with a vector containing the same owned object as another one + // passed as a reference argument let effects = call_move( &authority, &gas, @@ -1464,7 +1478,8 @@ async fn test_entry_point_vector_any_error() { &sender_key, &gas, "entry_point_vector", - /* with_unpublished_deps */ false, + // with_unpublished_deps + false, ) .await; @@ -1565,7 +1580,8 @@ async fn test_entry_point_vector_any_error() { ) .await .unwrap(); - // should fail as we passed object of the wrong type as the first element of the vector + // should fail as we passed object of the wrong type as the first element of the + // vector assert!( matches!(effects.status(), ExecutionStatus::Failure { .. 
         "{:?}",
         effects.status()
     );
@@ -1635,8 +1651,8 @@ async fn test_entry_point_vector_any_error() {
         effects.status()
     );
     let (obj_id, _, _) = effects.created()[0].0;
-    // call a function with a vector containing the same owned object as another one passed as
-    // argument
+    // call a function with a vector containing the same owned object as another one
+    // passed as an argument
     let result = call_move(
         &authority,
         &gas,
@@ -1652,7 +1668,8 @@ async fn test_entry_point_vector_any_error() {
         ],
     )
     .await;
-    // should fail as we have the same object passed in vector and as a separate by-value argument
+    // should fail as we have the same object passed in the vector and as a separate
+    // by-value argument
     assert_eq!(
         result.unwrap().status(),
         &ExecutionStatus::Failure {
@@ -1684,8 +1701,8 @@ async fn test_entry_point_vector_any_error() {
         effects.status()
     );
     let (obj_id, _, _) = effects.created()[0].0;
-    // call a function with a vector containing the same owned object as another one passed as
-    // a reference argument
+    // call a function with a vector containing the same owned object as another one
+    // passed as a reference argument
     let result = call_move(
         &authority,
         &gas,
@@ -1726,7 +1743,8 @@ async fn test_entry_point_string() {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -1810,7 +1828,8 @@ async fn test_nested_string() {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -1954,7 +1973,8 @@ async fn test_entry_point_string_vec() {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -1995,7 +2015,8 @@ async fn test_entry_point_string_error() {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -2114,7 +2135,8 @@ async fn test_entry_point_string_vec_error() {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -2169,7 +2191,8 @@ async fn test_entry_point_string_option_error() {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
 
@@ -2435,7 +2458,8 @@ macro_rules! make_vec_tests_for_type {
         &sender_key,
         &gas,
         "entry_point_types",
-        /* with_unpublished_deps */ false,
+        // with_unpublished_deps
+        false,
     )
     .await;
     let package_id = package.0;
@@ -2776,9 +2800,10 @@ async fn test_object_no_id_error() {
     let mut build_config = BuildConfig::new_for_testing();
     build_config.config.test_mode = true;
     let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
-    // in this package object struct (NotObject) is defined incorrectly and publishing should
-    // fail (it's defined in test-only code hence cannot be checked by transactional testing
-    // framework which goes through "normal" publishing path which excludes tests).
+    // in this package an object struct (NotObject) is defined incorrectly and
+    // publishing should fail (it's defined in test-only code, hence it cannot be
+    // checked by the transactional testing framework, which goes through the
+    // "normal" publishing path that excludes tests).
path.extend(["src", "unit_tests", "data", "object_no_id"]); let res = build_config.build(path); diff --git a/crates/sui-core/src/unit_tests/move_package_publish_tests.rs b/crates/sui-core/src/unit_tests/move_package_publish_tests.rs index 467744964f0..b48d7ff2ea5 100644 --- a/crates/sui-core/src/unit_tests/move_package_publish_tests.rs +++ b/crates/sui-core/src/unit_tests/move_package_publish_tests.rs @@ -1,39 +1,32 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::authority::{ - authority_tests::{call_move, init_state_with_ids, send_and_confirm_transaction}, - move_integration_tests::{build_and_publish_test_package, build_test_package}, -}; +use std::{collections::HashSet, env, fs::File, io::Read, path::PathBuf}; +use expect_test::expect; use move_binary_format::CompiledModule; +use move_package::source_package::manifest_parser; +use sui_framework::BuiltInFramework; +use sui_move_build::{check_unpublished_dependencies, gather_published_ids, BuildConfig}; use sui_types::{ base_types::ObjectID, - error::UserInputError, + crypto::{get_key_pair, AccountKeyPair}, + effects::TransactionEffectsAPI, + error::{SuiError, UserInputError}, + execution_status::{ExecutionFailureStatus, ExecutionStatus}, object::{Data, ObjectRead, Owner}, + programmable_transaction_builder::ProgrammableTransactionBuilder, transaction::{TransactionData, TEST_ONLY_GAS_UNIT_FOR_PUBLISH}, utils::to_sender_signed_transaction, }; -use move_package::source_package::manifest_parser; -use sui_move_build::{check_unpublished_dependencies, gather_published_ids, BuildConfig}; -use sui_types::{ - crypto::{get_key_pair, AccountKeyPair}, - error::SuiError, -}; - -use crate::authority::move_integration_tests::{ - build_multi_publish_txns, build_package, run_multi_txns, +use crate::authority::{ + authority_tests::{call_move, init_state_with_ids, send_and_confirm_transaction}, + move_integration_tests::{ + build_and_publish_test_package, build_multi_publish_txns, build_package, + build_test_package, run_multi_txns, + }, }; -use expect_test::expect; -use std::env; -use std::fs::File; -use std::io::Read; -use std::{collections::HashSet, path::PathBuf}; -use sui_framework::BuiltInFramework; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus}; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; #[tokio::test] #[cfg_attr(msim, ignore)] @@ -48,7 +41,8 @@ async fn test_publishing_with_unpublished_deps() { &sender_key, &gas, "depends_on_basics", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -202,7 +196,8 @@ async fn test_generate_lock_file() { .clone() .build(path.clone()) .expect("Move package did not build"); - // Update the lock file with placeholder compiler version so this isn't bumped every release. + // Update the lock file with placeholder compiler version so this isn't bumped + // every release. build_config .config .update_lock_file_toolchain_version(&path, "0.0.1".into()) diff --git a/crates/sui-core/src/unit_tests/move_package_tests.rs b/crates/sui-core/src/unit_tests/move_package_tests.rs index 3fde3ad747e..bb9ef60a2e2 100644 --- a/crates/sui-core/src/unit_tests/move_package_tests.rs +++ b/crates/sui-core/src/unit_tests/move_package_tests.rs @@ -1,9 +1,9 @@ // Copyright (c) Mysten Labs, Inc. 
 // SPDX-License-Identifier: Apache-2.0
 
-use move_binary_format::file_format::CompiledModule;
-
 use std::{collections::BTreeMap, path::PathBuf};
+
+use move_binary_format::file_format::CompiledModule;
 use sui_move_build::{BuildConfig, CompiledPackage};
 use sui_protocol_config::{Chain, ProtocolConfig};
 use sui_types::{
@@ -78,8 +78,9 @@ fn test_new_initial() {
         }
     );
 
-    // also test that move package sizes used for gas computations are estimated correctly (small
-    // constant differences can be tolerated and are due to BCS encoding)
+    // also test that move package sizes used for gas computations are estimated
+    // correctly (small constant differences can be tolerated and are due to BCS
+    // encoding)
     let a_pkg_obj = Object::new_package_from_data(Data::Package(a_pkg), TransactionDigest::ZERO);
     let b_pkg_obj = Object::new_package_from_data(Data::Package(b_pkg), TransactionDigest::ZERO);
     let c_pkg_obj = Object::new_package_from_data(Data::Package(c_pkg), TransactionDigest::ZERO);
@@ -223,8 +224,8 @@ fn test_upgrade_linkage_digest_to_new_dep() {
         },
     );
 
-    // Make sure that we compute the package digest off of the update dependencies and not the old
-    // dependencies in the linkage table.
+    // Make sure that we compute the package digest off of the updated dependencies
+    // and not the old dependencies in the linkage table.
     let hash_modules = true;
     assert_eq!(
         b_new.digest(hash_modules),
diff --git a/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs b/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs
index 6ca10d5c3c9..a5601daae3f 100644
--- a/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs
+++ b/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs
@@ -1,12 +1,20 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use std::{collections::BTreeSet, path::PathBuf, str::FromStr, sync::Arc};
+
 use move_core_types::{ident_str, language_storage::StructTag};
 use sui_move_build::BuildConfig;
 use sui_protocol_config::ProtocolConfig;
 use sui_types::{
     base_types::{ObjectID, ObjectRef, SuiAddress},
     crypto::{get_key_pair, AccountKeyPair},
+    effects::{TransactionEffects, TransactionEffectsAPI},
+    error::{SuiError, UserInputError},
+    execution_config_utils::to_binary_config,
+    execution_status::{
+        CommandArgumentError, ExecutionFailureStatus, ExecutionStatus, PackageUpgradeError,
+    },
     move_package::UpgradePolicy,
     object::{Object, Owner},
     programmable_transaction_builder::ProgrammableTransactionBuilder,
@@ -15,24 +23,16 @@ use sui_types::{
     MOVE_STDLIB_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID,
 };
 
-use std::{collections::BTreeSet, path::PathBuf, str::FromStr, sync::Arc};
-use sui_types::effects::{TransactionEffects, TransactionEffectsAPI};
-use sui_types::error::{SuiError, UserInputError};
-use sui_types::execution_config_utils::to_binary_config;
-use sui_types::execution_status::{
-    CommandArgumentError, ExecutionFailureStatus, ExecutionStatus, PackageUpgradeError,
-};
-
-use crate::authority::authority_tests::init_state_with_ids;
-use crate::authority::move_integration_tests::{
-    build_multi_publish_txns, build_multi_upgrade_txns, build_package,
-    collect_packages_and_upgrade_caps, run_multi_txns, UpgradeData,
-};
-use crate::authority::test_authority_builder::TestAuthorityBuilder;
 use crate::authority::{
     authority_test_utils::build_test_modules_with_dep_addr,
-    authority_tests::execute_programmable_transaction,
-    move_integration_tests::build_and_publish_test_package_with_upgrade_cap, AuthorityState,
+    authority_tests::{execute_programmable_transaction, init_state_with_ids},
+    move_integration_tests::{
+        build_and_publish_test_package_with_upgrade_cap, build_multi_publish_txns,
+        build_multi_upgrade_txns, build_package, collect_packages_and_upgrade_caps, run_multi_txns,
+        UpgradeData,
+    },
+    test_authority_builder::TestAuthorityBuilder,
+    AuthorityState,
 };
 
 #[macro_export]
@@ -130,7 +130,8 @@ impl UpgradeStateRunner {
             &sender_key,
             &gas_object_id,
             base_package_name,
-            /* with_unpublished_deps */ false,
+            // with_unpublished_deps
+            false,
         )
         .await;
 
@@ -273,14 +274,18 @@ async fn test_upgrade_package_happy_path() {
     let binary_config = to_binary_config(&config);
     let normalized_modules = package.move_package().normalize(&binary_config).unwrap();
     assert!(normalized_modules.contains_key("new_module"));
-    assert!(normalized_modules["new_module"]
-        .functions
-        .contains_key(ident_str!("this_is_a_new_module")));
-    assert!(normalized_modules["new_module"]
-        .functions
-        .contains_key(ident_str!(
-            "i_can_call_funs_in_other_modules_that_already_existed"
-        )));
+    assert!(
+        normalized_modules["new_module"]
+            .functions
+            .contains_key(ident_str!("this_is_a_new_module"))
+    );
+    assert!(
+        normalized_modules["new_module"]
+            .functions
+            .contains_key(ident_str!(
+                "i_can_call_funs_in_other_modules_that_already_existed"
+            ))
+    );
 
     // Call into the upgraded module
     let effects = runner
@@ -329,8 +334,8 @@ async fn test_upgrade_introduces_type_then_uses_it() {
     assert!(effects.status().is_ok(), "{:#?}", effects.status());
     let package_v3 = runner.package.0;
 
-    // Create an instance of the type introduced at version 2, with the function introduced at
-    // version 3.
+    // Create an instance of the type introduced at version 2, with the function
+    // introduced at version 3.
     let effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -681,7 +686,7 @@ async fn test_multiple_upgrades(
         .find(|(_, owner)| matches!(owner, Owner::Immutable))
         .unwrap()
         .0
-         .0;
+        .0;
 
     // Second upgrade: May also adds a dep on the sui framework and stdlib.
     let (digest, modules) = build_upgrade_test_modules("stage2_basic_compatibility_valid");
@@ -704,7 +709,8 @@ async fn test_multiple_upgrades(
 async fn test_interleaved_upgrades() {
     let mut runner = UpgradeStateRunner::new("move_upgrade/base").await;
 
-    // Base has been published. Publish a package now that depends on the base package.
+    // Base has been published. Publish a package now that depends on the base
+    // package.
     let (_, module_bytes, dep_ids) = build_upgrade_test_modules_with_dep_addr(
         "dep_on_upgrading_package",
         [("base_addr", runner.package.0)],
@@ -786,7 +792,8 @@ async fn test_interleaved_upgrades() {
 async fn test_publish_override_happy_path() {
     let mut runner = UpgradeStateRunner::new("move_upgrade/base").await;
 
-    // Base has been published already. Publish a package now that depends on the base package.
+    // Base has been published already. Publish a package now that depends on the
+    // base package.
     let (_, module_bytes, dep_ids) = build_upgrade_test_modules_with_dep_addr(
         "dep_on_upgrading_package",
         [("base_addr", runner.package.0)],
@@ -814,8 +821,8 @@ async fn test_publish_override_happy_path() {
         .unwrap()
         .0;
 
-    // Publish P that depends on both `dep_on_upgrading_package` and `stage1_basic_compatibility_valid`
-    // Dependency graph for dep_on_dep:
+    // Publish P that depends on both `dep_on_upgrading_package` and
+    // `stage1_basic_compatibility_valid`. Dependency graph for dep_on_dep:
     //   base(v1)
     //   base(v2) <-- dep_on_upgrading_package <-- dep_on_dep
     let (_, modules, dep_ids) = build_upgrade_test_modules_with_dep_addr(
@@ -867,9 +874,9 @@ async fn test_publish_transitive_happy_path() {
     // Dependency graph: base <-- dep_on_upgrading_package
     let (depender_package, _) = runner.publish(module_bytes, dep_ids).await;
 
-    // publish a root package that depends on the dependent package and on version 1 of the base
-    // package (both dependent package and transitively dependent package depended on the same
-    // version of the base package)
+    // publish a root package that depends on the dependent package and on version 1
+    // of the base package (both the dependent package and the transitively
+    // dependent package depend on the same version of the base package)
     let (_, root_module_bytes, root_dep_ids) = build_upgrade_test_modules_with_dep_addr(
         "dep_on_upgrading_package_transitive",
         [
@@ -882,7 +889,8 @@ async fn test_publish_transitive_happy_path() {
         ],
     );
     // Dependency graph: base(v1) <-- dep_on_upgrading_package
-    // base(v1) <-- dep_on_upgrading_package <-- dep_on_upgrading_package_transitive --> base(v1)
+    // base(v1) <-- dep_on_upgrading_package <--
+    // dep_on_upgrading_package_transitive --> base(v1)
     let (root_package, _) = runner.publish(root_module_bytes, root_dep_ids).await;
 
     let root_move_package = runner
@@ -902,8 +910,8 @@ async fn test_publish_transitive_happy_path() {
     assert!(dep_ids_in_linkage_table.contains(&runner.package.0));
     assert!(dep_ids_in_linkage_table.contains(&depender_package.0));
 
-    // Call into the root module to call base module's function (should abort due to base module's
-    // call_return_0 aborting with code 42)
+    // Call into the root module to call the base module's function (should abort
+    // due to the base module's call_return_0 aborting with code 42)
     let call_effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -958,9 +966,9 @@ async fn test_publish_transitive_override_happy_path() {
         .unwrap()
         .0;
 
-    // publish a root package that depends on the dependent package and on version 2 of the base
-    // package (overriding base package dependency of the dependent package which originally
-    // depended on base package version 1)
+    // publish a root package that depends on the dependent package and on version 2
+    // of the base package (overriding the base package dependency of the dependent
+    // package, which originally depended on base package version 1)
     let (_, root_module_bytes, root_dep_ids) = build_upgrade_test_modules_with_dep_addr(
         "dep_on_upgrading_package_transitive",
         [
@@ -973,7 +981,8 @@ async fn test_publish_transitive_override_happy_path() {
         ],
     );
     // Dependency graph: base(v1) <-- dep_on_upgrading_package
-    // base(v2) <-- dep_on_upgrading_package <-- dep_on_upgrading_package_transitive --> base(v2)
+    // base(v2) <-- dep_on_upgrading_package <--
+    // dep_on_upgrading_package_transitive --> base(v2)
     let (root_package, _) = runner.publish(root_module_bytes, root_dep_ids).await;
 
     let root_move_package = runner
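// The linkage-table assertions around this point check dependency overrides. A
// package's linkage table maps each dependency's original (runtime) ID to the
// concrete version it was built against (in sui_types this is roughly a
// BTreeMap<ObjectID, UpgradeInfo>). A simplified model of the override, with
// illustrative types:
use std::collections::BTreeMap;

fn resolve_dep_sketch(linkage: &BTreeMap<u8, u8>, original_id: u8) -> u8 {
    // A root package built against base(v2) records original -> v2, so a
    // transitive dependency declared against base(v1) resolves to v2 as well.
    *linkage.get(&original_id).unwrap_or(&original_id)
}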
@@ -993,8 +1002,8 @@ async fn test_publish_transitive_override_happy_path() {
     assert!(dep_ids_in_linkage_table.contains(&base_v2_package.0));
     assert!(dep_ids_in_linkage_table.contains(&depender_package.0));
 
-    // Call into the root module to call upgraded base module's function (should succeed due to base module's
-    // call_return_0 no longer aborting)
+    // Call into the root module to call the upgraded base module's function (should
+    // succeed due to the base module's call_return_0 no longer aborting)
     let call_effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -1046,7 +1055,8 @@ async fn test_upgraded_types_in_one_txn() {
     assert!(effects.status().is_ok(), "{:#?}", effects.status());
     let package_v3 = runner.package.0;
 
-    // Create an instance of the type introduced at version 2 using function from version 2.
+    // Create an instance of the type introduced at version 2 using a function from
+    // version 2.
     let effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -1062,7 +1072,8 @@ async fn test_upgraded_types_in_one_txn() {
         .find_map(|(b, owner)| matches!(owner, Owner::AddressOwner(_)).then_some(b))
         .unwrap();
 
-    // Create an instance of the type introduced at version 3 using function from version 3.
+    // Create an instance of the type introduced at version 3 using a function from
+    // version 3.
     let effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -1078,8 +1089,9 @@ async fn test_upgraded_types_in_one_txn() {
         .find_map(|(c, owner)| matches!(owner, Owner::AddressOwner(_)).then_some(c))
         .unwrap();
 
-    // modify objects created of types introduced at versions 2 and 3 and emit events using types
-    // introduced at versions 2 and 3 (using functions from version 3)
+    // modify objects created from types introduced at versions 2 and 3 and emit
+    // events using types introduced at versions 2 and 3 (using functions from
+    // version 3)
     let effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -1122,9 +1134,10 @@ async fn test_different_versions_across_calls() {
         .find(|(_, owner)| matches!(owner, Owner::Immutable))
         .unwrap()
         .0
-         .0;
+        .0;
 
-    // call the same function twice within the same block but from two different module versions
+    // call the same function twice within the same block but from two different
+    // module versions
     let effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
@@ -1142,7 +1155,8 @@ async fn test_conflicting_versions_across_calls() {
     // publishes base package at version 1
     let mut runner = UpgradeStateRunner::new("move_upgrade/base").await;
 
-    // publish a dependent package at version 1 that depends on the base package at version 1
+    // publish a dependent package at version 1 that depends on the base package at
+    // version 1
     let (_, module_bytes, dep_ids) = build_upgrade_test_modules_with_dep_addr(
         "dep_on_upgrading_package_upgradeable",
         [
@@ -1170,7 +1184,8 @@ async fn test_conflicting_versions_across_calls() {
         .unwrap()
         .0;
 
-    // publish a dependent package at version 2 that depends on the base package at version 2
+    // publish a dependent package at version 2 that depends on the base package at
+    // version 2
     let pt2 = {
         let mut builder = ProgrammableTransactionBuilder::new();
         let current_package_id = depender_package.0;
@@ -1215,15 +1230,15 @@ async fn test_conflicting_versions_across_calls() {
         .unwrap()
         .0;
 
-    // call the same function twice within the same block but from two different module versions
-    // that differ only by having different dependencies
+    // call the same function twice within the same block but from two different
+    // module versions that differ only by having different dependencies
     let effects = runner
         .run({
             let mut builder = ProgrammableTransactionBuilder::new();
             // call from upgraded package - should succeed
             move_call! { builder, (dependent_v2_package.0)::my_module::call_return_0() };
-            // call from original package - should abort (check later that the second command
-            // aborts)
+            // call from original package - should abort (check later that the second
+            // command aborts)
             move_call! { builder, (depender_package.0)::my_module::call_return_0() };
             builder.finish()
         })
@@ -1320,7 +1335,6 @@ async fn test_upgrade_max_packages() {
     let gas_object_id = ObjectID::random();
     let authority = init_state_with_ids(vec![(sender, gas_object_id)]).await;
 
-    //
     // Build and publish max number of packages allowed
     let (_, modules, dependencies) = build_package("move_upgrade/base", false);
 
@@ -1375,7 +1389,6 @@ async fn test_upgrade_more_than_max_packages_error() {
     let gas_object_id = ObjectID::random();
     let authority = init_state_with_ids(vec![(sender, gas_object_id)]).await;
 
-    //
     // Build and publish max number of packages allowed
     let (_, modules, dependencies) = build_package("move_upgrade/base", false);
diff --git a/crates/sui-core/src/unit_tests/narwhal_manager_tests.rs b/crates/sui-core/src/unit_tests/narwhal_manager_tests.rs
index 1913b72263f..2ba443f84ce 100644
--- a/crates/sui-core/src/unit_tests/narwhal_manager_tests.rs
+++ b/crates/sui-core/src/unit_tests/narwhal_manager_tests.rs
@@ -1,31 +1,37 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::authority::test_authority_builder::TestAuthorityBuilder;
-use crate::authority::AuthorityState;
-use crate::checkpoints::{CheckpointMetrics, CheckpointService, CheckpointServiceNoop};
-use crate::consensus_handler::ConsensusHandlerInitializer;
-use crate::consensus_manager::narwhal_manager::{NarwhalConfiguration, NarwhalManager};
-use crate::consensus_manager::{ConsensusManagerMetrics, ConsensusManagerTrait};
-use crate::consensus_validator::{SuiTxValidator, SuiTxValidatorMetrics};
-use crate::state_accumulator::StateAccumulator;
+use std::{sync::Arc, time::Duration};
+
 use bytes::Bytes;
-use fastcrypto::bls12381;
-use fastcrypto::traits::KeyPair;
+use fastcrypto::{bls12381, traits::KeyPair};
 use mysten_metrics::RegistryService;
 use narwhal_config::{Epoch, WorkerCache};
 use narwhal_types::{TransactionProto, TransactionsClient};
 use prometheus::Registry;
-use std::sync::Arc;
-use std::time::Duration;
 use sui_swarm_config::network_config_builder::ConfigBuilder;
-use sui_types::messages_checkpoint::{
-    CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary,
+use sui_types::{
+    messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary},
+    sui_system_state::{
+        epoch_start_sui_system_state::EpochStartSystemStateTrait, SuiSystemStateTrait,
+    },
+};
+use tokio::{
+    sync::{broadcast, mpsc},
+    time::{interval, sleep},
+};
+
+use crate::{
+    authority::{test_authority_builder::TestAuthorityBuilder, AuthorityState},
+    checkpoints::{CheckpointMetrics, CheckpointService, CheckpointServiceNoop},
+    consensus_handler::ConsensusHandlerInitializer,
+    consensus_manager::{
+        narwhal_manager::{NarwhalConfiguration, NarwhalManager},
+        ConsensusManagerMetrics, ConsensusManagerTrait,
+    },
+    consensus_validator::{SuiTxValidator, SuiTxValidatorMetrics},
+    state_accumulator::StateAccumulator,
 };
-use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait;
-use sui_types::sui_system_state::SuiSystemStateTrait;
-use tokio::sync::{broadcast, mpsc};
-use tokio::time::{interval, sleep};
 
 async fn send_transactions(
     name: &bls12381::min_sig::BLS12381PublicKey,
@@ -191,11 +197,13 @@ async fn test_narwhal_manager() {
     // ensure that no primary or worker node is running
     assert!(!narwhal_manager.is_running().await);
     assert!(!narwhal_manager.primary_node.is_running().await);
-    assert!(narwhal_manager
-        .worker_nodes
-        .workers_running()
-        .await
-        .is_empty());
+    assert!(
+        narwhal_manager
+            .worker_nodes
+            .workers_running()
+            .await
+            .is_empty()
+    );
 
     let system_state = state
         .get_sui_system_state_object_for_testing()
diff --git a/crates/sui-core/src/unit_tests/overload_monitor_tests.rs b/crates/sui-core/src/unit_tests/overload_monitor_tests.rs
index 31323f32a41..79a8261c873 100644
--- a/crates/sui-core/src/unit_tests/overload_monitor_tests.rs
+++ b/crates/sui-core/src/unit_tests/overload_monitor_tests.rs
@@ -4,11 +4,12 @@
 // Tests that overload monitor only starts on validators.
 #[cfg(msim)]
 mod simtests {
-    use std::sync::atomic::AtomicUsize;
-    use std::sync::atomic::Ordering;
-    use std::sync::Arc;
-    use sui_macros::register_fail_point;
-    use sui_macros::sim_test;
+    use std::sync::{
+        atomic::{AtomicUsize, Ordering},
+        Arc,
+    };
+
+    use sui_macros::{register_fail_point, sim_test};
     use test_cluster::TestClusterBuilder;
 
     #[sim_test]
@@ -22,8 +23,8 @@ mod simtests {
             counter_clone.fetch_add(1, Ordering::SeqCst);
         });
 
-        // Creates a cluster, and tests that number of nodes with overload monitor is equal to
-        // the number of validators.
+        // Creates a cluster, and tests that the number of nodes with an overload
+        // monitor is equal to the number of validators.
        let test_cluster = TestClusterBuilder::new().build().await;
         let nodes_with_overload_monitor = counter.load(Ordering::SeqCst);
         assert_eq!(
@@ -38,4 +39,5 @@ mod simtests {
     }
 }
 
-// TODO: move other overload relate tests from execution_driver_tests.rs to here.
+// TODO: move other overload-related tests from execution_driver_tests.rs to
+// here.
diff --git a/crates/sui-core/src/unit_tests/pay_sui_tests.rs b/crates/sui-core/src/unit_tests/pay_sui_tests.rs
index 7430171f295..d0b4b6534b2 100644
--- a/crates/sui-core/src/unit_tests/pay_sui_tests.rs
+++ b/crates/sui-core/src/unit_tests/pay_sui_tests.rs
@@ -1,23 +1,27 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::authority::authority_tests::{init_state_with_committee, send_and_confirm_transaction};
-use crate::authority::test_authority_builder::TestAuthorityBuilder;
-use crate::authority::AuthorityState;
+use std::{collections::HashMap, sync::Arc};
+
 use futures::future::join_all;
-use std::collections::HashMap;
-use std::sync::Arc;
-use sui_types::base_types::{ObjectID, ObjectRef, SuiAddress};
-use sui_types::crypto::AccountKeyPair;
-use sui_types::effects::{SignedTransactionEffects, TransactionEffectsAPI};
-use sui_types::error::UserInputError;
-use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus};
-use sui_types::gas_coin::GasCoin;
-use sui_types::object::Object;
-use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder;
-use sui_types::transaction::TransactionData;
-use sui_types::utils::to_sender_signed_transaction;
-use sui_types::{base_types::dbg_addr, crypto::get_key_pair, error::SuiError};
+use sui_types::{
+    base_types::{dbg_addr, ObjectID, ObjectRef, SuiAddress},
+    crypto::{get_key_pair, AccountKeyPair},
+    effects::{SignedTransactionEffects, TransactionEffectsAPI},
+    error::{SuiError, UserInputError},
+    execution_status::{ExecutionFailureStatus, ExecutionStatus},
+    gas_coin::GasCoin,
+    object::Object,
+    programmable_transaction_builder::ProgrammableTransactionBuilder,
+    transaction::TransactionData,
+    utils::to_sender_signed_transaction,
+};
+
+use crate::authority::{
+    authority_tests::{init_state_with_committee, send_and_confirm_transaction},
+    test_authority_builder::TestAuthorityBuilder,
+    AuthorityState,
+};
 
 #[tokio::test]
 async fn test_pay_sui_failure_empty_recipients() {
@@ -31,7 +35,7 @@ async fn test_pay_sui_failure_empty_recipients() {
     let effects = res.txn_result.unwrap().into_data();
     assert_eq!(effects.status(), &ExecutionStatus::Success);
     assert_eq!(effects.mutated().len(), 1);
-    assert_eq!(effects.mutated()[0].0 .0, coin_id);
+    assert_eq!(effects.mutated()[0].0.0, coin_id);
     assert!(effects.deleted().is_empty());
     assert!(effects.created().is_empty());
 }
@@ -166,9 +170,9 @@ async fn test_pay_sui_success_one_input_coin() -> anyhow::Result<()> {
     assert_eq!(*effects.status(), ExecutionStatus::Success);
     // make sure each recipient receives the specified amount
     assert_eq!(effects.created().len(), 3);
-    let created_obj_id1 = effects.created()[0].0 .0;
-    let created_obj_id2 = effects.created()[1].0 .0;
-    let created_obj_id3 = effects.created()[2].0 .0;
+    let created_obj_id1 = effects.created()[0].0.0;
+    let created_obj_id2 = effects.created()[1].0.0;
+    let created_obj_id3 = effects.created()[2].0.0;
     let created_obj1 = res
         .authority_state
         .get_object(&created_obj_id1)
@@ -205,8 +209,9 @@ async fn test_pay_sui_success_one_input_coin() -> anyhow::Result<()> {
     assert_eq!(GasCoin::try_from(&created_obj3)?.value(), coin_val3);
 
     // make sure the first object still belongs to the sender,
-    // the value is equal to all residual values after amounts transferred and gas payment.
-    assert_eq!(effects.mutated()[0].0 .0, object_id);
+    // and that its value equals the residual value after the transferred amounts
+    // and gas payment.
+    assert_eq!(effects.mutated()[0].0.0, object_id);
     assert_eq!(effects.mutated()[0].1, sender);
     let gas_used = effects.gas_cost_summary().net_gas_usage() as u64;
     let gas_object = res.authority_state.get_object(&object_id).await?.unwrap();
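// The residual-value bookkeeping asserted above, as plain arithmetic (a sketch
// with illustrative names): after PaySui, the sender's coin keeps whatever was
// not transferred out or consumed as gas.
fn residual_value_sketch(initial: u64, amounts: &[u64], net_gas_used: u64) -> u64 {
    initial - amounts.iter().sum::<u64>() - net_gas_used
}
// e.g. a 100_000 MIST coin paying out 10_000 + 20_000 + 30_000 with 5_000 MIST
// of net gas leaves 35_000 MIST on the original coin.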
@@ -246,8 +251,8 @@ async fn test_pay_sui_success_multiple_input_coins() -> anyhow::Result<()> {
 
     // make sure each recipient receives the specified amount
     assert_eq!(effects.created().len(), 2);
-    let created_obj_id1 = effects.created()[0].0 .0;
-    let created_obj_id2 = effects.created()[1].0 .0;
+    let created_obj_id1 = effects.created()[0].0.0;
+    let created_obj_id2 = effects.created()[1].0.0;
     let created_obj1 = res
         .authority_state
         .get_object(&created_obj_id1)
@@ -271,8 +276,8 @@ async fn test_pay_sui_success_multiple_input_coins() -> anyhow::Result<()> {
     assert_eq!(GasCoin::try_from(&created_obj1)?.value(), coin_val1);
     assert_eq!(GasCoin::try_from(&created_obj2)?.value(), coin_val2);
     // make sure the first input coin still belongs to the sender,
-    // the value is equal to all residual values after amounts transferred and gas payment.
-    assert_eq!(effects.mutated()[0].0 .0, object_id1);
+    // and that its value equals the residual value after the transferred amounts
+    // and gas payment.
+    assert_eq!(effects.mutated()[0].0.0, object_id1);
     assert_eq!(effects.mutated()[0].1, sender);
     let gas_used = effects.gas_cost_summary().net_gas_usage() as u64;
     let gas_object = res.authority_state.get_object(&object_id1).await?.unwrap();
diff --git a/crates/sui-core/src/unit_tests/server_tests.rs b/crates/sui-core/src/unit_tests/server_tests.rs
index e6ebeeace93..2b7aea28e22 100644
--- a/crates/sui-core/src/unit_tests/server_tests.rs
+++ b/crates/sui-core/src/unit_tests/server_tests.rs
@@ -1,24 +1,26 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use sui_types::{
+    base_types::{dbg_addr, dbg_object_id},
+    messages_grpc::LayoutGenerationOption,
+};
+
 use super::*;
 use crate::{
     authority::authority_tests::init_state_with_object_id,
     authority_client::{AuthorityAPI, NetworkAuthorityClient},
 };
-use sui_types::{
-    base_types::{dbg_addr, dbg_object_id},
-    messages_grpc::LayoutGenerationOption,
-};
 
-//This is the most basic example of how to test the server logic
+// This is the most basic example of how to test the server logic
 #[tokio::test]
 async fn test_simple_request() {
     let sender = dbg_addr(1);
     let object_id = dbg_object_id(1);
     let authority_state = init_state_with_object_id(sender, object_id).await;
 
-    // The following two fields are only needed for shared objects (not by this bench).
+    // The following two fields are only needed for shared objects (not by this
+    // bench).
     let consensus_address = "/ip4/127.0.0.1/tcp/0/http".parse().unwrap();
 
     let server = AuthorityServer::new_for_test(
diff --git a/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs b/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs
index 842f50eba64..3b1770eb3c3 100644
--- a/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs
+++ b/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs
@@ -4,19 +4,29 @@
 
 use std::sync::Arc;
 
+use move_core_types::ident_str;
+use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion};
 use sui_types::{
-    base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress},
+    base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest},
+    committee::EpochId,
     crypto::{get_key_pair, AccountKeyPair},
-    effects::TransactionEffects,
-    execution_status::{CommandArgumentError, ExecutionFailureStatus},
+    effects::{TransactionEffects, TransactionEffectsAPI},
+    error::{ExecutionError, SuiError},
+    execution_status::{
+        CommandArgumentError, ExecutionFailureStatus,
+        ExecutionFailureStatus::{InputObjectDeleted, SharedObjectOperationNotAllowed},
+    },
     object::Object,
     programmable_transaction_builder::ProgrammableTransactionBuilder,
-    transaction::{ProgrammableTransaction, Transaction, TEST_ONLY_GAS_UNIT_FOR_PUBLISH},
+    transaction::{
+        ObjectArg, ProgrammableTransaction, Transaction, VerifiedCertificate,
+        TEST_ONLY_GAS_UNIT_FOR_PUBLISH,
+    },
 };
 
-use crate::authority::authority_test_utils::execute_sequenced_certificate_to_effects;
 use crate::{
     authority::{
+        authority_test_utils::execute_sequenced_certificate_to_effects,
         authority_tests::{
             build_programmable_transaction, certify_shared_obj_transaction_no_execution,
             enqueue_all_and_execute_all, execute_programmable_transaction,
@@ -27,16 +37,6 @@ use crate::{
     },
     move_call,
 };
-use move_core_types::ident_str;
-use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion};
-use sui_types::base_types::TransactionDigest;
-use sui_types::committee::EpochId;
-use sui_types::effects::TransactionEffectsAPI;
-use sui_types::error::{ExecutionError, SuiError};
-use sui_types::execution_status::ExecutionFailureStatus::{
-    InputObjectDeleted, SharedObjectOperationNotAllowed,
-};
-use sui_types::transaction::{ObjectArg, VerifiedCertificate};
 
 pub struct TestRunner {
     pub sender: SuiAddress,
@@ -735,7 +735,8 @@ async fn test_delete_shared_object_immut_mut_mut_interleave() {
         *effects.transaction_digest(),
     );
 
-    // Try to delete again with the object passed as mutable and make sure we get `InputObjectDeleted`.
+    // Try to delete again with the object passed as mutable and make sure we get
+    // `InputObjectDeleted`.
     let (effects, _) = user1
         .execute_sequenced_certificate_to_effects(cert_immut2)
         .await
@@ -964,8 +965,9 @@ async fn test_shifting_mutate_and_deletes_multiple_objects() {
         (shared_obj.0, shared_obj.1)
     };
 
-    // Test that in the presence of multiple shared objects one of which may be deleted, that we
-    // track versions, notifications, transaction dependencies, and execute correctly.
+    // Test that in the presence of multiple shared objects, one of which may be
+    // deleted, we track versions, notifications, and transaction dependencies,
+    // and execute correctly.
     // Tx_i^j = Transaction i on shared object So_j
     // R = Read, M = Write/Mutate, _ = not present
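// The deletion tests below lean on the "marker table": when a shared object is
// deleted, the authority records its (id, version) for the epoch so that later
// certificates touching the object can still be sequenced and failed
// deterministically. A simplified model (illustrative types; the real store
// API is richer than this):
use std::collections::HashSet;

#[derive(Default)]
struct MarkerTableSketch(HashSet<(u64 /* epoch */, [u8; 32] /* id */, u64 /* version */)>);

impl MarkerTableSketch {
    fn record_deleted(&mut self, epoch: u64, id: [u8; 32], version: u64) {
        self.0.insert((epoch, id, version));
    }
    // Mirrors the role of `object_exists_in_marker_table` in the assertions.
    fn deleted_at(&self, epoch: u64, id: [u8; 32], version: u64) -> bool {
        self.0.contains(&(epoch, id, version))
    }
}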
@@ -1375,8 +1377,8 @@ async fn test_mutate_interleaved_read_only_enqueued_after_delete() {
 
         // The gas coin gets mutated
         assert_eq!(effects.mutated().len(), 1);
 
-        // NB: the tx dependency is still on the first mutation tx and not on the intervening read
-        // of the SO.
+        // NB: the tx dependency is still on the first mutation tx and not on the
+        // intervening read of the SO.
         assert!(effects.dependencies().contains(first_mutate_digest));
     }
 }
@@ -1459,9 +1461,10 @@ async fn test_delete_with_shared_after_mutate_enqueued() {
         .await
         .unwrap();
 
-    // create an execution order where the second mutation on an already deleted shared object
-    // expects a higher version because of higher versioned additional input
-    // expected input seq numbers (4, 6) (7) (15, 7_deleted) (16_deleted)
+    // create an execution order where the second mutation on an already deleted
+    // shared object expects a higher version because of a higher-versioned
+    // additional input. Expected input seq numbers: (4, 6) (7) (15, 7_deleted)
+    // (16_deleted)
     let res = user_1
         .enqueue_all_and_execute_all(vec![
             delete_cert,
@@ -1476,9 +1479,11 @@ async fn test_delete_with_shared_after_mutate_enqueued() {
     assert!(delete_effects.status().is_ok());
     let deleted_obj_ver = delete_effects.deleted()[0].1;
 
-    assert!(user_1
-        .object_exists_in_marker_table(&shared_obj_id, &deleted_obj_ver, 0)
-        .is_some());
+    assert!(
+        user_1
+            .object_exists_in_marker_table(&shared_obj_id, &deleted_obj_ver, 0)
+            .is_some()
+    );
 
     let mutate_effects = res.get(1).unwrap();
     assert!(mutate_effects.status().is_ok());
@@ -1694,8 +1699,8 @@ async fn test_certs_fail_after_delete() {
 
     let mutate_cert_result = user_1.certify_shared_obj_transaction(mutate_obj_tx).await;
 
-    // In same epoch, so can still certify this transaction even though it uses a deleted shared
-    // object.
+    // We are in the same epoch, so we can still certify this transaction even
+    // though it uses a deleted shared object.
     assert!(mutate_cert_result.is_ok());
 }
diff --git a/crates/sui-core/src/unit_tests/subscription_handler_tests.rs b/crates/sui-core/src/unit_tests/subscription_handler_tests.rs
index 67b2e78b311..149aa4ab4d1 100644
--- a/crates/sui-core/src/unit_tests/subscription_handler_tests.rs
+++ b/crates/sui-core/src/unit_tests/subscription_handler_tests.rs
@@ -1,24 +1,20 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use move_core_types::account_address::AccountAddress;
-use move_core_types::identifier::Identifier;
-
 use move_core_types::{
+    account_address::AccountAddress,
     annotated_value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout},
     ident_str,
+    identifier::Identifier,
     language_storage::StructTag,
 };
-
-use serde::Deserialize;
-use serde::Serialize;
+use serde::{Deserialize, Serialize};
 use serde_json::json;
 use sui_json_rpc_types::SuiMoveStruct;
-
-use sui_types::base_types::ObjectID;
-use sui_types::gas_coin::GasCoin;
-use sui_types::object::bounded_visitor::BoundedVisitor;
-use sui_types::{MOVE_STDLIB_ADDRESS, SUI_FRAMEWORK_ADDRESS};
+use sui_types::{
+    base_types::ObjectID, gas_coin::GasCoin, object::bounded_visitor::BoundedVisitor,
+    MOVE_STDLIB_ADDRESS, SUI_FRAMEWORK_ADDRESS,
+};
 
 #[test]
 fn test_to_json_value() {
diff --git a/crates/sui-core/src/unit_tests/transaction_deny_tests.rs b/crates/sui-core/src/unit_tests/transaction_deny_tests.rs
index cd7e578385e..788aec3dccb 100644
--- a/crates/sui-core/src/unit_tests/transaction_deny_tests.rs
+++ b/crates/sui-core/src/unit_tests/transaction_deny_tests.rs
@@ -1,34 +1,44 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::authority::authority_test_utils::{
-    publish_package_on_single_authority, upgrade_package_on_single_authority,
-};
-use crate::authority::test_authority_builder::TestAuthorityBuilder;
-use crate::authority::AuthorityState;
-use crate::test_utils::make_transfer_sui_transaction;
-use fastcrypto::ed25519::Ed25519KeyPair;
-use fastcrypto::traits::KeyPair;
+use std::{path::PathBuf, sync::Arc};
+
+use fastcrypto::{ed25519::Ed25519KeyPair, traits::KeyPair};
 use move_core_types::ident_str;
-use std::path::PathBuf;
-use std::sync::Arc;
-use sui_config::certificate_deny_config::CertificateDenyConfigBuilder;
-use sui_config::transaction_deny_config::{TransactionDenyConfig, TransactionDenyConfigBuilder};
-use sui_swarm_config::genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT};
-use sui_swarm_config::network_config::NetworkConfig;
+use sui_config::{
+    certificate_deny_config::CertificateDenyConfigBuilder,
+    transaction_deny_config::{TransactionDenyConfig, TransactionDenyConfigBuilder},
+};
+use sui_swarm_config::{
+    genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT},
+    network_config::NetworkConfig,
+};
 use sui_test_transaction_builder::TestTransactionBuilder;
-use sui_types::base_types::{ObjectID, ObjectRef, SuiAddress};
-use sui_types::effects::TransactionEffectsAPI;
-use sui_types::error::{SuiError, SuiResult, UserInputError};
-use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus};
-use sui_types::messages_grpc::HandleTransactionResponse;
-use sui_types::transaction::{
-    CallArg, CertifiedTransaction, Transaction, TransactionData, VerifiedCertificate,
-    VerifiedTransaction, TEST_ONLY_GAS_UNIT_FOR_TRANSFER,
+use sui_types::{
+    base_types::{ObjectID, ObjectRef, SuiAddress},
+    effects::TransactionEffectsAPI,
+    error::{SuiError, SuiResult, UserInputError},
+    execution_status::{ExecutionFailureStatus, ExecutionStatus},
+    messages_grpc::HandleTransactionResponse,
+    transaction::{
+        CallArg, CertifiedTransaction, Transaction, TransactionData, VerifiedCertificate,
+        VerifiedTransaction, TEST_ONLY_GAS_UNIT_FOR_TRANSFER,
+    },
+    utils::{
+        get_zklogin_user_address, make_zklogin_tx, to_sender_signed_transaction,
+        to_sender_signed_transaction_with_multi_signers,
+    },
 };
-use sui_types::utils::get_zklogin_user_address;
-use sui_types::utils::{
make_zklogin_tx, to_sender_signed_transaction, to_sender_signed_transaction_with_multi_signers, + +use crate::{ + authority::{ + authority_test_utils::{ + publish_package_on_single_authority, upgrade_package_on_single_authority, + }, + test_authority_builder::TestAuthorityBuilder, + AuthorityState, + }, + test_utils::make_transfer_sui_transaction, }; const ACCOUNT_NUM: usize = 5; @@ -202,10 +212,12 @@ async fn test_zklogin_transaction_disabled() { #[tokio::test] async fn test_object_denied() { - // We need to create the authority state once to get one of the gas coin object IDs. + // We need to create the authority state once to get one of the gas coin object + // IDs. let (network_config, state) = setup_test(TransactionDenyConfigBuilder::new().build()).await; let accounts = get_accounts_and_coins(&network_config, &state); - // Re-create the state such that we could specify a gas coin object to be denied. + // Re-create the state such that we could specify a gas coin object to be + // denied. let obj_ref = accounts[0].2[0]; let state = reload_state_with_new_deny_config( &network_config, @@ -220,7 +232,8 @@ async fn test_object_denied() { #[tokio::test] async fn test_signer_denied() { - // We need to create the authority state once to get one of the account addresses. + // We need to create the authority state once to get one of the account + // addresses. let (network_config, state) = setup_test(TransactionDenyConfigBuilder::new().build()).await; let accounts = get_accounts_and_coins(&network_config, &state); @@ -288,7 +301,8 @@ async fn test_package_denied() { let accounts = get_accounts_and_coins(&network_config, &state); let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); // Publish 3 packages, where b depends on c, and a depends on b. - // Also upgrade c to c', and upgrade b to b' (which will start using c' instead of c as dependency). + // Also upgrade c to c', and upgrade b to b' (which will start using c' instead + // of c as dependency). let (package_c, cap_c) = publish_package_on_single_authority( path.join("src/unit_tests/data/package_deny/c"), accounts[0].0, @@ -414,7 +428,8 @@ async fn test_package_denied() { .await; assert_denied(&result); - // Upgrade a using c' as dependency will succeed since it no longer depends on c. + // Upgrade a using c' as dependency will succeed since it no longer depends on + // c. 
let result = upgrade_package_on_single_authority( path.join("src/unit_tests/data/package_deny/a"), accounts[0].0, diff --git a/crates/sui-core/src/unit_tests/transaction_manager_tests.rs b/crates/sui-core/src/unit_tests/transaction_manager_tests.rs index a5e6e296570..0347c3fb900 100644 --- a/crates/sui-core/src/unit_tests/transaction_manager_tests.rs +++ b/crates/sui-core/src/unit_tests/transaction_manager_tests.rs @@ -4,20 +4,18 @@ use std::{time::Duration, vec}; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::executable_transaction::VerifiedExecutableTransaction; -use sui_types::transaction::VerifiedTransaction; use sui_types::{ base_types::ObjectID, crypto::deterministic_random_account_key, + executable_transaction::VerifiedExecutableTransaction, object::Object, storage::InputKey, - transaction::{CallArg, ObjectArg}, + transaction::{CallArg, ObjectArg, VerifiedTransaction}, SUI_FRAMEWORK_PACKAGE_ID, }; -use tokio::time::Instant; use tokio::{ sync::mpsc::{error::TryRecvError, unbounded_channel, UnboundedReceiver}, - time::sleep, + time::{sleep, Instant}, }; use crate::{ @@ -29,8 +27,8 @@ use crate::{ fn make_transaction_manager( state: &AuthorityState, ) -> (TransactionManager, UnboundedReceiver<PendingCertificate>) { - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (tx_ready_certificates, rx_ready_certificates) = unbounded_channel(); let transaction_manager = TransactionManager::new( state.get_cache_reader().clone(), @@ -43,8 +41,8 @@ fn make_transaction_manager( } fn make_transaction(gas_object: Object, input: Vec<CallArg>) -> VerifiedExecutableTransaction { - // Use fake module, function, package and gas prices since they are irrelevant for testing - // transaction manager. + // Use fake module, function, package and gas prices since they are irrelevant + // for testing the transaction manager. let rgp = 100; let (sender, keypair) = deterministic_random_account_key(); let transaction = @@ -76,22 +74,26 @@ async fn transaction_manager_basics() { .collect(); let state = init_state_with_objects(gas_objects.clone()).await; - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (transaction_manager, mut rx_ready_certificates) = make_transaction_manager(&state); // TM should output no transaction. - assert!(rx_ready_certificates - .try_recv() - .is_err_and(|err| err == TryRecvError::Empty)); + assert!( + rx_ready_certificates + .try_recv() + .is_err_and(|err| err == TryRecvError::Empty) + ); // TM should be empty at the beginning. transaction_manager.check_empty_for_testing(); // Enqueue empty vec should not crash. transaction_manager.enqueue(vec![], &state.epoch_store_for_testing()); // TM should output no transaction. - assert!(rx_ready_certificates - .try_recv() - .is_err_and(|err| err == TryRecvError::Empty)); + assert!( + rx_ready_certificates + .try_recv() + .is_err_and(|err| err == TryRecvError::Empty) + ); // Enqueue a transaction with existing gas object, empty input.
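An aside on the assertion the hunks above keep reformatting: asserting on `TryRecvError::Empty` specifically, rather than any error, distinguishes "no ready certificate yet" from "sender dropped", so a prematurely closed channel fails the test instead of passing it. A minimal self-contained sketch of the same check, outside the test harness:

```rust
use tokio::sync::mpsc::{error::TryRecvError, unbounded_channel};

#[tokio::main]
async fn main() {
    let (_tx, mut rx) = unbounded_channel::<u64>();
    // The sender is still alive and nothing was sent, so try_recv must yield
    // TryRecvError::Empty, not TryRecvError::Disconnected.
    assert!(rx.try_recv().is_err_and(|err| err == TryRecvError::Empty));
}
```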
let transaction = make_transaction(gas_objects[0].clone(), vec![]); @@ -126,18 +128,22 @@ async fn transaction_manager_basics() { transaction_manager.enqueue(vec![transaction.clone()], &state.epoch_store_for_testing()); // TM should output no transaction yet. sleep(Duration::from_secs(1)).await; - assert!(rx_ready_certificates - .try_recv() - .is_err_and(|err| err == TryRecvError::Empty)); + assert!( + rx_ready_certificates + .try_recv() + .is_err_and(|err| err == TryRecvError::Empty) + ); assert_eq!(transaction_manager.inflight_queue_len(), 1); // Duplicated enqueue is allowed. transaction_manager.enqueue(vec![transaction.clone()], &state.epoch_store_for_testing()); sleep(Duration::from_secs(1)).await; - assert!(rx_ready_certificates - .try_recv() - .is_err_and(|err| err == TryRecvError::Empty)); + assert!( + rx_ready_certificates + .try_recv() + .is_err_and(|err| err == TryRecvError::Empty) + ); assert_eq!(transaction_manager.inflight_queue_len(), 1); @@ -149,8 +155,8 @@ async fn transaction_manager_basics() { // TM should output the transaction eventually. let pending_certificate = rx_ready_certificates.recv().await.unwrap(); - // Tests that pending certificate stats are recorded properly. The ready time should be - // 2 seconds apart from the enqueue time. + // Tests that pending certificate stats are recorded properly. The ready time + // should be 2 seconds apart from the enqueue time. assert!(pending_certificate.stats.enqueue_time >= tx_start_time); assert!( pending_certificate.stats.ready_time.unwrap() - pending_certificate.stats.enqueue_time @@ -160,9 +166,11 @@ async fn transaction_manager_basics() { // Re-enqueue the same transaction should not result in another output. transaction_manager.enqueue(vec![transaction.clone()], &state.epoch_store_for_testing()); sleep(Duration::from_secs(1)).await; - assert!(rx_ready_certificates - .try_recv() - .is_err_and(|err| err == TryRecvError::Empty)); + assert!( + rx_ready_certificates + .try_recv() + .is_err_and(|err| err == TryRecvError::Empty) + ); // Notify TM about transaction commit transaction_manager.notify_commit( @@ -175,14 +183,14 @@ async fn transaction_manager_basics() { transaction_manager.check_empty_for_testing(); } -// Tests when objects become available, correct set of transactions can be sent to execute. -// Specifically, we have following setup, +// Tests that when objects become available, the correct set of transactions +// can be sent to execute. Specifically, we have the following setup, // shared_object shared_object_2 // / | \ \ / // tx_0 tx_1 tx_2 tx_3 // r r w r -// And when shared_object is available, tx_0, tx_1, and tx_2 can be executed. And when -// shared_object_2 becomes available, tx_3 can be executed. +// And when shared_object is available, tx_0, tx_1, and tx_2 can be executed. +// And when shared_object_2 becomes available, tx_3 can be executed. #[tokio::test(flavor = "current_thread", start_paused = true)] async fn transaction_manager_object_dependency() { // Initialize an authority state, with gas objects and a shared object. @@ -205,8 +213,8 @@ async fn transaction_manager_object_dependency() { ) .await; - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (transaction_manager, mut rx_ready_certificates) = make_transaction_manager(&state); // TM should output no transaction.
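For the dependency DAG in the comment above (tx_0, tx_1, tx_3 read; tx_2 writes), the read/write distinction is carried by the `mutable` flag on `ObjectArg::SharedObject`. A hedged sketch, with a hypothetical helper name, of how such an input is built (the test itself constructs these inline):

```rust
use sui_types::{
    base_types::{ObjectID, SequenceNumber},
    transaction::{CallArg, ObjectArg},
};

// Hypothetical helper: build the shared-object input for one of tx_0..tx_3.
// `mutable: false` for the readers (r), `mutable: true` for the writer (w);
// readers can be dispatched together, while the writer conflicts with them.
fn shared_input(id: ObjectID, initial_shared_version: SequenceNumber, mutable: bool) -> CallArg {
    CallArg::Object(ObjectArg::SharedObject {
        id,
        initial_shared_version,
        mutable,
    })
}
```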
assert!(rx_ready_certificates.try_recv().is_err()); @@ -259,7 +267,8 @@ async fn transaction_manager_object_dependency() { ) .unwrap(); - // Enqueue one transaction with two readonly shared object inputs, `shared_object` and `shared_object_2`. + // Enqueue one transaction with two readonly shared object inputs, + // `shared_object` and `shared_object_2`. let shared_version_2 = 2000.into(); let shared_object_arg_read_2 = ObjectArg::SharedObject { id: shared_object_2.id(), @@ -374,8 +383,8 @@ async fn transaction_manager_receiving_notify_commit() { .collect(); let state = init_state_with_objects(gas_objects.clone()).await; - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (transaction_manager, mut rx_ready_certificates) = make_transaction_manager(&state); // TM should output no transaction. assert!(rx_ready_certificates.try_recv().is_err()); @@ -386,12 +395,13 @@ async fn transaction_manager_receiving_notify_commit() { let object_arguments: Vec<_> = (0..10) .map(|i| { let object = Object::with_id_owner_version_for_testing(obj_id, i.into(), owner); - // Every other transaction receives the object, and we create a run of multiple receives in - // a row at the beginning to test that the TM doesn't get stuck in either configuration of: - // ImmOrOwnedObject => Receiving, - // Receiving => Receiving + // Every other transaction receives the object, and we create a run of multiple + // receives in a row at the beginning to test that the TM doesn't get stuck in + // any of these configurations: ImmOrOwnedObject => Receiving, + // Receiving => Receiving // Receiving => ImmOrOwnedObject - // ImmOrOwnedObject => ImmOrOwnedObject is already tested as the default case on mainnet. + // ImmOrOwnedObject => ImmOrOwnedObject is already tested as the default case on + // mainnet. let object_arg = if i % 2 == 0 || i == 3 { ObjectArg::Receiving(object.compute_object_reference()) } else { @@ -417,20 +427,20 @@ async fn transaction_manager_receiving_notify_commit() { &state.epoch_store_for_testing(), ); - // Now start to unravel the rest of the transactions by notifying that each subsequent - // transaction has been processed. + // Now start to unravel the rest of the transactions by notifying that each + // subsequent transaction has been processed. for (i, (object, txn)) in object_arguments.iter().enumerate() { - // TM should output the transaction eventually now that the receiving object has become - // available. + // TM should output the transaction eventually now that the receiving object has + // become available. rx_ready_certificates.recv().await.unwrap(); - // Only one transaction at a time should become available though. So if we try to get - // another one it should fail. + // Only one transaction at a time should become available though. So if we try + // to get another one it should fail. sleep(Duration::from_secs(1)).await; assert!(rx_ready_certificates.try_recv().is_err()); - // Notify the TM that the transaction has been processed, and that it has written the - // object at the next version. + // Notify the TM that the transaction has been processed, and that it has + // written the object at the next version.
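The receive/owned alternation described in the comment just reflowed above boils down to one branch; a sketch under the assumptions of this test file (the helper name is illustrative):

```rust
use sui_types::{object::Object, transaction::ObjectArg};

// Illustrative helper: even iterations (plus i == 3, giving a run of receives
// up front) take the object as a Receiving input; the rest take it as an
// owned input, covering the Receiving/ImmOrOwnedObject transitions above.
fn object_arg_for(object: &Object, i: u64) -> ObjectArg {
    if i % 2 == 0 || i == 3 {
        ObjectArg::Receiving(object.compute_object_reference())
    } else {
        ObjectArg::ImmOrOwnedObject(object.compute_object_reference())
    }
}
```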
transaction_manager.notify_commit( txn.digest(), vec![InputKey::VersionedObject { @@ -440,8 +450,8 @@ async fn transaction_manager_receiving_notify_commit() { &state.epoch_store_for_testing(), ); - // TM should now output another transaction to run since it the next version of that object - // has become available. + // TM should now output another transaction to run since the next version of + // that object has become available. assert_eq!( transaction_manager.inflight_queue_len(), object_arguments.len() - i - 1 @@ -465,8 +475,8 @@ async fn transaction_manager_receiving_object_ready_notifications() { .collect(); let state = init_state_with_objects(gas_objects.clone()).await; - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (transaction_manager, mut rx_ready_certificates) = make_transaction_manager(&state); // TM should output no transaction. assert!(rx_ready_certificates.try_recv().is_err()); @@ -523,8 +533,8 @@ async fn transaction_manager_receiving_object_ready_notifications() { &state.epoch_store_for_testing(), ); - // TM should output the transaction eventually now that the receiving object has become - // available. + // TM should output the transaction eventually now that the receiving object has + // become available. rx_ready_certificates.recv().await.unwrap(); // Notify TM that the receiving object 0 is available. @@ -533,8 +543,8 @@ async fn transaction_manager_receiving_object_ready_notifications() { &state.epoch_store_for_testing(), ); - // TM should output the transaction eventually now that the receiving object has become - // available. + // TM should output the transaction eventually now that the receiving object has + // become available. rx_ready_certificates.recv().await.unwrap(); } @@ -551,8 +561,8 @@ async fn transaction_manager_receiving_object_ready_notifications_multiple_of_sa .collect(); let state = init_state_with_objects(gas_objects.clone()).await; - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (transaction_manager, mut rx_ready_certificates) = make_transaction_manager(&state); // TM should output no transaction. assert!(rx_ready_certificates.try_recv().is_err()); @@ -581,8 +591,8 @@ async fn transaction_manager_receiving_object_ready_notifications_multiple_of_sa vec![CallArg::Object(receiving_object_arg1)], ); - // Enqueuing a transaction with a receiving object that is available at the time it is enqueued - // should become immediately available. + // A transaction enqueued with a receiving object that is already available at + // enqueue time should become ready immediately. let gas_receiving_arg = ObjectArg::Receiving(gas_objects[3].compute_object_reference()); let tx1 = make_transaction( gas_objects[0].clone(), @@ -623,17 +633,18 @@ async fn transaction_manager_receiving_object_ready_notifications_multiple_of_sa &state.epoch_store_for_testing(), ); - // TM should output both transactions depending on the receiving object now that the - // transaction's receiving object has become available.
+ // TM should output both transactions depending on the receiving object now that + // the transaction's receiving object has become available. rx_ready_certificates.recv().await.unwrap(); rx_ready_certificates.recv().await.unwrap(); - // Only two transactions that were dependent on the receiving object should be output. + // Only two transactions that were dependent on the receiving object should be + // output. assert!(rx_ready_certificates.try_recv().is_err()); - // Enqueue a transaction with a receiving object that is available at the time it is enqueued. - // This should be immediately available. + // Enqueue a transaction with a receiving object that is available at the time + // it is enqueued. This should be immediately available. transaction_manager.enqueue(vec![tx1.clone()], &state.epoch_store_for_testing()); sleep(Duration::from_secs(1)).await; rx_ready_certificates.recv().await.unwrap(); @@ -644,8 +655,8 @@ async fn transaction_manager_receiving_object_ready_notifications_multiple_of_sa &state.epoch_store_for_testing(), ); - // TM should output the transaction eventually now that the receiving object has become - // available. + // TM should output the transaction eventually now that the receiving object has + // become available. rx_ready_certificates.recv().await.unwrap(); } @@ -665,8 +676,8 @@ async fn transaction_manager_receiving_object_ready_if_current_version_greater() gas_objects.push(receiving_object.clone()); let state = init_state_with_objects(gas_objects.clone()).await; - // Create a new transaction manager instead of reusing the authority's, to examine - // transaction_manager output from rx_ready_certificates. + // Create a new transaction manager instead of reusing the authority's, to + // examine transaction_manager output from rx_ready_certificates. let (transaction_manager, mut rx_ready_certificates) = make_transaction_manager(&state); // TM should output no transaction. assert!(rx_ready_certificates.try_recv().is_err()); diff --git a/crates/sui-core/src/unit_tests/transaction_tests.rs b/crates/sui-core/src/unit_tests/transaction_tests.rs index 0c577939e50..cc92f2fc403 100644 --- a/crates/sui-core/src/unit_tests/transaction_tests.rs +++ b/crates/sui-core/src/unit_tests/transaction_tests.rs @@ -1,12 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::ops::Deref; + use fastcrypto::{ed25519::Ed25519KeyPair, traits::KeyPair}; use fastcrypto_zkp::bn254::zk_login::{parse_jwks, OIDCProvider, ZkLoginInputs}; use mysten_network::Multiaddr; use rand::{rngs::StdRng, SeedableRng}; use shared_crypto::intent::{Intent, IntentMessage}; -use std::ops::Deref; +use sui_macros::sim_test; use sui_types::{ authenticator_state::ActiveJwk, base_types::dbg_addr, @@ -23,8 +25,6 @@ use sui_types::{ zk_login_util::DEFAULT_JWK_BYTES, }; -use sui_macros::sim_test; - macro_rules! assert_matches { ($expression:expr, $pattern:pat $(if $guard: expr)?) => { match $expression { @@ -39,18 +39,17 @@ macro_rules! 
assert_matches { }; } +use fastcrypto::traits::AggregateAuthenticator; +use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; + +use super::*; +pub use crate::authority::authority_test_utils::init_state_with_ids; use crate::{ authority_client::{AuthorityAPI, NetworkAuthorityClient}, authority_server::{AuthorityServer, AuthorityServerHandle}, stake_aggregator::{InsertResult, StakeAggregator}, }; -use super::*; -use fastcrypto::traits::AggregateAuthenticator; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; - -pub use crate::authority::authority_test_utils::init_state_with_ids; - #[sim_test] async fn test_handle_transfer_transaction_bad_signature() { do_transaction_test( @@ -403,7 +402,8 @@ async fn do_transaction_test_impl( check_locks(authority_state.clone(), vec![object_id]).await; - // now verify that the same transaction is rejected if a false certificate is somehow formed and sent + // now verify that the same transaction is rejected if a false certificate is + // somehow formed and sent if check_forged_cert { let epoch_store = authority_state.epoch_store_for_testing(); let signed_transaction = VerifiedSignedTransaction::new( @@ -497,16 +497,14 @@ async fn zklogin_test_cached_proof_wrong_key() { let res = client.handle_transaction(transfer_transaction).await; assert!(res.is_ok()); - /* - assert_eq!( - epoch_store - .signature_verifier - .metrics - .zklogin_inputs_cache_misses - .get(), - 1 - ); - */ + // assert_eq!( + // epoch_store + // .signature_verifier + // .metrics + // .zklogin_inputs_cache_misses + // .get(), + // 1 + // ); let (skp, _eph_pk, zklogin) = &load_test_vectors("../sui-types/src/unit_tests/zklogin_test_vectors.json")[1]; @@ -543,23 +541,24 @@ async fn zklogin_test_cached_proof_wrong_key() { _ => panic!(), } - // This tx should fail, but passes because we skip the ephemeral sig check when hitting the zklogin check! - assert!(client - .handle_transaction(transfer_transaction2) - .await - .is_err()); + // This tx should fail, but passes because we skip the ephemeral sig check when + // hitting the zklogin check! + assert!( + client + .handle_transaction(transfer_transaction2) + .await + .is_err() + ); // TODO: re-enable when cache is re-enabled. - /* - assert_eq!( - epoch_store - .signature_verifier - .metrics - .zklogin_inputs_cache_hits - .get(), - 1 - ); - */ + // assert_eq!( + // epoch_store + // .signature_verifier + // .metrics + // .zklogin_inputs_cache_hits + // .get(), + // 1 + // ); assert_eq!(metrics.signature_errors.get(), 1); @@ -585,22 +584,22 @@ async fn do_zklogin_transaction_test( post_sign_mutations(&mut transfer_transaction); - assert!(client - .handle_transaction(transfer_transaction) - .await - .is_err()); + assert!( + client + .handle_transaction(transfer_transaction) + .await + .is_err() + ); // TODO: re-enable when cache is re-enabled. 
- /* - assert_eq!( - epoch_store - .signature_verifier - .metrics - .zklogin_inputs_cache_misses - .get(), - 1 - ); - */ + // assert_eq!( + // epoch_store + // .signature_verifier + // .metrics + // .zklogin_inputs_cache_misses + // .get(), + // 1 + // ); assert_eq!(metrics.signature_errors.get(), expected_sig_errors); @@ -614,14 +613,16 @@ async fn check_locks(authority_state: Arc, object_ids: Vec = signatures.into_values().collect(); diff --git a/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs b/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs index 035f6f5197d..2806be49577 100644 --- a/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs +++ b/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs @@ -3,6 +3,7 @@ use std::{collections::HashSet, sync::Arc}; +use move_core_types::ident_str; use sui_types::{ base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}, crypto::{get_key_pair, AccountKeyPair}, @@ -31,15 +32,16 @@ use crate::{ }, move_call, }; -use move_core_types::ident_str; -// The primary use for these tests is to make sure the generated effect sets match what we expect -// when receiving an object, and if we then perform different types of operations on the received -// object (e.g., deleting, wrapping, unwrapping, adding as a dynamic field, etc.) and various -// combinations of that. Some of these tests also check and validate locking behavior around -// receiving object arguments as well. +// The primary use for these tests is to make sure the generated effect sets +// match what we expect when receiving an object, and if we then perform +// different types of operations on the received object (e.g., deleting, +// wrapping, unwrapping, adding as a dynamic field, etc.) and various +// combinations of that. Some of these tests also check and validate locking +// behavior around receiving object arguments as well. -// Run the test twice -- once with aggressive pruning enabled, and the other with it not enabled. +// Run the test twice -- once with aggressive pruning enabled, and the other +// with it not enabled. macro_rules! transfer_test_runner { (gas_objects: $num:expr, $expr:expr) => { let runner = TestRunner::new_with_objects("tto", $num, false).await; @@ -95,7 +97,8 @@ impl TestRunner { &sender_key, &gas_object_ids[0], base_package_name, - /* with_unpublished_deps */ false, + // with_unpublished_deps + false, ) .await; @@ -232,7 +235,8 @@ impl TestRunner { if shared { send_consensus(&self.authority_state, &ct).await; } - // Call `execute_certificate` instead of `execute_certificate_with_execution_error` to make sure we go through TM + // Call `execute_certificate` instead of + // `execute_certificate_with_execution_error` to make sure we go through TM let effects = self .authority_state .execute_certificate(&ct, &epoch_store) @@ -256,8 +260,8 @@ impl TestRunner { fn get_parent_and_child( created: Vec<(ObjectRef, Owner)>, ) -> ((ObjectRef, Owner), (ObjectRef, Owner)) { - // make sure there is an object with an `AddressOwner` who matches the object ID of another - // object. + // make sure there is an object with an `AddressOwner` who matches the object ID + // of another object. let created_addrs: HashSet<_> = created.iter().map(|((i, _, _), _)| i).collect(); let (child, parent_id) = created .iter() @@ -903,12 +907,13 @@ async fn test_tto_unwrap_add_as_dynamic_field() { // This test does this by // 1. Creating a parent object and child object // 2. Creating a fake parent object -// 3. 
Create and sign a transaction `tx1` that tries to receive the child object using -// the fake parent. -// 4. Create and sign a transaction `tx2` that receives the child object using the valid parent -// object. +// 3. Create and sign a transaction `tx1` that tries to receive the child object +// using the fake parent. +// 4. Create and sign a transaction `tx2` that receives the child object using +// the valid parent object. // 5. Execute `tx2` and verify that it can be executed successfully. -// 6. Execute `tx1` and verify that it can be executed, but will result in a Move abort. +// 6. Execute `tx1` and verify that it can be executed, but will result in a +// Move abort. // The order of steps 5 and 6 are swapped if `flipper` is `true`. // The object is deleted instead of received if `should_delete` is `true`. async fn verify_tto_not_locked( @@ -932,7 +937,7 @@ async fn verify_tto_not_locked( let fake_parent = *effects .created() .iter() - .find(|(obj_ref, _)| obj_ref.0 != parent.0 .0 && obj_ref.0 != child.0 .0) + .find(|(obj_ref, _)| obj_ref.0 != parent.0.0 && obj_ref.0 != child.0.0) .unwrap(); // Now get a certificate for fake_parent/child1. This will lock input objects. @@ -956,9 +961,9 @@ async fn verify_tto_not_locked( ) .await; - // After the other (fake) transaction has been created and signed, sign and execute this - // transaction. This should have no issues because the receiving object is not locked by the - // signing of the transaction above. + // After the other (fake) transaction has been created and signed, sign and + // execute this transaction. This should have no issues because the + // receiving object is not locked by the signing of the transaction above. let valid_cert = runner .lock_and_verify_transaction( { @@ -976,8 +981,8 @@ async fn verify_tto_not_locked( ) .await; - // The order of the execution of these transactions is flipped depending on the value of - // flipper. However, the result should be the same in either case. + // The order of the execution of these transactions is flipped depending on the + // value of flipper. However, the result should be the same in either case. let (valid_effects, invalid_effects) = if flipper { let invalid_effects = runner .execute_certificate(cert_for_fake_parent, false) @@ -1027,8 +1032,8 @@ fn assert_effects_equivalent(ef1: &TransactionEffects, ef2: &TransactionEffects) #[tokio::test] async fn test_tto_not_locked() { for aggressive_pruning_enabled in [true, false] { - // The transaction effects for the valid and invalid transactions should be the same regardless - // of the order in which they are run. + // The transaction effects for the valid and invalid transactions should be the + // same regardless of the order in which they are run. 
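A note on the `AddressOwner`-matching rule that `get_parent_and_child` above relies on: `SuiAddress` and `ObjectID` share the same 32-byte representation, so "transferred to an object" shows up as an address owner equal to the parent's ID. A hedged sketch of that check (the predicate name is illustrative):

```rust
use sui_types::{
    base_types::{ObjectID, SuiAddress},
    object::Owner,
};

// Illustrative predicate: true when `owner` is an AddressOwner whose address
// is exactly the parent object's ID, i.e. the object was sent to `parent_id`.
fn is_owned_by_object(owner: &Owner, parent_id: ObjectID) -> bool {
    matches!(owner, Owner::AddressOwner(addr) if *addr == SuiAddress::from(parent_id))
}
```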
let (valid1, invalid1) = verify_tto_not_locked(false, false, aggressive_pruning_enabled).await; let (valid2, invalid2) = diff --git a/crates/sui-core/src/unit_tests/type_param_tests.rs b/crates/sui-core/src/unit_tests/type_param_tests.rs index 655ee034b3b..9942c4590e4 100644 --- a/crates/sui-core/src/unit_tests/type_param_tests.rs +++ b/crates/sui-core/src/unit_tests/type_param_tests.rs @@ -3,17 +3,16 @@ use std::str::FromStr; -use crate::authority::{ - authority_tests::{call_move, init_state_with_ids, TestCallArg}, - move_integration_tests::build_and_publish_test_package, -}; - use move_core_types::language_storage::TypeTag; - -use sui_types::effects::TransactionEffectsAPI; use sui_types::{ base_types::ObjectID, crypto::{get_key_pair, AccountKeyPair}, + effects::TransactionEffectsAPI, +}; + +use crate::authority::{ + authority_tests::{call_move, init_state_with_ids, TestCallArg}, + move_integration_tests::build_and_publish_test_package, }; #[tokio::test] @@ -29,7 +28,8 @@ async fn test_same_module_type_param() { &sender_key, &gas, "type_params", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -50,7 +50,7 @@ async fn test_same_module_type_param() { .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let type_param = TypeTag::from_str(format!("{}::m1::Object", package.0).as_str()).unwrap(); let effects = call_move( @@ -86,7 +86,8 @@ async fn test_different_module_type_param() { &sender_key, &gas, "type_params", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -107,7 +108,7 @@ async fn test_different_module_type_param() { .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let type_param = TypeTag::from_str(format!("{}::m2::AnotherObject", package.0).as_str()).unwrap(); @@ -145,7 +146,8 @@ async fn test_nested_type_param() { &sender_key, &gas, "type_params", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -166,7 +168,7 @@ async fn test_nested_type_param() { .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let type_param = TypeTag::from_str( format!( "{}::m1::GenObject<{}::m2::AnotherObject>", @@ -210,7 +212,8 @@ async fn test_nested_type_param_different_module() { &sender_key, &gas, "type_params", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -231,7 +234,7 @@ async fn test_nested_type_param_different_module() { .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let type_param = TypeTag::from_str( format!( "{}::m1::GenObject<{}::m2::AnotherObject>", @@ -275,7 +278,8 @@ async fn test_different_package_type_param() { &sender_key, &gas, "type_params", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -285,7 +289,8 @@ async fn test_different_package_type_param() { &sender_key, &gas, "type_params_extra", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -306,7 +311,7 @@ async fn test_different_package_type_param() { .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let type_param = TypeTag::from_str(format!("{}::m2::AnotherObject", package.0).as_str()).unwrap(); @@ -344,7 +349,8 @@ async fn test_nested_type_param_different_package() { 
&sender_key, &gas, "type_params", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -354,7 +360,8 @@ async fn test_nested_type_param_different_package() { &sender_key, &gas, "type_params_extra", - /* with_unpublished_deps */ true, + // with_unpublished_deps + true, ) .await; @@ -375,7 +382,7 @@ async fn test_nested_type_param_different_package() { .await .unwrap(); - let created_object_id = effects.created()[0].0 .0; + let created_object_id = effects.created()[0].0.0; let type_param = TypeTag::from_str( format!( "{}::m1::GenObject<{}::m2::AnotherObject>", diff --git a/crates/sui-core/src/verify_indexes.rs b/crates/sui-core/src/verify_indexes.rs index 38b4fd2dcd2..804cc318529 100644 --- a/crates/sui-core/src/verify_indexes.rs +++ b/crates/sui-core/src/verify_indexes.rs @@ -11,8 +11,9 @@ use typed_store::traits::Map; use crate::{authority::authority_store_tables::LiveObject, state_accumulator::AccumulatorStore}; -/// This is a very expensive function that verifies some of the secondary indexes. This is done by -/// iterating through the live object set and recalculating these secodary indexes. +/// This is a very expensive function that verifies some of the secondary +/// indexes. This is done by iterating through the live object set and +/// recalculating these secondary indexes. pub fn verify_indexes(store: &dyn AccumulatorStore, indexes: Arc<IndexStore>) -> Result<()> { info!("Begin running index verification checks"); @@ -56,7 +57,9 @@ pub fn verify_indexes(store: &dyn AccumulatorStore, indexes: Arc<IndexStore>) -> })?; if calculated_info != info { - bail!("owner_index: entry {key:?} is different: expected {calculated_info:?} found {info:?}"); + bail!( + "owner_index: entry {key:?} is different: expected {calculated_info:?} found {info:?}" + ); } } @@ -75,7 +78,9 @@ pub fn verify_indexes(store: &dyn AccumulatorStore, indexes: Arc<IndexStore>) -> })?; if calculated_info != info { - bail!("coin_index: entry {key:?} is different: expected {calculated_info:?} found {info:?}"); + bail!( + "coin_index: entry {key:?} is different: expected {calculated_info:?} found {info:?}" + ); } } tracing::info!("Coin index is good"); diff --git a/crates/sui-core/tests/format.rs b/crates/sui-core/tests/format.rs index ad4facf59b8..637ed35b43f 100644 --- a/crates/sui-core/tests/format.rs +++ b/crates/sui-core/tests/format.rs @@ -5,8 +5,9 @@ #[test] #[cfg_attr(msim, ignore)] fn test_format() { - // If this test breaks and you intended a format change, you need to run to get the fresh format: - // # cargo -q run --example generate-format -- print > crates/sui-core/tests/staged/sui.yaml + // If this test breaks and you intended a format change, run the following to + // get the fresh format: # cargo -q run --example generate-format -- print > + // crates/sui-core/tests/staged/sui.yaml let status = std::process::Command::new("cargo") .current_dir("..") diff --git a/crates/sui-cost/tests/empirical_transaction_cost.rs b/crates/sui-cost/tests/empirical_transaction_cost.rs index 9bafb1556d5..9466164549c 100644 --- a/crates/sui-cost/tests/empirical_transaction_cost.rs +++ b/crates/sui-cost/tests/empirical_transaction_cost.rs @@ -1,25 +1,23 @@ // Copyright (c) Mysten Labs, Inc.
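The index verification above follows one pattern per table: recompute each entry from the live object set, compare, and `bail!` with both values on mismatch. A reduced sketch of that shape (the generic helper is illustrative, not the full loop):

```rust
use std::fmt::Debug;

use anyhow::{bail, Result};

// Illustrative reduction of the per-entry check in verify_indexes: recompute,
// compare, and bail with both sides so the failing entry is easy to triage.
fn check_entry<K: Debug, V: Debug + PartialEq>(key: K, calculated_info: V, info: V) -> Result<()> {
    if calculated_info != info {
        bail!("owner_index: entry {key:?} is different: expected {calculated_info:?} found {info:?}");
    }
    Ok(())
}
```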
// SPDX-License-Identifier: Apache-2.0 +use std::{collections::BTreeMap, path::PathBuf}; + use insta::assert_json_snapshot; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, path::PathBuf}; -use strum_macros::Display; -use strum_macros::EnumString; +use strum_macros::{Display, EnumString}; use sui_json_rpc_types::SuiTransactionBlockEffectsAPI; use sui_swarm_config::genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT}; -use sui_test_transaction_builder::publish_basics_package_and_make_counter; -use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::{ObjectRef, SuiAddress}; -use sui_types::coin::PAY_JOIN_FUNC_NAME; -use sui_types::coin::PAY_MODULE_NAME; -use sui_types::coin::PAY_SPLIT_VEC_FUNC_NAME; -use sui_types::gas_coin::GAS; -use sui_types::transaction::TransactionData; -use sui_types::SUI_FRAMEWORK_PACKAGE_ID; +use sui_test_transaction_builder::{ + publish_basics_package_and_make_counter, TestTransactionBuilder, +}; use sui_types::{ + base_types::{ObjectRef, SuiAddress}, + coin::{PAY_JOIN_FUNC_NAME, PAY_MODULE_NAME, PAY_SPLIT_VEC_FUNC_NAME}, gas::GasCostSummary, - transaction::{CallArg, ObjectArg}, + gas_coin::GAS, + transaction::{CallArg, ObjectArg, TransactionData}, + SUI_FRAMEWORK_PACKAGE_ID, }; use test_cluster::{TestCluster, TestClusterBuilder}; @@ -50,8 +48,8 @@ impl CommonTransactionCosts { const TEST_DATA_DIR: &str = "tests/data/"; -// Execute every entry function in Move framework and examples and ensure costs don't change -// To review snapshot changes, and fix snapshot differences, +// Execute every entry function in Move framework and examples and ensure costs +// don't change. To review snapshot changes and fix snapshot differences: // 0. Install cargo-insta // 1. Run `cargo insta test --review` under `./sui-cost`. // 2. Review, accept or reject changes. @@ -95,8 +93,8 @@ async fn split_n_tx( async fn create_txes( test_cluster: &TestCluster, ) -> BTreeMap<CommonTransactionCosts, TransactionData> { - // Initial preparations to create a shared counter. This needs to be done first to not interfere - // with the use of gas objects in the rest of this function. + // Initial preparations to create a shared counter. This needs to be done first + // to not interfere with the use of gas objects in the rest of this + // function.
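The review flow in steps 0-2 above is the standard `insta` snapshot loop; a minimal sketch of the assertion these cost tests build up to (the map contents here are stand-ins, not real costs):

```rust
use std::collections::BTreeMap;

use insta::assert_json_snapshot;

#[test]
fn costs_do_not_drift() {
    // Stand-in values; the real test snapshots a map of GasCostSummary per
    // common transaction kind. Any drift fails the test until reviewed with
    // `cargo insta test --review`.
    let costs: BTreeMap<&str, u64> = BTreeMap::from([("Publish", 1_000), ("MergeCoin", 500)]);
    assert_json_snapshot!(costs);
}
```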
let (counter_package, counter) = publish_basics_package_and_make_counter(&test_cluster.wallet).await; let counter_package_id = counter_package.0; @@ -106,7 +105,6 @@ async fn create_txes( let (sender, mut gas_objects) = test_cluster.wallet.get_one_account().await.unwrap(); let gas_price = test_cluster.get_reference_gas_price().await; - // // Publish // let mut package_path = PathBuf::from(TEST_DATA_DIR); @@ -116,7 +114,6 @@ async fn create_txes( .build(); ret.insert(CommonTransactionCosts::Publish, publish_tx); - // // Transfer Whole Sui Coin and Transfer Portion of Sui Coin // let whole_sui_coin_tx = @@ -136,7 +133,6 @@ async fn create_txes( partial_sui_coin_tx, ); - // // Transfer Whole Coin Object // let whole_coin_tx = TestTransactionBuilder::new(sender, gas_objects.pop().unwrap(), gas_price) @@ -145,7 +141,6 @@ async fn create_txes( ret.insert(CommonTransactionCosts::TransferWholeCoin, whole_coin_tx); - // // Merge Two Coins // let c1 = gas_objects.pop().unwrap(); @@ -165,9 +160,9 @@ async fn create_txes( .build(); ret.insert(CommonTransactionCosts::MergeCoin, merge_tx); - // // Split A Coin Into N Specific Amounts - // Note splitting complexity does not depend on the amounts but only on the number of amounts + // Note splitting complexity does not depend on the amounts but only on the + // number of amounts // for n in 0..4 { let gas = gas_objects.pop().unwrap(); @@ -176,7 +171,6 @@ async fn create_txes( ret.insert(CommonTransactionCosts::SplitCoin(n as usize), split_tx); } - // // Shared Object Section // Using the `counter` example // @@ -222,8 +216,8 @@ async fn create_txes( ret } -async fn run_actual_costs( -) -> Result, anyhow::Error> { +async fn run_actual_costs() +-> Result, anyhow::Error> { let mut ret = BTreeMap::new(); let test_cluster = TestClusterBuilder::new() .with_accounts(vec![AccountConfig { diff --git a/crates/sui-data-ingestion-core/src/executor.rs b/crates/sui-data-ingestion-core/src/executor.rs index 92bfda00685..c75a27b44ed 100644 --- a/crates/sui-data-ingestion-core/src/executor.rs +++ b/crates/sui-data-ingestion-core/src/executor.rs @@ -1,23 +1,23 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::progress_store::{ - ExecutorProgress, ProgressStore, ProgressStoreWrapper, ShimProgressStore, -}; -use crate::reader::CheckpointReader; -use crate::worker_pool::WorkerPool; -use crate::Worker; -use crate::{DataIngestionMetrics, ReaderOptions}; +use std::{path::PathBuf, pin::Pin}; + use anyhow::Result; use futures::Future; use mysten_metrics::spawn_monitored_task; use prometheus::Registry; -use std::path::PathBuf; -use std::pin::Pin; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use tokio::sync::mpsc; -use tokio::sync::oneshot; +use sui_types::{ + full_checkpoint_content::CheckpointData, messages_checkpoint::CheckpointSequenceNumber, +}; +use tokio::sync::{mpsc, oneshot}; + +use crate::{ + progress_store::{ExecutorProgress, ProgressStore, ProgressStoreWrapper, ShimProgressStore}, + reader::CheckpointReader, + worker_pool::WorkerPool, + DataIngestionMetrics, ReaderOptions, Worker, +}; pub const MAX_CHECKPOINTS_IN_PROGRESS: usize = 10000; diff --git a/crates/sui-data-ingestion-core/src/lib.rs b/crates/sui-data-ingestion-core/src/lib.rs index bdf7688301c..0cf5ec288cd 100644 --- a/crates/sui-data-ingestion-core/src/lib.rs +++ b/crates/sui-data-ingestion-core/src/lib.rs @@ -16,18 +16,20 @@ pub use executor::{setup_single_workflow, IndexerExecutor, MAX_CHECKPOINTS_IN_PR pub use metrics::DataIngestionMetrics; pub use progress_store::{FileProgressStore, ProgressStore}; pub use reader::ReaderOptions; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use sui_types::{ + full_checkpoint_content::CheckpointData, messages_checkpoint::CheckpointSequenceNumber, +}; pub use util::create_remote_store_client; pub use worker_pool::WorkerPool; #[async_trait] pub trait Worker: Send + Sync { async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()>; - /// Optional method. Allows controlling when workflow progress is updated in the progress store. - /// For instance, some pipelines may benefit from aggregating checkpoints, thus skipping - /// the saving of updates for intermediate checkpoints. - /// The default implementation is to update the progress store for every processed checkpoint. + /// Optional method. Allows controlling when workflow progress is updated in + /// the progress store. For instance, some pipelines may benefit from + /// aggregating checkpoints, thus skipping the saving of updates for + /// intermediate checkpoints. The default implementation is to update + /// the progress store for every processed checkpoint. async fn save_progress( &self, sequence_number: CheckpointSequenceNumber, diff --git a/crates/sui-data-ingestion-core/src/progress_store/file.rs b/crates/sui-data-ingestion-core/src/progress_store/file.rs index b9a9b3f2457..d37bf018107 100644 --- a/crates/sui-data-ingestion-core/src/progress_store/file.rs +++ b/crates/sui-data-ingestion-core/src/progress_store/file.rs @@ -1,13 +1,15 @@ // Copyright (c) Mysten Labs, Inc. 
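Given the `save_progress` hook documented above, a hedged sketch of a `Worker` that batches watermark updates. The `Option<CheckpointSequenceNumber>` return type is an assumption here (the hunk truncates the signature), with `None` read as "skip persisting for this checkpoint":

```rust
use anyhow::Result;
use async_trait::async_trait;
use sui_data_ingestion_core::Worker;
use sui_types::{
    full_checkpoint_content::CheckpointData, messages_checkpoint::CheckpointSequenceNumber,
};

struct BatchingWorker;

#[async_trait]
impl Worker for BatchingWorker {
    async fn process_checkpoint(&self, _checkpoint: CheckpointData) -> Result<()> {
        // Per-checkpoint work (e.g. aggregation) would go here.
        Ok(())
    }

    // Assumed signature and semantics: persist the watermark only every 1000
    // checkpoints, skipping updates for the intermediate ones.
    async fn save_progress(
        &self,
        sequence_number: CheckpointSequenceNumber,
    ) -> Option<CheckpointSequenceNumber> {
        (sequence_number % 1000 == 0).then_some(sequence_number)
    }
}
```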
// SPDX-License-Identifier: Apache-2.0 -use crate::progress_store::ProgressStore; +use std::path::PathBuf; + use anyhow::Result; use async_trait::async_trait; use serde_json::{Number, Value}; -use std::path::PathBuf; use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use crate::progress_store::ProgressStore; + pub struct FileProgressStore { path: PathBuf, } diff --git a/crates/sui-data-ingestion-core/src/progress_store/mod.rs b/crates/sui-data-ingestion-core/src/progress_store/mod.rs index c7358eb1175..0f3a9e86b52 100644 --- a/crates/sui-data-ingestion-core/src/progress_store/mod.rs +++ b/crates/sui-data-ingestion-core/src/progress_store/mod.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::collections::HashMap; + use anyhow::Result; use async_trait::async_trait; -use std::collections::HashMap; use sui_types::messages_checkpoint::CheckpointSequenceNumber; mod file; pub use file::FileProgressStore; diff --git a/crates/sui-data-ingestion-core/src/reader.rs b/crates/sui-data-ingestion-core/src/reader.rs index 4f6dc035d8c..bcdde67fb1e 100644 --- a/crates/sui-data-ingestion-core/src/reader.rs +++ b/crates/sui-data-ingestion-core/src/reader.rs @@ -1,30 +1,27 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::create_remote_store_client; -use crate::executor::MAX_CHECKPOINTS_IN_PROGRESS; +use std::{ffi::OsString, fs, path::PathBuf, time::Duration}; + use anyhow::Result; use backoff::backoff::Backoff; use futures::StreamExt; use mysten_metrics::spawn_monitored_task; -use notify::RecursiveMode; -use notify::Watcher; -use object_store::path::Path; -use object_store::ObjectStore; -use std::ffi::OsString; -use std::fs; -use std::path::PathBuf; -use std::time::Duration; +use notify::{RecursiveMode, Watcher}; +use object_store::{path::Path, ObjectStore}; use sui_storage::blob::Blob; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use sui_types::{ + full_checkpoint_content::CheckpointData, messages_checkpoint::CheckpointSequenceNumber, +}; use tap::pipe::Pipe; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::TryRecvError; -use tokio::sync::oneshot; -use tokio::time::timeout; +use tokio::{ + sync::{mpsc, mpsc::error::TryRecvError, oneshot}, + time::timeout, +}; use tracing::{debug, error, info}; +use crate::{create_remote_store_client, executor::MAX_CHECKPOINTS_IN_PROGRESS}; + /// Implements a checkpoint reader that monitors a local directory. /// Designed for setups where the indexer daemon is colocated with FN. /// This implementation is push-based and utilizes the inotify API. @@ -60,7 +57,8 @@ impl Default for ReaderOptions { impl CheckpointReader { /// Represents a single iteration of the reader. - /// Reads files in a local directory, validates them, and forwards `CheckpointData` to the executor. + /// Reads files in a local directory, validates them, and forwards + /// `CheckpointData` to the executor. async fn read_local_files(&self) -> Result> { let mut files = vec![]; for entry in fs::read_dir(self.path.clone())? { @@ -194,7 +192,9 @@ impl CheckpointReader { info!( "Local reader. 
Current checkpoint number: {}, pruning watermark: {}, unprocessed checkpoints: {:?}", - self.current_checkpoint_number, self.last_pruned_watermark, checkpoints.len(), + self.current_checkpoint_number, + self.last_pruned_watermark, + checkpoints.len(), ); for checkpoint in checkpoints { assert_eq!( diff --git a/crates/sui-data-ingestion-core/src/tests.rs b/crates/sui-data-ingestion-core/src/tests.rs index 47bd7fbdfc0..1037a430c36 100644 --- a/crates/sui-data-ingestion-core/src/tests.rs +++ b/crates/sui-data-ingestion-core/src/tests.rs @@ -1,28 +1,31 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::progress_store::ExecutorProgress; -use crate::{DataIngestionMetrics, FileProgressStore, IndexerExecutor, WorkerPool}; -use crate::{ReaderOptions, Worker}; +use std::{path::PathBuf, time::Duration}; + use anyhow::Result; use async_trait::async_trait; use prometheus::Registry; -use rand::prelude::StdRng; -use rand::SeedableRng; -use std::path::PathBuf; -use std::time::Duration; +use rand::{prelude::StdRng, SeedableRng}; use sui_storage::blob::{Blob, BlobEncoding}; -use sui_types::crypto::KeypairTraits; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::gas::GasCostSummary; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointContents, CheckpointSequenceNumber, CheckpointSummary, - SignedCheckpointSummary, +use sui_types::{ + crypto::KeypairTraits, + full_checkpoint_content::CheckpointData, + gas::GasCostSummary, + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointContents, CheckpointSequenceNumber, + CheckpointSummary, SignedCheckpointSummary, + }, + utils::make_committee_key, }; -use sui_types::utils::make_committee_key; use tempfile::NamedTempFile; use tokio::sync::oneshot; +use crate::{ + progress_store::ExecutorProgress, DataIngestionMetrics, FileProgressStore, IndexerExecutor, + ReaderOptions, Worker, WorkerPool, +}; + async fn add_worker_pool( indexer: &mut IndexerExecutor, worker: W, diff --git a/crates/sui-data-ingestion-core/src/util.rs b/crates/sui-data-ingestion-core/src/util.rs index 3feb3a8bc3a..a37a7c4450f 100644 --- a/crates/sui-data-ingestion-core/src/util.rs +++ b/crates/sui-data-ingestion-core/src/util.rs @@ -1,12 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{str::FromStr, time::Duration}; + use anyhow::Result; -use object_store::aws::AmazonS3ConfigKey; -use object_store::gcp::GoogleConfigKey; -use object_store::{ClientOptions, ObjectStore, RetryConfig}; -use std::str::FromStr; -use std::time::Duration; +use object_store::{ + aws::AmazonS3ConfigKey, gcp::GoogleConfigKey, ClientOptions, ObjectStore, RetryConfig, +}; use url::Url; pub fn create_remote_store_client( diff --git a/crates/sui-data-ingestion-core/src/worker_pool.rs b/crates/sui-data-ingestion-core/src/worker_pool.rs index aedefc09364..c261fe25631 100644 --- a/crates/sui-data-ingestion-core/src/worker_pool.rs +++ b/crates/sui-data-ingestion-core/src/worker_pool.rs @@ -1,18 +1,21 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::executor::MAX_CHECKPOINTS_IN_PROGRESS; -use crate::Worker; +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + sync::Arc, + time::Instant, +}; + use mysten_metrics::spawn_monitored_task; -use std::collections::{BTreeSet, HashMap, VecDeque}; -use std::sync::Arc; -use std::time::Instant; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use tokio::sync::mpsc; -use tokio::sync::oneshot; +use sui_types::{ + full_checkpoint_content::CheckpointData, messages_checkpoint::CheckpointSequenceNumber, +}; +use tokio::sync::{mpsc, oneshot}; use tracing::info; +use crate::{executor::MAX_CHECKPOINTS_IN_PROGRESS, Worker}; + pub struct WorkerPool { pub task_name: String, concurrency: usize, diff --git a/crates/sui-data-ingestion/src/main.rs b/crates/sui-data-ingestion/src/main.rs index a55ad5535c5..c909708a23b 100644 --- a/crates/sui-data-ingestion/src/main.rs +++ b/crates/sui-data-ingestion/src/main.rs @@ -1,19 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{env, path::PathBuf}; + use anyhow::Result; use prometheus::Registry; use serde::{Deserialize, Serialize}; -use std::env; -use std::path::PathBuf; use sui_data_ingestion::{ ArchivalConfig, ArchivalWorker, BlobTaskConfig, BlobWorker, DynamoDBProgressStore, KVStoreTaskConfig, KVStoreWorker, }; -use sui_data_ingestion_core::{DataIngestionMetrics, ReaderOptions}; -use sui_data_ingestion_core::{IndexerExecutor, WorkerPool}; -use tokio::signal; -use tokio::sync::oneshot; +use sui_data_ingestion_core::{DataIngestionMetrics, IndexerExecutor, ReaderOptions, WorkerPool}; +use tokio::{signal, sync::oneshot}; #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "lowercase")] diff --git a/crates/sui-data-ingestion/src/progress_store.rs b/crates/sui-data-ingestion/src/progress_store.rs index c8524ccbd53..081af3558c3 100644 --- a/crates/sui-data-ingestion/src/progress_store.rs +++ b/crates/sui-data-ingestion/src/progress_store.rs @@ -1,14 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{str::FromStr, time::Duration}; + use anyhow::Result; use async_trait::async_trait; use aws_config::timeout::TimeoutConfig; -use aws_sdk_dynamodb::types::AttributeValue; -use aws_sdk_dynamodb::Client; +use aws_sdk_dynamodb::{types::AttributeValue, Client}; use aws_sdk_s3::config::{Credentials, Region}; -use std::str::FromStr; -use std::time::Duration; use sui_data_ingestion_core::ProgressStore; use sui_types::messages_checkpoint::CheckpointSequenceNumber; diff --git a/crates/sui-data-ingestion/src/workers/archival.rs b/crates/sui-data-ingestion/src/workers/archival.rs index 04bf5f47518..9363635b276 100644 --- a/crates/sui-data-ingestion/src/workers/archival.rs +++ b/crates/sui-data-ingestion/src/workers/archival.rs @@ -1,27 +1,32 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{ + io::Cursor, + ops::Range, + time::{Duration, Instant}, +}; + use anyhow::Result; use async_trait::async_trait; -use byteorder::BigEndian; -use byteorder::ByteOrder; +use byteorder::{BigEndian, ByteOrder}; use bytes::Bytes; -use object_store::path::Path; -use object_store::ObjectStore; +use object_store::{path::Path, ObjectStore}; use serde::{Deserialize, Serialize}; -use std::io::Cursor; -use std::ops::Range; -use std::time::{Duration, Instant}; use sui_archival::{ create_file_metadata_from_bytes, finalize_manifest, read_manifest_from_bytes, FileType, Manifest, CHECKPOINT_FILE_MAGIC, SUMMARY_FILE_MAGIC, }; use sui_data_ingestion_core::{create_remote_store_client, Worker, MAX_CHECKPOINTS_IN_PROGRESS}; -use sui_storage::blob::{Blob, BlobEncoding}; -use sui_storage::{compress, FileCompression, StorageFormat}; -use sui_types::base_types::{EpochId, ExecutionData}; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::messages_checkpoint::{CheckpointSequenceNumber, FullCheckpointContents}; +use sui_storage::{ + blob::{Blob, BlobEncoding}, + compress, FileCompression, StorageFormat, +}; +use sui_types::{ + base_types::{EpochId, ExecutionData}, + full_checkpoint_content::CheckpointData, + messages_checkpoint::{CheckpointSequenceNumber, FullCheckpointContents}, +}; use tokio::sync::Mutex; #[derive(Serialize, Deserialize, Clone, Debug)] diff --git a/crates/sui-data-ingestion/src/workers/blob.rs b/crates/sui-data-ingestion/src/workers/blob.rs index 24a1b546fee..7c097a36133 100644 --- a/crates/sui-data-ingestion/src/workers/blob.rs +++ b/crates/sui-data-ingestion/src/workers/blob.rs @@ -4,8 +4,7 @@ use anyhow::Result; use async_trait::async_trait; use bytes::Bytes; -use object_store::path::Path; -use object_store::ObjectStore; +use object_store::{path::Path, ObjectStore}; use serde::{Deserialize, Serialize}; use sui_data_ingestion_core::{create_remote_store_client, Worker}; use sui_storage::blob::{Blob, BlobEncoding}; diff --git a/crates/sui-data-ingestion/src/workers/kv_store.rs b/crates/sui-data-ingestion/src/workers/kv_store.rs index 43f96e6f2b0..2e9d67e4330 100644 --- a/crates/sui-data-ingestion/src/workers/kv_store.rs +++ b/crates/sui-data-ingestion/src/workers/kv_store.rs @@ -1,25 +1,28 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet, VecDeque}, + iter::repeat, + time::{Duration, Instant}, +}; + use anyhow::{anyhow, Result}; use async_trait::async_trait; use aws_config::timeout::TimeoutConfig; -use aws_sdk_dynamodb::primitives::Blob; -use aws_sdk_dynamodb::types::{AttributeValue, PutRequest, WriteRequest}; -use aws_sdk_dynamodb::Client; +use aws_sdk_dynamodb::{ + primitives::Blob, + types::{AttributeValue, PutRequest, WriteRequest}, + Client, +}; use aws_sdk_s3 as s3; use aws_sdk_s3::config::{Credentials, Region}; -use backoff::backoff::Backoff; -use backoff::ExponentialBackoff; +use backoff::{backoff::Backoff, ExponentialBackoff}; use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::iter::repeat; -use std::time::{Duration, Instant}; use sui_data_ingestion_core::Worker; use sui_storage::http_key_value_store::TaggedKey; -use sui_types::full_checkpoint_content::CheckpointData; -use sui_types::storage::ObjectKey; +use sui_types::{full_checkpoint_content::CheckpointData, storage::ObjectKey}; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/crates/sui-e2e-tests/tests/checkpoint_tests.rs b/crates/sui-e2e-tests/tests/checkpoint_tests.rs index ff0e5319730..3a2019ab022 100644 --- a/crates/sui-e2e-tests/tests/checkpoint_tests.rs +++ b/crates/sui-e2e-tests/tests/checkpoint_tests.rs @@ -1,14 +1,15 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::sync::Mutex; -use std::time::Duration; -use sui_macros::register_fail_point; -use sui_macros::register_fail_point_if; -use sui_macros::sim_test; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, Mutex, + }, + time::Duration, +}; + +use sui_macros::{register_fail_point, register_fail_point_if, sim_test}; use sui_test_transaction_builder::make_transfer_sui_transaction; use test_cluster::TestClusterBuilder; diff --git a/crates/sui-e2e-tests/tests/coin_deny_list_tests.rs b/crates/sui-e2e-tests/tests/coin_deny_list_tests.rs index dc828b7daad..4adc60c5e7b 100644 --- a/crates/sui-e2e-tests/tests/coin_deny_list_tests.rs +++ b/crates/sui-e2e-tests/tests/coin_deny_list_tests.rs @@ -2,19 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 use std::path::PathBuf; + use sui_core::authority::epoch_start_configuration::EpochStartConfigTrait; -use sui_json_rpc_types::SuiTransactionBlockEffectsAPI; -use sui_json_rpc_types::SuiTransactionBlockKind; -use sui_json_rpc_types::{SuiTransactionBlockDataAPI, SuiTransactionBlockResponseOptions}; +use sui_json_rpc_types::{ + SuiTransactionBlockDataAPI, SuiTransactionBlockEffectsAPI, SuiTransactionBlockKind, + SuiTransactionBlockResponseOptions, +}; use sui_macros::sim_test; -use sui_types::deny_list::RegulatedCoinMetadata; -use sui_types::deny_list::{ - get_coin_deny_list, get_deny_list_obj_initial_shared_version, get_deny_list_root_object, - CoinDenyCap, DenyList, +use sui_types::{ + deny_list::{ + get_coin_deny_list, get_deny_list_obj_initial_shared_version, get_deny_list_root_object, + CoinDenyCap, DenyList, RegulatedCoinMetadata, + }, + id::UID, + storage::ObjectStore, + SUI_DENY_LIST_OBJECT_ID, }; -use sui_types::id::UID; -use sui_types::storage::ObjectStore; -use sui_types::SUI_DENY_LIST_OBJECT_ID; use test_cluster::TestClusterBuilder; #[sim_test] @@ -26,14 +29,16 @@ async fn test_coin_deny_list_creation() { .await; for handle in 
test_cluster.all_node_handles() { handle.with(|node| { - assert!(get_deny_list_obj_initial_shared_version( - node.state().get_object_store().as_ref() - ) - .is_none()); - assert!(!node - .state() - .epoch_store_for_testing() - .coin_deny_list_state_exists()); + assert!( + get_deny_list_obj_initial_shared_version(node.state().get_object_store().as_ref()) + .is_none() + ); + assert!( + !node + .state() + .epoch_store_for_testing() + .coin_deny_list_state_exists() + ); }); } test_cluster.wait_for_epoch_all_nodes(2).await; diff --git a/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs b/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs index 3d0b374dfef..28c77231347 100644 --- a/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs +++ b/crates/sui-e2e-tests/tests/dynamic_committee_tests.rs @@ -1,21 +1,22 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; -use async_trait::async_trait; -use move_core_types::ident_str; -use rand::{rngs::StdRng, Rng, SeedableRng}; use std::{ collections::{BTreeMap, BTreeSet}, sync::Arc, }; + +use anyhow::Result; +use async_trait::async_trait; +use move_core_types::ident_str; +use rand::{rngs::StdRng, Rng, SeedableRng}; use sui_core::authority::AuthorityState; use sui_macros::*; use sui_swarm_config::genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT}; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; use sui_types::{ base_types::{ObjectID, ObjectRef, SuiAddress}, + effects::{TransactionEffects, TransactionEffectsAPI}, object::{Object, Owner}, programmable_transaction_builder::ProgrammableTransactionBuilder, storage::ObjectStore, @@ -201,10 +202,9 @@ impl StressTestRunner { } } - /* - pub fn db(&self) -> Arc<AuthorityStore> { - self.state().db() - }*/ + // pub fn db(&self) -> Arc<AuthorityStore> { + // self.state().db() + // } pub fn state(&self) -> Arc<AuthorityState> { self.test_cluster.fullnode_handle.sui_node.state() } @@ -265,9 +265,10 @@ impl StressTestRunner { } mod add_stake { - use super::*; use sui_types::effects::TransactionEffects; + use super::*; + pub struct RequestAddStakeGen; pub struct RequestAddStake { diff --git a/crates/sui-e2e-tests/tests/full_node_tests.rs b/crates/sui-e2e-tests/tests/full_node_tests.rs index c11e58d25c6..c5c34b5b7f4 100644 --- a/crates/sui-e2e-tests/tests/full_node_tests.rs +++ b/crates/sui-e2e-tests/tests/full_node_tests.rs @@ -1,60 +1,62 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + use futures::future; -use jsonrpsee::core::client::{ClientT, Subscription, SubscriptionClientT}; -use jsonrpsee::rpc_params; -use move_core_types::annotated_value::MoveStructLayout; -use move_core_types::ident_str; -use move_core_types::parser::parse_struct_tag; +use jsonrpsee::{ + core::client::{ClientT, Subscription, SubscriptionClientT}, + rpc_params, +}; +use move_core_types::{annotated_value::MoveStructLayout, ident_str, parser::parse_struct_tag}; use rand::rngs::OsRng; use serde_json::json; -use std::sync::Arc; use sui::client_commands::{SuiClientCommandResult, SuiClientCommands}; use sui_config::node::RunWithRange; use sui_core::authority::EffectsNotifyRead; use sui_json_rpc_types::{ - type_and_fields_from_move_struct, EventPage, SuiEvent, SuiExecutionStatus, + type_and_fields_from_move_struct, EventFilter, EventPage, SuiEvent, SuiExecutionStatus, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, + TransactionFilter, }; -use sui_json_rpc_types::{EventFilter, TransactionFilter}; use sui_keys::keystore::AccountKeystore; use sui_macros::*; use sui_node::SuiNodeHandle; use sui_sdk::wallet_context::WalletContext; -use sui_storage::key_value_store::TransactionKeyValueStore; -use sui_storage::key_value_store_metrics::KeyValueStoreMetrics; +use sui_storage::{ + key_value_store::TransactionKeyValueStore, key_value_store_metrics::KeyValueStoreMetrics, +}; use sui_test_transaction_builder::{ batch_make_transfer_transactions, create_devnet_nft, delete_devnet_nft, increment_counter, publish_basics_package, publish_basics_package_and_make_counter, publish_nfts_package, TestTransactionBuilder, }; use sui_tool::restore_from_db_checkpoint; -use sui_types::base_types::{ObjectID, SuiAddress, TransactionDigest}; -use sui_types::base_types::{ObjectRef, SequenceNumber}; -use sui_types::crypto::{get_key_pair, SuiKeyPair}; -use sui_types::error::{SuiError, UserInputError}; -use sui_types::event::{Event, EventID}; -use sui_types::message_envelope::Message; -use sui_types::messages_grpc::TransactionInfoRequest; -use sui_types::object::{MoveObject, Object, ObjectRead, Owner, PastObjectRead}; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::quorum_driver_types::{ - ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, - QuorumDriverResponse, -}; -use sui_types::storage::ObjectStore; -use sui_types::transaction::{ - CallArg, GasData, TransactionData, TransactionKind, TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS, - TEST_ONLY_GAS_UNIT_FOR_SPLIT_COIN, TEST_ONLY_GAS_UNIT_FOR_TRANSFER, -}; -use sui_types::utils::{ - to_sender_signed_transaction, to_sender_signed_transaction_with_multi_signers, +use sui_types::{ + base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest}, + crypto::{get_key_pair, SuiKeyPair}, + error::{SuiError, UserInputError}, + event::{Event, EventID}, + message_envelope::Message, + messages_grpc::TransactionInfoRequest, + object::{MoveObject, Object, ObjectRead, Owner, PastObjectRead}, + programmable_transaction_builder::ProgrammableTransactionBuilder, + quorum_driver_types::{ + ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, + QuorumDriverResponse, + }, + storage::ObjectStore, + transaction::{ + CallArg, GasData, TransactionData, TransactionKind, TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS, + TEST_ONLY_GAS_UNIT_FOR_SPLIT_COIN, TEST_ONLY_GAS_UNIT_FOR_TRANSFER, + }, 
+ utils::{to_sender_signed_transaction, to_sender_signed_transaction_with_multi_signers}, }; use test_cluster::TestClusterBuilder; -use tokio::sync::Mutex; -use tokio::time::timeout; -use tokio::time::{sleep, Duration}; +use tokio::{ + sync::Mutex, + time::{sleep, timeout, Duration}, +}; use tracing::info; #[sim_test] @@ -77,7 +79,8 @@ async fn test_full_node_follows_txes() -> Result<(), anyhow::Error> { .await .unwrap(); - // A small delay is needed for post processing operations following the transaction to finish. + // A small delay is needed for post processing operations following the + // transaction to finish. sleep(Duration::from_secs(1)).await; // verify that the node has seen the transfer @@ -360,134 +363,135 @@ async fn test_full_node_indexes() -> Result<(), anyhow::Error> { assert_eq!(txes.len(), 0); // This is a poor substitute for the post processing taking some time - // Unfortunately event store writes seem to add some latency so this wait is needed + // Unfortunately event store writes seem to add some latency so this wait is + // needed sleep(Duration::from_millis(1000)).await; - /* // one event is stored, and can be looked up by digest + // // one event is stored, and can be looked up by digest // query by timestamp verifies that a timestamp is inserted, within an hour - let sender_balance_change = BalanceChange { - change_type: BalanceChangeType::Pay, - owner: sender, - coin_type: parse_struct_tag("0x2::sui::SUI").unwrap(), - amount: -100000000000000, - }; - let recipient_balance_change = BalanceChange { - change_type: BalanceChangeType::Receive, - owner: receiver, - coin_type: parse_struct_tag("0x2::sui::SUI").unwrap(), - amount: 100000000000000, - }; - let gas_balance_change = BalanceChange { - change_type: BalanceChangeType::Gas, - owner: sender, - coin_type: parse_struct_tag("0x2::sui::SUI").unwrap(), - amount: (gas_used as i128).neg(), - }; - + // let sender_balance_change = BalanceChange { + // change_type: BalanceChangeType::Pay, + // owner: sender, + // coin_type: parse_struct_tag("0x2::sui::SUI").unwrap(), + // amount: -100000000000000, + // }; + // let recipient_balance_change = BalanceChange { + // change_type: BalanceChangeType::Receive, + // owner: receiver, + // coin_type: parse_struct_tag("0x2::sui::SUI").unwrap(), + // amount: 100000000000000, + // }; + // let gas_balance_change = BalanceChange { + // change_type: BalanceChangeType::Gas, + // owner: sender, + // coin_type: parse_struct_tag("0x2::sui::SUI").unwrap(), + // amount: (gas_used as i128).neg(), + // }; + // // query all events - let all_events = node - .state() - .get_transaction_events( - EventQuery::TimeRange { - start_time: ts.unwrap() - HOUR_MS, - end_time: ts.unwrap() + HOUR_MS, - }, - None, - 100, - false, - ) - .await?; - let all_events = &all_events[all_events.len() - 3..]; - assert_eq!(all_events.len(), 3); - assert_eq!(all_events[0].1.tx_digest, digest); - let all_events = all_events - .iter() - .map(|(_, envelope)| envelope.event.clone()) - .collect::<Vec<_>>(); - assert_eq!(all_events[0], gas_event.clone()); - assert_eq!(all_events[1], sender_event.clone()); - assert_eq!(all_events[2], recipient_event.clone()); - + // let all_events = node + // .state() + // .get_transaction_events( + // EventQuery::TimeRange { + // start_time: ts.unwrap() - HOUR_MS, + // end_time: ts.unwrap() + HOUR_MS, + // }, + // None, + // 100, + // false, + // ) + // .await?; + // let all_events = &all_events[all_events.len() - 3..]; + // assert_eq!(all_events.len(), 3); + // assert_eq!(all_events[0].1.tx_digest,
digest); + // let all_events = all_events + // .iter() + // .map(|(_, envelope)| envelope.event.clone()) + // .collect::<Vec<_>>(); + // assert_eq!(all_events[0], gas_event.clone()); + // assert_eq!(all_events[1], sender_event.clone()); + // assert_eq!(all_events[2], recipient_event.clone()); + // // query by sender - let events_by_sender = node - .state() - .query_events(EventQuery::Sender(sender), None, 10, false) - .await?; - assert_eq!(events_by_sender.len(), 3); - assert_eq!(events_by_sender[0].1.tx_digest, digest); - let events_by_sender = events_by_sender - .into_iter() - .map(|(_, envelope)| envelope.event) - .collect::<Vec<_>>(); - assert_eq!(events_by_sender[0], gas_event.clone()); - assert_eq!(events_by_sender[1], sender_event.clone()); - assert_eq!(events_by_sender[2], recipient_event.clone()); - + // let events_by_sender = node + // .state() + // .query_events(EventQuery::Sender(sender), None, 10, false) + // .await?; + // assert_eq!(events_by_sender.len(), 3); + // assert_eq!(events_by_sender[0].1.tx_digest, digest); + // let events_by_sender = events_by_sender + // .into_iter() + // .map(|(_, envelope)| envelope.event) + // .collect::<Vec<_>>(); + // assert_eq!(events_by_sender[0], gas_event.clone()); + // assert_eq!(events_by_sender[1], sender_event.clone()); + // assert_eq!(events_by_sender[2], recipient_event.clone()); + // // query by tx digest - let events_by_tx = node - .state() - .query_events(EventQuery::Transaction(digest), None, 10, false) - .await?; - assert_eq!(events_by_tx.len(), 3); - assert_eq!(events_by_tx[0].1.tx_digest, digest); - let events_by_tx = events_by_tx - .into_iter() - .map(|(_, envelope)| envelope.event) - .collect::<Vec<_>>(); - assert_eq!(events_by_tx[0], gas_event); - assert_eq!(events_by_tx[1], sender_event.clone()); - assert_eq!(events_by_tx[2], recipient_event.clone()); - + // let events_by_tx = node + // .state() + // .query_events(EventQuery::Transaction(digest), None, 10, false) + // .await?; + // assert_eq!(events_by_tx.len(), 3); + // assert_eq!(events_by_tx[0].1.tx_digest, digest); + // let events_by_tx = events_by_tx + // .into_iter() + // .map(|(_, envelope)| envelope.event) + // .collect::<Vec<_>>(); + // assert_eq!(events_by_tx[0], gas_event); + // assert_eq!(events_by_tx[1], sender_event.clone()); + // assert_eq!(events_by_tx[2], recipient_event.clone()); + // // query by recipient - let events_by_recipient = node - .state() - .query_events( - EventQuery::Recipient(Owner::AddressOwner(receiver)), - None, - 100, - false, - ) - .await?; - assert_eq!(events_by_recipient.last().unwrap().1.tx_digest, digest); - assert_eq!(events_by_recipient.last().unwrap().1.event, recipient_event); - + // let events_by_recipient = node + // .state() + // .query_events( + // EventQuery::Recipient(Owner::AddressOwner(receiver)), + // None, + // 100, + // false, + // ) + // .await?; + // assert_eq!(events_by_recipient.last().unwrap().1.tx_digest, digest); + // assert_eq!(events_by_recipient.last().unwrap().1.event, recipient_event); + // // query by object - let mut events_by_object = node - .state() - .query_events(EventQuery::Object(transferred_object), None, 100, false) - .await?; - let events_by_object = events_by_object.split_off(events_by_object.len() - 2); - assert_eq!(events_by_object.len(), 2); - assert_eq!(events_by_object[0].1.tx_digest, digest); - let events_by_object = events_by_object - .into_iter() - .map(|(_, envelope)| envelope.event) - .collect::<Vec<_>>(); - assert_eq!(events_by_object[0], sender_event.clone()); - assert_eq!(events_by_object[1], recipient_event.clone()); - +
// let mut events_by_object = node + // .state() + // .query_events(EventQuery::Object(transferred_object), None, 100, false) + // .await?; + // let events_by_object = events_by_object.split_off(events_by_object.len() - + // 2); assert_eq!(events_by_object.len(), 2); + // assert_eq!(events_by_object[0].1.tx_digest, digest); + // let events_by_object = events_by_object + // .into_iter() + // .map(|(_, envelope)| envelope.event) + // .collect::<Vec<_>>(); + // assert_eq!(events_by_object[0], sender_event.clone()); + // assert_eq!(events_by_object[1], recipient_event.clone()); + // // query by transaction module // Query by module ID - let events_by_module = node - .state() - .query_events( - EventQuery::MoveModule { - package: SuiFramework::ID, - module: "unused_input_object".to_string(), - }, - None, - 10, - false, - ) - .await?; - assert_eq!(events_by_module[0].1.tx_digest, digest); - let events_by_module = events_by_module - .into_iter() - .map(|(_, envelope)| envelope.event) - .collect::<Vec<_>>(); - assert_eq!(events_by_module.len(), 2); - assert_eq!(events_by_module[0], sender_event); - assert_eq!(events_by_module[1], recipient_event);*/ + // let events_by_module = node + // .state() + // .query_events( + // EventQuery::MoveModule { + // package: SuiFramework::ID, + // module: "unused_input_object".to_string(), + // }, + // None, + // 10, + // false, + // ) + // .await?; + // assert_eq!(events_by_module[0].1.tx_digest, digest); + // let events_by_module = events_by_module + // .into_iter() + // .map(|(_, envelope)| envelope.event) + // .collect::<Vec<_>>(); + // assert_eq!(events_by_module.len(), 2); + // assert_eq!(events_by_module[0], sender_event); + // assert_eq!(events_by_module[1], recipient_event); Ok(()) } @@ -927,9 +931,10 @@ async fn test_validator_node_has_no_transaction_orchestrator() { let node_handle = test_cluster.swarm.validator_node_handles().pop().unwrap(); node_handle.with(|node| { assert!(node.transaction_orchestrator().is_none()); - assert!(node - .subscribe_to_transaction_orchestrator_effects() - .is_err()); + assert!( + node.subscribe_to_transaction_orchestrator_effects() + .is_err() + ); assert!(node.get_google_jwk_bytes().is_ok()); }); } @@ -1190,8 +1195,8 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { let (_transferred_object, _, _, digest, ..) = transfer_coin(&test_cluster.wallet).await?; // Skip the first epoch change from epoch 0 to epoch 1, but wait for the second - // epoch change from epoch 1 to epoch 2 at which point during reconfiguration we will take - // the db snapshot for epoch 1 + // epoch change from epoch 1 to epoch 2 at which point during reconfiguration we + // will take the db snapshot for epoch 1 loop { if checkpoint_path.join("epoch_1").exists() { break; } @@ -1199,7 +1204,8 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { sleep(Duration::from_millis(500)).await; } - // Spin up a new full node restored from the snapshot taken at the end of epoch 1 + // Spin up a new full node restored from the snapshot taken at the end of epoch + // 1 restore_from_db_checkpoint(&config, &checkpoint_path.join("epoch_1")).await?; let node = test_cluster .start_fullnode_from_config(config) @@ -1220,8 +1226,8 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { sleep(Duration::from_millis(500)).await; } - // Ensure this fullnode never processed older epoch (before snapshot) i.e.
epoch_0 store was - // doesn't exist + // Ensure this fullnode never processed older epoch (before snapshot) i.e. + // epoch_0 store was doesn't exist assert!(!epoch_0_db_path.exists()); let (_transferred_object, _, _, digest_after_restore, ..) = @@ -1251,7 +1257,8 @@ async fn test_pass_back_no_object() -> Result<(), anyhow::Error> { .cloned() .unwrap(); - // TODO: this is publishing the wrong package - we should be publishing the one in `sui-core/src/unit_tests/data` instead. + // TODO: this is publishing the wrong package - we should be publishing the one + // in `sui-core/src/unit_tests/data` instead. let package_ref = publish_basics_package(context).await; let gas_obj = context @@ -1274,7 +1281,8 @@ async fn test_pass_back_no_object() -> Result<(), anyhow::Error> { package_ref.0, ident_str!("object_basics").to_owned(), ident_str!("use_clock").to_owned(), - /* type_args */ vec![], + // type_args + vec![], gas_obj, vec![CallArg::CLOCK_IMM], TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS * rgp, @@ -1306,10 +1314,10 @@ async fn test_pass_back_no_object() -> Result<(), anyhow::Error> { #[sim_test] async fn test_access_old_object_pruned() { - // This test checks that when we ask a validator to handle a transaction that uses - // an old object that's already been pruned, it's able to return an non-retriable - // error ObjectVersionUnavailableForConsumption, instead of the retriable error - // ObjectNotFound. + // This test checks that when we ask a validator to handle a transaction that + // uses an old object that's already been pruned, it's able to return an + // non-retriable error ObjectVersionUnavailableForConsumption, instead of + // the retriable error ObjectNotFound. let test_cluster = TestClusterBuilder::new().build().await; let tx_builder = test_cluster.test_transaction_builder().await; let sender = tx_builder.sender(); @@ -1335,11 +1343,13 @@ async fn test_access_old_object_pruned() { let state = validator.get_node_handle().unwrap().state(); state.prune_objects_and_compact_for_testing().await; // Make sure the old version of the object is already pruned. - assert!(state - .get_object_store() - .get_object_by_key(&gas_object.0, gas_object.1) - .unwrap() - .is_none()); + assert!( + state + .get_object_store() + .get_object_by_key(&gas_object.0, gas_object.1) + .unwrap() + .is_none() + ); let epoch_store = state.epoch_store_for_testing(); assert_eq!( state @@ -1360,13 +1370,15 @@ async fn test_access_old_object_pruned() { // Check that fullnode would return the same error. 
let result = test_cluster.wallet.execute_transaction_may_fail(tx).await; - assert!(result.unwrap_err().to_string().contains( - &UserInputError::ObjectVersionUnavailableForConsumption { - provided_obj_ref: gas_object, - current_version: new_gas_version, - } - .to_string() - )) + assert!( + result.unwrap_err().to_string().contains( + &UserInputError::ObjectVersionUnavailableForConsumption { + provided_obj_ref: gas_object, + current_version: new_gas_version, + } + .to_string() + ) + ) } async fn transfer_coin( @@ -1435,11 +1447,13 @@ async fn test_full_node_run_with_range_checkpoint() -> Result<(), anyhow::Error> { })); // we dont want transaction orchestrator enabled when run_with_range != None - assert!(test_cluster - .fullnode_handle - .sui_node - .with(|node| node.transaction_orchestrator()) - .is_none()); + assert!( + test_cluster + .fullnode_handle + .sui_node + .with(|node| node.transaction_orchestrator()) + .is_none() + ); Ok(()) } @@ -1461,28 +1475,35 @@ async fn test_full_node_run_with_range_epoch() -> Result<(), anyhow::Error> { assert_eq!(got_run_with_range, want_run_with_range); // ensure we end up at epoch + 1 - // this is because we execute the target epoch, reconfigure, and then send shutdown signal at - // epoch + 1 - assert!(test_cluster - .fullnode_handle - .sui_node - .with(|node| node.current_epoch_for_testing() == stop_after_epoch + 1)); + // this is because we execute the target epoch, reconfigure, and then send + // shutdown signal at epoch + 1 + assert!( + test_cluster + .fullnode_handle + .sui_node + .with(|node| node.current_epoch_for_testing() == stop_after_epoch + 1) + ); - // epoch duration is 10s for testing, lets sleep long enough that epoch would normally progress + // epoch duration is 10s for testing, let's sleep long enough that epoch would + // normally progress tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; // ensure we are still at epoch + 1 - assert!(test_cluster - .fullnode_handle - .sui_node - .with(|node| node.current_epoch_for_testing() == stop_after_epoch + 1)); + assert!( + test_cluster + .fullnode_handle + .sui_node + .with(|node| node.current_epoch_for_testing() == stop_after_epoch + 1) + ); // we dont want transaction orchestrator enabled when run_with_range != None - assert!(test_cluster - .fullnode_handle - .sui_node - .with(|node| node.transaction_orchestrator()) - .is_none()); + assert!( + test_cluster + .fullnode_handle + .sui_node + .with(|node| node.transaction_orchestrator()) + .is_none() + ); Ok(()) } diff --git a/crates/sui-e2e-tests/tests/multisig_tests.rs b/crates/sui-e2e-tests/tests/multisig_tests.rs index 4de9fabc23f..0f7ed9c7c77 100644 --- a/crates/sui-e2e-tests/tests/multisig_tests.rs +++ b/crates/sui-e2e-tests/tests/multisig_tests.rs @@ -62,8 +62,8 @@ async fn test_upgraded_multisig_feature_allow() { let res = do_upgraded_multisig_test().await; - // we didn't make a real transaction with a valid object, but we verify that we pass the - // feature gate. + // we didn't make a real transaction with a valid object, but we verify that we + // pass the feature gate. assert!(matches!(res.unwrap_err(), SuiError::UserInputError { ..
})); } @@ -114,40 +114,44 @@ async fn test_multisig_e2e() { .transfer_sui(None, SuiAddress::ZERO) .build_and_sign_multisig(multisig_pk.clone(), &[&keys[2], &keys[1]], 0b110); let res = context.execute_transaction_may_fail(tx3).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid sig for pk=AQIOF81ZOeRrGWZBlozXWZELold+J/pz/eOHbbm+xbzrKw==")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid sig for pk=AQIOF81ZOeRrGWZBlozXWZELold+J/pz/eOHbbm+xbzrKw==") + ); // 4. sign with key 0 only is below threshold, fails to execute. let tx4 = TestTransactionBuilder::new(multisig_addr, gas, rgp) .transfer_sui(None, SuiAddress::ZERO) .build_and_sign_multisig(multisig_pk.clone(), &[&keys[0]], 0b001); let res = context.execute_transaction_may_fail(tx4).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Insufficient weight=1 threshold=2")); + assert!( + res.unwrap_err() + .to_string() + .contains("Insufficient weight=1 threshold=2") + ); // 5. multisig with no single sig fails to execute. let tx5 = TestTransactionBuilder::new(multisig_addr, gas, rgp) .transfer_sui(None, SuiAddress::ZERO) .build_and_sign_multisig(multisig_pk.clone(), &[], 0b001); let res = context.execute_transaction_may_fail(tx5).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 6. multisig two dup sigs fails to execute. let tx6 = TestTransactionBuilder::new(multisig_addr, gas, rgp) .transfer_sui(None, SuiAddress::ZERO) .build_and_sign_multisig(multisig_pk.clone(), &[&keys[0], &keys[0]], 0b011); let res = context.execute_transaction_may_fail(tx6).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid ed25519 pk bytes")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid ed25519 pk bytes") + ); // 7. mismatch pks in sig with multisig address fails to execute. let kp3: SuiKeyPair = SuiKeyPair::Secp256r1(get_key_pair().1); @@ -166,10 +170,11 @@ async fn test_multisig_e2e() { .transfer_sui(None, SuiAddress::ZERO) .build_and_sign_multisig(wrong_multisig_pk.clone(), &[&keys[0], &keys[2]], 0b101); let res = context.execute_transaction_may_fail(tx7).await; - assert!(res - .unwrap_err() - .to_string() - .contains(format!("Invalid sig for pk={}", pk3.encode_base64()).as_str())); + assert!( + res.unwrap_err() + .to_string() + .contains(format!("Invalid sig for pk={}", pk3.encode_base64()).as_str()) + ); } #[sim_test] @@ -190,7 +195,8 @@ async fn test_multisig_with_zklogin_scenerios() { let pk1 = keys[1].public(); // secp256k1 let pk2 = keys[2].public(); // secp256r1 - // construct a multisig address with 4 pks (ed25519, secp256k1, secp256r1, zklogin) with threshold = 1. + // construct a multisig address with 4 pks (ed25519, secp256k1, secp256r1, + // zklogin) with threshold = 1. let (eph_kp, _eph_pk, zklogin_inputs) = &load_test_vectors("../sui-types/src/unit_tests/zklogin_test_vectors.json")[1]; let (eph_kp_1, _, _) = @@ -224,10 +230,11 @@ async fn test_multisig_with_zklogin_scenerios() { ); let tx_1 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_1).await; - assert!(res - .unwrap_err() - .to_string() - .contains(format!("Invalid sig for pk={}", pk0.encode_base64()).as_str())); + assert!( + res.unwrap_err() + .to_string() + .contains(format!("Invalid sig for pk={}", pk0.encode_base64()).as_str()) + ); // 2. 
a multisig with a bad secp256k1 sig fails to execute. let wrong_sig_2: GenericSignature = Signature::new_secure(&wrong_intent_msg, &keys[1]).into(); @@ -236,10 +243,11 @@ async fn test_multisig_with_zklogin_scenerios() { ); let tx_2 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_2).await; - assert!(res - .unwrap_err() - .to_string() - .contains(format!("Invalid sig for pk={}", pk1.encode_base64()).as_str())); + assert!( + res.unwrap_err() + .to_string() + .contains(format!("Invalid sig for pk={}", pk1.encode_base64()).as_str()) + ); // 3. a multisig with a bad secp256r1 sig fails to execute. let wrong_sig_3: GenericSignature = Signature::new_secure(&wrong_intent_msg, &keys[2]).into(); @@ -248,10 +256,11 @@ async fn test_multisig_with_zklogin_scenerios() { ); let tx_3 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_3).await; - assert!(res - .unwrap_err() - .to_string() - .contains(format!("Invalid sig for pk={}", pk2.encode_base64()).as_str())); + assert!( + res.unwrap_err() + .to_string() + .contains(format!("Invalid sig for pk={}", pk2.encode_base64()).as_str()) + ); // 4. a multisig with a bad ephemeral sig inside zklogin sig fails to execute. let wrong_eph_sig = Signature::new_secure(&wrong_intent_msg, eph_kp); @@ -269,12 +278,14 @@ async fn test_multisig_with_zklogin_scenerios() { ZkLoginPublicIdentifier::new(zklogin_inputs.get_iss(), zklogin_inputs.get_address_seed()) .unwrap(), ); - assert!(res - .unwrap_err() - .to_string() - .contains(format!("Invalid sig for pk={}", pk3.encode_base64()).as_str())); + assert!( + res.unwrap_err() + .to_string() + .contains(format!("Invalid sig for pk={}", pk3.encode_base64()).as_str()) + ); - // 5. a multisig with a mismatch ephermeal sig and zklogin inputs fails to execute. + // 5. a multisig with a mismatched ephemeral sig and zklogin inputs fails to + // execute. let eph_sig = Signature::new_secure(&intent_msg, eph_kp_1); let zklogin_sig_mismatch = GenericSignature::ZkLoginAuthenticator(ZkLoginAuthenticator::new( zklogin_inputs.clone(), @@ -286,25 +297,29 @@ async fn test_multisig_with_zklogin_scenerios() { ); let tx_5 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_5).await; - assert!(res - .unwrap_err() - .to_string() - .contains(format!("Invalid sig for pk={}", pk3.encode_base64()).as_str())); + assert!( + res.unwrap_err() + .to_string() + .contains(format!("Invalid sig for pk={}", pk3.encode_base64()).as_str()) + ); - // 6. a multisig with an inconsistent max_epoch with zk proof itself fails to execute. + // 6. a multisig with an inconsistent max_epoch with zk proof itself fails to + // execute.
let eph_sig = Signature::new_secure(&intent_msg, eph_kp); let zklogin_sig_wrong_zklogin_inputs = GenericSignature::ZkLoginAuthenticator( - ZkLoginAuthenticator::new(zklogin_inputs.clone(), 1, eph_sig), // max_epoch set to 1 instead of 2 + ZkLoginAuthenticator::new(zklogin_inputs.clone(), 1, eph_sig), /* max_epoch set to 1 + * instead of 2 */ ); let multisig = GenericSignature::MultiSig( MultiSig::combine(vec![zklogin_sig_wrong_zklogin_inputs], multisig_pk.clone()).unwrap(), ); let tx_7 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_7).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Groth16 proof verify failed")); + assert!( + res.unwrap_err() + .to_string() + .contains("Groth16 proof verify failed") + ); // 7. a multisig with the wrong sender fails to execute. let wrong_multisig_addr = SuiAddress::from( @@ -332,12 +347,14 @@ async fn test_multisig_with_zklogin_scenerios() { GenericSignature::MultiSig(MultiSig::combine(vec![sig_4], multisig_pk.clone()).unwrap()); let tx_8 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_8).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Required Signature from")); + assert!( + res.unwrap_err() + .to_string() + .contains("Required Signature from") + ); - // 8. a multisig with zklogin sig of invalid compact signature bytes fails to execute. + // 8. a multisig with zklogin sig of invalid compact signature bytes fails to + // execute. let multisig = GenericSignature::MultiSig(MultiSig::insecure_new( vec![CompressedSignature::ZkLogin(ZkLoginAuthenticatorAsBytes( vec![0], @@ -347,10 +364,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_7 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_7).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid zklogin authenticator bytes")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid zklogin authenticator bytes") + ); // assert positive case for all 4 participanting parties. // 1a. good ed25519 sig used in multisig executes successfully. @@ -414,7 +432,8 @@ async fn test_multisig_with_zklogin_scenerios() { let tx_10 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let _ = context.execute_transaction_must_succeed(tx_10).await; - // 4c. good zklogin sig AND good ed25519 combined used in multisig executes successfully. + // 4c. good zklogin sig AND good ed25519 combined used in multisig executes + // successfully. let gas = test_cluster .fund_address_and_return_gas(rgp, Some(20000000000), multisig_addr) .await; @@ -451,12 +470,14 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_11 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_11).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid ed25519 pk bytes")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid ed25519 pk bytes") + ); - // 10. invalid bitmap b10000 when the max bitmap for 4 pks is b1111, fails to execute. + // 10. invalid bitmap b10000 when the max bitmap for 4 pks is b1111, fails to + // execute. 
let multisig = GenericSignature::MultiSig(MultiSig::insecure_new( vec![sig.clone().to_compressed().unwrap()], 1 << 4, @@ -464,10 +485,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_10 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_10).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid public keys index")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid public keys index") + ); // 11. malformed multisig pk where threshold = 0, fails to execute. let bad_multisig_pk = MultiSigPublicKey::insecure_new( @@ -490,10 +512,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_11 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_11).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 12. malformed multisig a pk has weight = 0, fails to execute. let bad_multisig_pk_2 = MultiSigPublicKey::insecure_new( @@ -515,10 +538,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_14 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_14).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 13. pass in 2 sigs when only 1 pk in multisig_pk, fails to execute. let small_multisig_pk = MultiSigPublicKey::insecure_new(vec![(pk0.clone(), 1)], 1); @@ -538,10 +562,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_13 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_13).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 14. pass a multisig where there is dup pk in multisig_pk, fails to execute. let multisig_pk_with_dup = @@ -562,10 +587,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_14 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_14).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 15. a sig with 11 pks fails to execute. let multisig_pk_11 = MultiSigPublicKey::insecure_new(vec![(pk0.clone(), 1); 11], 1); @@ -585,10 +611,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_15 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_15).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 16. total weight of all pks < threshold fails to execute. 
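// [Editor's aside] Cases 4, 9 and 16 above all exercise the same weight/threshold
// rule: a multisig verifies only if the summed weights of the distinct signing keys
// reach the threshold. A minimal illustrative sketch of that rule follows; it is
// not Sui's actual verifier, and the names `SketchMultiSigPk` and `check_weight`
// are hypothetical.
struct SketchMultiSigPk {
    /// (public key bytes, weight) pairs; case 15 above implies the real protocol
    /// caps this at 10 keys.
    pk_map: Vec<(Vec<u8>, u8)>,
    threshold: u16,
}

fn check_weight(pk: &SketchMultiSigPk, signer_indices: &[usize]) -> Result<(), String> {
    // Sum the weights of the keys that actually signed.
    let weight: u16 = signer_indices.iter().map(|&i| pk.pk_map[i].1 as u16).sum();
    // Reject below-threshold combinations, mirroring the error string the tests
    // above assert on, e.g. "Insufficient weight=1 threshold=2".
    if weight < pk.threshold {
        return Err(format!("Insufficient weight={} threshold={}", weight, pk.threshold));
    }
    Ok(())
}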
let multisig_pk_12 = @@ -609,10 +636,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_16 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_16).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); // 17. multisig with empty pk map fails to execute. let bad_multisig_empty_pk = MultiSigPublicKey::insecure_new(vec![], 1); @@ -632,10 +660,11 @@ async fn test_multisig_with_zklogin_scenerios() { )); let tx_17 = Transaction::from_generic_sig_data(tx_data.clone(), vec![multisig]); let res = context.execute_transaction_may_fail(tx_17).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid value was given to the function")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid value was given to the function") + ); } #[sim_test] @@ -648,10 +677,11 @@ async fn test_expired_epoch_zklogin_in_multisig() { test_cluster.wait_for_epoch(Some(3)).await; let tx = construct_simple_zklogin_multisig_tx(&test_cluster).await; let res = test_cluster.wallet.execute_transaction_may_fail(tx).await; - assert!(res - .unwrap_err() - .to_string() - .contains("ZKLogin expired at epoch 2")); + assert!( + res.unwrap_err() + .to_string() + .contains("ZKLogin expired at epoch 2") + ); } #[sim_test] @@ -689,10 +719,11 @@ async fn test_random_zklogin_in_multisig() { ); let bad_tx = Transaction::from_generic_sig_data(tx_data.clone(), vec![short_multisig]); let res = context.execute_transaction_may_fail(bad_tx).await; - assert!(res - .unwrap_err() - .to_string() - .contains("Insufficient weight=9 threshold=10")); + assert!( + res.unwrap_err() + .to_string() + .contains("Insufficient weight=9 threshold=10") + ); let multisig = GenericSignature::MultiSig( MultiSig::combine(zklogin_sigs.clone(), multisig_pk.clone()).unwrap(), @@ -749,10 +780,11 @@ async fn test_zklogin_inside_multisig_feature_deny() { test_cluster.wait_for_authenticator_state_update().await; let tx = construct_simple_zklogin_multisig_tx(&test_cluster).await; let res = test_cluster.wallet.execute_transaction_may_fail(tx).await; - assert!(res - .unwrap_err() - .to_string() - .contains("zkLogin sig not supported inside multisig")); + assert!( + res.unwrap_err() + .to_string() + .contains("zkLogin sig not supported inside multisig") + ); } async fn construct_simple_zklogin_multisig_tx(test_cluster: &TestCluster) -> Transaction { diff --git a/crates/sui-e2e-tests/tests/object_deletion_tests.rs b/crates/sui-e2e-tests/tests/object_deletion_tests.rs index 4c6c769719b..4c46e037369 100644 --- a/crates/sui-e2e-tests/tests/object_deletion_tests.rs +++ b/crates/sui-e2e-tests/tests/object_deletion_tests.rs @@ -3,22 +3,26 @@ #[cfg(msim)] mod sim_only_tests { - use std::path::PathBuf; - use std::time::Duration; - use sui_core::authority::authority_store_tables::LiveObject; - use sui_core::state_accumulator::AccumulatorStore; + use std::{path::PathBuf, time::Duration}; + + use sui_core::{ + authority::authority_store_tables::LiveObject, state_accumulator::AccumulatorStore, + }; use sui_json_rpc_types::{SuiTransactionBlockEffects, SuiTransactionBlockEffectsAPI}; use sui_macros::sim_test; use sui_node::SuiNode; use sui_protocol_config::{ProtocolConfig, ProtocolVersion, SupportedProtocolVersions}; use sui_test_transaction_builder::publish_package; - use sui_types::messages_checkpoint::CheckpointSequenceNumber; - 
use sui_types::{base_types::ObjectID, digests::TransactionDigest}; + use sui_types::{ + base_types::ObjectID, digests::TransactionDigest, + messages_checkpoint::CheckpointSequenceNumber, + }; use test_cluster::{TestCluster, TestClusterBuilder}; use tokio::time::timeout; - /// This test checks that after we enable simplified_unwrap_then_delete, we no longer depend - /// on wrapped tombstones when generating effects and using effects. + /// This test checks that after we enable simplified_unwrap_then_delete, we + /// no longer depend on wrapped tombstones when generating effects and + /// using effects. #[sim_test] async fn test_no_more_dependency_on_wrapped_tombstone() { let mut _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { @@ -47,8 +51,8 @@ mod sim_only_tests { config.set_simplified_unwrap_then_delete(true); config }); - // At this epoch change, we should be re-accumulating without wrapped tombstone and now - // flips the feature flag simplified_unwrap_then_delete to true. + // At this epoch change, we should be re-accumulating without wrapped tombstones, + // and we now flip the feature flag simplified_unwrap_then_delete to true. test_cluster.trigger_reconfiguration().await; // Remove the wrapped tombstone on some nodes but not all. @@ -71,15 +75,16 @@ mod sim_only_tests { } // Tests that object pruning can prune objects correctly. - // Specifically, we first wrap a child object into a root object (tests wrap tombstone), - // then unwrap and delete the child object (tests unwrap and delete), - // and last delete the root object (tests object deletion). + // Specifically, we first wrap a child object into a root object (tests wrap + // tombstone), then unwrap and delete the child object (tests unwrap and + // delete), and last delete the root object (tests object deletion). #[sim_test] async fn object_pruning_test() { let test_cluster = TestClusterBuilder::new().build().await; let fullnode = &test_cluster.fullnode_handle.sui_node; - // Create a root object and a child object. Wrap the child object inside the root object. + // Create a root object and a child object. Wrap the child object inside the + // root object. let (package_id, object_id) = publish_package_and_create_parent_object(&test_cluster).await; let child_id = create_owned_child(&test_cluster, package_id).await; let wrap_child_txn_digest = wrap_child(&test_cluster, package_id, object_id, child_id) .await @@ -105,7 +110,8 @@ mod sim_only_tests { .await .unwrap(); - // Manually initiating a pruning and compaction job to make sure that deleted objects are gong from object store. + // Manually initiating a pruning and compaction job to make sure that deleted + // objects are gone from object store. node.state().prune_objects_and_compact_for_testing().await; // Check that no object with `child_id` exists in object store. @@ -124,7 +130,8 @@ mod sim_only_tests { }) .await; - // Next, we unwrap and delete the child object, as well as delete the root object. + // Next, we unwrap and delete the child object, as well as delete the root + // object. let unwrap_delete_txn_digest = unwrap_and_delete_child(&test_cluster, package_id, object_id) .await @@ -158,7 +165,8 @@ mod sim_only_tests { .await .unwrap(); - // Manually initiating a pruning and compaction job to make sure that deleted objects are gong from object store. + // Manually initiating a pruning and compaction job to make sure that deleted + // objects are gone from object store.
node.state().prune_objects_and_compact_for_testing().await; // Check that both root and child objects are gone from object store. @@ -250,11 +258,13 @@ mod sim_only_tests { .effects .unwrap(); assert_eq!(effects.wrapped().len(), 1); - assert!(test_cluster - .get_object_or_tombstone_from_fullnode_store(child_id) - .await - .2 - .is_wrapped()); + assert!( + test_cluster + .get_object_or_tombstone_from_fullnode_store(child_id) + .await + .2 + .is_wrapped() + ); effects } diff --git a/crates/sui-e2e-tests/tests/onsite_reconfig_observer_tests.rs b/crates/sui-e2e-tests/tests/onsite_reconfig_observer_tests.rs index e31d6aa017d..458a8d1c4d5 100644 --- a/crates/sui-e2e-tests/tests/onsite_reconfig_observer_tests.rs +++ b/crates/sui-e2e-tests/tests/onsite_reconfig_observer_tests.rs @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 use prometheus::Registry; -use sui_core::authority_aggregator::AuthAggMetrics; -use sui_core::quorum_driver::reconfig_observer::OnsiteReconfigObserver; -use sui_core::quorum_driver::reconfig_observer::ReconfigObserver; -use sui_core::safe_client::SafeClientMetricsBase; +use sui_core::{ + authority_aggregator::AuthAggMetrics, + quorum_driver::reconfig_observer::{OnsiteReconfigObserver, ReconfigObserver}, + safe_client::SafeClientMetricsBase, +}; +use sui_macros::sim_test; use test_cluster::TestClusterBuilder; use tracing::info; -use sui_macros::sim_test; - #[sim_test] async fn test_onsite_reconfig_observer_basic() { telemetry_subscribers::init_for_testing(); @@ -55,7 +55,8 @@ async fn test_onsite_reconfig_observer_basic() { fullnode.with(|node| node.clone_authority_aggregator().unwrap().committee.epoch), 1 ); - // The observer thread is not managed by simtest, and hence we must abort it manually to make sure - // it stops running first. Otherwise it may lead to unexpected channel close issue. + // The observer thread is not managed by simtest, and hence we must abort it + // manually to make sure it stops running first. Otherwise it may lead to + // an unexpected channel close issue. observer_handle.abort(); } diff --git a/crates/sui-e2e-tests/tests/protocol_version_tests.rs b/crates/sui-e2e-tests/tests/protocol_version_tests.rs index ae6bb0eb9c4..ee63bc3c32f 100644 --- a/crates/sui-e2e-tests/tests/protocol_version_tests.rs +++ b/crates/sui-e2e-tests/tests/protocol_version_tests.rs @@ -34,8 +34,8 @@ fn test_protocol_overrides() { ); } -// Same as the previous test, to ensure we have test isolation with all the caching that -// happens in get_for_min_version/get_for_max_version_UNSAFE. +// Same as the previous test, to ensure we have test isolation with all the +// caching that happens in get_for_min_version/get_for_max_version_UNSAFE.
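// [Editor's aside] The override-guard pattern these two tests rely on, as used
// elsewhere in this diff (see object_deletion_tests above): the returned guard
// keeps the override active only while it is alive, which is what gives each test
// its isolation despite the caching mentioned just above. A minimal sketch,
// assuming only the `ProtocolConfig` test hooks already shown in this diff:
fn example_override_guard() {
    let _guard = sui_protocol_config::ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
        // Flip a feature flag for the duration of this test only.
        config.set_simplified_unwrap_then_delete(true);
        config
    });
    // ... test body runs here with the override in effect; dropping the guard
    // restores the default configuration ...
}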
#[test] fn test_protocol_overrides_2() { telemetry_subscribers::init_for_testing(); @@ -54,12 +54,11 @@ fn test_protocol_overrides_2() { #[cfg(msim)] mod sim_only_tests { - use super::*; + use std::{path::PathBuf, sync::Arc}; + use fastcrypto::encoding::Base64; use move_binary_format::CompiledModule; use move_core_types::ident_str; - use std::path::PathBuf; - use std::sync::Arc; use sui_core::authority::framework_injection; use sui_framework::BuiltInFramework; use sui_json_rpc_api::WriteApiClient; @@ -67,36 +66,32 @@ mod sim_only_tests { use sui_macros::*; use sui_move_build::{BuildConfig, CompiledPackage}; use sui_protocol_config::SupportedProtocolVersions; - use sui_types::base_types::ConciseableName; - use sui_types::base_types::{ObjectID, ObjectRef}; - use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; - use sui_types::id::ID; - use sui_types::object::Owner; - use sui_types::sui_system_state::{ - epoch_start_sui_system_state::EpochStartSystemStateTrait, get_validator_from_table, - SuiSystemState, SuiSystemStateTrait, SUI_SYSTEM_STATE_SIM_TEST_DEEP_V2, - SUI_SYSTEM_STATE_SIM_TEST_SHALLOW_V2, SUI_SYSTEM_STATE_SIM_TEST_V1, - }; - use sui_types::transaction::{ - CallArg, Command, ObjectArg, ProgrammableMoveCall, ProgrammableTransaction, - TransactionData, TEST_ONLY_GAS_UNIT_FOR_GENERIC, - }; use sui_types::{ - base_types::{SequenceNumber, SuiAddress}, + base_types::{ConciseableName, ObjectID, ObjectRef, SequenceNumber, SuiAddress}, digests::TransactionDigest, - object::Object, + effects::{TransactionEffects, TransactionEffectsAPI}, + id::ID, + object::{Object, Owner}, programmable_transaction_builder::ProgrammableTransactionBuilder, - transaction::TransactionKind, - MOVE_STDLIB_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, SUI_SYSTEM_PACKAGE_ID, - }; - use sui_types::{ - SUI_AUTHENTICATOR_STATE_OBJECT_ID, SUI_CLOCK_OBJECT_ID, SUI_RANDOMNESS_STATE_OBJECT_ID, + sui_system_state::{ + epoch_start_sui_system_state::EpochStartSystemStateTrait, get_validator_from_table, + SuiSystemState, SuiSystemStateTrait, SUI_SYSTEM_STATE_SIM_TEST_DEEP_V2, + SUI_SYSTEM_STATE_SIM_TEST_SHALLOW_V2, SUI_SYSTEM_STATE_SIM_TEST_V1, + }, + transaction::{ + CallArg, Command, ObjectArg, ProgrammableMoveCall, ProgrammableTransaction, + TransactionData, TransactionKind, TEST_ONLY_GAS_UNIT_FOR_GENERIC, + }, + MOVE_STDLIB_PACKAGE_ID, SUI_AUTHENTICATOR_STATE_OBJECT_ID, SUI_CLOCK_OBJECT_ID, + SUI_FRAMEWORK_PACKAGE_ID, SUI_RANDOMNESS_STATE_OBJECT_ID, SUI_SYSTEM_PACKAGE_ID, SUI_SYSTEM_STATE_OBJECT_ID, }; use test_cluster::TestCluster; use tokio::time::{sleep, Duration}; use tracing::info; + use super::*; + const START: u64 = ProtocolVersion::MAX.as_u64(); const FINISH: u64 = ProtocolVersion::MAX_ALLOWED.as_u64(); @@ -346,14 +341,14 @@ mod sim_only_tests { let to_wrap1 = create_obj(&cluster).await; let to_transfer1 = create_obj(&cluster).await; - // Instances of the type that existed before will not have public transfer despite - // now having store + // Instances of the type that existed before will not have public transfer + // despite now having store assert!(!has_public_transfer(&cluster, &to_wrap0.0).await); assert!(!has_public_transfer(&cluster, &to_transfer0.0).await); assert!(has_public_transfer(&cluster, &to_wrap1.0).await); assert!(has_public_transfer(&cluster, &to_transfer1.0).await); - // Instances of the type that existed before and new instances are able to take advantage of - // the newly introduced ability + // Instances of the type that existed before and new instances are able to take + // advantage 
of the newly introduced ability wrap_obj(&cluster, to_wrap0).await; transfer_obj(&cluster, SuiAddress::ZERO, to_transfer0).await; wrap_obj(&cluster, to_wrap1).await; @@ -422,8 +417,8 @@ mod sim_only_tests { expect_upgrade_succeeded(&cluster).await; - // Make sure the epoch change event includes the event from the new package's module - // initializer + // Make sure the epoch change event includes the event from the new package's + // module initializer let effects = get_framework_upgrade_effects(&cluster, &sui_extra).await; let shared_id = effects @@ -503,8 +498,10 @@ mod sim_only_tests { SUI_SYSTEM_PACKAGE_ID, ident_str!("msim_extra_1").to_owned(), ident_str!("mint").to_owned(), - /* type_arguments */ vec![], - /* call_args */ vec![], + // type_arguments + vec![], + // call_args + vec![], ) .unwrap(); builder.finish() @@ -523,7 +520,8 @@ mod sim_only_tests { SUI_SYSTEM_PACKAGE_ID, ident_str!("msim_extra_1").to_owned(), ident_str!("wrap").to_owned(), - /* type_arguments */ vec![], + // type_arguments + vec![], vec![CallArg::Object(ObjectArg::ImmOrOwnedObject(obj))], ) .unwrap(); @@ -569,9 +567,12 @@ mod sim_only_tests { .dev_inspect_transaction_block( sender, Base64::from_bytes(&bcs::to_bytes(&txn).unwrap()), - /* gas_price */ None, - /* epoch_id */ None, - /* additional_args */ None, + // gas_price + None, + // epoch_id + None, + // additional_args + None, ) .await .unwrap(); @@ -687,7 +688,8 @@ mod sim_only_tests { async fn test_framework_compatible_upgrade_no_protocol_version() { ProtocolConfig::poison_get_for_min_version(); - // Even though a new framework is available, the required new protocol version is not. + // Even though a new framework is available, the required new protocol version + // is not. override_sui_system_modules("compatible"); let test_cluster = TestClusterBuilder::new() .with_epoch_duration_ms(20000) @@ -717,9 +719,10 @@ mod sim_only_tests { .build() .await; - // We must stop the validators before overriding the system modules, otherwise the validators - // may start running before the override and hence send capabilities indicating that they - // only support the genesis system modules. + // We must stop the validators before overriding the system modules, otherwise + // the validators may start running before the override and hence send + // capabilities indicating that they only support the genesis system + // modules. test_cluster.stop_all_validators().await; let first = test_cluster.swarm.validator_nodes().next().unwrap(); let first_name = first.name(); @@ -735,12 +738,14 @@ mod sim_only_tests { expect_upgrade_succeeded(&test_cluster).await; - // expect_upgrade_succeeded only waits for fullnode to reconfigure - validator can actually be - // slower than fullnode if it wasn't one of the signers of the final checkpoint. + // expect_upgrade_succeeded only waits for fullnode to reconfigure - validator + // can actually be slower than fullnode if it wasn't one of the signers + // of the final checkpoint. 
sleep(Duration::from_secs(3)).await; let node_handle = first.get_node_handle().expect("node should be running"); - // The dissenting node receives the correct framework via state sync and completes the upgrade + // The dissenting node receives the correct framework via state sync and + // completes the upgrade node_handle.with(|node| { let committee = node.state().epoch_store_for_testing().committee().clone(); assert_eq!( @@ -751,8 +756,8 @@ mod sim_only_tests { }); } - // Test that protocol version upgrade does not complete when there is no quorum on the - // framework upgrades. + // Test that protocol version upgrade does not complete when there is no quorum + // on the framework upgrades. #[sim_test] async fn test_framework_upgrade_conflicting_versions_no_quorum() { ProtocolConfig::poison_get_for_min_version(); @@ -814,9 +819,9 @@ mod sim_only_tests { // We are going to enter safe mode so set the expectation right. test_cluster.set_safe_mode_expected(true); - // Wait for epoch change to happen. This epoch we should also experience a framework - // upgrade that upgrades the framework to the base one (which doesn't abort), and thus - // a protocol version increment. + // Wait for epoch change to happen. This epoch we should also experience a + // framework upgrade that upgrades the framework to the base one (which + // doesn't abort), and thus a protocol version increment. let system_state = test_cluster.wait_for_epoch(Some(1)).await; assert_eq!(system_state.epoch(), 1); assert_eq!(system_state.protocol_version(), FINISH); // protocol version increments @@ -826,7 +831,8 @@ mod sim_only_tests { // We are getting out of safe mode soon. test_cluster.set_safe_mode_expected(false); - // This epoch change should execute successfully without any upgrade and get us out of safe mode. + // This epoch change should execute successfully without any upgrade and get us + // out of safe mode. let system_state = test_cluster.wait_for_epoch(Some(2)).await; assert_eq!(system_state.epoch(), 2); assert_eq!(system_state.protocol_version(), FINISH); // protocol version stays the same @@ -859,8 +865,8 @@ mod sim_only_tests { .with_objects([sui_system_package_object("mock_sui_systems/base")]) .build() .await; - // Wait for the upgrade to finish. After the upgrade, the new framework will be installed, - // but the system state object hasn't been upgraded yet. + // Wait for the upgrade to finish. After the upgrade, the new framework will be + // installed, but the system state object hasn't been upgraded yet. let system_state = test_cluster.wait_for_epoch(Some(1)).await; assert_eq!(system_state.protocol_version(), FINISH); assert_eq!( @@ -869,8 +875,8 @@ mod sim_only_tests { ); assert!(matches!(system_state, SuiSystemState::SimTestV1(_))); - // The system state object will be upgraded next time we execute advance_epoch transaction - // at epoch boundary. + // The system state object will be upgraded next time we execute advance_epoch + // transaction at epoch boundary. let system_state = test_cluster.wait_for_epoch(Some(2)).await; assert_eq!( system_state.system_state_version(), @@ -891,8 +897,8 @@ mod sim_only_tests { .with_objects([sui_system_package_object("mock_sui_systems/base")]) .build() .await; - // Wait for the upgrade to finish. After the upgrade, the new framework will be installed, - // but the system state object hasn't been upgraded yet. + // Wait for the upgrade to finish. After the upgrade, the new framework will be + // installed, but the system state object hasn't been upgraded yet. 
let system_state = test_cluster.wait_for_epoch(Some(1)).await; assert_eq!(system_state.protocol_version(), FINISH); assert_eq!( @@ -917,8 +923,8 @@ mod sim_only_tests { panic!("Expecting SimTestV1 type"); } - // The system state object will be upgraded next time we execute advance_epoch transaction - // at epoch boundary. + // The system state object will be upgraded next time we execute advance_epoch + // transaction at epoch boundary. let system_state = test_cluster.wait_for_epoch(Some(2)).await; assert_eq!( system_state.system_state_version(), @@ -945,10 +951,11 @@ mod sim_only_tests { #[sim_test] async fn sui_system_state_production_upgrade_test() { - // Use this test to test a real sui system state upgrade. To make this test work, - // put the new sui system in a new path and point to it in the override. - // It's important to also handle the new protocol version in protocol-config/lib.rs. - // The MAX_PROTOCOL_VERSION must not be changed yet when testing this. + // Use this test to test a real sui system state upgrade. To make this test + // work, put the new sui system in a new path and point to it in the + // override. It's important to also handle the new protocol version in + // protocol-config/lib.rs. The MAX_PROTOCOL_VERSION must not be changed + // yet when testing this. let test_cluster = TestClusterBuilder::new() .with_epoch_duration_ms(20000) .with_supported_protocol_versions(SupportedProtocolVersions::new_for_testing( @@ -958,13 +965,13 @@ mod sim_only_tests { .await; // TODO: Replace the path with the new framework path when we test it for real. override_sui_system_modules("../../../sui-framework/packages/sui-system"); - // Wait for the upgrade to finish. After the upgrade, the new framework will be installed, - // but the system state object hasn't been upgraded yet. + // Wait for the upgrade to finish. After the upgrade, the new framework will be + // installed, but the system state object hasn't been upgraded yet. let system_state = test_cluster.wait_for_epoch(Some(1)).await; assert_eq!(system_state.protocol_version(), FINISH); - // The system state object will be upgraded next time we execute advance_epoch transaction - // at epoch boundary. + // The system state object will be upgraded next time we execute advance_epoch + // transaction at epoch boundary. let system_state = test_cluster.wait_for_epoch(Some(2)).await; if let SuiSystemState::V2(inner) = system_state { assert_eq!(inner.parameters.min_validator_count, 4); @@ -1012,8 +1019,8 @@ mod sim_only_tests { .unwrap() } - /// Get root compiled modules, built from fixture `fixture` in the `framework_upgrades` - /// directory. + /// Get root compiled modules, built from fixture `fixture` in the + /// `framework_upgrades` directory. fn fixture_modules(fixture: &str) -> Vec<CompiledModule> { fixture_package(fixture).into_modules() } diff --git a/crates/sui-e2e-tests/tests/randomness_tests.rs b/crates/sui-e2e-tests/tests/randomness_tests.rs index e622d42170d..8cd21745876 100644 --- a/crates/sui-e2e-tests/tests/randomness_tests.rs +++ b/crates/sui-e2e-tests/tests/randomness_tests.rs @@ -2,11 +2,10 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use sui_macros::sim_test; use sui_types::SUI_RANDOMNESS_STATE_OBJECT_ID; use test_cluster::TestClusterBuilder; -use sui_macros::sim_test; - #[sim_test] async fn test_create_randomness_state_object() { let test_cluster = TestClusterBuilder::new() @@ -20,19 +19,20 @@ async fn test_create_randomness_state_object() { // no node has the randomness state object yet for h in &handles { h.with(|node| { - assert!(node - .state() - .get_cache_reader() - .get_latest_object_ref_or_tombstone(SUI_RANDOMNESS_STATE_OBJECT_ID) - .unwrap() - .is_none()); + assert!( + node.state() + .get_cache_reader() + .get_latest_object_ref_or_tombstone(SUI_RANDOMNESS_STATE_OBJECT_ID) + .unwrap() + .is_none() + ); }); } // wait until feature is enabled test_cluster.wait_for_protocol_version(32.into()).await; - // wait until next epoch - randomness state object is created at the end of the first epoch - // in which it is supported. + // wait until next epoch - randomness state object is created at the end of the + // first epoch in which it is supported. test_cluster.wait_for_epoch_all_nodes(2).await; // protocol upgrade completes in epoch 1 for h in &handles { diff --git a/crates/sui-e2e-tests/tests/readme.rs b/crates/sui-e2e-tests/tests/readme.rs index 19c64b354b5..a21036b8bd9 100644 --- a/crates/sui-e2e-tests/tests/readme.rs +++ b/crates/sui-e2e-tests/tests/readme.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use std::{io::Write, process::Command}; + use tempfile::tempdir; #[test] diff --git a/crates/sui-e2e-tests/tests/reconfiguration_tests.rs b/crates/sui-e2e-tests/tests/reconfiguration_tests.rs index 3b4cc80adf7..e5bd24c18db 100644 --- a/crates/sui-e2e-tests/tests/reconfiguration_tests.rs +++ b/crates/sui-e2e-tests/tests/reconfiguration_tests.rs @@ -1,30 +1,36 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeSet, HashSet}, + sync::Arc, + time::Duration, +}; + use futures::future::join_all; use rand::rngs::OsRng; -use std::collections::{BTreeSet, HashSet}; -use std::sync::Arc; -use std::time::Duration; -use sui_core::authority::epoch_start_configuration::EpochFlag; -use sui_core::consensus_adapter::position_submit_certificate; +use sui_core::{ + authority::epoch_start_configuration::EpochFlag, consensus_adapter::position_submit_certificate, +}; use sui_json_rpc_types::SuiTransactionBlockEffectsAPI; use sui_macros::{register_fail_point_arg, sim_test}; use sui_node::SuiNodeHandle; use sui_protocol_config::ProtocolConfig; use sui_swarm_config::genesis_config::{ValidatorGenesisConfig, ValidatorGenesisConfigBuilder}; use sui_test_transaction_builder::{make_transfer_sui_transaction, TestTransactionBuilder}; -use sui_types::base_types::SuiAddress; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::error::SuiError; -use sui_types::gas::GasCostSummary; -use sui_types::governance::MIN_VALIDATOR_JOINING_STAKE_MIST; -use sui_types::message_envelope::Message; -use sui_types::sui_system_state::{ - get_validator_from_table, sui_system_state_summary::get_validator_by_pool_id, - SuiSystemStateTrait, +use sui_types::{ + base_types::SuiAddress, + effects::TransactionEffectsAPI, + error::SuiError, + gas::GasCostSummary, + governance::MIN_VALIDATOR_JOINING_STAKE_MIST, + message_envelope::Message, + sui_system_state::{ + get_validator_from_table, sui_system_state_summary::get_validator_by_pool_id, + SuiSystemStateTrait, + }, + transaction::{TransactionDataAPI, TransactionExpiration}, }; -use sui_types::transaction::{TransactionDataAPI, TransactionExpiration}; use test_cluster::{TestCluster, TestClusterBuilder}; use tokio::time::sleep; @@ -50,13 +56,15 @@ async fn advance_epoch_tx_test() { .await .unwrap(); // Check that the validator didn't commit the transaction yet. - assert!(state - .get_signed_effects_and_maybe_resign( - effects.transaction_digest(), - &state.epoch_store_for_testing() - ) - .unwrap() - .is_none()); + assert!( + state + .get_signed_effects_and_maybe_resign( + effects.transaction_digest(), + &state.epoch_store_for_testing() + ) + .unwrap() + .is_none() + ); effects }) .collect(); @@ -123,8 +131,8 @@ async fn test_transaction_expiration() { .unwrap(); } -// TODO: This test does not guarantee that tx would be reverted, and hence the code path -// may not always be tested. +// TODO: This test does not guarantee that tx would be reverted, and hence the +// code path may not always be tested. #[sim_test] async fn reconfig_with_revert_end_to_end_test() { let test_cluster = TestClusterBuilder::new().build().await; @@ -371,6 +379,7 @@ async fn do_test_lock_table_upgrade() { #[sim_test] async fn test_create_advance_epoch_tx_race() { use std::sync::Arc; + use sui_macros::{register_fail_point, register_fail_point_async}; use tokio::sync::broadcast; use tracing::info; @@ -378,14 +387,15 @@ async fn test_create_advance_epoch_tx_race() { telemetry_subscribers::init_for_testing(); sui_protocol_config::ProtocolConfig::poison_get_for_min_version(); - // panic if we enter safe mode. If you remove the check for `is_tx_already_executed` in + // panic if we enter safe mode. If you remove the check for + // `is_tx_already_executed` in // AuthorityState::create_and_execute_advance_epoch_tx, this test should fail. 
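The comment above names the property under test: executing the advance-epoch transaction must be idempotent across the state-sync path and the local-execution path, which is what the `is_tx_already_executed` check provides. A dependency-free sketch of that kind of guard (types simplified; this is not the actual AuthorityState code):

```rust
use std::collections::HashSet;
use std::sync::Mutex;

// Before executing a transaction, record its digest; a second attempt that
// lost the race (e.g. the tx already arrived via state sync) becomes a no-op.
struct Executor {
    executed: Mutex<HashSet<[u8; 32]>>, // stand-in for a transaction digest set
}

impl Executor {
    // Returns true if this call actually executed the transaction.
    fn execute_once(&self, digest: [u8; 32], run: impl FnOnce()) -> bool {
        let mut executed = self.executed.lock().unwrap();
        if !executed.insert(digest) {
            return false; // already executed; skip to avoid double-execution
        }
        run();
        true
    }
}

fn main() {
    let ex = Executor { executed: Mutex::new(HashSet::new()) };
    let digest = [7u8; 32];
    assert!(ex.execute_once(digest, || println!("executing change-epoch tx")));
    // The race loser finds the digest already recorded and does nothing.
    assert!(!ex.execute_once(digest, || unreachable!()));
}
```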
register_fail_point("record_checkpoint_builder_is_safe_mode_metric", || { panic!("safe mode recorded"); }); - // Intercept the specified async wait point on a given node, and wait there until a message - // is sent from the given tx. + // Intercept the specified async wait point on a given node, and wait there + // until a message is sent from the given tx. let register_wait = |failpoint, node_id, tx: Arc<broadcast::Sender<()>>| { let node = sui_simulator::task::NodeId(node_id); register_fail_point_async(failpoint, move || { @@ -431,7 +441,8 @@ async fn test_create_advance_epoch_tx_race() { // Allow time for paused node to execute change epoch tx via state sync. sleep(Duration::from_secs(5)).await; - // now release the pause, node will find that change epoch tx has already been executed. + // now release the pause, node will find that change epoch tx has already been + // executed. info!("releasing change epoch delay tx"); change_epoch_delay_tx.send(()).unwrap(); @@ -462,7 +473,8 @@ async fn test_reconfig_with_failing_validator() { .map(|v| v.parse().unwrap()) .unwrap_or(4); - // A longer timeout is required, as restarts can cause reconfiguration to take longer. + // A longer timeout is required, as restarts can cause reconfiguration to take + // longer. test_cluster .wait_for_epoch_with_timeout(Some(target_epoch), Duration::from_secs(90)) .await; @@ -470,9 +482,9 @@ #[sim_test] async fn test_validator_resign_effects() { - // This test checks that validators are able to re-sign transaction effects that were finalized - // in previous epochs. This allows authority aggregator to form a new effects certificate - // in the new epoch. + // This test checks that validators are able to re-sign transaction effects that + // were finalized in previous epochs. This allows the authority aggregator to + // form a new effects certificate in the new epoch. let test_cluster = TestClusterBuilder::new().build().await; let tx = make_transfer_sui_transaction(&test_cluster.wallet, None, None).await; let effects0 = test_cluster @@ -572,13 +584,15 @@ async fn test_inactive_validator_pool_read() { // Check that this node is no longer a validator. validator.with(|node| { - assert!(node - .state() - .is_fullnode(&node.state().epoch_store_for_testing())); + assert!( + node.state() + .is_fullnode(&node.state().epoch_store_for_testing()) + ); }); - // Check that the validator that just left now shows up in the inactive_validators, - // and we can still deserialize it and get the inactive staking pool. + // Check that the validator that just left now shows up in the + // inactive_validators, and we can still deserialize it and get the inactive + // staking pool. test_cluster.fullnode_handle.sui_node.with(|node| { let system_state = node .state() @@ -606,7 +620,8 @@ async fn test_inactive_validator_pool_read() { #[sim_test] async fn test_reconfig_with_committee_change_basic() { - // This test exercise the full flow of a validator joining the network, catch up and then leave. + // This test exercises the full flow of a validator joining the network, catching + // up and then leaving.
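The `register_wait` helper above parks a node at a named failpoint until the test releases it through a broadcast channel. A stripped-down version of the same pause/release pairing using only tokio (task names and timings are illustrative):

```rust
use std::sync::Arc;
use tokio::sync::broadcast;

// Hypothetical stand-in for the failpoint hook: the task blocks at the
// "failpoint" until the test sends a release message on the channel.
async fn paused_task(tag: &str, release: Arc<broadcast::Sender<()>>) {
    let mut rx = release.subscribe();
    println!("{tag}: waiting at failpoint");
    let _ = rx.recv().await;
    println!("{tag}: released, continuing");
}

#[tokio::main]
async fn main() {
    let (tx, _keepalive) = broadcast::channel(1);
    let tx = Arc::new(tx);
    let handle = tokio::spawn(paused_task("change-epoch", tx.clone()));
    // Let the paused task reach the failpoint, then release it,
    // mirroring change_epoch_delay_tx.send(()) in the test above.
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    tx.send(()).unwrap();
    handle.await.unwrap();
}
```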
let new_validator = ValidatorGenesisConfigBuilder::new().build(&mut OsRng); let address = (&new_validator.account_key_pair.public()).into(); @@ -633,9 +648,10 @@ test_cluster.wait_for_epoch_all_nodes(1).await; new_validator_handle.with(|node| { - assert!(node - .state() - .is_validator(&node.state().epoch_store_for_testing())); + assert!( + node.state() + .is_validator(&node.state().epoch_store_for_testing()) + ); }); execute_remove_validator_tx(&test_cluster, &new_validator_handle).await; @@ -743,7 +759,8 @@ async fn safe_mode_reconfig_test() { assert_eq!(system_state.system_state_version, 1); assert_eq!(system_state.epoch, 0); - // Wait for regular epoch change to happen once. Migration from V1 to V2 should happen here. + // Wait for regular epoch change to happen once. Migration from V1 to V2 should + // happen here. let system_state = test_cluster.wait_for_epoch(Some(1)).await; assert!(!system_state.safe_mode()); assert_eq!(system_state.epoch(), 1); @@ -769,7 +786,8 @@ async fn safe_mode_reconfig_test() { let txn = make_staking_transaction(&test_cluster.wallet, validator_address).await; test_cluster.execute_transaction(txn).await; - // Now remove the override and check that in the next epoch we are no longer in safe mode. + // Now remove the override and check that in the next epoch we are no longer in + // safe mode. test_cluster.set_safe_mode_expected(false); let system_state = test_cluster.wait_for_epoch(Some(3)).await; @@ -837,8 +855,8 @@ async fn execute_remove_validator_tx(test_cluster: &TestCluster, handle: &SuiNod test_cluster.execute_transaction(tx).await; } -/// Execute a sequence of transactions to add a validator, including adding candidate, adding stake -/// and activate the validator. +/// Execute a sequence of transactions to add a validator, including adding +/// a candidate, adding stake and activating the validator. /// It does not however trigger reconfiguration yet. async fn execute_add_validator_transactions( test_cluster: &TestCluster, diff --git a/crates/sui-e2e-tests/tests/sdk_stream_tests.rs b/crates/sui-e2e-tests/tests/sdk_stream_tests.rs index 739f30ed819..9b11d5d8923 100644 --- a/crates/sui-e2e-tests/tests/sdk_stream_tests.rs +++ b/crates/sui-e2e-tests/tests/sdk_stream_tests.rs @@ -1,8 +1,9 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use futures::StreamExt; use std::future; + +use futures::StreamExt; use sui_sdk::{SuiClientBuilder, SUI_COIN_TYPE}; use sui_swarm_config::genesis_config::{DEFAULT_GAS_AMOUNT, DEFAULT_NUMBER_OF_OBJECT_PER_ACCOUNT}; use test_cluster::TestClusterBuilder; @@ -16,8 +17,8 @@ use test_cluster::TestClusterBuilder; // let client = SuiClientBuilder::default().build(rpc_url).await?; // let txs = client // .read_api() -// .get_transactions_stream(SuiTransactionBlockResponseQuery::default(), None, true) -// .collect::<Vec<_>>() +// .get_transactions_stream(SuiTransactionBlockResponseQuery::default(), +// None, true) .collect::<Vec<_>>() // .await; // assert_eq!(1, txs.len()); @@ -35,8 +36,8 @@ use test_cluster::TestClusterBuilder; // let txs = client // .read_api() -// .get_transactions_stream(SuiTransactionBlockResponseQuery::default(), None, true) -// .collect::<Vec<_>>() +// .get_transactions_stream(SuiTransactionBlockResponseQuery::default(), +// None, true) .collect::<Vec<_>>() // .await; // assert_eq!(2, txs.len()); diff --git a/crates/sui-e2e-tests/tests/shared_objects_tests.rs b/crates/sui-e2e-tests/tests/shared_objects_tests.rs index e5d7297527a..5392d4df8a5 100644 --- a/crates/sui-e2e-tests/tests/shared_objects_tests.rs +++ b/crates/sui-e2e-tests/tests/shared_objects_tests.rs @@ -1,29 +1,33 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use futures::future::join_all; -use futures::join; +use std::{ + ops::Deref, + time::{Duration, SystemTime}, +}; + +use futures::{future::join_all, join}; use rand::distributions::Distribution; -use std::ops::Deref; -use std::time::{Duration, SystemTime}; use sui_config::node::AuthorityOverloadConfig; -use sui_core::authority::EffectsNotifyRead; -use sui_core::consensus_adapter::position_submit_certificate; +use sui_core::{authority::EffectsNotifyRead, consensus_adapter::position_submit_certificate}; use sui_json_rpc_types::SuiTransactionBlockEffectsAPI; use sui_macros::{register_fail_point_async, sim_test}; use sui_swarm_config::genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT}; use sui_test_transaction_builder::{ publish_basics_package, publish_basics_package_and_make_counter, TestTransactionBuilder, }; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::event::Event; -use sui_types::execution_status::{CommandArgumentError, ExecutionFailureStatus, ExecutionStatus}; -use sui_types::messages_grpc::{LayoutGenerationOption, ObjectInfoRequest}; -use sui_types::transaction::{CallArg, ObjectArg}; +use sui_types::{ + effects::TransactionEffectsAPI, + event::Event, + execution_status::{CommandArgumentError, ExecutionFailureStatus, ExecutionStatus}, + messages_grpc::{LayoutGenerationOption, ObjectInfoRequest}, + transaction::{CallArg, ObjectArg}, +}; use test_cluster::TestClusterBuilder; use tokio::time::sleep; -/// Send a simple shared object transaction to Sui and ensures the client gets back a response. +/// Send a simple shared object transaction to Sui and ensure the client gets +/// back a response. #[sim_test] async fn shared_object_transaction() { let test_cluster = TestClusterBuilder::new().build().await; @@ -128,8 +132,8 @@ async fn shared_object_deletion_multiple_times() { }); let digests = join_all(submissions).await; - // Start a new fullnode and let it sync from genesis and wait for us to see all the deletion - // transactions. + // Start a new fullnode and let it sync from genesis and wait for us to see all + // the deletion transactions.
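The commented-out tests above drain a paginated RPC stream with `.collect::<Vec<_>>().await`. For reference, the same `futures::StreamExt` pattern on a local stream, with no Sui dependencies:

```rust
use futures::StreamExt;

// Collect an async stream into a Vec, as the get_transactions_stream
// snippets above do; the stream contents here are made up.
#[tokio::main]
async fn main() {
    let txs = futures::stream::iter(1..=3)
        .map(|n| format!("tx-{n}"))
        .collect::<Vec<_>>()
        .await;
    assert_eq!(txs.len(), 3);
}
```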
let fullnode = test_cluster.spawn_new_fullnode().await.sui_node; fullnode .state() @@ -184,8 +188,8 @@ async fn shared_object_deletion_multiple_times_cert_racing() { digests.push(*signed.digest()); } - // Start a new fullnode and let it sync from genesis and wait for us to see all the deletion - // transactions. + // Start a new fullnode and let it sync from genesis and wait for us to see all + // the deletion transactions. let fullnode = test_cluster.spawn_new_fullnode().await.sui_node; fullnode .state() @@ -195,18 +199,20 @@ .unwrap(); } -/// Test for execution of shared object certs that are sequenced after a shared object is deleted. -/// The test strategy is: +/// Test for execution of shared object certs that are sequenced after a shared +/// object is deleted. The test strategy is: /// 0. Inject a random delay just before execution of a transaction. /// 1. Create a shared object -/// 2. Create a delete cert and two increment certs, but do not execute any of them yet. +/// 2. Create a delete cert and two increment certs, but do not execute any of +/// them yet. /// 3. Execute the delete cert. /// 4. Execute the two increment certs. /// -/// The two execution certs should be immediately executable (because they have a missing -/// input). Therefore validators may execute them in either order. The injected delay ensures that -/// we will explore all possible orders, and `submit_transaction_to_validators` verifies that we -/// get the same effects regardless of the order. (checkpoint fork detection will also test this). +/// The two execution certs should be immediately executable (because they have +/// a missing input). Therefore validators may execute them in either order. The +/// injected delay ensures that we will explore all possible orders, and +/// `submit_transaction_to_validators` verifies that we get the same effects +/// regardless of the order. (checkpoint fork detection will also test this). #[sim_test] async fn shared_object_deletion_multi_certs() { // cause random delay just before tx is executed @@ -308,8 +314,8 @@ async fn shared_object_deletion_multi_certs() { .unwrap(); } -/// End-to-end shared transaction test for a Sui validator. It does not test the client or wallet, -/// but tests the end-to-end flow from Sui to consensus. +/// End-to-end shared transaction test for a Sui validator. It does not test the +/// client or wallet, but tests the end-to-end flow from Sui to consensus. #[sim_test] async fn call_shared_object_contract() { let test_cluster = TestClusterBuilder::new().build().await; @@ -355,13 +361,18 @@ async fn call_shared_object_contract() { .await .effects .unwrap(); - // Check that all reads must depend on the creation of the counter, but not to any previous reads. + // Check that all reads depend on the creation of the counter, but not on + // any previous reads.
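A toy version of the order-exploration strategy described in that doc comment: inject a random delay before each execution, run repeatedly, and require an order-independent outcome. Everything here is illustrative rather than Sui's scheduler:

```rust
use std::{collections::BTreeSet, sync::Arc, time::Duration};

use rand::Rng;
use tokio::sync::Mutex;

// Two "certs" race with random jitter; the observable effects must be the
// same no matter which one executes first.
async fn run_once() -> BTreeSet<&'static str> {
    let effects = Arc::new(Mutex::new(BTreeSet::new()));
    let handles: Vec<_> = ["increment-1", "increment-2"]
        .into_iter()
        .map(|name| {
            let effects = Arc::clone(&effects);
            tokio::spawn(async move {
                let jitter = rand::thread_rng().gen_range(0..10u64);
                tokio::time::sleep(Duration::from_millis(jitter)).await;
                effects.lock().await.insert(name);
            })
        })
        .collect();
    for h in handles {
        h.await.unwrap();
    }
    Arc::try_unwrap(effects).unwrap().into_inner()
}

#[tokio::main]
async fn main() {
    let first = run_once().await;
    for _ in 0..20 {
        // Same "effects" regardless of which task won the race.
        assert_eq!(run_once().await, first);
    }
}
```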
+ assert!( + effects + .dependencies() + .contains(&counter_creation_transaction) + ); + assert!( + prev_assert_value_txs + .iter() + .all(|tx| { !effects.dependencies().contains(tx) }) + ); prev_assert_value_txs.push(*effects.transaction_digest()); } @@ -377,16 +388,22 @@ async fn call_shared_object_contract() { .effects .unwrap(); let increment_transaction = *effects.transaction_digest(); - assert!(effects - .dependencies() - .contains(&counter_creation_transaction)); - // Previously executed assert_value transaction(s) are not a dependency because they took immutable reference to shared object - assert!(prev_assert_value_txs - .iter() - .all(|tx| { !effects.dependencies().contains(tx) })); + assert!( + effects + .dependencies() + .contains(&counter_creation_transaction) + ); + // Previously executed assert_value transaction(s) are not a dependency because + // they took immutable reference to shared object + assert!( + prev_assert_value_txs + .iter() + .all(|tx| { !effects.dependencies().contains(tx) }) + ); // assert_value can take both mutable and immutable references - // it is allowed to pass mutable shared object arg to move call taking immutable reference + // it is allowed to pass mutable shared object arg to move call taking immutable + // reference let mut assert_value_mut_transaction = None; for imm in [true, false] { // Ensure the value of the counter is `1`. @@ -421,7 +438,8 @@ async fn call_shared_object_contract() { let assert_value_mut_transaction = assert_value_mut_transaction.unwrap(); - // And last check - attempt to send increment transaction with immutable reference + // And last check - attempt to send increment transaction with immutable + // reference let transaction = test_cluster .test_transaction_builder() .await @@ -451,9 +469,11 @@ async fn call_shared_object_contract() { } .into() ); - assert!(effects - .dependencies() - .contains(&assert_value_mut_transaction)); + assert!( + effects + .dependencies() + .contains(&assert_value_mut_transaction) + ); } #[ignore("Disabled due to flakiness - re-enable when failure is fixed")] @@ -542,8 +562,8 @@ async fn shared_object_sync() { .await; let package_id = publish_basics_package(&test_cluster.wallet).await.0; - // Since we use submit_transaction_to_validators in this test, which does not go through fullnode, - // we need to manage gas objects ourselves. + // Since we use submit_transaction_to_validators in this test, which does not go + // through fullnode, we need to manage gas objects ourselves. let (sender, mut objects) = test_cluster.wallet.get_one_account().await.unwrap(); let rgp = test_cluster.get_reference_gas_price().await; // Send a transaction to create a counter, to all but one authority. @@ -566,32 +586,37 @@ async fn shared_object_sync() { assert!(effects.status().is_ok()); let ((counter_id, counter_initial_shared_version, _), _) = effects.created()[0]; - // Check that the counter object exists in at least one of the validators the transaction was - // sent to. + // Check that the counter object exists in at least one of the validators the + // transaction was sent to. 
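The dependency assertions above encode a small rule: a transaction that reads a shared object depends on the transaction that last mutated it, never on earlier read-only transactions. A minimal model of that rule (digests and types are simplified stand-ins):

```rust
use std::collections::BTreeSet;

// A counter object that remembers only its last mutating transaction; reads
// report that mutation as their sole dependency.
#[derive(Default)]
struct Counter {
    value: u64,
    last_mutation: Option<u64>, // simplified "digest" of the last mutating tx
}

impl Counter {
    fn mutate(&mut self, tx: u64) -> BTreeSet<u64> {
        let deps = self.last_mutation.into_iter().collect();
        self.value += 1;
        self.last_mutation = Some(tx);
        deps
    }
    fn read(&self, _tx: u64) -> BTreeSet<u64> {
        // Dependencies of a read: only the last mutation, never prior reads.
        self.last_mutation.into_iter().collect()
    }
}

fn main() {
    let mut c = Counter::default();
    let _ = c.mutate(1); // "create counter"
    let d2 = c.read(2);
    let d3 = c.read(3);
    assert_eq!(d2, d3); // both reads depend on tx 1 only, not on each other
    assert!(d2.contains(&1));
}
```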
for validator in test_cluster.swarm.validator_node_handles() { if slow_validators.contains(&validator.state().name) { - assert!(validator - .state() - .handle_object_info_request(ObjectInfoRequest::latest_object_info_request( - counter_id, - LayoutGenerationOption::None, - )) - .await - .is_ok()); + assert!( + validator + .state() + .handle_object_info_request(ObjectInfoRequest::latest_object_info_request( + counter_id, + LayoutGenerationOption::None, + )) + .await + .is_ok() + ); } } - // Check that the validator that wasn't sent the transaction is unaware of the counter object + // Check that the validator that wasn't sent the transaction is unaware of the + // counter object for validator in test_cluster.swarm.validator_node_handles() { if fast_validators.contains(&validator.state().name) { - assert!(validator - .state() - .handle_object_info_request(ObjectInfoRequest::latest_object_info_request( - counter_id, - LayoutGenerationOption::None, - )) - .await - .is_err()); + assert!( + validator + .state() + .handle_object_info_request(ObjectInfoRequest::latest_object_info_request( + counter_id, + LayoutGenerationOption::None, + )) + .await + .is_err() + ); } } @@ -602,7 +627,8 @@ .build(), ); - // Let's submit the transaction to the original set of validators, except the first. + // Let's submit the transaction to the original set of validators, except the + // first. let (effects, _) = test_cluster .submit_transaction_to_validators(increment_counter_transaction.clone(), &validators[1..]) .await @@ -618,7 +644,8 @@ assert!(effects.status().is_ok()); } -/// Send a simple shared object transaction to Sui and ensures the client gets back a response. +/// Send a simple shared object transaction to Sui and ensure the client gets +/// back a response.
#[sim_test] async fn replay_shared_object_transaction() { let test_cluster = TestClusterBuilder::new().build().await; diff --git a/crates/sui-e2e-tests/tests/shared_objects_version_tests.rs b/crates/sui-e2e-tests/tests/shared_objects_version_tests.rs index 982c5865912..29e15071d07 100644 --- a/crates/sui-e2e-tests/tests/shared_objects_version_tests.rs +++ b/crates/sui-e2e-tests/tests/shared_objects_version_tests.rs @@ -2,15 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 use std::path::PathBuf; + use sui_macros::*; use sui_test_transaction_builder::publish_package; -use sui_types::base_types::{ObjectID, ObjectRef, SequenceNumber}; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus}; -use sui_types::object::{Owner, OBJECT_START_VERSION}; -use sui_types::transaction::{CallArg, ObjectArg}; -use sui_types::SUI_FRAMEWORK_ADDRESS; +use sui_types::{ + base_types::{ObjectID, ObjectRef, SequenceNumber}, + effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + execution_status::{ExecutionFailureStatus, ExecutionStatus}, + object::{Owner, OBJECT_START_VERSION}, + transaction::{CallArg, ObjectArg}, + SUI_FRAMEWORK_ADDRESS, +}; use test_cluster::{TestCluster, TestClusterBuilder}; #[sim_test] @@ -90,10 +92,11 @@ async fn shared_object_not_found() { let env = TestEnvironment::new().await; let nonexistent_id = ObjectID::random(); let initial_shared_seq = SequenceNumber::from_u64(42); - assert!(env - .increment_shared_counter(nonexistent_id, initial_shared_seq) - .await - .is_err()); + assert!( + env.increment_shared_counter(nonexistent_id, initial_shared_seq) + .await + .is_err() + ); } fn is_shared_at(owner: &Owner, version: SequenceNumber) -> bool { diff --git a/crates/sui-e2e-tests/tests/simulator_tests.rs b/crates/sui-e2e-tests/tests/simulator_tests.rs index 45f909c6672..68b4ef4240c 100644 --- a/crates/sui-e2e-tests/tests/simulator_tests.rs +++ b/crates/sui-e2e-tests/tests/simulator_tests.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::collections::{HashMap, HashSet}; + use futures::{ stream::{FuturesOrdered, FuturesUnordered}, StreamExt, }; @@ -10,16 +12,14 @@ use rand::{ rngs::OsRng, Rng, }; -use std::collections::{HashMap, HashSet}; use sui_core::authority::EffectsNotifyRead; +use sui_macros::*; use sui_protocol_config::ProtocolConfig; use sui_test_transaction_builder::make_transfer_sui_transaction; +use test_cluster::TestClusterBuilder; use tokio::time::{sleep, Duration, Instant}; use tracing::{debug, trace}; -use sui_macros::*; -use test_cluster::TestClusterBuilder; - async fn make_fut(i: usize) -> usize { let count_dist = Uniform::from(1..5); let sleep_dist = Uniform::from(1000..10000); @@ -123,14 +123,15 @@ async fn test_hash_collections() { debug!("final rng state: {}", OsRng.gen::<u64>()); } -// Test that starting up a network + fullnode, and sending one transaction through that network is -// repeatable and deterministic. +// Test that starting up a network + fullnode, and sending one transaction +// through that network is repeatable and deterministic. #[sim_test(check_determinism)] async fn test_net_determinism() { let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { - // TODO: this test fails due to some non-determinism caused by submitting messages to - // consensus.
It does not appear to be caused by this feature itself, so I'm disabling this - // until I have time to debug further. + // TODO: this test fails due to some non-determinism caused by submitting + // messages to consensus. It does not appear to be caused by this + // feature itself, so I'm disabling this until I have time to debug + // further. config.set_enable_jwk_consensus_updates_for_testing(false); config.set_random_beacon_for_testing(false); config diff --git a/crates/sui-e2e-tests/tests/snapshot_tests.rs b/crates/sui-e2e-tests/tests/snapshot_tests.rs index 575a7a93694..d6ef43f15cf 100644 --- a/crates/sui-e2e-tests/tests/snapshot_tests.rs +++ b/crates/sui-e2e-tests/tests/snapshot_tests.rs @@ -60,16 +60,19 @@ async fn basic_read_cmd_snapshot_tests() -> Result<(), anyhow::Error> { let cmds = vec![ "sui client objects {ME}", // valid addr - "sui client objects 0x0000000000000000000000000000000000000000000000000000000000000000", // empty addr + "sui client objects 0x0000000000000000000000000000000000000000000000000000000000000000", /* empty addr */ "sui client object 0x5", // valid object "sui client object 0x5 --bcs", // valid object BCS // Simtest object IDs are not stable so these object IDs may or may not exist currently -- // commenting them out for now. - // "sui client object 0x3b5121a0603ef7ab4cb57827fceca17db3338ef2cd76126cc1523b681df27cee", // valid object - // "sui client object 0x3b5121a0603ef7ab4cb57827fceca17db3338ef2cd76126cc1523b681df27cee --bcs", // valid object BCS - "sui client object 0x0000000000000000000000000000000000000000000000000000000000000000", // non-existent object + // "sui client object 0x3b5121a0603ef7ab4cb57827fceca17db3338ef2cd76126cc1523b681df27cee", + // // valid object "sui client object + // 0x3b5121a0603ef7ab4cb57827fceca17db3338ef2cd76126cc1523b681df27cee --bcs", // valid + // object BCS + "sui client object 0x0000000000000000000000000000000000000000000000000000000000000000", /* non-existent object */ "sui client tx-block Duwr9uSk9ZvAndEa8oDHunx345i6oyrp3e78MYHVAbYdv", // valid tx digest - "sui client tx-block EgMTHQygMi6SRsBqrPHAEKZCNrpShXurCp9rcb9qbSg8", // non-existent tx digest + "sui client tx-block EgMTHQygMi6SRsBqrPHAEKZCNrpShXurCp9rcb9qbSg8", /* non-existent tx + * digest */ ]; assert_json_snapshot!(run_one(cmds, context).await?); Ok(()) diff --git a/crates/sui-e2e-tests/tests/transaction_orchestrator_tests.rs b/crates/sui-e2e-tests/tests/transaction_orchestrator_tests.rs index 981ed7a25ab..9e2c90cecfc 100644 --- a/crates/sui-e2e-tests/tests/transaction_orchestrator_tests.rs +++ b/crates/sui-e2e-tests/tests/transaction_orchestrator_tests.rs @@ -1,23 +1,27 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{sync::Arc, time::Duration}; + use prometheus::Registry; -use std::sync::Arc; -use std::time::Duration; -use sui_core::authority::EffectsNotifyRead; -use sui_core::authority_client::NetworkAuthorityClient; -use sui_core::transaction_orchestrator::TransactiondOrchestrator; +use sui_core::{ + authority::EffectsNotifyRead, authority_client::NetworkAuthorityClient, + transaction_orchestrator::TransactiondOrchestrator, +}; use sui_macros::sim_test; -use sui_storage::key_value_store::TransactionKeyValueStore; -use sui_storage::key_value_store_metrics::KeyValueStoreMetrics; +use sui_storage::{ + key_value_store::TransactionKeyValueStore, key_value_store_metrics::KeyValueStoreMetrics, +}; use sui_test_transaction_builder::{ batch_make_transfer_transactions, make_transfer_sui_transaction, }; -use sui_types::quorum_driver_types::{ - ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, - FinalizedEffects, QuorumDriverError, +use sui_types::{ + quorum_driver_types::{ + ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, + FinalizedEffects, QuorumDriverError, + }, + transaction::Transaction, }; -use sui_types::transaction::Transaction; use test_cluster::TestClusterBuilder; use tokio::time::timeout; use tracing::info; @@ -89,11 +93,13 @@ async fn test_blocking_execution() -> Result<(), anyhow::Error> { handle.state(), )); - assert!(handle - .state() - .get_executed_transaction_and_effects(digest, kv_store) - .await - .is_ok()); + assert!( + handle + .state() + .get_executed_transaction_and_effects(digest, kv_store) + .await + .is_ok() + ); Ok(()) } @@ -178,9 +184,9 @@ async fn test_fullnode_wal_log() -> Result<(), anyhow::Error> { .await .unwrap(); - // TODO: wal erasing is done in the loop handling effects, so may have some delay. - // However, once the refactoring is completed the wal removal will be done before - // response is returned and we will not need the sleep. + // TODO: wal erasing is done in the loop handling effects, so may have some + // delay. However, once the refactoring is completed the wal removal will be + // done before response is returned and we will not need the sleep. tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; // The tx should be erased in wal log. let pending_txes = orchestrator.load_all_pending_transactions(); @@ -203,9 +209,10 @@ async fn test_transaction_orchestrator_reconfig() { test_cluster.trigger_reconfiguration().await; - // After epoch change on a fullnode, there could be a delay before the transaction orchestrator - // updates its committee (happens asynchronously after receiving a reconfig message). Use a timeout - // to make the test more reliable. + // After epoch change on a fullnode, there could be a delay before the + // transaction orchestrator updates its committee (happens asynchronously + // after receiving a reconfig message). Use a timeout to make the test more + // reliable. timeout(Duration::from_secs(5), async { loop { let epoch = test_cluster.fullnode_handle.sui_node.with(|node| { diff --git a/crates/sui-e2e-tests/tests/transfer_to_object_tests.rs b/crates/sui-e2e-tests/tests/transfer_to_object_tests.rs index 0feb6476972..b3f83652f9e 100644 --- a/crates/sui-e2e-tests/tests/transfer_to_object_tests.rs +++ b/crates/sui-e2e-tests/tests/transfer_to_object_tests.rs @@ -1,17 +1,18 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::HashSet; -use std::path::PathBuf; +use std::{collections::HashSet, path::PathBuf}; + use sui_core::authority_client::AuthorityAPI; use sui_macros::*; use sui_test_transaction_builder::publish_package; -use sui_types::base_types::{ObjectID, ObjectRef}; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::effects::{TransactionEffects, TransactionEvents}; -use sui_types::error::SuiError; -use sui_types::object::Owner; -use sui_types::transaction::{CallArg, ObjectArg, Transaction}; +use sui_types::{ + base_types::{ObjectID, ObjectRef}, + effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + error::SuiError, + object::Owner, + transaction::{CallArg, ObjectArg, Transaction}, +}; use test_cluster::{TestCluster, TestClusterBuilder}; #[sim_test] @@ -126,8 +127,8 @@ async fn delete_of_object_with_reconfiguration_receive_of_new_parent_and_old_chi } fn get_parent_and_child(created: Vec<(ObjectRef, Owner)>) -> (ObjectRef, ObjectRef) { - // make sure there is an object with an `AddressOwner` who matches the object ID of another - // object. + // make sure there is an object with an `AddressOwner` who matches the object ID + // of another object. let created_addrs: HashSet<_> = created.iter().map(|((i, _, _), _)| i).collect(); let (child, parent_id) = created .iter() @@ -210,11 +211,7 @@ impl TestEnvironment { .iter() .find_map( |(oref, _)| { - if oref.0 == child.0 { - Some(*oref) - } else { - None - } + if oref.0 == child.0 { Some(*oref) } else { None } }, ) .unwrap(); diff --git a/crates/sui-e2e-tests/tests/zklogin_tests.rs b/crates/sui-e2e-tests/tests/zklogin_tests.rs index 392f015c31d..ff4602d87ae 100644 --- a/crates/sui-e2e-tests/tests/zklogin_tests.rs +++ b/crates/sui-e2e-tests/tests/zklogin_tests.rs @@ -2,22 +2,23 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use shared_crypto::intent::Intent; -use shared_crypto::intent::IntentMessage; +use shared_crypto::intent::{Intent, IntentMessage}; use sui_core::authority_client::AuthorityAPI; use sui_macros::sim_test; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::Signature; -use sui_types::error::{SuiError, SuiResult}; -use sui_types::signature::GenericSignature; -use sui_types::transaction::Transaction; -use sui_types::utils::load_test_vectors; -use sui_types::utils::{ - get_legacy_zklogin_user_address, get_zklogin_user_address, make_zklogin_tx, +use sui_types::{ + base_types::SuiAddress, + crypto::Signature, + error::{SuiError, SuiResult}, + signature::GenericSignature, + transaction::Transaction, + utils::{ + get_legacy_zklogin_user_address, get_zklogin_user_address, load_test_vectors, + make_zklogin_tx, + }, + zk_login_authenticator::ZkLoginAuthenticator, + SUI_AUTHENTICATOR_STATE_OBJECT_ID, }; -use sui_types::zk_login_authenticator::ZkLoginAuthenticator; -use sui_types::SUI_AUTHENTICATOR_STATE_OBJECT_ID; use test_cluster::TestClusterBuilder; async fn do_zklogin_test(address: SuiAddress, legacy: bool) -> SuiResult { @@ -128,10 +129,12 @@ async fn zklogin_end_to_end_test() { )); let signed_txn_with_wrong_max_epoch = Transaction::from_generic_sig_data(tx_data, vec![generic_sig]); - assert!(context - .execute_transaction_may_fail(signed_txn_with_wrong_max_epoch) - .await - .is_err()); + assert!( + context + .execute_transaction_may_fail(signed_txn_with_wrong_max_epoch) + .await + .is_err() + ); } #[sim_test] @@ -177,10 +180,11 @@ async fn test_expired_zklogin_sig() { let res = context .execute_transaction_may_fail(signed_txn_expired) .await; - assert!(res - .unwrap_err() - .to_string() - .contains("ZKLogin expired at epoch 2")); + assert!( + res.unwrap_err() + .to_string() + .contains("ZKLogin expired at epoch 2") + ); } #[sim_test] @@ -192,10 +196,11 @@ async fn test_auth_state_creation() { .with_default_jwks() .build() .await; - // Wait until we are in an epoch that has zklogin enabled, but the auth state object is not - // created yet. + // Wait until we are in an epoch that has zklogin enabled, but the auth state + // object is not created yet. test_cluster.wait_for_protocol_version(24.into()).await; - // Now wait until the auth state object is created, ie. AuthenticatorStateUpdate transaction happened. + // Now wait until the auth state object is created, ie. AuthenticatorStateUpdate + // transaction happened. test_cluster.wait_for_authenticator_state_update().await; } @@ -212,19 +217,20 @@ async fn test_create_authenticator_state_object() { // no node has the authenticator state object yet for h in &handles { h.with(|node| { - assert!(node - .state() - .get_cache_reader() - .get_latest_object_ref_or_tombstone(SUI_AUTHENTICATOR_STATE_OBJECT_ID) - .unwrap() - .is_none()); + assert!( + node.state() + .get_cache_reader() + .get_latest_object_ref_or_tombstone(SUI_AUTHENTICATOR_STATE_OBJECT_ID) + .unwrap() + .is_none() + ); }); } // wait until feature is enabled test_cluster.wait_for_protocol_version(24.into()).await; - // wait until next epoch - authenticator state object is created at the end of the first epoch - // in which it is supported. + // wait until next epoch - authenticator state object is created at the end of + // the first epoch in which it is supported. 
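Both tests above follow the same shape: wait until the protocol version with the feature is reached, then wait for the object that the upgrade creates at the next epoch boundary. A generic polling helper in that spirit (the predicate, intervals, and names are made up for illustration):

```rust
use std::time::Duration;
use tokio::time::{sleep, timeout};

// Poll a predicate until it holds or a deadline passes, in the spirit of
// wait_for_protocol_version / wait_for_authenticator_state_update.
async fn wait_until(mut pred: impl FnMut() -> bool, poll: Duration, max: Duration) -> bool {
    timeout(max, async {
        while !pred() {
            sleep(poll).await;
        }
    })
    .await
    .is_ok()
}

#[tokio::main]
async fn main() {
    let mut ticks = 0;
    let reached = wait_until(
        move || {
            ticks += 1;
            ticks >= 3 // stand-in for "feature enabled and object created"
        },
        Duration::from_millis(10),
        Duration::from_secs(1),
    )
    .await;
    assert!(reached);
}
```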
test_cluster.wait_for_epoch_all_nodes(2).await; // protocol upgrade completes in epoch 1 for h in &handles { @@ -238,18 +244,22 @@ } } -// This test is intended to look for forks caused by conflicting / repeated JWK votes from -// validators. +// This test is intended to look for forks caused by conflicting / repeated JWK +// votes from validators. #[cfg(msim)] #[sim_test] async fn test_conflicting_jwks() { + use std::{ + collections::HashSet, + sync::{Arc, Mutex}, + }; + use futures::StreamExt; - use std::collections::HashSet; - use std::sync::{Arc, Mutex}; - use sui_json_rpc_types::SuiTransactionBlockEffectsAPI; - use sui_json_rpc_types::TransactionFilter; - use sui_types::base_types::ObjectID; - use sui_types::transaction::{TransactionDataAPI, TransactionKind}; + use sui_json_rpc_types::{SuiTransactionBlockEffectsAPI, TransactionFilter}; + use sui_types::{ + base_types::ObjectID, + transaction::{TransactionDataAPI, TransactionKind}, + }; use tokio::time::Duration; let test_cluster = TestClusterBuilder::new() diff --git a/crates/sui-enum-compat-util/src/lib.rs b/crates/sui-enum-compat-util/src/lib.rs index 8351449e97e..6c7c4dc06ab 100644 --- a/crates/sui-enum-compat-util/src/lib.rs +++ b/crates/sui-enum-compat-util/src/lib.rs @@ -1,8 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::io::Write; -use std::path::PathBuf; +use std::{io::Write, path::PathBuf}; pub trait EnumOrderMap { fn order_to_variant_map() -> std::collections::BTreeMap<u64, String>; } @@ -30,11 +29,16 @@ pub fn check_enum_compat_order(snapshot_file: PathBuf) { for (pos, val) in existing_map { match new_map.get(&pos) { None => { - panic!("Enum variant {} has been removed. Not allowed: enum must be backward compatible.", val); + panic!( + "Enum variant {} has been removed. Not allowed: enum must be backward compatible.", + val + ); } Some(new_val) if new_val == &val => continue, Some(new_val) => { - panic!("Enum variant {val} has been swapped with {new_val} at position {pos}. Not allowed: enum must be backward compatible."); + panic!( + "Enum variant {val} has been swapped with {new_val} at position {pos}. Not allowed: enum must be backward compatible." + ); } } } diff --git a/crates/sui-faucet/src/bin/merge_coins.rs b/crates/sui-faucet/src/bin/merge_coins.rs index 1e0c060fbff..3cb3946880e 100644 --- a/crates/sui-faucet/src/bin/merge_coins.rs +++ b/crates/sui-faucet/src/bin/merge_coins.rs @@ -1,14 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use shared_crypto::intent::Intent; use std::{str::FromStr, time::Duration}; + +use shared_crypto::intent::Intent; use sui_config::{sui_config_dir, SUI_CLIENT_CONFIG}; use sui_faucet::FaucetError; use sui_json_rpc_types::SuiTransactionBlockResponseOptions; use sui_keys::keystore::AccountKeystore; use sui_sdk::wallet_context::WalletContext; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::{base_types::ObjectID, gas_coin::GasCoin, transaction::Transaction}; +use sui_types::{ + base_types::ObjectID, gas_coin::GasCoin, quorum_driver_types::ExecuteTransactionRequestType, + transaction::Transaction, +}; use tracing::info; #[tokio::main] @@ -75,7 +78,8 @@ async fn _merge_coins(gas_coin: &str, mut wallet: WalletContext) -> Result<(), a .active_address() .map_err(|err| FaucetError::Wallet(err.to_string()))?; let client = wallet.get_client().await?; - // Pick a gas coin here that isn't in use by the faucet otherwise there will be some contention.
+ // Pick a gas coin here that isn't in use by the faucet, otherwise there will be + // some contention. let small_coins = wallet .gas_objects(active_address) .await diff --git a/crates/sui-faucet/src/faucet/mod.rs b/crates/sui-faucet/src/faucet/mod.rs index a1f890f22f8..b93ac4546fe 100644 --- a/crates/sui-faucet/src/faucet/mod.rs +++ b/crates/sui-faucet/src/faucet/mod.rs @@ -1,17 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::FaucetError; use async_trait::async_trait; use serde::{Deserialize, Serialize}; use sui_types::base_types::{ObjectID, SuiAddress, TransactionDigest}; use uuid::Uuid; +use crate::FaucetError; + mod simple_faucet; mod write_ahead_log; -pub use self::simple_faucet::SimpleFaucet; -use clap::Parser; use std::{net::Ipv4Addr, path::PathBuf}; +use clap::Parser; + +pub use self::simple_faucet::SimpleFaucet; + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct FaucetReceipt { pub sent: Vec<CoinInfo>, @@ -54,7 +57,8 @@ pub trait Faucet { amounts: &[u64], ) -> Result<FaucetReceipt, FaucetError>; - /// Send `Coin<SUI>` of the specified amount to the recipient in a batch request + /// Send `Coin<SUI>` of the specified amount to the recipient in a batch + /// request async fn batch_send( &self, id: Uuid, diff --git a/crates/sui-faucet/src/faucet/simple_faucet.rs b/crates/sui-faucet/src/faucet/simple_faucet.rs index 698aac4c20d..7b188afeebd 100644 --- a/crates/sui-faucet/src/faucet/simple_faucet.rs +++ b/crates/sui-faucet/src/faucet/simple_faucet.rs @@ -1,50 +1,51 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::faucet::write_ahead_log; -use crate::metrics::FaucetMetrics; +#[cfg(test)] +use std::collections::HashSet; +use std::{ + collections::HashMap, + fmt, + path::Path, + sync::{Arc, Weak}, +}; + use async_recursion::async_recursion; use async_trait::async_trait; use mysten_metrics::spawn_monitored_task; use prometheus::Registry; use shared_crypto::intent::Intent; -use std::collections::HashMap; -#[cfg(test)] -use std::collections::HashSet; -use std::fmt; -use std::path::Path; -use std::sync::{Arc, Weak}; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use tap::tap::TapFallible; -use tokio::sync::oneshot; -use ttl_cache::TtlCache; -use typed_store::Map; - use sui_json_rpc_types::{ OwnedObjectRef, SuiObjectDataOptions, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; use sui_keys::keystore::AccountKeystore; use sui_sdk::wallet_context::WalletContext; -use sui_types::object::Owner; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; use sui_types::{ base_types::{ObjectID, SuiAddress, TransactionDigest}, gas_coin::GasCoin, + object::Owner, + programmable_transaction_builder::ProgrammableTransactionBuilder, + quorum_driver_types::ExecuteTransactionRequestType, transaction::{Transaction, TransactionData}, }; -use tokio::sync::{ - mpsc::{self, Receiver, Sender}, - Mutex, +use tap::tap::TapFallible; +use tokio::{ + sync::{ + mpsc::{self, Receiver, Sender}, + oneshot, Mutex, + }, + time::{timeout, Duration}, }; -use tokio::time::{timeout, Duration}; use tracing::{error, info, warn}; +use ttl_cache::TtlCache; +use typed_store::Map; use uuid::Uuid; use super::write_ahead_log::WriteAheadLog; use crate::{ - BatchFaucetReceipt, BatchSendStatus, BatchSendStatusType, CoinInfo, Faucet, FaucetConfig, - FaucetError, FaucetReceipt, + faucet::write_ahead_log, metrics::FaucetMetrics, BatchFaucetReceipt, BatchSendStatus, + BatchSendStatusType, CoinInfo,
Faucet, FaucetConfig, FaucetError, FaucetReceipt, }; pub struct SimpleFaucet { @@ -66,8 +67,8 @@ pub struct SimpleFaucet { batch_transfer_shutdown: parking_lot::Mutex<Option<oneshot::Sender<()>>>, } -/// We do not just derive(Debug) because WalletContext and the WriteAheadLog do not implement Debug / are also hard -/// to implement Debug. +/// We do not just derive(Debug) because WalletContext and the WriteAheadLog do +/// not implement Debug / are also hard to implement Debug. impl fmt::Debug for SimpleFaucet { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SimpleFaucet") @@ -127,14 +128,16 @@ impl SimpleFaucet { let (sender, mut receiver) = mpsc::channel::<(Uuid, SuiAddress, Vec<u64>)>(config.max_request_queue_length as usize); - // This is to handle the case where there is only 1 coin, we want it to go to the normal queue + // This is to handle the case where there is only 1 coin, we want it to go to + // the normal queue let split_point = if coins.len() > 10 { coins.len() / 2 } else { coins.len() }; - // Put half of the coins in the old faucet impl queue, and put half in the other queue for batch coins. - // In the test cases we create an account with 5 coins so we just let this run with a minimum of 5 coins + // Put half of the coins in the old faucet impl queue, and put half in the other + // queue for batch coins. In the test cases we create an account with 5 + // coins so we just let this run with a minimum of 5 coins for (coins_processed, coin) in coins.iter().enumerate() { let coin_id = *coin.id(); if let Some(write_ahead_log::Entry { @@ -184,7 +187,8 @@ impl SimpleFaucet { request_producer: sender, batch_request_size: config.batch_request_size, // Max faucet requests times 10 minutes worth of requests to hold onto at max. - // Note that the cache holds onto a Uuid for [ttl_expiration] in from every update in status with both INPROGRESS and SUCCEEDED + // Note that the cache holds onto a Uuid for [ttl_expiration] from every update in + // status with both INPROGRESS and SUCCEEDED task_id_cache: TtlCache::new(config.max_request_per_second as usize * 60 * 10).into(), ttl_expiration: config.ttl_expiration, coin_amount: config.amount, @@ -219,9 +223,9 @@ impl SimpleFaucet { } } }); - // Retrying all the pending transactions from the WAL, before continuing. Ignore return - // values -- if the executions failed, the pending coins will simply remain in the WAL, and - // not recycled. + // Retrying all the pending transactions from the WAL, before continuing. + // Ignore return values -- if the executions failed, the pending coins + // will simply remain in the WAL, and not recycled. futures::future::join_all(pending.into_iter().map(|(uuid, recipient, coin_id, tx)| { arc_faucet.sign_and_execute_txn(uuid, recipient, coin_id, tx, false) })) @@ -230,12 +234,13 @@ Ok(arc_faucet) } - /// Take the consumer lock and pull a Coin ID from the queue, without checking whether it is - /// valid or not. + /// Take the consumer lock and pull a Coin ID from the queue, without + /// checking whether it is valid or not. async fn pop_gas_coin(&self, uuid: Uuid) -> Option<ObjectID> { - // If the gas candidate queue is exhausted, the request will be suspended indefinitely until - // a producer puts in more candidate gas objects. At the same time, other requests will be - // blocked by the lock acquisition as well. + // If the gas candidate queue is exhausted, the request will be suspended + // indefinitely until a producer puts in more candidate gas objects.
At + the same time, other requests will be blocked by the lock acquisition + // as well. let Ok(mut consumer) = tokio::time::timeout(LOCK_TIMEOUT, self.consumer.lock()).await else { error!(?uuid, "Timeout when getting consumer lock"); @@ -256,12 +261,13 @@ Some(coin) } - /// Take the consumer lock and pull a Coin ID from the queue, without checking whether it is - /// valid or not. + /// Take the consumer lock and pull a Coin ID from the queue, without + /// checking whether it is valid or not. async fn pop_gas_coin_for_batch(&self, uuid: Uuid) -> Option<ObjectID> { - // If the gas candidate queue is exhausted, the request will be suspended indefinitely until - // a producer puts in more candidate gas objects. At the same time, other requests will be - // blocked by the lock acquisition as well. + // If the gas candidate queue is exhausted, the request will be suspended + // indefinitely until a producer puts in more candidate gas objects. At + // the same time, other requests will be blocked by the lock acquisition + // as well. let Ok(mut batch_consumer) = tokio::time::timeout(LOCK_TIMEOUT, self.batch_consumer.lock()).await else { @@ -283,8 +289,8 @@ Some(coin) } - /// Pulls a coin from the queue and makes sure it is fit for use (belongs to the faucet, has - /// sufficient balance). + /// Pulls a coin from the queue and makes sure it is fit for use (belongs to + /// the faucet, has sufficient balance). async fn prepare_gas_coin( &self, total_amount: u64, @@ -404,8 +410,9 @@ Ok(()) } - /// Sign an already created transaction (in `tx_data`) and keep trying to execute it until - /// fullnode returns a definite response or a timeout is hit. + /// Sign an already created transaction (in `tx_data`) and keep trying to + /// execute it until fullnode returns a definite response or a timeout + /// is hit. async fn sign_and_execute_txn( &self, uuid: Uuid, @@ -464,15 +471,16 @@ } Ok(result) => { - // Note: we do not recycle gas unless the transaction was successful - the faucet - // may run out of available coins due to errors, but this allows a human to - // intervene and attempt to fix things. If we re-use coins that had errors, we may - // lock them permanently. - - // It's important to remove the coin from the WAL before recycling it, to avoid a - // race with the next request served with this coin. If this operation fails, log - // it and continue so we don't lose access to the coin -- the worst that can happen - // is that the WAL contains a stale entry. + // Note: we do not recycle gas unless the transaction was successful - the + // faucet may run out of available coins due to errors, but this + // allows a human to intervene and attempt to fix things. If we + // re-use coins that had errors, we may lock them permanently. + + // It's important to remove the coin from the WAL before recycling it, to avoid + // a race with the next request served with this coin. If this + // operation fails, log it and continue so we don't lose access + // to the coin -- the worst that can happen is that the WAL + // contains a stale entry.
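The two comments above fix the ordering the faucet relies on: a successful send commits (removes) the WAL entry first and only then recycles the coin, while a failed send leaves the coin parked in the WAL rather than recycling it. A compact sketch of that discipline with plain in-memory types (ids and names are illustrative):

```rust
use std::collections::HashMap;

// Toy write-ahead log: coin id -> in-flight request description.
#[derive(Default)]
struct Wal {
    pending: HashMap<u64, String>,
}

#[derive(Default)]
struct Faucet {
    wal: Wal,
    gas_queue: Vec<u64>,
}

impl Faucet {
    fn serve(&mut self, coin: u64, request: &str, tx_succeeded: bool) {
        self.wal.pending.insert(coin, request.to_string()); // reserve
        if tx_succeeded {
            self.wal.pending.remove(&coin); // commit first...
            self.gas_queue.push(coin); // ...then recycle the coin
        }
        // On failure the coin stays in the WAL for a retry task or a human
        // to inspect, instead of being recycled in an unknown state.
    }
}

fn main() {
    let mut f = Faucet::default();
    f.serve(1, "req-a", true);
    assert!(f.wal.pending.is_empty() && f.gas_queue == [1]);
    f.serve(2, "req-b", false);
    assert!(f.wal.pending.contains_key(&2) && !f.gas_queue.contains(&2));
}
```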
if self.wal.lock().await.commit(coin_id).is_err() { error!(?coin_id, "Failed to remove coin from WAL"); } @@ -700,9 +708,11 @@ impl SimpleFaucet { number_of_coins, created ))); } - assert!(created - .iter() - .all(|created_coin_owner_ref| created_coin_owner_ref.owner == recipient)); + assert!( + created + .iter() + .all(|created_coin_owner_ref| created_coin_owner_ref.owner == recipient) + ); let coin_ids: Vec<ObjectID> = created .iter() .map(|created_coin_owner_ref| created_coin_owner_ref.reference.object_id) @@ -719,7 +729,8 @@ ) -> Result<TransactionData, anyhow::Error> { let gas_payment = self.wallet.get_object_ref(coin_id).await?; let gas_price = self.wallet.get_reference_gas_price().await?; - // TODO (Jian): change to make this more efficient by changing impl to one Splitcoin, and many TransferObjects + // TODO (Jian): change to make this more efficient by changing impl to one + // Splitcoin, and many TransferObjects let pt = { let mut builder = ProgrammableTransactionBuilder::new(); for (_uuid, recipient, amounts) in batch_requests { @@ -743,7 +754,8 @@ res: SuiTransactionBlockResponse, requests: Vec<(Uuid, SuiAddress, Vec<u64>)>, ) -> Result<(), FaucetError> { - // Grab the list of created coins and turn it into a map of destination SuiAddress to Vec + // Grab the list of created coins and turn it into a map of destination + // SuiAddress to Vec let created = res .effects .ok_or_else(|| { @@ -767,8 +779,8 @@ .push(coin_obj_ref); }); - // Assert that the number of times a sui_address occurs is the number of times the coins - // come up in the vector. + // Assert that the number of times a sui_address occurs is the number of times + // the coins come up in the vector. let mut request_count: HashMap<SuiAddress, u64> = HashMap::new(); // Acquire lock and update all of the request Uuids let mut task_map = self.task_id_cache.lock().await; @@ -777,7 +789,8 @@ // Get or insert sui_address into request count let index = *request_count.entry(addy).or_insert(0); - // The address coin map should contain the coins transferred in the given request. + // The address coin map should contain the coins transferred in the given + // request. let coins_created_for_address = address_coins_map.entry(addy).or_default(); if number_of_coins as u64 + index > coins_created_for_address.len() as u64 { @@ -1030,8 +1043,9 @@ pub async fn batch_transfer_gases( .await .map_err(FaucetError::internal)?; - // Because we are batching transactions to faucet, we will just not use a real recipient for - // sui address, and instead just fill it with the ZERO address. + // Because we are batching transactions to faucet, we will just not use a real + // recipient for sui address, and instead just fill it with the + // ZERO address. let recipient = SuiAddress::ZERO; { // Register the intention to send this transaction before we send it, so that if @@ -1211,7 +1225,8 @@ mod tests { .map(|res| res.unwrap()) .collect::<Vec<_>>(); - // After all transfer requests settle, we still have the original candidates gas in queue. + // After all transfer requests settle, we still have the original candidates gas + // in queue. let available = faucet.metrics.total_available_coins.get(); faucet.shutdown_batch_send_task(); @@ -1303,8 +1318,8 @@ mod tests { .map(|res| res.unwrap()) .collect::<Vec<_>>(); - // All requests are submitted and picked up by the same batch, so one success in the test - // will guarantee all success.
+ // All requests are submitted and picked up by the same batch, so one success in + // the test will guarantee all success. if status_results[0].status == BatchSendStatusType::SUCCEEDED { break; } @@ -1322,8 +1337,8 @@ async fn test_ttl_cache_expires_after_duration() { let test_cluster = TestClusterBuilder::new().build().await; let context = test_cluster.wallet; - // We set it to a fast expiration for the purposes of testing and so these requests don't have time to pass - // through the batch process. + // We set it to a fast expiration for the purposes of testing and so these + // requests don't have time to pass through the batch process. let config = FaucetConfig { ttl_expiration: 1, ..Default::default() }; @@ -1417,7 +1432,8 @@ let number_of_coins = gases.len(); let amounts = &vec![1; number_of_coins]; - // We traverse the list twice, which must trigger the transferred gas to be kicked out + // We traverse the list twice, which must trigger the transferred gas to be + // kicked out futures::future::join_all((0..2).map(|_| { faucet.send( Uuid::new_v4(), @@ -1515,7 +1531,8 @@ let gases = get_current_gases(address, &mut context).await; // split out a coin that has a very small balance such that - // this coin will be not used later on. This is the new default amount for faucet due to gas changes + // this coin will not be used later on. This is the new default amount for + // faucet due to gas changes let config = FaucetConfig::default(); let tiny_value = (config.num_coins as u64 * config.amount) + 1; let res = SuiClientCommands::SplitCoin { @@ -1566,7 +1583,8 @@ // Ask for a value higher than tiny coin + DEFAULT_GAS_COMPUTATION_BUCKET let number_of_coins = gases.len(); let amounts = &vec![tiny_value + 1; number_of_coins]; - // We traverse the list ten times, which must trigger the tiny gas to be examined and then discarded + // We traverse the list ten times, which must trigger the tiny gas to be + // examined and then discarded futures::future::join_all((0..10).map(|_| { faucet.send( Uuid::new_v4(), @@ -1601,7 +1619,8 @@ let gases = get_current_gases(address, &mut context).await; let config = FaucetConfig::default(); - // The coin that is split off stays because we don't try to refresh the coin vector + // The coin that is split off stays because we don't try to refresh the coin + // vector let reasonable_value = (config.num_coins as u64 * config.amount) * 10; SuiClientCommands::SplitCoin { coin_id: *gases[0].id(), @@ -1632,7 +1651,8 @@ .expect("transfer failed"); } - // Assert that the coins were transferred away successfully to destination address + // Assert that the coins were transferred away successfully to destination + // address let gases = get_current_gases(destination_address, &mut context).await; assert!(!gases.is_empty()); @@ -1705,7 +1725,8 @@ .expect("transfer failed"); } - // Assert that the coins were transferred away successfully to destination address + // Assert that the coins were transferred away successfully to destination + // address let gases = get_current_gases(destination_address, &mut context).await; assert!(!gases.is_empty()); @@ -1868,8 +1889,8 @@ .map(|res| res.unwrap()) .collect::<Vec<_>>(); - // All requests are submitted and picked up by the same batch, so one success in the test - // will guarantee all success.
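The TTL-cache test above only needs entries to vanish after the configured lifetime. The real code uses the `ttl_cache` crate; a hand-rolled stand-in for the same behavior, just to show the concept:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

// Entries silently expire after a fixed duration, which is what the
// 1-second-TTL test relies on. Keys/values here are illustrative.
struct TtlMap<V> {
    ttl: Duration,
    entries: HashMap<u64, (Instant, V)>,
}

impl<V> TtlMap<V> {
    fn new(ttl: Duration) -> Self {
        Self { ttl, entries: HashMap::new() }
    }
    fn insert(&mut self, key: u64, value: V) {
        self.entries.insert(key, (Instant::now(), value));
    }
    fn get(&self, key: u64) -> Option<&V> {
        self.entries
            .get(&key)
            .filter(|(at, _)| at.elapsed() < self.ttl)
            .map(|(_, v)| v)
    }
}

fn main() {
    let mut statuses = TtlMap::new(Duration::from_millis(50));
    statuses.insert(42, "INPROGRESS");
    assert!(statuses.get(42).is_some());
    std::thread::sleep(Duration::from_millis(60));
    assert!(statuses.get(42).is_none()); // expired, as the TTL test expects
}
```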
if status_results[0].status == BatchSendStatusType::SUCCEEDED { break; } diff --git a/crates/sui-faucet/src/faucet/write_ahead_log.rs b/crates/sui-faucet/src/faucet/write_ahead_log.rs index e05a908fb5c..87784f394bd 100644 --- a/crates/sui-faucet/src/faucet/write_ahead_log.rs +++ b/crates/sui-faucet/src/faucet/write_ahead_log.rs @@ -4,23 +4,26 @@ use std::path::Path; use serde::{Deserialize, Serialize}; -use sui_types::base_types::SuiAddress; -use sui_types::{base_types::ObjectID, transaction::TransactionData}; -use typed_store::traits::{TableSummary, TypedStoreDebug}; -use typed_store::Map; -use typed_store::{rocks::DBMap, TypedStoreError}; - +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + transaction::TransactionData, +}; use tracing::info; +use typed_store::{ + rocks::DBMap, + traits::{TableSummary, TypedStoreDebug}, + Map, TypedStoreError, +}; use typed_store_derive::DBMapUtils; use uuid::Uuid; -/// Persistent log of transactions paying out sui from the faucet, keyed by the coin serving the -/// request. Transactions are expected to be written to the log before they are sent to full-node, -/// and removed after receiving a response back, before the coin becomes available for subsequent -/// writes. +/// Persistent log of transactions paying out sui from the faucet, keyed by the +/// coin serving the request. Transactions are expected to be written to the +/// log before they are sent to full-node, and removed after receiving a +/// response back, before the coin becomes available for subsequent writes. /// -/// This allows the faucet to go down and back up, and not forget which requests were in-flight that -/// it needs to confirm succeeded or failed. +/// This allows the faucet to go down and back up, and not forget which requests +/// were in-flight that it needs to confirm succeeded or failed. #[derive(DBMapUtils, Clone)] pub struct WriteAheadLog { pub log: DBMap, @@ -46,8 +49,9 @@ impl WriteAheadLog { ) } - /// Mark `coin` as reserved for transaction `tx` sending coin to `recipient`. Fails if `coin` is - /// already in the WAL pointing to an existing transaction. + /// Mark `coin` as reserved for transaction `tx` sending coin to + /// `recipient`. Fails if `coin` is already in the WAL pointing to an + /// existing transaction. pub(crate) fn reserve( &mut self, uuid: Uuid, @@ -76,15 +80,16 @@ impl WriteAheadLog { ) } - /// Check whether `coin` has a pending transaction in the WAL. Returns `Ok(Some(entry))` if a - /// pending transaction exists, `Ok(None)` if not, and `Err(_)` if there was an internal error - /// accessing the WAL. + /// Check whether `coin` has a pending transaction in the WAL. Returns + /// `Ok(Some(entry))` if a pending transaction exists, `Ok(None)` if + /// not, and `Err(_)` if there was an internal error accessing the WAL. pub(crate) fn reclaim(&self, coin: ObjectID) -> Result, TypedStoreError> { match self.log.get(&coin) { Ok(entry) => Ok(entry), Err(TypedStoreError::SerializationError(_)) => { - // Remove bad log from the store, so we don't crash on start up, this can happen if we update the - // WAL Entry and have some leftover Entry from the WAL. + // Remove bad log from the store, so we don't crash on start up, this can happen + // if we update the WAL Entry and have some leftover Entry from + // the WAL. self.log .remove(&coin) .expect("Coin: {coin:?} unable to be removed from log."); @@ -94,8 +99,8 @@ impl WriteAheadLog { } } - /// Indicate that the transaction in flight for `coin` has landed, and the entry in the WAL can - /// be removed. 
+ /// Indicate that the transaction in flight for `coin` has landed, and the + /// entry in the WAL can be removed. pub(crate) fn commit(&mut self, coin: ObjectID) -> Result<(), TypedStoreError> { self.log.remove(&coin) } diff --git a/crates/sui-faucet/src/lib.rs b/crates/sui-faucet/src/lib.rs index 0b1c4dd4c9d..8d3697fbce1 100644 --- a/crates/sui-faucet/src/lib.rs +++ b/crates/sui-faucet/src/lib.rs @@ -8,9 +8,8 @@ mod requests; mod responses; pub mod metrics_layer; -pub use metrics_layer::*; - pub use errors::FaucetError; pub use faucet::*; +pub use metrics_layer::*; pub use requests::*; pub use responses::*; diff --git a/crates/sui-faucet/src/main.rs b/crates/sui-faucet/src/main.rs index a8899b54fdb..1acb75623f7 100644 --- a/crates/sui-faucet/src/main.rs +++ b/crates/sui-faucet/src/main.rs @@ -1,6 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + borrow::Cow, + env, + net::{IpAddr, SocketAddr}, + sync::Arc, + time::Duration, +}; + use axum::{ error_handling::HandleErrorLayer, extract::Path, @@ -12,13 +20,6 @@ use axum::{ use clap::Parser; use http::Method; use mysten_metrics::spawn_monitored_task; -use std::env; -use std::{ - borrow::Cow, - net::{IpAddr, SocketAddr}, - sync::Arc, - time::Duration, -}; use sui_config::{sui_config_dir, SUI_CLIENT_CONFIG}; use sui_faucet::{ BatchFaucetResponse, BatchStatusFaucetResponse, Faucet, FaucetConfig, FaucetError, @@ -112,7 +113,8 @@ async fn main() -> Result<(), anyhow::Error> { spawn_monitored_task!(async move { info!("Starting task to clear WAL."); loop { - // Every config.wal_retry_interval (Default: 300 seconds) we try to clear the wal coins + // Every config.wal_retry_interval (Default: 300 seconds) we try to clear the + // wal coins tokio::time::sleep(Duration::from_secs(wal_retry_interval)).await; app_state.faucet.retry_wal_coins().await.unwrap(); } @@ -177,7 +179,8 @@ async fn batch_request_gas( } } } else { - // TODO (jian): remove this feature gate when batch has proven to be baked long enough + // TODO (jian): remove this feature gate when batch has proven to be baked long + // enough info!(uuid = ?id, "Falling back to v1 implementation"); let result = spawn_monitored_task!(async move { state @@ -267,7 +270,7 @@ async fn request_gas( Json(FaucetResponse::from(FaucetError::Internal( "Input Error.".to_string(), ))), - ) + ); } }; match result { diff --git a/crates/sui-faucet/src/metrics_layer.rs b/crates/sui-faucet/src/metrics_layer.rs index 6b2084f00c6..bd2d29f8f99 100644 --- a/crates/sui-faucet/src/metrics_layer.rs +++ b/crates/sui-faucet/src/metrics_layer.rs @@ -15,8 +15,8 @@ use tracing::{error, info, warn}; use crate::metrics::RequestMetrics; -/// Tower Layer for tracking metrics in Prometheus related to number, success-rate and latency of -/// requests running through the service. +/// Tower Layer for tracking metrics in Prometheus related to number, +/// success-rate and latency of requests running through the service. #[derive(Clone)] pub struct RequestMetricsLayer { metrics: Arc<RequestMetrics>, } @@ -147,7 +147,8 @@ impl Drop for MetricsGuard { fn drop(&mut self) { self.metrics.current_requests_in_flight.dec(); - // Request was still in flight when the guard was dropped, implying the client disconnected. + // Request was still in flight when the guard was dropped, implying the client + // disconnected.
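The MetricsGuard hunk above relies on an RAII pattern: the guard decrements the in-flight gauge when it is dropped, and a histogram timer that is still live at drop time is the signal that the request never completed (i.e. the client disconnected). A self-contained sketch of that pattern using the prometheus crate; the metric and type names here are illustrative stand-ins, not the faucet's actual definitions.

use prometheus::{Histogram, HistogramOpts, HistogramTimer, IntGauge, Registry};

// Illustrative guard: decrements the in-flight gauge and records latency on drop.
struct RequestGuard {
    in_flight: IntGauge,
    timer: Option<HistogramTimer>,
}

impl RequestGuard {
    fn new(in_flight: IntGauge, latency: &Histogram) -> Self {
        in_flight.inc();
        Self { in_flight, timer: Some(latency.start_timer()) }
    }

    // On success the caller stops the timer explicitly...
    fn succeed(mut self) {
        if let Some(timer) = self.timer.take() {
            timer.observe_duration();
        }
    }
}

impl Drop for RequestGuard {
    fn drop(&mut self) {
        self.in_flight.dec();
        // ...otherwise the timer is still live here, meaning the request never
        // completed (e.g. the client disconnected), and we still record it.
        if let Some(timer) = self.timer.take() {
            timer.observe_duration();
        }
    }
}

fn main() {
    let registry = Registry::new();
    let in_flight = IntGauge::new("in_flight", "requests in flight").unwrap();
    let latency = Histogram::with_opts(HistogramOpts::new("latency", "request latency")).unwrap();
    registry.register(Box::new(in_flight.clone())).unwrap();
    registry.register(Box::new(latency.clone())).unwrap();

    let guard = RequestGuard::new(in_flight.clone(), &latency);
    guard.succeed(); // dropped without a live timer: no disconnect is recorded
    assert_eq!(in_flight.get(), 0);
}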
if let Some(timer) = self.timer.take() { let elapsed = timer.stop_and_record(); self.metrics.total_requests_disconnected.inc(); diff --git a/crates/sui-faucet/src/responses.rs b/crates/sui-faucet/src/responses.rs index f4e72b88aa9..74cf366c2a3 100644 --- a/crates/sui-faucet/src/responses.rs +++ b/crates/sui-faucet/src/responses.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::*; use serde::{Deserialize, Serialize}; use uuid::Uuid; +use crate::*; + #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct FaucetResponse { diff --git a/crates/sui-framework-snapshot/src/lib.rs b/crates/sui-framework-snapshot/src/lib.rs index 808073492e3..eafb5436a4c 100644 --- a/crates/sui-framework-snapshot/src/lib.rs +++ b/crates/sui-framework-snapshot/src/lib.rs @@ -1,13 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{collections::BTreeMap, fs, io::Read, path::PathBuf}; + use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -use std::{fs, io::Read, path::PathBuf}; use sui_framework::SystemPackage; -use sui_types::base_types::ObjectID; use sui_types::{ - DEEPBOOK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, SUI_SYSTEM_PACKAGE_ID, + base_types::ObjectID, DEEPBOOK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, + SUI_SYSTEM_PACKAGE_ID, }; pub type SnapshotManifest = BTreeMap<u64, SingleSnapshot>; @@ -16,7 +16,8 @@ pub type SnapshotManifest = BTreeMap<u64, SingleSnapshot>; pub struct SingleSnapshot { /// Git revision that this snapshot is taken on. git_revision: String, - /// List of file names (also identical to object ID) of the bytecode package files. + /// List of file names (also identical to object ID) of the bytecode package + /// files. package_ids: Vec<ObjectID>, } diff --git a/crates/sui-framework-snapshot/src/main.rs b/crates/sui-framework-snapshot/src/main.rs index 7c7e16e8e90..2bfe287ebac 100644 --- a/crates/sui-framework-snapshot/src/main.rs +++ b/crates/sui-framework-snapshot/src/main.rs @@ -1,9 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::env; -use std::fs; -use std::path::PathBuf; +use std::{env, fs, path::PathBuf}; + use sui_framework::{BuiltInFramework, SystemPackage}; use sui_framework_snapshot::update_bytecode_snapshot_manifest; use sui_protocol_config::ProtocolVersion; diff --git a/crates/sui-framework-snapshot/tests/compatibility_tests.rs b/crates/sui-framework-snapshot/tests/compatibility_tests.rs index 7375e49cb27..33f30482f9d 100644 --- a/crates/sui-framework-snapshot/tests/compatibility_tests.rs +++ b/crates/sui-framework-snapshot/tests/compatibility_tests.rs @@ -3,6 +3,7 @@ mod compatibility_tests { use std::collections::BTreeMap; + use sui_framework::{compare_system_package, BuiltInFramework}; use sui_framework_snapshot::{load_bytecode_snapshot, load_bytecode_snapshot_manifest}; use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; @@ -10,8 +11,8 @@ mod compatibility_tests { #[tokio::test] async fn test_framework_compatibility() { - // This test checks that the current framework is compatible with all previous framework - // bytecode snapshots. + // This test checks that the current framework is compatible with all previous + // framework bytecode snapshots.
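To make the snapshot-manifest shape concrete: the manifest is an ordered map from protocol version to snapshot, which is why `keys().max()` (used in the test code that follows) picks out the latest recorded version. A minimal sketch with a simplified stand-in for SingleSnapshot, not the crate's real type:

use std::collections::BTreeMap;

// Simplified stand-in for SingleSnapshot: just the git revision it was taken at.
struct Snapshot {
    git_revision: String,
}

fn main() {
    // Protocol version -> snapshot; BTreeMap keeps versions ordered.
    let mut manifest: BTreeMap<u64, Snapshot> = BTreeMap::new();
    manifest.insert(38, Snapshot { git_revision: "abc123".into() });
    manifest.insert(42, Snapshot { git_revision: "def456".into() });

    // `keys().max()` yields the latest snapshot version.
    let latest = *manifest.keys().max().unwrap();
    assert_eq!(latest, 42);

    // The compatibility test's shape: every recorded version must still be
    // loadable and checkable against the current framework.
    for (version, snapshot) in &manifest {
        println!("checking framework compatibility for v{version} ({})", snapshot.git_revision);
    }
}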
for (version, _snapshots) in load_bytecode_snapshot_manifest() { let config = ProtocolConfig::get_for_version(ProtocolVersion::new(version), Chain::Unknown); @@ -44,13 +45,13 @@ mod compatibility_tests { #[test] fn check_framework_change_with_protocol_upgrade() { - // This test checks that if we ever update the framework, the current protocol version must differ - // from the latest bytecode snapshot in each network. + // This test checks that if we ever update the framework, the current protocol + // version must differ from the latest bytecode snapshot in each network. let snapshots = load_bytecode_snapshot_manifest(); let latest_snapshot_version = *snapshots.keys().max().unwrap(); if latest_snapshot_version != ProtocolVersion::MAX.as_u64() { - // If we have already incremented the protocol version, then we are fine and we don't - // care if the framework has changed. + // If we have already incremented the protocol version, then we are fine and we + // don't care if the framework has changed. return; } let latest_snapshot = load_bytecode_snapshot(*snapshots.keys().max().unwrap()).unwrap(); @@ -61,10 +62,9 @@ mod compatibility_tests { .map(|p| (p.id(), p)) .collect(); assert_eq!( - latest_snapshot_ref, - current_framework, - "The current framework differs from the latest bytecode snapshot. Did you forget to upgrade protocol version?" - ); + latest_snapshot_ref, current_framework, + "The current framework differs from the latest bytecode snapshot. Did you forget to upgrade protocol version?" + ); } #[test] diff --git a/crates/sui-framework-tests/src/metered_verifier.rs b/crates/sui-framework-tests/src/metered_verifier.rs index 80b1db4b1cc..fde5b7a060f 100644 --- a/crates/sui-framework-tests/src/metered_verifier.rs +++ b/crates/sui-framework-tests/src/metered_verifier.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{path::PathBuf, sync::Arc, time::Instant}; + use move_bytecode_verifier::meter::Scope; use prometheus::Registry; -use std::{path::PathBuf, sync::Arc, time::Instant}; use sui_adapter::adapter::run_metered_move_bytecode_verifier; use sui_framework::BuiltInFramework; use sui_move_build::{CompiledPackage, SuiPackageHooks}; @@ -31,7 +32,7 @@ fn test_metered_move_bytecode_verifier() { let mut metered_verifier_config = default_verifier_config( &ProtocolConfig::get_for_max_version_UNSAFE(), - true, /* enable metering */ + true, // enable metering ); let registry = &Registry::new(); let bytecode_verifier_metrics = Arc::new(BytecodeVerifierMetrics::new(registry)); diff --git a/crates/sui-framework-tests/src/unit_tests.rs b/crates/sui-framework-tests/src/unit_tests.rs index ebd532aa147..5618cfa098c 100644 --- a/crates/sui-framework-tests/src/unit_tests.rs +++ b/crates/sui-framework-tests/src/unit_tests.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{fs, io, path::PathBuf}; + use move_cli::base::test::UnitTestResult; use move_package::LintFlag; use move_unit_test::UnitTestingConfig; -use std::{fs, io, path::PathBuf}; use sui_move::unit_test::run_move_unit_tests; use sui_move_build::BuildConfig; diff --git a/crates/sui-framework/build.rs b/crates/sui-framework/build.rs index 47280c4a6b6..1398baddd81 100644 --- a/crates/sui-framework/build.rs +++ b/crates/sui-framework/build.rs @@ -1,16 +1,16 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; -use move_binary_format::CompiledModule; -use move_compiler::editions::Edition; -use move_package::{BuildConfig as MoveBuildConfig, LintFlag}; use std::{ collections::BTreeMap, env, fs, path::{Path, PathBuf}, }; +use anyhow::Result; +use move_binary_format::CompiledModule; +use move_compiler::editions::Edition; +use move_package::{BuildConfig as MoveBuildConfig, LintFlag}; use sui_move_build::{BuildConfig, SuiPackageHooks}; const DOCS_DIR: &str = "docs"; @@ -220,8 +220,8 @@ fn build_packages_with_move_config( serialize_modules_to_file(timelock, &out_dir.join(timelock_dir)).unwrap(); // write out generated docs if write_docs { - // Remove the old docs directory -- in case there was a module that was deleted (could - // happen during development). + // Remove the old docs directory -- in case there was a module that was deleted + // (could happen during development). if Path::new(DOCS_DIR).exists() { std::fs::remove_dir_all(DOCS_DIR).unwrap(); } @@ -260,16 +260,17 @@ fn build_packages_with_move_config( } } -/// Post process the generated docs so that they are in a format that can be consumed by -/// docusaurus. -/// * Flatten out the tree-like structure of the docs directory that we generate for a package into -/// a flat list of packages; -/// * Deduplicate packages (since multiple packages could share dependencies); and +/// Post process the generated docs so that they are in a format that can be +/// consumed by docusaurus. +/// * Flatten out the tree-like structure of the docs directory that we generate +/// for a package into a flat list of packages; +/// * Deduplicate packages (since multiple packages could share dependencies); +/// and /// * Write out the package docs in a flat directory structure. fn relocate_docs(prefix: &str, files: &[(String, String)], output: &mut BTreeMap<String, String>) { - // Turn on multi-line mode so that `.` matches newlines, consume from the start of the file to - // the beginning of the heading, then capture the heading and replace with the yaml tag for docusaurus. E.g., - // ``` + // Turn on multi-line mode so that `.` matches newlines, consume from the start + // of the file to the beginning of the heading, then capture the heading and + // replace with the yaml tag for docusaurus. E.g., ``` // - // - // -# Module `0x2::display` diff --git a/crates/sui-framework/src/lib.rs b/crates/sui-framework/src/lib.rs index 7d04c92b785..0babc0838c3 100644 --- a/crates/sui-framework/src/lib.rs +++ b/crates/sui-framework/src/lib.rs @@ -1,28 +1,30 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use move_binary_format::binary_config::BinaryConfig; -use move_binary_format::compatibility::Compatibility; -use move_binary_format::file_format::{Ability, AbilitySet}; -use move_binary_format::CompiledModule; +use std::fmt::Formatter; + +use move_binary_format::{ + binary_config::BinaryConfig, + compatibility::Compatibility, + file_format::{Ability, AbilitySet}, + CompiledModule, +}; use move_core_types::gas_algebra::InternalGas; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use std::fmt::Formatter; -use sui_types::base_types::ObjectRef; -use sui_types::storage::ObjectStore; use sui_types::{ - base_types::ObjectID, + base_types::{ObjectID, ObjectRef}, digests::TransactionDigest, move_package::MovePackage, object::{Object, OBJECT_START_VERSION}, - MOVE_STDLIB_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, SUI_SYSTEM_PACKAGE_ID, + storage::ObjectStore, + DEEPBOOK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID, STARDUST_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, + SUI_SYSTEM_PACKAGE_ID, TIMELOCK_PACKAGE_ID, }; -use sui_types::{DEEPBOOK_PACKAGE_ID, STARDUST_PACKAGE_ID, TIMELOCK_PACKAGE_ID}; use tracing::error; -/// Represents a system package in the framework, that's built from the source code inside -/// sui-framework. +/// Represents a system package in the framework, that's built from the source +/// code inside sui-framework. #[derive(Clone, Serialize, PartialEq, Eq, Deserialize)] pub struct SystemPackage { pub id: ObjectID, @@ -104,8 +106,8 @@ macro_rules! define_system_packages { pub struct BuiltInFramework; impl BuiltInFramework { /// Dedicated method to iterate on `stardust` packages. - // TODO: integrate into iter_system_packages when we make a new system-framework-snapshot - // with the associated protocol bump + // TODO: integrate into iter_system_packages when we make a new + // system-framework-snapshot with the associated protocol bump pub fn iter_stardust_packages() -> impl Iterator<Item = &'static SystemPackage> { define_system_packages!([ ( @@ -127,9 +129,10 @@ impl BuiltInFramework { } pub fn iter_system_packages() -> impl Iterator<Item = &'static SystemPackage> { - // All system packages in the current build should be registered here, and this is the only - // place we need to worry about if any of them changes. - // TODO: Is it possible to derive dependencies from the bytecode instead of manually specifying them? + // All system packages in the current build should be registered here, and this + // is the only place we need to worry about if any of them changes. + // TODO: Is it possible to derive dependencies from the bytecode instead of + // manually specifying them? define_system_packages!([ (MOVE_STDLIB_PACKAGE_ID, "move-stdlib", []), ( @@ -174,17 +177,18 @@ pub fn legacy_test_cost() -> InternalGas { InternalGas::new(0) } -/// Check whether the framework defined by `modules` is compatible with the framework that is -/// already on-chain (i.e. stored in `object_store`) at `id`. +/// Check whether the framework defined by `modules` is compatible with the +/// framework that is already on-chain (i.e. stored in `object_store`) at `id`. /// -/// - Returns `None` if the current package at `id` cannot be loaded, or the compatibility check -/// fails (This is grounds not to upgrade). -/// - Panics if the object at `id` can be loaded but is not a package -- this is an invariant -/// violation. -/// - Returns the digest of the current framework (and version) if it is equivalent to the new -/// framework (indicates support for a protocol upgrade without a framework upgrade).
-/// - Returns the digest of the new framework (and version) if it is compatible (indicates -/// support for a protocol upgrade with a framework upgrade). +/// - Returns `None` if the current package at `id` cannot be loaded, or the +/// compatibility check fails (This is grounds not to upgrade). +/// - Panics if the object at `id` can be loaded but is not a package -- this is +/// an invariant violation. +/// - Returns the digest of the current framework (and version) if it is +/// equivalent to the new framework (indicates support for a protocol upgrade +/// without a framework upgrade). +/// - Returns the digest of the new framework (and version) if it is compatible +/// (indicates support for a protocol upgrade with a framework upgrade). pub async fn compare_system_package<S: ObjectStore>( object_store: &S, id: &ObjectID, @@ -200,12 +204,14 @@ pub async fn compare_system_package<S: ObjectStore>( return Some( Object::new_system_package( modules, - // note: execution_engine assumes any system package with version OBJECT_START_VERSION is freshly created - // rather than upgraded + // note: execution_engine assumes any system package with version + // OBJECT_START_VERSION is freshly created rather than + // upgraded OBJECT_START_VERSION, dependencies, - // Genesis is fine here, we only use it to calculate an object ref that we can use - // for all validators to commit to the same bytes in the update + // Genesis is fine here, we only use it to calculate an object ref that we can + // use for all validators to commit to the same bytes in + // the update TransactionDigest::genesis_marker(), ) .compute_object_reference(), diff --git a/crates/sui-genesis-builder/examples/build_and_compile_native_token.rs b/crates/sui-genesis-builder/examples/build_and_compile_native_token.rs index 2b38c0e7455..0550be01bf5 100644 --- a/crates/sui-genesis-builder/examples/build_and_compile_native_token.rs +++ b/crates/sui-genesis-builder/examples/build_and_compile_native_token.rs @@ -3,12 +3,16 @@ //! Example demonstrating building and compiling two native token packages. -use iota_sdk::types::block::address::AliasAddress; -use iota_sdk::types::block::output::{AliasId, FoundryId}; -use iota_sdk::Url; -use sui_genesis_builder::stardust::native_token::package_builder; -use sui_genesis_builder::stardust::native_token::package_data::{ -    NativeTokenModuleData, NativeTokenPackageData, +use iota_sdk::{ + types::block::{ + address::AliasAddress, + output::{AliasId, FoundryId}, + }, + Url, +}; +use sui_genesis_builder::stardust::native_token::{ + package_builder, + package_data::{NativeTokenModuleData, NativeTokenPackageData}, }; fn main() -> anyhow::Result<()> { diff --git a/crates/sui-genesis-builder/src/lib.rs b/crates/sui-genesis-builder/src/lib.rs index 9a115708c27..9e10177f122 100644 --- a/crates/sui-genesis-builder/src/lib.rs +++ b/crates/sui-genesis-builder/src/lib.rs @@ -1,17 +1,19 @@ // Copyright (c) Mysten Labs, Inc.
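The four documented outcomes of compare_system_package form a small decision table. A self-contained toy that mirrors it, with byte slices standing in for package contents and a toy digest in place of the real object reference; this is an illustration of the decision logic, not the crate's actual implementation:

// Simplified outcomes: a 32-byte digest plays the role of ObjectRef.
#[derive(Debug, PartialEq)]
enum UpgradeDecision {
    KeepCurrent([u8; 32]), // equivalent bytes: protocol upgrade without a framework upgrade
    Upgrade([u8; 32]),     // compatible change: protocol upgrade with a framework upgrade
    Abort,                 // incompatible or unloadable: grounds not to upgrade
}

fn decide(current: Option<&[u8]>, proposed: &[u8], compatible: bool) -> UpgradeDecision {
    // Toy digest for the sketch; the real code computes an object reference.
    let digest = |bytes: &[u8]| {
        let mut d = [0u8; 32];
        for (i, b) in bytes.iter().enumerate() {
            d[i % 32] ^= b;
        }
        d
    };
    match current {
        None => UpgradeDecision::Abort,
        Some(cur) if cur == proposed => UpgradeDecision::KeepCurrent(digest(cur)),
        Some(_) if compatible => UpgradeDecision::Upgrade(digest(proposed)),
        Some(_) => UpgradeDecision::Abort,
    }
}

fn main() {
    let cur = b"framework-v1".as_slice();
    assert!(matches!(decide(Some(cur), b"framework-v1", true), UpgradeDecision::KeepCurrent(_)));
    assert!(matches!(decide(Some(cur), b"framework-v2", true), UpgradeDecision::Upgrade(_)));
    assert_eq!(decide(Some(cur), b"framework-v2", false), UpgradeDecision::Abort);
    assert_eq!(decide(None, b"framework-v2", true), UpgradeDecision::Abort);
}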
// SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeMap, HashSet}, + fs, + path::Path, + sync::Arc, +}; + use anyhow::{bail, Context}; use camino::Utf8Path; -use fastcrypto::hash::HashFunction; -use fastcrypto::traits::KeyPair; +use fastcrypto::{hash::HashFunction, traits::KeyPair}; use move_binary_format::CompiledModule; use move_core_types::ident_str; use shared_crypto::intent::{Intent, IntentMessage, IntentScope}; -use std::collections::{BTreeMap, HashSet}; -use std::fs; -use std::path::Path; -use std::sync::Arc; use sui_config::genesis::{ Genesis, GenesisCeremonyParameters, GenesisChainParameters, TokenDistributionSchedule, UnsignedGenesis, @@ -19,35 +21,35 @@ use sui_config::genesis::{ use sui_execution::{self, Executor}; use sui_framework::{BuiltInFramework, SystemPackage}; use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; -use sui_types::base_types::{ - ExecutionDigests, ObjectID, SequenceNumber, SuiAddress, TransactionDigest, TxContext, -}; -use sui_types::committee::Committee; -use sui_types::crypto::{ - AuthorityKeyPair, AuthorityPublicKeyBytes, AuthoritySignInfo, AuthoritySignInfoTrait, - AuthoritySignature, DefaultHash, SuiAuthoritySignature, -}; -use sui_types::deny_list::{DENY_LIST_CREATE_FUNC, DENY_LIST_MODULE}; -use sui_types::digests::ChainIdentifier; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}; -use sui_types::epoch_data::EpochData; -use sui_types::gas::SuiGasStatus; -use sui_types::gas_coin::GasCoin; -use sui_types::governance::StakedSui; -use sui_types::in_memory_storage::InMemoryStorage; -use sui_types::inner_temporary_store::InnerTemporaryStore; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary, -}; -use sui_types::metrics::LimitsMetrics; -use sui_types::object::{Object, Owner}; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::sui_system_state::{get_sui_system_state, SuiSystemState, SuiSystemStateTrait}; -use sui_types::transaction::{ - CallArg, CheckedInputObjects, Command, InputObjectKind, ObjectReadResult, Transaction, +use sui_types::{ + base_types::{ + ExecutionDigests, ObjectID, SequenceNumber, SuiAddress, TransactionDigest, TxContext, + }, + committee::Committee, + crypto::{ + AuthorityKeyPair, AuthorityPublicKeyBytes, AuthoritySignInfo, AuthoritySignInfoTrait, + AuthoritySignature, DefaultHash, SuiAuthoritySignature, + }, + deny_list::{DENY_LIST_CREATE_FUNC, DENY_LIST_MODULE}, + digests::ChainIdentifier, + effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + epoch_data::EpochData, + gas::SuiGasStatus, + gas_coin::GasCoin, + governance::StakedSui, + in_memory_storage::InMemoryStorage, + inner_temporary_store::InnerTemporaryStore, + message_envelope::Message, + messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary}, + metrics::LimitsMetrics, + object::{Object, Owner}, + programmable_transaction_builder::ProgrammableTransactionBuilder, + sui_system_state::{get_sui_system_state, SuiSystemState, SuiSystemStateTrait}, + transaction::{ + CallArg, CheckedInputObjects, Command, InputObjectKind, ObjectReadResult, Transaction, + }, + SUI_FRAMEWORK_ADDRESS, SUI_SYSTEM_ADDRESS, }; -use sui_types::{SUI_FRAMEWORK_ADDRESS, SUI_SYSTEM_ADDRESS}; use tracing::trace; use validator_info::{GenesisValidatorInfo, GenesisValidatorMetadata, ValidatorInfo}; @@ -242,15 +244,16 @@ impl Builder { genesis } - 
/// Validates the entire state of the build, no matter what the internal state is (input - /// collection phase or output phase) + /// Validates the entire state of the build, no matter what the internal + /// state is (input collection phase or output phase) pub fn validate(&self) -> anyhow::Result<(), anyhow::Error> { self.validate_inputs()?; self.validate_output(); Ok(()) } - /// Runs through validation checks on the input values present in the builder + /// Runs through validation checks on the input values present in the + /// builder fn validate_inputs(&self) -> anyhow::Result<(), anyhow::Error> { if !self.parameters.allow_insertion_of_extra_objects && !self.objects.is_empty() { bail!("extra objects are disallowed"); @@ -275,10 +278,11 @@ impl Builder { Ok(()) } - /// Runs through validation checks on the generated output (the initial chain state) based on - /// the input values present in the builder + /// Runs through validation checks on the generated output (the initial + /// chain state) based on the input values present in the builder fn validate_output(&self) { - // If genesis hasn't been built yet, just early return as there is nothing to validate yet + // If genesis hasn't been built yet, just early return as there is nothing to + // validate yet let Some(unsigned_genesis) = self.unsigned_genesis_checkpoint() else { return; }; @@ -339,10 +343,13 @@ impl Builder { { let metadata = onchain_validator.verified_metadata(); - // Validators should not have duplicate addresses so the result of insertion should be None. - assert!(address_to_pool_id - .insert(metadata.sui_address, onchain_validator.staking_pool.id) - .is_none()); + // Validators should not have duplicate addresses so the result of insertion + // should be None. + assert!( + address_to_pool_id + .insert(metadata.sui_address, onchain_validator.staking_pool.id) + .is_none() + ); assert_eq!(validator.info.sui_address(), metadata.sui_address); assert_eq!(validator.info.protocol_key(), metadata.sui_pubkey_bytes()); assert_eq!(validator.info.network_key, metadata.network_pubkey); @@ -599,8 +606,8 @@ impl Builder { let unsigned_genesis_bytes = fs::read(unsigned_genesis_file)?; let loaded_genesis: UnsignedGenesis = bcs::from_bytes(&unsigned_genesis_bytes)?; - // If we have a built genesis, then we must have a token_distribution_schedule present - // as well. + // If we have a built genesis, then we must have a token_distribution_schedule + // present as well. assert!( builder.token_distribution_schedule.is_some(), "If a built genesis is present, then there must also be a token-distribution-schedule present" ); @@ -670,9 +677,9 @@ impl Builder { } } -// Create a Genesis Txn Context to be used when generating genesis objects by hashing all of the -// inputs into genesis and using that as our "Txn Digest". This is done to ensure that coin objects -// created between chains are unique +// Create a Genesis Txn Context to be used when generating genesis objects by +// hashing all of the inputs into genesis and using that as our "Txn Digest". +// This is done to ensure that coin objects created between chains are unique fn create_genesis_context( epoch_data: &EpochData, genesis_chain_parameters: &GenesisChainParameters, @@ -700,9 +707,9 @@ fn create_genesis_context( } fn get_genesis_protocol_config(version: ProtocolVersion) -> ProtocolConfig { - // We have a circular dependency here.
Protocol config depends on chain ID, which - // depends on genesis checkpoint (digest), which depends on genesis transaction, which - // depends on protocol config. + // We have a circular dependency here. Protocol config depends on chain ID, + // which depends on genesis checkpoint (digest), which depends on genesis + // transaction, which depends on protocol config. // // ChainIdentifier::default().chain() which can be overridden by the // SUI_PROTOCOL_CONFIG_CHAIN_OVERRIDE if necessary @@ -716,7 +723,9 @@ fn build_unsigned_genesis_data( objects: &[Object], ) -> UnsignedGenesis { if !parameters.allow_insertion_of_extra_objects && !objects.is_empty() { - panic!("insertion of extra objects at genesis time is prohibited due to 'allow_insertion_of_extra_objects' parameter"); + panic!( + "insertion of extra objects at genesis time is prohibited due to 'allow_insertion_of_extra_objects' parameter" + ); } let genesis_chain_parameters = parameters.to_genesis_chain_parameters(); @@ -733,9 +742,9 @@ fn build_unsigned_genesis_data( let epoch_data = EpochData::new_genesis(genesis_chain_parameters.chain_start_timestamp_ms); - // Get the correct system packages for our protocol version. If we cannot find the snapshot - // that means that we must be at the latest version and we should use the latest version of the - // framework. + // Get the correct system packages for our protocol version. If we cannot find + // the snapshot that means that we must be at the latest version and we + // should use the latest version of the framework. let system_packages = sui_framework_snapshot::load_bytecode_snapshot(parameters.protocol_version.as_u64()) .unwrap_or_else(|_| BuiltInFramework::iter_system_packages().cloned().collect()); @@ -898,9 +907,9 @@ fn create_genesis_objects( metrics: Arc<LimitsMetrics>, ) -> Vec<Object> { let mut store = InMemoryStorage::new(Vec::new()); - // We don't know the chain ID here since we haven't yet created the genesis checkpoint. - // However since we know there are no chain specific protocol config options in genesis, - // we use Chain::Unknown here. + // We don't know the chain ID here since we haven't yet created the genesis + // checkpoint. However since we know there are no chain specific protocol + // config options in genesis, we use Chain::Unknown here. let protocol_config = ProtocolConfig::get_for_version( ProtocolVersion::new(parameters.protocol_version), Chain::Unknown, @@ -954,9 +963,10 @@ pub(crate) fn process_package( ) -> anyhow::Result<()> { let dependency_objects = store.get_objects(&dependencies); // When publishing genesis packages, since the std framework packages all have - // non-zero addresses, [`Transaction::input_objects_in_compiled_modules`] will consider - // them as dependencies even though they are not. Hence input_objects contain objects - // that don't exist on-chain because they are yet to be published. + // non-zero addresses, [`Transaction::input_objects_in_compiled_modules`] will + // consider them as dependencies even though they are not. Hence + // input_objects contain objects that don't exist on-chain because they are + // yet to be published. #[cfg(debug_assertions)] { use move_core_types::account_address::AccountAddress; @@ -1046,8 +1056,8 @@ pub fn generate_genesis_system_object( vec![], )?; - // Step 3: Create ProtocolConfig-controlled system objects, unless disabled (which only - // happens in tests). + // Step 3: Create ProtocolConfig-controlled system objects, unless disabled + // (which only happens in tests).
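create_genesis_context above derives a synthetic "Txn Digest" by hashing every genesis input, so that two chains built from different inputs mint distinct coin object IDs. A minimal sketch of that idea, using sha2 as a stand-in for the fastcrypto DefaultHash the builder actually uses, and with illustrative input fields:

use sha2::{Digest, Sha256};

// Illustrative genesis inputs; the real builder hashes epoch data, chain
// parameters, validators, and the token distribution schedule.
struct GenesisInputs<'a> {
    chain_start_timestamp_ms: u64,
    protocol_version: u64,
    extra: &'a [u8],
}

// Deterministically derive a 32-byte pseudo "transaction digest" from the
// inputs, so chains built from different inputs get distinct genesis objects.
fn genesis_digest(inputs: &GenesisInputs) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(inputs.chain_start_timestamp_ms.to_le_bytes());
    hasher.update(inputs.protocol_version.to_le_bytes());
    hasher.update(inputs.extra);
    hasher.finalize().into()
}

fn main() {
    let a = genesis_digest(&GenesisInputs { chain_start_timestamp_ms: 1, protocol_version: 42, extra: b"mainnet" });
    let b = genesis_digest(&GenesisInputs { chain_start_timestamp_ms: 1, protocol_version: 42, extra: b"testnet" });
    assert_ne!(a, b); // different chains commit to different genesis digests
}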
if protocol_config.create_authenticator_state_in_genesis() { builder.move_call( SUI_FRAMEWORK_ADDRESS.into(), @@ -1086,8 +1096,8 @@ pub fn generate_genesis_system_object( ); // Step 5: Run genesis. - // The first argument is the system state uid we got from step 1 and the second one is the SUI supply we - // got from step 3. + // The first argument is the system state uid we got from step 1 and the second + // one is the SUI supply we got from step 3. let mut arguments = vec![sui_system_state_uid, sui_supply]; let mut call_arg_arguments = vec![ CallArg::Pure(bcs::to_bytes(&genesis_chain_parameters).unwrap()), @@ -1134,18 +1144,21 @@ pub fn generate_genesis_system_object( #[cfg(test)] mod test { - use crate::validator_info::ValidatorInfo; - use crate::Builder; use fastcrypto::traits::KeyPair; - use sui_config::genesis::*; - use sui_config::local_ip_utils; - use sui_config::node::DEFAULT_COMMISSION_RATE; - use sui_config::node::DEFAULT_VALIDATOR_GAS_PRICE; - use sui_types::base_types::SuiAddress; - use sui_types::crypto::{ - generate_proof_of_possession, get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, - NetworkKeyPair, + use sui_config::{ + genesis::*, + local_ip_utils, + node::{DEFAULT_COMMISSION_RATE, DEFAULT_VALIDATOR_GAS_PRICE}, }; + use sui_types::{ + base_types::SuiAddress, + crypto::{ + generate_proof_of_possession, get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, + NetworkKeyPair, + }, + }; + + use crate::{validator_info::ValidatorInfo, Builder}; #[test] fn allocation_csv() { diff --git a/crates/sui-genesis-builder/src/stardust/error.rs b/crates/sui-genesis-builder/src/stardust/error.rs index 8864b3985df..d2ba46134cf 100644 --- a/crates/sui-genesis-builder/src/stardust/error.rs +++ b/crates/sui-genesis-builder/src/stardust/error.rs @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 //! 
Error types pertaining to deserializing Stardust snapshots -use iota_sdk::types::block::output::FoundryId; use std::convert::Infallible; +use iota_sdk::types::block::output::FoundryId; use packable::error::UnknownTagError; use thiserror::Error; @@ -18,14 +18,18 @@ pub enum StardustError { BlockError(#[from] iota_sdk::types::block::Error), #[error("{0}")] UnknownTag(#[from] UnknownTagError), - #[error("cannot convert `FoundryOutput` with `FoundryId` {foundry_id} to `NativeTokenPackageData`: {err}")] + #[error( + "cannot convert `FoundryOutput` with `FoundryId` {foundry_id} to `NativeTokenPackageData`: {err}" + )] FoundryConversionError { foundry_id: FoundryId, err: anyhow::Error, }, #[error("framework packages path not found")] FrameworkPackagesPathNotFound, - #[error("failed to derive valid move identifier from symbol `{symbol}`, invalid identifier: `{identifier}`")] + #[error( + "failed to derive valid move identifier from symbol `{symbol}`, invalid identifier: `{identifier}`" + )] InvalidMoveIdentifierDerived { symbol: String, identifier: String }, } diff --git a/crates/sui-genesis-builder/src/stardust/migration/executor.rs b/crates/sui-genesis-builder/src/stardust/migration/executor.rs index dd61265224b..515d2ee8878 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/executor.rs +++ b/crates/sui-genesis-builder/src/stardust/migration/executor.rs @@ -1,16 +1,17 @@ // Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeSet, HashMap}, + sync::Arc, +}; + use anyhow::Result; use iota_sdk::types::block::output::{ AliasOutput, BasicOutput, FoundryOutput, NativeTokens, NftOutput, OutputId, TokenId, }; use move_core_types::{ident_str, language_storage::StructTag}; use move_vm_runtime_v2::move_vm::MoveVM; -use std::{ - collections::{BTreeSet, HashMap}, - sync::Arc, -}; use sui_adapter_v2::{ adapter::new_move_vm, gas_charger::GasCharger, programmable_transactions, temporary_store::TemporaryStore, @@ -21,27 +22,22 @@ use sui_move_natives_v2::all_natives; use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; use sui_types::{ balance::Balance, - base_types::{ObjectRef, SequenceNumber}, + base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, TxContext}, collection_types::Bag, dynamic_field::Field, - id::UID, - move_package::{MovePackage, TypeOrigin}, - object::Object, - transaction::{Argument, InputObjects, ObjectArg}, - TypeTag, -}; -use sui_types::{ - base_types::{ObjectID, SuiAddress, TxContext}, execution_mode, + id::UID, in_memory_storage::InMemoryStorage, inner_temporary_store::InnerTemporaryStore, metrics::LimitsMetrics, - move_package::UpgradeCap, + move_package::{MovePackage, TypeOrigin, UpgradeCap}, + object::Object, programmable_transaction_builder::ProgrammableTransactionBuilder, transaction::{ - CheckedInputObjects, Command, InputObjectKind, ObjectReadResult, ProgrammableTransaction, + Argument, CheckedInputObjects, Command, InputObjectKind, InputObjects, ObjectArg, + ObjectReadResult, ProgrammableTransaction, }, - STARDUST_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, + TypeTag, STARDUST_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, }; use crate::{ @@ -81,18 +77,18 @@ impl Executor { // Use a throwaway metrics registry for transaction execution. let metrics = Arc::new(LimitsMetrics::new(&prometheus::Registry::new())); let mut store = InMemoryStorage::new(Vec::new()); - // We don't know the chain ID here since we haven't yet created the genesis checkpoint. 
- However since we know there are no chain specific protocol config options in genesis, - // we use Chain::Unknown here. + // We don't know the chain ID here since we haven't yet created the genesis + // checkpoint. However since we know there are no chain specific + // protocol config options in genesis, we use Chain::Unknown here. let protocol_config = ProtocolConfig::get_for_version(protocol_version, Chain::Unknown); - // Get the correct system packages for our protocol version. If we cannot find the snapshot - // that means that we must be at the latest version and we should use the latest version of the - // framework. + // Get the correct system packages for our protocol version. If we cannot find + // the snapshot that means that we must be at the latest version and we + // should use the latest version of the framework. let mut system_packages = sui_framework_snapshot::load_bytecode_snapshot(protocol_version.as_u64()) .unwrap_or_else(|_| BuiltInFramework::iter_system_packages().cloned().collect()); - // TODO: Remove when we have bumped the protocol to include the stardust packages - // into the system packages. + // TODO: Remove when we have bumped the protocol to include the stardust + // packages into the system packages. // // See also: https://github.com/iotaledger/kinesis/pull/149 system_packages.extend(BuiltInFramework::iter_stardust_packages().cloned()); @@ -269,7 +265,8 @@ impl Executor { header: &OutputHeader, alias: &AliasOutput, ) -> Result<CreatedObjects> { - // Take the Alias ID set in the output or, if it's zeroized, compute it from the Output ID. + // Take the Alias ID set in the output or, if it's zeroized, compute it from the + // Output ID. let alias_id = ObjectID::new(*alias.alias_id().or_from_output_id(&header.output_id())); let move_alias = Alias::try_from_stardust(alias_id, alias)?; let mut created_objects = CreatedObjects::default(); @@ -309,7 +306,8 @@ impl Executor { created_objects.set_output(move_alias_output_object.id())?; self.store.insert_object(move_alias_output_object); - // Attach the Alias to the Alias Output as a dynamic object field via the attach_alias convenience method. + // Attach the Alias to the Alias Output as a dynamic object field via the + // attach_alias convenience method. let pt = { let mut builder = ProgrammableTransactionBuilder::new(); @@ -339,7 +337,8 @@ impl Executor { Ok(created_objects) } - /// Create a [`Bag`] of balances of native tokens by executing a programmable transaction block. + /// Create a [`Bag`] of balances of native tokens by executing a programmable + /// transaction block. pub(crate) fn create_bag_with_pt( &mut self, native_tokens: &NativeTokens, @@ -385,9 +384,9 @@ impl Executor { // The `Bag` object does not have the `drop` ability so we have to use it // in the transaction block. Therefore we transfer it to the `0x0` address. // - // Nevertheless, we only store the contents of the object, and thus the ownership - // metadata are irrelevant to us. This is a dummy transfer then to satisfy - // the VM. + // Nevertheless, we only store the contents of the object, and thus the + // ownership metadata are irrelevant to us. This is a dummy transfer + // then to satisfy the VM. builder.transfer_arg(Default::default(), bag); builder.finish() }; @@ -502,7 +501,8 @@ impl Executor { let coins = self.create_native_token_coins(basic_output.native_tokens(), owner)?; created_objects.set_native_tokens(coins)?; } - // Overwrite the default 0 UID of `Bag::default()`, since we won't be creating a new bag in this code path.
+ // Overwrite the default 0 UID of `Bag::default()`, since we won't be creating a + // new bag in this code path. data.native_tokens.id = UID::new(self.tx_context.fresh_id()); let coin = data.into_genesis_coin_object( owner, @@ -531,8 +531,9 @@ impl Executor { Ok(created_objects) } - /// Creates [`TimeLock<Balance<SUI>>`] objects which represent vested rewards - /// that were created during the stardust upgrade on IOTA mainnet. + /// Creates [`TimeLock<Balance<SUI>>`] objects which represent vested + /// rewards that were created during the stardust upgrade on IOTA + /// mainnet. pub(super) fn create_timelock_object( &mut self, header: &OutputHeader, @@ -576,9 +577,8 @@ impl Executor { } mod pt { - use crate::stardust::migration::NATIVE_TOKEN_BAG_KEY_TYPE; - use super::*; + use super::*; + use crate::stardust::migration::NATIVE_TOKEN_BAG_KEY_TYPE; pub fn coin_balance_split( builder: &mut ProgrammableTransactionBuilder, @@ -643,7 +643,8 @@ pub(crate) struct FoundryLedgerData { } impl FoundryLedgerData { - /// Store the minted coin `ObjectID` and derive data from the foundry package. + /// Store the minted coin `ObjectID` and derive data from the foundry + /// package. /// /// # Panic /// diff --git a/crates/sui-genesis-builder/src/stardust/migration/migration.rs b/crates/sui-genesis-builder/src/stardust/migration/migration.rs index 24cca7a01b2..3735bb9095f 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/migration.rs +++ b/crates/sui-genesis-builder/src/stardust/migration/migration.rs @@ -3,8 +3,14 @@ //! Contains the logic for the migration process. +use std::{ + collections::HashMap, + io::{prelude::Write, BufWriter}, +}; + use anyhow::Result; use fastcrypto::hash::HashFunction; +use iota_sdk::types::block::output::{FoundryOutput, Output, OutputId}; use sui_move_build::CompiledPackage; use sui_protocol_config::ProtocolVersion; use sui_types::{ @@ -17,13 +23,6 @@ use sui_types::{ TIMELOCK_PACKAGE_ID, }; -use std::{ - collections::HashMap, - io::{prelude::Write, BufWriter}, -}; - -use iota_sdk::types::block::output::{FoundryOutput, Output, OutputId}; - use crate::stardust::{ migration::{ executor::Executor, @@ -49,18 +48,19 @@ pub(crate) const NATIVE_TOKEN_BAG_KEY_TYPE: &str = "0x01::ascii::String"; /// The orchestrator of the migration process. /// -/// It is run by providing an [`Iterator`] of stardust UTXOs, and holds an inner executor -/// and in-memory object storage for their conversion into objects. +/// It is run by providing an [`Iterator`] of stardust UTXOs, and holds an inner +/// executor and in-memory object storage for their conversion into objects. /// /// It guarantees the following: /// /// * That foundry UTXOs are sorted by `(milestone_timestamp, output_id)`. /// * That the foundry packages and total supplies are created first -/// * That all other outputs are created in a second iteration over the original UTXOs. +/// * That all other outputs are created in a second iteration over the original +/// UTXOs. /// * That the resulting ledger state is valid. /// -/// The migration process results in the generation of a snapshot file with the generated -/// objects serialized. +/// The migration process results in the generation of a snapshot file with the +/// generated objects serialized. pub struct Migration { target_milestone_timestamp_sec: u32, @@ -138,7 +138,8 @@ impl Migration { self.executor.into_objects() } - /// Create the packages, and associated objects representing foundry outputs. + /// Create the packages, and associated objects representing foundry + /// outputs.
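The Migration doc comment above guarantees that foundries are processed first, sorted by (milestone_timestamp, output_id), and that everything else is handled in a second pass over the original outputs. A self-contained sketch of that two-pass ordering over simplified output kinds; the types here are stand-ins, not the migration's real data model:

// Simplified stand-ins for the stardust output kinds.
enum Output {
    Foundry { milestone_timestamp: u32, output_id: [u8; 4] },
    Other { name: &'static str },
}

fn migrate(outputs: &[Output]) -> Vec<String> {
    let mut log = Vec::new();

    // Pass 1: foundries only, sorted by (milestone_timestamp, output_id), so
    // packages and total supplies exist before anything references them.
    let mut foundries: Vec<_> = outputs
        .iter()
        .filter_map(|o| match o {
            Output::Foundry { milestone_timestamp, output_id } => {
                Some((*milestone_timestamp, *output_id))
            }
            _ => None,
        })
        .collect();
    foundries.sort();
    for (ts, id) in &foundries {
        log.push(format!("foundry ts={ts} id={id:?}"));
    }

    // Pass 2: everything else, in a second iteration over the original outputs.
    for o in outputs {
        if let Output::Other { name } = o {
            log.push(format!("output {name}"));
        }
    }
    log
}

fn main() {
    let outputs = [
        Output::Other { name: "basic-1" },
        Output::Foundry { milestone_timestamp: 9, output_id: [0, 0, 0, 2] },
        Output::Foundry { milestone_timestamp: 3, output_id: [0, 0, 0, 1] },
    ];
    let log = migrate(&outputs);
    assert!(log[0].starts_with("foundry ts=3"));
    assert!(log[1].starts_with("foundry ts=9"));
    assert_eq!(log[2], "output basic-1");
}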
fn migrate_foundries<'a>( &mut self, foundries: impl IntoIterator<Item = &'a (OutputHeader, FoundryOutput)>, @@ -164,8 +165,8 @@ impl Migration { let created = match output { Output::Alias(alias) => self.executor.create_alias_objects(header, alias)?, Output::Basic(basic) => { - // All timelocked vested rewards (basic outputs with the specific ID format) should be migrated - // as TimeLock<Balance<SUI>> objects. + // All timelocked vested rewards (basic outputs with the specific ID format) + // should be migrated as TimeLock<Balance<SUI>> objects. if timelock::is_timelocked_vested_reward( header, basic, @@ -188,7 +189,8 @@ impl Migration { Ok(()) } - /// Verify the ledger state represented by the objects in [`InMemoryStorage`]. + /// Verify the ledger state represented by the objects in + /// [`InMemoryStorage`]. pub fn verify_ledger_state<'a>( &self, outputs: impl IntoIterator<Item = &'a (OutputHeader, Output)>, @@ -205,8 +207,8 @@ impl Migration { Ok(()) } - /// Consumes the `Migration` and returns the underlying `Executor` so tests can - /// continue to work in the same environment as the migration. + /// Consumes the `Migration` and returns the underlying `Executor` so tests + /// can continue to work in the same environment as the migration. #[cfg(test)] pub(super) fn into_executor(self) -> Executor { self.executor diff --git a/crates/sui-genesis-builder/src/stardust/migration/tests/alias.rs b/crates/sui-genesis-builder/src/stardust/migration/tests/alias.rs index 7e271cd8dcb..daff0be8799 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/tests/alias.rs +++ b/crates/sui-genesis-builder/src/stardust/migration/tests/alias.rs @@ -1,36 +1,41 @@ // Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -use crate::stardust::migration::migration::NATIVE_TOKEN_BAG_KEY_TYPE; -use crate::stardust::migration::migration::PACKAGE_DEPS; -use crate::stardust::migration::tests::run_migration; -use crate::stardust::migration::tests::{create_foundry, random_output_header}; -use crate::stardust::types::ALIAS_OUTPUT_MODULE_NAME; -use crate::stardust::types::{snapshot::OutputHeader, Alias, AliasOutput}; -use iota_sdk::types::block::address::Address; -use iota_sdk::types::block::output::feature::Irc30Metadata; -use iota_sdk::types::block::output::{NativeToken, SimpleTokenScheme, TokenId}; -use iota_sdk::types::block::{ - address::Ed25519Address, - output::{ - feature::{IssuerFeature, MetadataFeature, SenderFeature}, - unlock_condition::{GovernorAddressUnlockCondition, StateControllerAddressUnlockCondition}, - AliasId, AliasOutput as StardustAlias, AliasOutputBuilder, Feature, +use std::str::FromStr; + +use iota_sdk::{ + types::block::{ + address::{Address, Ed25519Address}, + output::{ + feature::{Irc30Metadata, IssuerFeature, MetadataFeature, SenderFeature}, + unlock_condition::{ + GovernorAddressUnlockCondition, StateControllerAddressUnlockCondition, + }, + AliasId, AliasOutput as StardustAlias, AliasOutputBuilder, Feature, NativeToken, + SimpleTokenScheme, TokenId, + }, + }, + U256, +}; +use move_core_types::{ident_str, language_storage::StructTag}; +use sui_types::{ + balance::Balance, + base_types::{ObjectID, SuiAddress}, + coin::Coin, + gas_coin::GAS, + inner_temporary_store::InnerTemporaryStore, + programmable_transaction_builder::ProgrammableTransactionBuilder, + transaction::{Argument, CheckedInputObjects, ObjectArg}, + TypeTag, STARDUST_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, +}; + +use crate::stardust::{ + migration::{ + migration::{NATIVE_TOKEN_BAG_KEY_TYPE, PACKAGE_DEPS}, + tests::{create_foundry, random_output_header, run_migration}, + }, + types::{snapshot::OutputHeader,
Alias, AliasOutput, ALIAS_OUTPUT_MODULE_NAME}, }; -use iota_sdk::U256; -use move_core_types::ident_str; -use move_core_types::language_storage::StructTag; -use std::str::FromStr; -use sui_types::balance::Balance; -use sui_types::base_types::SuiAddress; -use sui_types::coin::Coin; -use sui_types::gas_coin::GAS; -use sui_types::inner_temporary_store::InnerTemporaryStore; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::transaction::{Argument, CheckedInputObjects, ObjectArg}; -use sui_types::TypeTag; -use sui_types::{base_types::ObjectID, STARDUST_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID}; fn migrate_alias( header: OutputHeader, @@ -58,8 +63,9 @@ fn migrate_alias( }) .expect("alias object should be present in the migrated snapshot"); - // Version is set to 1 when the alias is created based on the computed lamport timestamp. - // When the alias is attached to the alias output, the version should be incremented. + // Version is set to 1 when the alias is created based on the computed lamport + // timestamp. When the alias is attached to the alias output, the version + // should be incremented. assert!( alias_object.version().value() > 1, "alias object version should have been incremented" @@ -77,7 +83,8 @@ fn migrate_alias( (alias_object_id, alias, alias_output) } -/// Test that the migrated alias objects in the snapshot contain the expected data. +/// Test that the migrated alias objects in the snapshot contain the expected +/// data. #[test] fn alias_migration_with_full_features() { let alias_id = AliasId::new(rand::random()); @@ -103,13 +110,15 @@ fn alias_migration_with_full_features() { let (alias_object_id, alias, alias_output) = migrate_alias(header, stardust_alias.clone()); let expected_alias = Alias::try_from_stardust(alias_object_id, &stardust_alias).unwrap(); - // Compare only the balance. The ID is newly generated and the bag is tested separately. + // Compare only the balance. The ID is newly generated and the bag is tested + // separately. assert_eq!(stardust_alias.amount(), alias_output.iota.value()); assert_eq!(expected_alias, alias); } -/// Test that an Alias with a zeroed ID is migrated to an Alias Object with its UID set to the hashed Output ID. +/// Test that an Alias with a zeroed ID is migrated to an Alias Object with its +/// UID set to the hashed Output ID. #[test] fn alias_migration_with_zeroed_id() { let random_address = Ed25519Address::from(rand::random::<[u8; Ed25519Address::LENGTH]>()); @@ -126,9 +135,11 @@ fn alias_migration_with_zeroed_id() { migrate_alias(header, stardust_alias); } -/// Test that an Alias owned by another Alias can be received by the owning object. +/// Test that an Alias owned by another Alias can be received by the owning +/// object. /// -/// The PTB sends the extracted assets to the null address since it must be used in the transaction. +/// The PTB sends the extracted assets to the null address since it must be used +/// in the transaction. #[test] fn test_alias_migration_with_alias_owner() { let random_address = Ed25519Address::from(rand::random::<[u8; Ed25519Address::LENGTH]>()); @@ -159,8 +170,9 @@ fn test_alias_migration_with_alias_owner() { (random_output_header(), stardust_alias2.into()), ]); - // Find the corresponding objects to the migrated aliases, uniquely identified by their amounts. - // Should be adapted to use the tags from issue 239 to make this much easier. + // Find the corresponding objects to the migrated aliases, uniquely identified + // by their amounts. 
Should be adapted to use the tags from issue 239 to + // make this much easier. let alias_output1_id = executor .store() .objects() @@ -256,8 +268,8 @@ fn test_alias_migration_with_alias_owner() { builder.transfer_arg(SuiAddress::default(), bag_arg); builder.transfer_arg(SuiAddress::default(), coin_arg); - // We have to use Alias Output as we cannot transfer it (since it lacks the `store` ability), - // so we extract its assets. + // We have to use Alias Output as we cannot transfer it (since it lacks the + // `store` ability), so we extract its assets. let extracted_assets = builder.programmable_move_call( STARDUST_PACKAGE_ID, ALIAS_OUTPUT_MODULE_NAME.into(), @@ -302,7 +314,8 @@ fn test_alias_migration_with_alias_owner() { executor.execute_pt_unmetered(input_objects, pt).unwrap(); } -/// Test that an Alias that owns Native Tokens can extract those tokens from the contained bag. +/// Test that an Alias that owns Native Tokens can extract those tokens from the +/// contained bag. #[test] fn alias_migration_with_native_tokens() { let random_address = Ed25519Address::from(rand::random::<[u8; Ed25519Address::LENGTH]>()); @@ -331,8 +344,9 @@ fn alias_migration_with_native_tokens() { (foundry_header, foundry_output.into()), ]); - // Find the corresponding objects to the migrated aliases, uniquely identified by their amounts. - // Should be adapted to use the tags from issue 239 to make this much easier. + // Find the corresponding objects to the migrated aliases, uniquely identified + // by their amounts. Should be adapted to use the tags from issue 239 to + // make this much easier. let alias_output1_id = executor .store() .objects() @@ -418,7 +432,8 @@ fn alias_migration_with_native_tokens() { vec![balance_arg], ); - // Destroying the bag only works if it's empty, hence asserting that it is in fact empty. + // Destroying the bag only works if it's empty, hence asserting that it is in + // fact empty. 
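As the comment above notes, destroying a bag only works once it is empty, which is why the test extracts every balance before the destroy call. A toy Rust analogue of that Move invariant, with destruction consuming the value and failing on residual balances; the type and method names are illustrative, not the stardust package's API:

use std::collections::BTreeMap;

// Toy analogue of a Move Bag keyed by token type name.
struct Bag {
    balances: BTreeMap<String, u64>,
}

impl Bag {
    // Withdraw the full balance for `token`, mirroring balance extraction in the PTB.
    fn take_all(&mut self, token: &str) -> u64 {
        self.balances.remove(token).unwrap_or(0)
    }

    // Like Move's `bag::destroy_empty`: consuming destruction that fails on leftovers.
    fn destroy_empty(self) -> Result<(), String> {
        if self.balances.is_empty() {
            Ok(())
        } else {
            Err(format!("bag still holds {} balance(s)", self.balances.len()))
        }
    }
}

fn main() {
    let full = Bag { balances: BTreeMap::from([("0xabc::foo::FOO".to_string(), 100)]) };
    assert!(full.destroy_empty().is_err()); // destruction aborts while tokens remain

    let mut bag = Bag { balances: BTreeMap::from([("0xabc::foo::FOO".to_string(), 100)]) };
    assert_eq!(bag.take_all("0xabc::foo::FOO"), 100);
    bag.destroy_empty().unwrap(); // empty: destruction succeeds
}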
builder.programmable_move_call( SUI_FRAMEWORK_PACKAGE_ID, ident_str!("bag").into(), diff --git a/crates/sui-genesis-builder/src/stardust/migration/tests/executor.rs b/crates/sui-genesis-builder/src/stardust/migration/tests/executor.rs index 129e5435acd..fd2da20654e 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/tests/executor.rs +++ b/crates/sui-genesis-builder/src/stardust/migration/tests/executor.rs @@ -1,27 +1,29 @@ // Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -use iota_sdk::types::block::output::NativeTokens; use iota_sdk::types::block::{ address::AliasAddress, output::{ unlock_condition::ImmutableAliasAddressUnlockCondition, AliasId, FoundryOutputBuilder, - NativeToken, SimpleTokenScheme, UnlockCondition, + NativeToken, NativeTokens, SimpleTokenScheme, UnlockCondition, }, }; - use sui_protocol_config::ProtocolVersion; -use sui_types::balance::Balance; use sui_types::{ + balance::Balance, dynamic_field::{derive_dynamic_field_id, Field}, object::Owner, }; -use crate::stardust::migration::executor::Executor; -use crate::stardust::migration::migration::NATIVE_TOKEN_BAG_KEY_TYPE; -use crate::stardust::migration::tests::random_output_header; -use crate::stardust::native_token::package_builder; -use crate::stardust::native_token::package_data::{NativeTokenModuleData, NativeTokenPackageData}; +use crate::stardust::{ + migration::{ + executor::Executor, migration::NATIVE_TOKEN_BAG_KEY_TYPE, tests::random_output_header, + }, + native_token::{ + package_builder, + package_data::{NativeTokenModuleData, NativeTokenPackageData}, + }, +}; #[test] fn create_bag_with_pt() { diff --git a/crates/sui-genesis-builder/src/stardust/migration/tests/mod.rs b/crates/sui-genesis-builder/src/stardust/migration/tests/mod.rs index 920b356f0e7..48de7febc46 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/tests/mod.rs +++ b/crates/sui-genesis-builder/src/stardust/migration/tests/mod.rs @@ -1,21 +1,20 @@ // Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -use iota_sdk::types::block::address::AliasAddress; -use iota_sdk::types::block::output::feature::Irc30Metadata; -use iota_sdk::types::block::output::feature::MetadataFeature; -use iota_sdk::types::block::output::unlock_condition::ImmutableAliasAddressUnlockCondition; -use iota_sdk::types::block::output::AliasId; -use iota_sdk::types::block::output::Feature; -use iota_sdk::types::block::output::FoundryOutput; -use iota_sdk::types::block::output::FoundryOutputBuilder; -use iota_sdk::types::block::output::Output; -use iota_sdk::types::block::output::SimpleTokenScheme; -use iota_sdk::types::block::output::TokenScheme; +use iota_sdk::types::block::{ + address::AliasAddress, + output::{ + feature::{Irc30Metadata, MetadataFeature}, + unlock_condition::ImmutableAliasAddressUnlockCondition, + AliasId, Feature, FoundryOutput, FoundryOutputBuilder, Output, SimpleTokenScheme, + TokenScheme, + }, +}; -use crate::stardust::migration::executor::Executor; -use crate::stardust::migration::migration::Migration; -use crate::stardust::types::snapshot::OutputHeader; +use crate::stardust::{ + migration::{executor::Executor, migration::Migration}, + types::snapshot::OutputHeader, +}; mod alias; mod executor; diff --git a/crates/sui-genesis-builder/src/stardust/migration/verification/basic.rs b/crates/sui-genesis-builder/src/stardust/migration/verification/basic.rs index 2e057681e84..9efa8f03b5c 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/verification/basic.rs +++ 
b/crates/sui-genesis-builder/src/stardust/migration/verification/basic.rs @@ -19,7 +19,8 @@ pub fn verify_basic_output( created_objects: &CreatedObjects, storage: &InMemoryStorage, ) -> Result<()> { - // If the output has multiple unlock conditions, then a genesis object should have been created. + // If the output has multiple unlock conditions, then a genesis object should + // have been created. if output.unlock_conditions().len() > 1 { let created_output = created_objects .output() @@ -91,8 +92,8 @@ pub fn verify_basic_output( // Sender Feature verify_sender_feature(output.features().sender(), created_output.sender)?; - // Otherwise the output contains only an address unlock condition and only a coin - // and possibly native tokens should have been created. + // Otherwise the output contains only an address unlock condition and only a + // coin and possibly native tokens should have been created. } else { ensure!( created_objects.output().is_err(), diff --git a/crates/sui-genesis-builder/src/stardust/migration/verification/mod.rs b/crates/sui-genesis-builder/src/stardust/migration/verification/mod.rs index d4569e28634..34336de4b6c 100644 --- a/crates/sui-genesis-builder/src/stardust/migration/verification/mod.rs +++ b/crates/sui-genesis-builder/src/stardust/migration/verification/mod.rs @@ -1,14 +1,14 @@ // Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -//! The [`verification`] module contains the validation logic to make sure that the stardust outputs are correctly converted to the move objects. +//! The [`verification`] module contains the validation logic to make sure that +//! the stardust outputs are correctly converted to the move objects. use iota_sdk::types::block::output::Output; use sui_types::in_memory_storage::InMemoryStorage; -use crate::stardust::types::snapshot::OutputHeader; - use self::created_objects::CreatedObjects; +use crate::stardust::types::snapshot::OutputHeader; pub mod alias; pub mod basic; diff --git a/crates/sui-genesis-builder/src/stardust/native_token/package_builder.rs b/crates/sui-genesis-builder/src/stardust/native_token/package_builder.rs index 0eeb8e48425..1e890e0559d 100644 --- a/crates/sui-genesis-builder/src/stardust/native_token/package_builder.rs +++ b/crates/sui-genesis-builder/src/stardust/native_token/package_builder.rs @@ -1,18 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -//! The [`package_builder`] module provides the [`PackageBuilder`] struct, which is responsible for building and compiling Stardust native token packages. -use std::fs; -use std::path::{Path, PathBuf}; +//! The [`package_builder`] module provides the [`PackageBuilder`] struct, which +//! is responsible for building and compiling Stardust native token packages. +use std::{ + fs, + path::{Path, PathBuf}, +}; use anyhow::Result; use fs_extra::dir::{copy, CopyOptions}; -use tempfile::tempdir; - -use crate::stardust::error::StardustError; use sui_move_build::{BuildConfig, CompiledPackage, SuiPackageHooks}; +use tempfile::tempdir; -use crate::stardust::native_token::package_data::NativeTokenPackageData; +use crate::stardust::{error::StardustError, native_token::package_data::NativeTokenPackageData}; /// Builds and compiles a Stardust native token package. 
pub fn build_and_compile(package: NativeTokenPackageData) -> Result<CompiledPackage> { @@ -124,12 +125,15 @@ fn adjust_native_token_module(package_path: &Path, package: &NativeTokenPackageD #[cfg(test)] mod tests { - use std::fs::{self, File}; - use std::io::Write; + use std::{ + fs::{self, File}, + io::Write, + }; - use crate::stardust::native_token::package_builder; use tempfile::tempdir; + + use crate::stardust::native_token::package_builder; + #[test] fn test_copy_template_dir_success() { // Set up a temporary directory as the environment for the test diff --git a/crates/sui-genesis-builder/src/stardust/native_token/package_data.rs b/crates/sui-genesis-builder/src/stardust/native_token/package_data.rs index 6485479c16c..e17fa55d0d6 100644 --- a/crates/sui-genesis-builder/src/stardust/native_token/package_data.rs +++ b/crates/sui-genesis-builder/src/stardust/native_token/package_data.rs @@ -1,19 +1,28 @@ // Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -//! The [`package_data`] module provides the [`NativeTokenPackageData`] struct, which encapsulates all the data necessary to build a Stardust native token package. +//! The [`package_data`] module provides the [`NativeTokenPackageData`] struct, +//! which encapsulates all the data necessary to build a Stardust native token +//! package. -use crate::stardust::error::StardustError; use anyhow::Result; -use iota_sdk::types::block::address::AliasAddress; -use iota_sdk::types::block::output::feature::Irc30Metadata; -use iota_sdk::types::block::output::{FoundryId, FoundryOutput}; -use iota_sdk::Url; -use rand::distributions::{Alphanumeric, DistString}; -use rand::Rng; +use iota_sdk::{ + types::block::{ + address::AliasAddress, + output::{feature::Irc30Metadata, FoundryId, FoundryOutput}, + }, + Url, +}; +use rand::{ + distributions::{Alphanumeric, DistString}, + Rng, +}; use regex::Regex; -/// The [`NativeTokenPackageData`] struct encapsulates all the data necessary to build a Stardust native token package. +use crate::stardust::error::StardustError; + +/// The [`NativeTokenPackageData`] struct encapsulates all the data necessary to +/// build a Stardust native token package. #[derive(Debug)] pub struct NativeTokenPackageData { package_name: String, @@ -40,7 +49,8 @@ impl NativeTokenPackageData { } } -/// The [`NativeTokenModuleData`] struct encapsulates all the data necessary to build a Stardust native token module. +/// The [`NativeTokenModuleData`] struct encapsulates all the data necessary to +/// build a Stardust native token module.
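The `package_data` hunks below derive a lowercase Move identifier from the IRC-30 `symbol` field via `derive_lowercase_identifier`. As a rough, std-only sketch of that kind of sanitisation (the helper name, filtering rule, and digit fallback are illustrative assumptions, not the crate's actual `regex`- and `rand`-based implementation):

```rust
// Hypothetical stand-in for `derive_lowercase_identifier`: lowercase the
// IRC-30 symbol and keep only characters that are legal in a Move identifier.
fn lowercase_identifier(symbol: &str) -> String {
    let mut ident: String = symbol
        .to_lowercase()
        .chars()
        .filter(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || *c == '_')
        .collect();
    // A Move identifier must not be empty or start with a digit; the real code
    // falls back to a random alphanumeric string instead of this prefix hack.
    if ident.chars().next().map_or(true, |c| c.is_ascii_digit()) {
        ident.insert(0, 't');
    }
    ident
}

fn main() {
    assert_eq!(lowercase_identifier("SOON"), "soon");
    assert_eq!(lowercase_identifier("42-Coin"), "t42coin");
}
```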
#[derive(Debug)] pub struct NativeTokenModuleData { pub foundry_id: FoundryId, @@ -106,7 +116,8 @@ impl TryFrom<&FoundryOutput> for NativeTokenPackageData { } })?; - // Derive a valid, lowercase move identifier from the symbol field in the irc30 metadata + // Derive a valid, lowercase move identifier from the symbol field in the irc30 + // metadata let identifier = derive_lowercase_identifier(irc_30_metadata.symbol())?; let decimals = u8::try_from(*irc_30_metadata.decimals()).map_err(|e| { @@ -132,7 +143,12 @@ impl TryFrom<&FoundryOutput> for NativeTokenPackageData { decimals, symbol: identifier, circulating_tokens: output.token_scheme().as_simple().minted_tokens().as_u64() - - output.token_scheme().as_simple().melted_tokens().as_u64(), // we know that "Melted Tokens must not be greater than Minted Tokens" + - output.token_scheme().as_simple().melted_tokens().as_u64(), /* we know that + * "Melted Tokens + * must not be + * greater than + * Minted Tokens" + */ maximum_supply: maximum_supply_u64, coin_name: irc_30_metadata.name().to_owned(), coin_description: irc_30_metadata.description().clone().unwrap_or_default(), @@ -179,16 +195,19 @@ fn derive_lowercase_identifier(input: &str) -> Result<String> { #[cfg(test)] mod tests { - use crate::stardust::native_token::package_builder; - use iota_sdk::types::block::address::AliasAddress; - use iota_sdk::types::block::output::feature::MetadataFeature; - use iota_sdk::types::block::output::unlock_condition::ImmutableAliasAddressUnlockCondition; - use iota_sdk::types::block::output::{ AliasId, Feature, FoundryOutputBuilder, SimpleTokenScheme, TokenScheme, + use iota_sdk::{ + types::block::{ + address::AliasAddress, + output::{ + feature::MetadataFeature, unlock_condition::ImmutableAliasAddressUnlockCondition, + AliasId, Feature, FoundryOutputBuilder, SimpleTokenScheme, TokenScheme, + }, + }, + U256, }; - use iota_sdk::U256; use super::*; + use crate::stardust::native_token::package_builder; #[test] fn test_foundry_output_with_default_metadata() -> Result<()> { diff --git a/crates/sui-genesis-builder/src/stardust/types/address.rs b/crates/sui-genesis-builder/src/stardust/types/address.rs index a4827eb7b95..417e3c21def 100644 --- a/crates/sui-genesis-builder/src/stardust/types/address.rs +++ b/crates/sui-genesis-builder/src/stardust/types/address.rs @@ -3,14 +3,16 @@ use sui_types::{base_types::SuiAddress, object::Owner}; /// Converts a ["Stardust" `Address`](Address) to a [`SuiAddress`]. /// -/// This is intended as the only conversion function to go from Stardust to Sui addresses, so there is only -/// one place to potentially update it if we decide to change it later. +/// This is intended as the only conversion function to go from Stardust to Sui +/// addresses, so there is only one place to potentially update it if we decide +/// to change it later. pub fn stardust_to_sui_address(stardust_address: impl Into<Address>
) -> anyhow::Result<SuiAddress> { stardust_address.into().to_string().parse() } -/// Converts a ["Stardust" `Address`](Address) to a [`SuiAddress`] and then wraps it into an [`Owner`] -/// which is either address- or object-owned depending on the stardust address. +/// Converts a ["Stardust" `Address`](Address) to a [`SuiAddress`] and then +/// wraps it into an [`Owner`] which is either address- or object-owned +/// depending on the stardust address. pub fn stardust_to_sui_address_owner( stardust_address: impl Into<Address>
, ) -> anyhow::Result<Owner> { diff --git a/crates/sui-genesis-builder/src/stardust/types/alias.rs b/crates/sui-genesis-builder/src/stardust/types/alias.rs index c1d28ee4e10..7eea204f18f 100644 --- a/crates/sui-genesis-builder/src/stardust/types/alias.rs +++ b/crates/sui-genesis-builder/src/stardust/types/alias.rs @@ -22,8 +22,8 @@ pub const ALIAS_STRUCT_NAME: &IdentStr = ident_str!("Alias"); #[serde_as] #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] pub struct Alias { - /// The ID of the Alias = hash of the Output ID that created the Alias Output in Stardust. - /// This is the AliasID from Stardust. + /// The ID of the Alias = hash of the Output ID that created the Alias + /// Output in Stardust. This is the AliasID from Stardust. pub id: UID, /// The last State Controller address assigned before the migration. @@ -139,8 +139,9 @@ pub struct AliasOutput { /// The amount of IOTA coins held by the output. pub iota: Balance, - /// The `Bag` holds native tokens, key-ed by the stringified type of the asset. - /// Example: key: "0xabcded::soon::SOON", value: Balance<0xabcded::soon::SOON>. + /// The `Bag` holds native tokens, key-ed by the stringified type of the + /// asset. Example: key: "0xabcded::soon::SOON", value: + /// Balance<0xabcded::soon::SOON>. pub native_tokens: Bag, } @@ -154,7 +155,8 @@ impl AliasOutput { } } - /// Creates the Move-based Alias Output model from a Stardust-based Alias Output. + /// Creates the Move-based Alias Output model from a Stardust-based Alias + /// Output. pub fn try_from_stardust( object_id: ObjectID, alias: &StardustAlias, diff --git a/crates/sui-genesis-builder/src/stardust/types/output.rs b/crates/sui-genesis-builder/src/stardust/types/output.rs index 91a7a59b70f..d8309b2994e 100644 --- a/crates/sui-genesis-builder/src/stardust/types/output.rs +++ b/crates/sui-genesis-builder/src/stardust/types/output.rs @@ -1,4 +1,5 @@ -//! Rust types and logic for the Move counterparts in the `stardust` system package. +//! Rust types and logic for the Move counterparts in the `stardust` system +//! package. use anyhow::Result; use move_core_types::{ident_str, identifier::IdentStr, language_storage::StructTag}; @@ -28,9 +29,11 @@ pub const BASIC_OUTPUT_STRUCT_NAME: &IdentStr = ident_str!("BasicOutput"); pub struct ExpirationUnlockCondition { /// The address who owns the output before the timestamp has passed. pub owner: SuiAddress, - /// The address that is allowed to spend the locked funds after the timestamp has passed. + /// The address that is allowed to spend the locked funds after the + /// timestamp has passed. pub return_address: SuiAddress, - /// Before this unix time, Address Unlock Condition is allowed to unlock the output, after that only the address defined in Return Address. + /// Before this unix time, Address Unlock Condition is allowed to unlock the + /// output, after that only the address defined in Return Address. pub unix_time: u32, } @@ -60,9 +63,11 @@ impl TryFrom<&iota_sdk::types::block::output::BasicOutput> for ExpirationUnlockC #[serde_as] #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)] pub struct StorageDepositReturnUnlockCondition { - /// The address to which the consuming transaction should deposit the amount defined in Return Amount. + /// The address to which the consuming transaction should deposit the amount + /// defined in Return Amount. pub return_address: SuiAddress, - /// The amount of IOTA coins the consuming transaction should deposit to the address defined in Return Address.
+ /// The amount of IOTA coins the consuming transaction should deposit to the + /// address defined in Return Address. pub return_amount: u64, } @@ -87,7 +92,8 @@ impl TryFrom<&iota_sdk::types::block::output::unlock_condition::StorageDepositRe #[serde_as] #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)] pub struct TimelockUnlockCondition { - /// The unix time (seconds since Unix epoch) starting from which the output can be consumed. + /// The unix time (seconds since Unix epoch) starting from which the output + /// can be consumed. pub unix_time: u32, } @@ -113,8 +119,9 @@ pub struct BasicOutput { /// The amount of IOTA coins held by the output. pub iota: Balance, - /// The `Bag` holds native tokens, key-ed by the stringified type of the asset. - /// Example: key: "0xabcded::soon::SOON", value: Balance<0xabcded::soon::SOON>. + /// The `Bag` holds native tokens, key-ed by the stringified type of the + /// asset. Example: key: "0xabcded::soon::SOON", value: + /// Balance<0xabcded::soon::SOON>. pub native_tokens: Bag, /// The storage deposit return unlock condition. @@ -124,7 +131,8 @@ /// The expiration unlock condition. pub expiration: Option<ExpirationUnlockCondition>, - // Possible features, they have no effect and only here to hold data until the object is deleted. + // Possible features, they have no effect and only here to hold data until the object is + // deleted. /// The metadata feature. pub metadata: Option<Vec<u8>>, /// The tag feature. @@ -134,7 +142,8 @@ } impl BasicOutput { - /// Construct the basic output with an empty [`Bag`] through the [`OutputHeader`] + /// Construct the basic output with an empty [`Bag`] through the + /// [`OutputHeader`] /// and [`Output`][iota_sdk::types::block::output::BasicOutput]. pub fn new(header: OutputHeader, output: &iota_sdk::types::block::output::BasicOutput) -> Self { let id = UID::new(ObjectID::new(header.output_id().hash())); diff --git a/crates/sui-genesis-builder/src/stardust/types/snapshot.rs b/crates/sui-genesis-builder/src/stardust/types/snapshot.rs index 050d9b75be0..0c79aa810ee 100644 --- a/crates/sui-genesis-builder/src/stardust/types/snapshot.rs +++ b/crates/sui-genesis-builder/src/stardust/types/snapshot.rs @@ -30,11 +30,13 @@ pub const TOTAL_SUPPLY_IOTA: u64 = 4_600_000_000_000_000; #[derive(Debug, Copy, Clone, Eq, PartialEq, packable::Packable)] #[packable(unpack_error = StardustError)] pub enum SnapshotKind { - /// Full is a snapshot which contains the full ledger entry for a given milestone plus the milestone diffs which - /// subtracted to the ledger milestone reduce to the snapshot milestone ledger. + /// Full is a snapshot which contains the full ledger entry for a given + /// milestone plus the milestone diffs which subtracted to the ledger + /// milestone reduce to the snapshot milestone ledger. Full = 0, - /// Delta is a snapshot which contains solely diffs of milestones newer than a certain ledger milestone instead of - /// the complete ledger state of a given milestone. + /// Delta is a snapshot which contains solely diffs of milestones newer than + /// a certain ledger milestone instead of the complete ledger state of a + /// given milestone.
Delta = 1, } diff --git a/crates/sui-genesis-builder/src/stardust/types/timelock.rs b/crates/sui-genesis-builder/src/stardust/types/timelock.rs index a539fecfddd..7fc132a0397 100644 --- a/crates/sui-genesis-builder/src/stardust/types/timelock.rs +++ b/crates/sui-genesis-builder/src/stardust/types/timelock.rs @@ -91,7 +91,8 @@ pub fn try_from_stardust( let id = UID::new(ObjectID::new(header.output_id().hash())); let locked = Balance::new(basic_output.amount()); - // We already checked the existence of the timelock unlock condition at this point. + // We already checked the existence of the timelock unlock condition at this + // point. let timelock_uc = basic_output .unlock_conditions() .timelock() diff --git a/crates/sui-genesis-builder/src/validator_info.rs b/crates/sui-genesis-builder/src/validator_info.rs index d40c1fd3474..0f14acfae2e 100644 --- a/crates/sui-genesis-builder/src/validator_info.rs +++ b/crates/sui-genesis-builder/src/validator_info.rs @@ -5,12 +5,14 @@ use anyhow::bail; use fastcrypto::traits::ToFromBytes; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::{ verify_proof_of_possession, AuthorityPublicKey, AuthorityPublicKeyBytes, AuthoritySignature, NetworkPublicKey, +use sui_types::{ + base_types::SuiAddress, + crypto::{ + verify_proof_of_possession, AuthorityPublicKey, AuthorityPublicKeyBytes, + AuthoritySignature, NetworkPublicKey, + }, + multiaddr::Multiaddr, }; -use sui_types::multiaddr::Multiaddr; const MAX_VALIDATOR_METADATA_LENGTH: usize = 256; @@ -207,7 +209,7 @@ pub struct GenesisValidatorMetadata { pub gas_price: u64, pub commission_rate: u64, - pub protocol_public_key: Vec<u8>, //AuthorityPublicKeyBytes, + pub protocol_public_key: Vec<u8>, // AuthorityPublicKeyBytes, pub proof_of_possession: Vec<u8>, // AuthoritySignature, pub network_public_key: Vec<u8>, // NetworkPublicKey, diff --git a/crates/sui-graphql-e2e-tests/tests/tests.rs b/crates/sui-graphql-e2e-tests/tests/tests.rs index a875dd42da0..69b549209fd 100644 --- a/crates/sui-graphql-e2e-tests/tests/tests.rs +++ b/crates/sui-graphql-e2e-tests/tests/tests.rs @@ -4,6 +4,7 @@ #![allow(unused_imports)] #![allow(unused_variables)] use std::{path::Path, sync::Arc}; + use sui_transactional_test_runner::{ run_test_impl, test_adapter::{SuiTestAdapter, PRE_COMPILED}, diff --git a/crates/sui-graphql-rpc-client/src/response.rs b/crates/sui-graphql-rpc-client/src/response.rs index 870c0d21f9d..a3f9605b9cf 100644 --- a/crates/sui-graphql-rpc-client/src/response.rs +++ b/crates/sui-graphql-rpc-client/src/response.rs @@ -1,15 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::ClientError; +use std::{collections::BTreeMap, net::SocketAddr}; + use async_graphql::{Response, ServerError, Value}; use axum::http::HeaderName; use hyper::HeaderMap; use reqwest::Response as ReqwestResponse; use serde_json::json; -use std::{collections::BTreeMap, net::SocketAddr}; use sui_graphql_rpc_headers::VERSION_HEADER; +use super::ClientError; + #[derive(Debug)] pub struct GraphqlResponse { headers: HeaderMap, diff --git a/crates/sui-graphql-rpc-client/src/simple_client.rs b/crates/sui-graphql-rpc-client/src/simple_client.rs index 3fcfc911733..cbe8b58c6f3 100644 --- a/crates/sui-graphql-rpc-client/src/simple_client.rs +++ b/crates/sui-graphql-rpc-client/src/simple_client.rs @@ -1,15 +1,16 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::ClientError; +use std::collections::BTreeMap; + use axum::http::HeaderValue; use hyper::header; use reqwest::Response; use serde_json::Value; -use std::collections::BTreeMap; use sui_graphql_rpc_headers::LIMITS_HEADER; use super::response::GraphqlResponse; +use crate::ClientError; #[derive(Clone, Debug)] pub struct GraphqlQueryVariable { diff --git a/crates/sui-graphql-rpc/src/commands.rs b/crates/sui-graphql-rpc/src/commands.rs index 33fa75ff540..8eac2903a0d 100644 --- a/crates/sui-graphql-rpc/src/commands.rs +++ b/crates/sui-graphql-rpc/src/commands.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use clap::*; use std::path::PathBuf; +use clap::*; + #[derive(Parser)] #[clap( name = "sui-graphql-rpc", diff --git a/crates/sui-graphql-rpc/src/config.rs b/crates/sui-graphql-rpc/src/config.rs index 1510458d504..190dd32e947 100644 --- a/crates/sui-graphql-rpc/src/config.rs +++ b/crates/sui-graphql-rpc/src/config.rs @@ -1,17 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::functional_group::FunctionalGroup; -use crate::types::big_int::BigInt; +use std::{collections::BTreeSet, fmt::Display, time::Duration}; + use async_graphql::*; use fastcrypto_zkp::bn254::zk_login_api::ZkLoginEnv; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeSet, fmt::Display, time::Duration}; use sui_json_rpc::name_service::NameServiceConfig; +use crate::{functional_group::FunctionalGroup, types::big_int::BigInt}; + // TODO: calculate proper cost limits -/// These values are set to support TS SDK shim layer queries for json-rpc compatibility. +/// These values are set to support TS SDK shim layer queries for json-rpc +/// compatibility. const MAX_QUERY_NODES: u32 = 300; const MAX_QUERY_PAYLOAD_SIZE: u32 = 5_000; @@ -59,9 +61,10 @@ pub struct ServerConfig { pub ide: Ide, } -/// Configuration for connections for the RPC, passed in as command-line arguments. This configures -/// specific connections between this service and other services, and might differ from instance to -/// instance of the GraphQL service. +/// Configuration for connections for the RPC, passed in as command-line +/// arguments. This configures specific connections between this service and +/// other services, and might differ from instance to instance of the GraphQL +/// service. #[derive(Serialize, Clone, Deserialize, Debug, Eq, PartialEq)] pub struct ConnectionConfig { /// Port to bind the server to @@ -74,9 +77,9 @@ pub struct ConnectionConfig { pub(crate) prom_port: u16, } -/// Configuration on features supported by the GraphQL service, passed in a TOML-based file. These -/// configurations are shared across fleets of the service, i.e. all testnet services will have the -/// same `ServiceConfig`. +/// Configuration on features supported by the GraphQL service, passed in a +/// TOML-based file. These configurations are shared across fleets of the +/// service, i.e. all testnet services will have the same `ServiceConfig`. #[derive(Serialize, Clone, Deserialize, Debug, Eq, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub struct ServiceConfig { @@ -136,21 +139,22 @@ pub struct BackgroundTasksConfig { } /// The Version of the service. `year.month` represents the major release. -/// New `patch` versions represent backwards compatible fixes for their major release. -/// The `full` version is `year.month.patch-sha`. 
+/// New `patch` versions represent backwards compatible fixes for their major +/// release. The `full` version is `year.month.patch-sha`. #[derive(Copy, Clone, Debug)] pub struct Version { /// The year of this release. pub year: &'static str, /// The month of this release. pub month: &'static str, - /// The patch is a positive number incremented for every compatible release on top of the major.month release. + /// The patch is a positive number incremented for every compatible release + /// on top of the major.month release. pub patch: &'static str, /// The commit sha for this release. pub sha: &'static str, /// The full version string. - /// Note that this extra field is used only for the uptime_metric function which requries a - /// &'static str. + /// Note that this extra field is used only for the uptime_metric function + /// which requires a &'static str. pub full: &'static str, } @@ -251,27 +255,31 @@ impl ServiceConfig { self.limits.max_query_depth } - /// The maximum number of nodes (field names) the service will accept in a single query. + /// The maximum number of nodes (field names) the service will accept in a + /// single query. pub async fn max_query_nodes(&self) -> u32 { self.limits.max_query_nodes } /// The maximum number of output nodes in a GraphQL response. /// - /// Non-connection nodes have a count of 1, while connection nodes are counted as - /// the specified 'first' or 'last' number of items, or the default_page_size - /// as set by the server if those arguments are not set. + /// Non-connection nodes have a count of 1, while connection nodes are + /// counted as the specified 'first' or 'last' number of items, or the + /// default_page_size as set by the server if those arguments are not + /// set. /// - /// Counts accumulate multiplicatively down the query tree. For example, if a query starts - /// with a connection of first: 10 and has a field to a connection with last: 20, the count - /// at the second level would be 200 nodes. This is then summed to the count of 10 nodes - /// at the first level, for a total of 210 nodes. + /// Counts accumulate multiplicatively down the query tree. For example, if + /// a query starts with a connection of first: 10 and has a field to a + /// connection with last: 20, the count at the second level would be 200 + /// nodes. This is then summed to the count of 10 nodes at the first + /// level, for a total of 210 nodes. pub async fn max_output_nodes(&self) -> u64 { self.limits.max_output_nodes } - /// Maximum estimated cost of a database query used to serve a GraphQL request. This is - /// measured in the same units that the database uses in EXPLAIN queries. + /// Maximum estimated cost of a database query used to serve a GraphQL + /// request. This is measured in the same units that the database uses + /// in EXPLAIN queries. async fn max_db_query_cost(&self) -> BigInt { BigInt::from(self.limits.max_db_query_cost) } @@ -296,24 +304,26 @@ impl ServiceConfig { self.limits.max_query_payload_size } - /// Maximum nesting allowed in type arguments in Move Types resolved by this service. + /// Maximum nesting allowed in type arguments in Move Types resolved by this + /// service. async fn max_type_argument_depth(&self) -> u32 { self.limits.max_type_argument_depth } - /// Maximum number of type arguments passed into a generic instantiation of a Move Type resolved - /// by this service. + /// Maximum number of type arguments passed into a generic instantiation of + /// a Move Type resolved by this service.
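To make the two limits above concrete: `max_type_argument_depth` bounds how deeply type arguments nest, and `max_type_argument_width` bounds how many type arguments appear in any single instantiation. A sketch over a simplified stand-in type grammar (the `Ty`, `depth`, and `max_width` names are invented for illustration; the service checks real Move types):

```rust
// Minimal stand-in for a Move type with generic instantiations.
enum Ty {
    Prim,
    Generic { type_args: Vec<Ty> },
}

// Depth: deepest nesting of type arguments.
fn depth(ty: &Ty) -> u32 {
    match ty {
        Ty::Prim => 0,
        Ty::Generic { type_args } => 1 + type_args.iter().map(depth).max().unwrap_or(0),
    }
}

// Width: the largest number of type arguments at any one instantiation.
fn max_width(ty: &Ty) -> u32 {
    match ty {
        Ty::Prim => 0,
        Ty::Generic { type_args } => type_args
            .iter()
            .map(max_width)
            .max()
            .unwrap_or(0)
            .max(type_args.len() as u32),
    }
}

fn main() {
    // Shaped like Table<u64, Option<vector<u8>>>: depth 3, width 2.
    let ty = Ty::Generic {
        type_args: vec![
            Ty::Prim,
            Ty::Generic {
                type_args: vec![Ty::Generic { type_args: vec![Ty::Prim] }],
            },
        ],
    };
    assert_eq!(depth(&ty), 3);
    assert_eq!(max_width(&ty), 2);
}
```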
async fn max_type_argument_width(&self) -> u32 { self.limits.max_type_argument_width } - /// Maximum number of structs that need to be processed when calculating the layout of a single - /// Move Type. + /// Maximum number of structs that need to be processed when calculating the + /// layout of a single Move Type. async fn max_type_nodes(&self) -> u32 { self.limits.max_type_nodes } - /// Maximum nesting allowed in struct fields when calculating the layout of a single Move Type. + /// Maximum nesting allowed in struct fields when calculating the layout of + /// a single Move Type. async fn max_move_value_depth(&self) -> u32 { self.limits.max_move_value_depth } diff --git a/crates/sui-graphql-rpc/src/consistency.rs b/crates/sui-graphql-rpc/src/consistency.rs index 7cc85443334..8ac0e44bcea 100644 --- a/crates/sui-graphql-rpc/src/consistency.rs +++ b/crates/sui-graphql-rpc/src/consistency.rs @@ -5,50 +5,58 @@ use async_graphql::connection::CursorType; use serde::{Deserialize, Serialize}; use sui_indexer::models::objects::StoredHistoryObject; -use crate::data::Conn; -use crate::raw_query::RawQuery; -use crate::types::checkpoint::Checkpoint; -use crate::types::cursor::{JsonCursor, Page}; -use crate::types::object::Cursor; -use crate::{filter, query}; +use crate::{ + data::Conn, + filter, query, + raw_query::RawQuery, + types::{ + checkpoint::Checkpoint, + cursor::{JsonCursor, Page}, + object::Cursor, + }, +}; #[derive(Copy, Clone)] pub(crate) enum View { - /// Return objects that fulfill the filtering criteria, even if there are more recent versions - /// of the object within the checkpoint range. This is used for lookups such as by `object_id` - /// and `version`. + /// Return objects that fulfill the filtering criteria, even if there are + /// more recent versions of the object within the checkpoint range. This + /// is used for lookups such as by `object_id` and `version`. Historical, - /// Return objects that fulfill the filtering criteria and are the most recent version within - /// the checkpoint range. + /// Return objects that fulfill the filtering criteria and are the most + /// recent version within the checkpoint range. Consistent, } -/// The consistent cursor for an index into a `Vec` field is constructed from the index of the -/// element and the checkpoint the cursor was constructed at. +/// The consistent cursor for an index into a `Vec` field is constructed from +/// the index of the element and the checkpoint the cursor was constructed at. #[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] pub(crate) struct ConsistentIndexCursor { #[serde(rename = "i")] pub ix: usize, - /// The checkpoint sequence number at which the entity corresponding to this cursor was viewed at. + /// The checkpoint sequence number at which the entity corresponding to this + /// cursor was viewed. pub c: u64, } -/// The consistent cursor for an index into a `Map` field is constructed from the name or key of the -/// element and the checkpoint the cursor was constructed at. +/// The consistent cursor for an index into a `Map` field is constructed from +/// the name or key of the element and the checkpoint the cursor was constructed +/// at. #[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] pub(crate) struct ConsistentNamedCursor { #[serde(rename = "n")] pub name: String, - /// The checkpoint sequence number at which the entity corresponding to this cursor was viewed at. + /// The checkpoint sequence number at which the entity corresponding to this + /// cursor was viewed.
pub c: u64, } -/// The high checkpoint watermark stamped on each GraphQL request. This is used to ensure -/// cross-query consistency. +/// The high checkpoint watermark stamped on each GraphQL request. This is used +/// to ensure cross-query consistency. #[derive(Clone, Copy)] pub(crate) struct CheckpointViewedAt(pub u64); -/// Trait for cursors that have a checkpoint sequence number associated with them. +/// Trait for cursors that have a checkpoint sequence number associated with +/// them. pub(crate) trait Checkpointed: CursorType { fn checkpoint_viewed_at(&self) -> u64; } @@ -65,40 +73,46 @@ impl Checkpointed for JsonCursor { } } -/// Constructs a `RawQuery` against the `objects_snapshot` and `objects_history` table to fetch -/// objects that satisfy some filtering criteria `filter_fn` within the provided checkpoint range -/// `lhs` and `rhs`. The `objects_snapshot` table contains the latest versions of objects up to a -/// checkpoint sequence number, and `objects_history` captures changes after that, so a query to +/// Constructs a `RawQuery` against the `objects_snapshot` and `objects_history` +/// table to fetch objects that satisfy some filtering criteria `filter_fn` +/// within the provided checkpoint range `lhs` and `rhs`. The `objects_snapshot` +/// table contains the latest versions of objects up to a checkpoint sequence +/// number, and `objects_history` captures changes after that, so a query to /// both tables is necessary to handle these object states: -/// 1) In snapshot, not in history - occurs when an object gets snapshotted and then has not been -/// modified since +/// 1) In snapshot, not in history - occurs when an object gets snapshotted and +/// then has not been modified since /// 2) In history, not in snapshot - occurs when a new object is created -/// 3) In snapshot and in history - occurs when an object is snapshotted and further modified +/// 3) In snapshot and in history - occurs when an object is snapshotted and +/// further modified /// -/// Additionally, even among objects that satisfy the filtering criteria, it is possible that there -/// is a yet more recent version of the object within the checkpoint range, such as when the owner -/// of an object changes. The `LEFT JOIN` against the `objects_history` table handles this and -/// scenario 3. Note that the implementation applies the `LEFT JOIN` to each inner query in -/// conjunction with the `page`'s cursor and limit. If this was instead done once at the end, the -/// query would be drastically inefficient as we would be dealing with a large number of rows from -/// `objects_snapshot`, and potentially `objects_history` as the checkpoint range grows. Instead, -/// the `LEFT JOIN` and limit applied on the inner queries work in conjunction to make the final -/// query noticeably more efficient. The former serves as a filter, and the latter reduces the -/// number of rows that the database needs to work with. +/// Additionally, even among objects that satisfy the filtering criteria, it is +/// possible that there is a yet more recent version of the object within the +/// checkpoint range, such as when the owner of an object changes. The `LEFT +/// JOIN` against the `objects_history` table handles this and scenario 3. Note +/// that the implementation applies the `LEFT JOIN` to each inner query in +/// conjunction with the `page`'s cursor and limit. 
If this was instead done +/// once at the end, the query would be drastically inefficient as we would be +/// dealing with a large number of rows from `objects_snapshot`, and potentially +/// `objects_history` as the checkpoint range grows. Instead, the `LEFT JOIN` +/// and limit applied on the inner queries work in conjunction to make the final +/// query noticeably more efficient. The former serves as a filter, and the +/// latter reduces the number of rows that the database needs to work with. /// -/// However, not all queries require this `LEFT JOIN`, such as when no filtering criteria is -/// specified, or if the filter is a lookup at a specific `object_id` and `object_version`. This is -/// controlled by the `view` parameter. If the `view` parameter is set to `Consistent`, this filter -/// is applied, otherwise if the `view` parameter is set to `Historical`, this filter is not -/// applied. +/// However, not all queries require this `LEFT JOIN`, such as when no filtering +/// criteria is specified, or if the filter is a lookup at a specific +/// `object_id` and `object_version`. This is controlled by the `view` +/// parameter. If the `view` parameter is set to `Consistent`, this filter +/// is applied, otherwise if the `view` parameter is set to `Historical`, this +/// filter is not applied. /// -/// Finally, the two queries are merged together with `UNION ALL`. We use `UNION ALL` instead of -/// `UNION`; the latter incurs significant overhead as it additionally de-duplicates records from -/// both sources. This dedupe is unnecessary, since we have the fragment `SELECT DISTINCT ON -/// (object_id) ... ORDER BY object_id, object_version DESC`. This is also redundant for the most -/// part, due to the invariant that the `objects_history` captures changes that occur after -/// `objects_snapshot`, but it's a safeguard to handle any possible overlap during snapshot -/// creation. +/// Finally, the two queries are merged together with `UNION ALL`. We use `UNION +/// ALL` instead of `UNION`; the latter incurs significant overhead as it +/// additionally de-duplicates records from both sources. This dedupe is +/// unnecessary, since we have the fragment `SELECT DISTINCT ON (object_id) ... +/// ORDER BY object_id, object_version DESC`. This is also redundant for the +/// most part, due to the invariant that the `objects_history` captures changes +/// that occur after `objects_snapshot`, but it's a safeguard to handle any +/// possible overlap during snapshot creation. 
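In concrete terms, the statement described by the doc comment above has roughly the following skeleton: two filtered, paginated inner queries (snapshot and history), each `LEFT JOIN`ed against a "newer version" subquery, merged with `UNION ALL`, and collapsed with `SELECT DISTINCT ON (object_id)`. This is a simplified illustration with the filter and cursor fragments elided, not the exact SQL that the `query!`/`filter!` macros assemble:

```rust
// Illustrative skeleton of the generated statement, with filtering criteria
// and cursor pagination omitted for readability.
fn objects_query_skeleton(lhs: i64, rhs: i64) -> String {
    let newer = format!(
        "SELECT object_id, object_version FROM objects_history \
         WHERE checkpoint_sequence_number BETWEEN {lhs} AND {rhs}"
    );
    let inner = |candidates: &str| {
        format!(
            "SELECT candidates.* FROM ({candidates}) candidates \
             LEFT JOIN ({newer}) newer \
             ON candidates.object_id = newer.object_id \
             AND candidates.object_version < newer.object_version \
             WHERE newer.object_version IS NULL"
        )
    };
    let snapshot_objs = inner("SELECT * FROM objects_snapshot");
    let history_objs = inner(&format!(
        "SELECT * FROM objects_history \
         WHERE checkpoint_sequence_number BETWEEN {lhs} AND {rhs}"
    ));
    format!(
        "SELECT DISTINCT ON (object_id) * \
         FROM (({snapshot_objs}) UNION ALL ({history_objs})) candidates \
         ORDER BY object_id, object_version DESC"
    )
}

fn main() {
    println!("{}", objects_query_skeleton(0, 100));
}
```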
pub(crate) fn build_objects_query( view: View, lhs: i64, @@ -107,7 +121,8 @@ filter_fn: impl Fn(RawQuery) -> RawQuery, newer_criteria: impl Fn(RawQuery) -> RawQuery, ) -> RawQuery { - // Subquery to be used in `LEFT JOIN` against the inner queries for more recent object versions + // Subquery to be used in `LEFT JOIN` against the inner queries for more recent + // object versions let newer = newer_criteria(filter!( query!("SELECT object_id, object_version FROM objects_history"), format!(r#"checkpoint_sequence_number BETWEEN {} AND {}"#, lhs, rhs) )); @@ -118,7+133,8 @@ let mut snapshot_objs = match view { View::Consistent => { - // The `LEFT JOIN` serves as a filter to remove objects that have a more recent version + // The `LEFT JOIN` serves as a filter to remove objects that have a more recent + // version let mut snapshot_objs = query!( r#"SELECT candidates.* FROM ({}) candidates LEFT JOIN ({}) newer @@ -138,12 +154,14 @@ } }; - // Always apply cursor pagination and limit to constrain the number of rows returned, ensure - // that the inner queries are in step, and to handle the scenario where a user provides more - // `objectKeys` than allowed by the maximum page size. + // Always apply cursor pagination and limit to constrain the number of rows + // returned, ensure that the inner queries are in step, and to handle the + // scenario where a user provides more `objectKeys` than allowed by the + // maximum page size. snapshot_objs = page.apply::<StoredHistoryObject>(snapshot_objs); - // Similar to the snapshot query, construct the filtered inner query for the history table. + // Similar to the snapshot query, construct the filtered inner query for the + // history table. let mut history_objs_inner = query!("SELECT * FROM objects_history"); history_objs_inner = filter_fn(history_objs_inner); @@ -174,14 +192,15 @@ } }; - // Always apply cursor pagination and limit to constrain the number of rows returned, ensure - // that the inner queries are in step, and to handle the scenario where a user provides more - // `objectKeys` than allowed by the maximum page size. + // Always apply cursor pagination and limit to constrain the number of rows + // returned, ensure that the inner queries are in step, and to handle the + // scenario where a user provides more `objectKeys` than allowed by the + // maximum page size. history_objs = page.apply::<StoredHistoryObject>(history_objs); - // Combine the two queries, and select the most recent version of each object. The result set is - // the most recent version of objects from `objects_snapshot` and `objects_history` that match - // the filter criteria. + // Combine the two queries, and select the most recent version of each object. + // The result set is the most recent version of objects from + // `objects_snapshot` and `objects_history` that match the filter criteria. let query = query!( r#"SELECT DISTINCT ON (object_id) * FROM (({}) UNION ALL ({})) candidates"#, snapshot_objs, @@ -193,11 +212,13 @@ query!("SELECT * FROM ({}) candidates", query) } -/// Given a `checkpoint_viewed_at` representing the checkpoint sequence number when the query was -/// made, check whether the value falls under the current available range of the database.
Returns -/// `None` if the `checkpoint_viewed_at` lies outside the range, otherwise return a tuple consisting -/// of the available range's lower bound and the `checkpoint_viewed_at`, or the upper bound of the -/// database if `checkpoint_viewed_at` is `None`. +/// Given a `checkpoint_viewed_at` representing the checkpoint sequence number +/// when the query was made, check whether the value falls under the current +/// available range of the database. Returns `None` if the +/// `checkpoint_viewed_at` lies outside the range, otherwise return a tuple +/// consisting of the available range's lower bound and the +/// `checkpoint_viewed_at`, or the upper bound of the database if +/// `checkpoint_viewed_at` is `None`. pub(crate) fn consistent_range( conn: &mut Conn, checkpoint_viewed_at: Option<u64>, diff --git a/crates/sui-graphql-rpc/src/context_data/db_data_provider.rs b/crates/sui-graphql-rpc/src/context_data/db_data_provider.rs index 6a5c55b974b..258a2ffa6a8 100644 --- a/crates/sui-graphql-rpc/src/context_data/db_data_provider.rs +++ b/crates/sui-graphql-rpc/src/context_data/db_data_provider.rs @@ -1,14 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - config::{DEFAULT_REQUEST_TIMEOUT_MS, DEFAULT_SERVER_DB_POOL_SIZE}, - error::Error, - types::{address::Address, sui_address::SuiAddress, validator::Validator}, -}; use std::{collections::BTreeMap, time::Duration}; - -use sui_indexer::db::PgConnectionPoolConfig; -use sui_indexer::{apis::GovernanceReadApi, indexer_reader::IndexerReader}; + +use sui_indexer::{ + apis::GovernanceReadApi, db::PgConnectionPoolConfig, indexer_reader::IndexerReader, +}; use sui_json_rpc_types::Stake as RpcStakedSui; use sui_types::{ base_types::SuiAddress as NativeSuiAddress, @@ -18,6 +15,12 @@ use sui_types::{ }, }; +use crate::{ + config::{DEFAULT_REQUEST_TIMEOUT_MS, DEFAULT_SERVER_DB_POOL_SIZE}, + error::Error, + types::{address::Address, sui_address::SuiAddress, validator::Validator}, +}; + pub(crate) struct PgManager { pub inner: IndexerReader, } @@ -27,7 +30,8 @@ impl PgManager { Self { inner } } - /// Create a new underlying reader, which is used by this type as well as other data providers. + /// Create a new underlying reader, which is used by this type as well as + /// other data providers. pub(crate) fn reader(db_url: impl Into<String>) -> Result<Self, Error> { Self::reader_with_config( db_url, @@ -85,8 +89,9 @@ impl PgManager { } } - /// Make a request to the RPC for its representations of the staked sui we parsed out of the - /// object. Used to implement fields that are implemented in JSON-RPC but not GraphQL (yet). + /// Make a request to the RPC for its representations of the staked sui we + /// parsed out of the object. Used to implement fields that are + /// implemented in JSON-RPC but not GraphQL (yet). pub(crate) async fn fetch_rpc_staked_sui( &self, stake: NativeStakedSui, @@ -114,9 +119,10 @@ impl PgManager { } } -/// `checkpoint_viewed_at` represents the checkpoint sequence number at which the set of -/// `SuiValidatorSummary` was queried for. Each `Validator` will inherit this checkpoint, so that -/// when viewing the `Validator`'s state, it will be as if it was read at the same checkpoint. +/// `checkpoint_viewed_at` represents the checkpoint sequence number at which +/// the set of `SuiValidatorSummary` was queried for. Each `Validator` will +/// inherit this checkpoint, so that when viewing the `Validator`'s state, it +/// will be as if it was read at the same checkpoint.
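A simplified model of the `consistent_range` contract described above, with the database access stripped out: clamp the watermark to the available checkpoint range, defaulting to the upper bound when no watermark was supplied. The function name and arguments here are assumptions for illustration only:

```rust
// Hypothetical, database-free version of the range check: `available_lo` and
// `available_hi` stand in for the bounds the real helper reads from the DB.
fn consistent_bounds(
    available_lo: u64,
    available_hi: u64,
    checkpoint_viewed_at: Option<u64>,
) -> Option<(u64, u64)> {
    let hi = checkpoint_viewed_at.unwrap_or(available_hi);
    (available_lo..=available_hi)
        .contains(&hi)
        .then_some((available_lo, hi))
}

fn main() {
    assert_eq!(consistent_bounds(100, 200, Some(150)), Some((100, 150)));
    assert_eq!(consistent_bounds(100, 200, None), Some((100, 200)));
    // A watermark outside the available range yields None.
    assert_eq!(consistent_bounds(100, 200, Some(99)), None);
}
```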
pub(crate) fn convert_to_validators( validators: Vec<SuiValidatorSummary>, system_state: Option<NativeSuiSystemStateSummary>, diff --git a/crates/sui-graphql-rpc/src/context_data/package_cache.rs b/crates/sui-graphql-rpc/src/context_data/package_cache.rs index 96761b6655a..fe9c376ac43 100644 --- a/crates/sui-graphql-rpc/src/context_data/package_cache.rs +++ b/crates/sui-graphql-rpc/src/context_data/package_cache.rs @@ -6,8 +6,7 @@ use std::sync::Arc; use async_trait::async_trait; use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl}; use move_core_types::account_address::AccountAddress; -use sui_indexer::errors::IndexerError; -use sui_indexer::{indexer_reader::IndexerReader, schema::objects}; +use sui_indexer::{errors::IndexerError, indexer_reader::IndexerReader, schema::objects}; use sui_package_resolver::{ error::Error as PackageResolverError, Package, PackageStore, PackageStoreWithLruCache, Result, }; @@ -34,8 +33,8 @@ impl From<IndexerError> for PackageResolverError { pub(crate) type PackageCache = PackageStoreWithLruCache<DbPackageStore>; -/// Store which fetches package for the given address from the backend db on every call -/// to `fetch` +/// Store which fetches package for the given address from the backend db on +/// every call to `fetch` pub struct DbPackageStore(pub IndexerReader); #[async_trait] diff --git a/crates/sui-graphql-rpc/src/data.rs b/crates/sui-graphql-rpc/src/data.rs index 129cf0d8b5e..be91cf36dae 100644 --- a/crates/sui-graphql-rpc/src/data.rs +++ b/crates/sui-graphql-rpc/src/data.rs @@ -20,7 +20,8 @@ pub(crate) type Conn<'c> = <Db as QueryExecutor>::DbConnection<'c>; pub(crate) type DieselConn = <Db as QueryExecutor>::Connection; pub(crate) type DieselBackend = <Db as QueryExecutor>::Backend; -/// A generic boxed query (compatible with the return type of `into_boxed` on diesel's table DSL). +/// A generic boxed query (compatible with the return type of `into_boxed` on +/// diesel's table DSL). /// /// - ST is the SqlType of the rows selected. /// - QS is the QuerySource (the table(s) being selected from). @@ -30,8 +31,8 @@ pub(crate) type DieselBackend = <Db as QueryExecutor>::Backend; pub(crate) type Query<ST, QS, GB> = BoxedSelectStatement<'static, ST, FromClause<QS>, DieselBackend, GB>; -/// Interface for accessing relational data written by the Indexer, agnostic of the database -/// back-end being used. +/// Interface for accessing relational data written by the Indexer, agnostic of +/// the database back-end being used. #[async_trait] pub(crate) trait QueryExecutor { type Backend: diesel::backend::Backend; @@ -41,8 +42,8 @@ pub(crate) trait QueryExecutor { where Self: 'c; - /// Execute `txn` with read committed isolation. `txn` is supplied a database connection to - /// issue queries over. + /// Execute `txn` with read committed isolation. `txn` is supplied a + /// database connection to issue queries over. async fn execute<T, U, E>(&self, txn: T) -> Result<U, Error> where T: FnOnce(&mut Self::DbConnection<'_>) -> Result<U, E>, @@ -51,9 +52,9 @@ pub(crate) trait QueryExecutor { U: Send + 'static, E: Send + 'static; - /// Execute `txn` with repeatable reads and no phantom reads -- multiple calls to the same query - /// should produce the same results. `txn` is supplied a database connection to issue queries - /// over. + /// Execute `txn` with repeatable reads and no phantom reads -- multiple + /// calls to the same query should produce the same results. `txn` is + /// supplied a database connection to issue queries over.
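A design note on the `DbConnection` helpers that follow: `query` is passed as a thunk (`impl Fn() -> Q`) rather than as a built query, presumably because a Diesel query value is consumed when it executes, so any retry logic has to be able to rebuild it. A dependency-free sketch of that pattern (`with_retries` and the stand-in string "query" are invented for illustration):

```rust
// Rebuild the query via the `build` thunk on every attempt, so a failed
// attempt never tries to reuse a consumed query value.
fn with_retries<Q, T, E>(
    max_attempts: usize,
    build: impl Fn() -> Q,
    run: impl Fn(Q) -> Result<T, E>,
) -> Result<T, E> {
    let mut last_err = None;
    for _ in 0..max_attempts {
        match run(build()) {
            Ok(value) => return Ok(value),
            Err(e) => last_err = Some(e),
        }
    }
    Err(last_err.expect("max_attempts must be non-zero"))
}

fn main() {
    use std::cell::Cell;
    let attempts = Cell::new(0);
    let result: Result<u32, &str> = with_retries(
        3,
        || "SELECT 1".to_string(), // the "query" is rebuilt per attempt
        |_query| {
            attempts.set(attempts.get() + 1);
            if attempts.get() < 2 { Err("transient failure") } else { Ok(1) }
        },
    );
    assert_eq!(result, Ok(1));
    assert_eq!(attempts.get(), 2);
}
```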
async fn execute_repeatable<T, U, E>(&self, txn: T) -> Result<U, Error> where T: FnOnce(&mut Self::DbConnection<'_>) -> Result<U, E>, @@ -67,24 +68,24 @@ pub(crate) trait DbConnection { type Backend: diesel::backend::Backend; type Connection: diesel::Connection; - /// Run a query that fetches a single value. `query` is a thunk that returns a query when - /// called. + /// Run a query that fetches a single value. `query` is a thunk that returns + /// a query when called. fn result<Q, U>(&mut self, query: impl Fn() -> Q) -> QueryResult<U> where Q: diesel::query_builder::Query, Q: LoadQuery<'static, Self::Connection, U>, Q: QueryId + QueryFragment<Self::Backend>; - /// Run a query that fetches multiple values. `query` is a thunk that returns a query when - /// called. + /// Run a query that fetches multiple values. `query` is a thunk that + /// returns a query when called. fn results<Q, U>(&mut self, query: impl Fn() -> Q) -> QueryResult<Vec<U>> where Q: diesel::query_builder::Query, Q: LoadQuery<'static, Self::Connection, U>, Q: QueryId + QueryFragment<Self::Backend>; - /// Helper to limit a query that fetches multiple values to return only its first value. `query` - /// is a thunk that returns a query when called. + /// Helper to limit a query that fetches multiple values to return only its + /// first value. `query` is a thunk that returns a query when called. fn first<Q: diesel::query_dsl::methods::LimitDsl, U>(&mut self, query: impl Fn() -> Q) -> QueryResult<U> where <Q as diesel::query_dsl::methods::LimitDsl>::Output: diesel::query_builder::Query, diff --git a/crates/sui-graphql-rpc/src/data/pg.rs b/crates/sui-graphql-rpc/src/data/pg.rs index c0e5a3cce60..42c298cb1d6 100644 --- a/crates/sui-graphql-rpc/src/data/pg.rs +++ b/crates/sui-graphql-rpc/src/data/pg.rs @@ -3,8 +3,6 @@ use std::time::Instant; -use super::QueryExecutor; -use crate::{config::Limits, error::Error, metrics::Metrics}; use async_trait::async_trait; use diesel::{ pg::Pg, @@ -13,9 +11,11 @@ use diesel::{ QueryResult, RunQueryDsl, }; use sui_indexer::indexer_reader::IndexerReader; - use tracing::error; +use super::QueryExecutor; +use crate::{config::Limits, error::Error, metrics::Metrics}; + #[derive(Clone)] pub(crate) struct PgExecutor { pub inner: IndexerReader, @@ -114,15 +114,16 @@ impl<'c> super::DbConnection for PgConnection<'c> { } } -/// Support for calculating estimated query cost using EXPLAIN and then logging it. +/// Support for calculating estimated query cost using EXPLAIN and then logging +/// it.
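For context on the `query_cost` module below: Postgres `EXPLAIN (FORMAT JSON)` returns an array whose first element carries a "Plan" object with a "Total Cost" field, and that estimate is what gets logged and compared against the configured limit. A small sketch of pulling the number out with `serde_json` (standalone; the real module drives the EXPLAIN through Diesel):

```rust
use serde_json::Value;

// Extract "Total Cost" from the JSON produced by EXPLAIN (FORMAT JSON).
fn total_cost(explain_json: &str) -> Option<f64> {
    let parsed: Value = serde_json::from_str(explain_json).ok()?;
    parsed.get(0)?.get("Plan")?.get("Total Cost")?.as_f64()
}

fn main() {
    let explain = r#"[{"Plan": {"Node Type": "Seq Scan", "Total Cost": 205.01}}]"#;
    assert_eq!(total_cost(explain), Some(205.01));
}
```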
mod query_cost { - use super::*; - use diesel::{query_builder::AstPass, sql_types::Text, PgConnection, QueryResult}; use serde_json::Value; use tap::{TapFallible, TapOptional}; use tracing::{info, warn}; + use super::*; + #[derive(Debug, Clone, Copy, QueryId)] struct Explained<Q> { query: Q, } @@ -182,8 +183,6 @@ mod query_cost { #[cfg(all(test, feature = "pg_integration"))] mod tests { - use super::*; - use crate::config::DEFAULT_SERVER_DB_URL; use diesel::QueryDsl; use sui_framework::BuiltInFramework; use sui_indexer::{ @@ -193,6 +192,9 @@ mod tests { types::IndexedObject, }; + use super::*; + use crate::config::DEFAULT_SERVER_DB_URL; + #[test] fn test_query_cost() { let pool = new_pg_connection_pool(DEFAULT_SERVER_DB_URL, Some(5)).unwrap(); diff --git a/crates/sui-graphql-rpc/src/error.rs b/crates/sui-graphql-rpc/src/error.rs index e20556b3ab3..a4166d2b7b6 100644 --- a/crates/sui-graphql-rpc/src/error.rs +++ b/crates/sui-graphql-rpc/src/error.rs @@ -6,8 +6,8 @@ use async_graphql_axum::GraphQLResponse; use sui_indexer::errors::IndexerError; use sui_json_rpc::name_service::NameServiceError; -/// Error codes for the `extensions.code` field of a GraphQL error that originates from outside -/// GraphQL. +/// Error codes for the `extensions.code` field of a GraphQL error that +/// originates from outside GraphQL. /// `` pub(crate) mod code { pub const BAD_REQUEST: &str = "BAD_REQUEST"; @@ -19,10 +19,11 @@ pub(crate) mod code { /// Create a GraphQL Response containing an Error. /// -/// Most errors produced by the service will automatically be wrapped in a `GraphQLResponse`, -/// because they will originate from within the GraphQL implementation. This function is intended -/// for errors that originated from outside of GraphQL (such as in middleware), but that need to be -/// ingested by GraphQL clients. +/// Most errors produced by the service will automatically be wrapped in a +/// `GraphQLResponse`, because they will originate from within the GraphQL +/// implementation. This function is intended for errors that originated from +/// outside of GraphQL (such as in middleware), but that need to be ingested by +/// GraphQL clients. pub(crate) fn graphql_error_response(code: &str, message: impl Into<String>) -> GraphQLResponse { let error = graphql_error(code, message); Response::from_errors(error.into()).into() @@ -30,7 +31,8 @@ pub(crate) fn graphql_error_response(code: &str, message: impl Into<String>) -> /// Create a generic GraphQL Server Error. /// -/// This error has no path, source, or locations, just a message and an error code. +/// This error has no path, source, or locations, just a message and an error +/// code. pub(crate) fn graphql_error(code: &str, message: impl Into<String>) -> ServerError { let mut ext = ErrorExtensionValues::default(); ext.set("code", code); diff --git a/crates/sui-graphql-rpc/src/examples.rs b/crates/sui-graphql-rpc/src/examples.rs index 56f86ca4281..e33b8babd9d 100644 --- a/crates/sui-graphql-rpc/src/examples.rs +++ b/crates/sui-graphql-rpc/src/examples.rs @@ -1,10 +1,13 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::{ + io::{BufWriter, Read}, + path::PathBuf, +}; + use anyhow::anyhow; use markdown_gen::markdown::{AsMarkdown, Markdown}; -use std::io::{BufWriter, Read}; -use std::path::PathBuf; #[derive(Debug)] pub struct ExampleQuery { @@ -97,7 +100,8 @@ pub fn load_examples() -> anyhow::Result<Vec<ExampleQueryGroup>> { Ok(groups) } -/// This generates a markdown page with all the examples, to be used in the docs site +/// This generates a markdown page with all the examples, to be used in the docs +/// site pub fn generate_examples_for_docs() -> anyhow::Result<String> { let groups = load_examples()?; @@ -209,9 +213,10 @@ pub fn generate_markdown() -> anyhow::Result<String> { #[test] fn test_generate_markdown() { - use similar::*; use std::fs::File; + use similar::*; + let mut buf: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); buf.push("docs"); buf.push("examples.md"); @@ -235,6 +240,9 @@ fn test_generate_markdown() { }; res.push(format!("{}{}", sign, change)); } - panic!("Doc examples have changed. Please run `sui-graphql-rpc generate-examples` to update the docs. Diff: {}", res.join("")); + panic!( + "Doc examples have changed. Please run `sui-graphql-rpc generate-examples` to update the docs. Diff: {}", + res.join("") + ); } } diff --git a/crates/sui-graphql-rpc/src/extensions/feature_gate.rs b/crates/sui-graphql-rpc/src/extensions/feature_gate.rs index e68a2bd0ca2..e4e23d29692 100644 --- a/crates/sui-graphql-rpc/src/extensions/feature_gate.rs +++ b/crates/sui-graphql-rpc/src/extensions/feature_gate.rs @@ -47,10 +47,10 @@ impl Extension for FeatureGate { ) })?; - // TODO: Is there a way to set `is_visible` on `MetaField` and `MetaType` in a generic way - // after building the schema? (to a function which reads the `ServiceConfig` from the - // `Context`). This is (probably) required to hide disabled types and interfaces in the - // schema. + // TODO: Is there a way to set `is_visible` on `MetaField` and `MetaType` in a + // generic way after building the schema? (to a function which reads the + // `ServiceConfig` from the + // `Context`). This is (probably) required to + // hide disabled types and interfaces in the schema. if let Some(group) = functional_group(parent_type, name) { if disabled_features.contains(&group) { @@ -83,9 +83,8 @@ mod tests { use async_graphql::{EmptySubscription, Schema}; use expect_test::expect; - use crate::{functional_group::FunctionalGroup, mutation::Mutation, types::query::Query}; - use super::*; + use super::*; + use crate::{functional_group::FunctionalGroup, mutation::Mutation, types::query::Query}; #[tokio::test] #[should_panic] // because it tries to access the data provider, which isn't there diff --git a/crates/sui-graphql-rpc/src/extensions/logger.rs b/crates/sui-graphql-rpc/src/extensions/logger.rs index 42580f121fb..6105a64cc54 100644 --- a/crates/sui-graphql-rpc/src/extensions/logger.rs +++ b/crates/sui-graphql-rpc/src/extensions/logger.rs @@ -1,7 +1,8 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::{error::code, metrics::Metrics}; +use std::{fmt::Write, net::SocketAddr, sync::Arc}; + use async_graphql::{ extensions::{ Extension, ExtensionContext, ExtensionFactory, NextExecute, NextParseQuery, NextResolve, @@ -11,10 +12,11 @@ use async_graphql::{ PathSegment, Response, ServerError, ServerResult, ValidationResult, Variables, }; use async_graphql_value::ConstValue; -use std::{fmt::Write, net::SocketAddr, sync::Arc}; use tracing::{debug, error, info, warn}; use uuid::Uuid; +use crate::{error::code, metrics::Metrics}; + #[derive(Clone, Debug)] pub struct LoggerConfig { pub log_request_query: bool, @@ -51,8 +53,8 @@ struct LoggerExtension { #[async_trait::async_trait] impl Extension for LoggerExtension { - // This hook is used to get the top level node name for recording in the metrics which top - // level nodes are being called. + // This hook is used to get the top level node name for recording in the metrics + // which top level nodes are being called. async fn resolve( &self, ctx: &ExtensionContext<'_>, diff --git a/crates/sui-graphql-rpc/src/extensions/query_limits_checker.rs b/crates/sui-graphql-rpc/src/extensions/query_limits_checker.rs index a971574f1ec..aeb357d293d 100644 --- a/crates/sui-graphql-rpc/src/extensions/query_limits_checker.rs +++ b/crates/sui-graphql-rpc/src/extensions/query_limits_checker.rs @@ -1,30 +1,37 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::config::{Limits, ServiceConfig}; -use crate::error::{code, graphql_error, graphql_error_at_pos}; -use crate::metrics::Metrics; -use async_graphql::extensions::NextParseQuery; -use async_graphql::extensions::NextRequest; -use async_graphql::extensions::{Extension, ExtensionContext, ExtensionFactory}; -use async_graphql::parser::types::{ - Directive, ExecutableDocument, Field, FragmentDefinition, Selection, SelectionSet, +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + net::SocketAddr, + sync::Arc, + time::Instant, +}; + +use async_graphql::{ + extensions::{Extension, ExtensionContext, ExtensionFactory, NextParseQuery, NextRequest}, + parser::types::{ + Directive, ExecutableDocument, Field, FragmentDefinition, Selection, SelectionSet, + }, + value, Name, Pos, Positioned, Response, ServerResult, Value, Variables, }; -use async_graphql::{value, Name, Pos, Positioned, Response, ServerResult, Value, Variables}; use async_graphql_value::Value as GqlValue; -use axum::headers; -use axum::http::HeaderName; -use axum::http::HeaderValue; +use axum::{ + headers, + http::{HeaderName, HeaderValue}, +}; use once_cell::sync::Lazy; -use std::collections::{BTreeSet, HashMap, VecDeque}; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Instant; use sui_graphql_rpc_headers::LIMITS_HEADER; use tokio::sync::Mutex; use tracing::info; use uuid::Uuid; +use crate::{ + config::{Limits, ServiceConfig}, + error::{code, graphql_error, graphql_error_at_pos}, + metrics::Metrics, +}; + /// Only display usage information if this header was in the request. pub(crate) struct ShowUsage; @@ -161,13 +168,14 @@ impl Extension for QueryLimitsChecker { }; let mut max_depth_seen = 0; - // An operation is a query, mutation or subscription consisting of a set of selections + // An operation is a query, mutation or subscription consisting of a set of + // selections for (count, (_name, oper)) in doc.operations.iter().enumerate() { let sel_set = &oper.node.selection_set; // If the query is pure introspection, we don't need to check the limits. 
- // Pure introspection queries are queries that only have one operation with one field - // and that field is a `__schema` query + // Pure introspection queries are queries that only have one operation with one + // field and that field is a `__schema` query if (count == 0) && (sel_set.node.items.len() == 1) { if let Some(node) = sel_set.node.items.first() { if let Selection::Field(field) = &node.node { @@ -221,7 +229,8 @@ impl Extension for QueryLimitsChecker { } impl QueryLimitsChecker { - /// Parse the selected fields in one operation and check if it conforms to configured limits. + /// Parse the selected fields in one operation and check if it conforms to + /// configured limits. fn analyze_selection_set( &self, limits: &Limits, @@ -231,7 +240,8 @@ impl QueryLimitsChecker { variables: &Variables, ctx: &ExtensionContext<'_>, ) -> ServerResult<()> { - // Use BFS to analyze the query and count the number of nodes and the depth of the query + // Use BFS to analyze the query and count the number of nodes and the depth of + // the query struct ToVisit<'s> { selection: &'s Positioned, parent_node_count: u64, @@ -299,9 +309,10 @@ impl QueryLimitsChecker { ) })?; - // TODO: this is inefficient as we might loop over same fragment multiple times - // Ideally web should cache the costs of fragments we've seen before - // Will do as enhancement + // TODO: this is inefficient as we might loop over the same fragment + // multiple times. Ideally we should cache the costs of fragments + // we've seen before. Will do as an enhancement. check_directives(&frag_def.node.directives)?; for selection in frag_def.node.selection_set.node.items.iter() { que.push_back(ToVisit { @@ -387,7 +398,7 @@ fn check_limits( ); return Err(graphql_error_at_pos( error_code, - format!( + format!( "Query will result in too many output nodes. The maximum allowed is {}, estimated {}", limits.max_output_nodes, cost.output_nodes ), @@ -446,7 +457,8 @@ fn estimate_output_nodes_for_curr_node( } } -/// Try to extract a u64 value from the given argument, or return None on failure. +/// Try to extract a u64 value from the given argument, or return None on +/// failure. fn extract_limit(value: Option<&Positioned>, variables: &Variables) -> Option { if let GqlValue::Variable(var) = &value?.node { return match variables.get(var) { @@ -461,8 +473,9 @@ fn extract_limit(value: Option<&Positioned>, variables: &Variables) -> value.as_u64() } -/// Checks if the given field is a connection field by whether it has 'edges' or 'nodes' selected. -/// This should typically not require checking more than the first element of the selection set +/// Checks if the given field is a connection field by whether it has 'edges' or +/// 'nodes' selected. This should typically not require checking more than the +/// first element of the selection set fn is_connection(f: &Positioned) -> bool { for field_sel in f.node.selection_set.node.items.iter() { if let Selection::Field(field) = &field_sel.node { diff --git a/crates/sui-graphql-rpc/src/extensions/timeout.rs b/crates/sui-graphql-rpc/src/extensions/timeout.rs index 7e206cf3a02..eed9ecba168 100644 --- a/crates/sui-graphql-rpc/src/extensions/timeout.rs +++ b/crates/sui-graphql-rpc/src/extensions/timeout.rs @@ -1,15 +1,18 @@ // Copyright (c) Mysten Labs, Inc.
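For readers following the rewrapped comments in `analyze_selection_set` above: the checker walks the selection set breadth-first, carrying each node's depth and a running node count, and fails fast as soon as either limit is crossed. A self-contained sketch of that traversal over a toy tree (not the crate's `ToVisit` or `Limits` types; the error strings are illustrative):

```rust
use std::collections::VecDeque;

struct Node {
    children: Vec<Node>,
}

/// Rejects the tree as soon as it exceeds `max_nodes` or `max_depth`.
fn check_limits(root: &Node, max_nodes: u64, max_depth: u64) -> Result<(), String> {
    let mut nodes = 0u64;
    let mut queue = VecDeque::new();
    queue.push_back((root, 1u64)); // (node, depth)

    while let Some((node, depth)) = queue.pop_front() {
        nodes += 1;
        if nodes > max_nodes {
            return Err(format!("Query has over {max_nodes} nodes"));
        }
        if depth > max_depth {
            return Err(format!("Query nesting exceeds {max_depth} levels"));
        }
        for child in &node.children {
            queue.push_back((child, depth + 1));
        }
    }
    Ok(())
}

fn main() {
    let leaf = || Node { children: vec![] };
    let root = Node { children: vec![leaf(), leaf()] };
    assert!(check_limits(&root, 10, 2).is_ok());
    assert!(check_limits(&root, 2, 2).is_err()); // three nodes, limit is two
}
```

Because the queue carries `(node, depth)` pairs, depth never has to be recomputed, and the first violation aborts the walk before the rest of the query is visited.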
// SPDX-License-Identifier: Apache-2.0 +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, + time::Duration, +}; + use async_graphql::{ extensions::{Extension, ExtensionContext, ExtensionFactory, NextExecute, NextParseQuery}, parser::types::ExecutableDocument, Response, ServerError, ServerResult, }; use async_graphql_value::Variables; -use std::sync::Mutex; -use std::time::Duration; -use std::{net::SocketAddr, sync::Arc}; use tokio::time::timeout; use tracing::error; use uuid::Uuid; diff --git a/crates/sui-graphql-rpc/src/functional_group.rs b/crates/sui-graphql-rpc/src/functional_group.rs index 538b75d376d..ad8d9406b15 100644 --- a/crates/sui-graphql-rpc/src/functional_group.rs +++ b/crates/sui-graphql-rpc/src/functional_group.rs @@ -8,13 +8,14 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use serde_json as json; -/// Groups of features served by the RPC service. The GraphQL Service can be configured to enable -/// or disable these features. +/// Groups of features served by the RPC service. The GraphQL Service can be +/// configured to enable or disable these features. #[derive(Enum, Copy, Clone, Serialize, Deserialize, Debug, Eq, PartialEq, Ord, PartialOrd)] #[serde(rename_all = "kebab-case")] #[graphql(name = "Feature")] pub(crate) enum FunctionalGroup { - /// Statistics about how the network was running (TPS, top packages, APY, etc) + /// Statistics about how the network was running (TPS, top packages, APY, + /// etc) Analytics, /// Coin metadata, per-address coin and balance information. @@ -35,8 +36,9 @@ pub(crate) enum FunctionalGroup { } impl FunctionalGroup { - /// Name that the group is referred to by in configuration and responses on the GraphQL API. - /// Not a suitable `Display` implementation because it enquotes the representation. + /// Name that the group is referred to by in configuration and responses on + /// the GraphQL API. Not a suitable `Display` implementation because it + /// enquotes the representation. pub(crate) fn name(&self) -> String { json::ser::to_string(self).expect("Serializing `FunctionalGroup` cannot fail.") } @@ -56,11 +58,13 @@ impl FunctionalGroup { } } -/// Mapping from type and field name in the schema to the functional group it belongs to. +/// Mapping from type and field name in the schema to the functional group it +/// belongs to. fn functional_groups() -> &'static BTreeMap<(&'static str, &'static str), FunctionalGroup> { - // TODO: Introduce a macro to declare the functional group for a field and/or type on the - // appropriate type, field, or function, instead of here. This may also be able to set the - // graphql `visible` attribute to control schema visibility by functional groups. + // TODO: Introduce a macro to declare the functional group for a field and/or + // type on the appropriate type, field, or function, instead of here. This + // may also be able to set the graphql `visible` attribute to control schema + // visibility by functional groups. use FunctionalGroup as G; static GROUPS: Lazy> = Lazy::new(|| { @@ -108,8 +112,8 @@ fn functional_groups() -> &'static BTreeMap<(&'static str, &'static str), Functi Lazy::force(&GROUPS) } -/// Map a type and field name to a functional group. If an explicit group does not exist for the -/// field, then it is assumed to be a "core" feature. +/// Map a type and field name to a functional group. If an explicit group does +/// not exist for the field, then it is assumed to be a "core" feature. 
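The `functional_groups()` table rewrapped above is the entire feature-gating mechanism: a static map from `(type, field)` to a group, where a missing entry means the field is an always-on "core" feature. A condensed standalone sketch of the same shape (the entries and variants here are illustrative):

```rust
use std::collections::BTreeMap;

use once_cell::sync::Lazy;

#[derive(Copy, Clone, Debug, PartialEq)]
enum FunctionalGroup {
    Analytics,
    Coins,
}

static GROUPS: Lazy<BTreeMap<(&'static str, &'static str), FunctionalGroup>> =
    Lazy::new(|| {
        BTreeMap::from([
            (("Query", "networkMetrics"), FunctionalGroup::Analytics),
            (("Address", "coins"), FunctionalGroup::Coins),
        ])
    });

/// `None` means the field is an always-on "core" feature.
fn functional_group(type_: &str, field: &str) -> Option<FunctionalGroup> {
    GROUPS.get(&(type_, field)).copied()
}

fn main() {
    assert_eq!(functional_group("Address", "coins"), Some(FunctionalGroup::Coins));
    assert_eq!(functional_group("Query", "chainIdentifier"), None); // core
}
```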
pub(crate) fn functional_group(type_: &str, field: &str) -> Option { functional_groups().get(&(type_, field)).copied() } @@ -118,17 +122,16 @@ pub(crate) fn functional_group(type_: &str, field: &str) -> Option, diff --git a/crates/sui-graphql-rpc/src/raw_query.rs b/crates/sui-graphql-rpc/src/raw_query.rs index a4769b1f321..2d026a906f8 100644 --- a/crates/sui-graphql-rpc/src/raw_query.rs +++ b/crates/sui-graphql-rpc/src/raw_query.rs @@ -10,18 +10,19 @@ use crate::data::DieselBackend; pub(crate) type RawSqlQuery = BoxedSqlQuery<'static, DieselBackend, SqlQuery>; -/// `RawQuery` is a utility for building and managing `diesel::query_builder::BoxedSqlQuery` queries -/// dynamically. +/// `RawQuery` is a utility for building and managing +/// `diesel::query_builder::BoxedSqlQuery` queries dynamically. /// -/// 1. **Dynamic Value Binding**: Allows binding string values dynamically to the query, bypassing -/// the need to specify types explicitly, as is typically required with Diesel's -/// `sql_query.bind`. +/// 1. **Dynamic Value Binding**: Allows binding string values dynamically to +/// the query, bypassing the need to specify types explicitly, as is +/// typically required with Diesel's `sql_query.bind`. /// -/// 2. **Query String Merging**: Can be used to represent and merge query strings and their -/// associated bindings. Placeholder strings and bindings are applied in sequential order. +/// 2. **Query String Merging**: Can be used to represent and merge query +/// strings and their associated bindings. Placeholder strings and bindings +/// are applied in sequential order. /// -/// Note: `RawQuery` only supports binding string values, as interpolating raw strings directly -/// increases exposure to SQL injection attacks. +/// Note: `RawQuery` only supports binding string values, as interpolating raw +/// strings directly increases exposure to SQL injection attacks. #[derive(Clone)] pub(crate) struct RawQuery { /// The `SELECT` and `FROM` clauses of the query. @@ -51,7 +52,8 @@ impl RawQuery { } } - /// Adds a `WHERE` condition to the query, combining it with existing conditions using `AND`. + /// Adds a `WHERE` condition to the query, combining it with existing + /// conditions using `AND`. pub(crate) fn filter(mut self, condition: T) -> Self { self.where_ = match self.where_ { Some(where_) => Some(format!("({}) AND {}", where_, condition)), @@ -61,7 +63,8 @@ impl RawQuery { self } - /// Adds a `WHERE` condition to the query, combining it with existing conditions using `OR`. + /// Adds a `WHERE` condition to the query, combining it with existing + /// conditions using `OR`. #[allow(dead_code)] pub(crate) fn or_filter(mut self, condition: T) -> Self { self.where_ = match self.where_ { @@ -95,9 +98,9 @@ impl RawQuery { self.binds.push(condition); } - /// Constructs the query string and returns it along with the list of binds for this query. This - /// function is not intended to be called directly, and instead should be used through the - /// `query!` macro. + /// Constructs the query string and returns it along with the list of binds + /// for this query. This function is not intended to be called directly, + /// and instead should be used through the `query!` macro. pub(crate) fn finish(self) -> (String, Vec) { let mut select = self.select; @@ -128,9 +131,10 @@ impl RawQuery { (select, self.binds) } - /// Converts this `RawQuery` into a `diesel::query_builder::BoxedSqlQuery`. Consumes `self` into - /// a raw sql string and bindings, if any. 
A `BoxedSqlQuery` is constructed from the raw sql - /// string, and bindings are added using `sql_query.bind()`. + /// Converts this `RawQuery` into a `diesel::query_builder::BoxedSqlQuery`. + /// Consumes `self` into a raw sql string and bindings, if any. A + /// `BoxedSqlQuery` is constructed from the raw sql string, and bindings + /// are added using `sql_query.bind()`. pub(crate) fn into_boxed(self) -> RawSqlQuery { let (raw_sql_string, binds) = self.finish(); @@ -157,7 +161,8 @@ impl RawQuery { } } -/// Applies the `AND` condition to the given `RawQuery` and binds input string values, if any. +/// Applies the `AND` condition to the given `RawQuery` and binds input string +/// values, if any. #[macro_export] macro_rules! filter { ($query:expr, $condition:expr $(,$binds:expr)*) => {{ @@ -168,7 +173,8 @@ macro_rules! filter { }}; } -/// Applies the `OR` condition to the given `RawQuery` and binds input string values, if any. +/// Applies the `OR` condition to the given `RawQuery` and binds input string +/// values, if any. #[macro_export] macro_rules! or_filter { ($query:expr, $condition:expr $(,$binds:expr)*) => {{ @@ -179,11 +185,12 @@ macro_rules! or_filter { }}; } -/// Accepts a `SELECT FROM` format string and optional subqueries. If subqueries are provided, there -/// should be curly braces `{}` in the format string to interpolate each subquery's sql string into. -/// Concatenates subqueries to the `SELECT FROM` clause, and creates a new `RawQuery` from the -/// concatenated sql string. The binds from each subquery are added in the order they appear in the -/// macro parameter. Subqueries are consumed into the new `RawQuery`. +/// Accepts a `SELECT FROM` format string and optional subqueries. If subqueries +/// are provided, there should be curly braces `{}` in the format string to +/// interpolate each subquery's sql string into. Concatenates subqueries to the +/// `SELECT FROM` clause, and creates a new `RawQuery` from the concatenated sql +/// string. The binds from each subquery are added in the order they appear in +/// the macro parameter. Subqueries are consumed into the new `RawQuery`. #[macro_export] macro_rules! query { // Matches the case where no subqueries are provided. A `RawQuery` is constructed from the given diff --git a/crates/sui-graphql-rpc/src/server/builder.rs b/crates/sui-graphql-rpc/src/server/builder.rs index 4ed740a4d94..cf7aa092009 100644 --- a/crates/sui-graphql-rpc/src/server/builder.rs +++ b/crates/sui-graphql-rpc/src/server/builder.rs @@ -1,67 +1,73 @@ // Copyright (c) Mysten Labs, Inc. 
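On the raw_query.rs hunks above: `filter` and `or_filter` grow the `WHERE` clause by wrapping whatever is already there in parentheses and appending the new condition, which keeps precedence correct when `AND` and `OR` are mixed. A standalone sketch of just that combining step (a toy type, not the crate's `RawQuery`, which also tracks binds):

```rust
/// Toy accumulator mirroring the `where_: Option<String>` field above.
struct Where(Option<String>);

impl Where {
    fn and(mut self, cond: &str) -> Self {
        self.0 = match self.0 {
            Some(w) => Some(format!("({}) AND {}", w, cond)),
            None => Some(cond.to_string()),
        };
        self
    }

    fn or(mut self, cond: &str) -> Self {
        self.0 = match self.0 {
            Some(w) => Some(format!("({}) OR {}", w, cond)),
            None => Some(cond.to_string()),
        };
        self
    }
}

fn main() {
    let w = Where(None)
        .and("coin_type IS NOT NULL")
        .and("owner_type = 1")
        .or("newer.object_version IS NULL");
    // Prints: ((coin_type IS NOT NULL) AND owner_type = 1) OR newer.object_version IS NULL
    println!("{}", w.0.unwrap());
}
```

The parenthesisation makes chained calls associate in call order, rather than the way SQL's default precedence (AND binds tighter than OR) would group them.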
// SPDX-License-Identifier: Apache-2.0 -use crate::config::{ - ConnectionConfig, ServiceConfig, Version, MAX_CONCURRENT_REQUESTS, - RPC_TIMEOUT_ERR_SLEEP_RETRY_PERIOD, -}; -use crate::consistency::CheckpointViewedAt; -use crate::context_data::package_cache::DbPackageStore; -use crate::data::Db; -use crate::metrics::Metrics; -use crate::mutation::Mutation; -use crate::types::checkpoint::Checkpoint; -use crate::types::move_object::IMoveObject; -use crate::types::object::IObject; -use crate::types::owner::IOwner; -use crate::{ - config::ServerConfig, - context_data::db_data_provider::PgManager, - error::Error, - extensions::{ - feature_gate::FeatureGate, - logger::Logger, - query_limits_checker::{QueryLimitsChecker, ShowUsage}, - timeout::Timeout, +use std::{ + any::Any, + convert::Infallible, + net::{SocketAddr, TcpStream}, + sync::{ + atomic::{AtomicU64, Ordering::Relaxed}, + Arc, }, - server::version::{check_version_middleware, set_version_middleware}, - types::query::{Query, SuiGraphQLSchema}, + time::Instant, +}; + +use async_graphql::{ + dataloader::DataLoader, + extensions::{ApolloTracing, ExtensionFactory, Tracing}, + EmptySubscription, Schema, SchemaBuilder, ServerError, }; -use async_graphql::dataloader::DataLoader; -use async_graphql::extensions::ApolloTracing; -use async_graphql::extensions::Tracing; -use async_graphql::{extensions::ExtensionFactory, Schema, SchemaBuilder}; -use async_graphql::{EmptySubscription, ServerError}; use async_graphql_axum::{GraphQLRequest, GraphQLResponse}; -use axum::extract::FromRef; -use axum::extract::{connect_info::IntoMakeServiceWithConnectInfo, ConnectInfo, State}; -use axum::http::{HeaderMap, StatusCode}; -use axum::middleware::{self}; -use axum::response::IntoResponse; -use axum::routing::{post, MethodRouter, Route}; -use axum::{headers::Header, Router}; +use axum::{ + extract::{connect_info::IntoMakeServiceWithConnectInfo, ConnectInfo, FromRef, State}, + headers::Header, + http::{HeaderMap, StatusCode}, + middleware::{self}, + response::IntoResponse, + routing::{post, MethodRouter, Route}, + Router, +}; use http::{HeaderValue, Method, Request}; -use hyper::server::conn::AddrIncoming as HyperAddrIncoming; -use hyper::Body; -use hyper::Server as HyperServer; +use hyper::{server::conn::AddrIncoming as HyperAddrIncoming, Body, Server as HyperServer}; use mysten_metrics::spawn_monitored_task; use mysten_network::callback::{CallbackLayer, MakeCallbackHandler, ResponseHandler}; -use std::convert::Infallible; -use std::net::TcpStream; -use std::sync::atomic::{AtomicU64, Ordering::Relaxed}; -use std::sync::Arc; -use std::{any::Any, net::SocketAddr, time::Instant}; use sui_graphql_rpc_headers::{LIMITS_HEADER, VERSION_HEADER}; use sui_package_resolver::{PackageStoreWithLruCache, Resolver}; use sui_sdk::SuiClientBuilder; -use tokio::join; -use tokio::sync::OnceCell; +use tokio::{join, sync::OnceCell}; use tokio_util::sync::CancellationToken; use tower::{Layer, Service}; use tower_http::cors::{AllowOrigin, CorsLayer}; use tracing::{error, info, warn}; use uuid::Uuid; +use crate::{ + config::{ + ConnectionConfig, ServerConfig, ServiceConfig, Version, MAX_CONCURRENT_REQUESTS, + RPC_TIMEOUT_ERR_SLEEP_RETRY_PERIOD, + }, + consistency::CheckpointViewedAt, + context_data::{db_data_provider::PgManager, package_cache::DbPackageStore}, + data::Db, + error::Error, + extensions::{ + feature_gate::FeatureGate, + logger::Logger, + query_limits_checker::{QueryLimitsChecker, ShowUsage}, + timeout::Timeout, + }, + metrics::Metrics, + mutation::Mutation, + 
server::version::{check_version_middleware, set_version_middleware}, + types::{ + checkpoint::Checkpoint, + move_object::IMoveObject, + object::IObject, + owner::IOwner, + query::{Query, SuiGraphQLSchema}, + }, +}; + pub(crate) struct Server { pub server: HyperServer>, /// The following fields are internally used for background tasks @@ -71,14 +77,15 @@ pub(crate) struct Server { } impl Server { - /// Start the GraphQL service and any background tasks it is dependent on. When a cancellation - /// signal is received, the method waits for all tasks to complete before returning. + /// Start the GraphQL service and any background tasks it is dependent on. + /// When a cancellation signal is received, the method waits for all + /// tasks to complete before returning. pub async fn run(self) -> Result<(), Error> { get_or_init_server_start_time().await; - // A handle that spawns a background task to periodically update the `CheckpointViewedAt`, - // which is the u64 high watermark of checkpoints that the service is guaranteed to produce - // a consistent result for. + // A handle that spawns a background task to periodically update the + // `CheckpointViewedAt`, which is the u64 high watermark of checkpoints + // that the service is guaranteed to produce a consistent result for. let watermark_task = { let metrics = self.state.metrics.clone(); let sleep_ms = self.state.service.background_tasks.watermark_update_ms; @@ -110,8 +117,9 @@ impl Server { }) }; - // Wait for both tasks to complete. This ensures that the service doesn't fully shut down - // until both the background task and the server have completed their shutdown processes. + // Wait for both tasks to complete. This ensures that the service doesn't fully + // shut down until both the background task and the server have + // completed their shutdown processes. let _ = join!(watermark_task, server_task); Ok(()) @@ -134,8 +142,8 @@ pub(crate) struct AppState { pub version: Version, } -/// The high checkpoint watermark stamped on each GraphQL request. This is used to ensure -/// cross-query consistency. +/// The high checkpoint watermark stamped on each GraphQL request. This is used +/// to ensure cross-query consistency. #[derive(Clone)] pub(crate) struct CheckpointWatermark(pub Arc); @@ -200,8 +208,8 @@ impl ServerBuilder { self.schema.finish() } - /// Prepares the components of the server to be run. Finalizes the graphql schema, and expects - /// the `Db` and `Router` to have been initialized. + /// Prepares the components of the server to be run. Finalizes the graphql + /// schema, and expects the `Db` and `Router` to have been initialized. fn build_components( self, ) -> ( @@ -323,8 +331,8 @@ impl ServerBuilder { }) } - /// Instantiate a `ServerBuilder` from a `ServerConfig`, typically called when building the - /// graphql service for production usage. + /// Instantiate a `ServerBuilder` from a `ServerConfig`, typically called + /// when building the graphql service for production usage. pub async fn from_config( config: &ServerConfig, version: &Version, @@ -396,7 +404,9 @@ impl ServerBuilder { .map_err(|e| Error::Internal(format!("Failed to create SuiClient: {}", e)))?, ) } else { - warn!("No fullnode url found in config. `dryRunTransactionBlock` and `executeTransactionBlock` will not work"); + warn!( + "No fullnode url found in config. 
`dryRunTransactionBlock` and `executeTransactionBlock` will not work" + ); None }; @@ -453,8 +463,9 @@ pub fn export_schema() -> String { schema_builder().finish().sdl() } -/// Entry point for graphql requests. Each request is stamped with a unique ID, a `ShowUsage` flag -/// if set in the request headers, and the high checkpoint watermark as set by the background task. +/// Entry point for graphql requests. Each request is stamped with a unique ID, +/// a `ShowUsage` flag if set in the request headers, and the high checkpoint +/// watermark as set by the background task. async fn graphql_handler( ConnectInfo(addr): ConnectInfo, schema: axum::Extension, @@ -468,18 +479,20 @@ async fn graphql_handler( req.data.insert(ShowUsage) } // Capture the IP address of the client - // Note: if a load balancer is used it must be configured to forward the client IP address + // Note: if a load balancer is used it must be configured to forward the client + // IP address req.data.insert(addr); - let checkpoint_viewed_at = watermark.0 .0.load(Relaxed); + let checkpoint_viewed_at = watermark.0.0.load(Relaxed); - // This wrapping is done to delineate the watermark from potentially other u64 types. + // This wrapping is done to delineate the watermark from potentially other u64 + // types. req.data.insert(CheckpointViewedAt(checkpoint_viewed_at)); let result = schema.execute(req).await; - // If there are errors, insert them as an extention so that the Metrics callback handler can - // pull it out later. + // If there are errors, insert them as an extension so that the Metrics callback + // handler can pull it out later. let mut extensions = axum::http::Extensions::new(); if result.is_err() { extensions.insert(GraphqlErrors(std::sync::Arc::new(result.errors.clone()))); } @@ -521,8 +534,8 @@ impl ResponseHandler for MetricsCallbackHandler { fn on_error(self, _error: &E) { // Do nothing if the whole service errored // - // in Axum this isn't possible since all services are required to have an error type of - // Infallible + // in Axum this isn't possible since all services are required to have + // an error type of Infallible } } @@ -565,7 +578,8 @@ async fn get_or_init_server_start_time() -> &'static Instant { ONCE.get_or_init(|| async move { Instant::now() }).await } -/// Starts an infinite loop that periodically updates the `checkpoint_viewed_at` high watermark. +/// Starts an infinite loop that periodically updates the `checkpoint_viewed_at` +/// high watermark. pub(crate) async fn update_watermark( db: &Db, checkpoint_viewed_at: CheckpointWatermark, @@ -599,23 +613,24 @@ pub(crate) async fn update_watermark( } pub mod tests { - use super::*; - use crate::{ - config::{ConnectionConfig, Limits, ServiceConfig, Version}, - context_data::db_data_provider::PgManager, - extensions::query_limits_checker::QueryLimitsChecker, - extensions::timeout::Timeout, - }; + use std::{sync::Arc, time::Duration}; + use async_graphql::{ extensions::{Extension, ExtensionContext, NextExecute}, Response, }; - use std::sync::Arc; - use std::time::Duration; use uuid::Uuid; - /// Prepares a schema for tests dealing with extensions. Returns a `ServerBuilder` that can be - /// further extended with `context_data` and `extension` for testing. + use super::*; + use crate::{ + config::{ConnectionConfig, Limits, ServiceConfig, Version}, + context_data::db_data_provider::PgManager, + extensions::{query_limits_checker::QueryLimitsChecker, timeout::Timeout}, + }; + + /// Prepares a schema for tests dealing with extensions.
Returns a + /// `ServerBuilder` that can be further extended with `context_data` and + /// `extension` for testing. fn prep_schema( connection_config: Option, service_config: Option, diff --git a/crates/sui-graphql-rpc/src/server/graphiql_server.rs b/crates/sui-graphql-rpc/src/server/graphiql_server.rs index bf45cb242b8..8ae98c24c2b 100644 --- a/crates/sui-graphql-rpc/src/server/graphiql_server.rs +++ b/crates/sui-graphql-rpc/src/server/graphiql_server.rs @@ -4,9 +4,11 @@ use tokio_util::sync::CancellationToken; use tracing::info; -use crate::config::{ServerConfig, Version}; -use crate::error::Error; -use crate::server::builder::ServerBuilder; +use crate::{ + config::{ServerConfig, Version}, + error::Error, + server::builder::ServerBuilder, +}; async fn graphiql(ide_title: axum::Extension>) -> impl axum::response::IntoResponse { let gq = async_graphql::http::GraphiQLSource::build().endpoint("/"); diff --git a/crates/sui-graphql-rpc/src/server/version.rs b/crates/sui-graphql-rpc/src/server/version.rs index 9386badfa3f..691d6f1ba75 100644 --- a/crates/sui-graphql-rpc/src/server/version.rs +++ b/crates/sui-graphql-rpc/src/server/version.rs @@ -34,9 +34,9 @@ impl headers::Header for SuiRpcVersion { return Err(headers::Error::invalid()); }; - // Extract the header values as bytes. Distinguish the first value as we expect there to be - // just one under normal operation. Do not attempt to parse the value, as a header parsing - // failure produces a generic error. + // Extract the header values as bytes. Distinguish the first value as we expect + // there to be just one under normal operation. Do not attempt to parse + // the value, as a header parsing failure produces a generic error. Ok(SuiRpcVersion(value, values.collect())) } @@ -45,9 +45,10 @@ impl headers::Header for SuiRpcVersion { } } -/// Middleware to check for the existence of a version constraint in the request header, and confirm -/// that this instance of the RPC matches that version constraint. Each RPC instance only supports -/// one version of the RPC software, and it is the responsibility of the load balancer to make sure +/// Middleware to check for the existence of a version constraint in the request +/// header, and confirm that this instance of the RPC matches that version +/// constraint. Each RPC instance only supports one version of the RPC +/// software, and it is the responsibility of the load balancer to make sure /// version constraints are met. pub(crate) async fn check_version_middleware( user_version: Option>, @@ -107,8 +108,8 @@ pub(crate) async fn check_version_middleware( next.run(request).await } -/// Mark every outgoing response with a header indicating the precise version of the RPC that was -/// used (including the patch version and sha). +/// Mark every outgoing response with a header indicating the precise version of +/// the RPC that was used (including the patch version and sha). pub(crate) async fn set_version_middleware( State(version): State, request: Request, @@ -125,8 +126,8 @@ pub(crate) async fn set_version_middleware( /// Split a `version` string into two parts (year and month) separated by a ".". /// -/// Confirms that the version specifier contains exactly two components, and that both -/// components are entirely comprised of digits. +/// Confirms that the version specifier contains exactly two components, and +/// that both components are entirely comprised of digits. 
fn parse_version(version: &str) -> Option<(&str, &str)> { let mut parts = version.split('.'); let year = parts.next()?; @@ -142,17 +143,18 @@ fn parse_version(version: &str) -> Option<(&str, &str)> { mod tests { use std::net::SocketAddr; + use axum::{body::Body, middleware, routing::get, Router}; + use expect_test::expect; + use mysten_metrics; + use tokio_util::sync::CancellationToken; + use tower::ServiceExt; + use super::*; use crate::{ config::{ConnectionConfig, ServiceConfig, Version}, metrics::Metrics, server::builder::AppState, }; - use axum::{body::Body, middleware, routing::get, Router}; - use expect_test::expect; - use mysten_metrics; - use tokio_util::sync::CancellationToken; - use tower::ServiceExt; fn metrics() -> Metrics { let binding_address: SocketAddr = "0.0.0.0:9185".parse().unwrap(); diff --git a/crates/sui-graphql-rpc/src/test_infra/cluster.rs b/crates/sui-graphql-rpc/src/test_infra/cluster.rs index 15a67a1ddb4..1f0ff07ccd6 100644 --- a/crates/sui-graphql-rpc/src/test_infra/cluster.rs +++ b/crates/sui-graphql-rpc/src/test_infra/cluster.rs @@ -1,30 +1,28 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::config::ConnectionConfig; -use crate::config::ServerConfig; -use crate::config::ServiceConfig; -use crate::config::Version; -use crate::server::graphiql_server::start_graphiql_server; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; +use std::{net::SocketAddr, sync::Arc, time::Duration}; + use sui_graphql_rpc_client::simple_client::SimpleClient; -use sui_indexer::errors::IndexerError; pub use sui_indexer::handlers::objects_snapshot_processor::SnapshotLagConfig; -use sui_indexer::store::indexer_store::IndexerStore; -use sui_indexer::store::PgIndexerStore; -use sui_indexer::test_utils::force_delete_database; -use sui_indexer::test_utils::start_test_indexer; -use sui_indexer::test_utils::start_test_indexer_impl; -use sui_indexer::test_utils::ReaderWriterConfig; +use sui_indexer::{ + errors::IndexerError, + store::{indexer_store::IndexerStore, PgIndexerStore}, + test_utils::{ + force_delete_database, start_test_indexer, start_test_indexer_impl, ReaderWriterConfig, + }, +}; use sui_swarm_config::genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT}; use sui_types::storage::ReadStore; -use test_cluster::TestCluster; -use test_cluster::TestClusterBuilder; +use test_cluster::{TestCluster, TestClusterBuilder}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; +use crate::{ + config::{ConnectionConfig, ServerConfig, ServiceConfig, Version}, + server::graphiql_server::start_graphiql_server, +}; + const VALIDATOR_COUNT: usize = 7; const EPOCH_DURATION_MS: u64 = 15000; @@ -74,7 +72,8 @@ pub async fn start_cluster( let graphql_server_handle = start_graphql_server_with_fn_rpc( graphql_connection_config.clone(), Some(fn_rpc_url), - /* cancellation_token */ None, + // cancellation_token + None, ) .await; @@ -96,8 +95,9 @@ pub async fn start_cluster( } } -/// Takes in a simulated instantiation of a Sui blockchain and builds a cluster around it. This -/// cluster is typically used in e2e tests to emulate and test behaviors. +/// Takes in a simulated instantiation of a Sui blockchain and builds a cluster +/// around it. This cluster is typically used in e2e tests to emulate and test +/// behaviors. 
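The `parse_version` helper whose doc comment is rewrapped above is only partially visible in this diff. A standalone reconstruction consistent with that doc (exactly two dot-separated components, both entirely digits); everything past `parts.next()?` is an assumption:

```rust
fn parse_version(version: &str) -> Option<(&str, &str)> {
    let mut parts = version.split('.');
    let year = parts.next()?;
    let month = parts.next()?;
    // Exactly two components, both non-empty and all ASCII digits.
    (parts.next().is_none()
        && !year.is_empty()
        && !month.is_empty()
        && year.chars().all(|c| c.is_ascii_digit())
        && month.chars().all(|c| c.is_ascii_digit()))
    .then_some((year, month))
}

fn main() {
    assert_eq!(parse_version("2024.3"), Some(("2024", "3")));
    assert_eq!(parse_version("2024.3.1"), None); // three components
    assert_eq!(parse_version("v2024.3"), None); // non-digit component
}
```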
pub async fn serve_executor( graphql_connection_config: ConnectionConfig, internal_data_source_rpc_port: u16, @@ -221,8 +221,8 @@ async fn wait_for_graphql_server(client: &SimpleClient) { .expect("Timeout waiting for graphql server to start"); } -/// Ping the GraphQL server until its background task has updated the checkpoint watermark to the -/// desired checkpoint. +/// Ping the GraphQL server until its background task has updated the checkpoint +/// watermark to the desired checkpoint. async fn wait_for_graphql_checkpoint_catchup( client: &SimpleClient, checkpoint: u64, @@ -269,23 +269,26 @@ async fn wait_for_graphql_checkpoint_catchup( } impl Cluster { - /// Waits for the indexer to index up to the given checkpoint, then waits for the graphql - /// service's background task to update the checkpoint watermark to the given checkpoint. + /// Waits for the indexer to index up to the given checkpoint, then waits + /// for the graphql service's background task to update the checkpoint + /// watermark to the given checkpoint. pub async fn wait_for_checkpoint_catchup(&self, checkpoint: u64, base_timeout: Duration) { wait_for_graphql_checkpoint_catchup(&self.graphql_client, checkpoint, base_timeout).await } } impl ExecutorCluster { - /// Waits for the indexer to index up to the given checkpoint, then waits for the graphql - /// service's background task to update the checkpoint watermark to the given checkpoint. + /// Waits for the indexer to index up to the given checkpoint, then waits + /// for the graphql service's background task to update the checkpoint + /// watermark to the given checkpoint. pub async fn wait_for_checkpoint_catchup(&self, checkpoint: u64, base_timeout: Duration) { wait_for_graphql_checkpoint_catchup(&self.graphql_client, checkpoint, base_timeout).await } - /// The ObjectsSnapshotProcessor is a long-running task that periodically takes a snapshot of - /// the objects table. This leads to flakiness in tests, so we wait until the objects_snapshot - /// has reached the expected state. + /// The ObjectsSnapshotProcessor is a long-running task that periodically + /// takes a snapshot of the objects table. This leads to flakiness in + /// tests, so we wait until the objects_snapshot has reached the + /// expected state. pub async fn wait_for_objects_snapshot_catchup(&self, base_timeout: Duration) { let mut latest_snapshot_cp = 0; @@ -312,9 +315,10 @@ impl ExecutorCluster { latest_cp, latest_snapshot_cp)); } - /// Deletes the database created for the test and sends a cancellation signal to the graphql - /// service. When this function is awaited on, the callsite will wait for the graphql service to - /// terminate its background task and then itself. + /// Deletes the database created for the test and sends a cancellation + /// signal to the graphql service. When this function is awaited on, the + /// callsite will wait for the graphql service to terminate its + /// background task and then itself. pub async fn cleanup_resources(self) { self.cancellation_token.cancel(); let db_url = self.graphql_connection_config.db_url.clone(); diff --git a/crates/sui-graphql-rpc/src/types/address.rs b/crates/sui-graphql-rpc/src/types/address.rs index 71ec14fe68f..9d5f79a34f3 100644 --- a/crates/sui-graphql-rpc/src/types/address.rs +++ b/crates/sui-graphql-rpc/src/types/address.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. 
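The catch-up helpers in cluster.rs above all share one shape: poll the service until it reports the target state, bounded by an overall timeout. A minimal sketch of that pattern, assuming tokio; `current_checkpoint` is a stand-in for the real GraphQL query:

```rust
use std::time::Duration;

use tokio::time::{sleep, timeout};

async fn current_checkpoint() -> u64 {
    // Stand-in for asking the GraphQL server for its watermark.
    42
}

/// Poll until the service reports `target`, or panic after `base_timeout`.
async fn wait_for_checkpoint_catchup(target: u64, base_timeout: Duration) {
    timeout(base_timeout, async {
        loop {
            if current_checkpoint().await >= target {
                return;
            }
            sleep(Duration::from_millis(100)).await;
        }
    })
    .await
    .expect("Timeout waiting for graphql server to catch up");
}

#[tokio::main]
async fn main() {
    wait_for_checkpoint_catchup(42, Duration::from_secs(1)).await;
}
```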
// SPDX-License-Identifier: Apache-2.0 +use async_graphql::{connection::Connection, *}; + use super::{ balance::{self, Balance}, coin::Coin, @@ -14,17 +16,17 @@ use super::{ transaction_block::{self, TransactionBlock, TransactionBlockFilter}, type_filter::ExactTypeFilter, }; -use async_graphql::{connection::Connection, *}; #[derive(Clone, Debug, PartialEq, Eq, Copy)] pub(crate) struct Address { pub address: SuiAddress, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None if + /// the data was requested at the latest checkpoint. pub checkpoint_viewed_at: Option, } -/// The possible relationship types for a transaction block: sign, sent, received, or paid. +/// The possible relationship types for a transaction block: sign, sent, +/// received, or paid. #[derive(Enum, Copy, Clone, Eq, PartialEq)] pub(crate) enum AddressTransactionBlockRelationship { /// Transactions this address has signed either as a sender or as a sponsor. @@ -33,7 +35,8 @@ pub(crate) enum AddressTransactionBlockRelationship { Recv, } -/// The 32-byte address that is an account address (corresponding to a public key). +/// The 32-byte address that is an account address (corresponding to a public +/// key). #[Object] impl Address { pub(crate) async fn address(&self) -> SuiAddress { @@ -55,8 +58,8 @@ impl Address { .await } - /// Total balance of all coins with marker type owned by this address. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this address. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -81,7 +84,8 @@ impl Address { /// The coin objects for this address. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -110,7 +114,8 @@ impl Address { .await } - /// The domain explicitly configured as the default domain pointing to this address. + /// The domain explicitly configured as the default domain pointing to this + /// address. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -119,8 +124,8 @@ impl Address { OwnerImpl::from(self).default_suins_name(ctx, format).await } - /// The SuinsRegistration NFTs owned by this address. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this address. These grant the owner + /// the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -134,8 +139,9 @@ impl Address { .await } - /// Similar behavior to the `transactionBlocks` in Query but supporting the additional - /// `AddressTransactionBlockRelationship` filter, which defaults to `SIGN`. + /// Similar behavior to the `transactionBlocks` in Query but supporting the + /// additional `AddressTransactionBlockRelationship` filter, which + /// defaults to `SIGN`. 
async fn transaction_blocks( &self, ctx: &Context<'_>, diff --git a/crates/sui-graphql-rpc/src/types/available_range.rs b/crates/sui-graphql-rpc/src/types/available_range.rs index 83289e1532a..d269e47ceb1 100644 --- a/crates/sui-graphql-rpc/src/types/available_range.rs +++ b/crates/sui-graphql-rpc/src/types/available_range.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::checkpoint::{Checkpoint, CheckpointId}; use async_graphql::*; +use super::checkpoint::{Checkpoint, CheckpointId}; + #[derive(Clone, Debug, PartialEq, Eq, Copy)] pub(crate) struct AvailableRange { pub first: u64, @@ -11,7 +12,8 @@ pub(crate) struct AvailableRange { } // TODO: do both in one query? -/// Range of checkpoints that the RPC is guaranteed to produce a consistent response for. +/// Range of checkpoints that the RPC is guaranteed to produce a consistent +/// response for. #[Object] impl AvailableRange { async fn first(&self, ctx: &Context<'_>) -> Result> { diff --git a/crates/sui-graphql-rpc/src/types/balance.rs b/crates/sui-graphql-rpc/src/types/balance.rs index 319c601686e..588601c1b55 100644 --- a/crates/sui-graphql-rpc/src/types/balance.rs +++ b/crates/sui-graphql-rpc/src/types/balance.rs @@ -1,24 +1,34 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::cursor::{self, Page, RawPaginated, Target}; -use super::{big_int::BigInt, move_type::MoveType, sui_address::SuiAddress}; -use crate::consistency::{consistent_range, Checkpointed}; -use crate::data::{Db, DbConnection, QueryExecutor}; -use crate::error::Error; -use crate::raw_query::RawQuery; -use crate::{filter, query}; -use async_graphql::connection::{Connection, CursorType, Edge}; -use async_graphql::*; +use std::str::FromStr; + +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, +}; use diesel::{ sql_types::{BigInt as SqlBigInt, Nullable, Text}, OptionalExtension, QueryableByName, }; use serde::{Deserialize, Serialize}; -use std::str::FromStr; use sui_indexer::types::OwnerType; use sui_types::{parse_sui_type_tag, TypeTag}; +use super::{ + big_int::BigInt, + cursor::{self, Page, RawPaginated, Target}, + move_type::MoveType, + sui_address::SuiAddress, +}; +use crate::{ + consistency::{consistent_range, Checkpointed}, + data::{Db, DbConnection, QueryExecutor}, + error::Error, + filter, query, + raw_query::RawQuery, +}; + /// The total balance for a particular coin type. #[derive(Clone, Debug, SimpleObject)] pub(crate) struct Balance { @@ -30,8 +40,9 @@ pub(crate) struct Balance { pub(crate) total_balance: Option, } -/// Representation of a row of balance information from the DB. We read the balance as a `String` to -/// deal with the large (bigger than 2^63 - 1) balances. +/// Representation of a row of balance information from the DB. We read the +/// balance as a `String` to deal with the large (bigger than 2^63 - 1) +/// balances. #[derive(QueryableByName)] pub struct StoredBalance { #[diesel(sql_type = Nullable)] @@ -44,8 +55,9 @@ pub struct StoredBalance { pub(crate) type Cursor = cursor::JsonCursor; -/// The inner struct for the `Balance`'s cursor. The `coin_type` is used as the cursor, while the -/// `checkpoint_viewed_at` sets the consistent upper bound for the cursor. +/// The inner struct for the `Balance`'s cursor. The `coin_type` is used as the +/// cursor, while the `checkpoint_viewed_at` sets the consistent upper bound for +/// the cursor. 
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] pub(crate) struct BalanceCursor { #[serde(rename = "t")] @@ -56,8 +68,9 @@ pub(crate) struct BalanceCursor { } impl Balance { - /// Query for the balance of coins owned by `address`, of coins with type `coin_type`. Note that - /// `coin_type` is the type of `0x2::coin::Coin`'s type parameter, not the full type of the coin + /// Query for the balance of coins owned by `address`, of coins with type + /// `coin_type`. Note that `coin_type` is the type of + /// `0x2::coin::Coin`'s type parameter, not the full type of the coin /// object. pub(crate) async fn query( db: &Db, @@ -82,17 +95,19 @@ impl Balance { stored.map(Balance::try_from).transpose() } - /// Query the database for a `page` of coin balances. Each balance represents the total balance - /// for a particular coin type, owned by `address`. + /// Query the database for a `page` of coin balances. Each balance + /// represents the total balance for a particular coin type, owned by + /// `address`. pub(crate) async fn paginate( db: &Db, page: Page, address: SuiAddress, checkpoint_viewed_at: Option, ) -> Result, Error> { - // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if they are - // consistent. Otherwise, use the value from the parameter, or set to None. This is so that - // paginated queries are consistent with the previous query that created the cursor. + // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if + // they are consistent. Otherwise, use the value from the parameter, or + // set to None. This is so that paginated queries are consistent with + // the previous query that created the cursor. let cursor_viewed_at = page.validate_cursor_consistency()?; let checkpoint_viewed_at: Option = cursor_viewed_at.or(checkpoint_viewed_at); @@ -191,17 +206,18 @@ impl TryFrom for Balance { } } -/// Query the database for a `page` of coin balances. Each balance represents the total balance for -/// a particular coin type, owned by `address`. This function is meant to be called within a thunk -/// and returns a RawQuery that can be converted into a BoxedSqlQuery with `.into_boxed()`. +/// Query the database for a `page` of coin balances. Each balance represents +/// the total balance for a particular coin type, owned by `address`. This +/// function is meant to be called within a thunk and returns a RawQuery that +/// can be converted into a BoxedSqlQuery with `.into_boxed()`. fn balance_query(address: SuiAddress, coin_type: Option, lhs: i64, rhs: i64) -> RawQuery { - // Construct the filtered inner query - apply the same filtering criteria to both - // objects_snapshot and objects_history tables. + // Construct the filtered inner query - apply the same filtering criteria to + // both objects_snapshot and objects_history tables. let mut snapshot_objs = query!("SELECT * FROM objects_snapshot"); snapshot_objs = filter(snapshot_objs, address, coin_type.clone()); - // Additionally filter objects_history table for results between the available range, or - // checkpoint_viewed_at, if provided. + // Additionally filter objects_history table for results between the available + // range, or checkpoint_viewed_at, if provided. 
let mut history_objs = query!("SELECT * FROM objects_history"); history_objs = filter(history_objs, address, coin_type.clone()); history_objs = filter!( @@ -218,9 +234,9 @@ fn balance_query(address: SuiAddress, coin_type: Option, lhs: i64, rhs: .order_by("object_id") .order_by("object_version DESC"); - // Objects that fulfill the filtering criteria may not be the most recent version available. - // Left join the candidates table on newer to filter out any objects that have a newer - // version. + // Objects that fulfill the filtering criteria may not be the most recent + // version available. Left join the candidates table on newer to filter out + // any objects that have a newer version. let mut newer = query!("SELECT object_id, object_version FROM objects_history"); newer = filter!( newer, @@ -245,8 +261,8 @@ fn balance_query(address: SuiAddress, coin_type: Option, lhs: i64, rhs: filter!(final_, "newer.object_version IS NULL").group_by("coin_type") } -/// Applies the filtering criteria for balances to the input `RawQuery` and returns a new -/// `RawQuery`. +/// Applies the filtering criteria for balances to the input `RawQuery` and +/// returns a new `RawQuery`. fn filter(mut query: RawQuery, owner: SuiAddress, coin_type: Option) -> RawQuery { query = filter!(query, "coin_type IS NOT NULL"); diff --git a/crates/sui-graphql-rpc/src/types/balance_change.rs b/crates/sui-graphql-rpc/src/types/balance_change.rs index 1c4fcf0b2b3..ebb75327f6f 100644 --- a/crates/sui-graphql-rpc/src/types/balance_change.rs +++ b/crates/sui-graphql-rpc/src/types/balance_change.rs @@ -14,7 +14,8 @@ pub(crate) struct BalanceChange { checkpoint_viewed_at: u64, } -/// Effects to the balance (sum of coin values per coin type) owned by an address or object. +/// Effects to the balance (sum of coin values per coin type) owned by an +/// address or object. #[Object] impl BalanceChange { /// The address or object whose balance has changed. @@ -31,7 +32,8 @@ impl BalanceChange { } } - /// The inner type of the coin whose balance has changed (e.g. `0x2::sui::SUI`). + /// The inner type of the coin whose balance has changed (e.g. + /// `0x2::sui::SUI`). async fn coin_type(&self) -> Option { Some(MoveType::new(self.stored.coin_type.clone())) } @@ -43,10 +45,11 @@ impl BalanceChange { } impl BalanceChange { - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this - /// `BalanceChange` was queried for, or `None` if the data was requested at the latest - /// checkpoint. This is stored on `BalanceChange` so that when viewing that entity's state, it - /// will be as if it was read at the same checkpoint. + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `BalanceChange` was queried for, or `None` if the data + /// was requested at the latest checkpoint. This is stored on + /// `BalanceChange` so that when viewing that entity's state, it will be + /// as if it was read at the same checkpoint. 
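The cursor types in these hunks (`BalanceCursor` here, `CheckpointCursor` later) carry the checkpoint they were created at alongside the actual position, which is what the "defer to the `checkpoint_viewed_at` in the cursor" comments above rely on. A sketch of what such a cursor serializes to, using the `#[serde(rename = "t")]` visible in the diff; the `"c"` rename for the checkpoint field is a guess, as is JSON being the wire encoding of a `JsonCursor`:

```rust
use serde::{Deserialize, Serialize};

// Illustrative stand-in for the crate's `BalanceCursor`.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct BalanceCursor {
    #[serde(rename = "t")]
    coin_type: String,
    #[serde(rename = "c")]
    checkpoint_viewed_at: u64,
}

fn main() {
    let cursor = BalanceCursor {
        coin_type: "0x2::sui::SUI".to_string(),
        checkpoint_viewed_at: 1234,
    };
    // A follow-up page request can decode the cursor, read the checkpoint
    // back out, and pin its queries to that same checkpoint.
    let encoded = serde_json::to_string(&cursor).unwrap();
    assert_eq!(encoded, r#"{"t":"0x2::sui::SUI","c":1234}"#);
}
```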
pub(crate) fn read(bytes: &[u8], checkpoint_viewed_at: u64) -> Result { let stored = bcs::from_bytes(bytes) .map_err(|e| Error::Internal(format!("Error deserializing BalanceChange: {e}")))?; diff --git a/crates/sui-graphql-rpc/src/types/base64.rs b/crates/sui-graphql-rpc/src/types/base64.rs index e74af2258bf..3d2f59d501b 100644 --- a/crates/sui-graphql-rpc/src/types/base64.rs +++ b/crates/sui-graphql-rpc/src/types/base64.rs @@ -4,8 +4,7 @@ use std::str::FromStr; use async_graphql::*; -use fastcrypto::encoding::Base64 as FastCryptoBase64; -use fastcrypto::encoding::Encoding as FastCryptoEncoding; +use fastcrypto::encoding::{Base64 as FastCryptoBase64, Encoding as FastCryptoEncoding}; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct Base64(pub(crate) Vec); @@ -58,9 +57,10 @@ impl From> for Base64 { #[cfg(test)] mod tests { - use super::*; use async_graphql::Value; + use super::*; + fn assert_input_value_error(result: Result>) { match result { Err(InputValueError { .. }) => {} diff --git a/crates/sui-graphql-rpc/src/types/chain_identifier.rs b/crates/sui-graphql-rpc/src/types/chain_identifier.rs index 1072d0a580f..829b6103262 100644 --- a/crates/sui-graphql-rpc/src/types/chain_identifier.rs +++ b/crates/sui-graphql-rpc/src/types/chain_identifier.rs @@ -1,10 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - data::{Db, DbConnection, QueryExecutor}, - error::Error, -}; use async_graphql::*; use diesel::{ExpressionMethods, QueryDsl}; use sui_indexer::schema::checkpoints; @@ -12,6 +8,11 @@ use sui_types::{ digests::ChainIdentifier as NativeChainIdentifier, messages_checkpoint::CheckpointDigest, }; +use crate::{ + data::{Db, DbConnection, QueryExecutor}, + error::Error, +}; + pub(crate) struct ChainIdentifier; impl ChainIdentifier { @@ -33,7 +34,8 @@ impl ChainIdentifier { Self::from_bytes(digest_bytes) } - /// Treat `bytes` as a checkpoint digest and extract a chain identifier from it. + /// Treat `bytes` as a checkpoint digest and extract a chain identifier from + /// it. pub(crate) fn from_bytes(bytes: Vec) -> Result { let genesis_digest = CheckpointDigest::try_from(bytes) .map_err(|e| Error::Internal(format!("Failed to deserialize genesis digest: {e}")))?; diff --git a/crates/sui-graphql-rpc/src/types/checkpoint.rs b/crates/sui-graphql-rpc/src/types/checkpoint.rs index 656ccb1182e..54e8c8f67f7 100644 --- a/crates/sui-graphql-rpc/src/types/checkpoint.rs +++ b/crates/sui-graphql-rpc/src/types/checkpoint.rs @@ -3,20 +3,6 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; -use super::{ - base64::Base64, - cursor::{self, Page, Paginated, Target}, - date_time::DateTime, - digest::Digest, - epoch::Epoch, - gas::GasCostSummary, - transaction_block::{self, TransactionBlock, TransactionBlockFilter}, -}; -use crate::consistency::Checkpointed; -use crate::{ - data::{self, Conn, Db, DbConnection, QueryExecutor}, - error::Error, -}; use async_graphql::{ connection::{Connection, CursorType, Edge}, dataloader::{DataLoader, Loader}, @@ -31,26 +17,43 @@ use sui_indexer::{ }; use sui_types::messages_checkpoint::CheckpointDigest; -/// Filter either by the digest, or the sequence number, or neither, to get the latest checkpoint. 
+use super::{ + base64::Base64, + cursor::{self, Page, Paginated, Target}, + date_time::DateTime, + digest::Digest, + epoch::Epoch, + gas::GasCostSummary, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, +}; +use crate::{ + consistency::Checkpointed, + data::{self, Conn, Db, DbConnection, QueryExecutor}, + error::Error, +}; + +/// Filter either by the digest, or the sequence number, or neither, to get the +/// latest checkpoint. #[derive(Default, InputObject)] pub(crate) struct CheckpointId { pub digest: Option, pub sequence_number: Option, } -/// DataLoader key for fetching a `Checkpoint` by its sequence number, optionally constrained by a -/// consistency cursor. +/// DataLoader key for fetching a `Checkpoint` by its sequence number, +/// optionally constrained by a consistency cursor. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct SeqNumKey { pub sequence_number: u64, - /// The digest is not used for fetching, but is used as an additional filter, to correctly - /// implement a request that sets both a sequence number and a digest. + /// The digest is not used for fetching, but is used as an additional + /// filter, to correctly implement a request that sets both a sequence + /// number and a digest. pub digest: Option, pub checkpoint_viewed_at: Option, } -/// DataLoader key for fetching a `Checkpoint` by its digest, optionally constrained by a -/// consistency cursor. +/// DataLoader key for fetching a `Checkpoint` by its digest, optionally +/// constrained by a consistency cursor. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct DigestKey { pub digest: Digest, @@ -59,8 +62,8 @@ struct DigestKey { #[derive(Clone)] pub(crate) struct Checkpoint { - /// Representation of transaction data in the Indexer's Store. The indexer stores the - /// transaction data and its effects together, in one table. + /// Representation of transaction data in the Indexer's Store. The indexer + /// stores the transaction data and its effects together, in one table. pub stored: StoredCheckpoint, // The checkpoint_sequence_number at which this was viewed at, or `None` if the data was // requested at the latest checkpoint. @@ -70,9 +73,9 @@ pub(crate) struct Checkpoint { pub(crate) type Cursor = cursor::JsonCursor; type Query = data::Query; -/// The cursor returned for each `Checkpoint` in a connection's page of results. The -/// `checkpoint_viewed_at` will set the consistent upper bound for subsequent queries made on this -/// cursor. +/// The cursor returned for each `Checkpoint` in a connection's page of results. +/// The `checkpoint_viewed_at` will set the consistent upper bound for +/// subsequent queries made on this cursor. #[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] pub(crate) struct CheckpointCursor { /// The checkpoint sequence number this was viewed at. @@ -82,31 +85,33 @@ pub(crate) struct CheckpointCursor { pub sequence_number: u64, } -/// Checkpoints contain finalized transactions and are used for node synchronization -/// and global transaction ordering. +/// Checkpoints contain finalized transactions and are used for node +/// synchronization and global transaction ordering. #[Object] impl Checkpoint { - /// A 32-byte hash that uniquely identifies the checkpoint contents, encoded in Base58. This - /// hash can be used to verify checkpoint contents by checking signatures against the committee, - /// Hashing contents to match digest, and checking that the previous checkpoint digest matches. 
+ /// A 32-byte hash that uniquely identifies the checkpoint contents, encoded + /// in Base58. This hash can be used to verify checkpoint contents by + /// checking signatures against the committee, Hashing contents to match + /// digest, and checking that the previous checkpoint digest matches. async fn digest(&self) -> Result { Ok(self.digest_impl().extend()?.base58_encode()) } - /// This checkpoint's position in the total order of finalized checkpoints, agreed upon by - /// consensus. + /// This checkpoint's position in the total order of finalized checkpoints, + /// agreed upon by consensus. async fn sequence_number(&self) -> u64 { self.sequence_number_impl() } - /// The timestamp at which the checkpoint is agreed to have happened according to consensus. - /// Transactions that access time in this checkpoint will observe this timestamp. + /// The timestamp at which the checkpoint is agreed to have happened + /// according to consensus. Transactions that access time in this + /// checkpoint will observe this timestamp. async fn timestamp(&self) -> Result { DateTime::from_ms(self.stored.timestamp_ms).extend() } - /// This is an aggregation of signatures from a quorum of validators for the checkpoint - /// proposal. + /// This is an aggregation of signatures from a quorum of validators for the + /// checkpoint proposal. async fn validator_signatures(&self) -> Base64 { Base64::from(&self.stored.validator_signature) } @@ -119,14 +124,16 @@ impl Checkpoint { .map(Base58::encode) } - /// The total number of transaction blocks in the network by the end of this checkpoint. + /// The total number of transaction blocks in the network by the end of this + /// checkpoint. async fn network_total_transactions(&self) -> Option { Some(self.network_total_transactions_impl()) } - /// The computation cost, storage cost, storage rebate, and non-refundable storage fee - /// accumulated during this epoch, up to and including this checkpoint. These values increase - /// monotonically across checkpoints in the same epoch, and reset on epoch boundaries. + /// The computation cost, storage cost, storage rebate, and non-refundable + /// storage fee accumulated during this epoch, up to and including this + /// checkpoint. These values increase monotonically across checkpoints + /// in the same epoch, and reset on epoch boundaries. async fn rolling_gas_summary(&self) -> Option { Some(GasCostSummary { computation_cost: self.stored.computation_cost as u64, @@ -203,9 +210,9 @@ impl Checkpoint { .map_err(|e| Error::Internal(format!("Failed to deserialize checkpoint digest: {e}"))) } - /// Look up a `Checkpoint` in the database, filtered by either sequence number or digest. If - /// both filters are supplied they will both be applied. If none are supplied, the latest - /// checkpoint is fetched. + /// Look up a `Checkpoint` in the database, filtered by either sequence + /// number or digest. If both filters are supplied they will both be + /// applied. If none are supplied, the latest checkpoint is fetched. pub(crate) async fn query( ctx: &Context<'_>, filter: CheckpointId, @@ -244,9 +251,9 @@ impl Checkpoint { } } - /// Look up the latest `Checkpoint` from the database, optionally filtered by a consistency - /// cursor (querying for a consistency cursor in the past looks for the latest checkpoint as of - /// that cursor). 
+ /// Look up the latest `Checkpoint` from the database, optionally filtered + /// by a consistency cursor (querying for a consistency cursor in the + /// past looks for the latest checkpoint as of that cursor). async fn query_latest_at( db: &Db, checkpoint_viewed_at: Option, @@ -279,8 +286,9 @@ impl Checkpoint { })) } - /// Look up a `Checkpoint` in the database and retrieve its `timestamp_ms` field. This method - /// takes a connection, so that it can be used within a transaction. + /// Look up a `Checkpoint` in the database and retrieve its `timestamp_ms` + /// field. This method takes a connection, so that it can be used within + /// a transaction. pub(crate) fn query_timestamp( conn: &mut Conn, seq_num: u64, @@ -302,9 +310,9 @@ impl Checkpoint { .map_err(|e| Error::Internal(format!("Failed to fetch checkpoint: {e}"))) } - /// Queries the database for the upper bound of the available range supported by the graphql - /// server. This method takes a connection, so that it can be used in an execute_repeatable - /// transaction. + /// Queries the database for the upper bound of the available range + /// supported by the graphql server. This method takes a connection, so + /// that it can be used in an execute_repeatable transaction. pub(crate) fn latest_checkpoint_sequence_number( conn: &mut Conn, ) -> Result { @@ -319,19 +327,21 @@ impl Checkpoint { Ok(result as u64) } - /// Query the database for a `page` of checkpoints. The Page uses the checkpoint sequence number - /// of the stored checkpoint and the checkpoint at which this was viewed at as the cursor, and - /// can optionally be further `filter`-ed by an epoch number (to only return checkpoints within - /// that epoch). + /// Query the database for a `page` of checkpoints. The Page uses the + /// checkpoint sequence number of the stored checkpoint and the + /// checkpoint at which this was viewed at as the cursor, and + /// can optionally be further `filter`-ed by an epoch number (to only return + /// checkpoints within that epoch). /// /// The `checkpoint_viewed_at` parameter is an Option representing the - /// checkpoint_sequence_number at which this page was queried for, or `None` if the data was - /// requested at the latest checkpoint. Each entity returned in the connection will inherit this - /// checkpoint, so that when viewing that entity's state, it will be from the reference of this + /// checkpoint_sequence_number at which this page was queried for, or `None` + /// if the data was requested at the latest checkpoint. Each entity + /// returned in the connection will inherit this checkpoint, so that + /// when viewing that entity's state, it will be from the reference of this /// checkpoint_viewed_at parameter. /// - /// If the `Page` is set, then this function will defer to the `checkpoint_viewed_at` in - /// the cursor if they are consistent. + /// If the `Page` is set, then this function will defer to the + /// `checkpoint_viewed_at` in the cursor if they are consistent. pub(crate) async fn paginate( db: &Db, page: Page, @@ -366,8 +376,9 @@ impl Checkpoint { }) .await?; - // Defer to the provided checkpoint_viewed_at, but if it is not provided, use the - // current available range. This sets a consistent upper bound for the nested queries. + // Defer to the provided checkpoint_viewed_at, but if it is not provided, use + // the current available range. This sets a consistent upper bound for + // the nested queries. 
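All of the `checkpoint_viewed_at` plumbing above bottoms out in builder.rs's `CheckpointWatermark`: judging by the `AtomicU64` import and the `load(Relaxed)` call in `graphql_handler`, it is an `Arc<AtomicU64>` that a background task keeps current and each incoming request reads once. A minimal sketch of that pattern, assuming tokio and with the database query stubbed out:

```rust
use std::{
    sync::{
        atomic::{AtomicU64, Ordering::Relaxed},
        Arc,
    },
    time::Duration,
};

#[tokio::main]
async fn main() {
    let watermark = Arc::new(AtomicU64::new(0));

    // Background task: periodically store the latest checkpoint.
    let writer = Arc::clone(&watermark);
    tokio::spawn(async move {
        let mut checkpoint = 0u64;
        loop {
            checkpoint += 1; // stand-in for querying the DB
            writer.store(checkpoint, Relaxed);
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
    });

    // Request path: read the watermark exactly once per request, so every
    // nested resolver sees the same consistent upper bound.
    tokio::time::sleep(Duration::from_millis(50)).await;
    let checkpoint_viewed_at = watermark.load(Relaxed);
    println!("request pinned to checkpoint {checkpoint_viewed_at}");
}
```

Reading the value once per request, rather than once per resolver, is what keeps nested queries mutually consistent even while the writer keeps advancing.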
let mut conn = Connection::new(prev, next); let checkpoint_viewed_at = checkpoint_viewed_at.unwrap_or(rhs); for stored in results { @@ -384,8 +395,9 @@ impl Checkpoint { Ok(conn) } - /// Queries the database for the available range supported by the graphql server. This method - /// takes a connection, so that it can be used in an execute_repeatable transaction. + /// Queries the database for the available range supported by the graphql + /// server. This method takes a connection, so that it can be used in an + /// execute_repeatable transaction. pub(crate) fn available_range(conn: &mut Conn) -> Result<(u64, u64), diesel::result::Error> { use checkpoints::dsl as checkpoints; use objects_snapshot::dsl as snapshots; @@ -542,9 +554,9 @@ impl Loader for Db { checkpoint_viewed_at: key.checkpoint_viewed_at, }; - // Filter by key's checkpoint viewed at here. Doing this in memory because it should - // be quite rare that this query actually filters something, but encoding it in SQL - // is complicated. + // Filter by key's checkpoint viewed at here. Doing this in memory because it + // should be quite rare that this query actually filters + // something, but encoding it in SQL is complicated. let seq_num = checkpoint.stored.sequence_number as u64; if matches!(key.checkpoint_viewed_at, Some(cp) if cp < seq_num) { None diff --git a/crates/sui-graphql-rpc/src/types/coin.rs b/crates/sui-graphql-rpc/src/types/coin.rs index c152aa2ef5d..6fd7a49a82a 100644 --- a/crates/sui-graphql-rpc/src/types/coin.rs +++ b/crates/sui-graphql-rpc/src/types/coin.rs @@ -1,41 +1,45 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::consistency::{build_objects_query, consistent_range, View}; -use crate::data::{Db, QueryExecutor}; -use crate::error::Error; -use crate::filter; -use crate::raw_query::RawQuery; - -use super::balance::{self, Balance}; -use super::base64::Base64; -use super::big_int::BigInt; -use super::cursor::{Page, Target}; -use super::display::DisplayEntry; -use super::dynamic_field::{DynamicField, DynamicFieldName}; -use super::move_object::{MoveObject, MoveObjectImpl}; -use super::move_value::MoveValue; -use super::object::{self, Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}; -use super::owner::OwnerImpl; -use super::stake::StakedSui; -use super::sui_address::SuiAddress; -use super::suins_registration::{DomainFormat, SuinsRegistration}; -use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; -use super::type_filter::ExactTypeFilter; -use async_graphql::*; - -use async_graphql::connection::{Connection, CursorType, Edge}; -use sui_indexer::models::objects::StoredHistoryObject; -use sui_indexer::types::OwnerType; -use sui_types::coin::Coin as NativeCoin; -use sui_types::TypeTag; +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, +}; +use sui_indexer::{models::objects::StoredHistoryObject, types::OwnerType}; +use sui_types::{coin::Coin as NativeCoin, TypeTag}; + +use super::{ + balance::{self, Balance}, + base64::Base64, + big_int::BigInt, + cursor::{Page, Target}, + display::DisplayEntry, + dynamic_field::{DynamicField, DynamicFieldName}, + move_object::{MoveObject, MoveObjectImpl}, + move_value::MoveValue, + object::{self, Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}, + owner::OwnerImpl, + stake::StakedSui, + sui_address::SuiAddress, + suins_registration::{DomainFormat, SuinsRegistration}, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, + type_filter::ExactTypeFilter, +}; 
+use crate::{ + consistency::{build_objects_query, consistent_range, View}, + data::{Db, QueryExecutor}, + error::Error, + filter, + raw_query::RawQuery, +}; #[derive(Clone)] pub(crate) struct Coin { /// Representation of this Coin as a generic Move Object. pub super_: MoveObject, - /// The deserialized representation of the Move Object's contents, as a `0x2::coin::Coin`. + /// The deserialized representation of the Move Object's contents, as a + /// `0x2::coin::Coin`. pub native: NativeCoin, } @@ -66,8 +70,8 @@ impl Coin { .await } - /// Total balance of all coins with marker type owned by this object. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -94,7 +98,8 @@ impl Coin { /// The coin objects for this object. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -123,7 +128,8 @@ impl Coin { .await } - /// The domain explicitly configured as the default domain pointing to this object. + /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -134,8 +140,8 @@ impl Coin { .await } - /// The SuinsRegistration NFTs owned by this object. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this object. These grant the owner + /// the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -153,18 +159,21 @@ impl Coin { ObjectImpl(&self.super_.super_).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(&self.super_.super_).status().await } - /// 32-byte hash that identifies the object's contents, encoded as a Base58 string. + /// 32-byte hash that identifies the object's contents, encoded as a Base58 + /// string.
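Both the checkpoint and object `digest` fields above present a raw 32-byte hash as a Base58 string. A round-trip sketch, assuming the third-party `bs58` crate is available (the repository itself goes through `fastcrypto::encoding::Base58`):

```rust
// Sketch only: encode a 32-byte digest as Base58 and decode it back.
fn main() {
    let digest = [7u8; 32];
    let encoded = bs58::encode(digest).into_string();
    let decoded = bs58::decode(&encoded).into_vec().expect("valid Base58");
    assert_eq!(decoded, digest);
    println!("digest: {encoded}");
}
```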
pub(crate) async fn digest(&self) -> Option { ObjectImpl(&self.super_.super_).digest().await } @@ -184,8 +193,9 @@ impl Coin { .await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(&self.super_.super_).storage_rebate().await } @@ -210,33 +220,34 @@ impl Coin { ObjectImpl(&self.super_.super_).bcs().await } - /// Displays the contents of the Move object in a JSON string and through GraphQL types. Also - /// provides the flat representation of the type signature, and the BCS of the corresponding - /// data. + /// Displays the contents of the Move object in a JSON string and through + /// GraphQL types. Also provides the flat representation of the type + /// signature, and the BCS of the corresponding data. pub(crate) async fn contents(&self) -> Option { MoveObjectImpl(&self.super_).contents().await } - /// Determines whether a transaction can transfer this object, using the TransferObjects - /// transaction command or `sui::transfer::public_transfer`, both of which require the object to + /// Determines whether a transaction can transfer this object, using the + /// TransferObjects transaction command or + /// `sui::transfer::public_transfer`, both of which require the object to /// have the `key` and `store` abilities. pub(crate) async fn has_public_transfer(&self, ctx: &Context<'_>) -> Result { MoveObjectImpl(&self.super_).has_public_transfer(ctx).await } - /// The set of named templates defined on-chain for the type of this object, to be handled - /// off-chain. The server substitutes data from the object into these templates to generate a - /// display string per template. + /// The set of named templates defined on-chain for the type of this object, + /// to be handled off-chain. The server substitutes data from the object + /// into these templates to generate a display string per template. pub(crate) async fn display(&self, ctx: &Context<'_>) -> Result>> { ObjectImpl(&self.super_.super_).display(ctx).await } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type has `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_field( &self, ctx: &Context<'_>, @@ -247,13 +258,14 @@ impl Coin { .await } - /// Access a dynamic object field on an object using its name.
Names are + /// arbitrary Move values whose type has `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -266,8 +278,8 @@ impl Coin { /// The dynamic fields and dynamic object fields on an object. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -295,8 +307,8 @@ impl Coin { } impl Coin { - /// Query the database for a `page` of coins. The page uses the bytes of an Object ID as the - /// cursor, and can optionally be filtered by an owner. + /// Query the database for a `page` of coins. The page uses the bytes of an + /// Object ID as the cursor, and can optionally be filtered by an owner. pub(crate) async fn paginate( db: &Db, page: Page, @@ -304,9 +316,10 @@ impl Coin { owner: Option, checkpoint_viewed_at: Option, ) -> Result, Error> { - // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if they are - // consistent. Otherwise, use the value from the parameter, or set to None. This is so that - // paginated queries are consistent with the previous query that created the cursor. + // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if + // they are consistent. Otherwise, use the value from the parameter, or + // set to None. This is so that paginated queries are consistent with + // the previous query that created the cursor. let cursor_viewed_at = page.validate_cursor_consistency()?; let checkpoint_viewed_at: Option = cursor_viewed_at.or(checkpoint_viewed_at); @@ -335,8 +348,8 @@ impl Coin { let mut conn: Connection = Connection::new(prev, next); for stored in results { - // To maintain consistency, the returned cursor should have the same upper-bound as the - // checkpoint found on the cursor. + // To maintain consistency, the returned cursor should have the same upper-bound + // as the checkpoint found on the cursor. let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); let object = Object::try_from_stored_history_object(stored, Some(checkpoint_viewed_at))?; @@ -375,9 +388,9 @@ impl TryFrom<&MoveObject> for Coin { } } -/// Constructs a raw query to fetch objects from the database. Since there are no point lookups for -/// the coin query, objects are filtered out if they satisfy the criteria but have a later version -/// in the same checkpoint. +/// Constructs a raw query to fetch objects from the database. Since there are +/// no point lookups for the coin query, objects are filtered out if they +/// satisfy the criteria but have a later version in the same checkpoint.
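The `coins_query` doc above says rows are dropped when a later version of the same object exists in the checkpoint range. In memory, that de-duplication is just "keep the maximum version per object ID"; a std-only sketch (in the patch, this filtering happens inside the generated SQL, not in Rust):

```rust
use std::collections::HashMap;

/// Keep only the newest version seen for each object id.
fn newest_per_object(rows: &[(u32, u64)]) -> HashMap<u32, u64> {
    let mut newest = HashMap::new();
    for &(id, version) in rows {
        let entry = newest.entry(id).or_insert(version);
        if version > *entry {
            *entry = version;
        }
    }
    newest
}

fn main() {
    let newest = newest_per_object(&[(1, 3), (1, 7), (2, 4)]);
    assert_eq!(newest[&1], 7); // the (1, 3) row is filtered out
}
```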
fn coins_query( coin_type: TypeTag, owner: Option, diff --git a/crates/sui-graphql-rpc/src/types/coin_metadata.rs b/crates/sui-graphql-rpc/src/types/coin_metadata.rs index 6abe45f1ae6..7c17589f3eb 100644 --- a/crates/sui-graphql-rpc/src/types/coin_metadata.rs +++ b/crates/sui-graphql-rpc/src/types/coin_metadata.rs @@ -1,28 +1,31 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::balance::{self, Balance}; -use super::base64::Base64; -use super::big_int::BigInt; -use super::coin::Coin; -use super::display::DisplayEntry; -use super::dynamic_field::{DynamicField, DynamicFieldName}; -use super::move_object::{MoveObject, MoveObjectImpl}; -use super::move_value::MoveValue; -use super::object::{self, Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}; -use super::owner::OwnerImpl; -use super::stake::StakedSui; -use super::sui_address::SuiAddress; -use super::suins_registration::{DomainFormat, SuinsRegistration}; -use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; -use super::type_filter::ExactTypeFilter; -use crate::data::Db; -use crate::error::Error; -use async_graphql::connection::Connection; -use async_graphql::*; -use sui_types::coin::{CoinMetadata as NativeCoinMetadata, TreasuryCap}; -use sui_types::gas_coin::{GAS, TOTAL_SUPPLY_SUI}; -use sui_types::TypeTag; +use async_graphql::{connection::Connection, *}; +use sui_types::{ + coin::{CoinMetadata as NativeCoinMetadata, TreasuryCap}, + gas_coin::{GAS, TOTAL_SUPPLY_SUI}, + TypeTag, +}; + +use super::{ + balance::{self, Balance}, + base64::Base64, + big_int::BigInt, + coin::Coin, + display::DisplayEntry, + dynamic_field::{DynamicField, DynamicFieldName}, + move_object::{MoveObject, MoveObjectImpl}, + move_value::MoveValue, + object::{self, Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}, + owner::OwnerImpl, + stake::StakedSui, + sui_address::SuiAddress, + suins_registration::{DomainFormat, SuinsRegistration}, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, + type_filter::ExactTypeFilter, +}; +use crate::{data::Db, error::Error}; pub(crate) struct CoinMetadata { pub super_: MoveObject, @@ -56,8 +59,8 @@ impl CoinMetadata { .await } - /// Total balance of all coins with marker type owned by this object. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -84,7 +87,8 @@ impl CoinMetadata { /// The coin objects for this object. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -113,7 +117,8 @@ impl CoinMetadata { .await } - /// The domain explicitly configured as the default domain pointing to this object. + /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -124,8 +129,8 @@ impl CoinMetadata { .await } - /// The SuinsRegistration NFTs owned by this object. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this object. These grant the owner + /// the capability to manage the associated domain. 
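Several of the fields above (`balance`, `coins`) default an omitted `type` filter to `0x2::sui::SUI`. The defaulting itself is a one-liner; a sketch over plain strings (the real code parses the filter into a `TypeTag`, and `0x2::example::USD` below is a made-up type for illustration):

```rust
/// Sketch: substitute the SUI coin type when the caller supplies no filter.
fn coin_type_filter(requested: Option<&str>) -> &str {
    requested.unwrap_or("0x2::sui::SUI")
}

fn main() {
    assert_eq!(coin_type_filter(None), "0x2::sui::SUI");
    assert_eq!(coin_type_filter(Some("0x2::example::USD")), "0x2::example::USD");
}
```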
pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -143,18 +148,21 @@ impl CoinMetadata { ObjectImpl(&self.super_.super_).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(&self.super_.super_).status().await } - /// 32-byte hash that identifies the object's contents, encoded as a Base58 string. + /// 32-byte hash that identifies the object's contents, encoded as a Base58 + /// string. pub(crate) async fn digest(&self) -> Option { ObjectImpl(&self.super_.super_).digest().await } @@ -174,8 +182,9 @@ impl CoinMetadata { .await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(&self.super_.super_).storage_rebate().await } @@ -200,33 +209,34 @@ impl CoinMetadata { ObjectImpl(&self.super_.super_).bcs().await } - /// Displays the contents of the Move object in a JSON string and through GraphQL types. Also - /// provides the flat representation of the type signature, and the BCS of the corresponding - /// data. + /// Displays the contents of the Move object in a JSON string and through + /// GraphQL types. Also provides the flat representation of the type + /// signature, and the BCS of the corresponding data. pub(crate) async fn contents(&self) -> Option { MoveObjectImpl(&self.super_).contents().await } - /// Determines whether a transaction can transfer this object, using the TransferObjects - /// transaction command or `sui::transfer::public_transfer`, both of which require the object to + /// Determines whether a transaction can transfer this object, using the + /// TransferObjects transaction command or + /// `sui::transfer::public_transfer`, both of which require the object to /// have the `key` and `store` abilities. pub(crate) async fn has_public_transfer(&self, ctx: &Context<'_>) -> Result { MoveObjectImpl(&self.super_).has_public_transfer(ctx).await } - /// The set of named templates defined on-chain for the type of this object, to be handled - /// off-chain.
The server substitutes data from the object into these templates to generate a - /// display string per template. + /// The set of named templates defined on-chain for the type of this object, + /// to be handled off-chain. The server substitutes data from the object + /// into these templates to generate a display string per template. pub(crate) async fn display(&self, ctx: &Context<'_>) -> Result>> { ObjectImpl(&self.super_.super_).display(ctx).await } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type has `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_field( &self, ctx: &Context<'_>, @@ -237,13 +247,14 @@ impl CoinMetadata { .await } - /// Access a dynamic object field on an object using its name. Names are arbitrary Move values - /// whose type have `copy`, `drop`, and `store`, and are specified using their type, and their - /// BCS contents, Base64 encoded. The value of a dynamic object field can also be accessed + /// Access a dynamic object field on an object using its name. Names are + /// arbitrary Move values whose type has `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -256,8 +267,8 @@ impl CoinMetadata { /// The dynamic fields and dynamic object fields on an object. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -318,11 +329,12 @@ impl CoinMetadata { } impl CoinMetadata { - /// Read a `CoinMetadata` from the `db` for the coin whose inner type is `coin_type`. + /// Read a `CoinMetadata` from the `db` for the coin whose inner type is + /// `coin_type`. pub(crate) async fn query(db: &Db, coin_type: TypeTag) -> Result, Error> { let TypeTag::Struct(coin_struct) = coin_type else { - // If the type supplied is not metadata, we know it's not a valid coin type, so there - // won't be CoinMetadata for it. + // If the type supplied is not metadata, we know it's not a valid coin type, so + // there won't be CoinMetadata for it. return Ok(None); }; @@ -353,8 +365,8 @@ impl CoinMetadata { coin_type: TypeTag, ) -> Result, Error> { let TypeTag::Struct(coin_struct) = coin_type else { - // If the type supplied is not metadata, we know it's not a valid coin type, so there - // won't be CoinMetadata for it.
+ // If the type supplied is not metadata, we know it's not a valid coin type, so + // there won't be CoinMetadata for it. return Ok(None); }; diff --git a/crates/sui-graphql-rpc/src/types/cursor.rs b/crates/sui-graphql-rpc/src/types/cursor.rs index 16c6bcdcba7..fb79d2a2714 100644 --- a/crates/sui-graphql-rpc/src/types/cursor.rs +++ b/crates/sui-graphql-rpc/src/types/cursor.rs @@ -28,27 +28,29 @@ pub(crate) struct JsonCursor(OpaqueCursor); /// Cursor that hides its value by encoding it as BCS and then Base64. pub(crate) struct BcsCursor(C); -/// Connection field parameters parsed into a single type that encodes the bounds of a single page -/// in a paginated response. +/// Connection field parameters parsed into a single type that encodes the +/// bounds of a single page in a paginated response. #[derive(Debug, Clone)] pub(crate) struct Page { - /// The exclusive lower bound of the page (no bound means start from the beginning of the - /// data-set). + /// The exclusive lower bound of the page (no bound means start from the + /// beginning of the data-set). after: Option, - /// The exclusive upper bound of the page (no bound means continue to the end of the data-set). + /// The exclusive upper bound of the page (no bound means continue to the + /// end of the data-set). before: Option, /// Maximum number of entries in the page. limit: u64, - /// In case there are more than `limit` entries in the range described by `(after, before)`, - /// this field states whether the entries up to limit are taken fron the `Front` or `Back` of - /// that range. + /// In case there are more than `limit` entries in the range described by + /// `(after, before)`, this field states whether the entries up to limit + /// are taken from the `Front` or `Back` of that range. end: End, } -/// Whether the page is extracted from the beginning or the end of the range bounded by the cursors. +/// Whether the page is extracted from the beginning or the end of the range +/// bounded by the cursors. #[derive(PartialEq, Eq, Debug, Clone, Copy)] enum End { Front, @@ -59,45 +61,48 @@ enum End { pub(crate) trait Paginated: Target { type Source: QuerySource; - /// Adds a filter to `query` to bound its result to be greater than or equal to `cursor` - /// (returning the new query). + /// Adds a filter to `query` to bound its result to be greater than or equal + /// to `cursor` (returning the new query). fn filter_ge( cursor: &C, query: Query, ) -> Query; - /// Adds a filter to `query` to bound its results to be less than or equal to `cursor` - /// (returning the new query). + /// Adds a filter to `query` to bound its results to be less than or equal + /// to `cursor` (returning the new query). fn filter_le( cursor: &C, query: Query, ) -> Query; - /// Adds an `ORDER BY` clause to `query` to order rows according to their cursor values - /// (returning the new query). The `asc` parameter controls whether the ordering is ASCending - /// (`true`) or descending (`false`). + /// Adds an `ORDER BY` clause to `query` to order rows according to their + /// cursor values (returning the new query). The `asc` parameter + /// controls whether the ordering is ASCending (`true`) or descending + /// (`false`). fn order(asc: bool, query: Query) -> Query; } -/// Results from the database that are pointed to by cursors. Equivalent to `Paginated`, but for a -/// `RawQuery`. +/// Results from the database that are pointed to by cursors. Equivalent to +/// `Paginated`, but for a `RawQuery`.
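`Page` above is bounded by opaque cursors that also carry the checkpoint they were created at. A std-only sketch of such a cursor (the crate's real cursors are JSON- or BCS-encoded and then Base64'd, per `JsonCursor`/`BcsCursor`; the hex encoding below is purely illustrative):

```rust
/// Sketch: a cursor that pins both a position and the checkpoint it was read at.
#[derive(Debug, PartialEq)]
struct Cursor {
    sequence_number: u64,
    checkpoint_viewed_at: u64,
}

impl Cursor {
    fn encode(&self) -> String {
        format!("{:x}:{:x}", self.sequence_number, self.checkpoint_viewed_at)
    }

    fn decode(s: &str) -> Option<Self> {
        let (seq, cp) = s.split_once(':')?;
        Some(Cursor {
            sequence_number: u64::from_str_radix(seq, 16).ok()?,
            checkpoint_viewed_at: u64::from_str_radix(cp, 16).ok()?,
        })
    }
}

fn main() {
    let c = Cursor { sequence_number: 10, checkpoint_viewed_at: 42 };
    assert_eq!(Cursor::decode(&c.encode()), Some(c));
}
```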
pub(crate) trait RawPaginated: Target { - /// Adds a filter to `query` to bound its result to be greater than or equal to `cursor` - /// (returning the new query). + /// Adds a filter to `query` to bound its result to be greater than or equal + /// to `cursor` (returning the new query). fn filter_ge(cursor: &C, query: RawQuery) -> RawQuery; - /// Adds a filter to `query` to bound its results to be less than or equal to `cursor` - /// (returning the new query). + /// Adds a filter to `query` to bound its results to be less than or equal + /// to `cursor` (returning the new query). fn filter_le(cursor: &C, query: RawQuery) -> RawQuery; - /// Adds an `ORDER BY` clause to `query` to order rows according to their cursor values - /// (returning the new query). The `asc` parameter controls whether the ordering is ASCending - /// (`true`) or descending (`false`). + /// Adds an `ORDER BY` clause to `query` to order rows according to their + /// cursor values (returning the new query). The `asc` parameter + /// controls whether the ordering is ASCending (`true`) or descending + /// (`false`). fn order(asc: bool, query: RawQuery) -> RawQuery; } pub(crate) trait Target { - /// The cursor pointing at this target value, assuming it was read at `checkpoint_viewed_at`. + /// The cursor pointing at this target value, assuming it was read at + /// `checkpoint_viewed_at`. fn cursor(&self, checkpoint_viewed_at: u64) -> C; } @@ -114,18 +119,21 @@ impl BcsCursor { } impl Page { - /// Convert connection parameters into a page. Entries for the page are drawn from the range - /// `(after, before)` (Both bounds are optional). The number of entries in the page is - /// controlled by `first` and `last`. + /// Convert connection parameters into a page. Entries for the page are + /// drawn from the range `(after, before)` (Both bounds are optional). + /// The number of entries in the page is controlled by `first` and + /// `last`. /// /// - Setting both is in an error. - /// - Setting `first` indicates that the entries are taken from the front of the range. - /// - Setting `last` indicates that the entries are taken from the end of the range. - /// - Setting neither defaults the limit to the default page size in `config`, taken from the - /// front of the range. + /// - Setting `first` indicates that the entries are taken from the front of + /// the range. + /// - Setting `last` indicates that the entries are taken from the end of + /// the range. + /// - Setting neither defaults the limit to the default page size in + /// `config`, taken from the front of the range. /// - /// It is an error to set a limit on page size that is greater than the `config`'s max page - /// size. + /// It is an error to set a limit on page size that is greater than the + /// `config`'s max page size. pub(crate) fn from_params( config: &ServiceConfig, first: Option, @@ -180,9 +188,10 @@ impl Page where C: Checkpointed, { - /// If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if they are - /// consistent. Otherwise, use the value from the parameter, or set to None. This is so that - /// paginated queries are consistent with the previous query that created the cursor. + /// If cursors are provided, defer to the `checkpoint_viewed_at` in the + /// cursor if they are consistent. Otherwise, use the value from the + /// parameter, or set to None. This is so that paginated queries are + /// consistent with the previous query that created the cursor. 
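The consistency check documented for `validate_cursor_consistency` above only has to agree on one number: the `checkpoint_viewed_at` embedded in each cursor. A sketch of that rule, returning the shared value (or an error when the two cursors disagree):

```rust
/// Sketch: two cursors are consistent if they embed the same checkpoint.
fn validate_cursor_consistency(
    after: Option<u64>,
    before: Option<u64>,
) -> Result<Option<u64>, String> {
    match (after, before) {
        (Some(a), Some(b)) if a != b => Err(format!("inconsistent cursors: {a} vs {b}")),
        (a, b) => Ok(a.or(b)),
    }
}

fn main() {
    assert_eq!(validate_cursor_consistency(Some(42), Some(42)), Ok(Some(42)));
    assert_eq!(validate_cursor_consistency(None, Some(42)), Ok(Some(42)));
    assert!(validate_cursor_consistency(Some(41), Some(42)).is_err());
}
```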
pub(crate) fn validate_cursor_consistency(&self) -> Result, Error> { match (self.after(), self.before()) { (Some(after), Some(before)) => { @@ -204,10 +213,11 @@ where } impl Page> { - /// Treat the cursors of this Page as indices into a range [0, total). Validates that the - /// cursors of the page are consistent, and returns two booleans indicating whether there is a - /// previous or next page in the range, the `checkpoint_viewed_at` to set for consistency, and - /// an iterator of cursors within that Page. + /// Treat the cursors of this Page as indices into a range [0, total). + /// Validates that the cursors of the page are consistent, and returns + /// two booleans indicating whether there is a previous or next page in + /// the range, the `checkpoint_viewed_at` to set for consistency, and an + /// iterator of cursors within that Page. pub(crate) fn paginate_consistent_indices( &self, total: usize, @@ -252,14 +262,16 @@ impl Page> { } impl Page { - /// Treat the cursors of this page as upper- and lowerbound filters for a database `query`. - /// Returns two booleans indicating whether there is a previous or next page in the range, - /// followed by an iterator of values in the page, fetched from the database. + /// Treat the cursors of this page as upper- and lower-bound filters for a + /// database `query`. Returns two booleans indicating whether there is a + /// previous or next page in the range, followed by an iterator of + /// values in the page, fetched from the database. /// - /// The values returned implement `Target`, so are able to compute their own cursors. + /// The values returned implement `Target`, so are able to compute their + /// own cursors. /// - /// `checkpoint_viewed_at` is a required parameter to and passed to each element to construct a - /// consistent cursor. + /// `checkpoint_viewed_at` is a required parameter, and is passed to each + /// element to construct a consistent cursor. pub(crate) fn paginate_query( &self, conn: &mut Conn<'_>, @@ -311,13 +323,15 @@ impl Page { )) } - /// This function is similar to `paginate_query`, but is specifically designed for handling - /// `RawQuery`. Treat the cursors of this page as upper- and lowerbound filters for a database - /// `query`. Returns two booleans indicating whether there is a previous or next page in the - /// range, followed by an iterator of values in the page, fetched from the database. + /// This function is similar to `paginate_query`, but is specifically + /// designed for handling `RawQuery`. Treat the cursors of this page as + /// upper- and lower-bound filters for a database `query`. Returns two + /// booleans indicating whether there is a previous or next page in the + /// range, followed by an iterator of values in the page, fetched from the + /// database. /// - /// `checkpoint_viewed_at` is a required parameter to and passed to each element to construct a - /// consistent cursor. + /// `checkpoint_viewed_at` is a required parameter, and is passed to each + /// element to construct a consistent cursor. pub(crate) fn paginate_raw_query( &self, conn: &mut Conn<'_>, @@ -350,12 +364,14 @@ impl Page { )) } - /// Given the results of a database query, determine whether the result set has a previous and - /// next page and is consistent with the provided cursors. + /// Given the results of a database query, determine whether the result set + /// has a previous and next page and is consistent with the provided + /// cursors.
/// - /// Returns two booleans indicating whether there is a previous or next page in the range, - /// followed by an iterator of values in the page, fetched from the database. The values - /// returned implement `Target`, so are able to compute their own cursors. + /// Returns two booleans indicating whether there is a previous or next page + /// in the range, followed by an iterator of values in the page, fetched + /// from the database. The values returned implement `Target`, so are + /// able to compute their own cursors. fn paginate_results( &self, f_cursor: Option, @@ -393,9 +409,9 @@ impl Page { let has_previous_page = after.is_some(); let prefix = has_previous_page as usize; - // If results end with the before cursor, we will at least need to trim one element - // from the suffix and we trim more off the end if there is more after applying the - // limit. + // If results end with the before cursor, we will at least need to trim one + // element from the suffix and we trim more off the end if + // there is more after applying the limit. let mut suffix = before.is_some_and(|b| *b == l) as usize; suffix += results.len().saturating_sub(self.limit() + prefix + suffix); let has_next_page = suffix > 0; @@ -416,14 +432,15 @@ impl Page { } }; - // If after trimming, we're going to return no elements, then forget whether there's a - // previous or next page, because there will be no start or end cursor for this page to - // anchor on. + // If after trimming, we're going to return no elements, then forget whether + // there's a previous or next page, because there will be no start or + // end cursor for this page to anchor on. if results.len() == prefix + suffix { return (false, false, vec![].into_iter()); } - // We finally made it -- trim the prefix and suffix rows from the result and send it! + // We finally made it -- trim the prefix and suffix rows from the result and + // send it! let mut results = results.into_iter(); if prefix > 0 { results.nth(prefix - 1); @@ -467,7 +484,8 @@ where Ok(JsonCursor(OpaqueCursor::decode_cursor(&s)?)) } - /// Just check that the value is a string, as we'll do more involved tests during parsing. + /// Just check that the value is a string, as we'll do more involved tests + /// during parsing. fn is_valid(value: &Value) -> bool { matches!(value, Value::String(_)) } @@ -491,7 +509,8 @@ where Ok(Self::decode_cursor(&s)?) } - /// Just check that the value is a string, as we'll do more involved tests during parsing. + /// Just check that the value is a string, as we'll do more involved tests + /// during parsing. fn is_valid(value: &Value) -> bool { matches!(value, Value::String(_)) } @@ -501,7 +520,8 @@ where } } -/// Wrapping implementation of `CursorType` directly forwarding to `OpaqueCursor`. +/// Wrapping implementation of `CursorType` directly forwarding to +/// `OpaqueCursor`. 
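The prefix/suffix bookkeeping described in `paginate_results` above is easy to misread in prose. A self-contained sketch of the trimming (the function name and the `starts_at_after`/`ends_at_before` flags are illustrative; in the patch those facts come from comparing the first and last rows against the cursors):

```rust
/// Sketch: trim a fetched result set down to one page.
/// `starts_at_after`: the first row matched the `after` cursor.
/// `ends_at_before`: the last row matched the `before` cursor.
fn trim_page<T>(
    mut rows: Vec<T>,
    starts_at_after: bool,
    ends_at_before: bool,
    limit: usize,
) -> (bool, bool, Vec<T>) {
    let prefix = starts_at_after as usize;
    let mut suffix = ends_at_before as usize;
    // Anything past `limit` (after accounting for the trimmed ends) is overflow.
    suffix += rows.len().saturating_sub(limit + prefix + suffix);
    let has_next = suffix > 0;
    // Nothing left to return: no cursors remain for prev/next pages to anchor on.
    if rows.len() <= prefix + suffix {
        return (false, false, Vec::new());
    }
    rows.truncate(rows.len() - suffix);
    (starts_at_after, has_next, rows.split_off(prefix))
}

fn main() {
    // Five rows, first row matched `after`, page limit 2: rows 1 and 2 survive.
    let (prev, next, page) = trim_page(vec![0, 1, 2, 3, 4], true, false, 2);
    assert!(prev && next);
    assert_eq!(page, vec![1, 2]);
}
```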
impl CursorType for JsonCursor where C: Send + Sync, @@ -566,7 +586,7 @@ impl fmt::Debug for BcsCursor { impl Clone for JsonCursor { fn clone(&self) -> Self { - JsonCursor::new(self.0 .0.clone()) + JsonCursor::new(self.0.0.clone()) } } @@ -593,9 +613,10 @@ impl Eq for BcsCursor {} #[cfg(test)] mod tests { - use super::*; use expect_test::expect; + use super::*; + #[test] fn test_default_page() { let config = ServiceConfig::default(); diff --git a/crates/sui-graphql-rpc/src/types/digest.rs b/crates/sui-graphql-rpc/src/types/digest.rs index 1f6726e461e..7a58d0dc85a 100644 --- a/crates/sui-graphql-rpc/src/types/digest.rs +++ b/crates/sui-graphql-rpc/src/types/digest.rs @@ -1,12 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::string_input::impl_string_input; +use std::{fmt, str::FromStr}; + use async_graphql::*; use fastcrypto::encoding::{Base58, Encoding}; -use std::{fmt, str::FromStr}; use sui_types::digests::{ObjectDigest, TransactionDigest}; +use super::string_input::impl_string_input; + pub(crate) const BASE58_DIGEST_LENGTH: usize = 32; #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] @@ -80,8 +82,7 @@ impl fmt::Display for Digest { #[cfg(test)] mod tests { - use super::Error; - use super::*; + use super::{Error, *}; #[test] fn test_base58_digest() { diff --git a/crates/sui-graphql-rpc/src/types/display.rs b/crates/sui-graphql-rpc/src/types/display.rs index 67fe2fd413c..ca1c52142ca 100644 --- a/crates/sui-graphql-rpc/src/types/display.rs +++ b/crates/sui-graphql-rpc/src/types/display.rs @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 use async_graphql::*; - use diesel::{ExpressionMethods, OptionalExtension, QueryDsl}; use move_core_types::annotated_value::{MoveStruct, MoveValue}; use sui_indexer::{models::display::StoredDisplay, schema::display}; +use sui_json_rpc_types::SuiMoveValue; use sui_types::TypeTag; use crate::{ data::{Db, DbConnection, QueryExecutor}, error::Error, }; -use sui_json_rpc_types::SuiMoveValue; pub(crate) struct Display { pub stored: StoredDisplay, @@ -63,7 +62,8 @@ impl Display { Ok(stored.map(|stored| Display { stored })) } - /// Render the fields defined by this `Display` from the contents of `struct_`. + /// Render the fields defined by this `Display` from the contents of + /// `struct_`. pub(crate) fn render(&self, struct_: &MoveStruct) -> Result, Error> { let event = self .stored diff --git a/crates/sui-graphql-rpc/src/types/dry_run_result.rs b/crates/sui-graphql-rpc/src/types/dry_run_result.rs index 16c5c7f180d..589c958c636 100644 --- a/crates/sui-graphql-rpc/src/types/dry_run_result.rs +++ b/crates/sui-graphql-rpc/src/types/dry_run_result.rs @@ -1,22 +1,27 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::base64::Base64; -use super::move_type::MoveType; -use super::transaction_block::{TransactionBlock, TransactionBlockInner}; -use super::transaction_block_kind::programmable::TransactionArgument; -use crate::error::Error; use async_graphql::*; use sui_json_rpc_types::{DevInspectResults, SuiExecutionResult}; -use sui_types::effects::TransactionEffects as NativeTransactionEffects; -use sui_types::transaction::TransactionData as NativeTransactionData; +use sui_types::{ + effects::TransactionEffects as NativeTransactionEffects, + transaction::TransactionData as NativeTransactionData, +}; + +use super::{ + base64::Base64, + move_type::MoveType, + transaction_block::{TransactionBlock, TransactionBlockInner}, + transaction_block_kind::programmable::TransactionArgument, +}; +use crate::error::Error; #[derive(Clone, Debug, SimpleObject)] pub(crate) struct DryRunResult { /// The error that occurred during dry run execution, if any. pub error: Option, - /// The intermediate results for each command of the dry run execution, including - /// contents of mutated references and return values. + /// The intermediate results for each command of the dry run execution, + /// including contents of mutated references and return values. pub results: Option>, /// The transaction block representing the dry run execution. pub transaction: Option, @@ -24,7 +29,8 @@ pub(crate) struct DryRunResult { #[derive(Clone, Debug, PartialEq, Eq, SimpleObject)] pub(crate) struct DryRunEffect { - /// Changes made to arguments that were mutably borrowed by each command in this transaction. + /// Changes made to arguments that were mutably borrowed by each command in + /// this transaction. pub mutated_references: Option>, /// Return results of each command in this transaction. diff --git a/crates/sui-graphql-rpc/src/types/dynamic_field.rs b/crates/sui-graphql-rpc/src/types/dynamic_field.rs index 6c6f7878b74..99c45fdb3d6 100644 --- a/crates/sui-graphql-rpc/src/types/dynamic_field.rs +++ b/crates/sui-graphql-rpc/src/types/dynamic_field.rs @@ -1,26 +1,32 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use async_graphql::connection::{Connection, CursorType, Edge}; -use async_graphql::*; +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, +}; use move_core_types::annotated_value::{self as A, MoveStruct}; -use sui_indexer::models::objects::StoredHistoryObject; -use sui_indexer::types::OwnerType; +use sui_indexer::{models::objects::StoredHistoryObject, types::OwnerType}; use sui_package_resolver::Resolver; use sui_types::dynamic_field::{derive_dynamic_field_id, DynamicFieldInfo, DynamicFieldType}; -use super::cursor::{Page, Target}; -use super::object::{self, deserialize_move_struct, Object, ObjectKind, ObjectLookupKey}; -use super::type_filter::ExactTypeFilter; use super::{ - base64::Base64, move_object::MoveObject, move_value::MoveValue, sui_address::SuiAddress, + base64::Base64, + cursor::{Page, Target}, + move_object::MoveObject, + move_value::MoveValue, + object::{self, deserialize_move_struct, Object, ObjectKind, ObjectLookupKey}, + sui_address::SuiAddress, + type_filter::ExactTypeFilter, +}; +use crate::{ + consistency::{build_objects_query, consistent_range, View}, + context_data::package_cache::PackageCache, + data::{Db, QueryExecutor}, + error::Error, + filter, + raw_query::RawQuery, }; -use crate::consistency::{build_objects_query, consistent_range, View}; -use crate::context_data::package_cache::PackageCache; -use crate::data::{Db, QueryExecutor}; -use crate::error::Error; -use crate::filter; -use crate::raw_query::RawQuery; pub(crate) struct DynamicField { pub super_: MoveObject, @@ -37,26 +43,29 @@ pub(crate) enum DynamicFieldValue { #[derive(InputObject)] // used as input object pub(crate) struct DynamicFieldName { /// The string type of the DynamicField's 'name' field. - /// A string representation of a Move primitive like 'u64', or a struct type like '0x2::kiosk::Listing' + /// A string representation of a Move primitive like 'u64', or a struct type + /// like '0x2::kiosk::Listing' pub type_: ExactTypeFilter, /// The Base64 encoded bcs serialization of the DynamicField's 'name' field. pub bcs: Base64, } -/// Dynamic fields are heterogeneous fields that can be added or removed at runtime, -/// and can have arbitrary user-assigned names. There are two sub-types of dynamic -/// fields: +/// Dynamic fields are heterogeneous fields that can be added or removed at +/// runtime, and can have arbitrary user-assigned names. There are two sub-types +/// of dynamic fields: /// -/// 1) Dynamic Fields can store any value that has the `store` ability, however an object -/// stored in this kind of field will be considered wrapped and will not be accessible -/// directly via its ID by external tools (explorers, wallets, etc) accessing storage. -/// 2) Dynamic Object Fields values must be Sui objects (have the `key` and `store` -/// abilities, and id: UID as the first field), but will still be directly accessible off-chain -/// via their object ID after being attached. +/// 1) Dynamic Fields can store any value that has the `store` ability; however, +/// an object stored in this kind of field will be considered wrapped and +/// will not be accessible directly via its ID by external tools (explorers, +/// wallets, etc) accessing storage. +/// 2) Dynamic Object Field values must be Sui objects (have the `key` and +/// `store` abilities, and id: UID as the first field), but will still be +/// directly accessible off-chain via their object ID after being attached.
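In both field kinds described above, the field's own ID is derived deterministically from the parent and the name (the file imports `derive_dynamic_field_id` for exactly this). A hypothetical std-only analogue, only to show that the ID is a pure function of (parent, name type, name bytes) — the real derivation uses a cryptographic hash producing a 32-byte ObjectID, not `DefaultHasher`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Hypothetical analogue of dynamic field ID derivation (not the real scheme).
fn derive_child_id(parent: [u8; 32], name_type: &str, name_bcs: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    parent.hash(&mut hasher);
    name_type.hash(&mut hasher);
    name_bcs.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let parent = [1u8; 32];
    let a = derive_child_id(parent, "u64", &42u64.to_le_bytes());
    let b = derive_child_id(parent, "u64", &43u64.to_le_bytes());
    assert_ne!(a, b); // different names yield different child IDs
}
```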
#[Object] impl DynamicField { - /// The string type, data, and serialized value of the DynamicField's 'name' field. - /// This field is used to uniquely identify a child of the parent object. + /// The string type, data, and serialized value of the DynamicField's 'name' + /// field. This field is used to uniquely identify a child of the parent + /// object. async fn name(&self, ctx: &Context<'_>) -> Result> { let resolver: &Resolver = ctx .data() @@ -95,14 +104,16 @@ impl DynamicField { } /// The actual data stored in the dynamic field. - /// The returned dynamic field is an object if its return type is MoveObject, - /// in which case it is also accessible off-chain via its address. + /// The returned dynamic field is an object if its return type is + /// MoveObject, in which case it is also accessible off-chain via its + /// address. async fn value(&self, ctx: &Context<'_>) -> Result> { if self.df_kind == DynamicFieldType::DynamicObject { - // If `df_kind` is a DynamicObject, the object we are currently on is the field object, - // and we must resolve one more level down to the value object. Becuase we only have - // checkpoint-level granularity, we may end up reading a later version of the value - // object. Thus, we use the version of the field object to bound the value object at the + // If `df_kind` is a DynamicObject, the object we are currently on is the field + // object, and we must resolve one more level down to the value + // object. Because we only have checkpoint-level granularity, we may + // end up reading a later version of the value object. Thus, we use + // the version of the field object to bound the value object at the // correct version. let obj = MoveObject::query( ctx.data_unchecked(), @@ -146,11 +157,13 @@ impl DynamicField { } impl DynamicField { - /// Fetch a single dynamic field entry from the `db`, on `parent` object, with field name - /// `name`, and kind `kind` (dynamic field or dynamic object field). The dynamic field is bound - /// by the `parent_version` if provided - the fetched field will be the latest version at or - /// before the provided version. If `parent_version` is not provided, the latest version of the - /// field is returned as bounded by the `checkpoint_viewed_at` parameter. + /// Fetch a single dynamic field entry from the `db`, on `parent` object, + /// with field name `name`, and kind `kind` (dynamic field or dynamic + /// object field). The dynamic field is bound by the `parent_version` if + /// provided - the fetched field will be the latest version at or before + /// the provided version. If `parent_version` is not provided, the latest + /// version of the field is returned as bounded by the + /// `checkpoint_viewed_at` parameter. pub(crate) async fn query( db: &Db, parent: SuiAddress, @@ -184,11 +197,12 @@ impl DynamicField { super_.map(Self::try_from).transpose() } - /// Query the `db` for a `page` of dynamic fields attached to object with ID `parent`. The - /// returned dynamic fields are bound by the `parent_version` if provided - each field will be - /// the latest version at or before the provided version. If `parent_version` is not provided, - /// the latest version of each field is returned as bounded by the `checkpoint_viewed-at` - /// parameter.` + /// Query the `db` for a `page` of dynamic fields attached to object with ID + /// `parent`. The returned dynamic fields are bound by the + /// `parent_version` if provided - each field will be the latest version + /// at or before the provided version.
If `parent_version` is not provided, + /// the latest version of each field is returned as bounded by the + /// `checkpoint_viewed_at` parameter. pub(crate) async fn paginate( db: &Db, page: Page, @@ -196,9 +210,10 @@ impl DynamicField { parent_version: Option, checkpoint_viewed_at: Option, ) -> Result, Error> { - // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if they are - // consistent. Otherwise, use the value from the parameter, or set to None. This is so that - // paginated queries are consistent with the previous query that created the cursor. + // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if + // they are consistent. Otherwise, use the value from the parameter, or + // set to None. This is so that paginated queries are consistent with + // the previous query that created the cursor. let cursor_viewed_at = page.validate_cursor_consistency()?; let checkpoint_viewed_at: Option = cursor_viewed_at.or(checkpoint_viewed_at); @@ -226,8 +241,8 @@ impl DynamicField { let mut conn: Connection = Connection::new(prev, next); for stored in results { - // To maintain consistency, the returned cursor should have the same upper-bound as the - // checkpoint found on the cursor. + // To maintain consistency, the returned cursor should have the same upper-bound + // as the checkpoint found on the cursor. let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); let object = @@ -270,7 +285,7 @@ impl TryFrom for DynamicField { return Err(Error::Internal( "A WrappedOrDeleted object cannot be converted into a DynamicField." .to_string(), - )) + )); } }; @@ -284,7 +299,7 @@ impl TryFrom for DynamicField { Some(k) => { return Err(Error::Internal(format!( "Unrecognized dynamic field kind: {k}." - ))) + ))); } None => return Err(Error::Internal("No dynamic field kind.".to_string())), }; @@ -314,16 +329,17 @@ pub fn extract_field_from_move_struct( .ok_or_else(|| Error::Internal(format!("Field '{}' not found", field_name))) } -/// Builds the `RawQuery` for fetching dynamic fields attached to a parent object. If -/// `parent_version` is null, the latest version of each field within the given checkpoint range -/// [`lhs`, `rhs`] is returned, conditioned on the fact that there is not a more recent version of -/// the field. +/// Builds the `RawQuery` for fetching dynamic fields attached to a parent +/// object. If `parent_version` is null, the latest version of each field within +/// the given checkpoint range [`lhs`, `rhs`] is returned, conditioned on the +/// fact that there is not a more recent version of the field. /// -/// If `parent_version` is provided, it is used to bound both the `candidates` and `newer` objects -/// subqueries. This is because the dynamic fields of a parent at version v are dynamic fields owned -/// by the parent whose versions are <= v. Unlike object ownership, where owned and owner objects -/// can have arbitrary `object_version`s, dynamic fields on a parent cannot have a version greater -/// than its parent. +/// If `parent_version` is provided, it is used to bound both the `candidates` +/// and `newer` objects subqueries. This is because the dynamic fields of a +/// parent at version v are dynamic fields owned by the parent whose versions +/// are <= v. Unlike object ownership, where owned and owner objects +/// can have arbitrary `object_version`s, dynamic fields on a parent cannot have +/// a version greater than its parent.
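The selection rule documented for `dynamic_fields_query` above — the latest version at or before `parent_version` — is a one-liner over in-memory rows; a sketch (in the patch the real work happens in SQL via the `candidates`/`newer` subqueries):

```rust
/// Sketch: latest (version, value) pair at or before an optional version bound.
fn field_at_version<'a>(
    versions: &'a [(u64, &'a str)],
    bound: Option<u64>,
) -> Option<(u64, &'a str)> {
    versions
        .iter()
        .copied()
        .filter(|(v, _)| bound.map_or(true, |b| *v <= b))
        .max_by_key(|(v, _)| *v)
}

fn main() {
    let history = [(3, "a"), (5, "b"), (9, "c")];
    assert_eq!(field_at_version(&history, Some(6)), Some((5, "b")));
    assert_eq!(field_at_version(&history, None), Some((9, "c")));
}
```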
fn dynamic_fields_query( parent: SuiAddress, parent_version: Option, diff --git a/crates/sui-graphql-rpc/src/types/epoch.rs b/crates/sui-graphql-rpc/src/types/epoch.rs index faa730cafe5..18987cf0e5d 100644 --- a/crates/sui-graphql-rpc/src/types/epoch.rs +++ b/crates/sui-graphql-rpc/src/types/epoch.rs @@ -3,56 +3,63 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; -use crate::context_data::db_data_provider::{convert_to_validators, PgManager}; -use crate::data::{Db, DbConnection, QueryExecutor}; -use crate::error::Error; - -use super::big_int::BigInt; -use super::checkpoint::{self, Checkpoint, CheckpointId}; -use super::cursor::Page; -use super::date_time::DateTime; -use super::protocol_config::ProtocolConfigs; -use super::system_state_summary::SystemStateSummary; -use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; -use super::validator_set::ValidatorSet; -use async_graphql::connection::Connection; -use async_graphql::dataloader::{DataLoader, Loader}; -use async_graphql::*; +use async_graphql::{ + connection::Connection, + dataloader::{DataLoader, Loader}, + *, +}; use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper}; use fastcrypto::encoding::{Base58, Encoding}; -use sui_indexer::models::epoch::QueryableEpochInfo; -use sui_indexer::schema::epochs; +use sui_indexer::{models::epoch::QueryableEpochInfo, schema::epochs}; use sui_types::messages_checkpoint::CheckpointCommitment as EpochCommitment; +use super::{ + big_int::BigInt, + checkpoint::{self, Checkpoint, CheckpointId}, + cursor::Page, + date_time::DateTime, + protocol_config::ProtocolConfigs, + system_state_summary::SystemStateSummary, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, + validator_set::ValidatorSet, +}; +use crate::{ + context_data::db_data_provider::{convert_to_validators, PgManager}, + data::{Db, DbConnection, QueryExecutor}, + error::Error, +}; + #[derive(Clone)] pub(crate) struct Epoch { pub stored: QueryableEpochInfo, pub checkpoint_viewed_at: Option, } -/// DataLoader key for fetching an `Epoch` by its ID, optionally constrained by a consistency -/// cursor. +/// DataLoader key for fetching an `Epoch` by its ID, optionally constrained by +/// a consistency cursor. #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] struct EpochKey { pub epoch_id: u64, pub checkpoint_viewed_at: Option, } -/// Operation of the Sui network is temporally partitioned into non-overlapping epochs, -/// and the network aims to keep epochs roughly the same duration as each other. -/// During a particular epoch the following data is fixed: +/// Operation of the Sui network is temporally partitioned into non-overlapping +/// epochs, and the network aims to keep epochs roughly the same duration as +/// each other. During a particular epoch the following data is fixed: /// /// - the protocol version /// - the reference gas price /// - the set of participating validators #[Object] impl Epoch { - /// The epoch's id as a sequence number that starts at 0 and is incremented by one at every epoch change. + /// The epoch's id as a sequence number that starts at 0 and is incremented + /// by one at every epoch change. async fn epoch_id(&self) -> u64 { self.stored.epoch as u64 } - /// The minimum gas price that a quorum of validators are guaranteed to sign a transaction for. + /// The minimum gas price that a quorum of validators are guaranteed to sign + /// a transaction for. 
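One simplified mental model for the `reference_gas_price` field above (an assumption about the quote-survey mechanism, not code from this patch): validators submit price quotes, and the reference price is the smallest quote such that validators quoting at or below it hold a 2/3 quorum of total stake. A sketch under that assumption:

```rust
/// Simplified model: (quote, stake) pairs; pick the smallest quote such that
/// validators quoting at or below it hold at least 2/3 of the total stake.
fn reference_gas_price(mut quotes: Vec<(u64, u64)>) -> Option<u64> {
    let total: u64 = quotes.iter().map(|&(_, stake)| stake).sum();
    quotes.sort_by_key(|&(price, _)| price);
    let mut covered = 0u64;
    for (price, stake) in quotes {
        covered += stake;
        if 3 * covered >= 2 * total {
            return Some(price);
        }
    }
    None
}

fn main() {
    let quotes = vec![(100, 40), (120, 30), (500, 30)];
    // 40 + 30 = 70 out of 100 stake covers the 2/3 quorum at price 120.
    assert_eq!(reference_gas_price(quotes), Some(120));
}
```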
async fn reference_gas_price(&self) -> Option { Some(BigInt::from(self.stored.reference_gas_price as u64)) } @@ -164,14 +171,14 @@ impl Epoch { self.stored.storage_charge.map(BigInt::from) } - /// The storage fee rebates paid to users who deleted the data associated with past - /// transactions. + /// The storage fee rebates paid to users who deleted the data associated + /// with past transactions. async fn fund_outflow(&self) -> Option { self.stored.storage_rebate.map(BigInt::from) } - /// The epoch's corresponding protocol configuration, including the feature flags and the - /// configuration options. + /// The epoch's corresponding protocol configuration, including the feature + /// flags and the configuration options. async fn protocol_configs(&self, ctx: &Context<'_>) -> Result { ProtocolConfigs::query(ctx.data_unchecked(), Some(self.protocol_version())) .await @@ -187,8 +194,9 @@ impl Epoch { Ok(SystemStateSummary { native: state }) } - /// A commitment by the committee at the end of epoch on the contents of the live object set at - /// that time. This can be used to verify state snapshots. + /// A commitment by the committee at the end of epoch on the contents of the + /// live object set at that time. This can be used to verify state + /// snapshots. async fn live_object_set_digest(&self) -> Result> { let Some(commitments) = self.stored.epoch_commitments.as_ref() else { return Ok(None); @@ -268,8 +276,8 @@ impl Epoch { self.stored.protocol_version as u64 } - /// Look up an `Epoch` in the database, optionally filtered by its Epoch ID. If no ID is - /// supplied, defaults to fetching the latest epoch. + /// Look up an `Epoch` in the database, optionally filtered by its Epoch ID. + /// If no ID is supplied, defaults to fetching the latest epoch. pub(crate) async fn query( ctx: &Context<'_>, filter: Option, @@ -287,9 +295,9 @@ impl Epoch { } } - /// Look up the latest `Epoch` from the database, optionally filtered by a consistency cursor - /// (querying for a consistency cursor in the past looks for the latest epoch as of that - /// cursor). + /// Look up the latest `Epoch` from the database, optionally filtered by a + /// consistency cursor (querying for a consistency cursor in the past + /// looks for the latest epoch as of that cursor). pub(crate) async fn query_latest_at( db: &Db, checkpoint_viewed_at: Option, @@ -367,9 +375,10 @@ impl Loader for Db { checkpoint_viewed_at: key.checkpoint_viewed_at, }; - // We filter by checkpoint viewed at in memory because it should be quite rare that - // this query actually filters something (only in edge cases), and not trying to - // encode it in the SQL query makes the query much simpler and therefore easier for + // We filter by checkpoint viewed at in memory because it should be quite rare + // that this query actually filters something (only in edge + // cases), and not trying to encode it in the SQL query makes + // the query much simpler and therefore easier for // the DB to plan. 
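The in-memory filter described in the comment above (filter by checkpoint viewed at, rather than encoding it in SQL) is the same `matches!` guard used for checkpoints earlier in this patch; extracted as a sketch:

```rust
/// Sketch: an epoch is visible only if the consistency cursor is at or past
/// its first checkpoint.
fn epoch_visible(first_checkpoint_id: u64, checkpoint_viewed_at: Option<u64>) -> bool {
    !matches!(checkpoint_viewed_at, Some(cp) if cp < first_checkpoint_id)
}

fn main() {
    assert!(epoch_visible(100, None)); // no cursor: always visible
    assert!(epoch_visible(100, Some(150)));
    assert!(!epoch_visible(100, Some(99))); // viewed before the epoch began
}
```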
let start = epoch.stored.first_checkpoint_id as u64; if matches!(key.checkpoint_viewed_at, Some(cp) if cp < start) { diff --git a/crates/sui-graphql-rpc/src/types/event.rs b/crates/sui-graphql-rpc/src/types/event.rs index 91a3a2cc733..ced04864069 100644 --- a/crates/sui-graphql-rpc/src/types/event.rs +++ b/crates/sui-graphql-rpc/src/types/event.rs @@ -3,27 +3,38 @@ use std::str::FromStr; -use super::checkpoint::Checkpoint; -use super::cursor::{self, Page, Paginated, Target}; -use super::digest::Digest; -use super::type_filter::{ModuleFilter, TypeFilter}; -use super::{ - address::Address, base64::Base64, date_time::DateTime, move_module::MoveModule, - move_value::MoveValue, sui_address::SuiAddress, +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, }; -use crate::consistency::Checkpointed; -use crate::data::{self, QueryExecutor}; -use crate::{data::Db, error::Error}; -use async_graphql::connection::{Connection, CursorType, Edge}; -use async_graphql::*; use diesel::{BoolExpressionMethods, ExpressionMethods, NullableExpressionMethods, QueryDsl}; use serde::{Deserialize, Serialize}; -use sui_indexer::models::{events::StoredEvent, transactions::StoredTransaction}; -use sui_indexer::schema::{events, transactions, tx_senders}; -use sui_types::base_types::ObjectID; -use sui_types::Identifier; +use sui_indexer::{ + models::{events::StoredEvent, transactions::StoredTransaction}, + schema::{events, transactions, tx_senders}, +}; use sui_types::{ - base_types::SuiAddress as NativeSuiAddress, event::Event as NativeEvent, parse_sui_struct_tag, + base_types::{ObjectID, SuiAddress as NativeSuiAddress}, + event::Event as NativeEvent, + parse_sui_struct_tag, Identifier, +}; + +use super::{ + address::Address, + base64::Base64, + checkpoint::Checkpoint, + cursor::{self, Page, Paginated, Target}, + date_time::DateTime, + digest::Digest, + move_module::MoveModule, + move_value::MoveValue, + sui_address::SuiAddress, + type_filter::{ModuleFilter, TypeFilter}, +}; +use crate::{ + consistency::Checkpointed, + data::{self, Db, QueryExecutor}, + error::Error, }; /// A Sui node emits one of the following events: @@ -140,18 +151,20 @@ impl Event { } impl Event { - /// Query the database for a `page` of events. The Page uses the transaction, event, and - /// checkpoint sequence numbers as the cursor to determine the correct page of results. The - /// query can optionally be further `filter`-ed by the `EventFilter`. + /// Query the database for a `page` of events. The Page uses the + /// transaction, event, and checkpoint sequence numbers as the cursor to + /// determine the correct page of results. The query can optionally be + /// further `filter`-ed by the `EventFilter`. /// /// The `checkpoint_viewed_at` parameter is an Option representing the - /// checkpoint_sequence_number at which this page was queried for, or `None` if the data was - /// requested at the latest checkpoint. Each entity returned in the connection will inherit this - /// checkpoint, so that when viewing that entity's state, it will be from the reference of this + /// checkpoint_sequence_number at which this page was queried for, or `None` + /// if the data was requested at the latest checkpoint. Each entity + /// returned in the connection will inherit this checkpoint, so that + /// when viewing that entity's state, it will be from the reference of this /// checkpoint_viewed_at parameter. /// - /// If the `Page` is set, then this function will defer to the `checkpoint_viewed_at` in - /// the cursor if they are consistent. 
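// One reading of the consistency rule described above, as a small helper
// (hypothetical, not a function in this crate): an explicit
// `checkpoint_viewed_at` is used as-is, otherwise the page defers to the
// checkpoint recorded in the cursor, and `None` still means "latest checkpoint".

fn effective_checkpoint(explicit: Option<u64>, from_cursor: Option<u64>) -> Option<u64> {
    explicit.or(from_cursor)
}

// effective_checkpoint(Some(9), Some(7)) == Some(9)
// effective_checkpoint(None, Some(7)) == Some(7)
// effective_checkpoint(None, None) == None // latest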
+ /// If the `Page` is set, then this function will defer to the + /// `checkpoint_viewed_at` in the cursor if they are consistent. pub(crate) async fn paginate( db: &Db, page: Page, @@ -181,8 +194,8 @@ impl Event { events::dsl::checkpoint_sequence_number.le(checkpoint_viewed_at as i64), ); - // The transactions table doesn't have an index on the senders column, so use - // `tx_senders`. + // The transactions table doesn't have an index on the senders column, so + // use `tx_senders`. if let Some(sender) = &filter.sender { query = query.filter( events::dsl::tx_sequence_number.eq_any( @@ -230,8 +243,9 @@ impl Event { let mut conn = Connection::new(prev, next); - // Defer to the provided checkpoint_viewed_at, but if it is not provided, use the - // current available range. This sets a consistent upper bound for the nested queries. + // Defer to the provided checkpoint_viewed_at, but if it is not provided, use + // the current available range. This sets a consistent upper bound for + // the nested queries. for stored in results { let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); conn.edges.push(Edge::new( diff --git a/crates/sui-graphql-rpc/src/types/execution_result.rs b/crates/sui-graphql-rpc/src/types/execution_result.rs index def1db5433b..86f0504d934 100644 --- a/crates/sui-graphql-rpc/src/types/execution_result.rs +++ b/crates/sui-graphql-rpc/src/types/execution_result.rs @@ -1,17 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::transaction_block_effects::TransactionBlockEffects; use async_graphql::*; -/// The result of an execution, including errors that occurred during said execution. +use super::transaction_block_effects::TransactionBlockEffects; + +/// The result of an execution, including errors that occurred during said +/// execution. #[derive(SimpleObject, Clone)] pub(crate) struct ExecutionResult { /// The errors field captures any errors that occurred during execution pub errors: Option>, - /// The effects of the executed transaction. Since the transaction was just executed - /// and not indexed yet, fields including `balance_changes`, `timestamp` and `checkpoint` - /// are not available. + /// The effects of the executed transaction. Since the transaction was just + /// executed and not indexed yet, fields including `balance_changes`, + /// `timestamp` and `checkpoint` are not available. pub effects: TransactionBlockEffects, } diff --git a/crates/sui-graphql-rpc/src/types/gas.rs b/crates/sui-graphql-rpc/src/types/gas.rs index 401377aa7a4..8a1ce250117 100644 --- a/crates/sui-graphql-rpc/src/types/gas.rs +++ b/crates/sui-graphql-rpc/src/types/gas.rs @@ -1,20 +1,21 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::types::object::Object; -use async_graphql::connection::Connection; -use async_graphql::*; +use async_graphql::{connection::Connection, *}; use sui_types::{ effects::{TransactionEffects as NativeTransactionEffects, TransactionEffectsAPI}, gas::GasCostSummary as NativeGasCostSummary, transaction::GasData, }; -use super::{address::Address, big_int::BigInt, object::ObjectLookupKey, sui_address::SuiAddress}; use super::{ + address::Address, + big_int::BigInt, cursor::Page, - object::{self, ObjectFilter, ObjectKey}, + object::{self, ObjectFilter, ObjectKey, ObjectLookupKey}, + sui_address::SuiAddress, }; +use crate::types::object::Object; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct GasInput { @@ -22,8 +23,8 @@ pub(crate) struct GasInput { pub price: u64, pub budget: u64, pub payment_obj_keys: Vec, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None if + /// the data was requested at the latest checkpoint. pub checkpoint_viewed_at: Option, } @@ -44,7 +45,8 @@ pub(crate) struct GasEffects { pub checkpoint_viewed_at: u64, } -/// Configuration for this transaction's gas price and the coins used to pay for gas. +/// Configuration for this transaction's gas price and the coins used to pay for +/// gas. #[Object] impl GasInput { /// Address of the owner of the gas object(s) used @@ -64,12 +66,13 @@ impl GasInput { last: Option, before: Option, ) -> Result> { - // A possible user error during dry run or execution would be to supply a gas payment that - // is not a Move object (i.e a package). Even though the transaction would fail to run, this - // service will still attempt to present execution results. If the return type of this field - // is a `MoveObject`, then GraphQL will fail on the top-level with an internal error. - // Instead, we return an `Object` here, so that the rest of the `TransactionBlock` will - // still be viewable. + // A possible user error during dry run or execution would be to supply a gas + // payment that is not a Move object (i.e. a package). Even though the + // transaction would fail to run, this service will still attempt to + // present execution results. If the return type of this field + // is a `MoveObject`, then GraphQL will fail on the top-level with an internal + // error. Instead, we return an `Object` here, so that the rest of the + // `TransactionBlock` will still be viewable. let page = Page::from_params(ctx.data_unchecked(), first, after, last, before)?; let filter = ObjectFilter { @@ -87,13 +90,14 @@ impl GasInput { .extend() } - /// An unsigned integer specifying the number of native tokens per gas unit this transaction - /// will pay (in MIST). + /// An unsigned integer specifying the number of native tokens per gas unit + /// this transaction will pay (in MIST).
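// The MIST quantities documented above combine into what a transaction
// actually pays: computation plus storage charges, minus the storage rebate.
// A sketch of that identity (field names mirror `GasCostSummary`; the signed
// result is an assumption here, since a large rebate can outweigh new charges):

fn net_gas_usage(computation_cost: u64, storage_cost: u64, storage_rebate: u64) -> i64 {
    (computation_cost + storage_cost) as i64 - storage_rebate as i64
}

// e.g. net_gas_usage(1_000_000, 2_000_000, 2_500_000) == 500_000 MIST.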
async fn gas_price(&self) -> Option { Some(BigInt::from(self.price)) } - /// The maximum number of gas units that can be expended by executing this transaction + /// The maximum number of gas units that can be expended by executing this + /// transaction async fn gas_budget(&self) -> Option { Some(BigInt::from(self.budget)) } @@ -112,21 +116,23 @@ impl GasCostSummary { Some(BigInt::from(self.storage_cost)) } - /// Part of storage cost that can be reclaimed by cleaning up data created by this transaction - /// (when objects are deleted or an object is modified, which is treated as a deletion followed - /// by a creation) (in MIST). + /// Part of storage cost that can be reclaimed by cleaning up data created + /// by this transaction (when objects are deleted or an object is + /// modified, which is treated as a deletion followed by a creation) (in + /// MIST). async fn storage_rebate(&self) -> Option { Some(BigInt::from(self.storage_rebate)) } - /// Part of storage cost that is not reclaimed when data created by this transaction is cleaned - /// up (in MIST). + /// Part of storage cost that is not reclaimed when data created by this + /// transaction is cleaned up (in MIST). async fn non_refundable_storage_fee(&self) -> Option { Some(BigInt::from(self.non_refundable_storage_fee)) } } -/// Effects related to gas (costs incurred and the identity of the smashed gas object returned). +/// Effects related to gas (costs incurred and the identity of the smashed gas +/// object returned). #[Object] impl GasEffects { async fn gas_object(&self, ctx: &Context<'_>) -> Result> { @@ -148,9 +154,10 @@ impl GasEffects { } impl GasEffects { - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `GasEffects` - /// was queried for, or `None` if the data was requested at the latest checkpoint. This is - /// stored on `GasEffects` so that when viewing that entity's state, it will be as if it was + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `GasEffects` was queried for, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `GasEffects` + /// so that when viewing that entity's state, it will be as if it was /// read at the same checkpoint. pub(crate) fn from(effects: &NativeTransactionEffects, checkpoint_viewed_at: u64) -> Self { let ((id, version, _digest), _owner) = effects.gas_object(); @@ -164,9 +171,10 @@ impl GasEffects { } impl GasInput { - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `GasInput` - /// was queried for, or `None` if the data was requested at the latest checkpoint. This is - /// stored on `GasInput` so that when viewing that entity's state, it will be as if it was read + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `GasInput` was queried for, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `GasInput` so + /// that when viewing that entity's state, it will be as if it was read /// at the same checkpoint. pub(crate) fn from(s: &GasData, checkpoint_viewed_at: Option) -> Self { Self { diff --git a/crates/sui-graphql-rpc/src/types/intersect.rs b/crates/sui-graphql-rpc/src/types/intersect.rs index ffc4671fc3a..d6d10fcd228 100644 --- a/crates/sui-graphql-rpc/src/types/intersect.rs +++ b/crates/sui-graphql-rpc/src/types/intersect.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -/// Merges two filter fields. 
If both values exist, `merge` is used to combine them, which returns -/// some combined value if there is some consistent combination, and `None` otherwise. The overall -/// function returns `Some(None)`, if the filters combined to no filter, `Some(Some(f))` if the +/// Merges two filter fields. If both values exist, `merge` is used to combine +/// them, which returns some combined value if there is some consistent +/// combination, and `None` otherwise. The overall function returns +/// `Some(None)`, if the filters combined to no filter, `Some(Some(f))` if the /// filters combined to `f`, and `None` if the filters couldn't be combined. pub(crate) fn field( this: Option, that: Option, @@ -18,7 +19,8 @@ } } -/// Merge options by equality check (equal values get merged, everything else is inconsistent). +/// Merge options by equality check (equal values get merged, everything else is +/// inconsistent). pub(crate) fn by_eq(a: T, b: T) -> Option { (a == b).then_some(a) } diff --git a/crates/sui-graphql-rpc/src/types/move_function.rs b/crates/sui-graphql-rpc/src/types/move_function.rs index 7325cbb2c3a..001909d1e97 100644 --- a/crates/sui-graphql-rpc/src/types/move_function.rs +++ b/crates/sui-graphql-rpc/src/types/move_function.rs @@ -4,13 +4,12 @@ use async_graphql::*; use sui_package_resolver::FunctionDef; -use crate::{data::Db, error::Error}; - use super::{ move_module::MoveModule, open_move_type::{abilities, MoveAbility, MoveVisibility, OpenMoveType}, sui_address::SuiAddress, }; +use crate::{data::Db, error::Error}; pub(crate) struct MoveFunction { package: SuiAddress, @@ -68,22 +67,23 @@ impl MoveFunction { Some(self.is_entry) } - /// Constraints on the function's formal type parameters. Move bytecode does not name type - /// parameters, so when they are referenced (e.g. in parameter and return types) they are - /// identified by their index in this list. + /// Constraints on the function's formal type parameters. Move bytecode + /// does not name type parameters, so when they are referenced (e.g. in + /// parameter and return types) they are identified by their index in + /// this list. async fn type_parameters(&self) -> Option<&Vec> { Some(&self.type_parameters) } - /// The function's parameter types. These types can reference type parameters introduce by this - /// function (see `typeParameters`). + /// The function's parameter types. These types can reference type + /// parameters introduced by this function (see `typeParameters`). async fn parameters(&self) -> Option<&Vec> { Some(&self.parameters) } - /// The function's return types. There can be multiple because functions in Move can return - /// multiple values. These types can reference type parameters introduced by this function (see - /// `typeParameters`). + /// The function's return types. There can be multiple because functions in + /// Move can return multiple values. These types can reference type + /// parameters introduced by this function (see + /// `typeParameters`). #[graphql(name = "return")] async fn return_(&self) -> Option<&Vec> { Some(&self.return_) diff --git a/crates/sui-graphql-rpc/src/types/move_module.rs b/crates/sui-graphql-rpc/src/types/move_module.rs index 9b035b2f8e2..4361af3efd9 100644 --- a/crates/sui-graphql-rpc/src/types/move_module.rs +++ b/crates/sui-graphql-rpc/src/types/move_module.rs @@ -1,23 +1,29 @@ // Copyright (c) Mysten Labs, Inc.
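// The `intersect::field`/`by_eq` pair above composes two optional filters
// field-by-field. A self-contained sketch of the semantics just documented
// (helpers restated with concrete signatures; `demo` is illustrative only):

fn field<T>(
    this: Option<T>,
    that: Option<T>,
    merge: impl FnOnce(T, T) -> Option<T>,
) -> Option<Option<T>> {
    match (this, that) {
        // Both sides present: combinable only if `merge` succeeds.
        (Some(a), Some(b)) => merge(a, b).map(Some),
        // One side present: it survives unchanged.
        (Some(a), None) => Some(Some(a)),
        (None, Some(b)) => Some(Some(b)),
        // Neither side present: the combination is "no filter".
        (None, None) => Some(None),
    }
}

fn by_eq<T: PartialEq>(a: T, b: T) -> Option<T> {
    (a == b).then_some(a)
}

fn demo() {
    assert_eq!(field(Some(1u64), Some(1), by_eq), Some(Some(1))); // agree: merged
    assert_eq!(field(Some(1u64), Some(2), by_eq), None); // disagree: inconsistent
    assert_eq!(field(None, Some(2u64), by_eq), Some(Some(2))); // one side wins
}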
// SPDX-License-Identifier: Apache-2.0 -use async_graphql::connection::{Connection, CursorType, Edge}; -use async_graphql::*; -use move_binary_format::access::ModuleAccess; -use move_binary_format::binary_views::BinaryIndexedView; +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, +}; +use move_binary_format::{access::ModuleAccess, binary_views::BinaryIndexedView}; use move_disassembler::disassembler::Disassembler; use move_ir_types::location::Loc; - -use crate::consistency::{ConsistentIndexCursor, ConsistentNamedCursor}; -use crate::data::Db; -use crate::error::Error; use sui_package_resolver::Module as ParsedMoveModule; -use super::cursor::{JsonCursor, Page}; -use super::move_function::MoveFunction; -use super::move_struct::MoveStruct; -use super::object::ObjectLookupKey; -use super::{base64::Base64, move_package::MovePackage, sui_address::SuiAddress}; +use super::{ + base64::Base64, + cursor::{JsonCursor, Page}, + move_function::MoveFunction, + move_package::MovePackage, + move_struct::MoveStruct, + object::ObjectLookupKey, + sui_address::SuiAddress, +}; +use crate::{ + consistency::{ConsistentIndexCursor, ConsistentNamedCursor}, + data::Db, + error::Error, +}; #[derive(Clone)] pub(crate) struct MoveModule { @@ -65,8 +71,8 @@ impl MoveModule { self.parsed.bytecode().version } - /// Modules that this module considers friends (these modules can access `public(friend)` - /// functions from this module). + /// Modules that this module considers friends (these modules can access + /// `public(friend)` functions from this module). async fn friends( &self, ctx: &Context<'_>, @@ -104,8 +110,8 @@ impl MoveModule { .extend()); }; - // Select `friend_decls[lo..hi]` using iterators to enumerate before taking a sub-sequence - // from it, to get pairs `(i, friend_decls[i])`. + // Select `friend_decls[lo..hi]` using iterators to enumerate before taking a + // sub-sequence from it, to get pairs `(i, friend_decls[i])`. for c in cs { let decl = &bytecode.friend_decls[c.ix]; let friend_pkg = bytecode.address_identifier_at(decl.address); diff --git a/crates/sui-graphql-rpc/src/types/move_object.rs b/crates/sui-graphql-rpc/src/types/move_object.rs index dc082151271..e809ad77992 100644 --- a/crates/sui-graphql-rpc/src/types/move_object.rs +++ b/crates/sui-graphql-rpc/src/types/move_object.rs @@ -1,32 +1,33 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::balance::{self, Balance}; -use super::base64::Base64; -use super::big_int::BigInt; -use super::coin::CoinDowncastError; -use super::coin_metadata::{CoinMetadata, CoinMetadataDowncastError}; -use super::cursor::Page; -use super::display::DisplayEntry; -use super::dynamic_field::{DynamicField, DynamicFieldName}; -use super::move_type::MoveType; -use super::move_value::MoveValue; -use super::object::{self, ObjectFilter, ObjectImpl, ObjectLookupKey, ObjectOwner, ObjectStatus}; -use super::owner::OwnerImpl; -use super::stake::StakedSuiDowncastError; -use super::sui_address::SuiAddress; -use super::suins_registration::{DomainFormat, SuinsRegistration, SuinsRegistrationDowncastError}; -use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; -use super::type_filter::ExactTypeFilter; -use super::{coin::Coin, object::Object}; -use crate::data::Db; -use crate::error::Error; -use crate::types::stake::StakedSui; -use async_graphql::connection::Connection; -use async_graphql::*; +use async_graphql::{connection::Connection, *}; use sui_json_rpc::name_service::NameServiceConfig; -use sui_types::object::{Data, MoveObject as NativeMoveObject}; -use sui_types::TypeTag; +use sui_types::{ + object::{Data, MoveObject as NativeMoveObject}, + TypeTag, +}; + +use super::{ + balance::{self, Balance}, + base64::Base64, + big_int::BigInt, + coin::{Coin, CoinDowncastError}, + coin_metadata::{CoinMetadata, CoinMetadataDowncastError}, + cursor::Page, + display::DisplayEntry, + dynamic_field::{DynamicField, DynamicFieldName}, + move_type::MoveType, + move_value::MoveValue, + object::{self, Object, ObjectFilter, ObjectImpl, ObjectLookupKey, ObjectOwner, ObjectStatus}, + owner::OwnerImpl, + stake::StakedSuiDowncastError, + sui_address::SuiAddress, + suins_registration::{DomainFormat, SuinsRegistration, SuinsRegistrationDowncastError}, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, + type_filter::ExactTypeFilter, +}; +use crate::{data::Db, error::Error, types::stake::StakedSui}; #[derive(Clone)] pub(crate) struct MoveObject { @@ -46,8 +47,8 @@ pub(crate) enum MoveObjectDowncastError { NotAMoveObject, } -/// This interface is implemented by types that represent a Move object on-chain (A Move value whose -/// type has `key`). +/// This interface is implemented by types that represent a Move object on-chain +/// (A Move value whose type has `key`). #[derive(Interface)] #[graphql( name = "IMoveObject", @@ -114,8 +115,9 @@ pub(crate) enum IMoveObject { SuinsRegistration(SuinsRegistration), } -/// The representation of an object as a Move Object, which exposes additional information -/// (content, module that governs it, version, is transferrable, etc.) about this object. +/// The representation of an object as a Move Object, which exposes additional +/// information (content, module that governs it, version, is transferrable, +/// etc.) about this object. #[Object] impl MoveObject { pub(crate) async fn address(&self) -> SuiAddress { @@ -137,8 +139,8 @@ impl MoveObject { .await } - /// Total balance of all coins with marker type owned by this object. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -163,7 +165,8 @@ impl MoveObject { /// The coin objects for this object. 
/// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -192,7 +195,8 @@ impl MoveObject { .await } - /// The domain explicitly configured as the default domain pointing to this object. + /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -203,8 +207,8 @@ impl MoveObject { .await } - /// The SuinsRegistration NFTs owned by this object. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this object. These grant the owner + /// the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -222,18 +226,21 @@ impl MoveObject { ObjectImpl(&self.super_).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(&self.super_).status().await } - /// 32-byte hash that identifies the object's contents, encoded as a Base58 string. + /// 32-byte hash that identifies the object's contents, encoded as a Base58 + /// string. pub(crate) async fn digest(&self) -> Option { ObjectImpl(&self.super_).digest().await } @@ -253,8 +260,9 @@ impl MoveObject { .await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(&self.super_).storage_rebate().await } @@ -279,33 +287,34 @@ impl MoveObject { ObjectImpl(&self.super_).bcs().await } - /// Displays the contents of the Move object in a JSON string and through GraphQL types. Also - /// provides the flat representation of the type signature, and the BCS of the corresponding - /// data. + /// Displays the contents of the Move object in a JSON string and through + /// GraphQL types.
Also provides the flat representation of the type + /// signature, and the BCS of the corresponding data. pub(crate) async fn contents(&self) -> Option { MoveObjectImpl(self).contents().await } - /// Determines whether a transaction can transfer this object, using the TransferObjects - /// transaction command or `sui::transfer::public_transfer`, both of which require the object to + /// Determines whether a transaction can transfer this object, using the + /// TransferObjects transaction command or + /// `sui::transfer::public_transfer`, both of which require the object to /// have the `key` and `store` abilities. pub(crate) async fn has_public_transfer(&self, ctx: &Context<'_>) -> Result { MoveObjectImpl(self).has_public_transfer(ctx).await } - /// The set of named templates defined on-chain for the type of this object, to be handled - /// off-chain. The server substitutes data from the object into these templates to generate a - /// display string per template. + /// The set of named templates defined on-chain for the type of this object, + /// to be handled off-chain. The server substitutes data from the object + /// into these templates to generate a display string per template. pub(crate) async fn display(&self, ctx: &Context<'_>) -> Result>> { ObjectImpl(&self.super_).display(ctx).await } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type has `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_field( &self, ctx: &Context<'_>, @@ -316,13 +325,14 @@ impl MoveObject { .await } - /// Access a dynamic object field on an object using its name. Names are arbitrary Move values - /// whose type have `copy`, `drop`, and `store`, and are specified using their type, and their - /// BCS contents, Base64 encoded. The value of a dynamic object field can also be accessed + /// Access a dynamic object field on an object using its name. Names are + /// arbitrary Move values whose type has `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -335,8 +345,8 @@ impl MoveObject { /// The dynamic fields and dynamic object fields on an object. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -368,7 +378,8 @@ impl MoveObject { } } - /// Attempts to convert the Move object into a `0x3::staking_pool::StakedSui`.
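// Per the docs above, a dynamic field name is supplied as a type plus the BCS
// bytes of the name value, Base64-encoded. Building the payload for a `u64`
// name of 42 might look like this (a sketch using fastcrypto's `Base64`, which
// this codebase imports elsewhere; the helper name is made up):

use fastcrypto::encoding::{Base64, Encoding};

fn u64_name_bcs_base64(value: u64) -> String {
    let bytes = bcs::to_bytes(&value).expect("u64 is BCS-serializable");
    Base64::encode(bytes)
}

// u64_name_bcs_base64(42) == "KgAAAAAAAAA=" (eight little-endian bytes, Base64).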
+ /// Attempts to convert the Move object into a + /// `0x3::staking_pool::StakedSui`. async fn as_staked_sui(&self) -> Result> { match StakedSui::try_from(self) { Ok(coin) => Ok(Some(coin)), @@ -442,9 +453,10 @@ impl MoveObject { /// Query the database for a `page` of Move objects, optionally `filter`-ed. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this page was - /// queried for, or `None` if the data was requested at the latest checkpoint. Each entity - /// returned in the connection will inherit this checkpoint, so that when viewing that entity's + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this page was queried for, or `None` if the data was requested + /// at the latest checkpoint. Each entity returned in the connection + /// will inherit this checkpoint, so that when viewing that entity's /// state, it will be as if it was read at the same checkpoint. pub(crate) async fn paginate( db: &Db, diff --git a/crates/sui-graphql-rpc/src/types/move_package.rs b/crates/sui-graphql-rpc/src/types/move_package.rs index 3ab442c8e57..616d7be953f 100644 --- a/crates/sui-graphql-rpc/src/types/move_package.rs +++ b/crates/sui-graphql-rpc/src/types/move_package.rs @@ -1,31 +1,33 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::balance::{self, Balance}; -use super::base64::Base64; -use super::big_int::BigInt; -use super::coin::Coin; -use super::cursor::{JsonCursor, Page}; -use super::move_module::MoveModule; -use super::move_object::MoveObject; -use super::object::{ - self, Object, ObjectFilter, ObjectImpl, ObjectLookupKey, ObjectOwner, ObjectStatus, +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, }; -use super::owner::OwnerImpl; -use super::stake::StakedSui; -use super::sui_address::SuiAddress; -use super::suins_registration::{DomainFormat, SuinsRegistration}; -use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; -use super::type_filter::ExactTypeFilter; -use crate::consistency::ConsistentNamedCursor; -use crate::data::Db; -use crate::error::Error; -use crate::types::checkpoint::Checkpoint; -use async_graphql::connection::{Connection, CursorType, Edge}; -use async_graphql::*; use sui_package_resolver::{error::Error as PackageCacheError, Package as ParsedMovePackage}; use sui_types::{move_package::MovePackage as NativeMovePackage, object::Data}; +use super::{ + balance::{self, Balance}, + base64::Base64, + big_int::BigInt, + coin::Coin, + cursor::{JsonCursor, Page}, + move_module::MoveModule, + move_object::MoveObject, + object::{self, Object, ObjectFilter, ObjectImpl, ObjectLookupKey, ObjectOwner, ObjectStatus}, + owner::OwnerImpl, + stake::StakedSui, + sui_address::SuiAddress, + suins_registration::{DomainFormat, SuinsRegistration}, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, + type_filter::ExactTypeFilter, +}; +use crate::{ + consistency::ConsistentNamedCursor, data::Db, error::Error, types::checkpoint::Checkpoint, +}; + #[derive(Clone)] pub(crate) struct MovePackage { /// Representation of this Move Object as a generic Object. @@ -39,13 +41,15 @@ pub(crate) struct MovePackage { pub checkpoint_viewed_at: u64, } -/// Information used by a package to link to a specific version of its dependency. +/// Information used by a package to link to a specific version of its +/// dependency. #[derive(SimpleObject)] struct Linkage { /// The ID on-chain of the first version of the dependency. 
original_id: SuiAddress, - /// The ID on-chain of the version of the dependency that this package depends on. + /// The ID on-chain of the version of the dependency that this package + /// depends on. upgraded_id: SuiAddress, /// The version of the dependency that this package depends on. @@ -70,8 +74,9 @@ pub(crate) struct MovePackageDowncastError; pub(crate) type CModule = JsonCursor; -/// A MovePackage is a kind of Move object that represents code that has been published on chain. -/// It exposes information about its modules, type definitions, functions, and dependencies. +/// A MovePackage is a kind of Move object that represents code that has been +/// published on chain. It exposes information about its modules, type +/// definitions, functions, and dependencies. #[Object] impl MovePackage { pub(crate) async fn address(&self) -> SuiAddress { @@ -80,8 +85,8 @@ impl MovePackage { /// Objects owned by this package, optionally `filter`-ed. /// - /// Note that objects owned by a package are inaccessible, because packages are immutable and - /// cannot be owned by an address. + /// Note that objects owned by a package are inaccessible, because packages + /// are immutable and cannot be owned by an address. pub(crate) async fn objects( &self, ctx: &Context<'_>, @@ -96,11 +101,11 @@ impl MovePackage { .await } - /// Total balance of all coins with marker type owned by this package. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this package. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. /// - /// Note that coins owned by a package are inaccessible, because packages are immutable and - /// cannot be owned by an address. + /// Note that coins owned by a package are inaccessible, because packages + /// are immutable and cannot be owned by an address. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -111,8 +116,8 @@ impl MovePackage { /// The balances of all coin types owned by this package. /// - /// Note that coins owned by a package are inaccessible, because packages are immutable and - /// cannot be owned by an address. + /// Note that coins owned by a package are inaccessible, because packages + /// are immutable and cannot be owned by an address. pub(crate) async fn balances( &self, ctx: &Context<'_>, @@ -128,10 +133,11 @@ impl MovePackage { /// The coin objects owned by this package. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. /// - /// Note that coins owned by a package are inaccessible, because packages are immutable and - /// cannot be owned by an address. + /// Note that coins owned by a package are inaccessible, because packages + /// are immutable and cannot be owned by an address. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -148,8 +154,8 @@ impl MovePackage { /// The `0x3::staking_pool::StakedSui` objects owned by this package. /// - /// Note that objects owned by a package are inaccessible, because packages are immutable and - /// cannot be owned by an address. + /// Note that objects owned by a package are inaccessible, because packages + /// are immutable and cannot be owned by an address. pub(crate) async fn staked_suis( &self, ctx: &Context<'_>, @@ -163,7 +169,8 @@ impl MovePackage { .await } - /// The domain explicitly configured as the default domain pointing to this object. 
+ /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -174,11 +181,11 @@ impl MovePackage { .await } - /// The SuinsRegistration NFTs owned by this package. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this package. These grant the owner + /// the capability to manage the associated domain. /// - /// Note that objects owned by a package are inaccessible, because packages are immutable and - /// cannot be owned by an address. + /// Note that objects owned by a package are inaccessible, because packages + /// are immutable and cannot be owned by an address. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -196,18 +203,21 @@ impl MovePackage { ObjectImpl(&self.super_).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(&self.super_).status().await } - /// 32-byte hash that identifies the package's contents, encoded as a Base58 string. + /// 32-byte hash that identifies the package's contents, encoded as a Base58 + /// string. pub(crate) async fn digest(&self) -> Option { ObjectImpl(&self.super_).digest().await } @@ -228,11 +238,12 @@ impl MovePackage { .await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. /// - /// Note that packages cannot be deleted or mutated, so this number is provided purely for - /// reference. + /// Note that packages cannot be deleted or mutated, so this number is + /// provided purely for reference. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(&self.super_).storage_rebate().await } @@ -259,8 +270,8 @@ impl MovePackage { ObjectImpl(&self.super_).bcs().await } - /// A representation of the module called `name` in this package, including the - /// structs and functions it defines. + /// A representation of the module called `name` in this package, including + /// the structs and functions it defines.
async fn module(&self, name: String) -> Result> { self.module_impl(&name).extend() } @@ -374,8 +385,9 @@ impl MovePackage { Some(type_origins) } - /// BCS representation of the package's modules. Modules appear as a sequence of pairs (module - /// name, followed by module bytes), in alphabetic order by module name. + /// BCS representation of the package's modules. Modules appear as a + /// sequence of pairs (module name, followed by module bytes), in + /// alphabetic order by module name. async fn module_bcs(&self) -> Result> { let bcs = bcs::to_bytes(self.native.serialized_module_map()) .map_err(|_| { @@ -389,8 +401,8 @@ impl MovePackage { impl MovePackage { fn parsed_package(&self) -> Result { - // TODO: Leverage the package cache (attempt to read from it, and if that doesn't succeed, - // write back the parsed Package to the cache as well.) + // TODO: Leverage the package cache (attempt to read from it, and if that + // doesn't succeed, write back the parsed Package to the cache as well.) let Some(native) = self.super_.native_impl() else { return Err(Error::Internal( "No native representation of package to parse.".to_string(), diff --git a/crates/sui-graphql-rpc/src/types/move_struct.rs b/crates/sui-graphql-rpc/src/types/move_struct.rs index 8bae7ec46c1..0a8babd73b4 100644 --- a/crates/sui-graphql-rpc/src/types/move_struct.rs +++ b/crates/sui-graphql-rpc/src/types/move_struct.rs @@ -4,13 +4,12 @@ use async_graphql::*; use sui_package_resolver::StructDef; -use crate::error::Error; - use super::{ move_module::MoveModule, open_move_type::{abilities, MoveAbility, OpenMoveType}, sui_address::SuiAddress, }; +use crate::error::Error; pub(crate) struct MoveStruct { defining_id: SuiAddress, @@ -71,15 +70,16 @@ impl MoveStruct { Some(&self.abilities) } - /// Constraints on the struct's formal type parameters. Move bytecode does not name type - /// parameters, so when they are referenced (e.g. in field types) they are identified by their - /// index in this list. + /// Constraints on the struct's formal type parameters. Move bytecode does + /// not name type parameters, so when they are referenced (e.g. in field + /// types) they are identified by their index in this list. async fn type_parameters(&self) -> Option<&Vec> { Some(&self.type_parameters) } - /// The names and types of the struct's fields. Field types reference type parameters, by their - /// index in the defining struct's `typeParameters` list. + /// The names and types of the struct's fields. Field types reference type + /// parameters, by their index in the defining struct's `typeParameters` + /// list. async fn fields(&self) -> Option<&Vec> { Some(&self.fields) } diff --git a/crates/sui-graphql-rpc/src/types/move_type.rs b/crates/sui-graphql-rpc/src/types/move_type.rs index 8bd26cdac77..f3cd07a866c 100644 --- a/crates/sui-graphql-rpc/src/types/move_type.rs +++ b/crates/sui-graphql-rpc/src/types/move_type.rs @@ -1,16 +1,14 @@ // Copyright (c) Mysten Labs, Inc. 
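// `module_bcs` above serializes the package's module map, and BCS writes a map
// as its entries in key order, which is exactly the "alphabetic order by
// module name" guarantee when the keys are module names in a `BTreeMap`. A
// small round trip with the `bcs` crate (assuming the map type is
// `BTreeMap<String, Vec<u8>>`, as the doc implies):

use std::collections::BTreeMap;

fn demo() {
    let mut modules: BTreeMap<String, Vec<u8>> = BTreeMap::new();
    modules.insert("b_mod".to_string(), vec![0xB0]);
    modules.insert("a_mod".to_string(), vec![0xA0]);

    // "a_mod" is encoded before "b_mod" regardless of insertion order.
    let bytes = bcs::to_bytes(&modules).expect("module maps are BCS-serializable");
    let decoded: BTreeMap<String, Vec<u8>> = bcs::from_bytes(&bytes).expect("round-trips");
    assert_eq!(modules, decoded);
}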
// SPDX-License-Identifier: Apache-2.0 -use crate::context_data::package_cache::PackageCache; use async_graphql::*; use move_binary_format::file_format::AbilitySet; use move_core_types::{annotated_value as A, language_storage::TypeTag}; use serde::{Deserialize, Serialize}; use sui_package_resolver::Resolver; -use crate::error::Error; - use super::open_move_type::MoveAbility; +use crate::{context_data::package_cache::PackageCache, error::Error}; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct MoveType { @@ -117,7 +115,8 @@ impl MoveType { /// Structured representation of the type signature. async fn signature(&self) -> Result { - // Factor out into its own non-GraphQL, non-async function for better testability + // Factor out into its own non-GraphQL, non-async function for better + // testability self.signature_impl().extend() } @@ -271,7 +270,8 @@ impl TryFrom for MoveFieldLayout { } } -/// Error from seeing a `signer` value or type, which shouldn't be possible in Sui Move. +/// Error from seeing a `signer` value or type, which shouldn't be possible in +/// Sui Move. pub(crate) fn unexpected_signer_error() -> Error { Error::Internal("Unexpected value of type: signer.".to_string()) } @@ -280,10 +280,10 @@ pub(crate) fn unexpected_signer_error() -> Error { mod tests { use std::str::FromStr; - use super::*; - use expect_test::expect; + use super::*; + fn signature(repr: impl Into) -> Result { let tag = TypeTag::from_str(repr.into().as_str()).unwrap(); MoveType::new(tag).signature_impl() diff --git a/crates/sui-graphql-rpc/src/types/move_value.rs b/crates/sui-graphql-rpc/src/types/move_value.rs index 0f45518bbb3..7f469e112c8 100644 --- a/crates/sui-graphql-rpc/src/types/move_value.rs +++ b/crates/sui-graphql-rpc/src/types/move_value.rs @@ -12,10 +12,12 @@ use serde::{Deserialize, Serialize}; use sui_package_resolver::Resolver; use sui_types::object::bounded_visitor::BoundedVisitor; -use crate::context_data::package_cache::PackageCache; -use crate::{error::Error, types::json::Json, types::move_type::unexpected_signer_error}; - use super::{base64::Base64, big_int::BigInt, move_type::MoveType, sui_address::SuiAddress}; +use crate::{ + context_data::package_cache::PackageCache, + error::Error, + types::{json::Json, move_type::unexpected_signer_error}, +}; const STD: AccountAddress = AccountAddress::ONE; const SUI: AccountAddress = AccountAddress::TWO; @@ -88,14 +90,16 @@ impl MoveValue { .map_err(|_| Error::Internal("Unable to fetch Package Cache.".to_string())) .extend()?; - // Factor out into its own non-GraphQL, non-async function for better testability + // Factor out into its own non-GraphQL, non-async function for better + // testability self.data_impl(self.type_.layout_impl(resolver).await.extend()?) .extend() } /// Representation of a Move value in JSON, where: /// - /// - Addresses, IDs, and UIDs are represented in canonical form, as JSON strings. + /// - Addresses, IDs, and UIDs are represented in canonical form, as JSON + /// strings. /// - Bools are represented by JSON boolean literals. /// - u8, u16, and u32 are represented as JSON numbers. /// - u64, u128, and u256 are represented as JSON strings. @@ -103,15 +107,16 @@ impl MoveValue { /// - Structs are represented by JSON objects. /// - Empty optional values are represented by `null`. /// - /// This form is offered as a less verbose convenience in cases where the layout of the type is - /// known by the client. 
+ /// This form is offered as a less verbose convenience in cases where the + /// layout of the type is known by the client. async fn json(&self, ctx: &Context<'_>) -> Result { let resolver = ctx .data::>() .map_err(|_| Error::Internal("Unable to fetch Package Cache.".to_string())) .extend()?; - // Factor out into its own non-GraphQL, non-async function for better testability + // Factor out into its own non-GraphQL, non-async function for better + // testability self.json_impl(self.type_.layout_impl(resolver).await.extend()?) .extend() } @@ -293,8 +298,8 @@ macro_rules! extract_field { }}; } -/// Extracts a vector of bytes from `value`, assuming it's a `MoveValue::Vector` where all the -/// values are `MoveValue::U8`s. +/// Extracts a vector of bytes from `value`, assuming it's a `MoveValue::Vector` +/// where all the values are `MoveValue::U8`s. fn extract_bytes(value: A::MoveValue) -> Result, Error> { use A::MoveValue as V; let V::Vector(elements) = value else { @@ -312,14 +317,15 @@ fn extract_bytes(value: A::MoveValue) -> Result, Error> { Ok(bytes) } -/// Extracts a Rust String from the contents of a Move Struct assuming that struct matches the -/// contents of Move String: +/// Extracts a Rust String from the contents of a Move Struct assuming that +/// struct matches the contents of Move String: /// /// ```notrust /// { bytes: vector } /// ``` /// -/// Which is conformed to by both `std::ascii::String` and `std::string::String`. +/// Which is conformed to by both `std::ascii::String` and +/// `std::string::String`. fn extract_string( type_: &StructTag, fields: Vec<(Identifier, A::MoveValue)>, @@ -340,8 +346,8 @@ fn extract_string( }) } -/// Extracts an address from the contents of a Move Struct, assuming the struct matches the -/// following shape: +/// Extracts an address from the contents of a Move Struct, assuming the struct +/// matches the following shape: /// /// ```notrust /// { bytes: address } @@ -362,8 +368,8 @@ fn extract_id( Ok(addr) } -/// Extracts an address from the contents of a Move Struct, assuming the struct matches the -/// following shape: +/// Extracts an address from the contents of a Move Struct, assuming the struct +/// matches the following shape: /// /// ```notrust /// { id: 0x2::object::ID { bytes: address } } @@ -391,14 +397,15 @@ fn extract_uid( extract_id(&type_, fields) } -/// Extracts a value from the contents of a Move Struct, assuming the struct matches the following -/// shape: +/// Extracts a value from the contents of a Move Struct, assuming the struct +/// matches the following shape: /// /// ```notrust /// { vec: vector } /// ``` /// -/// Where `vec` contains at most one element. This matches the shape of `0x1::option::Option`. +/// Where `vec` contains at most one element. This matches the shape of +/// `0x1::option::Option`. fn extract_option( type_: &StructTag, fields: Vec<(Identifier, A::MoveValue)>, @@ -455,9 +462,9 @@ mod tests { fn data(layout: A::MoveTypeLayout, data: T) -> Result { let tag: TypeTag = (&layout).into(); - // The format for type from its `Display` impl does not technically match the format that - // the RPC expects from the data layer (where a type's package should be canonicalized), but - // it will suffice. + // The format for type from its `Display` impl does not technically match the + // format that the RPC expects from the data layer (where a type's + // package should be canonicalized), but it will suffice. 
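// All the `extract_*` helpers above match one known Move struct shape and pull
// out its single interesting field. The `0x1::option::Option<T>` case can be
// modelled in plain Rust to show the invariant being checked (a sketch, not
// this crate's annotated-value types):

struct MoveOptionShape<T> {
    // `0x1::option::Option<T>` is `{ vec: vector<T> }` with at most one element.
    vec: Vec<T>,
}

fn extract_option<T>(opt: MoveOptionShape<T>) -> Result<Option<T>, &'static str> {
    let mut elements = opt.vec.into_iter();
    let first = elements.next();
    if elements.next().is_some() {
        // A second element would violate the Option invariant.
        return Err("0x1::option::Option contains more than one element");
    }
    Ok(first)
}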
data_with_tag(format!("{}", tag), layout, data) } @@ -651,7 +658,9 @@ mod tests { #[test] fn address_data() { let v = data(L::Address, address("0x42")); - let expect = expect!["Ok(Address(SuiAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66])))"]; + let expect = expect![ + "Ok(Address(SuiAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66])))" + ]; expect.assert_eq(&format!("{v:?}")); } @@ -672,7 +681,9 @@ mod tests { }); let v = data(l, address("0x42")); - let expect = expect!["Ok(Uid(SuiAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66])))"]; + let expect = expect![ + "Ok(Uid(SuiAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66])))" + ]; expect.assert_eq(&format!("{v:?}")); } diff --git a/crates/sui-graphql-rpc/src/types/object.rs b/crates/sui-graphql-rpc/src/types/object.rs index 0d493da60b3..c8573c6c0c5 100644 --- a/crates/sui-graphql-rpc/src/types/object.rs +++ b/crates/sui-graphql-rpc/src/types/object.rs @@ -1,58 +1,68 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::collections::BTreeMap; -use std::fmt::Write; - -use super::balance::{self, Balance}; -use super::big_int::BigInt; -use super::checkpoint::Checkpoint; -use super::coin::Coin; -use super::coin_metadata::CoinMetadata; -use super::cursor::{self, Page, Paginated, RawPaginated, Target}; -use super::digest::Digest; -use super::display::{Display, DisplayEntry}; -use super::dynamic_field::{DynamicField, DynamicFieldName}; -use super::move_object::MoveObject; -use super::move_package::MovePackage; -use super::owner::OwnerImpl; -use super::stake::StakedSui; -use super::suins_registration::{DomainFormat, SuinsRegistration}; -use super::transaction_block; -use super::transaction_block::TransactionBlockFilter; -use super::type_filter::{ExactTypeFilter, TypeFilter}; -use super::{owner::Owner, sui_address::SuiAddress, transaction_block::TransactionBlock}; -use crate::consistency::{build_objects_query, consistent_range, Checkpointed, View}; -use crate::context_data::package_cache::PackageCache; -use crate::data::{self, Db, DbConnection, QueryExecutor}; -use crate::error::Error; -use crate::raw_query::RawQuery; -use crate::types::base64::Base64; -use crate::types::intersect; -use crate::{filter, or_filter}; -use async_graphql::connection::{CursorType, Edge}; -use async_graphql::{connection::Connection, *}; +use std::{collections::BTreeMap, fmt::Write}; + +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, +}; use diesel::{CombineDsl, ExpressionMethods, OptionalExtension, QueryDsl}; -use move_core_types::annotated_value::{MoveStruct, MoveTypeLayout}; -use move_core_types::language_storage::StructTag; +use move_core_types::{ + annotated_value::{MoveStruct, MoveTypeLayout}, + language_storage::StructTag, +}; use serde::{Deserialize, Serialize}; -use sui_indexer::models::objects::{StoredDeletedHistoryObject, StoredHistoryObject, StoredObject}; -use sui_indexer::schema::{objects, objects_history, objects_snapshot}; -use sui_indexer::types::ObjectStatus as NativeObjectStatus; -use sui_indexer::types::OwnerType; +use sui_indexer::{ + models::objects::{StoredDeletedHistoryObject, StoredHistoryObject, StoredObject}, + schema::{objects, objects_history, objects_snapshot}, + types::{ObjectStatus as NativeObjectStatus, OwnerType}, +}; use sui_package_resolver::Resolver; -use 
sui_types::object::bounded_visitor::BoundedVisitor; -use sui_types::object::{ - MoveObject as NativeMoveObject, Object as NativeObject, Owner as NativeOwner, +use sui_types::{ + object::{ + bounded_visitor::BoundedVisitor, MoveObject as NativeMoveObject, Object as NativeObject, + Owner as NativeOwner, + }, + TypeTag, +}; + +use super::{ + balance::{self, Balance}, + big_int::BigInt, + checkpoint::Checkpoint, + coin::Coin, + coin_metadata::CoinMetadata, + cursor::{self, Page, Paginated, RawPaginated, Target}, + digest::Digest, + display::{Display, DisplayEntry}, + dynamic_field::{DynamicField, DynamicFieldName}, + move_object::MoveObject, + move_package::MovePackage, + owner::{Owner, OwnerImpl}, + stake::StakedSui, + sui_address::SuiAddress, + suins_registration::{DomainFormat, SuinsRegistration}, + transaction_block, + transaction_block::{TransactionBlock, TransactionBlockFilter}, + type_filter::{ExactTypeFilter, TypeFilter}, +}; +use crate::{ + consistency::{build_objects_query, consistent_range, Checkpointed, View}, + context_data::package_cache::PackageCache, + data::{self, Db, DbConnection, QueryExecutor}, + error::Error, + filter, or_filter, + raw_query::RawQuery, + types::{base64::Base64, intersect}, }; -use sui_types::TypeTag; #[derive(Clone, Debug)] pub(crate) struct Object { pub address: SuiAddress, pub kind: ObjectKind, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None if + /// the data was requested at the latest checkpoint. pub checkpoint_viewed_at: Option, } @@ -61,29 +71,31 @@ pub(crate) struct ObjectImpl<'o>(pub &'o Object); #[derive(Clone, Debug)] pub(crate) enum ObjectKind { - /// An object loaded from serialized data, such as the contents of a transaction. + /// An object loaded from serialized data, such as the contents of a + /// transaction. NotIndexed(NativeObject), /// An object fetched from the live objects table. Live(NativeObject, StoredObject), /// An object fetched from the snapshot or historical objects table. Historical(NativeObject, StoredHistoryObject), - /// The object is wrapped or deleted and only partial information can be loaded from the - /// indexer. + /// The object is wrapped or deleted and only partial information can be + /// loaded from the indexer. WrappedOrDeleted(StoredDeletedHistoryObject), } #[derive(Enum, Copy, Clone, Eq, PartialEq, Debug)] #[graphql(name = "ObjectKind")] pub enum ObjectStatus { - /// The object is loaded from serialized data, such as the contents of a transaction. + /// The object is loaded from serialized data, such as the contents of a + /// transaction. NotIndexed, /// The object is currently live and is not deleted or wrapped. Live, - /// The object is referenced at some version, and thus is fetched from the snapshot or - /// historical objects table. + /// The object is referenced at some version, and thus is fetched from the + /// snapshot or historical objects table. Historical, - /// The object is deleted or wrapped and only partial information can be loaded from the - /// indexer. + /// The object is deleted or wrapped and only partial information can be + /// loaded from the indexer. WrappedOrDeleted, } @@ -97,22 +109,24 @@ pub(crate) struct ObjectRef { pub digest: Digest, } -/// Constrains the set of objects returned. All filters are optional, and the resulting set of -/// objects are ones whose +/// Constrains the set of objects returned. 
All filters are optional, and the +/// resulting set of objects are ones whose /// /// - Type matches the `type` filter, /// - AND, whose owner matches the `owner` filter, -/// - AND, whose ID is in `objectIds` OR whose ID and version is in `objectKeys`. +/// - AND, whose ID is in `objectIds` OR whose ID and version is in +/// `objectKeys`. #[derive(InputObject, Default, Debug, Clone, Eq, PartialEq)] pub(crate) struct ObjectFilter { - /// This field is used to specify the type of objects that should be included in the query - /// results. + /// This field is used to specify the type of objects that should be + /// included in the query results. /// - /// Objects can be filtered by their type's package, package::module, or their fully qualified - /// type name. + /// Objects can be filtered by their type's package, package::module, or + /// their fully qualified type name. /// - /// Generic types can be queried by either the generic type name, e.g. `0x2::coin::Coin`, or by - /// the full type name, such as `0x2::coin::Coin<0x2::sui::SUI>`. + /// Generic types can be queried by either the generic type name, e.g. + /// `0x2::coin::Coin`, or by the full type name, such as + /// `0x2::coin::Coin<0x2::sui::SUI>`. pub type_: Option, /// Filter for live objects by their current owners. @@ -121,7 +135,8 @@ pub(crate) struct ObjectFilter { /// Filter for live objects by their IDs. pub object_ids: Option>, - /// Filter for live or potentially historical objects by their ID and version. + /// Filter for live or potentially historical objects by their ID and + /// version. pub object_keys: Option>, } @@ -140,24 +155,26 @@ pub enum ObjectOwner { Address(AddressOwner), } -/// An immutable object is an object that can't be mutated, transferred, or deleted. -/// Immutable objects have no owner, so anyone can use them. +/// An immutable object is an object that can't be mutated, transferred, or +/// deleted. Immutable objects have no owner, so anyone can use them. #[derive(SimpleObject, Clone)] pub struct Immutable { #[graphql(name = "_")] dummy: Option, } -/// A shared object is an object that is shared using the 0x2::transfer::share_object function. -/// Unlike owned objects, once an object is shared, it stays mutable and is accessible by anyone. +/// A shared object is an object that is shared using the +/// 0x2::transfer::share_object function. Unlike owned objects, once an object +/// is shared, it stays mutable and is accessible by anyone. #[derive(SimpleObject, Clone)] pub struct Shared { initial_shared_version: u64, } -/// If the object's owner is a Parent, this object is part of a dynamic field (it is the value of -/// the dynamic field, or the intermediate Field object itself). Also note that if the owner -/// is a parent, then it's guaranteed to be an object. +/// If the object's owner is a Parent, this object is part of a dynamic field +/// (it is the value of the dynamic field, or the intermediate Field object +/// itself). Also note that if the owner is a parent, then it's guaranteed to be +/// an object. #[derive(SimpleObject, Clone)] pub struct Parent { parent: Option, @@ -165,7 +182,8 @@ pub struct Parent { /// An address-owned object is owned by a specific 32-byte address that is /// either an account address (derived from a particular signature scheme) or -/// an object ID. An address-owned object is accessible only to its owner and no others. +/// an object ID. An address-owned object is accessible only to its owner and no +/// others. 
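// The four ownership shapes above correspond to the variants of the native
// `Owner` enum in `sui_types`. A classification sketch (string labels stand in
// for the GraphQL union variants; newer `sui_types` releases may add variants
// beyond these four):

use sui_types::object::Owner as NativeOwner;

fn classify(owner: &NativeOwner) -> &'static str {
    match owner {
        NativeOwner::Immutable => "Immutable",
        NativeOwner::Shared { .. } => "Shared",
        // The value (or intermediate Field object) of a dynamic field.
        NativeOwner::ObjectOwner(_) => "Parent",
        // An account address or an object ID.
        NativeOwner::AddressOwner(_) => "AddressOwner",
    }
}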
#[derive(SimpleObject, Clone)] pub struct AddressOwner { owner: Option, @@ -176,16 +194,17 @@ pub(crate) enum ObjectLookupKey { LatestAt(u64), VersionAt { version: u64, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None + /// if the data was requested at the latest checkpoint. checkpoint_viewed_at: Option, }, LatestAtParentVersion { - /// The parent version to be used as the upper bound for the query. Look for the latest - /// version of a child object that is less than or equal to this upper bound. + /// The parent version to be used as the upper bound for the query. Look + /// for the latest version of a child object that is less than + /// or equal to this upper bound. version: u64, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None + /// if the data was requested at the latest checkpoint. checkpoint_viewed_at: Option, }, } @@ -193,8 +212,9 @@ pub(crate) enum ObjectLookupKey { pub(crate) type Cursor = cursor::BcsCursor; type Query = data::Query; -/// The inner struct for the `Object`'s cursor. The `object_id` is used as the cursor, while the -/// `checkpoint_viewed_at` sets the consistent upper bound for the cursor. +/// The inner struct for the `Object`'s cursor. The `object_id` is used as the +/// cursor, while the `checkpoint_viewed_at` sets the consistent upper bound for +/// the cursor. #[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] pub(crate) struct HistoricalObjectCursor { #[serde(rename = "o")] @@ -204,8 +224,8 @@ pub(crate) struct HistoricalObjectCursor { checkpoint_viewed_at: u64, } -/// Interface implemented by on-chain values that are addressable by an ID (also referred to as its -/// address). This includes Move objects and packages. +/// Interface implemented by on-chain values that are addressable by an ID (also +/// referred to as its address). This includes Move objects and packages. #[derive(Interface)] #[graphql( name = "IObject", @@ -266,9 +286,10 @@ pub(crate) enum IObject { SuinsRegistration(SuinsRegistration), } -/// An object in Sui is a package (set of Move bytecode modules) or object (typed data structure -/// with fields) with additional metadata detailing its id, version, transaction digest, owner -/// field indicating how this object can be accessed. +/// An object in Sui is a package (set of Move bytecode modules) or object +/// (typed data structure with fields) with additional metadata detailing its +/// id, version, transaction digest, owner field indicating how this object can +/// be accessed. #[Object] impl Object { pub(crate) async fn address(&self) -> SuiAddress { @@ -290,8 +311,8 @@ impl Object { .await } - /// Total balance of all coins with marker type owned by this object. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -316,7 +337,8 @@ impl Object { /// The coin objects for this object. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. 
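// --- Editorial sketch, not part of this change: the `balance` and `coins`
// fields above default their `type` filter to SUI. A minimal version of that
// defaulting, assuming the filter arrives as an optional string (the real
// code uses an `ExactTypeFilter` input):
fn coin_type_or_default(filter: Option<String>) -> String {
    filter.unwrap_or_else(|| "0x2::sui::SUI".to_string())
}
// E.g. `coin_type_or_default(None)` yields "0x2::sui::SUI", while
// `coin_type_or_default(Some("0x2::example::EX".into()))` (a hypothetical
// coin type) passes through unchanged.
// --- end sketch ---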
pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -345,7 +367,8 @@ impl Object { .await } - /// The domain explicitly configured as the default domain pointing to this object. + /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -354,8 +377,8 @@ impl Object { OwnerImpl::from(self).default_suins_name(ctx, format).await } - /// The SuinsRegistration NFTs owned by this object. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this object. These grant the owner + /// the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -373,18 +396,21 @@ impl Object { ObjectImpl(self).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(self).status().await } - /// 32-byte hash that identifies the object's current contents, encoded as a Base58 string. + /// 32-byte hash that identifies the object's current contents, encoded as a + /// Base58 string. pub(crate) async fn digest(&self) -> Option { ObjectImpl(self).digest().await } @@ -403,8 +429,9 @@ impl Object { ObjectImpl(self).previous_transaction_block(ctx).await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(self).storage_rebate().await } @@ -429,19 +456,19 @@ impl Object { ObjectImpl(self).bcs().await } - /// The set of named templates defined on-chain for the type of this object, to be handled - /// off-chain. The server substitutes data from the object into these templates to generate a - /// display string per template. + /// The set of named templates defined on-chain for the type of this object, + /// to be handled off-chain. The server substitutes data from the object + /// into these templates to generate a display string per template.
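// --- Editorial sketch, not part of this change: a simplified version of the
// template substitution described above, assuming flat string fields; the
// real `Display` templates are defined on-chain and can reference nested
// field paths.
use std::collections::BTreeMap;
fn render_template(template: &str, fields: &BTreeMap<String, String>) -> String {
    let mut out = template.to_string();
    for (name, value) in fields {
        // Replace every `{name}` placeholder with the matching field value.
        out = out.replace(&format!("{{{name}}}"), value);
    }
    out
}
// E.g. rendering "Hello, {name}!" against a map with `name = "Example NFT"`
// produces "Hello, Example NFT!".
// --- end sketch ---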
async fn display(&self, ctx: &Context<'_>) -> Result>> { ObjectImpl(self).display(ctx).await } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type have `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. async fn dynamic_field( &self, ctx: &Context<'_>, @@ -452,13 +479,14 @@ impl Object { .await } - /// Access a dynamic object field on an object using its name. Names are arbitrary Move values - /// whose type have `copy`, `drop`, and `store`, and are specified using their type, and their - /// BCS contents, Base64 encoded. The value of a dynamic object field can also be accessed + /// Access a dynamic object field on an object using its name. Names are + /// arbitrary Move values whose type have `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -471,8 +499,8 @@ impl Object { /// The dynamic fields and dynamic object fields on an object. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -638,8 +666,8 @@ impl ObjectImpl<'_> { }) } - /// `display` is part of the `IMoveObject` interface, but is implemented on `ObjectImpl` to - /// allow for a convenience function on `Object`. + /// `display` is part of the `IMoveObject` interface, but is implemented on + /// `ObjectImpl` to allow for a convenience function on `Object`. pub(crate) async fn display(&self, ctx: &Context<'_>) -> Result>> { let Some(native) = self.0.native_impl() else { return Ok(None); @@ -667,11 +695,13 @@ impl ObjectImpl<'_> { } impl Object { - /// Construct a GraphQL object from a native object, without its stored (indexed) counterpart. + /// Construct a GraphQL object from a native object, without its stored + /// (indexed) counterpart. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `Object` was - /// constructed in, or `None` if the data was requested at the latest checkpoint. This is - /// stored on `Object` so that when viewing that entity's state, it will be as if it was + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `Object` was constructed in, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `Object` so + /// that when viewing that entity's state, it will be as if it was /// read at the same checkpoint. 
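// --- Editorial sketch, not part of this change: the consistency rule the doc
// comment above describes. Every entity constructed while serving a request
// carries the checkpoint it was viewed at, and hands the same value down to
// anything derived from it, so one request sees one snapshot. Types are
// simplified stand-ins.
struct Viewed<T> {
    inner: T,
    // `None` means "as of the latest checkpoint".
    checkpoint_viewed_at: Option<u64>,
}
impl<T> Viewed<T> {
    // A derived entity is viewed at the same checkpoint as its parent.
    fn derive<U>(&self, inner: U) -> Viewed<U> {
        Viewed { inner, checkpoint_viewed_at: self.checkpoint_viewed_at }
    }
}
// --- end sketch ---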
pub(crate) fn from_native( address: SuiAddress, @@ -707,9 +737,10 @@ impl Object { /// Query the database for a `page` of objects, optionally `filter`-ed. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this page was - /// queried for, or `None` if the data was requested at the latest checkpoint. Each entity - /// returned in the connection will inherit this checkpoint, so that when viewing that entity's + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this page was queried for, or `None` if the data was requested + /// at the latest checkpoint. Each entity returned in the connection + /// will inherit this checkpoint, so that when viewing that entity's /// state, it will be as if it was read at the same checkpoint. pub(crate) async fn paginate( db: &Db, @@ -720,20 +751,23 @@ impl Object { Self::paginate_subtype(db, page, filter, checkpoint_viewed_at, Ok).await } - /// Query the database for a `page` of some sub-type of Object. The page uses the bytes of an - /// Object ID and the checkpoint when the query was made as the cursor, and can optionally be - /// further `filter`-ed. The subtype is created using the `downcast` function, which is allowed + /// Query the database for a `page` of some sub-type of Object. The page + /// uses the bytes of an Object ID and the checkpoint when the query was + /// made as the cursor, and can optionally be further `filter`-ed. The + /// subtype is created using the `downcast` function, which is allowed /// to fail if the object cannot be converted to the subtype. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this page was - /// queried for, or `None` if the data was requested at the latest checkpoint. Each entity - /// returned in the connection will inherit this checkpoint, so that when viewing that entity's + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this page was queried for, or `None` if the data was requested + /// at the latest checkpoint. Each entity returned in the connection + /// will inherit this checkpoint, so that when viewing that entity's /// state, it will be as if it was read at the same checkpoint. /// - /// If a `Page` is also provided, then this function will defer to the - /// `checkpoint_viewed_at` in the cursors. Otherwise, use the value from the parameter, or set - /// to None. This is so that paginated queries are consistent with the previous query that - /// created the cursor. + /// If a `Page` is also provided, then this function will defer to + /// the `checkpoint_viewed_at` in the cursors. Otherwise, use the value + /// from the parameter, or set to None. This is so that paginated + /// queries are consistent with the previous query that created the + /// cursor. pub(crate) async fn paginate_subtype( db: &Db, page: Page, @@ -741,9 +775,10 @@ impl Object { checkpoint_viewed_at: Option, downcast: impl Fn(Object) -> Result, ) -> Result, Error> { - // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if they are - // consistent. Otherwise, use the value from the parameter, or set to None. This is so that - // paginated queries are consistent with the previous query that created the cursor. + // If cursors are provided, defer to the `checkpoint_viewed_at` in the cursor if + // they are consistent. Otherwise, use the value from the parameter, or + // set to None. This is so that paginated queries are consistent with + // the previous query that created the cursor.
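// Editorial illustration (assumed behaviour, with made-up numbers): if the
// `after` cursor in `page` was minted while viewing checkpoint 100, the
// validation below is expected to return `Some(100)` and to reject a `before`
// cursor minted at any other checkpoint; the caller-supplied
// `checkpoint_viewed_at` is then ignored in favour of the cursor's value.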
let cursor_viewed_at = page.validate_cursor_consistency()?; let checkpoint_viewed_at: Option = cursor_viewed_at.or(checkpoint_viewed_at); @@ -772,8 +807,8 @@ impl Object { let mut conn: Connection = Connection::new(prev, next); for stored in results { - // To maintain consistency, the returned cursor should have the same upper-bound as the - // checkpoint found on the cursor. + // To maintain consistency, the returned cursor should have the same upper-bound + // as the checkpoint found on the cursor. let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); let object = Object::try_from_stored_history_object(stored, Some(checkpoint_viewed_at))?; @@ -783,13 +818,14 @@ impl Object { Ok(conn) } - /// Query for the object at a specific version, at the checkpoint_viewed_at if given, else - /// against the latest checkpoint. + /// Query for the object at a specific version, at the checkpoint_viewed_at + /// if given, else against the latest checkpoint. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `Object` was - /// queried in, or `None` if the data was requested at the latest checkpoint. This is stored on - /// `Object` so that when viewing that entity's state, it will be as if it was read at the same - /// checkpoint. + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `Object` was queried in, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `Object` so + /// that when viewing that entity's state, it will be as if it was read at + /// the same checkpoint. async fn query_at_version( db: &Db, address: SuiAddress, @@ -833,7 +869,8 @@ impl Object { return Ok(None); }; - // Select the max by key after the union query, because Diesel currently does not support order_by on union + // Select the max by key after the union query, because Diesel currently does + // not support order_by on union stored_objs .into_iter() .max_by_key(|o| o.object_version) @@ -841,12 +878,14 @@ impl Object { .transpose() } - /// Query for the latest version of an object bounded by the provided `parent_version`. + /// Query for the latest version of an object bounded by the provided + /// `parent_version`. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `Object` was - /// queried in, or `None` if the data was requested at the latest checkpoint. This is stored on - /// `Object` so that when viewing that entity's state, it will be as if it was read at the same - /// checkpoint. + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `Object` was queried in, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `Object` so + /// that when viewing that entity's state, it will be as if it was read at + /// the same checkpoint. async fn query_latest_at_version( db: &Db, address: SuiAddress, @@ -890,8 +929,8 @@ impl Object { return Ok(None); }; - // Select the max by key after the union query, because Diesel currently does not support - // order_by on union + // Select the max by key after the union query, because Diesel currently does + // not support order_by on union stored_objs .into_iter() .max_by_key(|o| o.object_version) @@ -899,8 +938,9 @@ impl Object { .transpose() } - /// Query for the object at the latest version at the checkpoint sequence number if given, else - /// the latest version of the object against the latest checkpoint. 
+ /// Query for the object at the latest version at the checkpoint sequence + /// number if given, else the latest version of the object against the + /// latest checkpoint. async fn query_latest_at_checkpoint( db: &Db, address: SuiAddress, @@ -939,7 +979,8 @@ impl Object { return Ok(None); }; - // Select the max by key after the union query, because Diesel currently does not support order_by on union + // Select the max by key after the union query, because Diesel currently does + // not support order_by on union stored_objs .into_iter() .max_by_key(|o| o.object_version) @@ -970,9 +1011,9 @@ impl Object { .map_err(|e| Error::Internal(format!("Failed to fetch object: {e}"))) } - /// Query for a singleton object identified by its type. Note: the object is assumed to be a - /// singleton (we either find at least one object with this type and then return it, or return - /// nothing). + /// Query for a singleton object identified by its type. Note: the object is + /// assumed to be a singleton (we either find at least one object with + /// this type and then return it, or return nothing). pub(crate) async fn query_singleton(db: &Db, type_: TypeTag) -> Result, Error> { use objects::dsl; @@ -993,9 +1034,10 @@ impl Object { .transpose() } - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `Object` was - /// constructed in, or `None` if the data was requested at the latest checkpoint. This is - /// stored on `Object` so that when viewing that entity's state, it will be as if it was read at + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `Object` was constructed in, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `Object` so + /// that when viewing that entity's state, it will be as if it was read at /// the same checkpoint. pub(crate) fn try_from_stored_object( stored_object: StoredObject, @@ -1012,9 +1054,10 @@ impl Object { }) } - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this `Object` was - /// constructed in, or `None` if the data was requested at the latest checkpoint. This is - /// stored on `Object` so that when viewing that entity's state, it will be as if it was read at + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this `Object` was constructed in, or `None` if the data was + /// requested at the latest checkpoint. This is stored on `Object` so + /// that when viewing that entity's state, it will be as if it was read at /// the same checkpoint. pub(crate) fn try_from_stored_history_object( history_object: StoredHistoryObject, @@ -1064,9 +1107,10 @@ impl Object { } impl ObjectFilter { - /// Try to create a filter whose results are the intersection of objects in `self`'s results and - /// objects in `other`'s results. This may not be possible if the resulting filter is - /// inconsistent in some way (e.g. a filter that requires one field to be two different values + /// Try to create a filter whose results are the intersection of objects in + /// `self`'s results and objects in `other`'s results. This may not be + /// possible if the resulting filter is inconsistent in some way (e.g. a + /// filter that requires one field to be two different values /// simultaneously). pub(crate) fn intersect(self, other: ObjectFilter) -> Option { macro_rules! 
intersect { @@ -1075,8 +1119,8 @@ impl ObjectFilter { }; } - // Treat `object_ids` and `object_keys` as a single filter on IDs, and optionally versions, - // and compute the intersection of that. + // Treat `object_ids` and `object_keys` as a single filter on IDs, and + // optionally versions, and compute the intersection of that. let keys = intersect::field(self.keys(), other.keys(), |k, l| { let mut combined = BTreeMap::new(); @@ -1086,15 +1130,17 @@ impl ObjectFilter { } } - // If the intersection is empty, it means, there were some ID or Key filters in both - // `self` and `other`, but they don't overlap, so the final result is inconsistent. + // If the intersection is empty, it means, there were some ID or Key filters in + // both `self` and `other`, but they don't overlap, so the final + // result is inconsistent. (!combined.is_empty()).then_some(combined) })?; - // Extract the ID and Key filters back out. At this point, we know that if there were ID/Key - // filters in both `self` and `other`, then they intersected to form a consistent set of - // constraints, so it is safe to interpret the lack of any ID/Key filters respectively as a - // lack of that kind of constraint, rather than a constraint on the empty set. + // Extract the ID and Key filters back out. At this point, we know that if there + // were ID/Key filters in both `self` and `other`, then they intersected + // to form a consistent set of constraints, so it is safe to interpret + // the lack of any ID/Key filters respectively as a lack of that kind of + // constraint, rather than a constraint on the empty set. let object_ids = { let partition: Vec<_> = keys @@ -1129,9 +1175,9 @@ impl ObjectFilter { }) } - /// Extract the Object ID and Key filters into one combined map from Object IDs in this filter, - /// to the versions they should have (or None if the filter mentions the ID but no version for - /// it). + /// Extract the Object ID and Key filters into one combined map from Object + /// IDs in this filter, to the versions they should have (or None if the + /// filter mentions the ID but no version for it). fn keys(&self) -> Option>> { if self.object_keys.is_none() && self.object_ids.is_none() { return None; @@ -1148,7 +1194,8 @@ impl ObjectFilter { )) } - /// Applies ObjectFilter to the input `RawQuery` and returns a new `RawQuery`. + /// Applies ObjectFilter to the input `RawQuery` and returns a new + /// `RawQuery`. pub(crate) fn apply(&self, mut query: RawQuery) -> RawQuery { // Start by applying the filters on IDs and/or keys because they are combined as // a disjunction, while the remaining queries are conjunctions. @@ -1324,8 +1371,9 @@ impl From<&Object> for OwnerImpl { } } -/// Parse a `SuiAddress` from its stored representation. Failure is an internal error: the -/// database should never contain a malformed address (containing the wrong number of bytes). +/// Parse a `SuiAddress` from its stored representation. Failure is an internal +/// error: the database should never contain a malformed address (containing the +/// wrong number of bytes). fn addr(bytes: impl AsRef<[u8]>) -> Result { SuiAddress::from_bytes(bytes.as_ref()).map_err(|e| { let bytes = bytes.as_ref().to_vec(); @@ -1353,8 +1401,9 @@ pub(crate) async fn deserialize_move_struct( return Err(Error::Internal("Object is not a move struct".to_string())); }; - // TODO (annotated-visitor): Use custom visitors for extracting a dynamic field, and for - // creating a GraphQL MoveValue directly (not via an annotated visitor). 
+ // TODO (annotated-visitor): Use custom visitors for extracting a dynamic field, + // and for creating a GraphQL MoveValue directly (not via an annotated + // visitor). let move_struct = BoundedVisitor::deserialize_struct(contents, &layout).map_err(|e| { Error::Internal(format!( "Error deserializing move struct for type {}: {e}", @@ -1365,9 +1414,10 @@ pub(crate) async fn deserialize_move_struct( Ok((struct_tag, move_struct)) } -/// Constructs a raw query to fetch objects from the database. Objects are filtered out if they -/// satisfy the criteria but have a later version in the same checkpoint. If object keys are -/// provided, or no filters are specified at all, then this final condition is not applied. +/// Constructs a raw query to fetch objects from the database. Objects are +/// filtered out if they satisfy the criteria but have a later version in the +/// same checkpoint. If object keys are provided, or no filters are specified at +/// all, then this final condition is not applied. fn objects_query(filter: &ObjectFilter, lhs: i64, rhs: i64, page: &Page) -> RawQuery where { @@ -1389,9 +1439,10 @@ where #[cfg(test)] mod tests { - use super::*; use std::str::FromStr; + use super::*; + #[test] fn test_owner_filter_intersection() { let f0 = ObjectFilter { diff --git a/crates/sui-graphql-rpc/src/types/object_read.rs b/crates/sui-graphql-rpc/src/types/object_read.rs index 910c6485382..c9d0d909a6a 100644 --- a/crates/sui-graphql-rpc/src/types/object_read.rs +++ b/crates/sui-graphql-rpc/src/types/object_read.rs @@ -9,8 +9,8 @@ use super::{ sui_address::SuiAddress, }; -// A helper type representing the read of a specific version of an object. Intended to be -// "flattened" into other GraphQL types. +// A helper type representing the read of a specific version of an object. +// Intended to be "flattened" into other GraphQL types. #[derive(Clone, Eq, PartialEq)] pub(crate) struct ObjectRead { pub native: NativeObjectRef, @@ -30,8 +30,8 @@ impl ObjectRead { self.version_impl() } - /// 32-byte hash that identifies the object's contents at this version, encoded as a Base58 - /// string. + /// 32-byte hash that identifies the object's contents at this version, + /// encoded as a Base58 string. async fn digest(&self) -> String { self.native.2.base58_encode() } diff --git a/crates/sui-graphql-rpc/src/types/open_move_type.rs b/crates/sui-graphql-rpc/src/types/open_move_type.rs index a96c4fd607b..62c74800ed3 100644 --- a/crates/sui-graphql-rpc/src/types/open_move_type.rs +++ b/crates/sui-graphql-rpc/src/types/open_move_type.rs @@ -12,7 +12,8 @@ pub(crate) struct OpenMoveType { signature: OpenMoveTypeSignature, } -/// Abilities are keywords in Sui Move that define how types behave at the compiler level. +/// Abilities are keywords in Sui Move that define how types behave at the +/// compiler level. #[derive(Enum, Copy, Clone, Eq, PartialEq)] pub(crate) enum MoveAbility { /// Enables values to be copied. @@ -25,16 +26,18 @@ pub(crate) enum MoveAbility { Store, } -/// The visibility modifier describes which modules can access this module member. -/// By default, a module member can be called only within the same module. +/// The visibility modifier describes which modules can access this module +/// member. By default, a module member can be called only within the same +/// module. #[derive(Enum, Copy, Clone, Eq, PartialEq)] pub(crate) enum MoveVisibility { /// A public member can be accessed by any module. Public, /// A private member can be accessed in the module it is defined in. 
Private, - /// A friend member can be accessed in the module it is defined in and any other module in - /// its package that is explicitly specified in its friend list. + /// A friend member can be accessed in the module it is defined in and any + /// other module in its package that is explicitly specified in its + /// friend list. Friend, } @@ -104,8 +107,9 @@ pub(crate) enum OpenMoveTypeSignatureBody { }, } -/// Represents types that could contain references or free type parameters. Such types can appear -/// as function parameters, in fields of structs, or as actual type parameter. +/// Represents types that could contain references or free type parameters. +/// Such types can appear as function parameters, in fields of structs, or as +/// actual type parameter. #[Object] impl OpenMoveType { /// Structured representation of the type signature. @@ -277,7 +281,8 @@ impl fmt::Display for OpenMoveTypeSignatureBody { } } -/// Convert an `AbilitySet` from the binary format into a vector of `MoveAbility` (a GraphQL type). +/// Convert an `AbilitySet` from the binary format into a vector of +/// `MoveAbility` (a GraphQL type). pub(crate) fn abilities(set: AbilitySet) -> Vec { set.into_iter().map(MoveAbility::from).collect() } @@ -286,14 +291,13 @@ pub(crate) fn abilities(set: AbilitySet) -> Vec { mod tests { use std::str::FromStr; - use super::*; - use expect_test::expect; use move_core_types::language_storage::StructTag; use sui_package_resolver::{DatatypeKey, DatatypeRef}; - use OpenSignatureBody as S; + use super::*; + fn struct_key(s: &str) -> DatatypeKey { DatatypeRef::from(&StructTag::from_str(s).unwrap()).as_key() } @@ -359,7 +363,9 @@ mod tests { vec![S::TypeParameter(0), S::TypeParameter(1)], )); - let expect = expect!["0x0000000000000000000000000000000000000000000000000000000000000002::table::Table<$0, $1>"]; + let expect = expect![ + "0x0000000000000000000000000000000000000000000000000000000000000002::table::Table<$0, $1>" + ]; expect.assert_eq(&format!("{signature}")); } @@ -370,7 +376,9 @@ mod tests { vec![S::Datatype(struct_key("0x2::sui::SUI"), vec![])], )); - let expect = expect!["0x0000000000000000000000000000000000000000000000000000000000000002::coin::Coin<0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI>"]; + let expect = expect![ + "0x0000000000000000000000000000000000000000000000000000000000000002::coin::Coin<0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI>" + ]; expect.assert_eq(&format!("{signature}")); } } diff --git a/crates/sui-graphql-rpc/src/types/owner.rs b/crates/sui-graphql-rpc/src/types/owner.rs index 3cadfa24783..d96a68961d8 100644 --- a/crates/sui-graphql-rpc/src/types/owner.rs +++ b/crates/sui-graphql-rpc/src/types/owner.rs @@ -1,49 +1,53 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::address::Address; -use super::coin_metadata::CoinMetadata; -use super::cursor::Page; -use super::dynamic_field::DynamicField; -use super::dynamic_field::DynamicFieldName; -use super::move_package::MovePackage; -use super::object::ObjectLookupKey; -use super::stake::StakedSui; -use super::suins_registration::{DomainFormat, NameService, SuinsRegistration}; -use crate::data::Db; -use crate::types::balance::{self, Balance}; -use crate::types::coin::Coin; -use crate::types::move_object::MoveObject; -use crate::types::object::{self, Object, ObjectFilter}; -use crate::types::sui_address::SuiAddress; -use crate::types::type_filter::ExactTypeFilter; - -use async_graphql::connection::Connection; -use async_graphql::*; +use async_graphql::{connection::Connection, *}; use sui_json_rpc::name_service::NameServiceConfig; -use sui_types::dynamic_field::DynamicFieldType; -use sui_types::gas_coin::GAS; +use sui_types::{dynamic_field::DynamicFieldType, gas_coin::GAS}; + +use super::{ + address::Address, + coin_metadata::CoinMetadata, + cursor::Page, + dynamic_field::{DynamicField, DynamicFieldName}, + move_package::MovePackage, + object::ObjectLookupKey, + stake::StakedSui, + suins_registration::{DomainFormat, NameService, SuinsRegistration}, +}; +use crate::{ + data::Db, + types::{ + balance::{self, Balance}, + coin::Coin, + move_object::MoveObject, + object::{self, Object, ObjectFilter}, + sui_address::SuiAddress, + type_filter::ExactTypeFilter, + }, +}; #[derive(Clone, Debug)] pub(crate) struct Owner { pub address: SuiAddress, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None if + /// the data was requested at the latest checkpoint. pub checkpoint_viewed_at: Option, } /// Type to implement GraphQL fields that are shared by all Owners. pub(crate) struct OwnerImpl { pub address: SuiAddress, - /// The checkpoint sequence number at which this was viewed at, or None if the data was - /// requested at the latest checkpoint. + /// The checkpoint sequence number at which this was viewed at, or None if + /// the data was requested at the latest checkpoint. pub checkpoint_viewed_at: Option, } -/// Interface implemented by GraphQL types representing entities that can own objects. Object owners -/// are identified by an address which can represent either the public key of an account or another -/// object. The same address can only refer to an account or an object, never both, but it is not -/// possible to know which up-front. +/// Interface implemented by GraphQL types representing entities that can own +/// objects. Object owners are identified by an address which can represent +/// either the public key of an account or another object. The same address can +/// only refer to an account or an object, never both, but it is not possible to +/// know which up-front. #[derive(Interface)] #[graphql( name = "IOwner", @@ -124,9 +128,10 @@ pub(crate) enum IOwner { SuinsRegistration(SuinsRegistration), } -/// An Owner is an entity that can own an object. Each Owner is identified by a SuiAddress which -/// represents either an Address (corresponding to a public key of an account) or an Object, but -/// never both (it is not known up-front whether a given Owner is an Address or an Object). +/// An Owner is an entity that can own an object. 
Each Owner is identified by a +/// SuiAddress which represents either an Address (corresponding to a public key +/// of an account) or an Object, but never both (it is not known up-front +/// whether a given Owner is an Address or an Object). #[Object] impl Owner { pub(crate) async fn address(&self) -> SuiAddress { @@ -148,8 +153,8 @@ impl Owner { .await } - /// Total balance of all coins with marker type owned by this object or address. If type is not - /// supplied, it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object or + /// address. If type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -174,7 +179,8 @@ impl Owner { /// The coin objects for this object or address. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -189,7 +195,8 @@ impl Owner { .await } - /// The `0x3::staking_pool::StakedSui` objects owned by this object or address. + /// The `0x3::staking_pool::StakedSui` objects owned by this object or + /// address. pub(crate) async fn staked_suis( &self, ctx: &Context<'_>, @@ -203,7 +210,8 @@ impl Owner { .await } - /// The domain explicitly configured as the default domain pointing to this object or address. + /// The domain explicitly configured as the default domain pointing to this + /// object or address. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -212,8 +220,8 @@ impl Owner { OwnerImpl::from(self).default_suins_name(ctx, format).await } - /// The SuinsRegistration NFTs owned by this object or address. These grant the owner the - /// capability to manage the associated domain. + /// The SuinsRegistration NFTs owned by this object or address. These grant + /// the owner the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -248,11 +256,12 @@ impl Owner { .extend() } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type have `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// This field exists as a convenience when accessing a dynamic field on a wrapped object. + /// This field exists as a convenience when accessing a dynamic field on a + /// wrapped object. async fn dynamic_field( &self, ctx: &Context<'_>, @@ -263,12 +272,14 @@ impl Owner { .await } - /// Access a dynamic object field on an object using its name. Names are arbitrary Move values - /// whose type have `copy`, `drop`, and `store`, and are specified using their type, and their - /// BCS contents, Base64 encoded. The value of a dynamic object field can also be accessed + /// Access a dynamic object field on an object using its name. Names are + /// arbitrary Move values whose type have `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). 
/// - /// This field exists as a convenience when accessing a dynamic field on a wrapped object. + /// This field exists as a convenience when accessing a dynamic field on a + /// wrapped object. async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -281,7 +292,8 @@ impl Owner { /// The dynamic fields and dynamic object fields on an object. /// - /// This field exists as a convenience when accessing a dynamic field on a wrapped object. + /// This field exists as a convenience when accessing a dynamic field on a + /// wrapped object. async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -292,7 +304,8 @@ impl Owner { ) -> Result> { OwnerImpl::from(self) .dynamic_fields( - ctx, first, after, last, before, /* parent_version */ None, + ctx, first, after, last, before, // parent_version + None, ) .await } @@ -440,8 +453,9 @@ impl OwnerImpl { .extend() } - // Dynamic field related functions are part of the `IMoveObject` interface, but are provided - // here to implement convenience functions on `Owner` and `Object` to access dynamic fields. + // Dynamic field related functions are part of the `IMoveObject` interface, but + // are provided here to implement convenience functions on `Owner` and + // `Object` to access dynamic fields. pub(crate) async fn dynamic_field( &self, diff --git a/crates/sui-graphql-rpc/src/types/protocol_config.rs b/crates/sui-graphql-rpc/src/types/protocol_config.rs index b8387db16f9..7165d65cb8f 100644 --- a/crates/sui-graphql-rpc/src/types/protocol_config.rs +++ b/crates/sui-graphql-rpc/src/types/protocol_config.rs @@ -33,18 +33,21 @@ pub(crate) struct ProtocolConfigs { /// Constants that control how the chain operates. /// -/// These can only change during protocol upgrades which happen on epoch boundaries. +/// These can only change during protocol upgrades which happen on epoch +/// boundaries. #[Object] impl ProtocolConfigs { - /// The protocol is not required to change on every epoch boundary, so the protocol version - /// tracks which change to the protocol these configs are from. + /// The protocol is not required to change on every epoch boundary, so the + /// protocol version tracks which change to the protocol these configs + /// are from. async fn protocol_version(&self) -> u64 { self.native.version.as_u64() } - /// List all available feature flags and their values. Feature flags are a form of boolean - /// configuration that are usually used to gate features while they are in development. Once a - /// flag has been enabled, it is rare for it to be disabled. + /// List all available feature flags and their values. Feature flags are a + /// form of boolean configuration that are usually used to gate features + /// while they are in development. Once a flag has been enabled, it is + /// rare for it to be disabled. async fn feature_flags(&self) -> Vec { self.native .feature_map() @@ -53,8 +56,9 @@ impl ProtocolConfigs { .collect() } - /// List all available configurations and their values. These configurations can take any value - /// (but they will all be represented in string form), and do not include feature flags. + /// List all available configurations and their values. These + /// configurations can take any value (but they will all be represented + /// in string form), and do not include feature flags. 
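// --- Editorial sketch, not part of this change: the shape of the two
// listings described above, with hypothetical entries. Feature flags are
// plain name/bool pairs, while configs carry arbitrary values in string form.
use std::collections::BTreeMap;
fn demo_shapes() {
    let mut feature_flags: BTreeMap<&str, bool> = BTreeMap::new();
    feature_flags.insert("example_feature", true);
    let mut configs: BTreeMap<&str, Option<String>> = BTreeMap::new();
    configs.insert("example_config", Some("131072".to_string()));
    // Flags gate behaviour while in development; configs parameterise it.
    assert_eq!(feature_flags.get("example_feature"), Some(&true));
}
// --- end sketch ---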
async fn configs(&self) -> Vec { self.native .attr_map() diff --git a/crates/sui-graphql-rpc/src/types/query.rs b/crates/sui-graphql-rpc/src/types/query.rs index 747aef5db0f..a1ccb34c6ce 100644 --- a/crates/sui-graphql-rpc/src/types/query.rs +++ b/crates/sui-graphql-rpc/src/types/query.rs @@ -9,10 +9,12 @@ use move_core_types::account_address::AccountAddress; use serde::de::DeserializeOwned; use sui_json_rpc_types::DevInspectArgs; use sui_sdk::SuiClient; -use sui_types::transaction::{TransactionData, TransactionKind}; -use sui_types::{gas_coin::GAS, transaction::TransactionDataAPI, TypeTag}; +use sui_types::{ + gas_coin::GAS, + transaction::{TransactionData, TransactionDataAPI, TransactionKind}, + TypeTag, +}; -use super::suins_registration::NameService; use super::{ address::Address, available_range::AvailableRange, @@ -30,26 +32,32 @@ use super::{ owner::Owner, protocol_config::ProtocolConfigs, sui_address::SuiAddress, - suins_registration::Domain, + suins_registration::{Domain, NameService}, transaction_block::{self, TransactionBlock, TransactionBlockFilter}, transaction_metadata::TransactionMetadata, type_filter::ExactTypeFilter, }; -use crate::consistency::{consistent_range, CheckpointViewedAt}; -use crate::data::QueryExecutor; -use crate::types::base64::Base64 as GraphQLBase64; -use crate::types::zklogin_verify_signature::verify_zklogin_signature; -use crate::types::zklogin_verify_signature::ZkLoginIntentScope; -use crate::types::zklogin_verify_signature::ZkLoginVerifyResult; -use crate::{config::ServiceConfig, data::Db, error::Error, mutation::Mutation}; +use crate::{ + config::ServiceConfig, + consistency::{consistent_range, CheckpointViewedAt}, + data::{Db, QueryExecutor}, + error::Error, + mutation::Mutation, + types::{ + base64::Base64 as GraphQLBase64, + zklogin_verify_signature::{ + verify_zklogin_signature, ZkLoginIntentScope, ZkLoginVerifyResult, + }, + }, +}; pub(crate) struct Query; pub(crate) type SuiGraphQLSchema = async_graphql::Schema; #[Object] impl Query { - /// First four bytes of the network's genesis checkpoint digest (uniquely identifies the - /// network). + /// First four bytes of the network's genesis checkpoint digest (uniquely + /// identifies the network). async fn chain_identifier(&self, ctx: &Context<'_>) -> Result { Ok(ChainIdentifier::query(ctx.data_unchecked()) .await @@ -195,8 +203,8 @@ impl Query { })) } - /// The object corresponding to the given address at the (optionally) given version. - /// When no version is given, the latest version is returned. + /// The object corresponding to the given address at the (optionally) given + /// version. When no version is given, the latest version is returned. async fn object( &self, ctx: &Context<'_>, @@ -236,8 +244,8 @@ impl Query { })) } - /// Fetch a structured representation of a concrete type, including its layout information. - /// Fails if the type is malformed. + /// Fetch a structured representation of a concrete type, including its + /// layout information. Fails if the type is malformed. async fn type_(&self, type_: String) -> Result { Ok(MoveType::new( TypeTag::from_str(&type_) @@ -255,8 +263,8 @@ impl Query { .extend() } - /// Fetch checkpoint information by sequence number or digest (defaults to the latest available - /// checkpoint). + /// Fetch checkpoint information by sequence number or digest (defaults to + /// the latest available checkpoint). async fn checkpoint( &self, ctx: &Context<'_>, @@ -284,8 +292,9 @@ impl Query { /// The coin objects that exist in the network. 
/// - /// The type field is a string of the inner type of the coin by which to filter (e.g. - /// `0x2::sui::SUI`). If no type is provided, it will default to `0x2::sui::SUI`. + /// The type field is a string of the inner type of the coin by which to + /// filter (e.g. `0x2::sui::SUI`). If no type is provided, it will + /// default to `0x2::sui::SUI`. async fn coins( &self, ctx: &Context<'_>, @@ -303,7 +312,8 @@ impl Query { ctx.data_unchecked(), page, coin, - /* owner */ None, + // owner + None, Some(checkpoint_viewed_at), ) .await @@ -325,7 +335,8 @@ impl Query { Checkpoint::paginate( ctx.data_unchecked(), page, - /* epoch */ None, + // epoch + None, Some(checkpoint_viewed_at), ) .await @@ -401,8 +412,8 @@ impl Query { .extend() } - /// Fetch the protocol config by protocol version (defaults to the latest protocol - /// version known to the GraphQL service). + /// Fetch the protocol config by protocol version (defaults to the latest + /// protocol version known to the GraphQL service). async fn protocol_config( &self, ctx: &Context<'_>, @@ -443,17 +454,20 @@ impl Query { .extend() } - /// Verify a zkLogin signature based on the provided transaction or personal message - /// based on current epoch, chain id, and latest JWKs fetched on-chain. If the - /// signature is valid, the function returns a `ZkLoginVerifyResult` with success as - /// true and an empty list of errors. If the signature is invalid, the function returns + /// Verify a zkLogin signature based on the provided transaction or personal + /// message, against the current epoch, chain id, and latest JWKs fetched + /// on-chain. If the signature is valid, the function returns a + /// `ZkLoginVerifyResult` with success as true and an empty list of + /// errors. If the signature is invalid, the function returns /// a `ZkLoginVerifyResult` with success as false and a list of errors. /// - /// - `bytes` is either the personal message in raw bytes or transaction data bytes in - /// BCS-encoded and then Base64-encoded. + /// - `bytes` is either the personal message in raw bytes or transaction + /// data bytes that are BCS-encoded and then Base64-encoded. /// - `signature` is a serialized zkLogin signature that is Base64-encoded. - /// - `intentScope` is an enum that specifies the intent scope to be used to parse bytes. - /// - `author` is the address of the signer of the transaction or personal msg. + /// - `intentScope` is an enum that specifies the intent scope to be used to + /// parse bytes. + /// - `author` is the address of the signer of the transaction or personal + /// message. async fn verify_zklogin_signature( &self, ctx: &Context<'_>, diff --git a/crates/sui-graphql-rpc/src/types/safe_mode.rs b/crates/sui-graphql-rpc/src/types/safe_mode.rs index fdc3a272663..d16f0b3f537 100644 --- a/crates/sui-graphql-rpc/src/types/safe_mode.rs +++ b/crates/sui-graphql-rpc/src/types/safe_mode.rs @@ -1,17 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::gas::GasCostSummary; use async_graphql::*; +use super::gas::GasCostSummary; + /// Information about whether epoch changes are using safe mode. #[derive(Clone, Debug, PartialEq, Eq, SimpleObject)] pub(crate) struct SafeMode { - /// Whether safe mode was used for the last epoch change. The system will retry a full epoch - /// change on every epoch boundary and automatically reset this flag if so. + /// Whether safe mode was used for the last epoch change.
The system will + /// retry a full epoch change on every epoch boundary and automatically + /// reset this flag if so. pub enabled: Option, - /// Accumulated fees for computation and cost that have not been added to the various reward - /// pools, because the full epoch change did not happen. + /// Accumulated fees for computation and cost that have not been added to + /// the various reward pools, because the full epoch change did not + /// happen. pub gas_summary: Option, } diff --git a/crates/sui-graphql-rpc/src/types/stake.rs b/crates/sui-graphql-rpc/src/types/stake.rs index 257bc8980d0..096cc55e054 100644 --- a/crates/sui-graphql-rpc/src/types/stake.rs +++ b/crates/sui-graphql-rpc/src/types/stake.rs @@ -1,36 +1,37 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::error::Error; -use crate::{context_data::db_data_provider::PgManager, data::Db}; - -use super::balance::{self, Balance}; -use super::base64::Base64; -use super::coin::Coin; -use super::cursor::Page; -use super::display::DisplayEntry; -use super::dynamic_field::{DynamicField, DynamicFieldName}; -use super::move_object::MoveObjectImpl; -use super::move_value::MoveValue; -use super::object::{Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}; -use super::owner::OwnerImpl; -use super::suins_registration::{DomainFormat, SuinsRegistration}; -use super::transaction_block::{self, TransactionBlock, TransactionBlockFilter}; -use super::type_filter::ExactTypeFilter; -use super::{ - big_int::BigInt, epoch::Epoch, move_object::MoveObject, object, sui_address::SuiAddress, -}; -use async_graphql::connection::Connection; -use async_graphql::*; +use async_graphql::{connection::Connection, *}; use move_core_types::language_storage::StructTag; use sui_json_rpc_types::{Stake as RpcStakedSui, StakeStatus as RpcStakeStatus}; -use sui_types::base_types::MoveObjectType; -use sui_types::governance::StakedSui as NativeStakedSui; +use sui_types::{base_types::MoveObjectType, governance::StakedSui as NativeStakedSui}; + +use super::{ + balance::{self, Balance}, + base64::Base64, + big_int::BigInt, + coin::Coin, + cursor::Page, + display::DisplayEntry, + dynamic_field::{DynamicField, DynamicFieldName}, + epoch::Epoch, + move_object::{MoveObject, MoveObjectImpl}, + move_value::MoveValue, + object, + object::{Object, ObjectFilter, ObjectImpl, ObjectOwner, ObjectStatus}, + owner::OwnerImpl, + sui_address::SuiAddress, + suins_registration::{DomainFormat, SuinsRegistration}, + transaction_block::{self, TransactionBlock, TransactionBlockFilter}, + type_filter::ExactTypeFilter, +}; +use crate::{context_data::db_data_provider::PgManager, data::Db, error::Error}; #[derive(Copy, Clone, Enum, PartialEq, Eq)] /// The stake's possible status: active, pending, or unstaked. pub(crate) enum StakeStatus { - /// The stake object is active in a staking pool and it is generating rewards. + /// The stake object is active in a staking pool and it is generating + /// rewards. Active, /// The stake awaits to join a staking pool in the next epoch. Pending, @@ -75,8 +76,8 @@ impl StakedSui { .await } - /// Total balance of all coins with marker type owned by this object. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -103,7 +104,8 @@ impl StakedSui { /// The coin objects for this object. 
/// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -132,7 +134,8 @@ impl StakedSui { .await } - /// The domain explicitly configured as the default domain pointing to this object. + /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -143,8 +146,8 @@ impl StakedSui { .await } - /// The SuinsRegistration NFTs owned by this object. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this object. These grant the owner + /// the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -162,18 +165,21 @@ impl StakedSui { ObjectImpl(&self.super_.super_).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(&self.super_.super_).status().await } - /// 32-byte hash that identifies the object's contents, encoded as a Base58 string. + /// 32-byte hash that identifies the object's contents, encoded as a Base58 + /// string. pub(crate) async fn digest(&self) -> Option { ObjectImpl(&self.super_.super_).digest().await } @@ -193,8 +199,9 @@ impl StakedSui { .await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(&self.super_.super_).storage_rebate().await } @@ -219,33 +226,34 @@ impl StakedSui { ObjectImpl(&self.super_.super_).bcs().await } - /// Displays the contents of the Move object in a JSON string and through GraphQL types. Also - /// provides the flat representation of the type signature, and the BCS of the corresponding - /// data. + /// Displays the contents of the Move object in a JSON string and through + /// GraphQL types.
Also provides the flat representation of the type + /// signature, and the BCS of the corresponding data. pub(crate) async fn contents(&self) -> Option { MoveObjectImpl(&self.super_).contents().await } - /// Determines whether a transaction can transfer this object, using the TransferObjects - /// transaction command or `sui::transfer::public_transfer`, both of which require the object to + /// Determines whether a transaction can transfer this object, using the + /// TransferObjects transaction command or + /// `sui::transfer::public_transfer`, both of which require the object to /// have the `key` and `store` abilities. pub(crate) async fn has_public_transfer(&self, ctx: &Context<'_>) -> Result { MoveObjectImpl(&self.super_).has_public_transfer(ctx).await } - /// The set of named templates defined on-chain for the type of this object, to be handled - /// off-chain. The server substitutes data from the object into these templates to generate a - /// display string per template. + /// The set of named templates defined on-chain for the type of this object, + /// to be handled off-chain. The server substitutes data from the object + /// into these templates to generate a display string per template. pub(crate) async fn display(&self, ctx: &Context<'_>) -> Result>> { ObjectImpl(&self.super_.super_).display(ctx).await } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type have `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_field( &self, ctx: &Context<'_>, @@ -256,13 +264,14 @@ impl StakedSui { .await } - /// Access a dynamic object field on an object using its name. Names are arbitrary Move values - /// whose type have `copy`, `drop`, and `store`, and are specified using their type, and their - /// BCS contents, Base64 encoded. The value of a dynamic object field can also be accessed + /// Access a dynamic object field on an object using its name. Names are + /// arbitrary Move values whose type have `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -275,8 +284,8 @@ impl StakedSui { /// The dynamic fields and dynamic object fields on an object. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. 
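// --- Editorial sketch, not part of this change: building the dynamic field
// name described above, whose value is BCS-serialized and then Base64-encoded
// alongside its Move type. Assumes the `bcs` and `base64` (v0.21+) crates;
// `FieldName` is a simplified stand-in for the real `DynamicFieldName` input.
use base64::{engine::general_purpose::STANDARD, Engine as _};
struct FieldName {
    type_: String,      // e.g. "u64"
    bcs_base64: String, // Base64 of the BCS bytes of the value
}
fn name_for_u64(value: u64) -> FieldName {
    // BCS encodes a u64 as its 8 little-endian bytes.
    let bytes = bcs::to_bytes(&value).expect("BCS encoding of a u64 succeeds");
    FieldName { type_: "u64".to_string(), bcs_base64: STANDARD.encode(bytes) }
}
// E.g. `name_for_u64(42)` yields type "u64" and contents "KgAAAAAAAAA=".
// --- end sketch ---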
pub(crate) async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -344,7 +353,8 @@ impl StakedSui { /// /// Or 0, if this value is negative, where: /// - /// - `initial_stake_rate` is the stake rate at the epoch this stake was activated at. + /// - `initial_stake_rate` is the stake rate at the epoch this stake was + /// activated at. /// - `current_stake_rate` is the stake rate in the current epoch. /// /// This value is only available if the stake is active. @@ -358,12 +368,14 @@ impl StakedSui { } impl StakedSui { - /// Query the database for a `page` of Staked SUI. The page uses the same cursor type as is used - /// for `Object`, and is further filtered to a particular `owner`. + /// Query the database for a `page` of Staked SUI. The page uses the same + /// cursor type as is used for `Object`, and is further filtered to a + /// particular `owner`. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this page was - /// queried for, or `None` if the data was requested at the latest checkpoint. Each entity - /// returned in the connection will inherit this checkpoint, so that when viewing that entity's + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this page was queried for, or `None` if the data was requested + /// at the latest checkpoint. Each entity returned in the connection + /// will inherit this checkpoint, so that when viewing that entity's /// state, it will be as if it was read at the same checkpoint. pub(crate) async fn paginate( db: &Db, @@ -396,8 +408,8 @@ impl StakedSui { .await } - /// The JSON-RPC representation of a StakedSui so that we can "cheat" to implement fields that - /// are not yet implemented directly for GraphQL. + /// The JSON-RPC representation of a StakedSui so that we can "cheat" to + /// implement fields that are not yet implemented directly for GraphQL. /// /// TODO: Make this obsolete async fn rpc_stake(&self, ctx: &Context<'_>) -> Result { diff --git a/crates/sui-graphql-rpc/src/types/stake_subsidy.rs b/crates/sui-graphql-rpc/src/types/stake_subsidy.rs index a99c38198ce..063ec94f04f 100644 --- a/crates/sui-graphql-rpc/src/types/stake_subsidy.rs +++ b/crates/sui-graphql-rpc/src/types/stake_subsidy.rs @@ -1,28 +1,30 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::big_int::BigInt; use async_graphql::*; +use super::big_int::BigInt; + /// Parameters that control the distribution of the stake subsidy. #[derive(Clone, Debug, PartialEq, Eq, SimpleObject)] pub(crate) struct StakeSubsidy { - /// SUI set aside for stake subsidies -- reduces over time as stake subsidies are paid out over - /// time. + /// SUI set aside for stake subsidies -- reduces over time as stake + /// subsidies are paid out. pub balance: Option, - /// Number of times stake subsidies have been distributed subsidies are distributed with other - /// staking rewards, at the end of the epoch. + /// Number of times stake subsidies have been distributed. Subsidies are + /// distributed with other staking rewards, at the end of the epoch. pub distribution_counter: Option, - /// Amount of stake subsidy deducted from the balance per distribution -- decays over time. + /// Amount of stake subsidy deducted from the balance per distribution -- + /// decays over time. pub current_distribution_amount: Option, - /// Maximum number of stake subsidy distributions that occur with the same distribution amount - /// (before the amount is reduced).
+ /// Maximum number of stake subsidy distributions that occur with the same + /// distribution amount (before the amount is reduced). pub period_length: Option, - /// Percentage of the current distribution amount to deduct at the end of the current subsidy - /// period, expressed in basis points. + /// Percentage of the current distribution amount to deduct at the end of + /// the current subsidy period, expressed in basis points. pub decrease_rate: Option, } diff --git a/crates/sui-graphql-rpc/src/types/storage_fund.rs b/crates/sui-graphql-rpc/src/types/storage_fund.rs index 1283efd3b7f..9214f2f749c 100644 --- a/crates/sui-graphql-rpc/src/types/storage_fund.rs +++ b/crates/sui-graphql-rpc/src/types/storage_fund.rs @@ -1,19 +1,21 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::big_int::BigInt; use async_graphql::*; +use super::big_int::BigInt; + /// SUI set aside to account for objects stored on-chain. #[derive(Clone, Debug, PartialEq, Eq, SimpleObject)] pub(crate) struct StorageFund { /// Sum of storage rebates of live objects on chain. pub total_object_storage_rebates: Option, - /// The portion of the storage fund that will never be refunded through storage rebates. + /// The portion of the storage fund that will never be refunded through + /// storage rebates. /// - /// The system maintains an invariant that the sum of all storage fees into the storage fund is - /// equal to the sum of of all storage rebates out, the total storage rebates remaining, and the - /// non-refundable balance. + /// The system maintains an invariant that the sum of all storage fees into + /// the storage fund is equal to the sum of all storage rebates out, + /// the total storage rebates remaining, and the non-refundable balance. pub non_refundable_balance: Option, } diff --git a/crates/sui-graphql-rpc/src/types/string_input.rs b/crates/sui-graphql-rpc/src/types/string_input.rs index c37a56791a1..1937c9cf95f 100644 --- a/crates/sui-graphql-rpc/src/types/string_input.rs +++ b/crates/sui-graphql-rpc/src/types/string_input.rs @@ -1,9 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -/// Opt-in to an implementation of `ScalarType` for a `$Type` that implements `FromStr`, solely for -/// use as an input (not an output). The type masquarades as a `String` in the GraphQL schema, to -/// avoid adding a new scalar. +/// Opt-in to an implementation of `ScalarType` for a `$Type` that implements +/// `FromStr`, solely for use as an input (not an output). The type masquerades +/// as a `String` in the GraphQL schema, to avoid adding a new scalar. macro_rules!
impl_string_input { ($Type:ident) => { #[Scalar(name = "String", visible = false)] diff --git a/crates/sui-graphql-rpc/src/types/sui_address.rs b/crates/sui-graphql-rpc/src/types/sui_address.rs index 287bf0540e8..e70f9bd70ac 100644 --- a/crates/sui-graphql-rpc/src/types/sui_address.rs +++ b/crates/sui-graphql-rpc/src/types/sui_address.rs @@ -163,9 +163,10 @@ impl std::fmt::Display for SuiAddress { #[cfg(test)] mod tests { - use super::*; use async_graphql::Value; + use super::*; + const STR_ADDRESS: &str = "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"; const ARR_ADDRESS: [u8; SUI_ADDRESS_LENGTH] = [ 1, 35, 69, 103, 137, 171, 205, 239, 1, 35, 69, 103, 137, 171, 205, 239, 1, 35, 69, 103, diff --git a/crates/sui-graphql-rpc/src/types/suins_registration.rs b/crates/sui-graphql-rpc/src/types/suins_registration.rs index dd398c5f671..c4f7e093c3b 100644 --- a/crates/sui-graphql-rpc/src/types/suins_registration.rs +++ b/crates/sui-graphql-rpc/src/types/suins_registration.rs @@ -3,6 +3,15 @@ use std::str::FromStr; +use async_graphql::{connection::Connection, *}; +use move_core_types::{ident_str, identifier::IdentStr, language_storage::StructTag}; +use serde::{Deserialize, Serialize}; +use sui_indexer::models::objects::StoredHistoryObject; +use sui_json_rpc::name_service::{ + Domain as NativeDomain, NameRecord, NameServiceConfig, NameServiceError, +}; +use sui_types::{base_types::SuiAddress as NativeSuiAddress, dynamic_field::Field, id::UID}; + use super::{ balance::{self, Balance}, base64::Base64, @@ -27,20 +36,13 @@ use crate::{ data::{Db, DbConnection, QueryExecutor}, error::Error, }; -use async_graphql::{connection::Connection, *}; -use move_core_types::{ident_str, identifier::IdentStr, language_storage::StructTag}; -use serde::{Deserialize, Serialize}; -use sui_indexer::models::objects::StoredHistoryObject; -use sui_json_rpc::name_service::{ - Domain as NativeDomain, NameRecord, NameServiceConfig, NameServiceError, -}; -use sui_types::{base_types::SuiAddress as NativeSuiAddress, dynamic_field::Field, id::UID}; const MOD_REGISTRATION: &IdentStr = ident_str!("suins_registration"); const TYP_REGISTRATION: &IdentStr = ident_str!("SuinsRegistration"); -/// Represents the "core" of the name service (e.g. the on-chain registry and reverse registry). It -/// doesn't contain any fields because we look them up based on the `NameServiceConfig`. +/// Represents the "core" of the name service (e.g. the on-chain registry and +/// reverse registry). It doesn't contain any fields because we look them up +/// based on the `NameServiceConfig`. pub(crate) struct NameService; /// Wrap SuiNS Domain type to expose as a string scalar in GraphQL. @@ -72,16 +74,18 @@ pub(crate) struct SuinsRegistration { pub native: NativeSuinsRegistration, } -/// Represents the results of a query for a domain's `NameRecord` and its parent's `NameRecord`. The -/// `expiration_timestamp_ms` on the name records are compared to the checkpoint's timestamp to -/// check that the domain is not expired. +/// Represents the results of a query for a domain's `NameRecord` and its +/// parent's `NameRecord`. The `expiration_timestamp_ms` on the name records are +/// compared to the checkpoint's timestamp to check that the domain is not +/// expired. pub(crate) struct DomainExpiration { /// The domain's `NameRecord`. pub name_record: Option, /// The parent's `NameRecord`, populated only if the domain is a subdomain. pub parent_name_record: Option, - /// The timestamp of the checkpoint at which the query was made. 
This is used to check if the - /// `expiration_timestamp_ms` on the name records are expired. + /// The timestamp of the checkpoint at which the query was made. This is + /// used to check if the `expiration_timestamp_ms` on the name records + /// has passed. pub checkpoint_timestamp_ms: u64, } @@ -111,8 +115,8 @@ impl SuinsRegistration { .await } - /// Total balance of all coins with marker type owned by this object. If type is not supplied, - /// it defaults to `0x2::sui::SUI`. + /// Total balance of all coins with marker type owned by this object. If + /// type is not supplied, it defaults to `0x2::sui::SUI`. pub(crate) async fn balance( &self, ctx: &Context<'_>, @@ -139,7 +143,8 @@ impl SuinsRegistration { /// The coin objects for this object. /// - ///`type` is a filter on the coin's type parameter, defaulting to `0x2::sui::SUI`. + /// `type` is a filter on the coin's type parameter, defaulting to + /// `0x2::sui::SUI`. pub(crate) async fn coins( &self, ctx: &Context<'_>, @@ -168,7 +173,8 @@ impl SuinsRegistration { .await } - /// The domain explicitly configured as the default domain pointing to this object. + /// The domain explicitly configured as the default domain pointing to this + /// object. pub(crate) async fn default_suins_name( &self, ctx: &Context<'_>, @@ -179,8 +185,8 @@ impl SuinsRegistration { .await } - /// The SuinsRegistration NFTs owned by this object. These grant the owner the capability to - /// manage the associated domain. + /// The SuinsRegistration NFTs owned by this object. These grant the owner + /// the capability to manage the associated domain. pub(crate) async fn suins_registrations( &self, ctx: &Context<'_>, @@ -198,18 +204,21 @@ impl SuinsRegistration { ObjectImpl(&self.super_.super_).version().await } - /// The current status of the object as read from the off-chain store. The possible states are: - /// NOT_INDEXED, the object is loaded from serialized data, such as the contents of a genesis or - /// system package upgrade transaction. LIVE, the version returned is the most recent for the - /// object, and it is not deleted or wrapped at that version. HISTORICAL, the object was - /// referenced at a specific version or checkpoint, so is fetched from historical tables and may - /// not be the latest version of the object. WRAPPED_OR_DELETED, the object is deleted or - /// wrapped and only partial information can be loaded." + /// The current status of the object as read from the off-chain store. The + /// possible states are: NOT_INDEXED, the object is loaded from + /// serialized data, such as the contents of a genesis or system package + /// upgrade transaction. LIVE, the version returned is the most recent for + /// the object, and it is not deleted or wrapped at that version. + /// HISTORICAL, the object was referenced at a specific version or + /// checkpoint, so is fetched from historical tables and may not be the + /// latest version of the object. WRAPPED_OR_DELETED, the object is deleted + /// or wrapped and only partial information can be loaded. pub(crate) async fn status(&self) -> ObjectStatus { ObjectImpl(&self.super_.super_).status().await } - /// 32-byte hash that identifies the object's contents, encoded as a Base58 string. + /// 32-byte hash that identifies the object's contents, encoded as a Base58 + /// string.
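For reference, the digest string described above can be produced with the same `fastcrypto` helper this diff already uses for transaction digests; a minimal sketch:

use fastcrypto::encoding::{Base58, Encoding};

/// Encode a 32-byte content digest as the Base58 string the field above
/// describes.
fn digest_string(digest: [u8; 32]) -> String {
    Base58::encode(digest)
}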
pub(crate) async fn digest(&self) -> Option { ObjectImpl(&self.super_.super_).digest().await } @@ -229,8 +238,9 @@ impl SuinsRegistration { .await } - /// The amount of SUI we would rebate if this object gets deleted or mutated. This number is - /// recalculated based on the present storage gas price. + /// The amount of SUI we would rebate if this object gets deleted or + /// mutated. This number is recalculated based on the present storage + /// gas price. pub(crate) async fn storage_rebate(&self) -> Option { ObjectImpl(&self.super_.super_).storage_rebate().await } @@ -255,33 +265,34 @@ impl SuinsRegistration { ObjectImpl(&self.super_.super_).bcs().await } - /// Displays the contents of the Move object in a JSON string and through GraphQL types. Also - /// provides the flat representation of the type signature, and the BCS of the corresponding - /// data. + /// Displays the contents of the Move object in a JSON string and through + /// GraphQL types. Also provides the flat representation of the type + /// signature, and the BCS of the corresponding data. pub(crate) async fn contents(&self) -> Option { MoveObjectImpl(&self.super_).contents().await } - /// Determines whether a transaction can transfer this object, using the TransferObjects - /// transaction command or `sui::transfer::public_transfer`, both of which require the object to + /// Determines whether a transaction can transfer this object, using the + /// TransferObjects transaction command or + /// `sui::transfer::public_transfer`, both of which require the object to /// have the `key` and `store` abilities. pub(crate) async fn has_public_transfer(&self, ctx: &Context<'_>) -> Result { MoveObjectImpl(&self.super_).has_public_transfer(ctx).await } - /// The set of named templates defined on-chain for the type of this object, to be handled - /// off-chain. The server substitutes data from the object into these templates to generate a - /// display string per template. + /// The set of named templates defined on-chain for the type of this object, + /// to be handled off-chain. The server substitutes data from the object + /// into these templates to generate a display string per template. pub(crate) async fn display(&self, ctx: &Context<'_>) -> Result>> { ObjectImpl(&self.super_.super_).display(ctx).await } - /// Access a dynamic field on an object using its name. Names are arbitrary Move values whose - /// type have `copy`, `drop`, and `store`, and are specified using their type, and their BCS - /// contents, Base64 encoded. + /// Access a dynamic field on an object using its name. Names are arbitrary + /// Move values whose type has `copy`, `drop`, and `store`, and are + /// specified using their type, and their BCS contents, Base64 encoded. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_field( &self, ctx: &Context<'_>, @@ -292,13 +303,14 @@ impl SuinsRegistration { .await } - /// Access a dynamic object field on an object using its name.
Names are + /// arbitrary Move values whose type has `copy`, `drop`, and `store`, + /// and are specified using their type, and their BCS contents, Base64 + /// encoded. The value of a dynamic object field can also be accessed /// off-chain directly via its address (e.g. using `Query.object`). /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_object_field( &self, ctx: &Context<'_>, @@ -311,8 +323,8 @@ impl SuinsRegistration { /// The dynamic fields and dynamic object fields on an object. /// - /// Dynamic fields on wrapped objects can be accessed by using the same API under the Owner - /// type. + /// Dynamic fields on wrapped objects can be accessed by using the same API + /// under the Owner type. pub(crate) async fn dynamic_fields( &self, ctx: &Context<'_>, @@ -340,37 +352,40 @@ impl SuinsRegistration { } impl NameService { - /// Lookup the SuiNS NameRecord for the given `domain` name. `config` specifies where to find - /// the domain name registry, and its type. + /// Look up the SuiNS NameRecord for the given `domain` name. `config` + /// specifies where to find the domain name registry, and its type. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this was queried - /// for, or `None` if the data was requested at the latest checkpoint. + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this was queried for, or `None` if the data was requested at + /// the latest checkpoint. /// - /// The `NameRecord` is returned only if it has not expired as of the `checkpoint_viewed_at` or - /// latest checkpoint's timestamp. + /// The `NameRecord` is returned only if it has not expired as of the + /// `checkpoint_viewed_at` or latest checkpoint's timestamp. /// - /// For leaf domains, the `NameRecord` is returned only if its parent is valid and not expired. + /// For leaf domains, the `NameRecord` is returned only if its parent is + /// valid and not expired. pub(crate) async fn resolve_to_record( ctx: &Context<'_>, domain: &Domain, checkpoint_viewed_at: Option, ) -> Result, Error> { - // Query for the domain's NameRecord and parent NameRecord if applicable. The checkpoint's - // timestamp is also fetched. These values are used to determine if the domain is expired. + // Query for the domain's NameRecord and parent NameRecord if applicable. The + // checkpoint's timestamp is also fetched. These values are used to + // determine if the domain is expired. let Some(domain_expiration) = Self::query_domain_expiration(ctx, domain, checkpoint_viewed_at).await? else { return Ok(None); }; - // Get the name_record from the query. If we didn't find it, we return as it means that the - // requested name is not registered. + // Get the name_record from the query. If we didn't find it, we return early, + // as that means the requested name is not registered. let Some(name_record) = domain_expiration.name_record else { return Ok(None); }; - // If name record is SLD, or Node subdomain, we can check the expiration and return the - // record if not expired. + // If the name record is an SLD or a node subdomain, we can check the + // expiration and return the record if not expired.
if !name_record.is_leaf_record() { return if !name_record.is_node_expired(domain_expiration.checkpoint_timestamp_ms) { Ok(Some(name_record)) @@ -384,8 +399,8 @@ impl NameService { return Err(Error::NameService(NameServiceError::NameExpired)); }; - // If the parent is valid for this leaf, and not expired, then we can return the name - // record. Otherwise, the name is expired. + // If the parent is valid for this leaf, and not expired, then we can return the + // name record. Otherwise, the name is expired. if parent_name_record.is_valid_leaf_parent(&name_record) && !parent_name_record.is_node_expired(domain_expiration.checkpoint_timestamp_ms) { @@ -395,11 +410,12 @@ impl NameService { } } - /// Lookup the SuiNS Domain for the given `address`. `config` specifies where to find the domain - /// name registry, and its type. + /// Look up the SuiNS Domain for the given `address`. `config` specifies + /// where to find the domain name registry, and its type. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this was queried - /// for, or `None` if the data was requested at the latest checkpoint. + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this was queried for, or `None` if the data was requested at + /// the latest checkpoint. pub(crate) async fn reverse_resolve_to_name( ctx: &Context<'_>, address: SuiAddress, @@ -429,8 +445,9 @@ impl NameService { let domain = Domain(field.value); - // We attempt to resolve the domain to a record, and if it fails, we return None. That way - // we can validate that the name has not expired and is still valid. + // We attempt to resolve the domain to a record, and if it fails, we return + // None. That way we can validate that the name has not expired and is + // still valid. let Some(_) = Self::resolve_to_record(ctx, &domain, checkpoint_viewed_at).await? else { return Ok(None); }; @@ -438,8 +455,8 @@ impl NameService { Ok(Some(domain.0)) } - /// Query for a domain's NameRecord, its parent's NameRecord if supplied, and the timestamp of - /// the checkpoint bound. + /// Query for a domain's NameRecord, its parent's NameRecord if supplied, + /// and the timestamp of the checkpoint bound. async fn query_domain_expiration( ctx: &Context<'_>, domain: &Domain, @@ -447,15 +464,16 @@ impl NameService { ) -> Result, Error> { let config = ctx.data_unchecked::(); let db: &crate::data::pg::PgExecutor = ctx.data_unchecked::(); - // Construct the list of `object_id`s to look up. The first element is the domain's - // `NameRecord`. If the domain is a subdomain, there will be a second element for the - // parent's `NameRecord`. + // Construct the list of `object_id`s to look up. The first element is the + // domain's `NameRecord`. If the domain is a subdomain, there will be a + // second element for the parent's `NameRecord`. let mut object_ids = vec![SuiAddress::from(config.record_field_id(&domain.0))]; if domain.0.is_subdomain() { object_ids.push(SuiAddress::from(config.record_field_id(&domain.0.parent()))); } - // Create a page with a bound of `object_ids` length to fetch the relevant `NameRecord`s. + // Create a page with a bound of `object_ids` length to fetch the relevant + // `NameRecord`s. let page: Page = Page::from_params( ctx.data_unchecked(), Some(object_ids.len() as u64), @@ -509,9 +527,9 @@ impl NameService { checkpoint_timestamp_ms, }; - // Max size of results is 2. We loop through them, convert to objects, and then parse - // name_record.
We then assign it to the correct field on `domain_expiration` based on the - // address. + // Max size of results is 2. We loop through them, convert to objects, and then + // parse name_record. We then assign it to the correct field on + // `domain_expiration` based on the address. for result in results { let object = Object::try_from_stored_history_object(result, None)?; let move_object = MoveObject::try_from(&object).map_err(|_| { @@ -535,13 +553,15 @@ impl NameService { } impl SuinsRegistration { - /// Query the database for a `page` of SuiNS registrations. The page uses the same cursor type - /// as is used for `Object`, and is further filtered to a particular `owner`. `config` specifies - /// where to find the domain name registry and its type. + /// Query the database for a `page` of SuiNS registrations. The page uses + /// the same cursor type as is used for `Object`, and is further + /// filtered to a particular `owner`. `config` specifies where to find + /// the domain name registry and its type. /// - /// `checkpoint_viewed_at` represents the checkpoint sequence number at which this page was - /// queried for, or `None` if the data was requested at the latest checkpoint. Each entity - /// returned in the connection will inherit this checkpoint, so that when viewing that entity's + /// `checkpoint_viewed_at` represents the checkpoint sequence number at + /// which this page was queried for, or `None` if the data was requested + /// at the latest checkpoint. Each entity returned in the connection + /// will inherit this checkpoint, so that when viewing that entity's /// state, it will be as if it was read at the same checkpoint. pub(crate) async fn paginate( db: &Db, @@ -575,8 +595,8 @@ impl SuinsRegistration { .await } - /// Return the type representing a `SuinsRegistration` on chain. This can change from chain to - /// chain (mainnet, testnet, devnet etc). + /// Return the type representing a `SuinsRegistration` on chain. This can + /// change from chain to chain (mainnet, testnet, devnet, etc.). pub(crate) fn type_(package: SuiAddress) -> StructTag { StructTag { address: package.into(), diff --git a/crates/sui-graphql-rpc/src/types/system_parameters.rs b/crates/sui-graphql-rpc/src/types/system_parameters.rs index 96e18254eef..07177bd993b 100644 --- a/crates/sui-graphql-rpc/src/types/system_parameters.rs +++ b/crates/sui-graphql-rpc/src/types/system_parameters.rs @@ -1,8 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::big_int::BigInt; use async_graphql::*; +use super::big_int::BigInt; + /// Details of the system that are decided during genesis. #[derive(Clone, Debug, PartialEq, Eq, SimpleObject)] pub(crate) struct SystemParameters { @@ -21,15 +22,17 @@ pub(crate) struct SystemParameters { /// Minimum stake needed to become a new validator. pub min_validator_joining_stake: Option, - /// Validators with stake below this threshold will enter the grace period (see - /// `validatorLowStakeGracePeriod`), after which they are removed from the active validator set. + /// Validators with stake below this threshold will enter the grace period + /// (see `validatorLowStakeGracePeriod`), after which they are removed + /// from the active validator set. pub validator_low_stake_threshold: Option, - /// Validators with stake below this threshold will be removed from the active validator set - /// at the next epoch boundary, without a grace period.
+ /// Validators with stake below this threshold will be removed from the + /// active validator set at the next epoch boundary, without a grace + /// period. pub validator_very_low_stake_threshold: Option, - /// The number of epochs that a validator has to recover from having less than - /// `validatorLowStakeThreshold` stake. + /// The number of epochs that a validator has to recover from having less + /// than `validatorLowStakeThreshold` stake. pub validator_low_stake_grace_period: Option, } diff --git a/crates/sui-graphql-rpc/src/types/system_state_summary.rs b/crates/sui-graphql-rpc/src/types/system_state_summary.rs index da464f154f2..d70243f9f6b 100644 --- a/crates/sui-graphql-rpc/src/types/system_state_summary.rs +++ b/crates/sui-graphql-rpc/src/types/system_state_summary.rs @@ -1,24 +1,25 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use async_graphql::*; +use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary as NativeSystemStateSummary; + use super::{ big_int::BigInt, gas::GasCostSummary, safe_mode::SafeMode, stake_subsidy::StakeSubsidy, storage_fund::StorageFund, system_parameters::SystemParameters, }; -use async_graphql::*; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary as NativeSystemStateSummary; #[derive(Clone, Debug)] pub(crate) struct SystemStateSummary { pub native: NativeSystemStateSummary, } -/// Aspects that affect the running of the system that are managed by the validators either -/// directly, or through system transactions. +/// Aspects that affect the running of the system, managed by the validators +/// either directly or through system transactions. #[Object] impl SystemStateSummary { - /// SUI set aside to account for objects stored on-chain, at the start of the epoch. - /// This is also used for storage rebates. + /// SUI set aside to account for objects stored on-chain, at the start of + /// the epoch. This is also used for storage rebates. async fn storage_fund(&self) -> Option { Some(StorageFund { total_object_storage_rebates: Some(BigInt::from( @@ -30,8 +31,8 @@ impl SystemStateSummary { }) } - /// Information about whether this epoch was started in safe mode, which happens if the full epoch - /// change logic fails for some reason. + /// Information about whether this epoch was started in safe mode, which + /// happens if the full epoch change logic fails for some reason. async fn safe_mode(&self) -> Option { Some(SafeMode { enabled: Some(self.native.safe_mode), @@ -44,8 +45,8 @@ impl SystemStateSummary { }) } - /// The value of the `version` field of `0x5`, the `0x3::sui::SuiSystemState` object. This - /// version changes whenever the fields contained in the system state object (held in a dynamic + /// The value of the `version` field of `0x5`, the + /// `0x3::sui::SuiSystemState` object. This version changes whenever + /// the fields contained in the system state object (held in a dynamic /// field attached to `0x5`) change.
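The interaction between the two stake thresholds and the grace period documented above can be summarised with a small sketch. All names here are hypothetical stand-ins, not the on-chain logic:

enum ValidatorFate {
    Retained,
    InGracePeriod { epochs_left: u64 },
    Removed,
}

/// Classify a validator against the thresholds described above: below the
/// very-low threshold it is removed at the next epoch boundary; below the
/// low threshold it has `grace_period` epochs to recover.
fn classify(
    stake: u64,
    low_threshold: u64,
    very_low_threshold: u64,
    epochs_below_low: u64,
    grace_period: u64,
) -> ValidatorFate {
    if stake < very_low_threshold {
        ValidatorFate::Removed // no grace period at all
    } else if stake < low_threshold {
        if epochs_below_low >= grace_period {
            ValidatorFate::Removed
        } else {
            ValidatorFate::InGracePeriod {
                epochs_left: grace_period - epochs_below_low,
            }
        }
    } else {
        ValidatorFate::Retained
    }
}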
async fn system_state_version(&self) -> Option { Some(self.native.system_state_version) diff --git a/crates/sui-graphql-rpc/src/types/transaction_block.rs b/crates/sui-graphql-rpc/src/types/transaction_block.rs index 91f3d332570..ce2c807e4af 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block.rs @@ -26,13 +26,6 @@ use sui_types::{ }, }; -use crate::{ - consistency::Checkpointed, - data::{self, Db, DbConnection, QueryExecutor}, - error::Error, - types::intersect, -}; - use super::{ address::Address, base64::Base64, @@ -46,9 +39,16 @@ use super::{ transaction_block_kind::TransactionBlockKind, type_filter::FqNameFilter, }; +use crate::{ + consistency::Checkpointed, + data::{self, Db, DbConnection, QueryExecutor}, + error::Error, + types::intersect, +}; -/// Wraps the actual transaction block data with the checkpoint sequence number at which the data -/// was viewed, for consistent results on paginating through and resolving nested types. +/// Wraps the actual transaction block data with the checkpoint sequence number +/// at which the data was viewed, for consistent results on paginating through +/// and resolving nested types. #[derive(Clone, Debug)] pub(crate) struct TransactionBlock { pub inner: TransactionBlockInner, @@ -72,7 +72,8 @@ pub(crate) enum TransactionBlockInner { events: Vec, }, /// A transaction block that has been executed via dryRunTransactionBlock. - /// This variant also does not return signatures or digest since only `NativeTransactionData` is present. + /// This variant also does not return signatures or digest since only + /// `NativeTransactionData` is present. DryRun { tx_data: NativeTransactionData, effects: NativeTransactionEffects, @@ -94,7 +95,8 @@ pub(crate) enum TransactionBlockKindInput { pub(crate) struct TransactionBlockFilter { pub function: Option, - /// An input filter selecting for either system or programmable transactions. + /// An input filter selecting for either system or programmable + /// transactions. pub kind: Option, pub after_checkpoint: Option, pub at_checkpoint: Option, @@ -112,9 +114,9 @@ pub(crate) struct TransactionBlockFilter { pub(crate) type Cursor = cursor::JsonCursor; type Query = data::Query; -/// The cursor returned for each `TransactionBlock` in a connection's page of results. The -/// `checkpoint_viewed_at` will set the consistent upper bound for subsequent queries made on this -/// cursor. +/// The cursor returned for each `TransactionBlock` in a connection's page of +/// results. The `checkpoint_viewed_at` will set the consistent upper bound for +/// subsequent queries made on this cursor. #[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] pub(crate) struct TransactionBlockCursor { /// The checkpoint sequence number this was viewed at. @@ -129,15 +131,16 @@ pub(crate) struct TransactionBlockCursor { #[Object] impl TransactionBlock { - /// A 32-byte hash that uniquely identifies the transaction block contents, encoded in Base58. - /// This serves as a unique id for the block on chain. + /// A 32-byte hash that uniquely identifies the transaction block contents, + /// encoded in Base58. This serves as a unique id for the block on + /// chain. async fn digest(&self) -> Option { self.native_signed_data() .map(|s| Base58::encode(s.digest())) } - /// The address corresponding to the public key that signed this transaction. System - /// transactions do not have senders. + /// The address corresponding to the public key that signed this + /// transaction. 
System transactions do not have senders. async fn sender(&self) -> Option
{ let sender = self.native().sender(); @@ -147,11 +150,12 @@ impl TransactionBlock { }) } - /// The gas input field provides information on what objects were used as gas as well as the - /// owner of the gas object(s) and information on the gas price and budget. + /// The gas input field provides information on what objects were used as + /// gas as well as the owner of the gas object(s) and information on the + /// gas price and budget. /// - /// If the owner of the gas object(s) is not the same as the sender, the transaction block is a - /// sponsored transaction block. + /// If the owner of the gas object(s) is not the same as the sender, the + /// transaction block is a sponsored transaction block. async fn gas_input(&self) -> Option { let checkpoint_sequence_number = match &self.inner { TransactionBlockInner::Stored { stored_tx, .. } => { @@ -166,8 +170,8 @@ impl TransactionBlock { )) } - /// The type of this transaction as well as the commands and/or parameters comprising the - /// transaction of this kind. + /// The type of this transaction as well as the commands and/or parameters + /// comprising the transaction of this kind. async fn kind(&self) -> Option { Some(TransactionBlockKind::from( self.native().kind().clone(), @@ -175,8 +179,8 @@ impl TransactionBlock { )) } - /// A list of all signatures, Base64-encoded, from senders, and potentially the gas owner if - /// this is a sponsored transaction. + /// A list of all signatures, Base64-encoded, from senders, and potentially + /// the gas owner if this is a sponsored transaction. async fn signatures(&self) -> Option> { self.native_signed_data().map(|s| { s.tx_signatures() @@ -186,14 +190,16 @@ impl TransactionBlock { }) } - /// The effects field captures the results to the chain of executing this transaction. + /// The effects field captures the results of executing this transaction on + /// the chain. async fn effects(&self) -> Result> { Ok(Some(self.clone().try_into().extend()?)) } - /// This field is set by senders of a transaction block. It is an epoch reference that sets a - /// deadline after which validators will no longer consider the transaction valid. By default, - /// there is no deadline for when a transaction must execute. + /// This field is set by senders of a transaction block. It is an epoch + /// reference that sets a deadline after which validators will no longer + /// consider the transaction valid. By default, there is no deadline for + /// when a transaction must execute. async fn expiration(&self, ctx: &Context<'_>) -> Result> { let TransactionExpiration::Epoch(id) = self.native().expiration() else { return Ok(None); @@ -204,7 +210,8 @@ impl TransactionBlock { .extend() } - /// Serialized form of this transaction's `SenderSignedData`, BCS serialized and Base64 encoded. + /// Serialized form of this transaction's `SenderSignedData`, BCS serialized + /// and Base64 encoded. async fn bcs(&self) -> Option { match &self.inner { TransactionBlockInner::Stored { stored_tx, .. } => { @@ -236,9 +243,10 @@ impl TransactionBlock { } } - /// Look up a `TransactionBlock` in the database, by its transaction digest. If - /// `checkpoint_viewed_at` is provided, the transaction block will inherit the value. Otherwise, - /// it will be set to the upper bound of the available range at the time of the query. + /// Look up a `TransactionBlock` in the database, by its transaction digest. + /// If `checkpoint_viewed_at` is provided, the transaction block will + /// inherit the value.
Otherwise, it will be set to the upper bound of + /// the available range at the time of the query. pub(crate) async fn query( db: &Db, digest: Digest, @@ -275,9 +283,10 @@ impl TransactionBlock { })) } - /// Look up multiple `TransactionBlock`s by their digests. Returns a map from those digests to - /// their resulting transaction blocks, for the blocks that could be found. We return a map - /// because the order of results from the DB is not otherwise guaranteed to match the order that + /// Look up multiple `TransactionBlock`s by their digests. Returns a map + /// from those digests to their resulting transaction blocks, for the + /// blocks that could be found. We return a map because the order of + /// results from the DB is not otherwise guaranteed to match the order that /// digests were passed into `multi_query`. pub(crate) async fn multi_query( db: &Db, @@ -312,17 +321,19 @@ impl TransactionBlock { Ok(transactions) } - /// Query the database for a `page` of TransactionBlocks. The page uses `tx_sequence_number` and - /// `checkpoint_viewed_at` as the cursor, and can optionally be further `filter`-ed. + /// Query the database for a `page` of TransactionBlocks. The page uses + /// `tx_sequence_number` and `checkpoint_viewed_at` as the cursor, and + /// can optionally be further `filter`-ed. /// /// The `checkpoint_viewed_at` parameter is an Option representing the - /// checkpoint_sequence_number at which this page was queried for, or `None` if the data was - /// requested at the latest checkpoint. Each entity returned in the connection will inherit this - /// checkpoint, so that when viewing that entity's state, it will be from the reference of this + /// checkpoint_sequence_number at which this page was queried for, or `None` + /// if the data was requested at the latest checkpoint. Each entity + /// returned in the connection will inherit this checkpoint, so that + /// when viewing that entity's state, it will be from the reference of this /// checkpoint_viewed_at parameter. /// - /// If the `Page` is set, then this function will defer to the `checkpoint_viewed_at` in - /// the cursor if they are consistent. + /// If the `Page` is set, then this function will defer to the + /// `checkpoint_viewed_at` in the cursor if they are consistent. pub(crate) async fn paginate( db: &Db, page: Page, @@ -426,8 +437,9 @@ impl TransactionBlock { let mut conn = Connection::new(prev, next); - // Defer to the provided checkpoint_viewed_at, but if it is not provided, use the - // current available range. This sets a consistent upper bound for the nested queries. + // Defer to the provided checkpoint_viewed_at, but if it is not provided, use + // the current available range. This sets a consistent upper bound for + // the nested queries. for stored in results { let cursor = stored.cursor(checkpoint_viewed_at).encode_cursor(); let inner = TransactionBlockInner::try_from(stored)?; @@ -443,9 +455,10 @@ impl TransactionBlock { } impl TransactionBlockFilter { - /// Try to create a filter whose results are the intersection of transaction blocks in `self`'s - /// results and transaction blocks in `other`'s results. This may not be possible if the - /// resulting filter is inconsistent in some way (e.g. a filter that requires one field to be + /// Try to create a filter whose results are the intersection of transaction + /// blocks in `self`'s results and transaction blocks in `other`'s + /// results. This may not be possible if the resulting filter is + /// inconsistent in some way (e.g. 
a filter that requires one field to be /// two different values simultaneously). pub(crate) fn intersect(self, other: Self) -> Option { macro_rules! intersect { diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_effects.rs b/crates/sui-graphql-rpc/src/types/transaction_block_effects.rs index 4d3c0acbdf7..a3797ed20a9 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block_effects.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block_effects.rs @@ -1,7 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{consistency::ConsistentIndexCursor, error::Error}; use async_graphql::{ connection::{Connection, ConnectionNameType, CursorType, Edge, EdgeNameType, EmptyFields}, *, }; @@ -11,8 +10,9 @@ use sui_types::{ effects::{TransactionEffects as NativeTransactionEffects, TransactionEffectsAPI}, event::Event as NativeEvent, execution_status::ExecutionStatus as NativeExecutionStatus, - transaction::SenderSignedData as NativeSenderSignedData, - transaction::TransactionData as NativeTransactionData, + transaction::{ + SenderSignedData as NativeSenderSignedData, TransactionData as NativeTransactionData, + }, }; use super::{ @@ -29,9 +29,11 @@ use super::{ transaction_block::{TransactionBlock, TransactionBlockInner}, unchanged_shared_object::UnchangedSharedObject, }; +use crate::{consistency::ConsistentIndexCursor, error::Error}; -/// Wraps the actual transaction block effects data with the checkpoint sequence number at which the -/// data was viewed, for consistent results on paginating through and resolving nested types. +/// Wraps the actual transaction block effects data with the checkpoint sequence +/// number at which the data was viewed, for consistent results on paginating +/// through and resolving nested types. #[derive(Clone, Debug)] pub(crate) struct TransactionBlockEffects { pub kind: TransactionBlockEffectsKind, @@ -48,14 +50,16 @@ pub(crate) enum TransactionBlockEffectsKind { native: NativeTransactionEffects, }, /// A transaction block that has been executed via executeTransactionBlock - /// but not yet indexed. So it does not contain checkpoint, timestamp or balanceChanges. + /// but not yet indexed. So it does not contain checkpoint, timestamp or + /// balanceChanges. Executed { tx_data: NativeSenderSignedData, native: NativeTransactionEffects, events: Vec, }, - /// A transaction block that has been executed via dryRunTransactionBlock. Similar to - /// Executed, it does not contain checkpoint, timestamp or balanceChanges. + /// A transaction block that has been executed via dryRunTransactionBlock. + /// Similar to Executed, it does not contain checkpoint, timestamp or + /// balanceChanges. DryRun { tx_data: NativeTransactionData, native: NativeTransactionEffects, @@ -72,8 +76,9 @@ pub enum ExecutionStatus { Failure, } -/// Type to override names of the Dependencies Connection (which has nullable transactions and -/// therefore must be a different types to the default `TransactionBlockConnection`). +/// Type to override names of the Dependencies Connection (which has nullable +/// transactions and therefore must be a different type from the default +/// `TransactionBlockConnection`). struct DependencyConnectionNames; type CDependencies = JsonCursor; @@ -98,8 +103,9 @@ impl TransactionBlockEffects { }) } - /// The latest version of all objects (apart from packages) that have been created or modified - /// by this transaction, immediately following this transaction.
+ /// The latest version of all objects (apart from packages) that have been + /// created or modified by this transaction, immediately following this + /// transaction. async fn lamport_version(&self) -> u64 { self.native().lamport_version().value() } @@ -201,7 +207,8 @@ impl TransactionBlockEffects { Some(GasEffects::from(self.native(), self.checkpoint_viewed_at)) } - /// Shared objects that are referenced by but not changed by this transaction. + /// Shared objects that are referenced by but not changed by this + /// transaction. async fn unchanged_shared_objects( &self, ctx: &Context<'_>, @@ -232,7 +239,8 @@ impl TransactionBlockEffects { .edges .push(Edge::new(c.encode_cursor(), unchanged_shared_object)); } - Err(_shared_object_changed) => continue, // Only add unchanged shared objects to the connection. + Err(_shared_object_changed) => continue, /* Only add unchanged shared objects to * the connection. */ } } @@ -276,8 +284,8 @@ impl TransactionBlockEffects { Ok(connection) } - /// The effect this transaction had on the balances (sum of coin values per coin type) of - /// addresses and objects. + /// The effect this transaction had on the balances (sum of coin values per + /// coin type) of addresses and objects. async fn balance_changes( &self, ctx: &Context<'_>, @@ -361,7 +369,8 @@ impl TransactionBlockEffects { Ok(connection) } - /// Timestamp corresponding to the checkpoint this transaction was finalized in. + /// Timestamp corresponding to the checkpoint this transaction was finalized + /// in. async fn timestamp(&self) -> Result, Error> { let TransactionBlockEffectsKind::Stored { stored_tx, .. } = &self.kind else { return Ok(None); @@ -382,7 +391,8 @@ impl TransactionBlockEffects { /// The checkpoint this transaction was finalized in. async fn checkpoint(&self, ctx: &Context<'_>) -> Result> { - // If the transaction data is not a stored transaction, it's not in the checkpoint yet so we return None. + // If the transaction data is not a stored transaction, it's not in the + // checkpoint yet so we return None. let TransactionBlockEffectsKind::Stored { stored_tx, .. } = &self.kind else { return Ok(None); }; diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/authenticator_state_update.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/authenticator_state_update.rs index 8fc56a73ca4..7c2dec66ef0 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/authenticator_state_update.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/authenticator_state_update.rs @@ -5,7 +5,6 @@ use async_graphql::{ connection::{Connection, CursorType, Edge}, *, }; - use sui_types::{ authenticator_state::ActiveJwk as NativeActiveJwk, transaction::AuthenticatorStateUpdate as NativeAuthenticatorStateUpdateTransaction, @@ -28,7 +27,8 @@ pub(crate) struct AuthenticatorStateUpdateTransaction { pub(crate) type CActiveJwk = JsonCursor; -/// The active JSON Web Key representing a set of public keys for an OpenID provider +/// The active JSON Web Key representing a set of public keys for an OpenID +/// provider. struct ActiveJwk { native: NativeActiveJwk, /// The checkpoint sequence number this was viewed at. checkpoint_viewed_at: u64, } @@ -103,7 +103,8 @@ impl ActiveJwk { &self.native.jwk_id.iss } - /// The string (Key ID) that identifies the JWK among a set of JWKs, (RFC 7517, Section 4.5). + /// The string (Key ID) that identifies the JWK among a set of JWKs (RFC + /// 7517, Section 4.5).
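As a rough illustration of the identifier the two fields above describe (all values hypothetical):

/// Stand-in for the JWK identifier referenced above: an OpenID provider is
/// identified by its issuer, and a key within that provider's set by its
/// Key ID (RFC 7517, Section 4.5).
struct JwkId {
    /// e.g. "https://accounts.google.com" (hypothetical value)
    iss: String,
    /// Distinguishes keys across the provider's key rotations.
    kid: String,
}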
async fn kid(&self) -> &str { &self.native.jwk_id.kid } diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/consensus_commit_prologue.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/consensus_commit_prologue.rs index 4c5284fa601..270724bf066 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/consensus_commit_prologue.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/consensus_commit_prologue.rs @@ -1,7 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::types::{date_time::DateTime, epoch::Epoch}; use async_graphql::*; use fastcrypto::encoding::{Base58, Encoding}; use sui_types::{ @@ -13,10 +12,13 @@ use sui_types::{ }, }; -/// Other transaction kinds are usually represented by directly wrapping their native -/// representation. This kind has two native versions in the protocol, so the same cannot be done. -/// V2 has all the fields of V1 and one extra (consensus commit digest). The GraphQL representation -/// of this type is a struct containing all the common fields, as they are in the native type, and +use crate::types::{date_time::DateTime, epoch::Epoch}; + +/// Other transaction kinds are usually represented by directly wrapping their +/// native representation. This kind has two native versions in the protocol, so +/// the same cannot be done. V2 has all the fields of V1 and one extra +/// (consensus commit digest). The GraphQL representation of this type is a +/// struct containing all the common fields, as they are in the native type, and /// an optional `consensus_commit_digest`. #[derive(Clone, PartialEq, Eq)] pub(crate) struct ConsensusCommitPrologueTransaction { @@ -28,8 +30,9 @@ pub(crate) struct ConsensusCommitPrologueTransaction { checkpoint_viewed_at: u64, } -/// System transaction that runs at the beginning of a checkpoint, and is responsible for setting -/// the current value of the clock, based on the timestamp from consensus. +/// System transaction that runs at the beginning of a checkpoint, and is +/// responsible for setting the current value of the clock, based on the +/// timestamp from consensus. #[Object] impl ConsensusCommitPrologueTransaction { /// Epoch of the commit prologue transaction. @@ -49,8 +52,8 @@ impl ConsensusCommitPrologueTransaction { Ok(DateTime::from_ms(self.commit_timestamp_ms as i64)?) } - /// Digest of consensus output, encoded as a Base58 string (only available from V2 of the - /// transaction). + /// Digest of consensus output, encoded as a Base58 string (only available + /// from V2 of the transaction). async fn consensus_commit_digest(&self) -> Option { self.consensus_commit_digest .map(|digest| Base58::encode(digest.inner())) diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/end_of_epoch.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/end_of_epoch.rs index 6e0626bbfd5..781d8873ee0 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/end_of_epoch.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/end_of_epoch.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use async_graphql::connection::{Connection, CursorType, Edge}; -use async_graphql::*; -use move_binary_format::errors::PartialVMResult; -use move_binary_format::CompiledModule; +use async_graphql::{ + connection::{Connection, CursorType, Edge}, + *, +}; +use move_binary_format::{errors::PartialVMResult, CompiledModule}; use sui_types::{ digests::TransactionDigest, object::Object as NativeObject, @@ -15,14 +16,17 @@ use sui_types::{ }, }; -use crate::consistency::ConsistentIndexCursor; -use crate::types::cursor::{JsonCursor, Page}; -use crate::types::sui_address::SuiAddress; use crate::{ + consistency::ConsistentIndexCursor, error::Error, types::{ - big_int::BigInt, date_time::DateTime, epoch::Epoch, move_package::MovePackage, + big_int::BigInt, + cursor::{JsonCursor, Page}, + date_time::DateTime, + epoch::Epoch, + move_package::MovePackage, object::Object, + sui_address::SuiAddress, }, }; @@ -81,12 +85,14 @@ pub(crate) struct CoinDenyListStateCreateTransaction { pub(crate) type CTxn = JsonCursor; pub(crate) type CPackage = JsonCursor; -/// System transaction that supersedes `ChangeEpochTransaction` as the new way to run transactions -/// at the end of an epoch. Behaves similarly to `ChangeEpochTransaction` but can accommodate other -/// optional transactions to run at the end of the epoch. +/// System transaction that supersedes `ChangeEpochTransaction` as the new way +/// to run transactions at the end of an epoch. Behaves similarly to +/// `ChangeEpochTransaction` but can accommodate other optional transactions to +/// run at the end of the epoch. #[Object] impl EndOfEpochTransaction { - /// The list of system transactions that are allowed to run at the end of the epoch. + /// The list of system transactions that are allowed to run at the end of + /// the epoch. async fn transactions( &self, ctx: &Context<'_>, @@ -116,9 +122,10 @@ impl EndOfEpochTransaction { } } -/// A system transaction that updates epoch information on-chain (increments the current epoch). -/// Executed by the system once per epoch, without using gas. Epoch change transactions cannot be -/// submitted by users, because validators will refuse to sign them. +/// A system transaction that updates epoch information on-chain (increments the +/// current epoch). Executed by the system once per epoch, without using gas. +/// Epoch change transactions cannot be submitted by users, because validators +/// will refuse to sign them. /// /// This transaction kind is deprecated in favour of `EndOfEpochTransaction`. #[Object] @@ -139,23 +146,26 @@ impl ChangeEpochTransaction { self.native.protocol_version.as_u64() } - /// The total amount of gas charged for storage during the previous epoch (in MIST). + /// The total amount of gas charged for storage during the previous epoch + /// (in MIST). async fn storage_charge(&self) -> BigInt { BigInt::from(self.native.storage_charge) } - /// The total amount of gas charged for computation during the previous epoch (in MIST). + /// The total amount of gas charged for computation during the previous + /// epoch (in MIST). async fn computation_charge(&self) -> BigInt { BigInt::from(self.native.computation_charge) } - /// The SUI returned to transaction senders for cleaning up objects (in MIST). + /// The SUI returned to transaction senders for cleaning up objects (in + /// MIST). 
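A hedged sketch of how the charges and rebates documented above relate arithmetically; the field names are stand-ins, and this is not the protocol's accounting code:

struct EpochGasSummary {
    storage_charge: u64,     // MIST charged for storage over the epoch
    computation_charge: u64, // MIST charged for computation over the epoch
    storage_rebate: u64,     // MIST returned to senders for cleaned-up objects
}

impl EpochGasSummary {
    /// Net MIST retained by the system for the epoch, before accounting
    /// for the non-refundable portion of storage fees (described below).
    fn net_charge(&self) -> i128 {
        self.storage_charge as i128 + self.computation_charge as i128
            - self.storage_rebate as i128
    }
}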
async fn storage_rebate(&self) -> BigInt { BigInt::from(self.native.storage_rebate) } - /// The total gas retained from storage fees, that will not be returned by storage rebates when - /// the relevant objects are cleaned up (in MIST). + /// The total gas retained from storage fees that will not be returned by + /// storage rebates when the relevant objects are cleaned up (in MIST). async fn non_refundable_storage_fee(&self) -> BigInt { BigInt::from(self.native.non_refundable_storage_fee) } @@ -165,9 +175,9 @@ impl ChangeEpochTransaction { DateTime::from_ms(self.native.epoch_start_timestamp_ms as i64) } - /// System packages (specifically framework and move stdlib) that are written before the new - /// epoch starts, to upgrade them on-chain. Validators write these packages out when running the - /// transaction. + /// System packages (specifically framework and move stdlib) that are + /// written before the new epoch starts, to upgrade them on-chain. + /// Validators write these packages out when running the transaction. async fn system_packages( &self, ctx: &Context<'_>, diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/genesis.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/genesis.rs index 8cd91381778..1cb8f881305 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/genesis.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/genesis.rs @@ -29,7 +29,8 @@ pub(crate) struct GenesisTransaction { pub(crate) type CObject = JsonCursor; -/// System transaction that initializes the network and writes the initial set of objects on-chain. +/// System transaction that initializes the network and writes the initial set +/// of objects on-chain. #[Object] impl GenesisTransaction { /// Objects to be created during genesis. diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/mod.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/mod.rs index a76007c0bfe..e2e64f8de45 100644 --- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/mod.rs +++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/mod.rs @@ -1,6 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use async_graphql::*; +use sui_types::transaction::TransactionKind as NativeTransactionKind; + use self::{ consensus_commit_prologue::ConsensusCommitPrologueTransaction, end_of_epoch::ChangeEpochTransaction, genesis::GenesisTransaction, @@ -10,8 +13,6 @@ use crate::types::transaction_block_kind::{ authenticator_state_update::AuthenticatorStateUpdateTransaction, end_of_epoch::EndOfEpochTransaction, programmable::ProgrammableTransactionBlock, }; -use async_graphql::*; -use sui_types::transaction::TransactionKind as NativeTransactionKind; pub(crate) mod authenticator_state_update; pub(crate) mod consensus_commit_prologue; @@ -20,7 +21,8 @@ pub(crate) mod genesis; pub(crate) mod programmable; pub(crate) mod randomness_state_update; -/// The kind of transaction block, either a programmable transaction or a system transaction. +/// The kind of transaction block, either a programmable transaction or a system +/// transaction.
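A trimmed sketch of what the two-way split described above amounts to: one programmable variant, and everything else a system transaction. Variant names here are abbreviated stand-ins for the ones in this diff:

enum TxKindSketch {
    Programmable,
    ConsensusCommitPrologue,
    Genesis,
    ChangeEpoch,
    EndOfEpoch,
    AuthenticatorStateUpdate,
    RandomnessStateUpdate,
}

/// Everything except a programmable transaction is a system transaction.
fn is_system(kind: &TxKindSketch) -> bool {
    !matches!(kind, TxKindSketch::Programmable)
}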
 #[derive(Union, PartialEq, Clone, Eq)]
 pub(crate) enum TransactionBlockKind {
     ConsensusCommitPrologue(ConsensusCommitPrologueTransaction),
diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/programmable.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/programmable.rs
index c4d185f841a..a18eb9309c7 100644
--- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/programmable.rs
+++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/programmable.rs
@@ -1,6 +1,17 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use async_graphql::{
+    connection::{Connection, CursorType, Edge},
+    *,
+};
+use sui_json_rpc_types::SuiArgument;
+use sui_types::transaction::{
+    Argument as NativeArgument, CallArg as NativeCallArg, Command as NativeProgrammableTransaction,
+    ObjectArg as NativeObjectArg, ProgrammableMoveCall as NativeMoveCallTransaction,
+    ProgrammableTransaction as NativeProgrammableTransactionBlock,
+};
+
 use crate::{
     consistency::ConsistentIndexCursor,
     types::{
@@ -12,16 +23,6 @@ use crate::{
         sui_address::SuiAddress,
     },
 };
-use async_graphql::{
-    connection::{Connection, CursorType, Edge},
-    *,
-};
-use sui_json_rpc_types::SuiArgument;
-use sui_types::transaction::{
-    Argument as NativeArgument, CallArg as NativeCallArg, Command as NativeProgrammableTransaction,
-    ObjectArg as NativeObjectArg, ProgrammableMoveCall as NativeMoveCallTransaction,
-    ProgrammableTransaction as NativeProgrammableTransactionBlock,
-};
 
 #[derive(Clone, Eq, PartialEq)]
 pub(crate) struct ProgrammableTransactionBlock {
@@ -54,11 +55,12 @@ struct SharedInput {
     address: SuiAddress,
 
     /// The version that this object was shared at.
     initial_shared_version: u64,
 
-    /// Controls whether the transaction block can reference the shared object as a mutable
-    /// reference or by value. This has implications for scheduling: Transactions that just read
-    /// shared objects at a certain version (mutable = false) can be executed concurrently, while
-    /// transactions that write shared objects (mutable = true) must be executed serially with
-    /// respect to each other.
+    /// Controls whether the transaction block can reference the shared object
+    /// as a mutable reference or by value. This has implications for
+    /// scheduling: Transactions that just read shared objects at a certain
+    /// version (mutable = false) can be executed concurrently, while
+    /// transactions that write shared objects (mutable = true) must be executed
+    /// serially with respect to each other.
     mutable: bool,
 }
 
@@ -94,8 +96,8 @@ struct MoveCallTransaction {
     checkpoint_viewed_at: u64,
 }
 
-/// Transfers `inputs` to `address`. All inputs must have the `store` ability (allows public
-/// transfer) and must not be previously immutable or shared.
+/// Transfers `inputs` to `address`. All inputs must have the `store` ability
+/// (allows public transfer) and must not be previously immutable or shared.
 #[derive(SimpleObject, Clone, Eq, PartialEq)]
 struct TransferObjectsTransaction {
     /// The objects to transfer.
@@ -105,8 +107,8 @@ struct TransferObjectsTransaction {
     address: TransactionArgument,
 }
 
-/// Splits off coins with denominations in `amounts` from `coin`, returning multiple results (as
-/// many as there are amounts.)
+/// Splits off coins with denominations in `amounts` from `coin`, returning
+/// multiple results (as many as there are amounts).
 #[derive(SimpleObject, Clone, Eq, PartialEq)]
 struct SplitCoinsTransaction {
     /// The coin to split.
@@ -129,7 +131,8 @@ struct MergeCoinsTransaction {
 /// Publishes a Move Package.
 #[derive(SimpleObject, Clone, Eq, PartialEq)]
 struct PublishTransaction {
-    /// Bytecode for the modules to be published, BCS serialized and Base64 encoded.
+    /// Bytecode for the modules to be published, BCS serialized and Base64
+    /// encoded.
     modules: Vec<Base64>,
 
     /// IDs of the transitive dependencies of the package to be published.
@@ -139,7 +142,8 @@
 /// Upgrades a Move Package.
 #[derive(SimpleObject, Clone, Eq, PartialEq)]
 struct UpgradeTransaction {
-    /// Bytecode for the modules to be published, BCS serialized and Base64 encoded.
+    /// Bytecode for the modules to be published, BCS serialized and Base64
+    /// encoded.
     modules: Vec<Base64>,
 
     /// IDs of the transitive dependencies of the package to be published.
@@ -155,7 +159,8 @@
 /// Create a vector (possibly empty).
 #[derive(SimpleObject, Clone, Eq, PartialEq)]
 struct MakeMoveVecTransaction {
-    /// If the elements are not objects, or the vector is empty, a type must be supplied.
+    /// If the elements are not objects, or the vector is empty, a type must be
+    /// supplied.
     #[graphql(name = "type")]
     type_: Option,
 
@@ -171,8 +176,9 @@ pub(crate) enum TransactionArgument {
     Result(TxResult),
 }
 
-/// Access to the gas inputs, after they have been smashed into one coin. The gas coin can only be
-/// used by reference, except for with `TransferObjectsTransaction` that can accept it by value.
+/// Access to the gas inputs, after they have been smashed into one coin. The
+/// gas coin can only be used by reference, except with
+/// `TransferObjectsTransaction`, which can accept it by value.
 #[derive(SimpleObject, Clone, Debug, Eq, PartialEq)]
 pub(crate) struct GasCoin {
     /// A workaround to define an empty variant of a GraphQL union.
@@ -180,7 +186,8 @@ pub(crate) struct GasCoin {
     dummy: Option<bool>,
 }
 
-/// One of the input objects or primitive values to the programmable transaction block.
+/// One of the input objects or primitive values to the programmable transaction
+/// block.
 #[derive(SimpleObject, Clone, Debug, Eq, PartialEq)]
 pub(crate) struct Input {
     /// Index of the programmable transaction block input (0-indexed).
@@ -194,13 +201,15 @@ pub(crate) struct TxResult {
     /// The index of the previous command (0-indexed) that returned this result.
     cmd: u16,
 
-    /// If the previous command returns multiple values, this is the index of the individual result
-    /// among the multiple results from that command (also 0-indexed).
+    /// If the previous command returns multiple values, this is the index of
+    /// the individual result among the multiple results from that command
+    /// (also 0-indexed).
     ix: Option<u16>,
 }
 
-/// A user transaction that allows the interleaving of native commands (like transfer, split coins,
-/// merge coins, etc) and move calls, executed atomically.
+/// A user transaction that allows the interleaving of native commands (like
+/// transfer, split coins, merge coins, etc) and move calls, executed
+/// atomically.
 #[Object]
 impl ProgrammableTransactionBlock {
     /// Input objects or primitive values.
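Aside: the `TransactionArgument` union above mirrors `sui_types::transaction::Argument` (`GasCoin`, `Input(u16)`, `Result(u16)`, `NestedResult(u16, u16)`). A minimal sketch of how the native enum could map onto the GraphQL types in this file, assuming those types are in scope and that `Input`'s index field is named `ix`; the `From` impl is illustrative only, not necessarily the conversion the crate actually uses:

    // Illustrative sketch, not the crate's actual conversion code.
    impl From<NativeArgument> for TransactionArgument {
        fn from(arg: NativeArgument) -> Self {
            match arg {
                // The smashed gas coin: usable by reference only, except in
                // `TransferObjectsTransaction`, which may take it by value.
                NativeArgument::GasCoin => Self::GasCoin(GasCoin { dummy: None }),
                // A transaction input, by 0-indexed position.
                NativeArgument::Input(ix) => Self::Input(Input { ix }),
                // The sole result of an earlier command.
                NativeArgument::Result(cmd) => Self::Result(TxResult { cmd, ix: None }),
                // One of several results of an earlier command (both 0-indexed).
                NativeArgument::NestedResult(cmd, ix) => {
                    Self::Result(TxResult { cmd, ix: Some(ix) })
                }
            }
        }
    }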
diff --git a/crates/sui-graphql-rpc/src/types/transaction_block_kind/randomness_state_update.rs b/crates/sui-graphql-rpc/src/types/transaction_block_kind/randomness_state_update.rs
index 59738577a5f..a4436552619 100644
--- a/crates/sui-graphql-rpc/src/types/transaction_block_kind/randomness_state_update.rs
+++ b/crates/sui-graphql-rpc/src/types/transaction_block_kind/randomness_state_update.rs
@@ -1,10 +1,11 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::types::{base64::Base64, epoch::Epoch};
 use async_graphql::*;
 use sui_types::transaction::RandomnessStateUpdate as NativeRandomnessStateUpdate;
 
+use crate::types::{base64::Base64, epoch::Epoch};
+
 #[derive(Clone, Eq, PartialEq)]
 pub(crate) struct RandomnessStateUpdateTransaction {
     pub native: NativeRandomnessStateUpdate,
diff --git a/crates/sui-graphql-rpc/src/types/transaction_metadata.rs b/crates/sui-graphql-rpc/src/types/transaction_metadata.rs
index 8dae3e42bca..1ecc63356a2 100644
--- a/crates/sui-graphql-rpc/src/types/transaction_metadata.rs
+++ b/crates/sui-graphql-rpc/src/types/transaction_metadata.rs
@@ -1,15 +1,15 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use super::object::ObjectRef;
-use super::sui_address::SuiAddress;
 use async_graphql::*;
 
+use super::{object::ObjectRef, sui_address::SuiAddress};
+
 /// The optional extra data a user can provide to a transaction dry run.
-/// `sender` defaults to `0x0`. If gasObjects` is not present, or is an empty list,
-/// it is substituted with a mock Coin object, `gasPrice` defaults to the reference
-/// gas price, `gasBudget` defaults to the max gas budget and `gasSponsor` defaults
-/// to the sender.
+/// `sender` defaults to `0x0`. If `gasObjects` is not present, or is an empty
+/// list, it is substituted with a mock Coin object, `gasPrice` defaults to the
+/// reference gas price, `gasBudget` defaults to the max gas budget and
+/// `gasSponsor` defaults to the sender.
 #[derive(Clone, Debug, PartialEq, Eq, InputObject)]
 pub(crate) struct TransactionMetadata {
     pub sender: Option<SuiAddress>,
diff --git a/crates/sui-graphql-rpc/src/types/type_filter.rs b/crates/sui-graphql-rpc/src/types/type_filter.rs
index 2bec5eb3fee..52de71cad2d 100644
--- a/crates/sui-graphql-rpc/src/types/type_filter.rs
+++ b/crates/sui-graphql-rpc/src/types/type_filter.rs
@@ -1,12 +1,8 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use super::{string_input::impl_string_input, sui_address::SuiAddress};
-use crate::raw_query::RawQuery;
-use crate::{
-    data::{DieselBackend, Query},
-    filter,
-};
+use std::{fmt, result::Result, str::FromStr};
+
 use async_graphql::*;
 use diesel::{
     expression::{is_aggregate::No, ValidGrouping},
@@ -16,11 +12,17 @@ use diesel::{
     TextExpressionMethods,
 };
 use move_core_types::language_storage::StructTag;
-use std::{fmt, result::Result, str::FromStr};
 use sui_types::{
     parse_sui_address, parse_sui_fq_name, parse_sui_module_id, parse_sui_type_tag, TypeTag,
 };
 
+use super::{string_input::impl_string_input, sui_address::SuiAddress};
+use crate::{
+    data::{DieselBackend, Query},
+    filter,
+    raw_query::RawQuery,
+};
+
 /// A GraphQL scalar containing a filter on types that requires an exact match.
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub(crate) struct ExactTypeFilter(pub TypeTag);
@@ -31,9 +33,10 @@ pub(crate) enum TypeFilter {
     /// Filter the type by the package or module it's from.
     ByModule(ModuleFilter),
 
-    /// If the type tag has type parameters, treat it as an exact filter on that instantiation,
-    /// otherwise treat it as either a filter on all generic instantiations of the type, or an exact
-    /// match on the type with no type parameters. E.g.
+    /// If the type tag has type parameters, treat it as an exact filter on that
+    /// instantiation, otherwise treat it as either a filter on all generic
+    /// instantiations of the type, or an exact match on the type with no
+    /// type parameters. E.g.
     ///
     ///  0x2::coin::Coin
     ///
@@ -145,9 +148,10 @@ impl TypeFilter {
         query
     }
 
-    /// Try to create a filter whose results are the intersection of the results of the input
-    /// filters (`self` and `other`). This may not be possible if the resulting filter is
-    /// inconsistent (e.g. a filter that requires the module member's package to be at two different
+    /// Try to create a filter whose results are the intersection of the results
+    /// of the input filters (`self` and `other`). This may not be possible
+    /// if the resulting filter is inconsistent (e.g. a filter that requires
+    /// the module member's package to be at two different
     /// addresses simultaneously), in which case `None` is returned.
     pub(crate) fn intersect(self, other: Self) -> Option<Self> {
         use ModuleFilter as M;
@@ -198,9 +202,10 @@ impl TypeFilter {
 }
 
 impl FqNameFilter {
-    /// Modify `query` to apply this filter, treating `package` as the column containing the package
-    /// address, `module` as the module containing the module name, and `name` as the column
-    /// containing the module member name.
+    /// Modify `query` to apply this filter, treating `package` as the column
+    /// containing the package address, `module` as the module containing
+    /// the module name, and `name` as the column containing the module
+    /// member name.
     pub(crate) fn apply(
         &self,
         query: Query,
@@ -230,9 +235,10 @@ impl FqNameFilter {
         }
     }
 
-    /// Try to create a filter whose results are the intersection of the results of the input
-    /// filters (`self` and `other`). This may not be possible if the resulting filter is
-    /// inconsistent (e.g. a filter that requires the module member's package to be at two different
+    /// Try to create a filter whose results are the intersection of the results
+    /// of the input filters (`self` and `other`). This may not be possible
+    /// if the resulting filter is inconsistent (e.g. a filter that requires
+    /// the module member's package to be at two different
     /// addresses simultaneously), in which case `None` is returned.
     pub(crate) fn intersect(self, other: Self) -> Option<Self> {
         use FqNameFilter as F;
@@ -257,8 +263,9 @@ impl FqNameFilter {
 }
 
 impl ModuleFilter {
-    /// Modify `query` to apply this filter, treating `package` as the column containing the package
-    /// address and `module` as the module containing the module name.
+    /// Modify `query` to apply this filter, treating `package` as the column
+    /// containing the package address and `module` as the module containing
+    /// the module name.
     pub(crate) fn apply(
         &self,
         query: Query,
@@ -283,9 +290,10 @@ impl ModuleFilter {
     }
 }
 
-    /// Try to create a filter whose results are the intersection of the results of the input
-    /// filters (`self` and `other`). This may not be possible if the resulting filter is
-    /// inconsistent (e.g. a filter that requires the module's package to be at two different
+    /// Try to create a filter whose results are the intersection of the results
+    /// of the input filters (`self` and `other`). This may not be possible
+    /// if the resulting filter is inconsistent (e.g. a filter that requires
+    /// the module's package to be at two different
     /// addresses simultaneously), in which case `None` is returned.
     pub(crate) fn intersect(self, other: Self) -> Option<Self> {
         match (&self, &other) {
@@ -414,9 +422,10 @@ impl From for TypeFilter {
 
 #[cfg(test)]
 mod tests {
-    use super::*;
     use expect_test::expect;
 
+    use super::*;
+
     #[test]
     fn test_valid_exact_type_filters() {
         let inputs = [
diff --git a/crates/sui-graphql-rpc/src/types/unchanged_shared_object.rs b/crates/sui-graphql-rpc/src/types/unchanged_shared_object.rs
index cd0f77805d0..c9a325a9993 100644
--- a/crates/sui-graphql-rpc/src/types/unchanged_shared_object.rs
+++ b/crates/sui-graphql-rpc/src/types/unchanged_shared_object.rs
@@ -6,10 +6,10 @@ use sui_types::effects::InputSharedObject as NativeInputSharedObject;
 
 use super::{object_read::ObjectRead, sui_address::SuiAddress};
 
-/// Details pertaining to shared objects that are referenced by but not changed by a transaction.
-/// This information is considered part of the effects, because although the transaction specifies
-/// the shared object as input, consensus must schedule it and pick the version that is actually
-/// used.
+/// Details pertaining to shared objects that are referenced by but not changed
+/// by a transaction. This information is considered part of the effects,
+/// because although the transaction specifies the shared object as input,
+/// consensus must schedule it and pick the version that is actually used.
 #[derive(Union)]
 pub(crate) enum UnchangedSharedObject {
     Read(SharedObjectRead),
@@ -23,19 +23,19 @@ pub(crate) struct SharedObjectRead {
     read: ObjectRead,
 }
 
-/// The transaction accepted a shared object as input, but it was deleted before the transaction
-/// executed.
+/// The transaction accepted a shared object as input, but it was deleted before
+/// the transaction executed.
 #[derive(SimpleObject)]
 pub(crate) struct SharedObjectDelete {
     /// ID of the shared object.
     address: SuiAddress,
 
-    /// The version of the shared object that was assigned to this transaction during by consensus,
-    /// during sequencing.
+    /// The version of the shared object that was assigned to this transaction
+    /// by consensus, during sequencing.
     version: u64,
 
-    /// Whether this transaction intended to use this shared object mutably or not. See
-    /// `SharedInput.mutable` for further details.
+    /// Whether this transaction intended to use this shared object mutably or
+    /// not. See `SharedInput.mutable` for further details.
     mutable: bool,
 }
diff --git a/crates/sui-graphql-rpc/src/types/validator.rs b/crates/sui-graphql-rpc/src/types/validator.rs
index 1c433ce3c42..6b6df91cb63 100644
--- a/crates/sui-graphql-rpc/src/types/validator.rs
+++ b/crates/sui-graphql-rpc/src/types/validator.rs
@@ -1,20 +1,22 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::consistency::ConsistentIndexCursor;
-use crate::context_data::db_data_provider::PgManager;
-use crate::types::cursor::{JsonCursor, Page};
-use async_graphql::connection::{Connection, CursorType, Edge};
-
-use super::big_int::BigInt;
-use super::move_object::MoveObject;
-use super::object::ObjectLookupKey;
-use super::sui_address::SuiAddress;
-use super::validator_credentials::ValidatorCredentials;
-use super::{address::Address, base64::Base64};
-use async_graphql::*;
+use async_graphql::{
+    connection::{Connection, CursorType, Edge},
+    *,
+};
 use sui_types::sui_system_state::sui_system_state_summary::SuiValidatorSummary as NativeSuiValidatorSummary;
 
+use super::{
+    address::Address, base64::Base64, big_int::BigInt, move_object::MoveObject,
+    object::ObjectLookupKey, sui_address::SuiAddress, validator_credentials::ValidatorCredentials,
+};
+use crate::{
+    consistency::ConsistentIndexCursor,
+    context_data::db_data_provider::PgManager,
+    types::cursor::{JsonCursor, Page},
+};
+
 #[derive(Clone, Debug)]
 pub(crate) struct Validator {
     pub validator_summary: NativeSuiValidatorSummary,
@@ -36,7 +38,8 @@ impl Validator {
         }
     }
 
-    /// Validator's set of credentials such as public keys, network addresses and others.
+    /// Validator's set of credentials such as public keys, network addresses
+    /// and others.
     async fn credentials(&self) -> Option<ValidatorCredentials> {
         let v = &self.validator_summary;
         let credentials = ValidatorCredentials {
@@ -92,8 +95,9 @@ impl Validator {
     }
 
     /// The validator's current valid `Cap` object. Validators can delegate
-    /// the operation ability to another address. The address holding this `Cap` object
-    /// can then update the reference gas price and tallying rule on behalf of the validator.
+    /// the operation ability to another address. The address holding this `Cap`
+    /// object can then update the reference gas price and tallying rule on
+    /// behalf of the validator.
     async fn operation_cap(&self, ctx: &Context<'_>) -> Result<Option<MoveObject>> {
         MoveObject::query(
             ctx.data_unchecked(),
@@ -104,8 +108,8 @@ impl Validator {
         .extend()
     }
 
-    /// The validator's current staking pool object, used to track the amount of stake
-    /// and to compound staking rewards.
+    /// The validator's current staking pool object, used to track the amount of
+    /// stake and to compound staking rewards.
     async fn staking_pool(&self, ctx: &Context<'_>) -> Result<Option<MoveObject>> {
         MoveObject::query(
             ctx.data_unchecked(),
@@ -116,8 +120,9 @@ impl Validator {
         .extend()
     }
 
-    /// The validator's current exchange object. The exchange rate is used to determine
-    /// the amount of SUI tokens that each past SUI staker can withdraw in the future.
+    /// The validator's current exchange object. The exchange rate is used to
+    /// determine the amount of SUI tokens that each past SUI staker can
+    /// withdraw in the future.
     async fn exchange_rates(&self, ctx: &Context<'_>) -> Result<Option<MoveObject>> {
         MoveObject::query(
             ctx.data_unchecked(),
@@ -160,21 +165,24 @@ impl Validator {
         Some(BigInt::from(self.validator_summary.pending_stake))
     }
 
-    /// Pending stake withdrawn during the current epoch, emptied at epoch boundaries.
+    /// Pending stake withdrawn during the current epoch, emptied at epoch
+    /// boundaries.
     async fn pending_total_sui_withdraw(&self) -> Option<BigInt> {
         Some(BigInt::from(
             self.validator_summary.pending_total_sui_withdraw,
         ))
     }
 
-    /// Pending pool token withdrawn during the current epoch, emptied at epoch boundaries.
+    /// Pending pool token withdrawn during the current epoch, emptied at epoch
+    /// boundaries.
     async fn pending_pool_token_withdraw(&self) -> Option<BigInt> {
         Some(BigInt::from(
             self.validator_summary.pending_pool_token_withdraw,
         ))
     }
 
-    /// The voting power of this validator in basis points (e.g., 100 = 1% voting power).
+    /// The voting power of this validator in basis points (e.g., 100 = 1%
+    /// voting power).
     async fn voting_power(&self) -> Option<u64> {
         Some(self.validator_summary.voting_power)
     }
diff --git a/crates/sui-graphql-rpc/src/types/validator_credentials.rs b/crates/sui-graphql-rpc/src/types/validator_credentials.rs
index 1172ea9302d..04a5c6ef2ad 100644
--- a/crates/sui-graphql-rpc/src/types/validator_credentials.rs
+++ b/crates/sui-graphql-rpc/src/types/validator_credentials.rs
@@ -1,9 +1,10 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use super::base64::Base64;
 use async_graphql::*;
 
+use super::base64::Base64;
+
 /// The credentials related fields associated with a validator.
 #[derive(Clone, Debug, PartialEq, Eq, SimpleObject)]
 pub(crate) struct ValidatorCredentials {
diff --git a/crates/sui-graphql-rpc/src/types/validator_set.rs b/crates/sui-graphql-rpc/src/types/validator_set.rs
index 16267953fd0..1b78385ec92 100644
--- a/crates/sui-graphql-rpc/src/types/validator_set.rs
+++ b/crates/sui-graphql-rpc/src/types/validator_set.rs
@@ -1,40 +1,49 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::consistency::ConsistentIndexCursor;
-use crate::types::cursor::{JsonCursor, Page};
-use crate::types::sui_address::SuiAddress;
-use async_graphql::connection::{Connection, CursorType, Edge};
-
-use super::big_int::BigInt;
-use super::validator::Validator;
-use async_graphql::*;
+use async_graphql::{
+    connection::{Connection, CursorType, Edge},
+    *,
+};
+
+use super::{big_int::BigInt, validator::Validator};
+use crate::{
+    consistency::ConsistentIndexCursor,
+    types::{
+        cursor::{JsonCursor, Page},
+        sui_address::SuiAddress,
+    },
+};
 
 /// Representation of `0x3::validator_set::ValidatorSet`.
 #[derive(Clone, Debug, SimpleObject, Default)]
 #[graphql(complex)]
 pub(crate) struct ValidatorSet {
-    /// Total amount of stake for all active validators at the beginning of the epoch.
+    /// Total amount of stake for all active validators at the beginning of the
+    /// epoch.
     pub total_stake: Option<BigInt>,
 
     #[graphql(skip)]
     /// The current list of active validators.
     pub active_validators: Option<Vec<Validator>>,
 
-    /// Validators that are pending removal from the active validator set, expressed as indices in
-    /// to `activeValidators`.
+    /// Validators that are pending removal from the active validator set,
+    /// expressed as indices into `activeValidators`.
     pub pending_removals: Option<Vec<u64>>,
 
-    // TODO: instead of returning the id and size of the table, potentially return the table itself, paginated.
-    /// Object ID of the wrapped object `TableVec` storing the pending active validators.
+    // TODO: instead of returning the id and size of the table, potentially return the table
+    // itself, paginated.
+    /// Object ID of the wrapped object `TableVec` storing the pending active
+    /// validators.
    pub pending_active_validators_id: Option<SuiAddress>,
 
     /// Size of the pending active validators table.
     pub pending_active_validators_size: Option<u64>,
 
-    /// Object ID of the `Table` storing the mapping from staking pool ids to the addresses
-    /// of the corresponding validators. This is needed because a validator's address
-    /// can potentially change but the object ID of its pool will not.
+    /// Object ID of the `Table` storing the mapping from staking pool ids to
+    /// the addresses of the corresponding validators. This is needed
+    /// because a validator's address can potentially change but the object
+    /// ID of its pool will not.
     pub staking_pool_mappings_id: Option<SuiAddress>,
 
     /// Size of the stake pool mappings `Table`.
diff --git a/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs b/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs
index 305847405a0..1704a5e40a9 100644
--- a/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs
+++ b/crates/sui-graphql-rpc/src/types/zklogin_verify_signature.rs
@@ -1,29 +1,35 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::config::ZkLoginConfig;
-use crate::error::Error;
-use crate::types::base64::Base64;
-use crate::types::dynamic_field::{DynamicField, DynamicFieldName};
-use crate::types::epoch::Epoch;
-use crate::types::sui_address::SuiAddress;
-use crate::types::type_filter::ExactTypeFilter;
 use async_graphql::*;
 use im::hashmap::HashMap as ImHashMap;
 use shared_crypto::intent::{
     AppId, Intent, IntentMessage, IntentScope, IntentVersion, PersonalMessage,
 };
-use sui_types::authenticator_state::{ActiveJwk, AuthenticatorStateInner};
-use sui_types::crypto::ToFromBytes;
-use sui_types::dynamic_field::{DynamicFieldType, Field};
-use sui_types::signature::GenericSignature;
-use sui_types::signature::{AuthenticatorTrait, VerifyParams};
-use sui_types::transaction::TransactionData;
-use sui_types::{TypeTag, SUI_AUTHENTICATOR_STATE_ADDRESS};
+use sui_types::{
+    authenticator_state::{ActiveJwk, AuthenticatorStateInner},
+    crypto::ToFromBytes,
+    dynamic_field::{DynamicFieldType, Field},
+    signature::{AuthenticatorTrait, GenericSignature, VerifyParams},
+    transaction::TransactionData,
+    TypeTag, SUI_AUTHENTICATOR_STATE_ADDRESS,
+};
 use tracing::warn;
 
-/// An enum that specifies the intent scope to be used to parse the bytes for signature
-/// verification.
+use crate::{
+    config::ZkLoginConfig,
+    error::Error,
+    types::{
+        base64::Base64,
+        dynamic_field::{DynamicField, DynamicFieldName},
+        epoch::Epoch,
+        sui_address::SuiAddress,
+        type_filter::ExactTypeFilter,
+    },
+};
+
+/// An enum that specifies the intent scope to be used to parse the bytes for
+/// signature verification.
 #[derive(Enum, Copy, Clone, Eq, PartialEq)]
 pub(crate) enum ZkLoginIntentScope {
     /// Indicates that the bytes are to be parsed as transaction data bytes.
@@ -41,8 +47,9 @@ pub(crate) struct ZkLoginVerifyResult {
     pub errors: Vec<String>,
 }
 
-/// Verifies a zkLogin signature based on the bytes (parsed as either TransactionData or
-/// PersonalMessage based on the intent scope) and its author.
+/// Verifies a zkLogin signature based on the bytes (parsed as either
+/// TransactionData or PersonalMessage based on the intent scope) and its
+/// author.
pub(crate) async fn verify_zklogin_signature( ctx: &Context<'_>, bytes: Base64, diff --git a/crates/sui-graphql-rpc/tests/e2e_tests.rs b/crates/sui-graphql-rpc/tests/e2e_tests.rs index ff4dcf5c63d..ca945c5a3bc 100644 --- a/crates/sui-graphql-rpc/tests/e2e_tests.rs +++ b/crates/sui-graphql-rpc/tests/e2e_tests.rs @@ -3,26 +3,24 @@ #[cfg(feature = "pg_integration")] mod tests { + use std::{sync::Arc, time::Duration}; + use fastcrypto::encoding::{Base64, Encoding}; - use rand::rngs::StdRng; - use rand::SeedableRng; + use rand::{rngs::StdRng, SeedableRng}; use serde_json::json; use serial_test::serial; use simulacrum::Simulacrum; - use std::sync::Arc; - use std::time::Duration; - use sui_graphql_rpc::client::simple_client::GraphqlQueryVariable; - use sui_graphql_rpc::client::ClientError; - use sui_graphql_rpc::config::ConnectionConfig; - use sui_graphql_rpc::test_infra::cluster::DEFAULT_INTERNAL_DATA_SOURCE_PORT; - use sui_types::digests::ChainIdentifier; - use sui_types::gas_coin::GAS; - use sui_types::transaction::CallArg; - use sui_types::transaction::ObjectArg; - use sui_types::transaction::TransactionDataAPI; - use sui_types::DEEPBOOK_ADDRESS; - use sui_types::SUI_FRAMEWORK_ADDRESS; - use sui_types::SUI_FRAMEWORK_PACKAGE_ID; + use sui_graphql_rpc::{ + client::{simple_client::GraphqlQueryVariable, ClientError}, + config::ConnectionConfig, + test_infra::cluster::DEFAULT_INTERNAL_DATA_SOURCE_PORT, + }; + use sui_types::{ + digests::ChainIdentifier, + gas_coin::GAS, + transaction::{CallArg, ObjectArg, TransactionDataAPI}, + DEEPBOOK_ADDRESS, SUI_FRAMEWORK_ADDRESS, SUI_FRAMEWORK_PACKAGE_ID, + }; use tokio::time::sleep; #[tokio::test] @@ -427,12 +425,14 @@ mod tests { let cluster = sui_graphql_rpc::test_infra::cluster::start_cluster(connection_config, None).await; - // wait for epoch to be indexed, so that current epoch and JWK are populated in db. + // wait for epoch to be indexed, so that current epoch and JWK are populated in + // db. let test_cluster = cluster.validator_fullnode_handle; test_cluster.wait_for_epoch(Some(1)).await; test_cluster.wait_for_authenticator_state_update().await; - // now query the endpoint with a valid tx data bytes and a valid signature with the correct proof for dev env. + // now query the endpoint with a valid tx data bytes and a valid signature with + // the correct proof for dev env. 
        let bytes = "AAABACACAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgEBAQABAAAcpgUkGBwS5nPO79YXkjMyvaRjGS57hqxzfyd2yGtejwGbB4FfBEl+LgXSLKw6oGFBCyCGjMYZFUxCocYb6ZAnFwEAAAAAAAAAIJZw7UpW1XHubORIOaY8d2+WyBNwoJ+FEAxlsa7h7JHrHKYFJBgcEuZzzu/WF5IzMr2kYxkue4asc38ndshrXo8BAAAAAAAAABAnAAAAAAAAAA==";
        let signature = "BQNNMTczMTgwODkxMjU5NTI0MjE3MzYzNDIyNjM3MTc5MzI3MTk0Mzc3MTc4NDQyODI0MTAxODc5NTc5ODQ3NTE5Mzk5NDI4OTgyNTEyNTBNMTEzNzM5NjY2NDU0NjkxMjI1ODIwNzQwODIyOTU5ODUzODgyNTg4NDA2ODE2MTgyNjg1OTM5NzY2OTczMjU4OTIyODA5MTU2ODEyMDcBMQMCTDU5Mzk4NzExNDczNDg4MzQ5OTczNjE3MjAxMjIyMzg5ODAxNzcxNTIzMDMyNzQzMTEwNDcyNDk5MDU5NDIzODQ5MTU3Njg2OTA4OTVMNDUzMzU2ODI3MTEzNDc4NTI3ODczMTIzNDU3MDM2MTQ4MjY1MTk5Njc0MDc5MTg4ODI4NTg2NDk2Njg4NDAzMjcxNzA0OTgxMTcwOAJNMTA1NjQzODcyODUwNzE1NTU0Njk3NTM5OTA2NjE0MTA4NDAxMTg2MzU5MjU0NjY1OTcwMzcwMTgwNTg3NzAwNDEzNDc1MTg0NjEzNjhNMTI1OTczMjM1NDcyNzc1NzkxNDQ2OTg0OTYzNzIyNDI2MTUzNjgwODU4MDEzMTMzNDMxNTU3MzU1MTEzMzAwMDM4ODQ3Njc5NTc4NTQCATEBMANNMTU3OTE1ODk0NzI1NTY4MjYyNjMyMzE2NDQ3Mjg4NzMzMzc2MjkwMTUyNjk5ODQ2OTk0MDQwNzM2MjM2MDMzNTI1Mzc2Nzg4MTMxNzFMNDU0Nzg2NjQ5OTI0ODg4MTQ0OTY3NjE2MTE1ODAyNDc0ODA2MDQ4NTM3MzI1MDAyOTQyMzkwNDExMzAxNzQyMjUzOTAzNzE2MjUyNwExMXdpYVhOeklqb2lhSFIwY0hNNkx5OXBaQzUwZDJsMFkyZ3VkSFl2YjJGMWRHZ3lJaXcCMmV5SmhiR2NpT2lKU1V6STFOaUlzSW5SNWNDSTZJa3BYVkNJc0ltdHBaQ0k2SWpFaWZRTTIwNzk0Nzg4NTU5NjIwNjY5NTk2MjA2NDU3MDIyOTY2MTc2OTg2Njg4NzI3ODc2MTI4MjIzNjI4MTEzOTE2MzgwOTI3NTAyNzM3OTExCgAAAAAAAABhAG6Bf8BLuaIEgvF8Lx2jVoRWKKRIlaLlEJxgvqwq5nDX+rvzJxYAUFd7KeQBd9upNx+CHpmINkfgj26jcHbbqAy5xu4WMO8+cRFEpkjbBruyKE9ydM++5T/87lA8waSSAA==";
        let intent_scope = "TRANSACTION_DATA";
@@ -506,7 +506,8 @@
         assert_eq!(res.get("success").unwrap(), false);
     }
 
-    // TODO: add more test cases for transaction execution/dry run in transactional test runner.
+    // TODO: add more test cases for transaction execution/dry run in transactional
+    // test runner.
     #[tokio::test]
     #[serial]
     async fn test_transaction_dry_run() {
@@ -600,7 +601,8 @@
         assert!(res.get("results").unwrap().is_array());
     }
 
-    // Test dry run where the transaction kind is provided instead of the full transaction.
+    // Test dry run where the transaction kind is provided instead of the full
+    // transaction.
    #[tokio::test]
    #[serial]
    async fn test_transaction_dry_run_with_kind() {
@@ -665,8 +667,8 @@
         assert!(digest.is_null());
         assert!(res.get("error").unwrap().is_null());
         let sender_read = res.get("transaction").unwrap().get("sender").unwrap();
-        // Since no transaction metadata is provided, we use 0x0 as the sender while dry running the trasanction
-        // in which case the sender is null.
+        // Since no transaction metadata is provided, we use 0x0 as the sender while dry
+        // running the transaction, in which case the sender is null.
         assert!(sender_read.is_null());
         assert!(res.get("results").unwrap().is_array());
     }
@@ -752,12 +754,13 @@
         // Execution failed so the results are null.
         assert!(res.get("results").unwrap().is_null());
         // Check that the error is not null and contains the error message.
- assert!(res - .get("error") - .unwrap() - .as_str() - .unwrap() - .contains("UnusedValueWithoutDrop")); + assert!( + res.get("error") + .unwrap() + .as_str() + .unwrap() + .contains("UnusedValueWithoutDrop") + ); } #[tokio::test] @@ -798,12 +801,14 @@ mod tests { let binding = res.response_body().data.clone().into_json().unwrap(); // Check that liveObjectSetDigest is not null - assert!(!binding - .get("epoch") - .unwrap() - .get("liveObjectSetDigest") - .unwrap() - .is_null()); + assert!( + !binding + .get("epoch") + .unwrap() + .get("liveObjectSetDigest") + .unwrap() + .is_null() + ); } use sui_graphql_rpc::server::builder::tests::*; diff --git a/crates/sui-graphql-rpc/tests/examples_validation_tests.rs b/crates/sui-graphql-rpc/tests/examples_validation_tests.rs index 8ef26ec9ff8..35f22041289 100644 --- a/crates/sui-graphql-rpc/tests/examples_validation_tests.rs +++ b/crates/sui-graphql-rpc/tests/examples_validation_tests.rs @@ -3,17 +3,16 @@ #[cfg(feature = "pg_integration")] mod tests { - use rand::rngs::StdRng; - use rand::SeedableRng; + use std::{cmp::max, path::PathBuf, sync::Arc}; + + use rand::{rngs::StdRng, SeedableRng}; use serial_test::serial; use simulacrum::Simulacrum; - use std::cmp::max; - use std::path::PathBuf; - use std::sync::Arc; - use sui_graphql_rpc::config::{ConnectionConfig, Limits}; - use sui_graphql_rpc::examples::{load_examples, ExampleQuery, ExampleQueryGroup}; - use sui_graphql_rpc::test_infra::cluster::ExecutorCluster; - use sui_graphql_rpc::test_infra::cluster::DEFAULT_INTERNAL_DATA_SOURCE_PORT; + use sui_graphql_rpc::{ + config::{ConnectionConfig, Limits}, + examples::{load_examples, ExampleQuery, ExampleQueryGroup}, + test_infra::cluster::{ExecutorCluster, DEFAULT_INTERNAL_DATA_SOURCE_PORT}, + }; fn bad_examples() -> ExampleQueryGroup { ExampleQueryGroup { diff --git a/crates/sui-graphql-rpc/tests/snapshot_tests.rs b/crates/sui-graphql-rpc/tests/snapshot_tests.rs index 30a66934b5d..c1c2abc1332 100644 --- a/crates/sui-graphql-rpc/tests/snapshot_tests.rs +++ b/crates/sui-graphql-rpc/tests/snapshot_tests.rs @@ -1,9 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{fs::write, path::PathBuf}; + use insta::assert_snapshot; -use std::fs::write; -use std::path::PathBuf; use sui_graphql_rpc::server::builder::export_schema; #[test] diff --git a/crates/sui-indexer/src/apis/coin_api.rs b/crates/sui-indexer/src/apis/coin_api.rs index 1ede49a9701..e5204eb89c7 100644 --- a/crates/sui-indexer/src/apis/coin_api.rs +++ b/crates/sui-indexer/src/apis/coin_api.rs @@ -1,18 +1,22 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::indexer_reader::IndexerReader; use async_trait::async_trait; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; -use sui_json_rpc::coin_api::{parse_to_struct_tag, parse_to_type_tag}; -use sui_json_rpc::SuiRpcModule; +use jsonrpsee::{core::RpcResult, RpcModule}; +use sui_json_rpc::{ + coin_api::{parse_to_struct_tag, parse_to_type_tag}, + SuiRpcModule, +}; use sui_json_rpc_api::{cap_page_limit, CoinReadApiServer}; use sui_json_rpc_types::{Balance, CoinPage, Page, SuiCoinMetadata}; use sui_open_rpc::Module; -use sui_types::balance::Supply; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::gas_coin::{GAS, TOTAL_SUPPLY_MIST}; +use sui_types::{ + balance::Supply, + base_types::{ObjectID, SuiAddress}, + gas_coin::{GAS, TOTAL_SUPPLY_MIST}, +}; + +use crate::indexer_reader::IndexerReader; pub(crate) struct CoinReadApi { inner: IndexerReader, @@ -44,7 +48,8 @@ impl CoinReadApiServer for CoinReadApi { let cursor = match cursor { Some(c) => c, - // If cursor is not specified, we need to start from the beginning of the coin type, which is the minimal possible ObjectID. + // If cursor is not specified, we need to start from the beginning of the coin type, + // which is the minimal possible ObjectID. None => ObjectID::ZERO, }; let mut results = self @@ -75,7 +80,8 @@ impl CoinReadApiServer for CoinReadApi { let cursor = match cursor { Some(c) => c, - // If cursor is not specified, we need to start from the beginning of the coin type, which is the minimal possible ObjectID. + // If cursor is not specified, we need to start from the beginning of the coin type, + // which is the minimal possible ObjectID. None => ObjectID::ZERO, }; let mut results = self diff --git a/crates/sui-indexer/src/apis/extended_api.rs b/crates/sui-indexer/src/apis/extended_api.rs index 79ece25e74f..f35fa95fef0 100644 --- a/crates/sui-indexer/src/apis/extended_api.rs +++ b/crates/sui-indexer/src/apis/extended_api.rs @@ -1,7 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::indexer_reader::IndexerReader; use jsonrpsee::{core::RpcResult, RpcModule}; use sui_json_rpc::SuiRpcModule; use sui_json_rpc_api::{validate_limit, ExtendedApiServer, QUERY_MAX_RESULT_LIMIT_CHECKPOINTS}; @@ -11,6 +10,8 @@ use sui_json_rpc_types::{ use sui_open_rpc::Module; use sui_types::sui_serde::BigInt; +use crate::indexer_reader::IndexerReader; + pub(crate) struct ExtendedApi { inner: IndexerReader, } diff --git a/crates/sui-indexer/src/apis/governance_api.rs b/crates/sui-indexer/src/apis/governance_api.rs index f1576cc9315..ace8fb5d973 100644 --- a/crates/sui-indexer/src/apis/governance_api.rs +++ b/crates/sui-indexer/src/apis/governance_api.rs @@ -3,11 +3,9 @@ use std::collections::BTreeMap; -use crate::{errors::IndexerError, indexer_reader::IndexerReader}; use async_trait::async_trait; -use jsonrpsee::{core::RpcResult, RpcModule}; - use cached::{proc_macro::cached, SizedCache}; +use jsonrpsee::{core::RpcResult, RpcModule}; use sui_json_rpc::{governance_api::ValidatorExchangeRates, SuiRpcModule}; use sui_json_rpc_api::GovernanceReadApiServer; use sui_json_rpc_types::{ @@ -24,6 +22,8 @@ use sui_types::{ timelock::timelocked_staked_sui::TimelockedStakedSui, }; +use crate::{errors::IndexerError, indexer_reader::IndexerReader}; + /// Maximum amount of staked objects for querying. 
 const MAX_QUERY_STAKED_OBJECTS: usize = 1000;
 
@@ -290,8 +290,9 @@ fn stake_status(
     }
 }
 
-/// Cached exchange rates for validators for the given epoch, the cache size is 1, it will be cleared when the epoch changes.
-/// rates are in descending order by epoch.
+/// Cached exchange rates for validators for the given epoch. The cache size is
+/// 1 and it is cleared when the epoch changes. Rates are in descending order
+/// by epoch.
 #[cached(
     type = "SizedCache>",
     create = "{ SizedCache::with_size(1) }",
diff --git a/crates/sui-indexer/src/apis/indexer_api.rs b/crates/sui-indexer/src/apis/indexer_api.rs
index 03843d3bd21..21ef0ff379a 100644
--- a/crates/sui-indexer/src/apis/indexer_api.rs
+++ b/crates/sui-indexer/src/apis/indexer_api.rs
@@ -1,15 +1,16 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::indexer_reader::IndexerReader;
-use crate::IndexerError;
 use async_trait::async_trait;
-use jsonrpsee::core::RpcResult;
-use jsonrpsee::types::SubscriptionEmptyError;
-use jsonrpsee::types::SubscriptionResult;
-use jsonrpsee::{RpcModule, SubscriptionSink};
-use sui_json_rpc::name_service::{Domain, NameRecord, NameServiceConfig};
-use sui_json_rpc::SuiRpcModule;
+use jsonrpsee::{
+    core::RpcResult,
+    types::{SubscriptionEmptyError, SubscriptionResult},
+    RpcModule, SubscriptionSink,
+};
+use sui_json_rpc::{
+    name_service::{Domain, NameRecord, NameServiceConfig},
+    SuiRpcModule,
+};
 use sui_json_rpc_api::{cap_page_limit, IndexerApiServer};
 use sui_json_rpc_types::{
     DynamicFieldPage, EventFilter, EventPage, ObjectsPage, Page, SuiObjectResponse,
@@ -17,13 +18,17 @@ use sui_json_rpc_types::{
     TransactionFilter,
 };
 use sui_open_rpc::Module;
-use sui_types::base_types::{ObjectID, SuiAddress};
-use sui_types::digests::TransactionDigest;
-use sui_types::dynamic_field::{DynamicFieldName, Field};
-use sui_types::error::SuiObjectResponseError;
-use sui_types::event::EventID;
-use sui_types::object::ObjectRead;
-use sui_types::TypeTag;
+use sui_types::{
+    base_types::{ObjectID, SuiAddress},
+    digests::TransactionDigest,
+    dynamic_field::{DynamicFieldName, Field},
+    error::SuiObjectResponseError,
+    event::EventID,
+    object::ObjectRead,
+    TypeTag,
+};
+
+use crate::{indexer_reader::IndexerReader, IndexerError};
 
 pub(crate) struct IndexerApi {
     inner: IndexerReader,
@@ -333,7 +338,7 @@ impl IndexerApiServer for IndexerApi {
                 data: vec![],
                 next_cursor: None,
                 has_next_page: false,
-            })
+            });
         }
     };
 
diff --git a/crates/sui-indexer/src/apis/move_utils.rs b/crates/sui-indexer/src/apis/move_utils.rs
index 45341b42d2e..93f0c848e82 100644
--- a/crates/sui-indexer/src/apis/move_utils.rs
+++ b/crates/sui-indexer/src/apis/move_utils.rs
@@ -4,22 +4,16 @@ use std::collections::BTreeMap;
 
 use async_trait::async_trait;
-use jsonrpsee::core::RpcResult;
-use jsonrpsee::RpcModule;
+use jsonrpsee::{core::RpcResult, RpcModule};
 use move_binary_format::binary_config::BinaryConfig;
-
-use sui_json_rpc::error::SuiRpcInputError;
-use sui_json_rpc::SuiRpcModule;
+use sui_json_rpc::{error::SuiRpcInputError, SuiRpcModule};
 use sui_json_rpc_api::MoveUtilsServer;
-use sui_json_rpc_types::ObjectValueKind;
-use sui_json_rpc_types::SuiMoveNormalizedType;
 use sui_json_rpc_types::{
-    MoveFunctionArgType, SuiMoveNormalizedFunction, SuiMoveNormalizedModule,
-    SuiMoveNormalizedStruct,
+    MoveFunctionArgType, ObjectValueKind, SuiMoveNormalizedFunction, SuiMoveNormalizedModule,
+    SuiMoveNormalizedStruct, SuiMoveNormalizedType,
 };
 use sui_open_rpc::Module;
-use sui_types::base_types::ObjectID;
-use sui_types::move_package::normalize_modules;
+use sui_types::{base_types::ObjectID, move_package::normalize_modules};
 
 use crate::indexer_reader::IndexerReader;
 
diff --git a/crates/sui-indexer/src/apis/read_api.rs b/crates/sui-indexer/src/apis/read_api.rs
index f5b445c3bbf..b2b5478a955 100644
--- a/crates/sui-indexer/src/apis/read_api.rs
+++ b/crates/sui-indexer/src/apis/read_api.rs
@@ -2,28 +2,26 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use async_trait::async_trait;
-use jsonrpsee::core::RpcResult;
-use jsonrpsee::RpcModule;
-use sui_json_rpc::error::SuiRpcInputError;
-use sui_types::error::SuiObjectResponseError;
-use sui_types::object::ObjectRead;
-
-use crate::errors::IndexerError;
-use crate::indexer_reader::IndexerReader;
-use sui_json_rpc::SuiRpcModule;
+use jsonrpsee::{core::RpcResult, RpcModule};
+use sui_json_rpc::{error::SuiRpcInputError, SuiRpcModule};
 use sui_json_rpc_api::{ReadApiServer, QUERY_MAX_RESULT_LIMIT};
 use sui_json_rpc_types::{
     Checkpoint, CheckpointId, CheckpointPage, ProtocolConfigResponse, SuiEvent,
-    SuiGetPastObjectRequest, SuiObjectDataOptions, SuiObjectResponse, SuiPastObjectResponse,
-    SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions,
+    SuiGetPastObjectRequest, SuiLoadedChildObjectsResponse, SuiObjectDataOptions,
+    SuiObjectResponse, SuiPastObjectResponse, SuiTransactionBlockResponse,
+    SuiTransactionBlockResponseOptions,
 };
 use sui_open_rpc::Module;
 use sui_protocol_config::{ProtocolConfig, ProtocolVersion};
-use sui_types::base_types::{ObjectID, SequenceNumber};
-use sui_types::digests::{ChainIdentifier, TransactionDigest};
-use sui_types::sui_serde::BigInt;
+use sui_types::{
+    base_types::{ObjectID, SequenceNumber},
+    digests::{ChainIdentifier, TransactionDigest},
+    error::SuiObjectResponseError,
+    object::ObjectRead,
+    sui_serde::BigInt,
+};
 
-use sui_json_rpc_types::SuiLoadedChildObjectsResponse;
+use crate::{errors::IndexerError, indexer_reader::IndexerReader};
 
 #[derive(Clone)]
 pub(crate) struct ReadApi {
@@ -107,9 +105,9 @@ impl ReadApiServer for ReadApi {
         }
     }
 
-    // For ease of implementation we just forward to the single object query, although in the
-    // future we may want to improve the performance by having a more naitive multi_get
-    // functionality
+    // For ease of implementation we just forward to the single object query,
+    // although in the future we may want to improve the performance by having a
+    // more native multi_get functionality.
     async fn multi_get_objects(
         &self,
         object_ids: Vec<ObjectID>,
diff --git a/crates/sui-indexer/src/apis/transaction_builder_api.rs b/crates/sui-indexer/src/apis/transaction_builder_api.rs
index fc9d3b887fb..d49c50d0f78 100644
--- a/crates/sui-indexer/src/apis/transaction_builder_api.rs
+++ b/crates/sui-indexer/src/apis/transaction_builder_api.rs
@@ -1,15 +1,18 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use super::governance_api::GovernanceReadApi; -use crate::indexer_reader::IndexerReader; use async_trait::async_trait; use move_core_types::language_storage::StructTag; use sui_json_rpc::transaction_builder_api::TransactionBuilderApi as SuiTransactionBuilderApi; use sui_json_rpc_types::{SuiObjectDataFilter, SuiObjectDataOptions, SuiObjectResponse}; use sui_transaction_builder::DataReader; -use sui_types::base_types::{ObjectID, ObjectInfo, SuiAddress}; -use sui_types::object::Object; +use sui_types::{ + base_types::{ObjectID, ObjectInfo, SuiAddress}, + object::Object, +}; + +use super::governance_api::GovernanceReadApi; +use crate::indexer_reader::IndexerReader; pub(crate) struct TransactionBuilderApi { inner: IndexerReader, diff --git a/crates/sui-indexer/src/apis/write_api.rs b/crates/sui-indexer/src/apis/write_api.rs index 71a54c35663..5750c25644b 100644 --- a/crates/sui-indexer/src/apis/write_api.rs +++ b/crates/sui-indexer/src/apis/write_api.rs @@ -3,10 +3,7 @@ use async_trait::async_trait; use fastcrypto::encoding::Base64; -use jsonrpsee::core::RpcResult; -use jsonrpsee::http_client::HttpClient; -use jsonrpsee::RpcModule; - +use jsonrpsee::{core::RpcResult, http_client::HttpClient, RpcModule}; use sui_json_rpc::SuiRpcModule; use sui_json_rpc_api::{WriteApiClient, WriteApiServer}; use sui_json_rpc_types::{ @@ -14,9 +11,9 @@ use sui_json_rpc_types::{ SuiTransactionBlockResponseOptions, }; use sui_open_rpc::Module; -use sui_types::base_types::SuiAddress; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::sui_serde::BigInt; +use sui_types::{ + base_types::SuiAddress, quorum_driver_types::ExecuteTransactionRequestType, sui_serde::BigInt, +}; use crate::types::SuiTransactionBlockResponseWithOptions; diff --git a/crates/sui-indexer/src/db.rs b/crates/sui-indexer/src/db.rs index 57615b90e19..88bea7111cb 100644 --- a/crates/sui-indexer/src/db.rs +++ b/crates/sui-indexer/src/db.rs @@ -4,8 +4,7 @@ use std::time::Duration; use anyhow::anyhow; -use diesel::migration::MigrationSource; -use diesel::{r2d2::ConnectionManager, PgConnection, RunQueryDsl}; +use diesel::{migration::MigrationSource, r2d2::ConnectionManager, PgConnection, RunQueryDsl}; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use tracing::info; @@ -138,9 +137,10 @@ const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations"); /// Resets the database by reverting all migrations and reapplying them. /// -/// If `drop_all` is set to `true`, the function will drop all tables in the database before -/// resetting the migrations. This option is destructive and will result in the loss of all -/// data in the tables. Use with caution, especially in production environments. +/// If `drop_all` is set to `true`, the function will drop all tables in the +/// database before resetting the migrations. This option is destructive and +/// will result in the loss of all data in the tables. Use with caution, +/// especially in production environments. 
 pub fn reset_database(conn: &mut PgPoolConnection, drop_all: bool) -> Result<(), anyhow::Error> {
     info!("Resetting database ...");
     if drop_all {
diff --git a/crates/sui-indexer/src/errors.rs b/crates/sui-indexer/src/errors.rs
index 77e26b08637..35e7badb271 100644
--- a/crates/sui-indexer/src/errors.rs
+++ b/crates/sui-indexer/src/errors.rs
@@ -2,14 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use fastcrypto::error::FastCryptoError;
-use jsonrpsee::core::Error as RpcError;
-use jsonrpsee::types::error::CallError;
+use jsonrpsee::{core::Error as RpcError, types::error::CallError};
 use sui_json_rpc::name_service::NameServiceError;
+use sui_types::{
+    base_types::ObjectIDParseError,
+    error::{SuiError, SuiObjectResponseError, UserInputError},
+};
 use thiserror::Error;
 
-use sui_types::base_types::ObjectIDParseError;
-use sui_types::error::{SuiError, SuiObjectResponseError, UserInputError};
-
 #[derive(Debug, Error)]
 pub struct DataDownloadError {
     pub error: IndexerError,
@@ -34,7 +34,9 @@ pub enum IndexerError {
     #[error("Indexer failed to deserialize event from events table with error: `{0}`")]
     EventDeserializationError(String),
 
-    #[error("Fullnode returns unexpected responses, which may block indexers from proceeding, with error: `{0}`")]
+    #[error(
+        "Fullnode returns unexpected responses, which may block indexers from proceeding, with error: `{0}`"
+    )]
     UnexpectedFullnodeResponseError(String),
 
     #[error("Indexer failed to transform data with error: `{0}`")]
diff --git a/crates/sui-indexer/src/framework/builder.rs b/crates/sui-indexer/src/framework/builder.rs
index 055f1da2116..26ecc31ed5f 100644
--- a/crates/sui-indexer/src/framework/builder.rs
+++ b/crates/sui-indexer/src/framework/builder.rs
@@ -3,11 +3,9 @@
 
 use sui_types::messages_checkpoint::CheckpointSequenceNumber;
 
+use super::{fetcher::CheckpointFetcher, Handler};
 use crate::metrics::IndexerMetrics;
 
-use super::fetcher::CheckpointFetcher;
-use super::Handler;
-
 pub struct IndexerBuilder {
     rest_url: Option<String>,
     handlers: Vec<Box<dyn Handler>>,
@@ -63,8 +61,8 @@ impl IndexerBuilder {
             .with_label_values(&["checkpoint_tx_downloading"]),
         );
 
-        // experimental rest api route is found at `/rest` on the same interface as the jsonrpc
-        // service
+        // experimental rest api route is found at `/rest` on the same interface as the
+        // jsonrpc service
         let rest_api_url = format!("{}/rest", self.rest_url.unwrap());
         let fetcher = CheckpointFetcher::new(
             sui_rest_api::Client::new(rest_api_url),
diff --git a/crates/sui-indexer/src/framework/fetcher.rs b/crates/sui-indexer/src/framework/fetcher.rs
index cddce8d34a5..2ba32cdb2c8 100644
--- a/crates/sui-indexer/src/framework/fetcher.rs
+++ b/crates/sui-indexer/src/framework/fetcher.rs
@@ -70,8 +70,9 @@ impl CheckpointFetcher {
         let checkpoint = self.client.get_latest_checkpoint().await?;
         self.highest_known_checkpoint =
             std::cmp::max(self.highest_known_checkpoint, *checkpoint.sequence_number());
-        // NOTE: this metric is used to monitor delta between the highest known checkpoint on FN and in DB,
-        // there is an alert based on the delta of these two metrics.
+        // NOTE: this metric is used to monitor the delta between the highest
+        // known checkpoint on FN and in DB; there is an alert based on the
+        // delta of these two metrics.
         self.metrics
             .latest_fullnode_checkpoint_sequence_number
             .set(self.highest_known_checkpoint as i64);
diff --git a/crates/sui-indexer/src/framework/runner.rs b/crates/sui-indexer/src/framework/runner.rs
index 45f4992aa87..76f45f67867 100644
--- a/crates/sui-indexer/src/framework/runner.rs
+++ b/crates/sui-indexer/src/framework/runner.rs
@@ -1,11 +1,9 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use super::{fetcher::CheckpointDownloadData, interface::Handler};
 use crate::metrics::IndexerMetrics;
 
-use super::fetcher::CheckpointDownloadData;
-use super::interface::Handler;
-
 // Limit indexing parallelism on big checkpoints to avoid OOM,
 // by limiting the total size of batch checkpoints to ~50MB.
 // On testnet, most checkpoints are < 200KB, some can go up to 50MB.
@@ -25,7 +23,7 @@ where
     tracing::info!("Indexer runner is starting with {batch_size}");
     let mut chunks: futures::stream::ReadyChunks<S> = stream.ready_chunks(batch_size);
     while let Some(checkpoints) = chunks.next().await {
-        //TODO create tracing spans for processing
+        // TODO create tracing spans for processing
         let mut cp_batch = vec![];
         let mut cp_batch_total_size = 0;
         for checkpoint in checkpoints.iter() {
diff --git a/crates/sui-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-indexer/src/handlers/checkpoint_handler.rs
index 67671a1c1e6..b8b30e9ac9a 100644
--- a/crates/sui-indexer/src/handlers/checkpoint_handler.rs
+++ b/crates/sui-indexer/src/handlers/checkpoint_handler.rs
@@ -1,60 +1,57 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::handlers::committer::start_tx_checkpoint_commit_task;
-use crate::handlers::tx_processor::IndexingPackageBuffer;
-use crate::models::display::StoredDisplay;
+use std::{
+    collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+    sync::{Arc, Mutex},
+};
+
 use async_trait::async_trait;
 use itertools::Itertools;
-use move_core_types::annotated_value::{MoveStructLayout, MoveTypeLayout};
-use move_core_types::language_storage::{StructTag, TypeTag};
+use move_core_types::{
+    annotated_value::{MoveStructLayout, MoveTypeLayout},
+    language_storage::{StructTag, TypeTag},
+};
 use mysten_metrics::{get_metrics, spawn_monitored_task};
-use std::collections::{BTreeMap, HashMap};
-use std::sync::{Arc, Mutex};
-use sui_package_resolver::{PackageStore, Resolver};
-use sui_rest_api::CheckpointData;
-use sui_rest_api::CheckpointTransaction;
-use sui_types::base_types::ObjectRef;
-use sui_types::dynamic_field::DynamicFieldInfo;
-use sui_types::dynamic_field::DynamicFieldName;
-use sui_types::dynamic_field::DynamicFieldType;
-use sui_types::messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents};
-use sui_types::object::Object;
-
-use tokio::sync::watch;
-
-use std::collections::hash_map::Entry;
-use std::collections::HashSet;
 use sui_json_rpc_types::SuiMoveValue;
-use sui_types::base_types::SequenceNumber;
-use sui_types::effects::{TransactionEffects, TransactionEffectsAPI};
-use sui_types::event::SystemEpochInfoEvent;
-use sui_types::object::Owner;
-use sui_types::transaction::TransactionDataAPI;
+use sui_package_resolver::{PackageStore, Resolver};
+use sui_rest_api::{CheckpointData, CheckpointTransaction};
+use sui_types::{
+    base_types::{ObjectID, ObjectRef, SequenceNumber},
+    dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType},
+    effects::{TransactionEffects, TransactionEffectsAPI},
+    event::SystemEpochInfoEvent,
+    messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents},
object::{Object, Owner}, + sui_system_state::{ + get_sui_system_state, sui_system_state_summary::SuiSystemStateSummary, SuiSystemStateTrait, + }, + transaction::TransactionDataAPI, +}; use tap::tap::TapFallible; +use tokio::sync::watch; use tracing::{error, info, warn}; -use sui_types::base_types::ObjectID; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; -use sui_types::sui_system_state::{get_sui_system_state, SuiSystemStateTrait}; - -use crate::errors::IndexerError; -use crate::framework::interface::Handler; -use crate::metrics::IndexerMetrics; - -use crate::db::PgConnectionPool; -use crate::store::module_resolver::{IndexerStorePackageModuleResolver, InterimPackageResolver}; -use crate::store::{IndexerStore, PgIndexerStore}; -use crate::types::{ - IndexedCheckpoint, IndexedDeletedObject, IndexedEpochInfo, IndexedEvent, IndexedObject, - IndexedPackage, IndexedTransaction, IndexerResult, TransactionKind, TxIndex, +use super::{ + tx_processor::{EpochEndIndexingObjectStore, TxChangesProcessor}, + CheckpointDataToCommit, EpochToCommit, TransactionObjectChangesToCommit, +}; +use crate::{ + db::PgConnectionPool, + errors::IndexerError, + framework::interface::Handler, + handlers::{committer::start_tx_checkpoint_commit_task, tx_processor::IndexingPackageBuffer}, + metrics::IndexerMetrics, + models::display::StoredDisplay, + store::{ + module_resolver::{IndexerStorePackageModuleResolver, InterimPackageResolver}, + IndexerStore, PgIndexerStore, + }, + types::{ + IndexedCheckpoint, IndexedDeletedObject, IndexedEpochInfo, IndexedEvent, IndexedObject, + IndexedPackage, IndexedTransaction, IndexerResult, TransactionKind, TxIndex, + }, }; - -use super::tx_processor::EpochEndIndexingObjectStore; -use super::tx_processor::TxChangesProcessor; -use super::CheckpointDataToCommit; -use super::EpochToCommit; -use super::TransactionObjectChangesToCommit; const CHECKPOINT_QUEUE_SIZE: usize = 100; @@ -197,8 +194,9 @@ where "Checkpoints indexing finished, about to sending to commit handler" ); - // NOTE: when the channel is full, checkpoint_sender_guard will wait until the channel has space. - // Checkpoints are sent sequentially to stick to the order of checkpoint sequence numbers. + // NOTE: when the channel is full, checkpoint_sender_guard will wait until the + // channel has space. Checkpoints are sent sequentially to stick to the + // order of checkpoint sequence numbers. for checkpoint_data in checkpoint_data_to_commit { let checkpoint_seq = checkpoint_data.checkpoint.sequence_number; self.indexed_checkpoint_sender @@ -241,7 +239,7 @@ where last_epoch: None, new_epoch: IndexedEpochInfo::from_new_system_state_summary( system_state, - 0, //first_checkpoint_id + 0, // first_checkpoint_id None, ), })); @@ -402,7 +400,9 @@ where if tx_digest != *sender_signed_data.digest() { return Err(IndexerError::FullNodeReadingError(format!( "Transactions has different ordering from CheckpointContents, for checkpoint {}, Mismatch found at {} v.s. 
{}", - checkpoint_seq, tx_digest, sender_signed_data.digest() + checkpoint_seq, + tx_digest, + sender_signed_data.digest() ))); } let tx = sender_signed_data.transaction_data(); diff --git a/crates/sui-indexer/src/handlers/committer.rs b/crates/sui-indexer/src/handlers/committer.rs index 0afe433fc5a..7f92e855bd3 100644 --- a/crates/sui-indexer/src/handlers/committer.rs +++ b/crates/sui-indexer/src/handlers/committer.rs @@ -3,19 +3,13 @@ use std::collections::BTreeMap; -use tokio::sync::watch; -use tracing::instrument; - -use tap::tap::TapFallible; -use tracing::{error, info}; - use sui_types::messages_checkpoint::CheckpointSequenceNumber; - -use crate::metrics::IndexerMetrics; -use crate::store::IndexerStore; -use crate::types::IndexerResult; +use tap::tap::TapFallible; +use tokio::sync::watch; +use tracing::{error, info, instrument}; use super::{CheckpointDataToCommit, EpochToCommit}; +use crate::{metrics::IndexerMetrics, store::IndexerStore, types::IndexerResult}; const CHECKPOINT_COMMIT_BATCH_SIZE: usize = 100; @@ -202,8 +196,9 @@ async fn commit_checkpoints( metrics .transaction_per_checkpoint .observe(tx_count as f64 / (last_checkpoint_seq - first_checkpoint_seq + 1) as f64); - // 1000.0 is not necessarily the batch size, it's to roughly map average tx commit latency to [0.1, 1] seconds, - // which is well covered by DB_COMMIT_LATENCY_SEC_BUCKETS. + // 1000.0 is not necessarily the batch size, it's to roughly map average tx + // commit latency to [0.1, 1] seconds, which is well covered by + // DB_COMMIT_LATENCY_SEC_BUCKETS. metrics .thousand_transaction_avg_db_commit_latency .observe(elapsed * 1000.0 / tx_count as f64); diff --git a/crates/sui-indexer/src/handlers/objects_snapshot_processor.rs b/crates/sui-indexer/src/handlers/objects_snapshot_processor.rs index 6fd46af3a1b..50cdcc41487 100644 --- a/crates/sui-indexer/src/handlers/objects_snapshot_processor.rs +++ b/crates/sui-indexer/src/handlers/objects_snapshot_processor.rs @@ -3,8 +3,7 @@ use tracing::info; -use crate::types::IndexerResult; -use crate::{metrics::IndexerMetrics, store::IndexerStore}; +use crate::{metrics::IndexerMetrics, store::IndexerStore, types::IndexerResult}; const OBJECTS_SNAPSHOT_MAX_CHECKPOINT_LAG: usize = 900; const OBJECTS_SNAPSHOT_MIN_CHECKPOINT_LAG: usize = 300; @@ -74,14 +73,15 @@ where } } - // The `objects_snapshot` table maintains a delayed snapshot of the `objects` table, - // controlled by `object_snapshot_max_checkpoint_lag` (max lag) and - // `object_snapshot_min_checkpoint_lag` (min lag). For instance, with a max lag of 900 - // and a min lag of 300 checkpoints, the `objects_snapshot` table will lag behind the - // `objects` table by 300 to 900 checkpoints. The snapshot is updated when the lag - // exceeds the max lag threshold, and updates continue until the lag is reduced to - // the min lag threshold. Then, we have a consistent read range between - // `latest_snapshot_cp` and `latest_cp` based on `objects_snapshot` and `objects_history`, + // The `objects_snapshot` table maintains a delayed snapshot of the `objects` + // table, controlled by `object_snapshot_max_checkpoint_lag` (max lag) and + // `object_snapshot_min_checkpoint_lag` (min lag). For instance, with a max lag + // of 900 and a min lag of 300 checkpoints, the `objects_snapshot` table + // will lag behind the `objects` table by 300 to 900 checkpoints. The + // snapshot is updated when the lag exceeds the max lag threshold, and + // updates continue until the lag is reduced to the min lag threshold. 
Then, + // we have a consistent read range between `latest_snapshot_cp` and + // `latest_cp` based on `objects_snapshot` and `objects_history`, // where the size of this range varies between the min and max lag values. pub async fn start(&self) -> IndexerResult<()> { info!("Starting object snapshot processor..."); diff --git a/crates/sui-indexer/src/handlers/tx_processor.rs b/crates/sui-indexer/src/handlers/tx_processor.rs index eb6f44e6097..f552a9afdfa 100644 --- a/crates/sui-indexer/src/handlers/tx_processor.rs +++ b/crates/sui-indexer/src/handlers/tx_processor.rs @@ -4,45 +4,44 @@ // TODO remove the dead_code attribute after integration is done #![allow(dead_code)] +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use async_trait::async_trait; use move_binary_format::CompiledModule; use move_core_types::language_storage::ModuleId; -use mysten_metrics::monitored_scope; -use mysten_metrics::spawn_monitored_task; +use mysten_metrics::{monitored_scope, spawn_monitored_task}; +use sui_json_rpc::{get_balance_changes_from_effect, get_object_changes, ObjectProvider}; use sui_rest_api::CheckpointData; -use tokio::sync::watch; - -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; -use sui_types::object::Object; -use tokio::time::Duration; -use tokio::time::Instant; - -use sui_json_rpc::get_balance_changes_from_effect; -use sui_json_rpc::get_object_changes; -use sui_json_rpc::ObjectProvider; -use sui_types::base_types::SequenceNumber; -use sui_types::digests::TransactionDigest; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; -use sui_types::transaction::{TransactionData, TransactionDataAPI}; +use sui_types::{ + base_types::{ObjectID, SequenceNumber}, + digests::TransactionDigest, + effects::{TransactionEffects, TransactionEffectsAPI}, + messages_checkpoint::CheckpointSequenceNumber, + object::Object, + transaction::{TransactionData, TransactionDataAPI}, +}; +use tokio::{ + sync::watch, + time::{Duration, Instant}, +}; use tracing::debug; -use sui_types::base_types::ObjectID; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; - -use crate::errors::IndexerError; -use crate::metrics::IndexerMetrics; - -use crate::types::IndexedPackage; -use crate::types::{IndexedObjectChange, IndexerResult}; +use crate::{ + errors::IndexerError, + metrics::IndexerMetrics, + types::{IndexedObjectChange, IndexedPackage, IndexerResult}, +}; // GC the buffer every 300 checkpoints, or 5 minutes pub const BUFFER_GC_INTERVAL: Duration = Duration::from_secs(300); /// An in-mem buffer for modules during writer path indexing. /// It has static lifetime. Since we batch process checkpoints, -/// it's possible that when a package is looked up (e.g. to create dynamic field), -/// it has not been persisted in the database yet. So it works as an in-mem -/// store for package resolution. To avoid bloating memory, we GC modules +/// it's possible that when a package is looked up (e.g. to create dynamic +/// field), it has not been persisted in the database yet. So it works as an +/// in-mem store for package resolution. To avoid bloating memory, we GC modules /// that are older than the committed checkpoints. 
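The eviction rule described above is small enough to sketch: each module is stored alongside the checkpoint sequence number at which it was indexed, and a GC pass drops everything at or below the committed watermark. A minimal illustration of the idea, using hypothetical names rather than the real `IndexingModuleBuffer` internals:

    use std::collections::HashMap;

    type CheckpointSequenceNumber = u64;

    // Key: (package id, module name); value: (module bytes, checkpoint at
    // which the module was indexed). Hypothetical simplification.
    struct ModuleBuffer {
        modules: HashMap<(String, String), (Vec<u8>, CheckpointSequenceNumber)>,
    }

    impl ModuleBuffer {
        // Evict every module recorded at or below the committed checkpoint;
        // newer entries may still be needed by in-flight batches.
        fn gc(&mut self, committed: CheckpointSequenceNumber) {
            self.modules.retain(|_, (_, cp)| *cp > committed);
        }
    }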
pub struct IndexingModuleBuffer { modules: HashMap<(ObjectID, String), (Arc, CheckpointSequenceNumber)>, @@ -121,7 +120,7 @@ pub struct IndexingPackageBuffer { ObjectID, ( Arc, - u64, /* package version */ + u64, // package version CheckpointSequenceNumber, ), >, @@ -349,12 +348,15 @@ impl ObjectProvider for TxChangesProcessor { } } - panic!("Object {} is not found in TxChangesProcessor as an ObjectProvider (fn find_object_lt_or_eq_version)", id); + panic!( + "Object {} is not found in TxChangesProcessor as an ObjectProvider (fn find_object_lt_or_eq_version)", + id + ); } } -// This is a struct that is used to extract SuiSystemState and its dynamic children -// for end-of-epoch indexing. +// This is a struct that is used to extract SuiSystemState and its dynamic +// children for end-of-epoch indexing. pub(crate) struct EpochEndIndexingObjectStore<'a> { objects: Vec<&'a Object>, } diff --git a/crates/sui-indexer/src/indexer.rs b/crates/sui-indexer/src/indexer.rs index 5ce7db9f6ca..35a3ca79ef2 100644 --- a/crates/sui-indexer/src/indexer.rs +++ b/crates/sui-indexer/src/indexer.rs @@ -4,20 +4,23 @@ use std::env; use anyhow::Result; +use mysten_metrics::spawn_monitored_task; use prometheus::Registry; use tracing::info; -use mysten_metrics::spawn_monitored_task; - -use crate::build_json_rpc_server; -use crate::errors::IndexerError; -use crate::framework::fetcher::CheckpointFetcher; -use crate::handlers::checkpoint_handler::new_handlers; -use crate::handlers::objects_snapshot_processor::{ObjectsSnapshotProcessor, SnapshotLagConfig}; -use crate::indexer_reader::IndexerReader; -use crate::metrics::IndexerMetrics; -use crate::store::IndexerStore; -use crate::IndexerConfig; +use crate::{ + build_json_rpc_server, + errors::IndexerError, + framework::fetcher::CheckpointFetcher, + handlers::{ + checkpoint_handler::new_handlers, + objects_snapshot_processor::{ObjectsSnapshotProcessor, SnapshotLagConfig}, + }, + indexer_reader::IndexerReader, + metrics::IndexerMetrics, + store::IndexerStore, + IndexerConfig, +}; const DOWNLOAD_QUEUE_SIZE: usize = 200; diff --git a/crates/sui-indexer/src/indexer_reader.rs b/crates/sui-indexer/src/indexer_reader.rs index f2ab5ff41d7..5b31180a221 100644 --- a/crates/sui-indexer/src/indexer_reader.rs +++ b/crates/sui-indexer/src/indexer_reader.rs @@ -1,6 +1,39 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, RwLock}, +}; + +use anyhow::{anyhow, Result}; +use cached::{proc_macro::cached, SizedCache}; +use diesel::{ + dsl::sql, r2d2::ConnectionManager, sql_types::Bool, ExpressionMethods, OptionalExtension, + PgConnection, QueryDsl, RunQueryDsl, TextExpressionMethods, +}; +use fastcrypto::encoding::{Encoding, Hex}; +use itertools::{any, Itertools}; +use move_core_types::{annotated_value::MoveStructLayout, language_storage::StructTag}; +use sui_json_rpc_types::{ + Balance, CheckpointId, Coin as SuiCoin, DisplayFieldsResponse, EpochInfo, EventFilter, + SuiCoinMetadata, SuiEvent, SuiObjectDataFilter, SuiTransactionBlockEffects, + SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, TransactionFilter, +}; +use sui_types::{ + balance::Supply, + base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, VersionNumber}, + coin::{CoinMetadata, TreasuryCap}, + committee::EpochId, + digests::{ObjectDigest, TransactionDigest}, + dynamic_field::{DynamicFieldInfo, DynamicFieldName}, + event::EventID, + is_system_package, + move_package::MovePackage, + object::{MoveObject, Object, ObjectRead}, + sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, SuiSystemStateTrait}, +}; + use crate::{ db::{PgConnectionConfig, PgConnectionPoolConfig, PgPoolConnection}, errors::IndexerError, @@ -19,45 +52,6 @@ use crate::{ }, types::{IndexerResult, OwnerType}, }; -use anyhow::{anyhow, Result}; -use cached::proc_macro::cached; -use cached::SizedCache; -use diesel::{ - dsl::sql, r2d2::ConnectionManager, sql_types::Bool, ExpressionMethods, OptionalExtension, - PgConnection, QueryDsl, RunQueryDsl, TextExpressionMethods, -}; -use fastcrypto::encoding::Encoding; -use fastcrypto::encoding::Hex; -use itertools::{any, Itertools}; -use move_core_types::annotated_value::MoveStructLayout; -use move_core_types::language_storage::StructTag; -use std::{ - collections::{BTreeMap, HashMap}, - sync::{Arc, RwLock}, -}; -use sui_json_rpc_types::DisplayFieldsResponse; -use sui_json_rpc_types::{ - Balance, Coin as SuiCoin, SuiCoinMetadata, SuiTransactionBlockEffects, - SuiTransactionBlockEffectsAPI, -}; -use sui_json_rpc_types::{ - CheckpointId, EpochInfo, EventFilter, SuiEvent, SuiObjectDataFilter, - SuiTransactionBlockResponse, TransactionFilter, -}; -use sui_types::{ - balance::Supply, coin::TreasuryCap, dynamic_field::DynamicFieldName, object::MoveObject, -}; -use sui_types::{ - base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, VersionNumber}, - committee::EpochId, - digests::{ObjectDigest, TransactionDigest}, - dynamic_field::DynamicFieldInfo, - is_system_package, - move_package::MovePackage, - object::{Object, ObjectRead}, - sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, SuiSystemStateTrait}, -}; -use sui_types::{coin::CoinMetadata, event::EventID}; pub const TX_SEQUENCE_NUMBER_STR: &str = "tx_sequence_number"; pub const TRANSACTION_DIGEST_STR: &str = "transaction_digest"; @@ -186,9 +180,10 @@ thread_local! { /// This is done by either: /// - Checking that we are not inside a tokio runtime context /// Or: -/// - If we are inside a tokio runtime context, ensure that the call went through -/// `IndexerReader::spawn_blocking` which properly moves the blocking call to a blocking thread -/// pool. +/// - If we are inside a tokio runtime context, ensure that the call went +/// through +/// `IndexerReader::spawn_blocking` which properly moves the blocking call to a +/// blocking thread pool. 
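The guard defined just below is compact enough to restate as a self-contained sketch: a thread-local flag marks threads that entered through the blessed `spawn_blocking` path, and any blocking call made on a runtime thread without that flag panics instead of silently stalling the executor. (`assert_blocking_call_is_ok` and `spawn_blocking_wrapped` are illustrative names, not the crate's API.)

    use std::cell::RefCell;

    thread_local! {
        static CALLED_FROM_BLOCKING_POOL: RefCell<bool> = RefCell::new(false);
    }

    fn assert_blocking_call_is_ok() {
        // On a tokio runtime thread without having gone through the blocking
        // pool: this call could stall the executor, so fail loudly.
        if tokio::runtime::Handle::try_current().is_ok()
            && !CALLED_FROM_BLOCKING_POOL.with(|f| *f.borrow())
        {
            panic!("potentially blocking call made from an async context");
        }
    }

    async fn spawn_blocking_wrapped<T: Send + 'static>(
        f: impl FnOnce() -> T + Send + 'static,
    ) -> T {
        tokio::task::spawn_blocking(move || {
            // Mark this blocking-pool thread so the guard above passes.
            CALLED_FROM_BLOCKING_POOL.with(|flag| *flag.borrow_mut() = true);
            f()
        })
        .await
        .expect("blocking task panicked")
    }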
fn blocking_call_is_ok_or_panic() {
    if tokio::runtime::Handle::try_current().is_ok()
        && !CALLED_FROM_BLOCKING_POOL.with(|in_blocking_pool| *in_blocking_pool.borrow())
@@ -416,11 +411,12 @@ impl IndexerReader {
         Ok(system_state)
     }

-    /// Retrieve the system state data for the given epoch. If no epoch is given,
-    /// it will retrieve the latest epoch's data and return the system state.
-    /// System state of the an epoch is written at the end of the epoch, so system state
-    /// of the current epoch is empty until the epoch ends. You can call
-    /// `get_latest_sui_system_state` for current epoch instead.
+    /// Retrieve the system state data for the given epoch. If no epoch is
+    /// given, it will retrieve the latest epoch's data and return the
+    /// system state. The system state of an epoch is written at the end of
+    /// the epoch, so the system state of the current epoch is empty until the
+    /// epoch ends. You can call `get_latest_sui_system_state` for the current
+    /// epoch instead.
     pub fn get_epoch_sui_system_state(
         &self,
         epoch: Option,
@@ -823,7 +819,7 @@ impl IndexerReader {
                     cursor_tx_seq,
                     limit,
                     is_descending,
-                )
+                );
             }
             // FIXME: sanitize module & function
             Some(TransactionFilter::MoveFunction {
@@ -968,11 +964,7 @@ impl IndexerReader {
         let query = format!(
             "SELECT {TX_SEQUENCE_NUMBER_STR} FROM {} WHERE {} {} ORDER BY {TX_SEQUENCE_NUMBER_STR} {} LIMIT {}",
-            table_name,
-            main_where_clause,
-            cursor_clause,
-            order_str,
-            limit,
+            table_name, main_where_clause, cursor_clause, order_str, limit,
         );
         tracing::debug!("query transaction blocks: {}", query);
@@ -1134,9 +1126,15 @@ impl IndexerReader {
         let query = if let EventFilter::Sender(sender) = &filter {
             // Need to remove ambiguities for tx_sequence_number column
             let cursor_clause = if descending_order {
-                format!("(e.{TX_SEQUENCE_NUMBER_STR} < {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} < {}))", tx_seq, tx_seq, event_seq)
+                format!(
+                    "(e.{TX_SEQUENCE_NUMBER_STR} < {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} < {}))",
+                    tx_seq, tx_seq, event_seq
+                )
             } else {
-                format!("(e.{TX_SEQUENCE_NUMBER_STR} > {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} > {}))", tx_seq, tx_seq, event_seq)
+                format!(
+                    "(e.{TX_SEQUENCE_NUMBER_STR} > {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} > {}))",
+                    tx_seq, tx_seq, event_seq
+                )
             };
             let order_clause = if descending_order {
                 format!("e.{TX_SEQUENCE_NUMBER_STR} DESC, e.{EVENT_SEQUENCE_NUMBER_STR} DESC")
@@ -1201,9 +1199,15 @@ impl IndexerReader {
         };
         let cursor_clause = if descending_order {
-            format!("AND ({TX_SEQUENCE_NUMBER_STR} < {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} < {}))", tx_seq, tx_seq, event_seq)
+            format!(
+                "AND ({TX_SEQUENCE_NUMBER_STR} < {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} < {}))",
+                tx_seq, tx_seq, event_seq
+            )
         } else {
-            format!("AND ({TX_SEQUENCE_NUMBER_STR} > {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} > {}))", tx_seq, tx_seq, event_seq)
+            format!(
+                "AND ({TX_SEQUENCE_NUMBER_STR} > {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} > {}))",
+                tx_seq, tx_seq, event_seq
+            )
         };
         let order_clause = if descending_order {
             format!("{TX_SEQUENCE_NUMBER_STR} DESC, {EVENT_SEQUENCE_NUMBER_STR} DESC")
@@ -1669,7 +1673,8 @@ impl move_bytecode_utils::module_cache::GetModule for IndexerReader {
     ) -> Result<Option<Self::Item>, Self::Error> {
         let package_id = ObjectID::from(*id.address());
         let module_name =
id.name().to_string(); - // TODO: we need a cache here for deserialized module and take care of package upgrades + // TODO: we need a cache here for deserialized module and take care of package + // upgrades self.get_package(&package_id)? .and_then(|package| package.serialized_module_map().get(&module_name).cloned()) .map(|bytes| move_binary_format::CompiledModule::deserialize_with_defaults(&bytes)) @@ -1713,10 +1718,12 @@ fn get_single_obj_id_from_package_publish( if obj_ids_with_type.len() == 1 { Ok(Some(obj_ids_with_type[0])) } else if obj_ids_with_type.is_empty() { - // The package exists but no such object is created in that transaction. Or maybe it is wrapped and we don't know yet. + // The package exists but no such object is created in that transaction. Or + // maybe it is wrapped and we don't know yet. Ok(None) } else { - // We expect there to be only one object of this type created by the package but more than one is found. + // We expect there to be only one object of this type created by the package but + // more than one is found. tracing::error!( "There are more than one objects found for type {}", obj_type diff --git a/crates/sui-indexer/src/lib.rs b/crates/sui-indexer/src/lib.rs index 9c5d08919d6..0510939629a 100644 --- a/crates/sui-indexer/src/lib.rs +++ b/crates/sui-indexer/src/lib.rs @@ -6,23 +6,23 @@ use std::net::SocketAddr; use anyhow::{anyhow, Result}; use clap::Parser; +use errors::IndexerError; use jsonrpsee::http_client::{HeaderMap, HeaderValue, HttpClient, HttpClientBuilder}; use metrics::IndexerMetrics; use prometheus::Registry; +use sui_json_rpc::{JsonRpcServerBuilder, ServerHandle, ServerType}; +use sui_json_rpc_api::CLIENT_SDK_TYPE_HEADER; use tokio::runtime::Handle; use tracing::warn; use url::Url; -use sui_json_rpc::ServerType; -use sui_json_rpc::{JsonRpcServerBuilder, ServerHandle}; -use sui_json_rpc_api::CLIENT_SDK_TYPE_HEADER; - -use crate::apis::{ - CoinReadApi, ExtendedApi, GovernanceReadApi, IndexerApi, MoveUtilsApi, ReadApi, - TransactionBuilderApi, WriteApi, +use crate::{ + apis::{ + CoinReadApi, ExtendedApi, GovernanceReadApi, IndexerApi, MoveUtilsApi, ReadApi, + TransactionBuilderApi, WriteApi, + }, + indexer_reader::IndexerReader, }; -use crate::indexer_reader::IndexerReader; -use errors::IndexerError; pub mod apis; pub mod db; @@ -91,15 +91,29 @@ impl IndexerConfig { } pub fn get_db_url(&self) -> Result { - match (&self.db_url, &self.db_user_name, &self.db_password, &self.db_host, &self.db_port, &self.db_name) { + match ( + &self.db_url, + &self.db_user_name, + &self.db_password, + &self.db_host, + &self.db_port, + &self.db_name, + ) { (Some(db_url), _, _, _, _, _) => Ok(db_url.clone()), - (None, Some(db_user_name), Some(db_password), Some(db_host), Some(db_port), Some(db_name)) => { - Ok(format!( - "postgres://{}:{}@{}:{}/{}", - db_user_name, db_password, db_host, db_port, db_name - )) - } - _ => Err(anyhow!("Invalid db connection config, either db_url or (db_user_name, db_password, db_host, db_port, db_name) must be provided")), + ( + None, + Some(db_user_name), + Some(db_password), + Some(db_host), + Some(db_port), + Some(db_name), + ) => Ok(format!( + "postgres://{}:{}@{}:{}/{}", + db_user_name, db_password, db_host, db_port, db_name + )), + _ => Err(anyhow!( + "Invalid db connection config, either db_url or (db_user_name, db_password, db_host, db_port, db_name) must be provided" + )), } } } diff --git a/crates/sui-indexer/src/main.rs b/crates/sui-indexer/src/main.rs index 8aa744d2855..e633436639b 100644 --- a/crates/sui-indexer/src/main.rs 
+++ b/crates/sui-indexer/src/main.rs @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 use clap::Parser; +use sui_indexer::{ + db::{get_pg_pool_connection, new_pg_connection_pool, reset_database}, + errors::IndexerError, + indexer::Indexer, + metrics::{start_prometheus_server, IndexerMetrics}, + store::PgIndexerStore, + IndexerConfig, +}; use tracing::{error, info}; -use sui_indexer::db::{get_pg_pool_connection, new_pg_connection_pool, reset_database}; -use sui_indexer::errors::IndexerError; -use sui_indexer::indexer::Indexer; -use sui_indexer::metrics::start_prometheus_server; -use sui_indexer::metrics::IndexerMetrics; -use sui_indexer::store::PgIndexerStore; -use sui_indexer::IndexerConfig; - #[tokio::main] async fn main() -> Result<(), IndexerError> { // NOTE: this is to print out tracing like info, warn & error. diff --git a/crates/sui-indexer/src/metrics.rs b/crates/sui-indexer/src/metrics.rs index d125f3e9e80..a14975a3053 100644 --- a/crates/sui-indexer/src/metrics.rs +++ b/crates/sui-indexer/src/metrics.rs @@ -1,20 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::collections::HashMap; -use std::net::SocketAddr; +use std::{collections::HashMap, net::SocketAddr}; use axum::{extract::Extension, http::StatusCode, routing::get, Router}; +use mysten_metrics::RegistryService; use prometheus::{ register_histogram_with_registry, register_int_counter_with_registry, - register_int_gauge_with_registry, Histogram, IntCounter, IntGauge, + register_int_gauge_with_registry, Histogram, IntCounter, IntGauge, Registry, TextEncoder, }; -use prometheus::{Registry, TextEncoder}; use regex::Regex; use tracing::{info, warn}; -use mysten_metrics::RegistryService; - const METRICS_ROUTE: &str = "/metrics"; pub fn start_prometheus_server( @@ -130,8 +127,8 @@ pub struct IndexerMetrics { pub update_object_snapshot_latency: Histogram, pub tokio_blocking_task_wait_latency: Histogram, // average latency of committing 1000 transactions. - // 1000 is not necessarily the batch size, it's to roughly map average tx commit latency to [0.1, 1] seconds, - // which is well covered by DB_COMMIT_LATENCY_SEC_BUCKETS. + // 1000 is not necessarily the batch size, it's to roughly map average tx commit latency to + // [0.1, 1] seconds, which is well covered by DB_COMMIT_LATENCY_SEC_BUCKETS. 
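The scaling in the comment above is plain unit arithmetic: with `elapsed` in seconds and `tx_count` transactions committed, `elapsed * 1000.0 / tx_count` projects the time to commit 1,000 transactions. A worked example (the helper function is hypothetical; the real code inlines the expression at the `observe()` call site):

    fn avg_secs_per_1000_txs(elapsed_secs: f64, tx_count: u64) -> f64 {
        elapsed_secs * 1000.0 / tx_count as f64
    }

    fn main() {
        // 5,000 transactions committed in 2 s -> 0.4 s per 1,000 txs, inside
        // the [0.1, 1] s range covered by DB_COMMIT_LATENCY_SEC_BUCKETS.
        assert_eq!(avg_secs_per_1000_txs(2.0, 5_000), 0.4);
    }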
pub thousand_transaction_avg_db_commit_latency: Histogram, pub object_db_commit_latency: Histogram, pub object_mutation_db_commit_latency: Histogram, diff --git a/crates/sui-indexer/src/models/checkpoints.rs b/crates/sui-indexer/src/models/checkpoints.rs index a4124d3fd49..e6801f5f064 100644 --- a/crates/sui-indexer/src/models/checkpoints.rs +++ b/crates/sui-indexer/src/models/checkpoints.rs @@ -2,15 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use diesel::prelude::*; - use sui_json_rpc_types::Checkpoint as RpcCheckpoint; -use sui_types::base_types::TransactionDigest; -use sui_types::digests::CheckpointDigest; -use sui_types::gas::GasCostSummary; +use sui_types::{base_types::TransactionDigest, digests::CheckpointDigest, gas::GasCostSummary}; -use crate::errors::IndexerError; -use crate::schema::checkpoints; -use crate::types::IndexedCheckpoint; +use crate::{errors::IndexerError, schema::checkpoints, types::IndexedCheckpoint}; #[derive(Queryable, Insertable, Debug, Clone, Default)] #[diesel(table_name = checkpoints)] diff --git a/crates/sui-indexer/src/models/epoch.rs b/crates/sui-indexer/src/models/epoch.rs index a392fafbbd4..6b12557dc66 100644 --- a/crates/sui-indexer/src/models/epoch.rs +++ b/crates/sui-indexer/src/models/epoch.rs @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use diesel::{Insertable, Queryable, Selectable}; - -use crate::errors::IndexerError; -use crate::schema::epochs; -use crate::types::IndexedEpochInfo; use sui_json_rpc_types::{EndOfEpochInfo, EpochInfo}; use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; +use crate::{errors::IndexerError, schema::epochs, types::IndexedEpochInfo}; + #[derive(Queryable, Insertable, Debug, Clone, Default)] #[diesel(table_name = epochs)] pub struct StoredEpochInfo { diff --git a/crates/sui-indexer/src/models/events.rs b/crates/sui-indexer/src/models/events.rs index 00e3c541c8c..85c901d5af8 100644 --- a/crates/sui-indexer/src/models/events.rs +++ b/crates/sui-indexer/src/models/events.rs @@ -6,18 +6,16 @@ use std::str::FromStr; use diesel::prelude::*; use move_bytecode_utils::module_cache::GetModule; use move_core_types::identifier::Identifier; - use sui_json_rpc_types::{SuiEvent, SuiMoveStruct}; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::digests::TransactionDigest; -use sui_types::event::EventID; -use sui_types::object::bounded_visitor::BoundedVisitor; -use sui_types::object::MoveObject; -use sui_types::parse_sui_struct_tag; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + digests::TransactionDigest, + event::EventID, + object::{bounded_visitor::BoundedVisitor, MoveObject}, + parse_sui_struct_tag, +}; -use crate::errors::IndexerError; -use crate::schema::events; -use crate::types::IndexedEvent; +use crate::{errors::IndexerError, schema::events, types::IndexedEvent}; #[derive(Queryable, QueryableByName, Insertable, Debug, Clone)] #[diesel(table_name = events)] @@ -101,7 +99,7 @@ impl StoredEvent { None => { return Err(IndexerError::PersistentStorageDataCorruptionError( "Event senders element should not be null".to_string(), - )) + )); } }; @@ -136,10 +134,11 @@ impl StoredEvent { #[cfg(test)] mod tests { - use super::*; use move_core_types::{account_address::AccountAddress, language_storage::StructTag}; use sui_types::event::Event; + use super::*; + #[test] fn test_canonical_string_of_event_type() { let tx_digest = TransactionDigest::default(); diff --git a/crates/sui-indexer/src/models/objects.rs b/crates/sui-indexer/src/models/objects.rs index 
e37cbce4659..78cbfe5138b 100644 --- a/crates/sui-indexer/src/models/objects.rs +++ b/crates/sui-indexer/src/models/objects.rs @@ -1,22 +1,25 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use serde::de::DeserializeOwned; use std::collections::HashMap; -use sui_json_rpc::coin_api::parse_to_struct_tag; use diesel::prelude::*; use move_bytecode_utils::module_cache::GetModule; +use serde::de::DeserializeOwned; +use sui_json_rpc::coin_api::parse_to_struct_tag; use sui_json_rpc_types::{Balance, Coin as SuiCoin}; -use sui_types::base_types::{ObjectID, ObjectRef, SequenceNumber}; -use sui_types::digests::ObjectDigest; -use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType, Field}; -use sui_types::object::Object; -use sui_types::object::ObjectRead; - -use crate::errors::IndexerError; -use crate::schema::{objects, objects_history}; -use crate::types::{IndexedDeletedObject, IndexedObject, ObjectStatus}; +use sui_types::{ + base_types::{ObjectID, ObjectRef, SequenceNumber}, + digests::ObjectDigest, + dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType, Field}, + object::{Object, ObjectRead}, +}; + +use crate::{ + errors::IndexerError, + schema::{objects, objects_history}, + types::{IndexedDeletedObject, IndexedObject, ObjectStatus}, +}; #[derive(Queryable)] pub struct DynamicFieldColumn { @@ -212,7 +215,8 @@ impl StoredObject { return Ok(None); } - // Past this point, if there is any unexpected field, it's a data corruption error + // Past this point, if there is any unexpected field, it's a data corruption + // error let object_id = ObjectID::from_bytes(&self.object_id).map_err(|_| { IndexerError::PersistentStorageDataCorruptionError(format!( "Can't convert {:?} to object_id", @@ -245,7 +249,7 @@ impl StoredObject { return Err(IndexerError::PersistentStorageDataCorruptionError(format!( "object {} has incompatible dynamic field type: empty df_kind", object_id - ))) + ))); } }; let name = if let Some(field_name) = self.df_name { @@ -411,7 +415,10 @@ mod tests { match stored_obj.object_type { Some(t) => { - assert_eq!(t, "0x0000000000000000000000000000000000000000000000000000000000000002::coin::Coin<0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI>"); + assert_eq!( + t, + "0x0000000000000000000000000000000000000000000000000000000000000002::coin::Coin<0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI>" + ); } None => { panic!("object_type should not be none"); @@ -491,7 +498,10 @@ mod tests { match stored_obj.object_type { Some(t) => { - assert_eq!(t, "0x00000000000000000000000000000000000000000000000000000000000000e7::vec_coin::VecCoin>>"); + assert_eq!( + t, + "0x00000000000000000000000000000000000000000000000000000000000000e7::vec_coin::VecCoin>>" + ); } None => { panic!("object_type should not be none"); diff --git a/crates/sui-indexer/src/models/packages.rs b/crates/sui-indexer/src/models/packages.rs index 1d042bf5138..f2be540fda7 100644 --- a/crates/sui-indexer/src/models/packages.rs +++ b/crates/sui-indexer/src/models/packages.rs @@ -1,11 +1,10 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::schema::packages; -use crate::types::IndexedPackage; - use diesel::prelude::*; +use crate::{schema::packages, types::IndexedPackage}; + #[derive(Queryable, Insertable, Clone, Debug, Identifiable)] #[diesel(table_name = packages, primary_key(package_id))] pub struct StoredPackage { diff --git a/crates/sui-indexer/src/models/transactions.rs b/crates/sui-indexer/src/models/transactions.rs index be2389ded3d..7d496f3aedd 100644 --- a/crates/sui-indexer/src/models/transactions.rs +++ b/crates/sui-indexer/src/models/transactions.rs @@ -1,26 +1,23 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 use diesel::prelude::*; - use move_bytecode_utils::module_cache::GetModule; -use sui_json_rpc_types::BalanceChange; -use sui_json_rpc_types::ObjectChange; -use sui_json_rpc_types::SuiTransactionBlock; -use sui_json_rpc_types::SuiTransactionBlockEffects; -use sui_json_rpc_types::SuiTransactionBlockEvents; -use sui_json_rpc_types::SuiTransactionBlockResponse; -use sui_json_rpc_types::SuiTransactionBlockResponseOptions; -use sui_types::digests::TransactionDigest; -use sui_types::effects::TransactionEffects; -use sui_types::effects::TransactionEvents; -use sui_types::event::Event; -use sui_types::transaction::SenderSignedData; - -use crate::errors::IndexerError; -use crate::schema::transactions; -use crate::types::IndexedObjectChange; -use crate::types::IndexedTransaction; -use crate::types::IndexerResult; +use sui_json_rpc_types::{ + BalanceChange, ObjectChange, SuiTransactionBlock, SuiTransactionBlockEffects, + SuiTransactionBlockEvents, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, +}; +use sui_types::{ + digests::TransactionDigest, + effects::{TransactionEffects, TransactionEvents}, + event::Event, + transaction::SenderSignedData, +}; + +use crate::{ + errors::IndexerError, + schema::transactions, + types::{IndexedObjectChange, IndexedTransaction, IndexerResult}, +}; #[derive(Clone, Debug, Queryable, Insertable, QueryableByName)] #[diesel(table_name = transactions)] diff --git a/crates/sui-indexer/src/models/tx_indices.rs b/crates/sui-indexer/src/models/tx_indices.rs index 9e30be163ad..07339e1eb77 100644 --- a/crates/sui-indexer/src/models/tx_indices.rs +++ b/crates/sui-indexer/src/models/tx_indices.rs @@ -1,11 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use diesel::prelude::*; + use crate::{ schema::{tx_calls, tx_changed_objects, tx_input_objects, tx_recipients, tx_senders}, types::TxIndex, }; -use diesel::prelude::*; #[derive(QueryableByName)] pub struct TxSequenceNumber { diff --git a/crates/sui-indexer/src/store/indexer_store.rs b/crates/sui-indexer/src/store/indexer_store.rs index 80cf4cfb5a0..4abf2e61efd 100644 --- a/crates/sui-indexer/src/store/indexer_store.rs +++ b/crates/sui-indexer/src/store/indexer_store.rs @@ -1,22 +1,25 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{any::Any, collections::BTreeMap, sync::Arc}; + use async_trait::async_trait; use move_binary_format::CompiledModule; use move_bytecode_utils::module_cache::GetModule; -use std::any::Any; -use std::collections::BTreeMap; -use std::sync::Arc; - -use sui_types::base_types::{ObjectID, SequenceNumber}; -use sui_types::object::ObjectRead; - -use crate::errors::IndexerError; -use crate::handlers::{EpochToCommit, TransactionObjectChangesToCommit}; - -use crate::models::display::StoredDisplay; -use crate::models::objects::{StoredDeletedObject, StoredObject}; -use crate::types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}; +use sui_types::{ + base_types::{ObjectID, SequenceNumber}, + object::ObjectRead, +}; + +use crate::{ + errors::IndexerError, + handlers::{EpochToCommit, TransactionObjectChangesToCommit}, + models::{ + display::StoredDisplay, + objects::{StoredDeletedObject, StoredObject}, + }, + types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}, +}; #[allow(clippy::large_enum_variant)] pub enum ObjectChangeToCommit { @@ -54,7 +57,7 @@ pub trait IndexerStore: Any + Clone + Sync + Send + 'static { ) -> Result<(), IndexerError>; async fn persist_object_snapshot(&self, start_cp: u64, end_cp: u64) - -> Result<(), IndexerError>; + -> Result<(), IndexerError>; async fn persist_checkpoints( &self, diff --git a/crates/sui-indexer/src/store/module_resolver.rs b/crates/sui-indexer/src/store/module_resolver.rs index 71ce7217af7..705fa8acfd0 100644 --- a/crates/sui-indexer/src/store/module_resolver.rs +++ b/crates/sui-indexer/src/store/module_resolver.rs @@ -1,28 +1,30 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use async_trait::async_trait; -use diesel::ExpressionMethods; -use diesel::OptionalExtension; -use diesel::{QueryDsl, RunQueryDsl}; use std::sync::{Arc, Mutex}; -use move_core_types::account_address::AccountAddress; -use move_core_types::language_storage::ModuleId; -use move_core_types::resolver::ModuleResolver; +use async_trait::async_trait; +use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl}; +use move_core_types::{ + account_address::AccountAddress, language_storage::ModuleId, resolver::ModuleResolver, +}; use sui_package_resolver::{error::Error as PackageResolverError, Package, PackageStore}; -use sui_types::base_types::{ObjectID, SequenceNumber}; -use sui_types::move_package::MovePackage; -use sui_types::object::Object; - -use crate::db::PgConnectionPool; -use crate::errors::{Context, IndexerError}; -use crate::handlers::tx_processor::IndexingPackageBuffer; -use crate::metrics::IndexerMetrics; -use crate::models::packages::StoredPackage; -use crate::schema::{objects, packages}; -use crate::store::diesel_macro::read_only_blocking; -use crate::types::IndexedPackage; +use sui_types::{ + base_types::{ObjectID, SequenceNumber}, + move_package::MovePackage, + object::Object, +}; + +use crate::{ + db::PgConnectionPool, + errors::{Context, IndexerError}, + handlers::tx_processor::IndexingPackageBuffer, + metrics::IndexerMetrics, + models::packages::StoredPackage, + schema::{objects, packages}, + store::diesel_macro::read_only_blocking, + types::IndexedPackage, +}; /// A package resolver that reads packages from the database. 
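For orientation before the resolver below: a database-backed `ModuleResolver` amounts to keying the `packages` table by the module's address, decoding the stored BCS into a `MovePackage`, and looking the module's bytecode up in its serialized module map. A schematic sketch under that assumption, with `load_stored_package` as a hypothetical stand-in for the diesel query:

    use anyhow::anyhow;
    use move_core_types::{language_storage::ModuleId, resolver::ModuleResolver};
    use sui_types::move_package::MovePackage;

    // Hypothetical row type and loader standing in for the real query
    // against the `packages` table.
    struct StoredPackageRow {
        move_package: Vec<u8>, // BCS-encoded MovePackage
    }

    fn load_stored_package(_package_id: &[u8]) -> anyhow::Result<StoredPackageRow> {
        unimplemented!("SELECT move_package FROM packages WHERE package_id = $1")
    }

    struct DbModuleResolver;

    impl ModuleResolver for DbModuleResolver {
        type Error = anyhow::Error;

        fn get_module(&self, id: &ModuleId) -> Result<Option<Vec<u8>>, Self::Error> {
            let package_id = id.address().to_vec();
            let module_name = id.name().to_string();
            let row = load_stored_package(&package_id)?;
            let package: MovePackage =
                bcs::from_bytes(&row.move_package).map_err(|e| anyhow!(e))?;
            // The serialized module map is keyed by module name.
            Ok(package.serialized_module_map().get(&module_name).cloned())
        }
    }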
pub struct IndexerStorePackageModuleResolver { @@ -42,8 +44,9 @@ impl ModuleResolver for IndexerStorePackageModuleResolver { let package_id = ObjectID::from(*id.address()).to_vec(); let module_name = id.name().to_string(); - // Note: this implementation is potentially vulnerable to package upgrade race conditions - // for framework packages because they reuse the same package IDs. + // Note: this implementation is potentially vulnerable to package upgrade race + // conditions for framework packages because they reuse the same package + // IDs. let stored_package: StoredPackage = read_only_blocking!(&self.cp, |conn| { packages::dsl::packages .filter(packages::dsl::package_id.eq(package_id)) diff --git a/crates/sui-indexer/src/store/pg_indexer_store.rs b/crates/sui-indexer/src/store/pg_indexer_store.rs index 8f28c5dd65b..d28fe1bc819 100644 --- a/crates/sui-indexer/src/store/pg_indexer_store.rs +++ b/crates/sui-indexer/src/store/pg_indexer_store.rs @@ -2,54 +2,56 @@ // SPDX-License-Identifier: Apache-2.0 use core::result::Result::Ok; -use itertools::Itertools; -use std::any::Any; -use std::collections::hash_map::Entry; -use std::collections::BTreeMap; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; -use std::time::Instant; -use tap::Tap; +use std::{ + any::Any, + collections::{hash_map::Entry, BTreeMap, HashMap}, + sync::Arc, + time::{Duration, Instant}, +}; use async_trait::async_trait; -use diesel::dsl::max; -use diesel::upsert::excluded; -use diesel::ExpressionMethods; -use diesel::OptionalExtension; -use diesel::{QueryDsl, RunQueryDsl}; +use diesel::{ + dsl::max, upsert::excluded, ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, +}; +use itertools::Itertools; use move_bytecode_utils::module_cache::SyncModuleCache; +use sui_types::{ + base_types::{ObjectID, SequenceNumber}, + object::ObjectRead, +}; +use tap::Tap; use tracing::info; -use sui_types::base_types::{ObjectID, SequenceNumber}; -use sui_types::object::ObjectRead; - -use crate::errors::{Context, IndexerError}; -use crate::handlers::EpochToCommit; -use crate::handlers::TransactionObjectChangesToCommit; -use crate::metrics::IndexerMetrics; - -use crate::db::PgConnectionPool; -use crate::models::checkpoints::StoredCheckpoint; -use crate::models::display::StoredDisplay; -use crate::models::epoch::StoredEpochInfo; -use crate::models::events::StoredEvent; -use crate::models::objects::{ - StoredDeletedHistoryObject, StoredDeletedObject, StoredHistoryObject, StoredObject, +use super::{ + pg_partition_manager::{EpochPartitionData, PgPartitionManager}, + IndexerStore, ObjectChangeToCommit, }; -use crate::models::packages::StoredPackage; -use crate::models::transactions::StoredTransaction; -use crate::schema::{ - checkpoints, display, epochs, events, objects, objects_history, objects_snapshot, packages, - transactions, tx_calls, tx_changed_objects, tx_input_objects, tx_recipients, tx_senders, +use crate::{ + db::PgConnectionPool, + errors::{Context, IndexerError}, + handlers::{EpochToCommit, TransactionObjectChangesToCommit}, + metrics::IndexerMetrics, + models::{ + checkpoints::StoredCheckpoint, + display::StoredDisplay, + epoch::StoredEpochInfo, + events::StoredEvent, + objects::{ + StoredDeletedHistoryObject, StoredDeletedObject, StoredHistoryObject, StoredObject, + }, + packages::StoredPackage, + transactions::StoredTransaction, + }, + schema::{ + checkpoints, display, epochs, events, objects, objects_history, objects_snapshot, packages, + transactions, tx_calls, tx_changed_objects, 
tx_input_objects, tx_recipients, tx_senders, + }, + store::{ + diesel_macro::{read_only_blocking, transactional_blocking_with_retry}, + module_resolver::IndexerStorePackageModuleResolver, + }, + types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}, }; -use crate::store::diesel_macro::{read_only_blocking, transactional_blocking_with_retry}; -use crate::store::module_resolver::IndexerStorePackageModuleResolver; -use crate::types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}; - -use super::pg_partition_manager::{EpochPartitionData, PgPartitionManager}; -use super::IndexerStore; -use super::ObjectChangeToCommit; #[macro_export] macro_rules! chunk { @@ -710,7 +712,8 @@ impl PgIndexerStore { .do_update() .set(( // Note: Exclude epoch beginning info except system_state below. - // This is to ensure that epoch beginning info columns are not overridden with default values, + // This is to ensure that epoch beginning info columns are not + // overridden with default values, // because these columns are default values in `last_epoch`. epochs::system_state.eq(excluded(epochs::system_state)), epochs::epoch_total_transactions diff --git a/crates/sui-indexer/src/store/pg_partition_manager.rs b/crates/sui-indexer/src/store/pg_partition_manager.rs index a22c01207cc..bcdca85a2c5 100644 --- a/crates/sui-indexer/src/store/pg_partition_manager.rs +++ b/crates/sui-indexer/src/store/pg_partition_manager.rs @@ -1,17 +1,21 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use diesel::sql_types::{BigInt, VarChar}; -use diesel::{QueryableByName, RunQueryDsl}; -use std::collections::BTreeMap; -use std::time::Duration; +use std::{collections::BTreeMap, time::Duration}; + +use diesel::{ + sql_types::{BigInt, VarChar}, + QueryableByName, RunQueryDsl, +}; use tracing::{error, info}; -use crate::db::PgConnectionPool; -use crate::handlers::EpochToCommit; -use crate::models::epoch::StoredEpochInfo; -use crate::store::diesel_macro::{read_only_blocking, transactional_blocking_with_retry}; -use crate::IndexerError; +use crate::{ + db::PgConnectionPool, + handlers::EpochToCommit, + models::epoch::StoredEpochInfo, + store::diesel_macro::{read_only_blocking, transactional_blocking_with_retry}, + IndexerError, +}; const GET_PARTITION_SQL: &str = r" SELECT parent.relname AS table_name, @@ -116,8 +120,8 @@ impl PgPartitionManager { table, last_partition, data.next_epoch ); } else if last_partition != data.next_epoch { - // skip when the partition is already advanced once, which is possible when indexer - // crashes and restarts; error otherwise. + // skip when the partition is already advanced once, which is possible when + // indexer crashes and restarts; error otherwise. 
error!( "Epoch partition for table {} is not in sync with the last epoch {}.", table, data.last_epoch diff --git a/crates/sui-indexer/src/store/query.rs b/crates/sui-indexer/src/store/query.rs index 93d57b29804..8b5c3c2441b 100644 --- a/crates/sui-indexer/src/store/query.rs +++ b/crates/sui-indexer/src/store/query.rs @@ -6,7 +6,7 @@ use sui_types::base_types::ObjectID; pub trait DBFilter { fn to_objects_history_sql(&self, cursor: Option, limit: usize, columns: Vec<&str>) - -> String; + -> String; fn to_latest_objects_sql(&self, cursor: Option, limit: usize, columns: Vec<&str>) -> String; } @@ -40,8 +40,9 @@ impl DBFilter for SuiObjectDataFilter { .map(|c| format!("t1.{c}")) .collect::>() .join(", "); - // NOTE: order by checkpoint DESC so that whenever a row from checkpoint is available, - // we will pick that over the one from fast-path, which has checkpoint of -1. + // NOTE: order by checkpoint DESC so that whenever a row from checkpoint is + // available, we will pick that over the one from fast-path, which has + // checkpoint of -1. format!( "SELECT {columns} FROM (SELECT DISTINCT ON (o.object_id) * @@ -126,7 +127,9 @@ fn to_clauses(filter: &SuiObjectDataFilter) -> Option { Some(format!("NOT ({})", sub_filters.join(" OR "))) } } - SuiObjectDataFilter::Package(p) => Some(format!("o.object_type LIKE '{}::%'", p.to_hex_literal())), + SuiObjectDataFilter::Package(p) => { + Some(format!("o.object_type LIKE '{}::%'", p.to_hex_literal())) + } SuiObjectDataFilter::MoveModule { package, module } => Some(format!( "o.object_type LIKE '{}::{}::%'", package.to_hex_literal(), @@ -140,16 +143,14 @@ fn to_clauses(filter: &SuiObjectDataFilter) -> Option { } else { Some(format!("o.object_type = '{s}'")) } - }, - SuiObjectDataFilter::AddressOwner(a) => { - Some(format!("((o.owner_type = 'address_owner' AND o.owner_address = '{a}') OR (o.old_owner_type = 'address_owner' AND o.old_owner_address = '{a}'))")) - } - SuiObjectDataFilter::ObjectOwner(o) => { - Some(format!("((o.owner_type = 'object_owner' AND o.owner_address = '{o}') OR (o.old_owner_type = 'object_owner' AND o.old_owner_address = '{o}'))")) - } - SuiObjectDataFilter::ObjectId(id) => { - Some(format!("o.object_id = '{id}'")) } + SuiObjectDataFilter::AddressOwner(a) => Some(format!( + "((o.owner_type = 'address_owner' AND o.owner_address = '{a}') OR (o.old_owner_type = 'address_owner' AND o.old_owner_address = '{a}'))" + )), + SuiObjectDataFilter::ObjectOwner(o) => Some(format!( + "((o.owner_type = 'object_owner' AND o.owner_address = '{o}') OR (o.old_owner_type = 'object_owner' AND o.old_owner_address = '{o}'))" + )), + SuiObjectDataFilter::ObjectId(id) => Some(format!("o.object_id = '{id}'")), SuiObjectDataFilter::ObjectIds(ids) => { if ids.is_empty() { None @@ -215,10 +216,11 @@ mod test { use std::str::FromStr; use move_core_types::ident_str; - use sui_json_rpc_types::SuiObjectDataFilter; - use sui_types::base_types::{ObjectID, SuiAddress}; - use sui_types::parse_sui_struct_tag; + use sui_types::{ + base_types::{ObjectID, SuiAddress}, + parse_sui_struct_tag, + }; use crate::store::query::DBFilter; diff --git a/crates/sui-indexer/src/test_utils.rs b/crates/sui-indexer/src/test_utils.rs index 84beae9a716..dcc2df10424 100644 --- a/crates/sui-indexer/src/test_utils.rs +++ b/crates/sui-indexer/src/test_utils.rs @@ -1,22 +1,22 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0

+use std::{env, net::SocketAddr, time::Duration};
+
 use diesel::connection::SimpleConnection;
 use mysten_metrics::init_metrics;
-use tokio::task::JoinHandle;
-
-use std::env;
-use std::net::SocketAddr;
-use std::time::Duration;
 use sui_json_rpc_types::SuiTransactionBlockResponse;
+use tokio::task::JoinHandle;
 use tracing::info;

-use crate::db::{new_pg_connection_pool_with_config, reset_database, PgConnectionPoolConfig};
-use crate::errors::IndexerError;
-use crate::handlers::objects_snapshot_processor::SnapshotLagConfig;
-use crate::indexer::Indexer;
-use crate::store::PgIndexerStore;
-use crate::{IndexerConfig, IndexerMetrics};
+use crate::{
+    db::{new_pg_connection_pool_with_config, reset_database, PgConnectionPoolConfig},
+    errors::IndexerError,
+    handlers::objects_snapshot_processor::SnapshotLagConfig,
+    indexer::Indexer,
+    store::PgIndexerStore,
+    IndexerConfig, IndexerMetrics,
+};

 pub enum ReaderWriterConfig {
     Reader { reader_mode_rpc_url: String },
@@ -168,9 +168,10 @@ fn replace_db_name(db_url: &str, new_db_name: &str) -> (String, String) {
 }

 pub async fn force_delete_database(db_url: String) {
-    // Replace the database name with the default `postgres`, which should be the last string after `/`
-    // This is necessary because you can't drop a database while being connected to it.
-    // Hence switch to the default `postgres` database to drop the active database.
+    // Replace the database name with the default `postgres`, which should be the
+    // last string after `/`. This is necessary because you can't drop a database
+    // while being connected to it. Hence switch to the default `postgres`
+    // database to drop the active database.
     let (default_db_url, db_name) = replace_db_name(&db_url, "postgres");
     // Set connection timeout for tests to 1 second
     let mut pool_config = PgConnectionPoolConfig::default();
diff --git a/crates/sui-indexer/src/types.rs b/crates/sui-indexer/src/types.rs
index fc5d325f71a..089fb130ee8 100644
--- a/crates/sui-indexer/src/types.rs
+++ b/crates/sui-indexer/src/types.rs
@@ -1,28 +1,30 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::errors::IndexerError; use move_core_types::language_storage::StructTag; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use sui_json_rpc_types::{ ObjectChange, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; -use sui_types::base_types::{ObjectDigest, SequenceNumber}; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::crypto::AggregateAuthoritySignature; -use sui_types::digests::TransactionDigest; -use sui_types::dynamic_field::DynamicFieldInfo; -use sui_types::effects::TransactionEffects; -use sui_types::event::SystemEpochInfoEvent; -use sui_types::messages_checkpoint::{ - CertifiedCheckpointSummary, CheckpointCommitment, CheckpointDigest, EndOfEpochData, +use sui_types::{ + base_types::{ObjectDigest, ObjectID, SequenceNumber, SuiAddress}, + crypto::AggregateAuthoritySignature, + digests::TransactionDigest, + dynamic_field::DynamicFieldInfo, + effects::TransactionEffects, + event::SystemEpochInfoEvent, + messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointCommitment, CheckpointDigest, EndOfEpochData, + }, + move_package::MovePackage, + object::{Object, Owner}, + sui_serde::SuiStructTag, + sui_system_state::sui_system_state_summary::SuiSystemStateSummary, + transaction::SenderSignedData, }; -use sui_types::move_package::MovePackage; -use sui_types::object::{Object, Owner}; -use sui_types::sui_serde::SuiStructTag; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; -use sui_types::transaction::SenderSignedData; + +use crate::errors::IndexerError; pub type IndexerResult = Result; @@ -226,7 +228,7 @@ impl TryFrom for ObjectStatus { value => { return Err(IndexerError::PersistentStorageDataCorruptionError(format!( "{value} as ObjectStatus" - ))) + ))); } }) } @@ -244,7 +246,7 @@ impl TryFrom for OwnerType { value => { return Err(IndexerError::PersistentStorageDataCorruptionError(format!( "{value} as OwnerType" - ))) + ))); } }) } diff --git a/crates/sui-indexer/tests/ingestion_tests.rs b/crates/sui-indexer/tests/ingestion_tests.rs index 173b3061cfb..e7e5916d740 100644 --- a/crates/sui-indexer/tests/ingestion_tests.rs +++ b/crates/sui-indexer/tests/ingestion_tests.rs @@ -3,22 +3,19 @@ #[cfg(feature = "pg_integration")] mod ingestion_tests { - use diesel::ExpressionMethods; - use diesel::{QueryDsl, RunQueryDsl}; + use std::{net::SocketAddr, sync::Arc, time::Duration}; + + use diesel::{ExpressionMethods, QueryDsl, RunQueryDsl}; use simulacrum::Simulacrum; - use std::net::SocketAddr; - use std::sync::Arc; - use std::time::Duration; - use sui_indexer::db::get_pg_pool_connection; - use sui_indexer::errors::Context; - use sui_indexer::errors::IndexerError; - use sui_indexer::models::transactions::StoredTransaction; - use sui_indexer::schema::transactions; - use sui_indexer::store::{indexer_store::IndexerStore, PgIndexerStore}; - use sui_indexer::test_utils::{start_test_indexer, ReaderWriterConfig}; - use sui_types::base_types::SuiAddress; - use sui_types::effects::TransactionEffectsAPI; - use sui_types::storage::ReadStore; + use sui_indexer::{ + db::get_pg_pool_connection, + errors::{Context, IndexerError}, + models::transactions::StoredTransaction, + schema::transactions, + store::{indexer_store::IndexerStore, PgIndexerStore}, + test_utils::{start_test_indexer, ReaderWriterConfig}, + }; + use sui_types::{base_types::SuiAddress, effects::TransactionEffectsAPI, storage::ReadStore}; use tokio::task::JoinHandle; macro_rules! 
read_only_blocking { @@ -35,7 +32,8 @@ mod ingestion_tests { const DEFAULT_SERVER_PORT: u16 = 3000; const DEFAULT_DB_URL: &str = "postgres://postgres:postgrespw@localhost:5432/sui_indexer"; - /// Set up a test indexer fetching from a REST endpoint served by the given Simulacrum. + /// Set up a test indexer fetching from a REST endpoint served by the given + /// Simulacrum. async fn set_up( sim: Arc, ) -> ( @@ -69,7 +67,8 @@ mod ingestion_tests { (server_handle, pg_store, pg_handle) } - /// Wait for the indexer to catch up to the given checkpoint sequence number. + /// Wait for the indexer to catch up to the given checkpoint sequence + /// number. async fn wait_for_checkpoint( pg_store: &PgIndexerStore, checkpoint_sequence_number: u64, diff --git a/crates/sui-json-rpc-api/src/coin.rs b/crates/sui-json-rpc-api/src/coin.rs index b9f7405912c..16e3be20985 100644 --- a/crates/sui-json-rpc-api/src/coin.rs +++ b/crates/sui-json-rpc-api/src/coin.rs @@ -1,12 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{Balance, CoinPage, SuiCoinMetadata}; use sui_open_rpc_macros::open_rpc; -use sui_types::balance::Supply; -use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::{ + balance::Supply, + base_types::{ObjectID, SuiAddress}, +}; #[open_rpc(namespace = "suix", tag = "Coin Query API")] #[rpc(server, client, namespace = "suix")] @@ -17,7 +18,9 @@ pub trait CoinReadApi { &self, /// the owner's Sui address owner: SuiAddress, - /// optional type name for the coin (e.g., 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC), default to 0x2::sui::SUI if not specified. + /// optional type name for the coin (e.g., + /// 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC), default to + /// 0x2::sui::SUI if not specified. coin_type: Option, /// optional paging cursor cursor: Option, @@ -37,17 +40,21 @@ pub trait CoinReadApi { limit: Option, ) -> RpcResult; - /// Return the total coin balance for one coin type, owned by the address owner. + /// Return the total coin balance for one coin type, owned by the address + /// owner. #[method(name = "getBalance")] async fn get_balance( &self, /// the owner's Sui address owner: SuiAddress, - /// optional type names for the coin (e.g., 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC), default to 0x2::sui::SUI if not specified. + /// optional type names for the coin (e.g., + /// 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC), default to + /// 0x2::sui::SUI if not specified. coin_type: Option, ) -> RpcResult; - /// Return the total coin balance for all coin type, owned by the address owner. + /// Return the total coin balance for all coin type, owned by the address + /// owner. 
#[method(name = "getAllBalances")] async fn get_all_balances( &self, @@ -59,7 +66,8 @@ pub trait CoinReadApi { #[method(name = "getCoinMetadata")] async fn get_coin_metadata( &self, - /// type name for the coin (e.g., 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC) + /// type name for the coin (e.g., + /// 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC) coin_type: String, ) -> RpcResult>; @@ -67,7 +75,8 @@ pub trait CoinReadApi { #[method(name = "getTotalSupply")] async fn get_total_supply( &self, - /// type name for the coin (e.g., 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC) + /// type name for the coin (e.g., + /// 0x168da5bf1f48dafc111b0a488fa454aca95e0b5e::usdc::USDC) coin_type: String, ) -> RpcResult; } diff --git a/crates/sui-json-rpc-api/src/extended.rs b/crates/sui-json-rpc-api/src/extended.rs index ff6f73b760a..c3a3c25de07 100644 --- a/crates/sui-json-rpc-api/src/extended.rs +++ b/crates/sui-json-rpc-api/src/extended.rs @@ -1,9 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{ CheckpointedObjectID, EpochInfo, EpochPage, QueryObjectsPage, SuiObjectResponseQuery, }; @@ -29,15 +27,19 @@ pub trait ExtendedApi { #[method(name = "getCurrentEpoch")] async fn get_current_epoch(&self) -> RpcResult; - /// Return the list of queried objects. Note that this is an enhanced full node only api. + /// Return the list of queried objects. Note that this is an enhanced full + /// node only api. #[method(name = "queryObjects")] async fn query_objects( &self, /// the objects query criteria. query: SuiObjectResponseQuery, - /// An optional paging cursor. If provided, the query will start from the next item after the specified cursor. Default to start from the first item if not specified. + /// An optional paging cursor. If provided, the query will start from + /// the next item after the specified cursor. Default to start from the + /// first item if not specified. cursor: Option, - /// Max number of items returned per page, default to [QUERY_MAX_RESULT_LIMIT] if not specified. + /// Max number of items returned per page, default to + /// [QUERY_MAX_RESULT_LIMIT] if not specified. limit: Option, ) -> RpcResult; diff --git a/crates/sui-json-rpc-api/src/governance.rs b/crates/sui-json-rpc-api/src/governance.rs index 73e508d73e5..db75ef6e59d 100644 --- a/crates/sui-json-rpc-api/src/governance.rs +++ b/crates/sui-json-rpc-api/src/governance.rs @@ -1,19 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{DelegatedStake, DelegatedTimelockedStake, SuiCommittee, ValidatorApys}; use sui_open_rpc_macros::open_rpc; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::sui_serde::BigInt; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + sui_serde::BigInt, + sui_system_state::sui_system_state_summary::SuiSystemStateSummary, +}; #[open_rpc(namespace = "suix", tag = "Governance Read API")] #[rpc(server, client, namespace = "suix")] pub trait GovernanceReadApi { - /// Return one or more [DelegatedStake]. If a Stake was withdrawn its status will be Unstaked. + /// Return one or more [DelegatedStake]. 
If a Stake was withdrawn its status + /// will be Unstaked. #[method(name = "getStakesByIds")] async fn get_stakes_by_ids( &self, @@ -24,7 +25,8 @@ pub trait GovernanceReadApi { #[method(name = "getStakes")] async fn get_stakes(&self, owner: SuiAddress) -> RpcResult>; - /// Return one or more [DelegatedTimelockedStake]. If a Stake was withdrawn its status will be Unstaked. + /// Return one or more [DelegatedTimelockedStake]. If a Stake was withdrawn + /// its status will be Unstaked. #[method(name = "getTimelockedStakesByIds")] async fn get_timelocked_stakes_by_ids( &self, diff --git a/crates/sui-json-rpc-api/src/indexer.rs b/crates/sui-json-rpc-api/src/indexer.rs index 93b1d8dc70f..c7c80cbbe4c 100644 --- a/crates/sui-json-rpc-api/src/indexer.rs +++ b/crates/sui-json-rpc-api/src/indexer.rs @@ -1,28 +1,27 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - -use sui_json_rpc_types::SuiTransactionBlockEffects; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{ DynamicFieldPage, EventFilter, EventPage, ObjectsPage, Page, SuiEvent, SuiObjectResponse, - SuiObjectResponseQuery, SuiTransactionBlockResponseQuery, TransactionBlocksPage, - TransactionFilter, + SuiObjectResponseQuery, SuiTransactionBlockEffects, SuiTransactionBlockResponseQuery, + TransactionBlocksPage, TransactionFilter, }; use sui_open_rpc_macros::open_rpc; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::digests::TransactionDigest; -use sui_types::dynamic_field::DynamicFieldName; -use sui_types::event::EventID; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + digests::TransactionDigest, + dynamic_field::DynamicFieldName, + event::EventID, +}; #[open_rpc(namespace = "suix", tag = "Extended API")] #[rpc(server, client, namespace = "suix")] pub trait IndexerApi { /// Return the list of objects owned by an address. - /// Note that if the address owns more than `QUERY_MAX_RESULT_LIMIT` objects, - /// the pagination is not accurate, because previous page may have been updated when - /// the next page is fetched. + /// Note that if the address owns more than `QUERY_MAX_RESULT_LIMIT` + /// objects, the pagination is not accurate, because previous page may + /// have been updated when the next page is fetched. /// Please use suix_queryObjects if this is a concern. #[method(name = "getOwnedObjects")] async fn get_owned_objects( @@ -31,9 +30,12 @@ pub trait IndexerApi { address: SuiAddress, /// the objects query criteria. query: Option, - /// An optional paging cursor. If provided, the query will start from the next item after the specified cursor. Default to start from the first item if not specified. + /// An optional paging cursor. If provided, the query will start from + /// the next item after the specified cursor. Default to start from the + /// first item if not specified. cursor: Option, - /// Max number of items returned per page, default to [QUERY_MAX_RESULT_LIMIT] if not specified. + /// Max number of items returned per page, default to + /// [QUERY_MAX_RESULT_LIMIT] if not specified. limit: Option, ) -> RpcResult; @@ -43,11 +45,15 @@ pub trait IndexerApi { &self, /// the transaction query criteria. query: SuiTransactionBlockResponseQuery, - /// An optional paging cursor. If provided, the query will start from the next item after the specified cursor. Default to start from the first item if not specified. + /// An optional paging cursor. 
If provided, the query will start from + /// the next item after the specified cursor. Default to start from the + /// first item if not specified. cursor: Option, - /// Maximum item returned per page, default to QUERY_MAX_RESULT_LIMIT if not specified. + /// Maximum item returned per page, default to QUERY_MAX_RESULT_LIMIT if + /// not specified. limit: Option, - /// query result ordering, default to false (ascending order), oldest record first. + /// query result ordering, default to false (ascending order), oldest + /// record first. descending_order: Option, ) -> RpcResult; @@ -59,9 +65,11 @@ pub trait IndexerApi { query: EventFilter, /// optional paging cursor cursor: Option, - /// maximum number of items per page, default to [QUERY_MAX_RESULT_LIMIT] if not specified. + /// maximum number of items per page, default to + /// [QUERY_MAX_RESULT_LIMIT] if not specified. limit: Option, - /// query result ordering, default to false (ascending order), oldest record first. + /// query result ordering, default to false (ascending order), oldest + /// record first. descending_order: Option, ) -> RpcResult; @@ -83,9 +91,12 @@ pub trait IndexerApi { &self, /// The ID of the parent object parent_object_id: ObjectID, - /// An optional paging cursor. If provided, the query will start from the next item after the specified cursor. Default to start from the first item if not specified. + /// An optional paging cursor. If provided, the query will start from + /// the next item after the specified cursor. Default to start from the + /// first item if not specified. cursor: Option, - /// Maximum item returned per page, default to [QUERY_MAX_RESULT_LIMIT] if not specified. + /// Maximum item returned per page, default to [QUERY_MAX_RESULT_LIMIT] + /// if not specified. 
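// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] The cursor contract described
// above, exercised against `suix_getOwnedObjects`: each call resumes after
// `cursor`, and the returned page's `next_cursor`/`has_next_page` drive the
// loop. The same pattern applies to the other paged methods in this trait.
// Endpoint and owner address are placeholders.
use jsonrpsee::http_client::HttpClientBuilder;
use sui_json_rpc_api::IndexerApiClient;
use sui_types::base_types::SuiAddress;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9000")?;
    let owner = SuiAddress::ZERO; // placeholder address
    let mut cursor = None;
    loop {
        let page = client.get_owned_objects(owner, None, cursor, Some(50)).await?;
        for object in &page.data {
            println!("{object:?}");
        }
        if !page.has_next_page {
            break;
        }
        cursor = page.next_cursor; // resume after the last item of this page
    }
    Ok(())
}
// ---------------------------------------------------------------------------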
limit: Option, ) -> RpcResult; diff --git a/crates/sui-json-rpc-api/src/lib.rs b/crates/sui-json-rpc-api/src/lib.rs index 053d69c2229..7fc4cc294c0 100644 --- a/crates/sui-json-rpc-api/src/lib.rs +++ b/crates/sui-json-rpc-api/src/lib.rs @@ -2,36 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::anyhow; +pub use coin::{CoinReadApiClient, CoinReadApiOpenRpc, CoinReadApiServer}; +pub use extended::{ExtendedApiClient, ExtendedApiOpenRpc, ExtendedApiServer}; +pub use governance::{GovernanceReadApiClient, GovernanceReadApiOpenRpc, GovernanceReadApiServer}; +pub use indexer::{IndexerApiClient, IndexerApiOpenRpc, IndexerApiServer}; +pub use move_utils::{MoveUtilsClient, MoveUtilsOpenRpc, MoveUtilsServer}; use mysten_metrics::histogram::Histogram; - -pub use coin::CoinReadApiClient; -pub use coin::CoinReadApiOpenRpc; -pub use coin::CoinReadApiServer; -pub use extended::ExtendedApiClient; -pub use extended::ExtendedApiOpenRpc; -pub use extended::ExtendedApiServer; -pub use governance::GovernanceReadApiClient; -pub use governance::GovernanceReadApiOpenRpc; -pub use governance::GovernanceReadApiServer; -pub use indexer::IndexerApiClient; -pub use indexer::IndexerApiOpenRpc; -pub use indexer::IndexerApiServer; -pub use move_utils::MoveUtilsClient; -pub use move_utils::MoveUtilsOpenRpc; -pub use move_utils::MoveUtilsServer; use once_cell::sync::Lazy; use prometheus::{register_int_counter_with_registry, IntCounter}; -pub use read::ReadApiClient; -pub use read::ReadApiOpenRpc; -pub use read::ReadApiServer; +pub use read::{ReadApiClient, ReadApiOpenRpc, ReadApiServer}; use tap::TapFallible; use tracing::warn; -pub use transaction_builder::TransactionBuilderClient; -pub use transaction_builder::TransactionBuilderOpenRpc; -pub use transaction_builder::TransactionBuilderServer; -pub use write::WriteApiClient; -pub use write::WriteApiOpenRpc; -pub use write::WriteApiServer; +pub use transaction_builder::{ + TransactionBuilderClient, TransactionBuilderOpenRpc, TransactionBuilderServer, +}; +pub use write::{WriteApiClient, WriteApiOpenRpc, WriteApiServer}; mod coin; mod extended; @@ -292,10 +277,11 @@ pub fn read_size_from_env(var_name: &str) -> Option { } pub const CLIENT_SDK_TYPE_HEADER: &str = "client-sdk-type"; -/// The version number of the SDK itself. This can be different from the API version. +/// The version number of the SDK itself. This can be different from the API +/// version. pub const CLIENT_SDK_VERSION_HEADER: &str = "client-sdk-version"; -/// The RPC API version that the client is targeting. Different SDK versions may target the same -/// API version. +/// The RPC API version that the client is targeting. Different SDK versions may +/// target the same API version. 
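// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] Attaching the SDK headers
// documented above to a jsonrpsee HTTP client, much as the routing tests
// later in this diff do; the header values here are illustrative.
use hyper::{header::HeaderValue, HeaderMap};
use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
use sui_json_rpc_api::{
    CLIENT_SDK_TYPE_HEADER, CLIENT_SDK_VERSION_HEADER, CLIENT_TARGET_API_VERSION_HEADER,
};

fn build_client(url: &str) -> Result<HttpClient, Box<dyn std::error::Error>> {
    let mut headers = HeaderMap::new();
    headers.insert(CLIENT_SDK_TYPE_HEADER, HeaderValue::from_static("rust"));
    // The SDK version and the targeted API version may differ, per the docs.
    headers.insert(CLIENT_SDK_VERSION_HEADER, HeaderValue::from_static("1.0.0"));
    headers.insert(CLIENT_TARGET_API_VERSION_HEADER, HeaderValue::from_static("1.0.0"));
    Ok(HttpClientBuilder::default().set_headers(headers).build(url)?)
}
// ---------------------------------------------------------------------------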
pub const CLIENT_TARGET_API_VERSION_HEADER: &str = "client-target-api-version"; pub const TRANSIENT_ERROR_CODE: i32 = -32050; diff --git a/crates/sui-json-rpc-api/src/move_utils.rs b/crates/sui-json-rpc-api/src/move_utils.rs index fc208d8c29b..b0a79355b69 100644 --- a/crates/sui-json-rpc-api/src/move_utils.rs +++ b/crates/sui-json-rpc-api/src/move_utils.rs @@ -3,9 +3,7 @@ use std::collections::BTreeMap; -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{ MoveFunctionArgType, SuiMoveNormalizedFunction, SuiMoveNormalizedModule, SuiMoveNormalizedStruct, diff --git a/crates/sui-json-rpc-api/src/read.rs b/crates/sui-json-rpc-api/src/read.rs index 89dbb4ef36b..2bb85cc9e3b 100644 --- a/crates/sui-json-rpc-api/src/read.rs +++ b/crates/sui-json-rpc-api/src/read.rs @@ -1,18 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{ - Checkpoint, CheckpointId, CheckpointPage, SuiEvent, SuiGetPastObjectRequest, - SuiObjectDataOptions, SuiObjectResponse, SuiPastObjectResponse, SuiTransactionBlockResponse, + Checkpoint, CheckpointId, CheckpointPage, ProtocolConfigResponse, SuiEvent, + SuiGetPastObjectRequest, SuiLoadedChildObjectsResponse, SuiObjectDataOptions, + SuiObjectResponse, SuiPastObjectResponse, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; -use sui_json_rpc_types::{ProtocolConfigResponse, SuiLoadedChildObjectsResponse}; use sui_open_rpc_macros::open_rpc; -use sui_types::base_types::{ObjectID, SequenceNumber, TransactionDigest}; -use sui_types::sui_serde::BigInt; +use sui_types::{ + base_types::{ObjectID, SequenceNumber, TransactionDigest}, + sui_serde::BigInt, +}; #[open_rpc(namespace = "sui", tag = "Read API")] #[rpc(server, client, namespace = "sui")] @@ -59,25 +59,28 @@ pub trait ReadApi { options: Option, ) -> RpcResult>; - /// Note there is no software-level guarantee/SLA that objects with past versions - /// can be retrieved by this API, even if the object and version exists/existed. - /// The result may vary across nodes depending on their pruning policies. - /// Return the object information for a specified version + /// Note there is no software-level guarantee/SLA that objects with past + /// versions can be retrieved by this API, even if the object and + /// version exists/existed. The result may vary across nodes depending + /// on their pruning policies. Return the object information for a + /// specified version #[method(name = "tryGetPastObject")] async fn try_get_past_object( &self, /// the ID of the queried object object_id: ObjectID, - /// the version of the queried object. If None, default to the latest known version + /// the version of the queried object. If None, default to the latest + /// known version version: SequenceNumber, /// options for specifying the content to be returned options: Option, ) -> RpcResult; - /// Note there is no software-level guarantee/SLA that objects with past versions - /// can be retrieved by this API, even if the object and version exists/existed. - /// The result may vary across nodes depending on their pruning policies. - /// Return the object information for a specified version + /// Note there is no software-level guarantee/SLA that objects with past + /// versions can be retrieved by this API, even if the object and + /// version exists/existed. 
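// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] Calling `sui_tryGetPastObject` as
// documented above. The response is best-effort and may differ across nodes
// with different pruning policies, so both outcomes are handled. The object
// id and version are placeholders, and the response enum variants are
// assumptions based on `SuiPastObjectResponse`.
use jsonrpsee::http_client::HttpClientBuilder;
use sui_json_rpc_api::ReadApiClient;
use sui_json_rpc_types::{SuiObjectDataOptions, SuiPastObjectResponse};
use sui_types::base_types::{ObjectID, SequenceNumber};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9000")?;
    let object_id = ObjectID::random(); // placeholder
    let response = client
        .try_get_past_object(
            object_id,
            SequenceNumber::from_u64(1),
            Some(SuiObjectDataOptions::full_content()),
        )
        .await?;
    match response {
        SuiPastObjectResponse::VersionFound(data) => println!("found: {:?}", data.object_id),
        other => println!("not available on this node: {other:?}"),
    }
    Ok(())
}
// ---------------------------------------------------------------------------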
The result may vary across nodes depending + /// on their pruning policies. Return the object information for a + /// specified version #[method(name = "tryMultiGetPastObjects")] async fn try_multi_get_past_objects( &self, @@ -97,7 +100,8 @@ pub trait ReadApi { #[method(name = "getCheckpoint")] async fn get_checkpoint( &self, - /// Checkpoint identifier, can use either checkpoint digest, or checkpoint sequence number as input. + /// Checkpoint identifier, can use either checkpoint digest, or + /// checkpoint sequence number as input. id: CheckpointId, ) -> RpcResult; @@ -105,22 +109,30 @@ pub trait ReadApi { #[method(name = "getCheckpoints")] async fn get_checkpoints( &self, - /// An optional paging cursor. If provided, the query will start from the next item after the specified cursor. Default to start from the first item if not specified. + /// An optional paging cursor. If provided, the query will start from + /// the next item after the specified cursor. Default to start from the + /// first item if not specified. cursor: Option>, - /// Maximum item returned per page, default to [QUERY_MAX_RESULT_LIMIT_CHECKPOINTS] if not specified. + /// Maximum item returned per page, default to + /// [QUERY_MAX_RESULT_LIMIT_CHECKPOINTS] if not specified. limit: Option, - /// query result ordering, default to false (ascending order), oldest record first. + /// query result ordering, default to false (ascending order), oldest + /// record first. descending_order: bool, ) -> RpcResult; #[method(name = "getCheckpoints", version <= "0.31")] async fn get_checkpoints_deprecated_limit( &self, - /// An optional paging cursor. If provided, the query will start from the next item after the specified cursor. Default to start from the first item if not specified. + /// An optional paging cursor. If provided, the query will start from + /// the next item after the specified cursor. Default to start from the + /// first item if not specified. cursor: Option>, - /// Maximum item returned per page, default to [QUERY_MAX_RESULT_LIMIT_CHECKPOINTS] if not specified. + /// Maximum item returned per page, default to + /// [QUERY_MAX_RESULT_LIMIT_CHECKPOINTS] if not specified. limit: Option>, - /// query result ordering, default to false (ascending order), oldest record first. + /// query result ordering, default to false (ascending order), oldest + /// record first. descending_order: bool, ) -> RpcResult; @@ -136,16 +148,19 @@ pub trait ReadApi { #[method(name = "getTotalTransactionBlocks")] async fn get_total_transaction_blocks(&self) -> RpcResult>; - /// Return the sequence number of the latest checkpoint that has been executed + /// Return the sequence number of the latest checkpoint that has been + /// executed #[method(name = "getLatestCheckpointSequenceNumber")] async fn get_latest_checkpoint_sequence_number(&self) -> RpcResult>; /// Return the protocol config table for the given version number. - /// If the version number is not specified, If none is specified, the node uses the version of the latest epoch it has processed. + /// If the version number is not specified, the node uses the version of + /// the latest epoch it has processed. #[method(name = "getProtocolConfig")] async fn get_protocol_config( &self, - /// An optional protocol version specifier. If omitted, the latest protocol config table for the node will be returned. + /// An optional protocol version specifier. If omitted, the latest + /// protocol config table for the node will be returned.
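// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] Listing recent checkpoints with the
// paging and ordering parameters described above; `descending_order = true`
// returns the newest checkpoints first. Endpoint is a placeholder.
use jsonrpsee::http_client::HttpClientBuilder;
use sui_json_rpc_api::ReadApiClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9000")?;
    let page = client.get_checkpoints(None, Some(5), true).await?;
    for cp in &page.data {
        println!(
            "checkpoint {}: {} transactions at {} ms",
            cp.sequence_number,
            cp.transactions.len(),
            cp.timestamp_ms
        );
    }
    Ok(())
}
// ---------------------------------------------------------------------------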
version: Option>, ) -> RpcResult; diff --git a/crates/sui-json-rpc-api/src/transaction_builder.rs b/crates/sui-json-rpc-api/src/transaction_builder.rs index b28a6bff5bd..c340455f5e3 100644 --- a/crates/sui-json-rpc-api/src/transaction_builder.rs +++ b/crates/sui-json-rpc-api/src/transaction_builder.rs @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 use fastcrypto::encoding::Base64; -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json::SuiJsonValue; use sui_json_rpc_types::{ RPCTransactionRequestParams, SuiTransactionBlockBuilderMode, SuiTypeTag, TransactionBlockBytes, }; use sui_open_rpc_macros::open_rpc; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::sui_serde::BigInt; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + sui_serde::BigInt, +}; #[open_rpc(namespace = "unsafe", tag = "Transaction Builder API")] #[rpc(server, client, namespace = "unsafe")] pub trait TransactionBuilder { - /// Create an unsigned transaction to transfer an object from one address to another. The object's type - /// must allow public transfers + /// Create an unsigned transaction to transfer an object from one address to + /// another. The object's type must allow public transfers #[method(name = "transferObject")] async fn transfer_object( &self, @@ -25,15 +25,18 @@ pub trait TransactionBuilder { signer: SuiAddress, /// the ID of the object to be transferred object_id: ObjectID, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, /// the recipient's Sui address recipient: SuiAddress, ) -> RpcResult; - /// Create an unsigned transaction to send SUI coin object to a Sui address. The SUI object is also used as the gas object. + /// Create an unsigned transaction to send SUI coin object to a Sui address. + /// The SUI object is also used as the gas object. #[method(name = "transferSui")] async fn transfer_sui( &self, @@ -41,7 +44,8 @@ pub trait TransactionBuilder { signer: SuiAddress, /// the Sui coin object to be used in this transaction sui_object_id: ObjectID, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, /// the recipient's Sui address recipient: SuiAddress, @@ -49,10 +53,11 @@ pub trait TransactionBuilder { amount: Option>, ) -> RpcResult; - /// Send `Coin` to a list of addresses, where `T` can be any coin type, following a list of amounts, - /// The object specified in the `gas` field will be used to pay the gas fee for the transaction. - /// The gas object can not appear in `input_coins`. If the gas object is not specified, the RPC server - /// will auto-select one. + /// Send `Coin` to a list of addresses, where `T` can be any coin type, + /// following a list of amounts, The object specified in the `gas` field + /// will be used to pay the gas fee for the transaction. The gas object + /// can not appear in `input_coins`. If the gas object is not specified, the + /// RPC server will auto-select one. 
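// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] Building an unsigned
// `unsafe_transferObject` transaction with the builder API above. Passing
// `gas: None` lets the node pick a gas object from the signer's possession,
// per the doc comment. Addresses, the object id, and the budget are
// placeholders; the other builder methods below follow the same shape.
use jsonrpsee::http_client::HttpClientBuilder;
use sui_json_rpc_api::TransactionBuilderClient;
use sui_types::base_types::{ObjectID, SuiAddress};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9000")?;
    let signer = SuiAddress::ZERO;      // placeholder
    let recipient = SuiAddress::ZERO;   // placeholder
    let object_id = ObjectID::random(); // placeholder
    let tx = client
        .transfer_object(signer, object_id, None, 10_000_000u64.into(), recipient)
        .await?;
    // `tx` is unsigned TransactionBlockBytes; sign it off-line and submit it
    // via `sui_executeTransactionBlock`.
    println!("{:?}", tx.tx_bytes);
    Ok(())
}
// ---------------------------------------------------------------------------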
#[method(name = "pay")] async fn pay( &self, @@ -60,61 +65,77 @@ pub trait TransactionBuilder { signer: SuiAddress, /// the Sui coins to be used in this transaction input_coins: Vec, - /// the recipients' addresses, the length of this vector must be the same as amounts. + /// the recipients' addresses, the length of this vector must be the + /// same as amounts. recipients: Vec, - /// the amounts to be transferred to recipients, following the same order + /// the amounts to be transferred to recipients, following the same + /// order amounts: Vec>, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; /// Send SUI coins to a list of addresses, following a list of amounts. - /// This is for SUI coin only and does not require a separate gas coin object. - /// Specifically, what pay_sui does are: + /// This is for SUI coin only and does not require a separate gas coin + /// object. Specifically, what pay_sui does is: /// 1. debit each input_coin to create new coin following the order of /// amounts and assign it to the corresponding recipient. - /// 2. accumulate all residual SUI from input coins left and deposit all SUI to the first + /// 2. accumulate all residual SUI from input coins left and deposit all SUI + /// to the first /// input coin, then use the first input coin as the gas coin object. - /// 3. the balance of the first input coin after tx is sum(input_coins) - sum(amounts) - actual_gas_cost + /// 3. the balance of the first input coin after tx is sum(input_coins) - + /// sum(amounts) - actual_gas_cost /// 4. all other input coints other than the first one are deleted. #[method(name = "paySui")] async fn pay_sui( &self, /// the transaction signer's Sui address signer: SuiAddress, - /// the Sui coins to be used in this transaction, including the coin for gas payment. + /// the Sui coins to be used in this transaction, including the coin for + /// gas payment. input_coins: Vec, - /// the recipients' addresses, the length of this vector must be the same as amounts. + /// the recipients' addresses, the length of this vector must be the + /// same as amounts. recipients: Vec, - /// the amounts to be transferred to recipients, following the same order + /// the amounts to be transferred to recipients, following the same + /// order amounts: Vec>, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; /// Send all SUI coins to one recipient. - /// This is for SUI coin only and does not require a separate gas coin object. - /// Specifically, what pay_all_sui does are: - /// 1. accumulate all SUI from input coins and deposit all SUI to the first input coin - /// 2. transfer the updated first coin to the recipient and also use this first coin as gas coin object. - /// 3. the balance of the first input coin after tx is sum(input_coins) - actual_gas_cost. + /// This is for SUI coin only and does not require a separate gas coin + /// object. Specifically, what pay_all_sui does is: + /// 1.
accumulate all SUI from input coins and deposit all SUI to the first + /// input coin + /// 2. transfer the updated first coin to the recipient and also use this + /// first coin as gas coin object. + /// 3. the balance of the first input coin after tx is sum(input_coins) - + /// actual_gas_cost. /// 4. all other input coins other than the first are deleted. #[method(name = "payAllSui")] async fn pay_all_sui( &self, /// the transaction signer's Sui address signer: SuiAddress, - /// the Sui coins to be used in this transaction, including the coin for gas payment. + /// the Sui coins to be used in this transaction, including the coin for + /// gas payment. input_coins: Vec, /// the recipient address, recipient: SuiAddress, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; - /// Create an unsigned transaction to execute a Move call on the network, by calling the specified function in the module of a given package. + /// Create an unsigned transaction to execute a Move call on the network, by + /// calling the specified function in the module of a given package. #[method(name = "moveCall")] async fn move_call( &self, @@ -130,11 +151,15 @@ pub trait TransactionBuilder { type_arguments: Vec, /// the arguments to be passed into the Move function, in [SuiJson](https://docs.sui.io/build/sui-json) format arguments: Vec, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, - /// Whether this is a Normal transaction or a Dev Inspect Transaction. Default to be `SuiTransactionBlockBuilderMode::Commit` when it's None. + /// Whether this is a Normal transaction or a Dev Inspect Transaction. + /// Default to be `SuiTransactionBlockBuilderMode::Commit` when it's + /// None. execution_mode: Option, ) -> RpcResult; @@ -146,15 +171,19 @@ pub trait TransactionBuilder { sender: SuiAddress, /// the compiled bytes of a Move package compiled_modules: Vec, - /// a list of transitive dependency addresses that this set of modules depends on. + /// a list of transitive dependency addresses that this set of modules + /// depends on. dependencies: Vec, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; - /// Create an unsigned transaction to split a coin object into multiple coins. + /// Create an unsigned transaction to split a coin object into multiple + /// coins. 
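// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] The paySui accounting spelled out
// above, restated as plain arithmetic with made-up values: the first input
// coin absorbs all residual SUI and pays gas, and every other input coin is
// deleted.
fn main() {
    let input_coins: [u64; 2] = [5_000_000_000, 1_000_000_000]; // MIST, hypothetical
    let amounts: [u64; 2] = [2_000_000_000, 500_000_000]; // sent to two recipients
    let actual_gas_cost: u64 = 2_500_000; // hypothetical

    // balance of the first input coin after the transaction:
    // sum(input_coins) - sum(amounts) - actual_gas_cost
    let first_coin_after =
        input_coins.iter().sum::<u64>() - amounts.iter().sum::<u64>() - actual_gas_cost;
    assert_eq!(first_coin_after, 3_497_500_000);
    println!("first input coin ends with {first_coin_after} MIST");
}
// ---------------------------------------------------------------------------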
#[method(name = "splitCoin")] async fn split_coin( &self, @@ -164,13 +193,16 @@ pub trait TransactionBuilder { coin_object_id: ObjectID, /// the amounts to split out from the coin split_amounts: Vec>, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; - /// Create an unsigned transaction to split a coin object into multiple equal-size coins. + /// Create an unsigned transaction to split a coin object into multiple + /// equal-size coins. #[method(name = "splitCoinEqual")] async fn split_coin_equal( &self, @@ -180,9 +212,11 @@ pub trait TransactionBuilder { coin_object_id: ObjectID, /// the number of coins to split into split_count: BigInt, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; @@ -192,13 +226,17 @@ pub trait TransactionBuilder { &self, /// the transaction signer's Sui address signer: SuiAddress, - /// the coin object to merge into, this coin will remain after the transaction + /// the coin object to merge into, this coin will remain after the + /// transaction primary_coin: ObjectID, - /// the coin object to be merged, this coin will be destroyed, the balance will be added to `primary_coin` + /// the coin object to be merged, this coin will be destroyed, the + /// balance will be added to `primary_coin` coin_to_merge: ObjectID, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; @@ -210,9 +248,11 @@ pub trait TransactionBuilder { signer: SuiAddress, /// list of transaction request parameters single_transaction_params: Vec, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, /// Whether this is a regular transaction or a Dev Inspect Transaction txn_builder_mode: Option, @@ -230,9 +270,11 @@ pub trait TransactionBuilder { amount: Option>, /// the validator's Sui address validator: SuiAddress, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: 
Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; @@ -244,13 +286,16 @@ pub trait TransactionBuilder { signer: SuiAddress, /// StakedSui object ID staked_sui: ObjectID, - /// gas object to be used in this transaction, node will pick one from the signer's possession if not provided + /// gas object to be used in this transaction, node will pick one from + /// the signer's possession if not provided gas: Option, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; - /// Add timelocked stake to a validator's staking pool using multiple balances and amount. + /// Add timelocked stake to a validator's staking pool using multiple + /// balances and amount. #[method(name = "requestAddTimelockedStake")] async fn request_add_timelocked_stake( &self, @@ -262,7 +307,8 @@ pub trait TransactionBuilder { validator: SuiAddress, /// gas object to be used in this transaction gas: ObjectID, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; @@ -276,7 +322,8 @@ pub trait TransactionBuilder { timelocked_staked_sui: ObjectID, /// gas object to be used in this transaction gas: ObjectID, - /// the gas budget, the transaction will fail if the gas cost exceed the budget + /// the gas budget, the transaction will fail if the gas cost exceed the + /// budget gas_budget: BigInt, ) -> RpcResult; } diff --git a/crates/sui-json-rpc-api/src/write.rs b/crates/sui-json-rpc-api/src/write.rs index 8b53ea9b913..797448bb715 100644 --- a/crates/sui-json-rpc-api/src/write.rs +++ b/crates/sui-json-rpc-api/src/write.rs @@ -2,41 +2,45 @@ // SPDX-License-Identifier: Apache-2.0 use fastcrypto::encoding::Base64; -use jsonrpsee::core::RpcResult; -use jsonrpsee::proc_macros::rpc; - +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sui_json_rpc_types::{ DevInspectArgs, DevInspectResults, DryRunTransactionBlockResponse, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; use sui_open_rpc_macros::open_rpc; -use sui_types::base_types::SuiAddress; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::sui_serde::BigInt; +use sui_types::{ + base_types::SuiAddress, quorum_driver_types::ExecuteTransactionRequestType, sui_serde::BigInt, +}; #[open_rpc(namespace = "sui", tag = "Write API")] #[rpc(server, client, namespace = "sui")] pub trait WriteApi { /// Execute the transaction and wait for results if desired. /// Request types: - /// 1. WaitForEffectsCert: waits for TransactionEffectsCert and then return to client. - /// This mode is a proxy for transaction finality. - /// 2. WaitForLocalExecution: waits for TransactionEffectsCert and make sure the node - /// executed the transaction locally before returning the client. The local execution - /// makes sure this node is aware of this transaction when client fires subsequent queries. - /// However if the node fails to execute the transaction locally in a timely manner, - /// a bool type in the response is set to false to indicated the case. - /// request_type is default to be `WaitForEffectsCert` unless options.show_events or options.show_effects is true + /// 1. 
WaitForEffectsCert: waits for TransactionEffectsCert and then returns + /// to the client. This mode is a proxy for transaction finality. + /// 2. WaitForLocalExecution: waits for TransactionEffectsCert and makes sure + /// the node executed the transaction locally before returning to the + /// client. The local execution makes sure this node is aware of this + /// transaction when the client fires subsequent queries. However, if the node + /// fails to execute the transaction locally in a timely manner, a bool + /// type in the response is set to false to indicate the case. + /// request_type defaults to `WaitForEffectsCert` unless + /// options.show_events or options.show_effects is true #[method(name = "executeTransactionBlock")] async fn execute_transaction_block( &self, - /// BCS serialized transaction data bytes without its type tag, as base-64 encoded string. + /// BCS serialized transaction data bytes without its type tag, as + /// base-64 encoded string. tx_bytes: Base64, - /// A list of signatures (`flag || signature || pubkey` bytes, as base-64 encoded string). Signature is committed to the intent message of the transaction data, as base-64 encoded string. + /// A list of signatures (`flag || signature || pubkey` bytes, as + /// base-64 encoded string). Signature is committed to the intent + /// message of the transaction data, as base-64 encoded string. signatures: Vec, /// options for specifying the content to be returned options: Option, - /// The request type, derived from `SuiTransactionBlockResponseOptions` if None + /// The request type, derived from `SuiTransactionBlockResponseOptions` + /// if None request_type: Option, ) -> RpcResult; @@ -47,13 +51,17 @@ pub trait WriteApi { async fn dev_inspect_transaction_block( &self, sender_address: SuiAddress, - /// BCS encoded TransactionKind(as opposed to TransactionData, which include gasBudget and gasPrice) + /// BCS encoded TransactionKind (as opposed to TransactionData, which + /// includes gasBudget and gasPrice) tx_bytes: Base64, - /// Gas is not charged, but gas usage is still calculated. Default to use reference gas price + /// Gas is not charged, but gas usage is still calculated. Default to + /// use reference gas price gas_price: Option>, - /// The epoch to perform the call. Will be set from the system state object if not provided + /// The epoch to perform the call. Will be set from the system state + /// object if not provided epoch: Option>, - /// Additional arguments including gas_budget, gas_objects, gas_sponsor and skip_checks. + /// Additional arguments including gas_budget, gas_objects, gas_sponsor + /// and skip_checks.
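// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] Submitting a signed transaction
// with the request types documented above. `tx_bytes` and `signatures` are
// assumed to come from an off-line signing step; `with_effects()` is one of
// the response-option builders.
use fastcrypto::encoding::Base64;
use jsonrpsee::http_client::HttpClientBuilder;
use sui_json_rpc_api::WriteApiClient;
use sui_json_rpc_types::SuiTransactionBlockResponseOptions;
use sui_types::quorum_driver_types::ExecuteTransactionRequestType;

async fn submit(tx_bytes: Base64, signatures: Vec<Base64>) -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:9000")?;
    let response = client
        .execute_transaction_block(
            tx_bytes,
            signatures,
            Some(SuiTransactionBlockResponseOptions::new().with_effects()),
            // WaitForLocalExecution: the node executes locally before replying,
            // so follow-up queries on this node observe the transaction.
            Some(ExecuteTransactionRequestType::WaitForLocalExecution),
        )
        .await?;
    println!("digest: {}", response.digest);
    Ok(())
}
// ---------------------------------------------------------------------------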
additional_args: Option, ) -> RpcResult; diff --git a/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs b/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs index 0324ff13b70..508d9f10c92 100644 --- a/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs +++ b/crates/sui-json-rpc-tests/tests/balance_changes_tests.rs @@ -2,10 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 use std::path::PathBuf; + use sui_move_build::{BuildConfig, SuiPackageHooks}; use sui_sdk::SuiClient; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::transaction::{TransactionData, TransactionKind}; +use sui_types::{ + programmable_transaction_builder::ProgrammableTransactionBuilder, + transaction::{TransactionData, TransactionKind}, +}; use test_cluster::TestClusterBuilder; #[tokio::test] diff --git a/crates/sui-json-rpc-tests/tests/name_service_tests.rs b/crates/sui-json-rpc-tests/tests/name_service_tests.rs index 0b12ecf3699..3f78667fbf8 100644 --- a/crates/sui-json-rpc-tests/tests/name_service_tests.rs +++ b/crates/sui-json-rpc-tests/tests/name_service_tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use std::str::FromStr; + use sui_json_rpc::name_service::{self, Domain}; use sui_types::{ base_types::{ObjectID, SuiAddress}, diff --git a/crates/sui-json-rpc-tests/tests/routing_tests.rs b/crates/sui-json-rpc-tests/tests/routing_tests.rs index b270aff74ad..3e8295e5b97 100644 --- a/crates/sui-json-rpc-tests/tests/routing_tests.rs +++ b/crates/sui-json-rpc-tests/tests/routing_tests.rs @@ -1,17 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::env; + use async_trait::async_trait; -use hyper::header::HeaderValue; -use hyper::HeaderMap; -use jsonrpsee::core::client::ClientT; -use jsonrpsee::core::RpcResult; -use jsonrpsee::http_client::HttpClientBuilder; -use jsonrpsee::proc_macros::rpc; -use jsonrpsee::rpc_params; -use jsonrpsee::RpcModule; +use hyper::{header::HeaderValue, HeaderMap}; +use jsonrpsee::{ + core::{client::ClientT, RpcResult}, + http_client::HttpClientBuilder, + proc_macros::rpc, + rpc_params, RpcModule, +}; use prometheus::Registry; -use std::env; use sui_config::local_ip_utils; use sui_json_rpc::{JsonRpcServerBuilder, SuiRpcModule}; use sui_json_rpc_api::CLIENT_TARGET_API_VERSION_HEADER; @@ -111,7 +111,8 @@ async fn test_disable_routing() { let response: RpcResult = client.request("test_foo_1_5", rpc_params!("string")).await; assert!(response.is_err()); - // Test with versioned client, version = backward compatible method version, should fail because routing is disabled. + // Test with versioned client, version = backward compatible method version, + // should fail because routing is disabled. 
let mut versioned_header = HeaderMap::new(); versioned_header.insert( CLIENT_TARGET_API_VERSION_HEADER, @@ -173,8 +174,8 @@ async fn test_disable_routing() { // jsonrpc: Default::default(), // id: Id::Number(1), // method: "test_foo".into(), -// params: Some(&JsonRawValue::from_string("[true]".into()).unwrap()), -// }), +// params: +// Some(&JsonRawValue::from_string("[true]".into()).unwrap()), }), // json!("Bad json input"), // ]) // .send() @@ -182,18 +183,21 @@ async fn test_disable_routing() { // .unwrap(); // let responses = response.text().await.unwrap(); -// let responses: Vec<&JsonRawValue> = serde_json::from_str(&responses).unwrap(); +// let responses: Vec<&JsonRawValue> = +// serde_json::from_str(&responses).unwrap(); // // Should have 2 results // assert_eq!(2, responses.len()); // // First response should success -// let response = serde_json::from_str::>(responses[0].get()); -// assert!(matches!(response, Ok(result) if result.result == "Some string")); +// let response = +// serde_json::from_str::>(responses[0].get()); assert! +// (matches!(response, Ok(result) if result.result == "Some string")); // // Second response should fail // let response = serde_json::from_str::(responses[1].get()); -// assert!(matches!(response, Ok(result) if result.error_object().message() == "Invalid request")); +// assert!(matches!(response, Ok(result) if result.error_object().message() +// == "Invalid request")); // handle.stop().unwrap() // } diff --git a/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs b/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs index a1a13bce899..4a97271cc38 100644 --- a/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs +++ b/crates/sui-json-rpc-tests/tests/rpc_server_tests.rs @@ -1,42 +1,46 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::BTreeMap; -use std::path::{Path, PathBuf}; #[cfg(not(msim))] use std::str::FromStr; -use std::time::Duration; +use std::{ + collections::BTreeMap, + path::{Path, PathBuf}, + time::Duration, +}; + use sui_json::{call_args, type_args}; use sui_json_rpc_api::{ CoinReadApiClient, GovernanceReadApiClient, IndexerApiClient, ReadApiClient, TransactionBuilderClient, WriteApiClient, }; -use sui_json_rpc_types::ObjectsPage; use sui_json_rpc_types::{ - Balance, CoinPage, DelegatedStake, StakeStatus, SuiCoinMetadata, SuiExecutionStatus, - SuiObjectDataOptions, SuiObjectResponse, SuiObjectResponseQuery, SuiTransactionBlockEffectsAPI, - SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, TransactionBlockBytes, + Balance, CoinPage, DelegatedStake, DelegatedTimelockedStake, ObjectChange, ObjectsPage, + StakeStatus, SuiCoinMetadata, SuiExecutionStatus, SuiObjectDataOptions, SuiObjectResponse, + SuiObjectResponseQuery, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, + SuiTransactionBlockResponseOptions, TransactionBlockBytes, }; -use sui_json_rpc_types::{DelegatedTimelockedStake, ObjectChange}; use sui_macros::sim_test; use sui_move_build::BuildConfig; use sui_protocol_config::ProtocolConfig; use sui_swarm_config::genesis_config::{ AccountConfig, DEFAULT_GAS_AMOUNT, DEFAULT_NUMBER_OF_OBJECT_PER_ACCOUNT, }; -use sui_types::balance::Supply; -use sui_types::base_types::SequenceNumber; -use sui_types::base_types::{MoveObjectType, ObjectID}; -use sui_types::coin::{TreasuryCap, COIN_MODULE_NAME}; -use sui_types::crypto::deterministic_random_account_key; -use sui_types::digests::{ObjectDigest, TransactionDigest}; -use sui_types::gas_coin::GAS; -use sui_types::id::UID; -use sui_types::object::{Data, MoveObject, ObjectInner, Owner, OBJECT_START_VERSION}; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::timelock::timelock::TimeLock; -use sui_types::utils::to_sender_signed_transaction; -use sui_types::{parse_sui_struct_tag, SUI_FRAMEWORK_ADDRESS}; +use sui_types::{ + balance::Supply, + base_types::{MoveObjectType, ObjectID, SequenceNumber}, + coin::{TreasuryCap, COIN_MODULE_NAME}, + crypto::deterministic_random_account_key, + digests::{ObjectDigest, TransactionDigest}, + gas_coin::GAS, + id::UID, + object::{Data, MoveObject, ObjectInner, Owner, OBJECT_START_VERSION}, + parse_sui_struct_tag, + quorum_driver_types::ExecuteTransactionRequestType, + timelock::timelock::TimeLock, + utils::to_sender_signed_transaction, + SUI_FRAMEWORK_ADDRESS, +}; use test_cluster::TestClusterBuilder; use tokio::time::sleep; @@ -83,13 +87,15 @@ async fn test_get_package_with_display_should_not_fail() -> Result<(), anyhow::E .await; assert!(response.is_ok()); let response: SuiObjectResponse = response?; - assert!(response - .into_object() - .unwrap() - .display - .unwrap() - .data - .is_none()); + assert!( + response + .into_object() + .unwrap() + .display + .unwrap() + .data + .is_none() + ); Ok(()) } @@ -962,7 +968,8 @@ async fn test_staking_multiple_coins() -> Result<(), anyhow::Error> { Ok(()) } -// Need to be enable when the Stardust package is integrated in the system packages list. +// Needs to be enabled when the Stardust package is integrated in the system +// packages list.
#[ignore] #[sim_test] async fn test_timelocked_staking() -> Result<(), anyhow::Error> { diff --git a/crates/sui-json-rpc-tests/tests/subscription_tests.rs b/crates/sui-json-rpc-tests/tests/subscription_tests.rs index adfb52e2c9d..9980946c0d6 100644 --- a/crates/sui-json-rpc-tests/tests/subscription_tests.rs +++ b/crates/sui-json-rpc-tests/tests/subscription_tests.rs @@ -3,16 +3,17 @@ use std::time::Duration; -use jsonrpsee::core::client::{Subscription, SubscriptionClientT}; -use jsonrpsee::rpc_params; -use sui_test_transaction_builder::{create_devnet_nft, publish_nfts_package}; -use tokio::time::timeout; - +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, +}; use sui_core::test_utils::wait_for_tx; use sui_json_rpc_types::{ SuiTransactionBlockEffects, SuiTransactionBlockEffectsAPI, TransactionFilter, }; +use sui_test_transaction_builder::{create_devnet_nft, publish_nfts_package}; use test_cluster::TestClusterBuilder; +use tokio::time::timeout; #[tokio::test] async fn test_subscribe_transaction() -> Result<(), anyhow::Error> { diff --git a/crates/sui-json-rpc-tests/tests/transaction_tests.rs b/crates/sui-json-rpc-tests/tests/transaction_tests.rs index b8f980f5238..99c3a2594c0 100644 --- a/crates/sui-json-rpc-tests/tests/transaction_tests.rs +++ b/crates/sui-json-rpc-tests/tests/transaction_tests.rs @@ -1,19 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use sui_json_rpc_types::SuiTransactionBlockResponseQuery; -use sui_json_rpc_types::TransactionFilter; +use sui_json_rpc_api::{IndexerApiClient, TransactionBuilderClient, WriteApiClient}; use sui_json_rpc_types::{ SuiObjectDataOptions, SuiObjectResponseQuery, SuiTransactionBlockResponse, - SuiTransactionBlockResponseOptions, TransactionBlockBytes, + SuiTransactionBlockResponseOptions, SuiTransactionBlockResponseQuery, TransactionBlockBytes, + TransactionFilter, }; use sui_macros::sim_test; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::transaction::SenderSignedData; +use sui_types::{ + quorum_driver_types::ExecuteTransactionRequestType, transaction::SenderSignedData, +}; use test_cluster::TestClusterBuilder; -use sui_json_rpc_api::{IndexerApiClient, TransactionBuilderClient, WriteApiClient}; - #[sim_test] async fn test_get_transaction_block() -> Result<(), anyhow::Error> { let cluster = TestClusterBuilder::new().build().await; @@ -67,19 +66,19 @@ async fn test_get_transaction_block() -> Result<(), anyhow::Error> { tx_responses.push(response); } - // TODO(chris): re-enable after rewriting get_transactions_in_range_deprecated with query_transactions - // test get_transaction_batch + // TODO(chris): re-enable after rewriting get_transactions_in_range_deprecated + // with query_transactions test get_transaction_batch // let batch_responses: Vec = http_client - // .multi_get_transaction_blocks(tx, Some(SuiTransactionBlockResponseOptions::new())) - // .await?; + // .multi_get_transaction_blocks(tx, + // Some(SuiTransactionBlockResponseOptions::new())) .await?; // assert_eq!(5, batch_responses.len()); // for r in batch_responses.iter().skip(1) { // assert!(tx_responses // .iter() - // .any(|resp| matches!(resp, SuiTransactionBlockResponse {digest, ..} if *digest == r.digest))) - // } + // .any(|resp| matches!(resp, SuiTransactionBlockResponse {digest, ..} + // if *digest == r.digest))) } // // test get_transaction // for tx_digest in tx { diff --git a/crates/sui-json-rpc-types/src/balance_changes.rs 
b/crates/sui-json-rpc-types/src/balance_changes.rs index 83b659f75b0..23c3c07e8b0 100644 --- a/crates/sui-json-rpc-types/src/balance_changes.rs +++ b/crates/sui-json-rpc-types/src/balance_changes.rs @@ -1,13 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::fmt::{Display, Formatter, Result}; + use move_core_types::language_storage::TypeTag; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::serde_as; -use serde_with::DisplayFromStr; -use std::fmt::{Display, Formatter, Result}; -use sui_types::object::Owner; -use sui_types::sui_serde::SuiTypeTag; +use serde_with::{serde_as, DisplayFromStr}; +use sui_types::{object::Owner, sui_serde::SuiTypeTag}; #[serde_as] #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] @@ -19,7 +18,8 @@ pub struct BalanceChange { #[serde_as(as = "SuiTypeTag")] pub coin_type: TypeTag, /// The amount indicate the balance value changes, - /// negative amount means spending coin value and positive means receiving coin value. + /// negative amount means spending coin value and positive means receiving + /// coin value. #[schemars(with = "String")] #[serde_as(as = "DisplayFromStr")] pub amount: i128, diff --git a/crates/sui-json-rpc-types/src/displays/transaction_displays.rs b/crates/sui-json-rpc-types/src/displays/transaction_displays.rs index ad37fd194e1..478a3de9072 100644 --- a/crates/sui-json-rpc-types/src/displays/transaction_displays.rs +++ b/crates/sui-json-rpc-types/src/displays/transaction_displays.rs @@ -1,19 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::displays::Pretty; use std::fmt::{Display, Formatter}; -use crate::{ - SuiArgument, SuiCallArg, SuiCommand, SuiObjectArg, SuiProgrammableMoveCall, - SuiProgrammableTransactionBlock, -}; use sui_types::transaction::write_sep; use tabled::{ builder::Builder as TableBuilder, settings::{style::HorizontalLine, Panel as TablePanel, Style as TableStyle}, }; +use crate::{ + displays::Pretty, SuiArgument, SuiCallArg, SuiCommand, SuiObjectArg, SuiProgrammableMoveCall, + SuiProgrammableTransactionBlock, +}; + impl<'a> Display for Pretty<'a, SuiProgrammableTransactionBlock> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let Pretty(ptb) = self; diff --git a/crates/sui-json-rpc-types/src/lib.rs b/crates/sui-json-rpc-types/src/lib.rs index 8d1818dd7b3..f8469ed62c2 100644 --- a/crates/sui-json-rpc-types/src/lib.rs +++ b/crates/sui-json-rpc-types/src/lib.rs @@ -1,11 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - pub use balance_changes::*; pub use object_changes::*; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; pub use sui_checkpoint::*; pub use sui_coin::*; pub use sui_event::*; @@ -15,8 +14,7 @@ pub use sui_move::*; pub use sui_object::*; pub use sui_protocol::*; pub use sui_transaction::*; -use sui_types::base_types::ObjectID; -use sui_types::dynamic_field::DynamicFieldInfo; +use sui_types::{base_types::ObjectID, dynamic_field::DynamicFieldInfo}; #[cfg(test)] #[path = "unit_tests/rpc_types_tests.rs"] @@ -37,8 +35,8 @@ mod sui_transaction; pub type DynamicFieldPage = Page; /// `next_cursor` points to the last item in the page; -/// Reading with `next_cursor` will start from the next item after `next_cursor` if -/// `next_cursor` is `Some`, otherwise it will start from the first item. 
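// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] The `BalanceChange::amount` sign
// convention above, with hypothetical values: negative entries are spends
// (including gas), positive entries are receipts.
fn main() {
    let changes: [(&str, i128); 2] = [
        ("0x2::sui::SUI", -1_002_500_000), // sender: 1 SUI sent plus gas
        ("0x2::sui::SUI", 1_000_000_000),  // recipient: 1 SUI received
    ];
    let net: i128 = changes.iter().map(|(_, amount)| *amount).sum();
    // The shortfall across all parties is the gas burned (hypothetical figure).
    assert_eq!(net, -2_500_000);
    println!("net SUI change across parties: {net}");
}
// ---------------------------------------------------------------------------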
+/// Reading with `next_cursor` will start from the next item after `next_cursor` +/// if `next_cursor` is `Some`, otherwise it will start from the first item. #[derive(Clone, Debug, JsonSchema, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct Page { diff --git a/crates/sui-json-rpc-types/src/object_changes.rs b/crates/sui-json-rpc-types/src/object_changes.rs index 34682364632..895e9c34fef 100644 --- a/crates/sui-json-rpc-types/src/object_changes.rs +++ b/crates/sui-json-rpc-types/src/object_changes.rs @@ -1,17 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::fmt::{Display, Formatter, Result}; + use move_core_types::language_storage::StructTag; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::fmt::{Display, Formatter, Result}; -use sui_types::base_types::{ObjectDigest, ObjectID, ObjectRef, SequenceNumber, SuiAddress}; -use sui_types::object::Owner; -use sui_types::sui_serde::SequenceNumber as AsSequenceNumber; -use sui_types::sui_serde::SuiStructTag; +use sui_types::{ + base_types::{ObjectDigest, ObjectID, ObjectRef, SequenceNumber, SuiAddress}, + object::Owner, + sui_serde::{SequenceNumber as AsSequenceNumber, SuiStructTag}, +}; -/// ObjectChange are derived from the object mutations in the TransactionEffect to provide richer object information. +/// ObjectChange are derived from the object mutations in the TransactionEffect +/// to provide richer object information. #[serde_as] #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] #[serde(rename_all = "camelCase", tag = "type")] @@ -197,7 +200,12 @@ impl Display for ObjectChange { write!( f, " ┌──\n │ ObjectID: {}\n │ Sender: {} \n │ Recipient: {}\n │ ObjectType: {} \n │ Version: {}\n │ Digest: {}\n └──", - object_id, sender, recipient, object_type, u64::from(*version), digest + object_id, + sender, + recipient, + object_type, + u64::from(*version), + digest ) } ObjectChange::Mutated { @@ -212,7 +220,12 @@ impl Display for ObjectChange { write!( f, " ┌──\n │ ObjectID: {}\n │ Sender: {} \n │ Owner: {}\n │ ObjectType: {} \n │ Version: {}\n │ Digest: {}\n └──", - object_id, sender, owner, object_type, u64::from(*version), digest + object_id, + sender, + owner, + object_type, + u64::from(*version), + digest ) } ObjectChange::Deleted { @@ -224,7 +237,10 @@ impl Display for ObjectChange { write!( f, " ┌──\n │ ObjectID: {}\n │ Sender: {} \n │ ObjectType: {} \n │ Version: {}\n └──", - object_id, sender, object_type, u64::from(*version) + object_id, + sender, + object_type, + u64::from(*version) ) } ObjectChange::Wrapped { @@ -236,7 +252,10 @@ impl Display for ObjectChange { write!( f, " ┌──\n │ ObjectID: {}\n │ Sender: {} \n │ ObjectType: {} \n │ Version: {}\n └──", - object_id, sender, object_type, u64::from(*version) + object_id, + sender, + object_type, + u64::from(*version) ) } ObjectChange::Created { @@ -250,7 +269,12 @@ impl Display for ObjectChange { write!( f, " ┌──\n │ ObjectID: {}\n │ Sender: {} \n │ Owner: {}\n │ ObjectType: {} \n │ Version: {}\n │ Digest: {}\n └──", - object_id, sender, owner, object_type, u64::from(*version), digest + object_id, + sender, + owner, + object_type, + u64::from(*version), + digest ) } } diff --git a/crates/sui-json-rpc-types/src/sui_checkpoint.rs b/crates/sui-json-rpc-types/src/sui_checkpoint.rs index 673a2516ce1..88f49f1984d 100644 --- a/crates/sui-json-rpc-types/src/sui_checkpoint.rs +++ b/crates/sui-json-rpc-types/src/sui_checkpoint.rs @@ -1,22 +1,25 
@@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::Page; use fastcrypto::encoding::Base64; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use sui_types::base_types::TransactionDigest; -use sui_types::committee::EpochId; -use sui_types::crypto::AggregateAuthoritySignature; -use sui_types::digests::CheckpointDigest; -use sui_types::gas::GasCostSummary; -use sui_types::message_envelope::Message; -use sui_types::messages_checkpoint::{ - CheckpointCommitment, CheckpointContents, CheckpointSequenceNumber, CheckpointSummary, - CheckpointTimestamp, EndOfEpochData, +use sui_types::{ + base_types::TransactionDigest, + committee::EpochId, + crypto::AggregateAuthoritySignature, + digests::CheckpointDigest, + gas::GasCostSummary, + message_envelope::Message, + messages_checkpoint::{ + CheckpointCommitment, CheckpointContents, CheckpointSequenceNumber, CheckpointSummary, + CheckpointTimestamp, EndOfEpochData, + }, + sui_serde::BigInt, }; -use sui_types::sui_serde::BigInt; + +use crate::Page; pub type CheckpointPage = Page>; #[serde_as] @@ -33,20 +36,21 @@ pub struct Checkpoint { pub sequence_number: CheckpointSequenceNumber, /// Checkpoint digest pub digest: CheckpointDigest, - /// Total number of transactions committed since genesis, including those in this - /// checkpoint. + /// Total number of transactions committed since genesis, including those in + /// this checkpoint. #[schemars(with = "BigInt")] #[serde_as(as = "BigInt")] pub network_total_transactions: u64, /// Digest of the previous checkpoint #[serde(skip_serializing_if = "Option::is_none")] pub previous_digest: Option, - /// The running total gas costs of all transactions included in the current epoch so far - /// until this checkpoint. + /// The running total gas costs of all transactions included in the current + /// epoch so far until this checkpoint. 
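// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] `network_total_transactions` above
// is a running total since genesis, so the number of transactions inside one
// checkpoint is the difference between consecutive checkpoints' totals.
// Values are hypothetical.
fn main() {
    // (sequence_number, network_total_transactions) of consecutive checkpoints
    let checkpoints: [(u64, u64); 3] = [(100, 5_000), (101, 5_042), (102, 5_043)];
    for pair in checkpoints.windows(2) {
        let txns_in_checkpoint = pair[1].1 - pair[0].1;
        println!("checkpoint {} contains {} transactions", pair[1].0, txns_in_checkpoint);
    }
}
// ---------------------------------------------------------------------------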
pub epoch_rolling_gas_cost_summary: GasCostSummary, /// Timestamp of the checkpoint - number of milliseconds from the Unix epoch - /// Checkpoint timestamps are monotonic, but not strongly monotonic - subsequent - /// checkpoints can have same timestamp if they originate from the same underlining consensus commit + /// Checkpoint timestamps are monotonic, but not strongly monotonic - + /// subsequent checkpoints can have the same timestamp if they originate + /// from the same underlying consensus commit #[schemars(with = "BigInt")] #[serde_as(as = "BigInt")] pub timestamp_ms: CheckpointTimestamp, diff --git a/crates/sui-json-rpc-types/src/sui_coin.rs b/crates/sui-json-rpc-types/src/sui_coin.rs index cd4e146be26..71b38b6b65d 100644 --- a/crates/sui-json-rpc-types/src/sui_coin.rs +++ b/crates/sui-json-rpc-types/src/sui_coin.rs @@ -6,16 +6,15 @@ use std::collections::HashMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_with::serde_as; +use sui_types::{ + base_types::{EpochId, ObjectDigest, ObjectID, ObjectRef, SequenceNumber, TransactionDigest}, + coin::CoinMetadata, + error::SuiError, + object::Object, + sui_serde::{BigInt, SequenceNumber as AsSequenceNumber}, +}; use crate::Page; -use sui_types::base_types::{ - EpochId, ObjectDigest, ObjectID, ObjectRef, SequenceNumber, TransactionDigest, -}; -use sui_types::coin::CoinMetadata; -use sui_types::error::SuiError; -use sui_types::object::Object; -use sui_types::sui_serde::BigInt; -use sui_types::sui_serde::SequenceNumber as AsSequenceNumber; pub type CoinPage = Page; diff --git a/crates/sui-json-rpc-types/src/sui_event.rs b/crates/sui-json-rpc-types/src/sui_event.rs index b3739868665..aa24a939861 100644 --- a/crates/sui-json-rpc-types/src/sui_event.rs +++ b/crates/sui-json-rpc-types/src/sui_event.rs @@ -1,30 +1,29 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +#[cfg(any(feature = "test-utils", test))] +use std::str::FromStr; +use std::{fmt, fmt::Display}; + use fastcrypto::encoding::{Base58, Base64}; -use move_core_types::annotated_value::MoveStructLayout; -use move_core_types::identifier::Identifier; -use move_core_types::language_storage::StructTag; +use json_to_table::json_to_table; +use move_core_types::{ + annotated_value::MoveStructLayout, identifier::Identifier, language_storage::StructTag, +}; use mysten_metrics::monitored_scope; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use serde_with::{serde_as, DisplayFromStr}; -use std::fmt; -use std::fmt::Display; -use sui_types::base_types::{ObjectID, SuiAddress, TransactionDigest}; -use sui_types::error::SuiResult; -use sui_types::event::{Event, EventEnvelope, EventID}; -use sui_types::sui_serde::BigInt; - -use json_to_table::json_to_table; +use sui_types::{ + base_types::{ObjectID, SuiAddress, TransactionDigest}, + error::SuiResult, + event::{Event, EventEnvelope, EventID}, + sui_serde::{BigInt, SuiStructTag}, +}; use tabled::settings::Style as TableStyle; use crate::{type_and_fields_from_move_struct, Page}; -use sui_types::sui_serde::SuiStructTag; - -#[cfg(any(feature = "test-utils", test))] -use std::str::FromStr; pub type EventPage = Page; @@ -34,8 +33,8 @@ pub type EventPage = Page; pub struct SuiEvent { /// Sequential event ID, ie (transaction seq number, event seq number). /// 1) Serves as a unique event ID for each fullnode - /// 2) Also serves to sequence events for the purposes of pagination and querying. - /// A higher id is an event seen later by that fullnode.
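// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] The timestamp property documented
// above: checkpoint timestamps are non-decreasing, not strictly increasing,
// since checkpoints from the same consensus commit may share a timestamp.
// Values are hypothetical.
fn main() {
    let timestamps_ms: [u64; 4] = [
        1_700_000_000_000,
        1_700_000_001_000,
        1_700_000_001_000, // same consensus commit, same timestamp
        1_700_000_002_500,
    ];
    assert!(timestamps_ms.windows(2).all(|w| w[0] <= w[1])); // `<=`, not `<`
    println!("timestamps are monotonic (non-strict)");
}
// ---------------------------------------------------------------------------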
+ /// 2) Also serves to sequence events for the purposes of pagination and + /// querying. A higher id is an event seen later by that fullnode. /// This ID is the "cursor" for event querying. pub id: EventID, /// Move package where this event was emitted. @@ -137,9 +136,16 @@ impl Display for SuiEvent { let mut table = json_to_table(parsed_json); let style = TableStyle::modern(); table.collapse().with(style); - write!(f, + write!( + f, " ┌──\n │ EventID: {}:{}\n │ PackageID: {}\n │ Transaction Module: {}\n │ Sender: {}\n │ EventType: {}\n", - self.id.tx_digest, self.id.event_seq, self.package_id, self.transaction_module, self.sender, self.type_)?; + self.id.tx_digest, + self.id.event_seq, + self.package_id, + self.transaction_module, + self.sender, + self.type_ + )?; if let Some(ts) = self.timestamp_ms { writeln!(f, " │ Timestamp: {}\n └──", ts)?; } @@ -207,7 +213,7 @@ pub enum EventFilter { Sender(SuiAddress), /// Return events emitted by the given transaction. Transaction( - ///digest of the transaction, as base-64 encoded string + /// digest of the transaction, as base-64 encoded string TransactionDigest, ), /// Return events emitted in a specified Package. @@ -232,10 +238,10 @@ pub enum EventFilter { #[serde_as(as = "SuiStructTag")] StructTag, ), - /// Return events with the given Move module name where the event struct is defined. - /// If the event is defined in Module A but emitted in a tx with Module B, - /// query `MoveEventModule` by module A returns the event. - /// Query `MoveModule` by module B returns the event too. + /// Return events with the given Move module name where the event struct is + /// defined. If the event is defined in Module A but emitted in a tx + /// with Module B, query `MoveEventModule` by module A returns the + /// event. Query `MoveModule` by module B returns the event too. 
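// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] The distinction documented above:
// an event type defined in module A but emitted by a transaction that called
// module B matches `MoveEventModule` for A and `MoveModule` for B. Package
// ids and module names are placeholders.
use move_core_types::identifier::Identifier;
use sui_json_rpc_types::EventFilter;
use sui_types::base_types::ObjectID;

fn main() {
    let defining_package = ObjectID::random(); // package that defines the event
    let emitting_package = ObjectID::random(); // package whose module emitted it
    let by_definition = EventFilter::MoveEventModule {
        package: defining_package,
        module: Identifier::new("module_a").unwrap(),
    };
    let by_emission = EventFilter::MoveModule {
        package: emitting_package,
        module: Identifier::new("module_b").unwrap(),
    };
    println!("{by_definition:?}");
    println!("{by_emission:?}");
}
// ---------------------------------------------------------------------------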
MoveEventModule { /// the Move package ID package: ObjectID, diff --git a/crates/sui-json-rpc-types/src/sui_extended.rs b/crates/sui-json-rpc-types/src/sui_extended.rs index 08f049f59c7..dd474455576 100644 --- a/crates/sui-json-rpc-types/src/sui_extended.rs +++ b/crates/sui-json-rpc-types/src/sui_extended.rs @@ -6,17 +6,15 @@ use std::collections::BTreeMap; use fastcrypto::traits::ToFromBytes; use move_core_types::identifier::Identifier; use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; -use serde_with::serde_as; -use serde_with::DisplayFromStr; - -use sui_types::base_types::AuthorityName; -use sui_types::base_types::{EpochId, ObjectID}; -use sui_types::committee::Committee; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::sui_serde::BigInt; -use sui_types::sui_system_state::sui_system_state_summary::SuiValidatorSummary; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; +use sui_types::{ + base_types::{AuthorityName, EpochId, ObjectID}, + committee::Committee, + messages_checkpoint::CheckpointSequenceNumber, + sui_serde::BigInt, + sui_system_state::sui_system_state_summary::SuiValidatorSummary, +}; use crate::Page; diff --git a/crates/sui-json-rpc-types/src/sui_governance.rs b/crates/sui-json-rpc-types/src/sui_governance.rs index e0f384c464a..eb05236a98c 100644 --- a/crates/sui-json-rpc-types/src/sui_governance.rs +++ b/crates/sui-json-rpc-types/src/sui_governance.rs @@ -4,9 +4,11 @@ use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use sui_types::base_types::{AuthorityName, EpochId, ObjectID, SuiAddress}; -use sui_types::committee::{Committee, StakeUnit}; -use sui_types::sui_serde::BigInt; +use sui_types::{ + base_types::{AuthorityName, EpochId, ObjectID, SuiAddress}, + committee::{Committee, StakeUnit}, + sui_serde::BigInt, +}; /// RPC representation of the [Committee] type. #[serde_as] diff --git a/crates/sui-json-rpc-types/src/sui_move.rs b/crates/sui-json-rpc-types/src/sui_move.rs index 80d6a3733a3..263ebf5258b 100644 --- a/crates/sui-json-rpc-types/src/sui_move.rs +++ b/crates/sui-json-rpc-types/src/sui_move.rs @@ -1,29 +1,37 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::BTreeMap, + fmt, + fmt::{Display, Formatter, Write}, +}; + use colored::Colorize; use itertools::Itertools; -use move_binary_format::file_format::{Ability, AbilitySet, StructTypeParameter, Visibility}; -use move_binary_format::normalized::{ - Field as NormalizedField, Function as SuiNormalizedFunction, Module as NormalizedModule, - Struct as NormalizedStruct, Type as NormalizedType, +use move_binary_format::{ + file_format::{Ability, AbilitySet, StructTypeParameter, Visibility}, + normalized::{ + Field as NormalizedField, Function as SuiNormalizedFunction, Module as NormalizedModule, + Struct as NormalizedStruct, Type as NormalizedType, + }, +}; +use move_core_types::{ + annotated_value::{MoveStruct, MoveValue}, + identifier::Identifier, + language_storage::StructTag, }; -use move_core_types::annotated_value::{MoveStruct, MoveValue}; -use move_core_types::identifier::Identifier; -use move_core_types::language_storage::StructTag; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use serde_with::serde_as; -use std::collections::BTreeMap; -use std::fmt; -use std::fmt::{Display, Formatter, Write}; use sui_macros::EnumVariantOrder; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + sui_serde::SuiStructTag, +}; use tracing::warn; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::sui_serde::SuiStructTag; - pub type SuiMoveTypeParameterIndex = u16; #[cfg(test)] @@ -423,7 +431,8 @@ impl SuiMoveStruct { .collect::>(); json!(values) } - // We only care about values here, assuming struct type information is known at the client side. + // We only care about values here, assuming struct type information is known at the + // client side. SuiMoveStruct::WithTypes { type_: _, fields } | SuiMoveStruct::WithFields(fields) => { let fields = fields .into_iter() diff --git a/crates/sui-json-rpc-types/src/sui_object.rs b/crates/sui-json-rpc-types/src/sui_object.rs index eadcd2179b3..0c9f6485dba 100644 --- a/crates/sui-json-rpc-types/src/sui_object.rs +++ b/crates/sui-json-rpc-types/src/sui_object.rs @@ -1,39 +1,39 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::cmp::Ordering; -use std::collections::BTreeMap; -use std::fmt; -use std::fmt::Write; -use std::fmt::{Display, Formatter}; +use std::{ + cmp::Ordering, + collections::BTreeMap, + fmt, + fmt::{Display, Formatter, Write}, +}; use anyhow::anyhow; use colored::Colorize; use fastcrypto::encoding::Base64; use move_bytecode_utils::module_cache::GetModule; -use move_core_types::annotated_value::{MoveStruct, MoveStructLayout}; -use move_core_types::identifier::Identifier; -use move_core_types::language_storage::StructTag; +use move_core_types::{ + annotated_value::{MoveStruct, MoveStructLayout}, + identifier::Identifier, + language_storage::StructTag, +}; use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use serde_json::Value; -use serde_with::serde_as; -use serde_with::DisplayFromStr; - +use serde_with::{serde_as, DisplayFromStr}; use sui_protocol_config::ProtocolConfig; -use sui_types::base_types::{ - ObjectDigest, ObjectID, ObjectInfo, ObjectRef, ObjectType, SequenceNumber, SuiAddress, - TransactionDigest, +use sui_types::{ + base_types::{ + ObjectDigest, ObjectID, ObjectInfo, ObjectRef, ObjectType, SequenceNumber, SuiAddress, + TransactionDigest, + }, + error::{ExecutionError, SuiObjectResponseError, UserInputError, UserInputResult}, + gas_coin::GasCoin, + messages_checkpoint::CheckpointSequenceNumber, + move_package::{MovePackage, TypeOrigin, UpgradeInfo}, + object::{Data, MoveObject, Object, ObjectInner, ObjectRead, Owner}, + sui_serde::{BigInt, SequenceNumber as AsSequenceNumber, SuiStructTag}, }; -use sui_types::error::{ExecutionError, SuiObjectResponseError, UserInputError, UserInputResult}; -use sui_types::gas_coin::GasCoin; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::move_package::{MovePackage, TypeOrigin, UpgradeInfo}; -use sui_types::object::{Data, MoveObject, Object, ObjectInner, ObjectRead, Owner}; -use sui_types::sui_serde::BigInt; -use sui_types::sui_serde::SequenceNumber as AsSequenceNumber; -use sui_types::sui_serde::SuiStructTag; use crate::{Page, SuiMoveStruct, SuiMoveValue}; @@ -121,7 +121,9 @@ impl SuiObjectResponse { digest: _, }), ) => Ok(*object_id), - _ => Err(anyhow!("Could not get object_id, something went wrong with SuiObjectResponse construction.")), + _ => Err(anyhow!( + "Could not get object_id, something went wrong with SuiObjectResponse construction." + )), } } @@ -176,17 +178,20 @@ pub struct SuiObjectData { pub version: SequenceNumber, /// Base64 string representing the object digest pub digest: ObjectDigest, - /// The type of the object. Default to be None unless SuiObjectDataOptions.showType is set to true + /// The type of the object. Default to be None unless + /// SuiObjectDataOptions.showType is set to true #[schemars(with = "Option")] #[serde_as(as = "Option")] #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option, // Default to be None because otherwise it will be repeated for the getOwnedObjects endpoint - /// The owner of this object. Default to be None unless SuiObjectDataOptions.showOwner is set to true + /// The owner of this object. Default to be None unless + /// SuiObjectDataOptions.showOwner is set to true #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, - /// The digest of the transaction that created or last mutated this object. 
Default to be None unless
-    /// SuiObjectDataOptions.showPreviousTransaction is set to true
+    /// The digest of the transaction that created or last mutated this object.
+    /// Default to be None unless
+    /// SuiObjectDataOptions.showPreviousTransaction is set to true
     #[serde(skip_serializing_if = "Option::is_none")]
     pub previous_transaction: Option,
     /// The amount of SUI we would rebate if this object gets deleted.
@@ -196,15 +201,17 @@ pub struct SuiObjectData {
     #[serde_as(as = "Option>")]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub storage_rebate: Option,
-    /// The Display metadata for frontend UI rendering, default to be None unless SuiObjectDataOptions.showContent is set to true
-    /// This can also be None if the struct type does not have Display defined
-    /// See more details in
+    /// The Display metadata for frontend UI rendering, default to be None
+    /// unless SuiObjectDataOptions.showContent is set to true. This can also
+    /// be None if the struct type does not have Display defined. See more
+    /// details in
     #[serde(skip_serializing_if = "Option::is_none")]
     pub display: Option,
-    /// Move object content or package content, default to be None unless SuiObjectDataOptions.showContent is set to true
+    /// Move object content or package content, default to be None unless
+    /// SuiObjectDataOptions.showContent is set to true
     #[serde(skip_serializing_if = "Option::is_none")]
     pub content: Option,
-    /// Move object content or package content in BCS, default to be None unless SuiObjectDataOptions.showBcs is set to true
+    /// Move object content or package content in BCS, default to be None unless
+    /// SuiObjectDataOptions.showBcs is set to true
     #[serde(skip_serializing_if = "Option::is_none")]
     pub bcs: Option,
 }
@@ -328,12 +335,14 @@ pub struct SuiObjectDataOptions {
     pub show_type: bool,
     /// Whether to show the owner of the object. Default to be False
     pub show_owner: bool,
-    /// Whether to show the previous transaction digest of the object. Default to be False
+    /// Whether to show the previous transaction digest of the object. Default
+    /// to be False
     pub show_previous_transaction: bool,
-    /// Whether to show the Display metadata of the object for frontend rendering. Default to be False
+    /// Whether to show the Display metadata of the object for frontend
+    /// rendering. Default to be False
     pub show_display: bool,
-    /// Whether to show the content(i.e., package content or Move struct content) of the object.
-    /// Default to be False
+    /// Whether to show the content (i.e., package content or Move struct
+    /// content) of the object. Default to be False
     pub show_content: bool,
     /// Whether to show the content in BCS format. Default to be False
     pub show_bcs: bool,
@@ -590,7 +599,8 @@ impl SuiObjectResponse {
         } else if let Some(error) = &self.error {
             Err(error.clone())
         } else {
-            // We really shouldn't reach this code block since either data, or error field should always be filled.
+            // We really shouldn't reach this code block since either the data or the
+            // error field should always be filled.
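(Aside: since every `show_*` flag above defaults to false, callers opt in field by field. A minimal sketch, assuming the struct's `Default` implementation matches the all-false defaults documented here:)

use sui_json_rpc_types::SuiObjectDataOptions;

fn object_data_options() -> SuiObjectDataOptions {
    // Ask only for type, owner, and parsed content; previousTransaction,
    // display, and BCS remain None in the response.
    SuiObjectDataOptions {
        show_type: true,
        show_owner: true,
        show_content: true,
        ..Default::default()
    }
}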
Err(SuiObjectResponseError::Unknown) } } @@ -689,7 +699,7 @@ pub trait SuiData: Sized { type ObjectType; type PackageType; fn try_from_object(object: MoveObject, layout: MoveStructLayout) - -> Result; + -> Result; fn try_from_package(package: MovePackage) -> Result; fn try_as_move(&self) -> Option<&Self::ObjectType>; fn try_into_move(self) -> Option; @@ -851,7 +861,7 @@ impl SuiParsedData { pub trait SuiMoveObject: Sized { fn try_from_layout(object: MoveObject, layout: MoveStructLayout) - -> Result; + -> Result; fn try_from(o: MoveObject, resolver: &impl GetModule) -> Result { let layout = o.get_layout(resolver)?; @@ -1223,7 +1233,8 @@ impl SuiObjectDataFilter { pub struct SuiObjectResponseQuery { /// If None, no filter will be applied pub filter: Option, - /// config which fields to include in the response, by default only digest is included + /// config which fields to include in the response, by default only digest + /// is included pub options: Option, } diff --git a/crates/sui-json-rpc-types/src/sui_protocol.rs b/crates/sui-json-rpc-types/src/sui_protocol.rs index 9a186187660..934d072f338 100644 --- a/crates/sui-json-rpc-types/src/sui_protocol.rs +++ b/crates/sui-json-rpc-types/src/sui_protocol.rs @@ -5,11 +5,9 @@ use std::collections::BTreeMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::serde_as; -use serde_with::DisplayFromStr; +use serde_with::{serde_as, DisplayFromStr}; use sui_protocol_config::{ProtocolConfig, ProtocolConfigValue, ProtocolVersion}; -use sui_types::sui_serde::Readable; -use sui_types::sui_serde::{AsProtocolVersion, BigInt}; +use sui_types::sui_serde::{AsProtocolVersion, BigInt, Readable}; #[serde_as] #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, PartialEq)] diff --git a/crates/sui-json-rpc-types/src/sui_transaction.rs b/crates/sui-json-rpc-types/src/sui_transaction.rs index 47ede5128ed..a90857e6b97 100644 --- a/crates/sui-json-rpc-types/src/sui_transaction.rs +++ b/crates/sui-json-rpc-types/src/sui_transaction.rs @@ -1,57 +1,57 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::balance_changes::BalanceChange; -use crate::object_changes::ObjectChange; -use crate::sui_transaction::GenericSignature::Signature; -use crate::{Filter, Page, SuiEvent, SuiObjectRef}; +use std::fmt::{self, Display, Formatter, Write}; + use enum_dispatch::enum_dispatch; use fastcrypto::encoding::Base64; -use move_binary_format::access::ModuleAccess; -use move_binary_format::binary_views::BinaryIndexedView; -use move_binary_format::CompiledModule; +use move_binary_format::{access::ModuleAccess, binary_views::BinaryIndexedView, CompiledModule}; use move_bytecode_utils::module_cache::GetModule; -use move_core_types::annotated_value::MoveTypeLayout; -use move_core_types::identifier::IdentStr; -use move_core_types::language_storage::{ModuleId, StructTag, TypeTag}; +use move_core_types::{ + annotated_value::MoveTypeLayout, + identifier::IdentStr, + language_storage::{ModuleId, StructTag, TypeTag}, +}; use mysten_metrics::monitored_scope; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::fmt::{self, Display, Formatter, Write}; use sui_json::{primitive_type, SuiJsonValue}; -use sui_types::authenticator_state::ActiveJwk; -use sui_types::base_types::{ - EpochId, ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest, -}; -use sui_types::crypto::SuiSignature; -use sui_types::digests::{ConsensusCommitDigest, ObjectDigest, TransactionEventsDigest}; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}; -use sui_types::error::{ExecutionError, SuiError, SuiResult}; -use sui_types::execution_status::ExecutionStatus; -use sui_types::gas::GasCostSummary; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::object::{MoveObject, Owner}; -use sui_types::parse_sui_type_tag; -use sui_types::quorum_driver_types::ExecuteTransactionRequestType; -use sui_types::signature::GenericSignature; -use sui_types::storage::{DeleteKind, WriteKind}; -use sui_types::sui_serde::Readable; -use sui_types::sui_serde::{ - BigInt, SequenceNumber as AsSequenceNumber, SuiTypeTag as AsSuiTypeTag, -}; -use sui_types::transaction::{ - Argument, CallArg, ChangeEpoch, Command, EndOfEpochTransactionKind, GenesisObject, - InputObjectKind, ObjectArg, ProgrammableMoveCall, ProgrammableTransaction, SenderSignedData, - TransactionData, TransactionDataAPI, TransactionKind, VersionedProtocolMessage, +use sui_types::{ + authenticator_state::ActiveJwk, + base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest}, + crypto::SuiSignature, + digests::{ConsensusCommitDigest, ObjectDigest, TransactionEventsDigest}, + effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + error::{ExecutionError, SuiError, SuiResult}, + execution_status::ExecutionStatus, + gas::GasCostSummary, + messages_checkpoint::CheckpointSequenceNumber, + object::{MoveObject, Owner}, + parse_sui_type_tag, + quorum_driver_types::ExecuteTransactionRequestType, + signature::GenericSignature, + storage::{DeleteKind, WriteKind}, + sui_serde::{BigInt, Readable, SequenceNumber as AsSequenceNumber, SuiTypeTag as AsSuiTypeTag}, + transaction::{ + Argument, CallArg, ChangeEpoch, Command, EndOfEpochTransactionKind, GenesisObject, + InputObjectKind, ObjectArg, ProgrammableMoveCall, ProgrammableTransaction, + SenderSignedData, TransactionData, TransactionDataAPI, TransactionKind, + VersionedProtocolMessage, + }, + type_resolver::LayoutResolver, + SUI_FRAMEWORK_ADDRESS, }; -use 
sui_types::type_resolver::LayoutResolver; -use sui_types::SUI_FRAMEWORK_ADDRESS; use tabled::{ builder::Builder as TableBuilder, settings::{style::HorizontalLine, Panel as TablePanel, Style as TableStyle}, }; +use crate::{ + balance_changes::BalanceChange, object_changes::ObjectChange, + sui_transaction::GenericSignature::Signature, Filter, Page, SuiEvent, SuiObjectRef, +}; + // similar to EpochId of sui-types but BigInt pub type SuiEpochId = BigInt; @@ -64,7 +64,8 @@ pub type SuiEpochId = BigInt; pub struct SuiTransactionBlockResponseQuery { /// If None, no filter will be applied pub filter: Option, - /// config which fields to include in the response, by default only digest is included + /// config which fields to include in the response, by default only digest + /// is included pub options: Option, } @@ -166,7 +167,8 @@ impl SuiTransactionBlockResponseOptions { /// default to return `WaitForEffectsCert` unless some options require /// local execution pub fn default_execution_request_type(&self) -> ExecuteTransactionRequestType { - // if people want effects or events, they typically want to wait for local execution + // if people want effects or events, they typically want to wait for local + // execution if self.require_effects() { ExecuteTransactionRequestType::WaitForLocalExecution } else { @@ -223,8 +225,9 @@ pub struct SuiTransactionBlockResponse { pub timestamp_ms: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub confirmed_local_execution: Option, - /// The checkpoint number when this transaction was included and hence finalized. - /// This is only returned in the read api, not in the transaction execution api. + /// The checkpoint number when this transaction was included and hence + /// finalized. This is only returned in the read api, not in the + /// transaction execution api. #[schemars(with = "Option>")] #[serde_as(as = "Option>")] #[serde(skip_serializing_if = "Option::is_none")] @@ -382,13 +385,14 @@ pub fn get_new_package_upgrade_cap_from_response( pub enum SuiTransactionBlockKind { /// A system transaction that will update epoch information on-chain. ChangeEpoch(SuiChangeEpoch), - /// A system transaction used for initializing the initial state of the chain. + /// A system transaction used for initializing the initial state of the + /// chain. Genesis(SuiGenesisTransaction), - /// A system transaction marking the start of a series of transactions scheduled as part of a - /// checkpoint + /// A system transaction marking the start of a series of transactions + /// scheduled as part of a checkpoint ConsensusCommitPrologue(SuiConsensusCommitPrologue), - /// A series of transactions where the results of one transaction can be used in future - /// transactions + /// A series of transactions where the results of one transaction can be + /// used in future transactions ProgrammableTransaction(SuiProgrammableTransactionBlock), /// A transaction which updates global authenticator state AuthenticatorStateUpdate(SuiAuthenticatorStateUpdate), @@ -637,11 +641,12 @@ pub struct SuiTransactionBlockEffectsV1 { #[serde_as(as = "BigInt")] pub executed_epoch: EpochId, pub gas_used: GasCostSummary, - /// The version that every modified (mutated or deleted) object had before it was modified by - /// this transaction. + /// The version that every modified (mutated or deleted) object had before + /// it was modified by this transaction. 
 #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub modified_at_versions: Vec,
-    /// The object references of the shared objects used in this transaction. Empty if no shared objects were used.
+    /// The object references of the shared objects used in this transaction.
+    /// Empty if no shared objects were used.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub shared_objects: Vec,
     /// The transaction digest
@@ -653,21 +658,22 @@ pub struct SuiTransactionBlockEffectsV1 {
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub mutated: Vec,
     /// ObjectRef and owner of objects that are unwrapped in this transaction.
-    /// Unwrapped objects are objects that were wrapped into other objects in the past,
-    /// and just got extracted out.
+    /// Unwrapped objects are objects that were wrapped into other objects in
+    /// the past, and just got extracted out.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub unwrapped: Vec,
     /// Object Refs of objects now deleted (the old refs).
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub deleted: Vec,
-    /// Object refs of objects previously wrapped in other objects but now deleted.
+    /// Object refs of objects previously wrapped in other objects but now
+    /// deleted.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub unwrapped_then_deleted: Vec,
     /// Object refs of objects now wrapped in other objects.
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub wrapped: Vec,
-    /// The updated gas object reference. Have a dedicated field for convenient access.
-    /// It's also included in mutated.
+    /// The updated gas object reference. Have a dedicated field for convenient
+    /// access. It's also included in mutated.
     pub gas_object: OwnedObjectRef,
     /// The digest of the events emitted during execution,
     /// can be None if the transaction does not emit any event.
@@ -991,7 +997,8 @@ impl SuiTransactionBlockEvents {
         })
     }

-    // TODO: this is only called from the indexer. Remove this once indexer moves to its own resolver.
+    // TODO: this is only called from the indexer. Remove this once indexer moves to
+    // its own resolver.
     pub fn try_from_using_module_resolver(
         events: TransactionEvents,
         tx_digest: TransactionDigest,
@@ -1038,11 +1045,13 @@ impl Display for SuiTransactionBlockEvents {
     }

 // TODO: this file might not be the best place for this struct.
-/// Additional rguments supplied to dev inspect beyond what is allowed in today's API.
+/// Additional arguments supplied to dev inspect beyond what is allowed in
+/// today's API.
 #[derive(Debug, Default, Clone, Serialize, Deserialize, JsonSchema)]
 #[serde(rename = "DevInspectArgs", rename_all = "camelCase")]
 pub struct DevInspectArgs {
-    /// The sponsor of the gas for the transaction, might be different from the sender.
+    /// The sponsor of the gas for the transaction, might be different from the
+    /// sender.
     pub gas_sponsor: Option,
     /// The gas budget for the transaction.
     pub gas_budget: Option>,
@@ -1058,13 +1067,16 @@ pub struct DevInspectArgs {
 #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
 #[serde(rename = "DevInspectResults", rename_all = "camelCase")]
 pub struct DevInspectResults {
-    /// Summary of effects that likely would be generated if the transaction is actually run.
-    /// Note however, that not all dev-inspect transactions are actually usable as transactions so
-    /// it might not be possible actually generate these effects from a normal transaction.
+    /// Summary of effects that likely would be generated if the transaction is
+    /// actually run. Note, however, that not all dev-inspect transactions
+    /// are actually usable as transactions so it might not be possible to
+    /// actually generate these effects from a normal transaction.
     pub effects: SuiTransactionBlockEffects,
-    /// Events that likely would be generated if the transaction is actually run.
+    /// Events that likely would be generated if the transaction is actually
+    /// run.
     pub events: SuiTransactionBlockEvents,
-    /// Execution results (including return values) from executing the transactions
+    /// Execution results (including return values) from executing the
+    /// transactions
     #[serde(skip_serializing_if = "Option::is_none")]
     pub results: Option>,
     /// Execution error from executing the transactions
@@ -1091,8 +1103,10 @@ pub struct SuiExecutionResult {
 }

 type ExecutionResult = (
-    /* mutable_reference_outputs */ Vec<(Argument, Vec, TypeTag)>,
-    /* return_values */ Vec<(Vec, TypeTag)>,
+    // mutable_reference_outputs
+    Vec<(Argument, Vec, TypeTag)>,
+    // return_values
+    Vec<(Vec, TypeTag)>,
 );

 impl DevInspectResults {
@@ -1365,7 +1379,12 @@ impl Display for SuiTransactionBlock {
                 " {}\n",
                 match tx_sig {
                     Signature(sig) => Base64::from_bytes(sig.signature_bytes()).encoded(),
-                    _ => Base64::from_bytes(tx_sig.as_ref()).encoded(), // the signatures for multisig and zklogin are not suited to be parsed out. they should be interpreted as a whole
+                    _ => Base64::from_bytes(tx_sig.as_ref()).encoded(), /* the signatures for
+                                                                         * multisig and zklogin
+                                                                         * are not suited to be
+                                                                         * parsed out. they
+                                                                         * should be interpreted
+                                                                         * as a whole */
                 }
             )]);
         }
@@ -1535,8 +1554,9 @@ pub struct SuiProgrammableTransactionBlock {
     /// Input objects or primitive values
     pub inputs: Vec,
     #[serde(rename = "transactions")]
-    /// The transactions to be executed sequentially. A failure in any transaction will
-    /// result in the failure of the entire programmable transaction block.
+    /// The transactions to be executed sequentially. A failure in any
+    /// transaction will result in the failure of the entire programmable
+    /// transaction block.
     pub commands: Vec,
 }

@@ -1646,9 +1666,9 @@ pub enum SuiCommand {
     /// A call to either an entry or a public Move function
     MoveCall(Box),
     /// `(Vec, address)`
-    /// It sends n-objects to the specified address. These objects must have store
-    /// (public transfer) and either the previous owner must be an address or the object must
-    /// be newly created.
+    /// It sends n-objects to the specified address. These objects must have
+    /// store (public transfer) and either the previous owner must be an
+    /// address or the object must be newly created.
     TransferObjects(Vec, SuiArgument),
     /// `(&mut Coin, Vec)` -> `Vec>`
     /// It splits off some amounts into a new coins with those amounts
     SplitCoins(SuiArgument, Vec),
     /// `(&mut Coin, Vec>)`
     /// It merges n-coins into the first coin
     MergeCoins(SuiArgument, Vec),
-    /// Publishes a Move package. It takes the package bytes and a list of the package's transitive
-    /// dependencies to link against on-chain.
+    /// Publishes a Move package. It takes the package bytes and a list of the
+    /// package's transitive dependencies to link against on-chain.
     Publish(Vec),
     /// Upgrades a Move package
     Upgrade(Vec, ObjectID, SuiArgument),
     /// `forall T: Vec -> vector`
-    /// Given n-values of the same type, it constructs a vector. For non objects or an empty vector,
-    /// the type tag must be specified.
+ /// Given n-values of the same type, it constructs a vector. For non objects + /// or an empty vector, the type tag must be specified. MakeMoveVec(Option, Vec), } @@ -1751,10 +1771,12 @@ pub enum SuiArgument { /// One of the input objects or primitive values (from /// `ProgrammableTransactionBlock` inputs) Input(u16), - /// The result of another transaction (from `ProgrammableTransactionBlock` transactions) + /// The result of another transaction (from `ProgrammableTransactionBlock` + /// transactions) Result(u16), - /// Like a `Result` but it accesses a nested result. Currently, the only usage - /// of this is to access a value from a Move call with multiple return values. + /// Like a `Result` but it accesses a nested result. Currently, the only + /// usage of this is to access a value from a Move call with multiple + /// return values. NestedResult(u16, u16), } @@ -1780,8 +1802,8 @@ impl From for SuiArgument { } } -/// The transaction for calling a Move function, either an entry function or a public -/// function (which cannot return references). +/// The transaction for calling a Move function, either an entry function or a +/// public function (which cannot return references). #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] pub struct SuiProgrammableMoveCall { /// The package containing the module and function. @@ -1927,7 +1949,8 @@ pub struct MoveCallParams { #[derive(Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct TransactionBlockBytes { - /// BCS serialized transaction data bytes without its type tag, as base-64 encoded string. + /// BCS serialized transaction data bytes without its type tag, as base-64 + /// encoded string. pub tx_bytes: Base64, /// the gas objects to be used pub gas: Vec, @@ -2070,7 +2093,8 @@ pub enum SuiObjectArg { digest: ObjectDigest, }, // A Move object that's shared. - // SharedObject::mutable controls whether caller asks for a mutable reference to shared object. + // SharedObject::mutable controls whether caller asks for a mutable reference to shared + // object. #[serde(rename_all = "camelCase")] SharedObject { object_id: ObjectID, @@ -2153,7 +2177,8 @@ pub enum TransactionFilter { }, /// Query by input object. InputObject(ObjectID), - /// Query by changed object, including created, mutated and unwrapped objects. + /// Query by changed object, including created, mutated and unwrapped + /// objects. ChangedObject(ObjectID), /// Query by sender address. 
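(Aside: to make the `SuiArgument` indexing above concrete, here is a tiny sketch, with hypothetical indices, of how `NestedResult` threads one of several Move-call return values into a later `TransferObjects` command:)

use sui_json_rpc_types::{SuiArgument, SuiCommand};

fn transfer_second_return_value() -> SuiCommand {
    // Assume command 0 is a Move call returning two values: NestedResult(0, 1)
    // selects its second return value, and Input(0) is the recipient address
    // taken from the transaction's input list.
    SuiCommand::TransferObjects(
        vec![SuiArgument::NestedResult(0, 1)],
        SuiArgument::Input(0),
    )
}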
FromAddress(SuiAddress), diff --git a/crates/sui-json-rpc-types/src/unit_tests/rpc_types_tests.rs b/crates/sui-json-rpc-types/src/unit_tests/rpc_types_tests.rs index 6bff1ce593a..a51264d72d6 100644 --- a/crates/sui-json-rpc-types/src/unit_tests/rpc_types_tests.rs +++ b/crates/sui-json-rpc-types/src/unit_tests/rpc_types_tests.rs @@ -4,17 +4,19 @@ use std::str::FromStr; use anyhow::anyhow; -use move_core_types::annotated_value::{MoveStruct, MoveValue}; -use move_core_types::ident_str; -use move_core_types::identifier::Identifier; -use move_core_types::language_storage::{StructTag, TypeTag}; +use move_core_types::{ + annotated_value::{MoveStruct, MoveValue}, + ident_str, + identifier::Identifier, + language_storage::{StructTag, TypeTag}, +}; use serde_json::json; - -use sui_types::base_types::{ObjectDigest, SequenceNumber}; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::gas_coin::GasCoin; -use sui_types::object::{MoveObject, Owner}; -use sui_types::{parse_sui_struct_tag, MOVE_STDLIB_ADDRESS, SUI_FRAMEWORK_ADDRESS}; +use sui_types::{ + base_types::{ObjectDigest, ObjectID, SequenceNumber, SuiAddress}, + gas_coin::GasCoin, + object::{MoveObject, Owner}, + parse_sui_struct_tag, MOVE_STDLIB_ADDRESS, SUI_FRAMEWORK_ADDRESS, +}; use crate::{ObjectChange, SuiMoveStruct, SuiMoveValue}; diff --git a/crates/sui-json-rpc/src/authority_state.rs b/crates/sui-json-rpc/src/authority_state.rs index d20295b53ed..2684b43a26c 100644 --- a/crates/sui-json-rpc/src/authority_state.rs +++ b/crates/sui-json-rpc/src/authority_state.rs @@ -1,51 +1,56 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + use anyhow::anyhow; use arc_swap::Guard; use async_trait::async_trait; +#[cfg(test)] +use mockall::automock; use move_core_types::language_storage::TypeTag; -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use sui_core::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use sui_core::authority::AuthorityState; -use sui_core::execution_cache::ExecutionCacheRead; -use sui_core::subscription_handler::SubscriptionHandler; +use sui_core::{ + authority::{authority_per_epoch_store::AuthorityPerEpochStore, AuthorityState}, + execution_cache::ExecutionCacheRead, + subscription_handler::SubscriptionHandler, +}; use sui_json_rpc_types::{ Coin as SuiCoin, DevInspectResults, DryRunTransactionBlockResponse, EventFilter, SuiEvent, SuiObjectDataFilter, TransactionFilter, }; -use sui_storage::indexes::TotalBalance; -use sui_storage::key_value_store::{ - KVStoreCheckpointData, KVStoreTransactionData, TransactionKeyValueStore, - TransactionKeyValueStoreTrait, +use sui_storage::{ + indexes::TotalBalance, + key_value_store::{ + KVStoreCheckpointData, KVStoreTransactionData, TransactionKeyValueStore, + TransactionKeyValueStoreTrait, + }, }; -use sui_types::base_types::{ - MoveObjectType, ObjectID, ObjectInfo, ObjectRef, SequenceNumber, SuiAddress, +use sui_types::{ + base_types::{MoveObjectType, ObjectID, ObjectInfo, ObjectRef, SequenceNumber, SuiAddress}, + committee::{Committee, EpochId}, + digests::{ChainIdentifier, TransactionDigest, TransactionEventsDigest}, + dynamic_field::DynamicFieldInfo, + effects::TransactionEffects, + error::{SuiError, UserInputError}, + event::EventID, + governance::StakedSui, + messages_checkpoint::{ + CheckpointContents, CheckpointContentsDigest, CheckpointDigest, CheckpointSequenceNumber, + VerifiedCheckpoint, + }, + object::{Object, ObjectRead, PastObjectRead}, + 
storage::{BackingPackageStore, ObjectStore, WriteKind},
+    sui_serde::BigInt,
+    sui_system_state::SuiSystemState,
+    timelock::timelocked_staked_sui::TimelockedStakedSui,
+    transaction::{Transaction, TransactionData, TransactionKind},
+};
-use sui_types::committee::{Committee, EpochId};
-use sui_types::digests::{ChainIdentifier, TransactionDigest, TransactionEventsDigest};
-use sui_types::dynamic_field::DynamicFieldInfo;
-use sui_types::effects::TransactionEffects;
-use sui_types::error::{SuiError, UserInputError};
-use sui_types::event::EventID;
-use sui_types::governance::StakedSui;
-use sui_types::messages_checkpoint::{
-    CheckpointContents, CheckpointContentsDigest, CheckpointDigest, CheckpointSequenceNumber,
-    VerifiedCheckpoint,
-};
-use sui_types::object::{Object, ObjectRead, PastObjectRead};
-use sui_types::storage::{BackingPackageStore, ObjectStore, WriteKind};
-use sui_types::sui_serde::BigInt;
-use sui_types::sui_system_state::SuiSystemState;
-use sui_types::timelock::timelocked_staked_sui::TimelockedStakedSui;
-use sui_types::transaction::{Transaction, TransactionData, TransactionKind};
 use thiserror::Error;
 use tokio::task::JoinError;

-#[cfg(test)]
-use mockall::automock;
-
 use crate::ObjectProvider;

 pub type StateReadResult = Result;

@@ -583,8 +588,9 @@ impl StateRead for AuthorityState {
     }
 }

-/// This implementation allows `S` to be a dynamically sized type (DST) that implements ObjectProvider
-/// Valid as `S` is referenced only, and memory management is handled by `Arc`
+/// This implementation allows `S` to be a dynamically sized type (DST) that
+/// implements ObjectProvider. Valid as `S` is referenced only, and memory
+/// management is handled by `Arc`
 #[async_trait]
 impl ObjectProvider for Arc {
     type Error = StateReadError;
@@ -660,9 +666,10 @@ pub enum StateReadClientError {
 }

 /// `StateReadError` is the error type for callers to work with.
-/// It captures all possible errors that can occur while reading state, classifying them into two categories.
-/// Unless `StateReadError` is the final error state before returning to caller, the app may still want error context.
-/// This context is preserved in `Internal` and `Client` variants.
+/// It captures all possible errors that can occur while reading state,
+/// classifying them into two categories. Unless `StateReadError` is the final
+/// error state before returning to caller, the app may still want error
+/// context. This context is preserved in `Internal` and `Client` variants.
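(Aside: a sketch of what the two categories mean for a downstream caller. The variant names follow the doc comment above; the mapping itself is illustrative, not this crate's actual conversion to RPC errors:)

use crate::authority_state::{StateReadError, StateReadResult};

// Client errors surface as bad-request style messages; internal errors as
// opaque server-side failures, with the original context kept in the text.
fn classify<T>(result: StateReadResult<T>) -> Result<T, String> {
    result.map_err(|e| match e {
        StateReadError::Client(client) => format!("invalid request: {client}"),
        StateReadError::Internal(internal) => format!("internal error: {internal}"),
    })
}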
#[derive(Debug, Error)] pub enum StateReadError { // sui_json_rpc::Error will do the final conversion to generic error message diff --git a/crates/sui-json-rpc/src/axum_router.rs b/crates/sui-json-rpc/src/axum_router.rs index df71d811d25..d2139e4d9dd 100644 --- a/crates/sui-json-rpc/src/axum_router.rs +++ b/crates/sui-json-rpc/src/axum_router.rs @@ -3,23 +3,27 @@ use std::sync::Arc; -use axum::extract::Json; -use axum::extract::State; +use axum::extract::{Json, State}; use futures::StreamExt; use hyper::HeaderMap; -use jsonrpsee::core::server::helpers::BoundedSubscriptions; -use jsonrpsee::core::server::helpers::MethodResponse; -use jsonrpsee::core::server::helpers::MethodSink; -use jsonrpsee::core::server::rpc_module::MethodKind; -use jsonrpsee::server::logger::{self, TransportProtocol}; -use jsonrpsee::server::RandomIntegerIdProvider; -use jsonrpsee::types::error::{ErrorCode, BATCHES_NOT_SUPPORTED_CODE, BATCHES_NOT_SUPPORTED_MSG}; -use jsonrpsee::types::{ErrorObject, Id, InvalidRequest, Params, Request}; -use jsonrpsee::{core::server::rpc_module::Methods, server::logger::Logger}; +use jsonrpsee::{ + core::server::{ + helpers::{BoundedSubscriptions, MethodResponse, MethodSink}, + rpc_module::{MethodKind, Methods}, + }, + server::{ + logger::{self, Logger, TransportProtocol}, + RandomIntegerIdProvider, + }, + types::{ + error::{ErrorCode, BATCHES_NOT_SUPPORTED_CODE, BATCHES_NOT_SUPPORTED_MSG}, + ErrorObject, Id, InvalidRequest, Params, Request, + }, +}; use serde_json::value::RawValue; +use sui_json_rpc_api::CLIENT_TARGET_API_VERSION_HEADER; use crate::routing_layer::RpcRouter; -use sui_json_rpc_api::CLIENT_TARGET_API_VERSION_HEADER; pub const MAX_RESPONSE_SIZE: u32 = 2 << 30; @@ -203,8 +207,8 @@ async fn process_request( response } -/// Figure out if this is a sufficiently complete request that we can extract an [`Id`] out of, or just plain -/// unparsable garbage. +/// Figure out if this is a sufficiently complete request that we can extract an +/// [`Id`] out of, or just plain unparsable garbage. pub fn prepare_error(data: &str) -> (Id<'_>, ErrorCode) { match serde_json::from_str::(data) { Ok(InvalidRequest { id }) => (id, ErrorCode::InvalidRequest), @@ -254,7 +258,8 @@ pub mod ws { // A WebSocket handler that echos any message it receives. // - // This one we'll be integration testing so it can be written in the regular way. + // This one we'll be integration testing so it can be written in the regular + // way. pub async fn ws_json_rpc_upgrade( ws: WebSocketUpgrade, State(service): State>, diff --git a/crates/sui-json-rpc/src/balance_changes.rs b/crates/sui-json-rpc/src/balance_changes.rs index 91b4b3011ed..51973d77bfa 100644 --- a/crates/sui-json-rpc/src/balance_changes.rs +++ b/crates/sui-json-rpc/src/balance_changes.rs @@ -1,23 +1,26 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::{BTreeMap, HashMap, HashSet}; -use std::ops::Neg; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + ops::Neg, +}; use async_trait::async_trait; use move_core_types::language_storage::TypeTag; -use tokio::sync::RwLock; - use sui_json_rpc_types::BalanceChange; -use sui_types::base_types::{ObjectID, ObjectRef, SequenceNumber}; -use sui_types::coin::Coin; -use sui_types::digests::ObjectDigest; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; -use sui_types::execution_status::ExecutionStatus; -use sui_types::gas_coin::GAS; -use sui_types::object::{Object, Owner}; -use sui_types::storage::WriteKind; -use sui_types::transaction::InputObjectKind; +use sui_types::{ + base_types::{ObjectID, ObjectRef, SequenceNumber}, + coin::Coin, + digests::ObjectDigest, + effects::{TransactionEffects, TransactionEffectsAPI}, + execution_status::ExecutionStatus, + gas_coin::GAS, + object::{Object, Owner}, + storage::WriteKind, + transaction::InputObjectKind, +}; +use tokio::sync::RwLock; pub async fn get_balance_changes_from_effect, E>( object_provider: &P, diff --git a/crates/sui-json-rpc/src/coin_api.rs b/crates/sui-json-rpc/src/coin_api.rs index fafb06d4bfd..68b02744e6e 100644 --- a/crates/sui-json-rpc/src/coin_api.rs +++ b/crates/sui-json-rpc/src/coin_api.rs @@ -1,40 +1,37 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use async_trait::async_trait; -use cached::proc_macro::cached; -use cached::SizedCache; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; +use cached::{proc_macro::cached, SizedCache}; +use jsonrpsee::{core::RpcResult, RpcModule}; +#[cfg(test)] +use mockall::automock; use move_core_types::language_storage::{StructTag, TypeTag}; -use sui_storage::indexes::TotalBalance; -use tap::TapFallible; -use tracing::{debug, info, instrument}; - use mysten_metrics::spawn_monitored_task; use sui_core::authority::AuthorityState; use sui_json_rpc_api::{cap_page_limit, CoinReadApiOpenRpc, CoinReadApiServer, JsonRpcMetrics}; -use sui_json_rpc_types::Balance; -use sui_json_rpc_types::{CoinPage, SuiCoinMetadata}; +use sui_json_rpc_types::{Balance, CoinPage, SuiCoinMetadata}; use sui_open_rpc::Module; -use sui_storage::key_value_store::TransactionKeyValueStore; -use sui_types::balance::Supply; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::coin::{CoinMetadata, TreasuryCap}; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::gas_coin::{GAS, TOTAL_SUPPLY_MIST}; -use sui_types::object::Object; -use sui_types::parse_sui_struct_tag; - -#[cfg(test)] -use mockall::automock; +use sui_storage::{indexes::TotalBalance, key_value_store::TransactionKeyValueStore}; +use sui_types::{ + balance::Supply, + base_types::{ObjectID, SuiAddress}, + coin::{CoinMetadata, TreasuryCap}, + effects::TransactionEffectsAPI, + gas_coin::{GAS, TOTAL_SUPPLY_MIST}, + object::Object, + parse_sui_struct_tag, +}; +use tap::TapFallible; +use tracing::{debug, info, instrument}; -use crate::authority_state::StateRead; -use crate::error::{Error, RpcInterimResult, SuiRpcInputError}; -use crate::{with_tracing, SuiRpcModule}; +use crate::{ + authority_state::StateRead, + error::{Error, RpcInterimResult, SuiRpcInputError}, + with_tracing, SuiRpcModule, +}; pub fn parse_to_struct_tag(coin_type: &str) -> Result { parse_sui_struct_tag(coin_type) @@ -95,7 +92,8 @@ impl CoinReadApiServer for CoinReadApi { 
let cursor = match cursor {
             Some(c) => (coin_type_tag.to_string(), c),
-            // If cursor is not specified, we need to start from the beginning of the coin type, which is the minimal possible ObjectID.
+            // If cursor is not specified, we need to start from the beginning of the coin type,
+            // which is the minimal possible ObjectID.
             None => (coin_type_tag.to_string(), ObjectID::ZERO),
         };

@@ -277,8 +275,8 @@ async fn find_package_object_id(
         .await?
 }

-/// CoinReadInternal trait to capture logic of interactions with AuthorityState and metrics
-/// This allows us to also mock internal implementation for testing
+/// CoinReadInternal trait to capture logic of interactions with AuthorityState
+/// and metrics. This allows us to also mock internal implementation for testing
 #[cfg_attr(test, automock)]
 #[async_trait]
 pub trait CoinReadInternal {
@@ -398,33 +396,37 @@ impl CoinReadInternal for CoinReadInternalImpl {

 #[cfg(test)]
 mod tests {
-    use super::*;
-    use crate::authority_state::{MockStateRead, StateReadError};
     use expect_test::expect;
     use jsonrpsee::types::ErrorObjectOwned;
-    use mockall::mock;
-    use mockall::predicate;
-    use move_core_types::account_address::AccountAddress;
-    use move_core_types::language_storage::StructTag;
+    use mockall::{mock, predicate};
+    use move_core_types::{account_address::AccountAddress, language_storage::StructTag};
     use sui_json_rpc_types::Coin;
-    use sui_storage::key_value_store::{
-        KVStoreCheckpointData, KVStoreTransactionData, TransactionKeyValueStoreTrait,
+    use sui_storage::{
+        key_value_store::{
+            KVStoreCheckpointData, KVStoreTransactionData, TransactionKeyValueStoreTrait,
+        },
+        key_value_store_metrics::KeyValueStoreMetrics,
     };
-    use sui_storage::key_value_store_metrics::KeyValueStoreMetrics;
-    use sui_types::balance::Supply;
-    use sui_types::base_types::{ObjectID, SequenceNumber, SuiAddress};
-    use sui_types::coin::TreasuryCap;
-    use sui_types::digests::{ObjectDigest, TransactionDigest, TransactionEventsDigest};
-    use sui_types::effects::TransactionEffects;
-    use sui_types::error::{SuiError, SuiResult};
-    use sui_types::gas_coin::GAS;
-    use sui_types::id::UID;
-    use sui_types::messages_checkpoint::{
-        CheckpointContentsDigest, CheckpointDigest, CheckpointSequenceNumber,
+    use sui_types::{
+        balance::Supply,
+        base_types::{ObjectID, SequenceNumber, SuiAddress},
+        coin::TreasuryCap,
+        digests::{ObjectDigest, TransactionDigest, TransactionEventsDigest},
+        effects::TransactionEffects,
+        error::{SuiError, SuiResult},
+        gas_coin::GAS,
+        id::UID,
+        messages_checkpoint::{
+            CheckpointContentsDigest, CheckpointDigest, CheckpointSequenceNumber,
+        },
+        object::Object,
+        parse_sui_struct_tag,
+        utils::create_fake_transaction,
+        TypeTag,
     };
-    use sui_types::object::Object;
-    use sui_types::utils::create_fake_transaction;
-    use sui_types::{parse_sui_struct_tag, TypeTag};
+
+    use super::*;
+    use crate::authority_state::{MockStateRead, StateReadError};

     mock! {
         pub KeyValueStore {}
@@ -558,10 +560,10 @@ mod tests {
     }

     mod get_coins_tests {
-        use super::super::*;
-        use super::*;
         use jsonrpsee::types::ErrorObjectOwned;

+        use super::{super::*, *};
+
         // Success scenarios
         #[tokio::test]
         async fn test_gas_coin_no_cursor() {
@@ -728,7 +730,9 @@ mod tests {
             let error_object: ErrorObjectOwned = error_result.into();
             let expected = expect!["-32602"];
             expected.assert_eq(&error_object.code().to_string());
-            let expected = expect!["Invalid struct type: 0x2::invalid::struct::tag. Got error: Expected end of token stream. 
Got: ::"]; + let expected = expect![ + "Invalid struct type: 0x2::invalid::struct::tag. Got error: Expected end of token stream. Got: ::" + ]; expected.assert_eq(error_object.message()); } @@ -811,8 +815,7 @@ mod tests { mod get_all_coins_tests { use sui_types::object::{MoveObject, Owner}; - use super::super::*; - use super::*; + use super::{super::*, *}; // Success scenarios #[tokio::test] @@ -933,9 +936,9 @@ mod tests { } mod get_balance_tests { - use super::super::*; - use super::*; use jsonrpsee::types::ErrorObjectOwned; + + use super::{super::*, *}; // Success scenarios #[tokio::test] async fn test_gas_coin() { @@ -1023,7 +1026,9 @@ mod tests { let error_object: ErrorObjectOwned = error_result.into(); let expected = expect!["-32602"]; expected.assert_eq(&error_object.code().to_string()); - let expected = expect!["Invalid struct type: 0x2::invalid::struct::tag. Got error: Expected end of token stream. Got: ::"]; + let expected = expect![ + "Invalid struct type: 0x2::invalid::struct::tag. Got error: Expected end of token stream. Got: ::" + ]; expected.assert_eq(error_object.message()); } @@ -1056,7 +1061,8 @@ mod tests { #[tokio::test] async fn test_get_balance_execution_error() { - // Validate that we handle and return an error message when we encounter an unexpected error + // Validate that we handle and return an error message when we encounter an + // unexpected error let owner = get_test_owner(); let coin_type = get_test_coin_type(get_test_package_id()); let mut mock_state = MockStateRead::new(); @@ -1082,10 +1088,10 @@ mod tests { } mod get_all_balances_tests { - use super::super::*; - use super::*; use jsonrpsee::types::ErrorObjectOwned; + use super::{super::*, *}; + // Success scenarios #[tokio::test] async fn test_success_scenario() { @@ -1134,7 +1140,8 @@ mod tests { locked_balance: Default::default(), }, ]; - // This is because the underlying result is a hashmap, so order is not guaranteed + // This is because the underlying result is a hashmap, so order is not + // guaranteed let mut result = response.unwrap(); for item in expected_result { if let Some(pos) = result.iter().position(|i| *i == item) { @@ -1172,11 +1179,11 @@ mod tests { } mod get_coin_metadata_tests { - use super::super::*; - use super::*; use mockall::predicate; use sui_types::id::UID; + use super::{super::*, *}; + // Success scenarios #[tokio::test] async fn test_valid_coin_metadata_object() { @@ -1278,11 +1285,11 @@ mod tests { } mod get_total_supply_tests { - use super::super::*; - use super::*; use mockall::predicate; use sui_types::id::UID; + use super::{super::*, *}; + #[tokio::test] async fn test_success_response_for_gas_coin() { let coin_type = "0x2::sui::SUI"; @@ -1349,7 +1356,9 @@ mod tests { let error_object: ErrorObjectOwned = error_result.into(); let expected = expect!["-32602"]; expected.assert_eq(&error_object.code().to_string()); - let expected = expect!["Cannot find object [0x2::coin::TreasuryCap<0xf::test_coin::TEST_COIN>] from [0x000000000000000000000000000000000000000000000000000000000000000f] package event."]; + let expected = expect![ + "Cannot find object [0x2::coin::TreasuryCap<0xf::test_coin::TEST_COIN>] from [0x000000000000000000000000000000000000000000000000000000000000000f] package event." 
+ ]; expected.assert_eq(error_object.message()); } @@ -1391,7 +1400,9 @@ mod tests { error_object.code(), jsonrpsee::types::error::CALL_EXECUTION_FAILED_CODE ); - let expected = expect!["Failure deserializing object in the requested format: \"Unable to deserialize TreasuryCap object: remaining input\""]; + let expected = expect![ + "Failure deserializing object in the requested format: \"Unable to deserialize TreasuryCap object: remaining input\"" + ]; expected.assert_eq(error_object.message()); } } diff --git a/crates/sui-json-rpc/src/error.rs b/crates/sui-json-rpc/src/error.rs index a1b88018594..ed4e39e30cc 100644 --- a/crates/sui-json-rpc/src/error.rs +++ b/crates/sui-json-rpc/src/error.rs @@ -1,21 +1,27 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::collections::BTreeMap; + use fastcrypto::error::FastCryptoError; use hyper::header::InvalidHeaderValue; use itertools::Itertools; -use jsonrpsee::core::Error as RpcError; -use jsonrpsee::types::error::{CallError, INTERNAL_ERROR_CODE}; -use jsonrpsee::types::ErrorObject; -use std::collections::BTreeMap; +use jsonrpsee::{ + core::Error as RpcError, + types::{ + error::{CallError, INTERNAL_ERROR_CODE}, + ErrorObject, + }, +}; use sui_json_rpc_api::{TRANSACTION_EXECUTION_CLIENT_ERROR_CODE, TRANSIENT_ERROR_CODE}; -use sui_types::error::{SuiError, SuiObjectResponseError, UserInputError}; -use sui_types::quorum_driver_types::QuorumDriverError; +use sui_types::{ + error::{SuiError, SuiObjectResponseError, UserInputError}, + quorum_driver_types::QuorumDriverError, +}; use thiserror::Error; use tokio::task::JoinError; -use crate::authority_state::StateReadError; -use crate::name_service::NameServiceError; +use crate::{authority_state::StateReadError, name_service::NameServiceError}; pub type RpcInterimResult = Result; @@ -134,7 +140,8 @@ impl From for RpcError { match err { QuorumDriverError::InvalidUserSignature(err) => { let inner_error_str = match err { - // TODO(wlmyng): update SuiError display trait to render UserInputError with display + // TODO(wlmyng): update SuiError display trait to render UserInputError + // with display SuiError::UserInputError { error } => error.to_string(), _ => err.to_string(), }; @@ -168,10 +175,9 @@ impl From for RpcError { retried_tx_success, } => { let error_message = format!( - "Failed to sign transaction by a quorum of validators because of locked objects. Retried a conflicting transaction {:?}, success: {:?}", - retried_tx, - retried_tx_success - ); + "Failed to sign transaction by a quorum of validators because of locked objects. 
Retried a conflicting transaction {:?}, success: {:?}",
+                    retried_tx, retried_tx_success
+                );

                 let new_map = conflicting_txes
                     .into_iter()
@@ -193,7 +199,8 @@
             QuorumDriverError::NonRecoverableTransactionError { errors } => {
                 let new_errors: Vec = errors
                     .into_iter()
-                    // sort by total stake, descending, so users see the most prominent one first
+                    // sort by total stake, descending, so users see the most prominent one
+                    // first
                     .sorted_by(|(_, a, _), (_, b, _)| b.cmp(a))
                     .filter_map(|(err, _, _)| {
                         match &err {
@@ -225,7 +232,10 @@
                 );

                 let error_list = new_errors.join(", ");
-                let error_msg = format!("Transaction execution failed due to issues with transaction inputs, please review the errors and try again: {}.", error_list);
+                let error_msg = format!(
+                    "Transaction execution failed due to issues with transaction inputs, please review the errors and try again: {}.",
+                    error_list
+                );

                 let error_object = ErrorObject::owned(
                     TRANSACTION_EXECUTION_CLIENT_ERROR_CODE,
@@ -269,7 +279,9 @@ pub enum SuiRpcInputError {
     #[error("{0}")]
     GenericInvalid(String),

-    #[error("request_type` must set to `None` or `WaitForLocalExecution` if effects is required in the response")]
+    #[error(
+        "`request_type` must be set to `None` or `WaitForLocalExecution` if effects is required in the response"
+    )]
     InvalidExecuteTransactionRequestType,

     #[error("Unsupported protocol version requested. Min supported: {0}, max supported: {1}")]
@@ -302,18 +314,16 @@ impl From for RpcError {

 #[cfg(test)]
 mod tests {
-    use super::*;
     use expect_test::expect;
     use jsonrpsee::types::ErrorObjectOwned;
-    use sui_types::base_types::AuthorityName;
-    use sui_types::base_types::ObjectID;
-    use sui_types::base_types::ObjectRef;
-    use sui_types::base_types::SequenceNumber;
-    use sui_types::committee::StakeUnit;
-    use sui_types::crypto::AuthorityPublicKey;
-    use sui_types::crypto::AuthorityPublicKeyBytes;
-    use sui_types::digests::ObjectDigest;
-    use sui_types::digests::TransactionDigest;
+    use sui_types::{
+        base_types::{AuthorityName, ObjectID, ObjectRef, SequenceNumber},
+        committee::StakeUnit,
+        crypto::{AuthorityPublicKey, AuthorityPublicKeyBytes},
+        digests::{ObjectDigest, TransactionDigest},
+    };
+
+    use super::*;

     fn test_object_ref() -> ObjectRef {
         (
@@ -399,7 +409,9 @@ mod tests {
         let error_object: ErrorObjectOwned = rpc_error.into();
         let expected_code = expect!["-32002"];
         expected_code.assert_eq(&error_object.code().to_string());
-        let expected_message = expect!["Failed to sign transaction by a quorum of validators because of locked objects. Retried a conflicting transaction Some(TransactionDigest(11111111111111111111111111111111)), success: Some(true)"];
+        let expected_message = expect![
+            "Failed to sign transaction by a quorum of validators because of locked objects. 
Retried a conflicting transaction Some(TransactionDigest(11111111111111111111111111111111)), success: Some(true)" + ]; expected_message.assert_eq(error_object.message()); let expected_data = expect![[ r#"{"11111111111111111111111111111111":[["0x0000000000000000000000000000000000000000000000000000000000000000",0,"11111111111111111111111111111111"]]}"# @@ -440,8 +452,9 @@ mod tests { let error_object: ErrorObjectOwned = rpc_error.into(); let expected_code = expect!["-32002"]; expected_code.assert_eq(&error_object.code().to_string()); - let expected_message = - expect!["Transaction execution failed due to issues with transaction inputs, please review the errors and try again: Balance of gas object 10 is lower than the needed amount: 100., Object (0x0000000000000000000000000000000000000000000000000000000000000000, SequenceNumber(0), o#11111111111111111111111111111111) is not available for consumption, its current version: SequenceNumber(10).."]; + let expected_message = expect![ + "Transaction execution failed due to issues with transaction inputs, please review the errors and try again: Balance of gas object 10 is lower than the needed amount: 100., Object (0x0000000000000000000000000000000000000000000000000000000000000000, SequenceNumber(0), o#11111111111111111111111111111111) is not available for consumption, its current version: SequenceNumber(10).." + ]; expected_message.assert_eq(error_object.message()); } @@ -472,8 +485,9 @@ mod tests { let error_object: ErrorObjectOwned = rpc_error.into(); let expected_code = expect!["-32002"]; expected_code.assert_eq(&error_object.code().to_string()); - let expected_message = - expect!["Transaction execution failed due to issues with transaction inputs, please review the errors and try again: Could not find the referenced object 0x0000000000000000000000000000000000000000000000000000000000000000 at version None.."]; + let expected_message = expect![ + "Transaction execution failed due to issues with transaction inputs, please review the errors and try again: Could not find the referenced object 0x0000000000000000000000000000000000000000000000000000000000000000 at version None.." + ]; expected_message.assert_eq(error_object.message()); } @@ -503,7 +517,9 @@ mod tests { let error_object: ErrorObjectOwned = rpc_error.into(); let expected_code = expect!["-32050"]; expected_code.assert_eq(&error_object.code().to_string()); - let expected_message = expect!["Transaction is not processed because 10 of validators by stake are overloaded with certificates pending execution."]; + let expected_message = expect![ + "Transaction is not processed because 10 of validators by stake are overloaded with certificates pending execution." + ]; expected_message.assert_eq(error_object.message()); } } diff --git a/crates/sui-json-rpc/src/governance_api.rs b/crates/sui-json-rpc/src/governance_api.rs index 17c4b2d8369..ac525786509 100644 --- a/crates/sui-json-rpc/src/governance_api.rs +++ b/crates/sui-json-rpc/src/governance_api.rs @@ -1,43 +1,42 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::cmp::max; -use std::collections::BTreeMap; -use std::sync::Arc; +use std::{cmp::max, collections::BTreeMap, sync::Arc}; use async_trait::async_trait; -use cached::proc_macro::cached; -use cached::SizedCache; +use cached::{proc_macro::cached, SizedCache}; use itertools::Itertools; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; -use sui_types::timelock::timelocked_staked_sui::TimelockedStakedSui; -use tracing::{info, instrument}; - +use jsonrpsee::{core::RpcResult, RpcModule}; use mysten_metrics::spawn_monitored_task; use sui_core::authority::AuthorityState; use sui_json_rpc_api::{GovernanceReadApiOpenRpc, GovernanceReadApiServer, JsonRpcMetrics}; use sui_json_rpc_types::{ - DelegatedStake, DelegatedTimelockedStake, Stake, StakeStatus, TimelockedStake, + DelegatedStake, DelegatedTimelockedStake, Stake, StakeStatus, SuiCommittee, TimelockedStake, + ValidatorApy, ValidatorApys, }; -use sui_json_rpc_types::{SuiCommittee, ValidatorApy, ValidatorApys}; use sui_open_rpc::Module; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::committee::EpochId; -use sui_types::dynamic_field::get_dynamic_field_from_store; -use sui_types::error::{SuiError, UserInputError}; -use sui_types::governance::StakedSui; -use sui_types::id::ID; -use sui_types::object::{Object, ObjectRead}; -use sui_types::sui_serde::BigInt; -use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; -use sui_types::sui_system_state::PoolTokenExchangeRate; -use sui_types::sui_system_state::SuiSystemStateTrait; -use sui_types::sui_system_state::{get_validator_from_table, SuiSystemState}; - -use crate::authority_state::StateRead; -use crate::error::{Error, RpcInterimResult, SuiRpcInputError}; -use crate::{with_tracing, ObjectProvider, SuiRpcModule}; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + committee::EpochId, + dynamic_field::get_dynamic_field_from_store, + error::{SuiError, UserInputError}, + governance::StakedSui, + id::ID, + object::{Object, ObjectRead}, + sui_serde::BigInt, + sui_system_state::{ + get_validator_from_table, sui_system_state_summary::SuiSystemStateSummary, + PoolTokenExchangeRate, SuiSystemState, SuiSystemStateTrait, + }, + timelock::timelocked_staked_sui::TimelockedStakedSui, +}; +use tracing::{info, instrument}; + +use crate::{ + authority_state::StateRead, + error::{Error, RpcInterimResult, SuiRpcInputError}, + with_tracing, ObjectProvider, SuiRpcModule, +}; #[derive(Clone)] pub struct GovernanceReadApi { @@ -167,8 +166,10 @@ impl GovernanceReadApi { let _timer = self.metrics.get_delegated_sui_latency.start_timer(); let self_clone = self.clone(); - spawn_monitored_task!(self_clone - .get_delegated_timelocked_stakes(stakes.into_iter().map(|s| (s, true)).collect())) + spawn_monitored_task!( + self_clone + .get_delegated_timelocked_stakes(stakes.into_iter().map(|s| (s, true)).collect()) + ) .await? } @@ -533,8 +534,9 @@ fn stake_status( } } -/// Cached exchange rates for validators for the given epoch, the cache size is 1, it will be cleared when the epoch changes. -/// rates are in descending order by epoch. +/// Cached exchange rates for validators for the given epoch, the cache size is +/// 1; it will be cleared when the epoch changes. Rates are in descending order +/// by epoch.
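A minimal, self-contained sketch of the caching pattern the doc comment above describes, using the `cached` crate's proc macro the same way this module does: a `SizedCache` of size 1 keyed by epoch, so a request for a new epoch evicts the previous entry. The function name and the key and value types here are assumptions for illustration, not the crate's actual signature.

use cached::{proc_macro::cached, SizedCache};

// Size-1 cache keyed by epoch: inserting rates for a new epoch evicts the
// previous epoch's entry, i.e. the cache is "cleared when the epoch changes".
#[cached(
    type = "SizedCache<u64, Vec<(u64, f64)>>",
    create = "{ SizedCache::with_size(1) }",
    convert = r#"{ epoch }"#,
    result = true
)]
fn exchange_rates_for_epoch(epoch: u64) -> Result<Vec<(u64, f64)>, String> {
    // Hypothetical expensive lookup; the real function reads the validator
    // exchange rate tables from the authority state.
    Ok(vec![(epoch, 1.0)])
}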
#[cached( type = "SizedCache>", create = "{ SizedCache::with_size(1) }", diff --git a/crates/sui-json-rpc/src/indexer_api.rs b/crates/sui-json-rpc/src/indexer_api.rs index 08ba5f8d5b0..c15908b30c9 100644 --- a/crates/sui-json-rpc/src/indexer_api.rs +++ b/crates/sui-json-rpc/src/indexer_api.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + use anyhow::bail; use async_trait::async_trait; use futures::{future, Stream}; @@ -13,7 +15,6 @@ use move_bytecode_utils::layout::TypeLayoutBuilder; use move_core_types::language_storage::TypeTag; use mysten_metrics::spawn_monitored_task; use serde::Serialize; -use std::sync::Arc; use sui_core::authority::AuthorityState; use sui_json::SuiJsonValue; use sui_json_rpc_api::{ @@ -155,7 +156,8 @@ impl IndexerApiServer for IndexerApi { .get_owner_objects_with_limit(address, cursor, limit + 1, filter) .map_err(Error::from)?; - // objects here are of size (limit + 1), where the last one is the cursor for the next page + // objects here are of size (limit + 1), where the last one is the cursor for + // the next page let has_next_page = objects.len() > limit; objects.truncate(limit); let next_cursor = objects @@ -398,22 +400,24 @@ impl IndexerApiServer for IndexerApi { requests.push(self.state.get_object(&parent_record_id)); } - // Couldn't find a `multi_get_object` for this crate (looks like it uses a k,v db) - // Always fetching both parent + child at the same time (even for node subdomains), - // to avoid sequential db reads. We do this because we do not know if the requested - // domain is a node subdomain or a leaf subdomain, and we can save a trip to the db. + // Couldn't find a `multi_get_object` for this crate (looks like it uses a k,v + // db). Always fetching both parent + child at the same time (even + // for node subdomains), to avoid sequential db reads. We do this + // because we do not know if the requested domain is a node + // subdomain or a leaf subdomain, and we can save a trip to the db. let mut results = future::try_join_all(requests).await?; - // Removing without checking vector len, since it is known (== 1 or 2 depending on whether - // it is a subdomain or not). + // Removing without checking vector len, since it is known (== 1 or 2 depending + // on whether it is a subdomain or not). let Some(object) = results.remove(0) else { return Ok(None); }; let name_record = NameRecord::try_from(object)?; - // Handling SLD names & node subdomains is the same (we handle them as `node` records) - // We check their expiration, and and if not expired, return the target address. + // Handling SLD names & node subdomains is the same (we handle them as `node` + // records). We check their expiration, and if not expired, + // return the target address. if !name_record.is_leaf_record() { return if !name_record.is_node_expired(current_timestamp_ms) { Ok(name_record.target_address) diff --git a/crates/sui-json-rpc/src/lib.rs b/crates/sui-json-rpc/src/lib.rs index 6cd43ad722f..7da266f82b4 100644 --- a/crates/sui-json-rpc/src/lib.rs +++ b/crates/sui-json-rpc/src/lib.rs @@ -1,32 +1,28 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use std::env; -use std::net::SocketAddr; -use std::str::FromStr; - -use hyper::header::HeaderName; -use hyper::header::HeaderValue; -use hyper::Body; -use hyper::Method; -use hyper::Request; -use jsonrpsee::RpcModule; -use prometheus::Registry; -use tokio::runtime::Handle; -use tower_http::cors::{AllowOrigin, CorsLayer}; -use tower_http::trace::TraceLayer; -use tracing::info; +use std::{env, net::SocketAddr, str::FromStr}; pub use balance_changes::*; +use hyper::{ + header::{HeaderName, HeaderValue}, + Body, Method, Request, +}; +use jsonrpsee::RpcModule; pub use object_changes::*; +use prometheus::Registry; use sui_json_rpc_api::{ CLIENT_SDK_TYPE_HEADER, CLIENT_SDK_VERSION_HEADER, CLIENT_TARGET_API_VERSION_HEADER, }; use sui_open_rpc::{Module, Project}; +use tokio::runtime::Handle; +use tower_http::{ + cors::{AllowOrigin, CorsLayer}, + trace::TraceLayer, +}; +use tracing::info; -use crate::error::Error; -use crate::metrics::MetricsLogger; -use crate::routing_layer::RpcRouter; +use crate::{error::Error, metrics::MetricsLogger, routing_layer::RpcRouter}; pub mod authority_state; pub mod axum_router; diff --git a/crates/sui-json-rpc/src/metrics.rs b/crates/sui-json-rpc/src/metrics.rs index d4d69330190..fd2889f142b 100644 --- a/crates/sui-json-rpc/src/metrics.rs +++ b/crates/sui-json-rpc/src/metrics.rs @@ -1,18 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use hyper::body::HttpBody; -use std::collections::HashSet; -use std::net::SocketAddr; +use std::{collections::HashSet, net::SocketAddr}; -use jsonrpsee::server::logger::{HttpRequest, Logger, MethodKind, TransportProtocol}; -use jsonrpsee::types::Params; +use hyper::body::HttpBody; +use jsonrpsee::{ + server::logger::{HttpRequest, Logger, MethodKind, TransportProtocol}, + types::Params, +}; use prometheus::{ register_histogram_vec_with_registry, register_int_counter_vec_with_registry, register_int_gauge_vec_with_registry, HistogramVec, IntCounterVec, IntGaugeVec, }; -use sui_json_rpc_api::TRANSIENT_ERROR_CODE; -use sui_json_rpc_api::{CLIENT_SDK_TYPE_HEADER, CLIENT_TARGET_API_VERSION_HEADER}; +use sui_json_rpc_api::{ + CLIENT_SDK_TYPE_HEADER, CLIENT_TARGET_API_VERSION_HEADER, TRANSIENT_ERROR_CODE, +}; use tokio::time::Instant; const SPAM_LABEL: &str = "SPAM"; @@ -24,7 +26,8 @@ const LATENCY_SEC_BUCKETS: &[f64] = &[ pub struct Metrics { /// Counter of requests, route is a label (ie separate timeseries per route) requests_by_route: IntCounterVec, - /// Gauge of inflight requests, route is a label (ie separate timeseries per route) + /// Gauge of inflight requests, route is a label (ie separate timeseries per + /// route) inflight_requests_by_route: IntGaugeVec, /// Request latency, route is a label req_latency_by_route: HistogramVec, diff --git a/crates/sui-json-rpc/src/move_utils.rs b/crates/sui-json-rpc/src/move_utils.rs index ff94b3fecc7..24e79b05766 100644 --- a/crates/sui-json-rpc/src/move_utils.rs +++ b/crates/sui-json-rpc/src/move_utils.rs @@ -1,12 +1,10 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::authority_state::StateRead; -use crate::error::{Error, SuiRpcInputError}; -use crate::{with_tracing, SuiRpcModule}; +use std::{collections::BTreeMap, sync::Arc}; + use async_trait::async_trait; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; +use jsonrpsee::{core::RpcResult, RpcModule}; #[cfg(test)] use mockall::automock; use move_binary_format::{ @@ -14,8 +12,6 @@ use move_binary_format::{ normalized::{Module as NormalizedModule, Type}, }; use move_core_types::identifier::Identifier; -use std::collections::BTreeMap; -use std::sync::Arc; use sui_core::authority::AuthorityState; use sui_json_rpc_api::{MoveUtilsOpenRpc, MoveUtilsServer}; use sui_json_rpc_types::{ @@ -23,12 +19,20 @@ use sui_json_rpc_types::{ SuiMoveNormalizedStruct, }; use sui_open_rpc::Module; -use sui_types::base_types::ObjectID; -use sui_types::move_package::normalize_modules; -use sui_types::object::{Data, ObjectRead}; +use sui_types::{ + base_types::ObjectID, + move_package::normalize_modules, + object::{Data, ObjectRead}, +}; use tap::TapFallible; use tracing::{error, instrument, warn}; +use crate::{ + authority_state::StateRead, + error::{Error, SuiRpcInputError}, + with_tracing, SuiRpcModule, +}; + #[cfg_attr(test, automock)] #[async_trait] pub trait MoveUtilsInternalTrait { @@ -91,8 +95,8 @@ impl MoveUtilsInternalTrait for MoveUtilsInternal { ObjectRead::Exists(_obj_ref, object, _layout) => { match object.into_inner().data { Data::Package(p) => { - // we are on the read path - it's OK to use VERSION_MAX of the supported Move - // binary format + // we are on the read path - it's OK to use VERSION_MAX of the supported + // Move binary format let binary_config = BinaryConfig::with_extraneous_bytes_check(false); normalize_modules( p.serialized_module_map().values(), @@ -229,8 +233,8 @@ impl MoveUtilsServer for MoveUtils { let normalized = match object_read { ObjectRead::Exists(_obj_ref, object, _layout) => match object.into_inner().data { Data::Package(p) => { - // we are on the read path - it's OK to use VERSION_MAX of the supported Move - // binary format + // we are on the read path - it's OK to use VERSION_MAX of the supported + // Move binary format let binary_config = BinaryConfig::with_extraneous_bytes_check(false); normalize_modules(p.serialized_module_map().values(), &binary_config) .map_err(Error::from) @@ -284,10 +288,11 @@ impl MoveUtilsServer for MoveUtils { mod tests { mod get_normalized_move_module_tests { - use super::super::*; use jsonrpsee::types::ErrorObjectOwned; use move_binary_format::file_format::basic_test_module; + use super::super::*; + fn setup() -> (ObjectID, String) { (ObjectID::random(), String::from("test_module")) } diff --git a/crates/sui-json-rpc/src/name_service.rs b/crates/sui-json-rpc/src/name_service.rs index 2ef8637681c..127af11f73f 100644 --- a/crates/sui-json-rpc/src/name_service.rs +++ b/crates/sui-json-rpc/src/name_service.rs @@ -1,19 +1,18 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use move_core_types::ident_str; -use move_core_types::identifier::IdentStr; -use move_core_types::language_storage::StructTag; +use std::{fmt, marker::PhantomData, str::FromStr}; + +use move_core_types::{ident_str, identifier::IdentStr, language_storage::StructTag}; use serde::{Deserialize, Serialize}; -use std::fmt; -use std::marker::PhantomData; -use std::str::FromStr; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::collection_types::VecMap; -use sui_types::dynamic_field::Field; -use sui_types::id::{ID, UID}; -use sui_types::object::{MoveObject, Object}; -use sui_types::TypeTag; +use sui_types::{ + base_types::{ObjectID, SuiAddress}, + collection_types::VecMap, + dynamic_field::Field, + id::{ID, UID}, + object::{MoveObject, Object}, + TypeTag, +}; const NAME_SERVICE_DOMAIN_MODULE: &IdentStr = ident_str!("domain"); const NAME_SERVICE_DOMAIN_STRUCT: &IdentStr = ident_str!("Domain"); diff --git a/crates/sui-json-rpc/src/object_changes.rs b/crates/sui-json-rpc/src/object_changes.rs index c3fbdf26ad6..b6a661a2fa7 100644 --- a/crates/sui-json-rpc/src/object_changes.rs +++ b/crates/sui-json-rpc/src/object_changes.rs @@ -4,10 +4,12 @@ use std::collections::BTreeMap; use sui_json_rpc_types::ObjectChange; -use sui_types::base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}; -use sui_types::effects::ObjectRemoveKind; -use sui_types::object::Owner; -use sui_types::storage::WriteKind; +use sui_types::{ + base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}, + effects::ObjectRemoveKind, + object::Owner, + storage::WriteKind, +}; use crate::ObjectProvider; diff --git a/crates/sui-json-rpc/src/read_api.rs b/crates/sui-json-rpc/src/read_api.rs index 19e4b101569..7517c305a36 100644 --- a/crates/sui-json-rpc/src/read_api.rs +++ b/crates/sui-json-rpc/src/read_api.rs @@ -1,22 +1,19 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use anyhow::anyhow; use async_trait::async_trait; use futures::future::join_all; use indexmap::map::IndexMap; use itertools::Itertools; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; +use jsonrpsee::{core::RpcResult, RpcModule}; use move_bytecode_utils::module_cache::GetModule; -use move_core_types::annotated_value::{MoveStruct, MoveStructLayout, MoveValue}; -use move_core_types::language_storage::StructTag; -use tap::TapFallible; -use tracing::{debug, error, info, instrument, trace, warn}; - +use move_core_types::{ + annotated_value::{MoveStruct, MoveStructLayout, MoveValue}, + language_storage::StructTag, +}; use mysten_metrics::spawn_monitored_task; use sui_core::authority::AuthorityState; use sui_json_rpc_api::{ @@ -25,42 +22,44 @@ use sui_json_rpc_api::{ }; use sui_json_rpc_types::{ BalanceChange, Checkpoint, CheckpointId, CheckpointPage, DisplayFieldsResponse, EventFilter, - ObjectChange, ProtocolConfigResponse, SuiEvent, SuiGetPastObjectRequest, SuiMoveStruct, - SuiMoveValue, SuiObjectDataOptions, SuiObjectResponse, SuiPastObjectResponse, - SuiTransactionBlock, SuiTransactionBlockEvents, SuiTransactionBlockResponse, - SuiTransactionBlockResponseOptions, + ObjectChange, ProtocolConfigResponse, SuiEvent, SuiGetPastObjectRequest, SuiLoadedChildObject, + SuiLoadedChildObjectsResponse, SuiMoveStruct, SuiMoveValue, SuiObjectDataOptions, + SuiObjectResponse, SuiPastObjectResponse, SuiTransactionBlock, SuiTransactionBlockEvents, + SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; -use sui_json_rpc_types::{SuiLoadedChildObject, SuiLoadedChildObjectsResponse}; use sui_open_rpc::Module; use sui_protocol_config::{ProtocolConfig, ProtocolVersion}; use sui_storage::key_value_store::TransactionKeyValueStore; -use sui_types::base_types::{ObjectID, SequenceNumber, TransactionDigest}; -use sui_types::collection_types::VecMap; -use sui_types::crypto::AggregateAuthoritySignature; -use sui_types::digests::TransactionEventsDigest; -use sui_types::display::DisplayVersionUpdatedEvent; -use sui_types::effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}; -use sui_types::error::{SuiError, SuiObjectResponseError}; -use sui_types::messages_checkpoint::{ - CheckpointContents, CheckpointContentsDigest, CheckpointSequenceNumber, CheckpointSummary, - CheckpointTimestamp, +use sui_types::{ + base_types::{ObjectID, SequenceNumber, TransactionDigest}, + collection_types::VecMap, + crypto::AggregateAuthoritySignature, + digests::TransactionEventsDigest, + display::DisplayVersionUpdatedEvent, + effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}, + error::{SuiError, SuiObjectResponseError}, + messages_checkpoint::{ + CheckpointContents, CheckpointContentsDigest, CheckpointSequenceNumber, CheckpointSummary, + CheckpointTimestamp, + }, + object::{Object, ObjectRead, PastObjectRead}, + sui_serde::BigInt, + transaction::{Transaction, TransactionDataAPI}, }; -use sui_types::object::{Object, ObjectRead, PastObjectRead}; -use sui_types::sui_serde::BigInt; -use sui_types::transaction::Transaction; -use sui_types::transaction::TransactionDataAPI; - -use crate::authority_state::{StateRead, StateReadError, StateReadResult}; -use crate::error::{Error, RpcInterimResult, SuiRpcInputError}; -use crate::with_tracing; +use tap::TapFallible; +use tracing::{debug, error, info, instrument, trace, warn}; + use crate::{ - 
get_balance_changes_from_effect, get_object_changes, ObjectProviderCache, SuiRpcModule, + authority_state::{StateRead, StateReadError, StateReadResult}, + error::{Error, RpcInterimResult, SuiRpcInputError}, + get_balance_changes_from_effect, get_object_changes, with_tracing, ObjectProviderCache, + SuiRpcModule, }; const MAX_DISPLAY_NESTED_LEVEL: usize = 10; -// An implementation of the read portion of the JSON-RPC interface intended for use in -// Fullnodes. +// An implementation of the read portion of the JSON-RPC interface intended for +// use in Fullnodes. #[derive(Clone)] pub struct ReadApi { pub state: Arc, @@ -69,8 +68,8 @@ pub struct ReadApi { } // Internal data structure to make it easy to work with data returned from -// authority store and also enable code sharing between get_transaction_with_options, -// multi_get_transaction_with_options, etc. +// authority store and also enable code sharing between +// get_transaction_with_options, multi_get_transaction_with_options, etc. #[derive(Default)] struct IntermediateTransactionResponse { digest: TransactionDigest, @@ -309,7 +308,8 @@ impl ReadApi { .as_ref() .unwrap(), ) - // Safe to unwrap because checkpoint_seq is guaranteed to exist in checkpoint_to_timestamp + // Safe to unwrap because checkpoint_seq is guaranteed to exist in + // checkpoint_to_timestamp .unwrap(); } } @@ -361,7 +361,9 @@ impl ReadApi { Some(Ok(e)) => cache_entry.events = Some(e), Some(Err(e)) => cache_entry.errors.push(e.to_string()), None => { - error!("Failed to fetch events with event digest {event_digest:?} for txn {transaction_digest}"); + error!( + "Failed to fetch events with event digest {event_digest:?} for txn {transaction_digest}" + ); cache_entry.errors.push(format!( "Failed to fetch events with event digest {event_digest:?}", )) @@ -1290,7 +1292,7 @@ fn get_value_from_move_struct( return Err(Error::UnexpectedError(format!( "Unexpected move value type for field {}", var_name - )))? 
+ )))?; } } } diff --git a/crates/sui-json-rpc/src/routing_layer.rs b/crates/sui-json-rpc/src/routing_layer.rs index f885cb3bf11..4d63b50ea31 100644 --- a/crates/sui-json-rpc/src/routing_layer.rs +++ b/crates/sui-json-rpc/src/routing_layer.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use std::collections::{HashMap, HashSet}; + use sui_open_rpc::MethodRouting; #[derive(Debug, Clone)] diff --git a/crates/sui-json-rpc/src/transaction_builder_api.rs b/crates/sui-json-rpc/src/transaction_builder_api.rs index 8de7fe5813d..654672fcd56 100644 --- a/crates/sui-json-rpc/src/transaction_builder_api.rs +++ b/crates/sui-json-rpc/src/transaction_builder_api.rs @@ -5,26 +5,23 @@ use std::sync::Arc; use async_trait::async_trait; use fastcrypto::encoding::Base64; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; +use jsonrpsee::{core::RpcResult, RpcModule}; use move_core_types::language_storage::StructTag; - use sui_core::authority::AuthorityState; use sui_json::SuiJsonValue; use sui_json_rpc_api::{TransactionBuilderOpenRpc, TransactionBuilderServer}; -use sui_json_rpc_types::{RPCTransactionRequestParams, SuiObjectDataFilter}; use sui_json_rpc_types::{ - SuiObjectDataOptions, SuiObjectResponse, SuiTransactionBlockBuilderMode, SuiTypeTag, - TransactionBlockBytes, + RPCTransactionRequestParams, SuiObjectDataFilter, SuiObjectDataOptions, SuiObjectResponse, + SuiTransactionBlockBuilderMode, SuiTypeTag, TransactionBlockBytes, }; use sui_open_rpc::Module; use sui_transaction_builder::{DataReader, TransactionBuilder}; -use sui_types::base_types::ObjectInfo; -use sui_types::base_types::{ObjectID, SuiAddress}; -use sui_types::sui_serde::BigInt; +use sui_types::{ + base_types::{ObjectID, ObjectInfo, SuiAddress}, + sui_serde::BigInt, +}; -use crate::authority_state::StateRead; -use crate::SuiRpcModule; +use crate::{authority_state::StateRead, SuiRpcModule}; pub struct TransactionBuilderApi(TransactionBuilder); diff --git a/crates/sui-json-rpc/src/transaction_execution_api.rs b/crates/sui-json-rpc/src/transaction_execution_api.rs index 9a43c8e9d77..7199b341a9e 100644 --- a/crates/sui-json-rpc/src/transaction_execution_api.rs +++ b/crates/sui-json-rpc/src/transaction_execution_api.rs @@ -1,43 +1,42 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use fastcrypto::encoding::Base64; -use fastcrypto::traits::ToFromBytes; -use jsonrpsee::core::RpcResult; -use jsonrpsee::RpcModule; - +use fastcrypto::{encoding::Base64, traits::ToFromBytes}; +use jsonrpsee::{core::RpcResult, RpcModule}; use mysten_metrics::spawn_monitored_task; use shared_crypto::intent::{AppId, Intent, IntentMessage, IntentScope, IntentVersion}; -use sui_core::authority::AuthorityState; -use sui_core::authority_client::NetworkAuthorityClient; -use sui_core::transaction_orchestrator::TransactiondOrchestrator; +use sui_core::{ + authority::AuthorityState, authority_client::NetworkAuthorityClient, + transaction_orchestrator::TransactiondOrchestrator, +}; use sui_json_rpc_api::{JsonRpcMetrics, WriteApiOpenRpc, WriteApiServer}; use sui_json_rpc_types::{ DevInspectArgs, DevInspectResults, DryRunTransactionBlockResponse, SuiTransactionBlock, SuiTransactionBlockEvents, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; use sui_open_rpc::Module; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::default_hash; -use sui_types::digests::TransactionDigest; -use sui_types::effects::TransactionEffectsAPI; -use sui_types::quorum_driver_types::{ - ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, -}; -use sui_types::signature::GenericSignature; -use sui_types::sui_serde::BigInt; -use sui_types::transaction::{ - InputObjectKind, Transaction, TransactionData, TransactionDataAPI, TransactionKind, +use sui_types::{ + base_types::SuiAddress, + crypto::default_hash, + digests::TransactionDigest, + effects::TransactionEffectsAPI, + quorum_driver_types::{ + ExecuteTransactionRequest, ExecuteTransactionRequestType, ExecuteTransactionResponse, + }, + signature::GenericSignature, + sui_serde::BigInt, + transaction::{ + InputObjectKind, Transaction, TransactionData, TransactionDataAPI, TransactionKind, + }, }; use tracing::instrument; -use crate::authority_state::StateRead; -use crate::error::{Error, SuiRpcInputError}; use crate::{ + authority_state::StateRead, + error::{Error, SuiRpcInputError}, get_balance_changes_from_effect, get_object_changes, with_tracing, ObjectProviderCache, SuiRpcModule, }; diff --git a/crates/sui-json/src/lib.rs b/crates/sui-json/src/lib.rs index 3bf69b81a8a..6835d8206f9 100644 --- a/crates/sui-json/src/lib.rs +++ b/crates/sui-json/src/lib.rs @@ -1,9 +1,11 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::{BTreeMap, VecDeque}; -use std::fmt::{self, Debug, Formatter}; -use std::str::FromStr; +use std::{ + collections::{BTreeMap, VecDeque}, + fmt::{self, Debug, Formatter}, + str::FromStr, +}; use anyhow::{anyhow, bail}; use fastcrypto::encoding::{Encoding, Hex}; @@ -12,32 +14,31 @@ use move_binary_format::{ file_format::SignatureToken, }; use move_bytecode_utils::resolve_struct; -use move_core_types::account_address::AccountAddress; -use move_core_types::annotated_value::MoveFieldLayout; pub use move_core_types::annotated_value::MoveTypeLayout; -use move_core_types::identifier::IdentStr; -use move_core_types::u256::U256; use move_core_types::{ - annotated_value::{MoveStruct, MoveStructLayout, MoveValue}, + account_address::AccountAddress, + annotated_value::{MoveFieldLayout, MoveStruct, MoveStructLayout, MoveValue}, ident_str, - identifier::Identifier, + identifier::{IdentStr, Identifier}, language_storage::{StructTag, TypeTag}, runtime_value as R, + u256::U256, }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_json::{json, Number, Value as JsonValue}; - -use sui_types::base_types::{ - is_primitive_type_tag, ObjectID, SuiAddress, TxContext, TxContextKind, RESOLVED_ASCII_STR, - RESOLVED_STD_OPTION, RESOLVED_UTF8_STR, STD_ASCII_MODULE_NAME, STD_ASCII_STRUCT_NAME, - STD_OPTION_MODULE_NAME, STD_OPTION_STRUCT_NAME, STD_UTF8_MODULE_NAME, STD_UTF8_STRUCT_NAME, +use sui_types::{ + base_types::{ + is_primitive_type_tag, ObjectID, SuiAddress, TxContext, TxContextKind, RESOLVED_ASCII_STR, + RESOLVED_STD_OPTION, RESOLVED_UTF8_STR, STD_ASCII_MODULE_NAME, STD_ASCII_STRUCT_NAME, + STD_OPTION_MODULE_NAME, STD_OPTION_STRUCT_NAME, STD_UTF8_MODULE_NAME, STD_UTF8_STRUCT_NAME, + }, + id::{ID, RESOLVED_SUI_ID}, + move_package::MovePackage, + object::bounded_visitor::BoundedVisitor, + transfer::RESOLVED_RECEIVING_STRUCT, + MOVE_STDLIB_ADDRESS, }; -use sui_types::id::{ID, RESOLVED_SUI_ID}; -use sui_types::move_package::MovePackage; -use sui_types::object::bounded_visitor::BoundedVisitor; -use sui_types::transfer::RESOLVED_RECEIVING_STRUCT; -use sui_types::MOVE_STDLIB_ADDRESS; const HEX_PREFIX: &str = "0x"; @@ -245,7 +246,8 @@ impl SuiJsonValue { // Bool to Bool is simple (JsonValue::Bool(b), MoveTypeLayout::Bool) => R::MoveValue::Bool(*b), - // In constructor, we have already checked that the JSON number is unsigned int of at most U32 + // In constructor, we have already checked that the JSON number is an unsigned int of + // at most U32 (JsonValue::Number(n), MoveTypeLayout::U8) => match n.as_u64() { Some(x) => R::MoveValue::U8(u8::try_from(x)?), None => return Err(anyhow!("{} is not a valid number. Only u8 allowed.", n)), @@ -438,7 +440,8 @@ fn move_value_to_json(move_value: &MoveValue) -> Option { return None; } } - // We only care about values here, assuming struct type information is known at the client side. + // We only care about values here, assuming struct type information is known at the + // client side. MoveStruct { fields, .. } => { let fields = fields .iter() @@ -476,7 +479,8 @@ impl FromStr for SuiJsonValue { } json!(s) } - // if serde_json fails, the failure usually cause by missing quote escapes, try parse array manually. + // if serde_json fails, the failure is usually caused by missing quote escapes; + // try to parse the array manually.
SuiJsonValue::new(serde_json::from_str(s).unwrap_or_else(|_| try_escape_array(s))) } } @@ -492,7 +496,8 @@ enum ValidJsonType { } /// Check via BFS -/// The invariant is that all types at a given level must be the same or be empty, and all must be valid +/// The invariant is that all types at a given level must be the same or be +/// empty, and all must be valid pub fn check_valid_homogeneous(val: &JsonValue) -> Result<(), SuiJsonValueError> { let mut deq: VecDeque<&JsonValue> = VecDeque::new(); deq.push_back(val); @@ -500,7 +505,8 @@ pub fn check_valid_homogeneous(val: &JsonValue) -> Result<(), SuiJsonValueError> } /// Check via BFS -/// The invariant is that all types at a given level must be the same or be empty +/// The invariant is that all types at a given level must be the same or be +/// empty fn check_valid_homogeneous_rec(curr_q: &mut VecDeque<&JsonValue>) -> Result<(), SuiJsonValueError> { if curr_q.is_empty() { // Nothing to do @@ -527,7 +533,7 @@ fn check_valid_homogeneous_rec(curr_q: &mut VecDeque<&JsonValue>) -> Result<(), return Err(SuiJsonValueError::new( v, SuiJsonValueErrorKind::ValueTypeNotAllowed, - )) + )); } }; @@ -546,11 +552,12 @@ fn check_valid_homogeneous_rec(curr_q: &mut VecDeque<&JsonValue>) -> Result<(), check_valid_homogeneous_rec(&mut next_q) } -/// Checks if a give SignatureToken represents a primitive type and, if so, returns MoveTypeLayout -/// for this type (if available). The reason we need to return both information about whether a -/// SignatureToken represents a primitive and an Option representing MoveTypeLayout is that there -/// can be signature tokens that represent primitives but that do not have corresponding -/// MoveTypeLayout (e.g., SignatureToken::StructInstantiation). +/// Checks if a given SignatureToken represents a primitive type and, if so, +/// returns MoveTypeLayout for this type (if available). The reason we need to +/// return both information about whether a SignatureToken represents a +/// primitive and an Option representing MoveTypeLayout is that there +/// can be signature tokens that represent primitives but that do not have +/// corresponding MoveTypeLayout (e.g., SignatureToken::StructInstantiation). pub fn primitive_type( view: &BinaryIndexedView, type_args: &[TypeTag], @@ -589,7 +596,8 @@ pub fn primitive_type( })), ) } else if resolved_struct == RESOLVED_UTF8_STR { - // both structs structs representing strings have one field - a vector of type u8 + // both structs representing strings have one field - a vector of type + // u8 ( true, Some(MoveTypeLayout::Struct(MoveStructLayout { @@ -685,9 +693,9 @@ fn resolve_object_vec_arg(idx: usize, arg: &SuiJsonValue) -> Result { - // Due to how escaping of square bracket works, we may be dealing with a JSON string - // representing a JSON array rather than with the array itself ("[0x42,0x7]" rather than - // [0x42,0x7]). + // Due to how escaping of square bracket works, we may be dealing with a JSON + // string representing a JSON array rather than with the array + // itself ("[0x42,0x7]" rather than [0x42,0x7]).
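The escaped-bracket case above is easy to trip over, so a small self-contained example may help; this is not the crate's actual `try_escape_array` helper, only a sketch of why the unquoted form fails strict JSON parsing until its elements are quoted.

use serde_json::Value;

fn main() {
    // "[0x42,0x7]" is not valid JSON: hex literals are not JSON numbers, so
    // strict parsing fails and the manual array fallback has to take over.
    assert!(serde_json::from_str::<Value>("[0x42,0x7]").is_err());
    // Once each element is quoted, the intended array parses cleanly.
    let escaped: Value = serde_json::from_str(r#"["0x42","0x7"]"#).unwrap();
    assert_eq!(escaped[0], "0x42");
}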
let mut object_ids = vec![]; for tok in s[1..s.len() - 1].to_string().split(',') { let id = JsonValue::String(tok.to_string()); @@ -745,8 +753,9 @@ fn resolve_call_arg( } } - // in terms of non-primitives we only currently support objects and "flat" (depth == 1) vectors - // of objects (but not, for example, vectors of references) + // in terms of non-primitives we only currently support objects and "flat" + // (depth == 1) vectors of objects (but not, for example, vectors of + // references) match param { SignatureToken::Struct(_) | SignatureToken::StructInstantiation(_) @@ -781,8 +790,8 @@ fn resolve_call_arg( pub fn is_receiving_argument(view: &BinaryIndexedView, arg_type: &SignatureToken) -> bool { use SignatureToken as ST; - // Progress down into references to determine if the underlying type is a receiving - // type or not. + // Progress down into references to determine if the underlying type is a + // receiving type or not. let mut token = arg_type; while let ST::Reference(inner) | ST::MutableReference(inner) = token { token = inner; } @@ -808,8 +817,9 @@ fn resolve_call_args( .collect() } -/// Resolve the JSON args of a function into the expected formats to make them usable by Move call -/// This is because we have special types which we need to specify in other formats +/// Resolve the JSON args of a function into the expected formats to make them +/// usable by Move call. This is because we have special types which we need to +/// specify in other formats. pub fn resolve_move_function_args( package: &MovePackage, module_ident: Identifier, diff --git a/crates/sui-json/src/tests.rs b/crates/sui-json/src/tests.rs index 906edbbd51d..9ab3ecdac7e 100644 --- a/crates/sui-json/src/tests.rs +++ b/crates/sui-json/src/tests.rs @@ -1,33 +1,35 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use std::path::Path; -use std::str::FromStr; +use std::{path::Path, str::FromStr}; use fastcrypto::encoding::{Encoding, Hex}; -use move_core_types::annotated_value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout}; -use move_core_types::language_storage::StructTag; -use move_core_types::u256::U256; -use move_core_types::{account_address::AccountAddress, ident_str, identifier::Identifier}; +use move_core_types::{ + account_address::AccountAddress, + annotated_value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout}, + ident_str, + identifier::Identifier, + language_storage::StructTag, + u256::U256, +}; use serde_json::{json, Value}; -use test_fuzz::runtime::num_traits::ToPrimitive; - use sui_framework::BuiltInFramework; use sui_move_build::BuildConfig; -use sui_types::base_types::{ - ObjectID, SuiAddress, TransactionDigest, STD_ASCII_MODULE_NAME, STD_ASCII_STRUCT_NAME, - STD_OPTION_MODULE_NAME, STD_OPTION_STRUCT_NAME, +use sui_types::{ + base_types::{ + ObjectID, SuiAddress, TransactionDigest, STD_ASCII_MODULE_NAME, STD_ASCII_STRUCT_NAME, + STD_OPTION_MODULE_NAME, STD_OPTION_STRUCT_NAME, + }, + dynamic_field::derive_dynamic_field_id, + gas_coin::GasCoin, + object::Object, + parse_sui_type_tag, MOVE_STDLIB_ADDRESS, }; -use sui_types::dynamic_field::derive_dynamic_field_id; -use sui_types::gas_coin::GasCoin; -use sui_types::object::Object; -use sui_types::{parse_sui_type_tag, MOVE_STDLIB_ADDRESS}; +use test_fuzz::runtime::num_traits::ToPrimitive; +use super::{check_valid_homogeneous, resolve_move_function_args, SuiJsonValue, HEX_PREFIX}; use crate::ResolvedCallArg; -use super::{check_valid_homogeneous, HEX_PREFIX}; -use super::{resolve_move_function_args, SuiJsonValue}; - // Negative test cases #[test] fn test_json_not_homogeneous() { @@ -140,46 +142,53 @@ fn test_basic_args_linter_pure_args_bad() { let bad_hex_val = "0x1234AB CD"; let checks = vec![ - // Although U256 value can be encoded as num, we enforce it must be a string - ( - Value::from(123), - MoveTypeLayout::U256, - ), - // Space not allowed - (Value::from(" 9"), MoveTypeLayout::U8), - // Hex must start with 0x - (Value::from("AB"), MoveTypeLayout::U8), - // Too large - (Value::from("123456789"), MoveTypeLayout::U8), - // Too large - (Value::from("123456789123456789123456789123456789"), MoveTypeLayout::U64), - // Too large - (Value::from("123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789"), MoveTypeLayout::U128), - // U64 value greater than 255 cannot be used as U8 - (Value::from(900u64), MoveTypeLayout::U8), - // floats cannot be used as U8 - (Value::from(0.4f32), MoveTypeLayout::U8), - // floats cannot be used as U64 - (Value::from(3.4f32), MoveTypeLayout::U64), - // Negative cannot be used as U64 - (Value::from(-19), MoveTypeLayout::U64), - // Negative cannot be used as Unsigned - (Value::from(-1), MoveTypeLayout::U8), - // u8 vector from bad hex repr - ( - Value::from(bad_hex_val), - MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U8)), - ), - // u8 vector from heterogeneous array - ( - json!([1, 2, 3, true, 5, 6, 7]), - MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U8)), - ), - // U64 deep nest, bad because heterogeneous array - ( - json!([[[9, 53, 434], [0], [300]], [], [300, 4, 5, 6, 7]]), - MoveTypeLayout::Vector(Box::new(MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U64)))), + // Although U256 value can be encoded as num, we enforce it must be a string + (Value::from(123), 
MoveTypeLayout::U256), + // Space not allowed + (Value::from(" 9"), MoveTypeLayout::U8), + // Hex must start with 0x + (Value::from("AB"), MoveTypeLayout::U8), + // Too large + (Value::from("123456789"), MoveTypeLayout::U8), + // Too large + ( + Value::from("123456789123456789123456789123456789"), + MoveTypeLayout::U64, + ), + // Too large + ( + Value::from( + "123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789", ), + MoveTypeLayout::U128, + ), + // U64 value greater than 255 cannot be used as U8 + (Value::from(900u64), MoveTypeLayout::U8), + // floats cannot be used as U8 + (Value::from(0.4f32), MoveTypeLayout::U8), + // floats cannot be used as U64 + (Value::from(3.4f32), MoveTypeLayout::U64), + // Negative cannot be used as U64 + (Value::from(-19), MoveTypeLayout::U64), + // Negative cannot be used as Unsigned + (Value::from(-1), MoveTypeLayout::U8), + // u8 vector from bad hex repr + ( + Value::from(bad_hex_val), + MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U8)), + ), + // u8 vector from heterogeneous array + ( + json!([1, 2, 3, true, 5, 6, 7]), + MoveTypeLayout::Vector(Box::new(MoveTypeLayout::U8)), + ), + // U64 deep nest, bad because heterogeneous array + ( + json!([[[9, 53, 434], [0], [300]], [], [300, 4, 5, 6, 7]]), + MoveTypeLayout::Vector(Box::new(MoveTypeLayout::Vector(Box::new( + MoveTypeLayout::U64, + )))), + ), ]; // Driver @@ -434,21 +443,19 @@ fn test_basic_args_linter_top_level() { let module = Identifier::new("geniteam").unwrap(); let function = Identifier::new("create_monster").unwrap(); - /* - Function signature: - public fun create_monster( - _player: &mut Player, - farm: &mut Farm, - pet_monsters: &mut Collection, - monster_name: vector, - monster_img_index: u64, - breed: u8, - monster_affinity: u8, - monster_description: vector, - display: vector, - ctx: &mut TxContext - ) - */ + // Function signature: + // public fun create_monster( + // _player: &mut Player, + // farm: &mut Farm, + // pet_monsters: &mut Collection, + // monster_name: vector, + // monster_img_index: u64, + // breed: u8, + // monster_affinity: u8, + // monster_description: vector, + // display: vector, + // ctx: &mut TxContext + // ) let monster_name_raw = "MonsterName"; let monster_img_id_raw = "12345678"; @@ -546,10 +553,8 @@ fn test_basic_args_linter_top_level() { let module = Identifier::new("object_basics").unwrap(); let function = Identifier::new("create").unwrap(); - /* - Function signature: - public fun create(value: u64, recipient: vector, ctx: &mut TxContext) - */ + // Function signature: + // public fun create(value: u64, recipient: vector, ctx: &mut TxContext) let value_raw = "29897"; let address = SuiAddress::random_for_testing_only(); @@ -571,7 +576,8 @@ fn test_basic_args_linter_top_level() { ); // Need to verify this specially - // BCS serialzes addresses like vectors so there's a length prefix, which makes the vec longer by 1 + // BCS serializes addresses like vectors so there's a length prefix, which makes + // the vec longer by 1 assert_eq!( args[1].0, ResolvedCallArg::Pure(bcs::to_bytes(&AccountAddress::from(address)).unwrap()), ); @@ -582,10 +588,8 @@ fn test_basic_args_linter_top_level() { let module = Identifier::new("object_basics").unwrap(); let function = Identifier::new("transfer").unwrap(); - /* - Function signature: - public fun transfer(o: Object, recipient: vector, _ctx: &mut TxContext) - */ + // Function signature: + // public fun transfer(o: Object, recipient: vector, _ctx:
&mut TxContext) let object_id_raw = ObjectID::random(); let address = SuiAddress::random_for_testing_only(); @@ -609,7 +613,8 @@ fn test_basic_args_linter_top_level() { ); // Need to verify this specially - // BCS serialzes addresses like vectors so there's a length prefix, which makes the vec longer by 1 + // BCS serializes addresses like vectors so there's a length prefix, which makes + // the vec longer by 1 assert_eq!( args[1].0, ResolvedCallArg::Pure(bcs::to_bytes(&AccountAddress::from(address)).unwrap()) @@ -633,10 +638,8 @@ fn test_basic_args_linter_top_level() { let module = Identifier::new("entry_point_vector").unwrap(); let function = Identifier::new("two_obj_vec_destroy").unwrap(); - /* - Function signature: - public entry fun two_obj_vec_destroy(v: vector, _: &mut TxContext) - */ + // Function signature: + // public entry fun two_obj_vec_destroy(v: vector, _: &mut TxContext) let object_id_raw1 = ObjectID::random(); let object_id_raw2 = ObjectID::random(); let object_id1 = json!(format!("0x{}", object_id_raw1)); diff --git a/crates/sui-keys/src/key_derive.rs b/crates/sui-keys/src/key_derive.rs index c3e47b7e4ee..c8124932a16 100644 --- a/crates/sui-keys/src/key_derive.rs +++ b/crates/sui-keys/src/key_derive.rs @@ -3,13 +3,11 @@ use anyhow::anyhow; use bip32::{ChildNumber, DerivationPath, XPrv}; - use bip39::{Language, Mnemonic, MnemonicType, Seed}; -use fastcrypto::ed25519::Ed25519KeyPair; -use fastcrypto::secp256r1::{Secp256r1KeyPair, Secp256r1PrivateKey}; use fastcrypto::{ - ed25519::Ed25519PrivateKey, + ed25519::{Ed25519KeyPair, Ed25519PrivateKey}, secp256k1::{Secp256k1KeyPair, Secp256k1PrivateKey}, + secp256r1::{Secp256r1KeyPair, Secp256r1PrivateKey}, traits::{KeyPair, ToFromBytes}, }; use slip10_ed25519::derive_ed25519_private_key; @@ -25,8 +23,9 @@ pub const DERVIATION_PATH_PURPOSE_SECP256K1: u32 = 54; pub const DERVIATION_PATH_PURPOSE_SECP256R1: u32 = 74; /// Ed25519 follows SLIP-0010 using hardened path: m/44'/4218'/0'/0'/{index}' -/// Secp256k1 follows BIP-32/44 using path where the first 3 levels are hardened: m/54'/4218'/0'/0/{index} -/// Secp256r1 follows BIP-32/44 using path where the first 3 levels are hardened: m/74'/4218'/0'/0/{index} +/// Secp256k1 follows BIP-32/44 using path where the first 3 levels are +/// hardened: m/54'/4218'/0'/0/{index}. Secp256r1 follows BIP-32/44 using path +/// where the first 3 levels are hardened: m/74'/4218'/0'/0/{index} /// Note that the purpose node is used to distinguish signature schemes.
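As a quick sanity check of the two path shapes described above, the `bip32` crate imported by this module can parse both forms; the assertions below are illustrative (index 0) and rely only on `DerivationPath`'s `FromStr` and `AsRef<[ChildNumber]>` impls.

use std::str::FromStr;

use bip32::DerivationPath;

fn main() {
    // Ed25519 (SLIP-0010): every level is hardened, including change and index.
    let ed25519 = DerivationPath::from_str("m/44'/4218'/0'/0'/0'").unwrap();
    // Secp256k1 (BIP-32/44): only the first three levels are hardened.
    let secp256k1 = DerivationPath::from_str("m/54'/4218'/0'/0/0").unwrap();
    // Both forms have the five nodes that validate_path later destructures.
    assert_eq!(ed25519.as_ref().len(), 5);
    assert_eq!(secp256k1.as_ref().len(), 5);
}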
pub fn derive_key_pair_from_path( seed: &[u8], @@ -77,7 +76,8 @@ pub fn validate_path( SignatureScheme::ED25519 => { match path { Some(p) => { - // The derivation path must be hardened at all levels with purpose = 44, coin_type = 4218 + // The derivation path must be hardened at all levels with purpose = 44, + // coin_type = 4218 if let &[purpose, coin_type, account, change, address] = p.as_ref() { if Some(purpose) == ChildNumber::new(DERVIATION_PATH_PURPOSE_ED25519, true).ok() @@ -105,7 +105,8 @@ pub fn validate_path( SignatureScheme::Secp256k1 => { match path { Some(p) => { - // The derivation path must be hardened at first 3 levels with purpose = 54, coin_type = 4218 + // The derivation path must be hardened at first 3 levels with purpose = 54, + // coin_type = 4218 if let &[purpose, coin_type, account, change, address] = p.as_ref() { if Some(purpose) == ChildNumber::new(DERVIATION_PATH_PURPOSE_SECP256K1, true).ok() @@ -133,7 +134,8 @@ pub fn validate_path( SignatureScheme::Secp256r1 => { match path { Some(p) => { - // The derivation path must be hardened at first 3 levels with purpose = 74, coin_type = 4218 + // The derivation path must be hardened at first 3 levels with purpose = 74, + // coin_type = 4218 if let &[purpose, coin_type, account, change, address] = p.as_ref() { if Some(purpose) == ChildNumber::new(DERVIATION_PATH_PURPOSE_SECP256R1, true).ok() diff --git a/crates/sui-keys/src/keypair_file.rs b/crates/sui-keys/src/keypair_file.rs index bc1a4b0504f..b120b762349 100644 --- a/crates/sui-keys/src/keypair_file.rs +++ b/crates/sui-keys/src/keypair_file.rs @@ -39,7 +39,8 @@ pub fn read_keypair_from_file>(path: P) -> anyhow::Res SuiKeyPair::decode_base64(contents.as_str().trim()).map_err(|e| anyhow!(e)) } -/// Read from file as Base64 encoded `flag || privkey` and return a NetworkKeyPair. +/// Read from file as Base64 encoded `flag || privkey` and return a +/// NetworkKeyPair. pub fn read_network_keypair_from_file>( path: P, ) -> anyhow::Result { diff --git a/crates/sui-keys/src/keystore.rs b/crates/sui-keys/src/keystore.rs index 11d02b2b45d..e26bb2f467a 100644 --- a/crates/sui-keys/src/keystore.rs +++ b/crates/sui-keys/src/keystore.rs @@ -1,8 +1,15 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::key_derive::{derive_key_pair_from_path, generate_new_key}; -use crate::random_names::{random_name, random_names}; +use std::{ + collections::{BTreeMap, HashSet}, + fmt::{Display, Formatter, Write}, + fs, + fs::File, + io::BufReader, + path::{Path, PathBuf}, +}; + use anyhow::{anyhow, bail, ensure, Context}; use bip32::DerivationPath; use bip39::{Language, Mnemonic, Seed}; @@ -10,17 +17,17 @@ use rand::{rngs::StdRng, SeedableRng}; use regex::Regex; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use shared_crypto::intent::{Intent, IntentMessage}; -use std::collections::{BTreeMap, HashSet}; -use std::fmt::Write; -use std::fmt::{Display, Formatter}; -use std::fs; -use std::fs::File; -use std::io::BufReader; -use std::path::{Path, PathBuf}; -use sui_types::base_types::SuiAddress; -use sui_types::crypto::get_key_pair_from_rng; -use sui_types::crypto::{ - enum_dispatch, EncodeDecodeBase64, PublicKey, Signature, SignatureScheme, SuiKeyPair, +use sui_types::{ + base_types::SuiAddress, + crypto::{ + enum_dispatch, get_key_pair_from_rng, EncodeDecodeBase64, PublicKey, Signature, + SignatureScheme, SuiKeyPair, + }, +}; + +use crate::{ + key_derive::{derive_key_pair_from_path, generate_new_key}, + random_names::{random_name, random_names}, }; #[derive(Serialize, Deserialize)] @@ -233,7 +240,8 @@ impl AccountKeystore for FileBasedKeystore { Ok(()) } - /// Return an array of `Alias`, consisting of every alias and its corresponding public key. + /// Return an array of `Alias`, consisting of every alias and its + /// corresponding public key. fn aliases(&self) -> Vec<&Alias> { self.aliases.values().collect() } @@ -242,7 +250,8 @@ impl AccountKeystore for FileBasedKeystore { self.aliases.iter().collect::>() } - /// Return an array of `Alias`, consisting of every alias and its corresponding public key. + /// Return an array of `Alias`, consisting of every alias and its + /// corresponding public key. fn aliases_mut(&mut self) -> Vec<&mut Alias> { self.aliases.values_mut().collect() } @@ -251,8 +260,8 @@ impl AccountKeystore for FileBasedKeystore { self.keys.values().map(|key| key.public()).collect() } - /// This function returns an error if the provided alias already exists. If the alias - /// has not already been used, then it returns the alias. + /// This function returns an error if the provided alias already exists. If + /// the alias has not already been used, then it returns the alias. /// If no alias has been passed, it will generate a new alias. fn create_alias(&self, alias: Option) -> Result { match alias { @@ -543,8 +552,8 @@ impl AccountKeystore for InMemKeystore { .map(|x| x.0) } - /// This function returns an error if the provided alias already exists. If the alias - /// has not already been used, then it returns the alias. + /// This function returns an error if the provided alias already exists. If + /// the alias has not already been used, then it returns the alias. /// If no alias has been passed, it will generate a new alias. fn create_alias(&self, alias: Option) -> Result { match alias { diff --git a/crates/sui-keys/src/random_names.rs b/crates/sui-keys/src/random_names.rs index 7434ac850d1..c96bec3788b 100644 --- a/crates/sui-keys/src/random_names.rs +++ b/crates/sui-keys/src/random_names.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use rand::{rngs::ThreadRng, thread_rng, Rng}; use std::collections::HashSet; +use rand::{rngs::ThreadRng, thread_rng, Rng}; + /// This library provides two functions to generate /// a random combination of an adjective /// and a precious stone name as a well formatted @@ -183,7 +184,8 @@ const RIGHT_LENGTH: usize = RIGHT_NAMES.len(); /// Return a random name formatted as first-second from a list of strings. /// -/// The main purpose of this function is to generate random aliases for addresses. +/// The main purpose of this function is to generate random aliases for +/// addresses. pub fn random_name(conflicts: &HashSet) -> String { let mut rng = thread_rng(); // as long as the generated name is in the list of conflicts, diff --git a/crates/sui-keys/tests/tests.rs b/crates/sui-keys/tests/tests.rs index 0484eae84bb..fcdc8beacee 100644 --- a/crates/sui-keys/tests/tests.rs +++ b/crates/sui-keys/tests/tests.rs @@ -1,20 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::fs; -use std::str::FromStr; +use std::{fs, str::FromStr}; -use fastcrypto::hash::HashFunction; -use fastcrypto::traits::EncodeDecodeBase64; -use sui_keys::key_derive::generate_new_key; -use tempfile::TempDir; - -use sui_keys::keystore::{AccountKeystore, FileBasedKeystore, InMemKeystore, Keystore}; -use sui_types::crypto::{DefaultHash, SignatureScheme, SuiSignatureInner}; +use fastcrypto::{hash::HashFunction, traits::EncodeDecodeBase64}; +use sui_keys::{ + key_derive::generate_new_key, + keystore::{AccountKeystore, FileBasedKeystore, InMemKeystore, Keystore}, +}; use sui_types::{ base_types::{SuiAddress, SUI_ADDRESS_LENGTH}, - crypto::Ed25519SuiSignature, + crypto::{DefaultHash, Ed25519SuiSignature, SignatureScheme, SuiSignatureInner}, }; +use tempfile::TempDir; #[test] fn alias_exists_test() { @@ -105,7 +103,8 @@ fn keystore_no_aliases() { // and a new alias for it. // This idea is to test the correct conversion // from the old type (which only contains keys and an optional path) - // to the new type which contains keys and aliases (and an optional path), and if it creates the aliases file. + // to the new type which contains keys and aliases (and an optional path), and + // if it creates the aliases file. let temp_dir = TempDir::new().unwrap(); let mut keystore_path = temp_dir.path().join("sui.keystore"); @@ -210,7 +209,8 @@ fn mnemonic_test() { assert_eq!(address, imported_address); } -/// This test confirms rust's implementation of mnemonic is the same with the Sui Wallet +/// This test confirms Rust's implementation of mnemonic is the same as the +/// Sui Wallet's #[test] fn sui_wallet_address_mnemonic_test() -> Result<(), anyhow::Error> { let phrase = "result crisp session latin must fruit genuine question prevent start coconut brave speak student dismiss"; diff --git a/crates/sui-light-client/src/main.rs b/crates/sui-light-client/src/main.rs index 83d7b0d3574..fd31e29fc15 100644 --- a/crates/sui-light-client/src/main.rs +++ b/crates/sui-light-client/src/main.rs @@ -1,12 +1,24 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 +use std::{ + fs, + io::{Read, Write}, + path::PathBuf, + str::FromStr, + sync::Arc, +}; + use anyhow::anyhow; use async_trait::async_trait; +use clap::{Parser, Subcommand}; use move_core_types::account_address::AccountAddress; +use sui_config::genesis::Genesis; +use sui_json::SuiJsonValue; use sui_json_rpc_types::SuiTransactionBlockResponseOptions; - +use sui_package_resolver::{Package, PackageStore, Resolver, Result as ResolverResult}; use sui_rest_api::{CheckpointData, Client}; +use sui_sdk::SuiClientBuilder; use sui_types::{ base_types::{ObjectID, SequenceNumber}, committee::Committee, @@ -18,17 +30,6 @@ use sui_types::{ object::{Data, Object}, }; -use sui_config::genesis::Genesis; - -use sui_json::SuiJsonValue; -use sui_package_resolver::Result as ResolverResult; -use sui_package_resolver::{Package, PackageStore, Resolver}; -use sui_sdk::SuiClientBuilder; - -use clap::{Parser, Subcommand}; -use std::{fs, io::Write, path::PathBuf, str::FromStr}; -use std::{io::Read, sync::Arc}; - /// A light client for the Sui blockchain #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -58,8 +59,8 @@ impl PackageStore for RemotePackageStore { async fn version(&self, id: AccountAddress) -> ResolverResult { Ok(self.client.get_object(id.into()).await.unwrap().version()) } - /// Read package contents. Fails if `id` is not an object, not a package, or is malformed in - /// some way. + /// Read package contents. Fails if `id` is not an object, not a package, or + /// is malformed in some way. async fn fetch(&self, id: AccountAddress) -> ResolverResult> { let object = get_verified_object(&self.config, id.into()).await.unwrap(); let package = Package::read(&object).unwrap(); @@ -87,7 +88,8 @@ enum SCommands { }, } -// The config file for the light client including the root of trust genesis digest +// The config file for the light client including the root of trust genesis +// digest #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] struct Config { /// Full node url @@ -280,7 +282,8 @@ async fn check_and_sync_checkpoints(config: &Config) -> anyhow::Result<()> { let mut prev_committee = genesis_committee; for ckp_id in &checkpoints_list.checkpoints { - // check if there is a file with this name ckp_id.yaml in the checkpoint_summary_dir + // check if there is a file with this name ckp_id.yaml in the + // checkpoint_summary_dir let mut checkpoint_path = config.checkpoint_summary_dir.clone(); checkpoint_path.push(format!("{}.yaml", ckp_id)); @@ -545,10 +548,11 @@ pub async fn main() { // Make a test namespace #[cfg(test)] mod tests { + use std::path::{Path, PathBuf}; + use sui_types::messages_checkpoint::FullCheckpointContents; use super::*; - use std::path::{Path, PathBuf}; async fn read_full_checkpoint(checkpoint_path: &PathBuf) -> anyhow::Result { let mut reader = fs::File::open(checkpoint_path.clone())?; @@ -626,24 +630,30 @@ mod tests { // Change committee committee.epoch += 10; - assert!(extract_verified_effects_and_events( - &full_checkpoint, - &committee, - TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zi6cMVA9t4WhWk").unwrap(), - ) - .is_err()); + assert!( + extract_verified_effects_and_events( + &full_checkpoint, + &committee, + TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zi6cMVA9t4WhWk") + .unwrap(), + ) + .is_err() + ); } #[tokio::test] async fn test_checkpoint_no_transaction() { let (committee, full_checkpoint) = read_data().await; - assert!(extract_verified_effects_and_events( - 
&full_checkpoint, - &committee, - TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zj6cMVA9t4WhWk").unwrap(), - ) - .is_err()); + assert!( + extract_verified_effects_and_events( + &full_checkpoint, + &committee, + TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zj6cMVA9t4WhWk") + .unwrap(), + ) + .is_err() + ); } #[tokio::test] @@ -654,12 +664,15 @@ mod tests { let random_contents = FullCheckpointContents::random_for_testing(); full_checkpoint.checkpoint_contents = random_contents.checkpoint_contents(); - assert!(extract_verified_effects_and_events( - &full_checkpoint, - &committee, - TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zj6cMVA9t4WhWk").unwrap(), - ) - .is_err()); + assert!( + extract_verified_effects_and_events( + &full_checkpoint, + &committee, + TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zj6cMVA9t4WhWk") + .unwrap(), + ) + .is_err() + ); } #[tokio::test] @@ -679,11 +692,14 @@ mod tests { } } - assert!(extract_verified_effects_and_events( - &full_checkpoint, - &committee, - TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zj6cMVA9t4WhWk").unwrap(), - ) - .is_err()); + assert!( + extract_verified_effects_and_events( + &full_checkpoint, + &committee, + TransactionDigest::from_str("8RiKBwuAbtu8zNCtz8SrcfHyEUzto6zj6cMVA9t4WhWk") + .unwrap(), + ) + .is_err() + ); } } diff --git a/crates/sui-macros/src/lib.rs b/crates/sui-macros/src/lib.rs index 6e3cd85ca19..3ce525956fc 100644 --- a/crates/sui-macros/src/lib.rs +++ b/crates/sui-macros/src/lib.rs @@ -1,15 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use futures::future::BoxFuture; -use std::collections::HashMap; -use std::future::Future; -use std::sync::Arc; +use std::{collections::HashMap, future::Future, sync::Arc}; +use futures::future::BoxFuture; pub use sui_proc_macros::*; -/// Evaluates an expression in a new thread which will not be subject to interception of -/// getrandom(), clock_gettime(), etc. +/// Evaluates an expression in a new thread which will not be subject to +/// interception of getrandom(), clock_gettime(), etc. #[cfg(msim)] #[macro_export] macro_rules! nondeterministic { @@ -41,9 +39,10 @@ fn with_fp_map(func: impl FnOnce(&mut FpMap) -> T) -> T { #[cfg(not(msim))] fn with_fp_map(func: impl FnOnce(&mut FpMap) -> T) -> T { - use once_cell::sync::Lazy; use std::sync::Mutex; + use once_cell::sync::Lazy; + static MAP: Lazy> = Lazy::new(Default::default); let mut map = MAP.lock().unwrap(); func(&mut map) @@ -147,8 +146,8 @@ pub fn register_fail_point(identifier: &'static str, callback: impl Fn() + Sync ); } -/// Register an asynchronous fail point. Because it is async it can yield execution of the calling -/// task, e.g. by sleeping. +/// Register an asynchronous fail point. Because it is async it can yield +/// execution of the calling task, e.g. by sleeping. pub fn register_fail_point_async( identifier: &'static str, callback: impl Fn() -> F + Sync + Send + 'static, @@ -233,7 +232,8 @@ pub fn clear_fail_point(identifier: &'static str) { clear_fail_point_impl(identifier); } -/// Trigger a fail point. Tests can trigger various behavior when the fail point is hit. +/// Trigger a fail point. Tests can trigger various behavior when the fail point +/// is hit. #[cfg(any(msim, fail_points))] #[macro_export] macro_rules! fail_point { @@ -242,8 +242,8 @@ macro_rules! fail_point { }; } -/// Trigger an async fail point. Tests can trigger various async behavior when the fail point is -/// hit. +/// Trigger an async fail point. 
Tests can trigger various async behavior when +/// the fail point is hit. #[cfg(any(msim, fail_points))] #[macro_export] macro_rules! fail_point_async { @@ -253,7 +253,8 @@ macro_rules! fail_point_async { } /// Trigger a failpoint that runs a callback at the callsite if it is enabled. -/// (whether it is enabled is controlled by whether the registration callback returns true/false). +/// (whether it is enabled is controlled by whether the registration callback +/// returns true/false). #[cfg(any(msim, fail_points))] #[macro_export] macro_rules! fail_point_if { @@ -265,8 +266,8 @@ macro_rules! fail_point_if { } /// Trigger a failpoint that runs a callback at the callsite if it is enabled. -/// If the registration callback returns Some(v), then the `v` is passed to the callback in the test. -/// Otherwise the failpoint is skipped +/// If the registration callback returns Some(v), then the `v` is passed to the +/// callback in the test. Otherwise the failpoint is skipped #[cfg(any(msim, fail_points))] #[macro_export] macro_rules! fail_point_arg { @@ -316,7 +317,8 @@ macro_rules! replay_log { }; } -// These tests need to be run in release mode, since debug mode does overflow checks by default! +// These tests need to be run in release mode, since debug mode does overflow +// checks by default! #[cfg(test)] mod test { use super::*; @@ -342,8 +344,9 @@ mod test { } } - // this will not panic even if we pass in (i32::MAX, 1), because we skipped processing - // the item macro, so we also need to make sure it doesn't panic in debug mode. + // this will not panic even if we pass in (i32::MAX, 1), because we skipped + // processing the item macro, so we also need to make sure it doesn't + // panic in debug mode. unchecked_add(1, 2); } diff --git a/crates/sui-metric-checker/src/lib.rs b/crates/sui-metric-checker/src/lib.rs index 93b4054c7dd..bf5da3d4fdc 100644 --- a/crates/sui-metric-checker/src/lib.rs +++ b/crates/sui-metric-checker/src/lib.rs @@ -135,9 +135,10 @@ fn unix_seconds_to_timestamp_string(unix_seconds: i64) -> String { #[cfg(test)] mod tests { - use super::*; use chrono::TimeZone; + use super::*; + struct MockNowProvider; impl NowProvider for MockNowProvider { diff --git a/crates/sui-metric-checker/src/main.rs b/crates/sui-metric-checker/src/main.rs index 9cbe870df77..bda21156ff0 100644 --- a/crates/sui-metric-checker/src/main.rs +++ b/crates/sui-metric-checker/src/main.rs @@ -1,17 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{fs::File, io::Read, time::Duration}; + use anyhow::anyhow; use backoff::{future::retry, ExponentialBackoff}; use chrono::{DateTime, Utc}; use clap::*; use once_cell::sync::Lazy; use prometheus_http_query::Client; -use std::fs::File; -use std::io::Read; -use std::time::Duration; -use sui_metric_checker::query::{instant_query, range_query}; use sui_metric_checker::{ - fails_threshold_condition, timestamp_string_to_unix_seconds, Config, NowProvider, QueryType, + fails_threshold_condition, + query::{instant_query, range_query}, + timestamp_string_to_unix_seconds, Config, NowProvider, QueryType, }; #[derive(Parser)] diff --git a/crates/sui-metric-checker/src/query.rs b/crates/sui-metric-checker/src/query.rs index f5baeeccc09..b34cee4f549 100644 --- a/crates/sui-metric-checker/src/query.rs +++ b/crates/sui-metric-checker/src/query.rs @@ -1,12 +1,13 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::unix_seconds_to_timestamp_string; use anyhow::anyhow; use base64::{engine::general_purpose, Engine}; use prometheus_http_query::Client; use reqwest::header::{HeaderValue, AUTHORIZATION}; use tracing::debug; +use crate::unix_seconds_to_timestamp_string; + pub async fn instant_query( auth_header: &str, client: Client, @@ -41,7 +42,8 @@ pub async fn instant_query( } } -// This will return the average value of the queried metric over the given time range. +// This will return the average value of the queried metric over the given time +// range. pub async fn range_query( auth_header: &str, client: Client, diff --git a/crates/sui-move-build/src/lib.rs b/crates/sui-move-build/src/lib.rs index 8fd2cb067ad..eaf4240ca31 100644 --- a/crates/sui-move-build/src/lib.rs +++ b/crates/sui-move-build/src/lib.rs @@ -32,13 +32,10 @@ use move_package::{ build_plan::BuildPlan, compiled_package::CompiledPackage as MoveCompiledPackage, }, package_hooks::{PackageHooks, PackageIdentifier}, - resolution::resolution_graph::ResolvedGraph, + resolution::resolution_graph::{Package, ResolvedGraph}, + source_package::parsed_manifest::{CustomDepInfo, SourceManifest}, BuildConfig as MoveBuildConfig, }; -use move_package::{ - resolution::resolution_graph::Package, source_package::parsed_manifest::CustomDepInfo, - source_package::parsed_manifest::SourceManifest, -}; use move_symbol_pool::Symbol; use serde_reflection::Registry; use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion}; @@ -56,7 +53,8 @@ use sui_verifier::{default_verifier_config, verifier as sui_bytecode_verifier}; #[path = "unit_tests/build_tests.rs"] mod build_tests; -/// Wrapper around the core Move `CompiledPackage` with some Sui-specific traits and info +/// Wrapper around the core Move `CompiledPackage` with some Sui-specific traits +/// and info #[derive(Debug, Clone)] pub struct CompiledPackage { pub package: MoveCompiledPackage, @@ -72,7 +70,8 @@ pub struct CompiledPackage { #[derive(Clone)] pub struct BuildConfig { pub config: MoveBuildConfig, - /// If true, run the Move bytecode verifier on the bytecode from a successful build + /// If true, run the Move bytecode verifier on the bytecode from a + /// successful build pub run_bytecode_verifier: bool, /// If true, print build diagnostics to stderr--no printing if false pub print_diags_to_stderr: bool, @@ -153,8 +152,9 @@ impl BuildConfig { Ok((compiled_pkg, fn_info.unwrap())) } - /// Given a `path` and a `build_config`, build the package in that path, including its dependencies. - /// If we are building the Sui framework, we skip the check that the addresses should be 0 + /// Given a `path` and a `build_config`, build the package in that path, + /// including its dependencies. If we are building the Sui framework, we + /// skip the check that the addresses should be 0 pub fn build(self, path: PathBuf) -> SuiResult { let print_diags_to_stderr = self.print_diags_to_stderr; let run_bytecode_verifier = self.run_bytecode_verifier; @@ -196,8 +196,9 @@ impl BuildConfig { } } -/// There may be additional information that needs to be displayed after diagnostics are reported -/// (optionally report diagnostics themselves if files argument is provided). +/// There may be additional information that needs to be displayed after +/// diagnostics are reported (optionally report diagnostics themselves if files +/// argument is provided). 
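The doc comment above describes reporting that happens after diagnostics, and the hunk that follows prints a summary of suppressed lint warnings. As a minimal sketch of that counting logic, with simplified stand-in types rather than move-compiler's real `Diagnostics`:

use std::collections::HashSet;

// Stand-in for the lint prefix and diagnostic type; both are assumptions.
const LINT_WARNING_PREFIX: &str = "Lint ";

struct Diag {
    category: String,
    message: String,
}

// Count lint-prefixed diagnostics and the distinct categories they fall into.
fn filtered_lint_stats(diags: &[Diag]) -> (usize, usize) {
    let filtered: Vec<&Diag> = diags
        .iter()
        .filter(|d| d.message.starts_with(LINT_WARNING_PREFIX))
        .collect();
    let categories: HashSet<&str> = filtered.iter().map(|d| d.category.as_str()).collect();
    (filtered.len(), categories.len())
}

fn main() {
    let diags = vec![
        Diag { category: "W01".into(), message: "Lint unused variable".into() },
        Diag { category: "W01".into(), message: "Lint shadowed name".into() },
        Diag { category: "E02".into(), message: "type mismatch".into() },
    ];
    let (num, cats) = filtered_lint_stats(&diags);
    println!("Total number of linter warnings suppressed: {num} (filtered categories: {cats})");
}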
pub fn decorate_warnings(warning_diags: Diagnostics, files: Option<&FilesSourceText>) { let any_linter_warnings = warning_diags.any_with_prefix(LINT_WARNING_PREFIX); let (filtered_diags_num, filtered_categories) = @@ -209,12 +210,14 @@ pub fn decorate_warnings(warning_diags: Diagnostics, files: Option<&FilesSourceT eprintln!("Please report feedback on the linter warnings at https://forums.sui.io\n"); } if filtered_diags_num > 0 { - eprintln!("Total number of linter warnings suppressed: {filtered_diags_num} (filtered categories: {filtered_categories})"); + eprintln!( + "Total number of linter warnings suppressed: {filtered_diags_num} (filtered categories: {filtered_categories})" + ); } } -/// Sets build config's default flavor to `Flavor::Sui`. Returns error message if the flavor was -/// previously set to something else than `Flavor::Sui`. +/// Sets build config's default flavor to `Flavor::Sui`. Returns an error +/// message if the flavor was previously set to something other than +/// `Flavor::Sui`. pub fn set_sui_flavor(build_config: &mut MoveBuildConfig) -> Option<String> { use move_compiler::editions::Flavor; @@ -242,13 +245,13 @@ pub fn build_from_resolution_graph( } else { BuildConfig::compile_package(resolution_graph, &mut std::io::sink()) }; - // write build failure diagnostics to stderr, convert `error` to `String` using `Debug` - // format to include anyhow's error context chain. + // write build failure diagnostics to stderr, convert `error` to `String` using + // `Debug` format to include anyhow's error context chain. let (package, fn_info) = match result { Err(error) => { return Err(SuiError::ModuleBuildFailure { error: format!("{:?}", error), - }) + }); } Ok((package, fn_info)) => (package, fn_info), }; @@ -280,16 +283,18 @@ } impl CompiledPackage { - /// Return all of the bytecode modules in this package (not including direct or transitive deps) - /// Note: these are not topologically sorted by dependency--use `get_dependency_sorted_modules` to produce a list of modules suitable - /// for publishing or static analysis + /// Return all of the bytecode modules in this package (not including direct + /// or transitive deps) Note: these are not topologically sorted by + /// dependency--use `get_dependency_sorted_modules` to produce a list of + /// modules suitable for publishing or static analysis pub fn get_modules(&self) -> impl Iterator<Item = &CompiledModule> { self.package.root_modules().map(|m| &m.unit.module) } - /// Return all of the bytecode modules in this package (not including direct or transitive deps) - /// Note: these are not topologically sorted by dependency--use `get_dependency_sorted_modules` to produce a list of modules suitable - /// for publishing or static analysis + /// Return all of the bytecode modules in this package (not including direct + /// or transitive deps) Note: these are not topologically sorted by + /// dependency--use `get_dependency_sorted_modules` to produce a list of + /// modules suitable for publishing or static analysis pub fn into_modules(self) -> Vec<CompiledModule> { self.package .root_compiled_units @@ -298,8 +303,9 @@ .collect() } - /// Return all of the bytecode modules that this package depends on (both directly and transitively) - /// Note: these are not topologically sorted by dependency. + /// Return all of the bytecode modules that this package depends on (both + /// directly and transitively) Note: these are not topologically sorted + /// by dependency.
pub fn get_dependent_modules(&self) -> impl Iterator<Item = &CompiledModule> { self.package .deps_compiled_units @@ -307,15 +313,17 @@ .map(|(_, m)| &m.unit.module) } - /// Return all of the bytecode modules in this package and the modules of its direct and transitive dependencies. - /// Note: these are not topologically sorted by dependency. + /// Return all of the bytecode modules in this package and the modules of + /// its direct and transitive dependencies. Note: these are not + /// topologically sorted by dependency. pub fn get_modules_and_deps(&self) -> impl Iterator<Item = &CompiledModule> { self.package.all_modules().map(|m| &m.unit.module) } - /// Return the bytecode modules in this package, topologically sorted in dependency order. - /// Optionally include dependencies that have not been published (are at address 0x0), if - /// `with_unpublished_deps` is true. This is the function to call if you would like to publish + /// Return the bytecode modules in this package, topologically sorted in + /// dependency order. Optionally include dependencies that have not been + /// published (are at address 0x0), if `with_unpublished_deps` is true. + /// This is the function to call if you would like to publish /// or statically analyze the modules. pub fn get_dependency_sorted_modules( &self, @@ -328,17 +336,18 @@ let modules = graph.compute_topological_order().unwrap(); if with_unpublished_deps { - // For each transitive dependent module, if they are not to be published, they must have - // a non-zero address (meaning they are already published on-chain). + // For each transitive dependent module, if they are not to be published, they + // must have a non-zero address (meaning they are already published + // on-chain). modules .filter(|module| module.address() == &AccountAddress::ZERO) .cloned() .collect() } else { - // Collect all module IDs from the current package to be published (module names are not - // sufficient as we may have modules with the same names in user code and in Sui - // framework which would result in the latter being pulled into a set of modules to be - // published). + // Collect all module IDs from the current package to be published (module names + // are not sufficient as we may have modules with the same names in + // user code and in Sui framework which would result in the latter + // being pulled into a set of modules to be published). let self_modules: HashSet<_> = self .package .root_modules_map() @@ -354,8 +363,8 @@ } } - /// Return the set of Object IDs corresponding to this package's transitive dependencies' - /// original package IDs. + /// Return the set of Object IDs corresponding to this package's transitive + /// dependencies' original package IDs. pub fn get_dependency_original_package_ids(&self) -> Vec<ObjectID> { let mut ids: BTreeSet<_> = self .package @@ -364,8 +373,8 @@ .map(|(_, m)| ObjectID::from(*m.unit.module.address())) .collect(); - // `0x0` is not a real dependency ID -- it means that the package has unpublished - // dependencies. + // `0x0` is not a real dependency ID -- it means that the package has + // unpublished dependencies.
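The contract spelled out above for `get_dependency_sorted_modules` is that every module appears only after all of its dependencies. As a rough illustration of that invariant (not the move-package graph implementation, and with made-up module names), a deps-first ordering over a toy graph can be computed like this:

use std::collections::{BTreeMap, BTreeSet};

/// Order modules so that every module appears after all of its dependencies,
/// which is the property publishing needs. Returns None on a dependency cycle.
fn dependency_sorted(deps: &BTreeMap<&str, Vec<&str>>) -> Option<Vec<String>> {
    let mut done: BTreeSet<&str> = BTreeSet::new();
    let mut order = Vec::new();
    while done.len() < deps.len() {
        // A module is ready once all of its dependencies are already emitted.
        let ready: Vec<&str> = deps
            .iter()
            .filter(|(m, ds)| !done.contains(*m) && ds.iter().all(|d| done.contains(d)))
            .map(|(m, _)| *m)
            .collect();
        if ready.is_empty() {
            return None; // nothing ready but work remains: a cycle
        }
        for m in ready {
            done.insert(m);
            order.push(m.to_string());
        }
    }
    Some(order)
}

fn main() {
    let mut deps = BTreeMap::new();
    deps.insert("my_package", vec!["sui_framework"]);
    deps.insert("sui_framework", vec!["move_stdlib"]);
    deps.insert("move_stdlib", vec![]);
    // Prints the deps-first order: move_stdlib, sui_framework, my_package.
    println!("{:?}", dependency_sorted(&deps));
}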
ids.remove(&ObjectID::ZERO); ids.into_iter().collect() } @@ -379,7 +388,8 @@ ) } - /// Return a serialized representation of the bytecode modules in this package, topologically sorted in dependency order + /// Return a serialized representation of the bytecode modules in this + /// package, topologically sorted in dependency order pub fn get_package_bytes(&self, with_unpublished_deps: bool) -> Vec<Vec<u8>> { self.get_dependency_sorted_modules(with_unpublished_deps) .iter() @@ -391,7 +401,8 @@ .collect() } - /// Return the base64-encoded representation of the bytecode modules in this package, topologically sorted in dependency order + /// Return the base64-encoded representation of the bytecode modules in this + /// package, topologically sorted in dependency order pub fn get_package_base64(&self, with_unpublished_deps: bool) -> Vec<Base64> { self.get_package_bytes(with_unpublished_deps) .iter() @@ -419,7 +430,8 @@ .filter(|m| *m.self_id().address() == SUI_SYSTEM_ADDRESS) } - /// Get bytecode modules from the Sui Framework that are used by this package + /// Get bytecode modules from the Sui Framework that are used by this + /// package pub fn get_sui_framework_modules(&self) -> impl Iterator<Item = &CompiledModule> { self.get_modules_and_deps() .filter(|m| *m.self_id().address() == SUI_FRAMEWORK_ADDRESS) @@ -443,11 +455,12 @@ .filter(|m| *m.self_id().address() == TIMELOCK_ADDRESS) } - /// Generate layout schemas for all types declared by this package, as well as - /// all struct types passed into `entry` functions declared by modules in this package - /// (either directly or by reference). - /// These layout schemas can be consumed by clients (e.g., the TypeScript SDK) to enable - /// BCS serialization/deserialization of the package's objects, tx arguments, and events. + /// Generate layout schemas for all types declared by this package, as well + /// as all struct types passed into `entry` functions declared by + /// modules in this package (either directly or by reference). + /// These layout schemas can be consumed by clients (e.g., the TypeScript + /// SDK) to enable BCS serialization/deserialization of the package's + /// objects, tx arguments, and events. pub fn generate_struct_layouts(&self) -> Registry { let mut package_types = BTreeSet::new(); for m in self.get_modules() { @@ -458,8 +471,10 @@ for t in &s.type_parameters { if t.is_phantom { // if all of t's type parameters are phantom, we can generate a type layout - // we make this happen by creating a StructTag with dummy `type_params`, since the layout generator won't look at them. - // we need to do this because SerdeLayoutBuilder will refuse to generate a layout for any open StructTag, but phantom types + // we make this happen by creating a StructTag with dummy `type_params`, + // since the layout generator won't look at them. we + // need to do this because SerdeLayoutBuilder will refuse to generate a + // layout for any open StructTag, but phantom types // cannot affect the layout of a struct, so we just use dummy values dummy_type_parameters.push(TypeTag::Signer) } else { @@ -519,8 +534,8 @@ is_system_package(published_at) } - /// Checks for root modules with non-zero package addresses. Returns an arbitrary one, if one - /// can can be found, otherwise returns `None`. + /// Checks for root modules with non-zero package addresses. Returns an + /// arbitrary one, if one can be found, otherwise returns `None`.
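The phantom-parameter workaround in `generate_struct_layouts` above can be shown in isolation: when a type parameter is phantom, the struct's layout cannot depend on it, so any dummy tag closes the type. A sketch with simplified stand-ins for `TypeTag` and the type-parameter metadata (not the real move-core types):

#[derive(Clone, Debug)]
enum TypeTag {
    Signer,
    U64,
}

struct TypeParam {
    is_phantom: bool,
}

// Close an open struct type: phantom slots get a dummy tag, non-phantom slots
// consume a real tag from `concrete`. Returns None if a real tag is missing.
fn close_type_params(params: &[TypeParam], concrete: &[TypeTag]) -> Option<Vec<TypeTag>> {
    let mut out = Vec::new();
    let mut it = concrete.iter();
    for p in params {
        if p.is_phantom {
            out.push(TypeTag::Signer) // dummy value; layout can't depend on it
        } else {
            out.push(it.next()?.clone()) // a concrete tag is required here
        }
    }
    Some(out)
}

fn main() {
    let params = [TypeParam { is_phantom: true }, TypeParam { is_phantom: false }];
    let closed = close_type_params(&params, &[TypeTag::U64]);
    println!("{closed:?}"); // Some([Signer, U64])
}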
pub fn published_root_module(&self) -> Option<&CompiledModule> { self.package.root_compiled_units.iter().find_map(|unit| { if unit.unit.module.self_id().address() != &AccountAddress::ZERO { @@ -600,7 +615,8 @@ impl Default for BuildConfig { impl GetModule for CompiledPackage { type Error = anyhow::Error; - // TODO: return ref here for better efficiency? Borrow checker + all_modules_map() make it hard to do this + // TODO: return ref here for better efficiency? Borrow checker + + // all_modules_map() make it hard to do this type Item = CompiledModule; fn get_module_by_id(&self, id: &ModuleId) -> Result, Self::Error> { @@ -668,7 +684,8 @@ pub enum PublishedAtError { /// - The ID that the package itself is published at (if it is published) /// - The IDs of dependencies that have been published /// - The names of packages that have not been published on chain. -/// - The names of packages that have a `published-at` field that isn't filled with a valid address. +/// - The names of packages that have a `published-at` field that isn't filled +/// with a valid address. pub fn gather_published_ids( resolution_graph: &ResolvedGraph, ) -> (Result, PackageDependencies) { diff --git a/crates/sui-move-build/src/unit_tests/build_tests.rs b/crates/sui-move-build/src/unit_tests/build_tests.rs index dc268bb45e2..c287ca3f760 100644 --- a/crates/sui-move-build/src/unit_tests/build_tests.rs +++ b/crates/sui-move-build/src/unit_tests/build_tests.rs @@ -7,7 +7,8 @@ use crate::BuildConfig; #[test] fn generate_struct_layouts() { - // build the Sui framework and generate struct layouts to make sure nothing crashes + // build the Sui framework and generate struct layouts to make sure nothing + // crashes let path = Path::new(env!("CARGO_MANIFEST_DIR")) .parent() .unwrap() diff --git a/crates/sui-move/src/build.rs b/crates/sui-move/src/build.rs index 66e50f2b83f..ee7471c0955 100644 --- a/crates/sui-move/src/build.rs +++ b/crates/sui-move/src/build.rs @@ -1,12 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{fs, path::PathBuf}; + use clap::Parser; use move_cli::base; -use move_package::source_package::layout::SourcePackageLayout; -use move_package::BuildConfig as MoveBuildConfig; +use move_package::{source_package::layout::SourcePackageLayout, BuildConfig as MoveBuildConfig}; use serde_json::json; -use std::{fs, path::PathBuf}; use sui_move_build::{check_invalid_dependencies, check_unpublished_dependencies, BuildConfig}; const LAYOUTS_DIR: &str = "layouts"; @@ -15,18 +15,18 @@ const STRUCT_LAYOUTS_FILENAME: &str = "struct_layouts.yaml"; #[derive(Parser)] #[group(id = "sui-move-build")] pub struct Build { - /// Include the contents of packages in dependencies that haven't been published (only relevant - /// when dumping bytecode as base64) + /// Include the contents of packages in dependencies that haven't been + /// published (only relevant when dumping bytecode as base64) #[clap(long, global = true)] pub with_unpublished_dependencies: bool, /// Whether we are printing in base64. #[clap(long, global = true)] pub dump_bytecode_as_base64: bool, /// If true, generate struct layout schemas for - /// all struct types passed into `entry` functions declared by modules in this package - /// These layout schemas can be consumed by clients (e.g., - /// the TypeScript SDK) to enable serialization/deserialization of transaction arguments - /// and events. 
+ /// all struct types passed into `entry` functions declared by modules in + /// this package These layout schemas can be consumed by clients (e.g., + /// the TypeScript SDK) to enable serialization/deserialization of + /// transaction arguments and events. #[clap(long, global = true)] pub generate_struct_layouts: bool, } diff --git a/crates/sui-move/src/coverage.rs b/crates/sui-move/src/coverage.rs index 830e0452933..c78b3351e86 100644 --- a/crates/sui-move/src/coverage.rs +++ b/crates/sui-move/src/coverage.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::path::PathBuf; + use clap::Parser; use move_cli::base::coverage; use move_package::BuildConfig; -use std::path::PathBuf; #[derive(Parser)] #[group(id = "sui-move-coverage")] diff --git a/crates/sui-move/src/disassemble.rs b/crates/sui-move/src/disassemble.rs index b035133c742..89f72eb2bfa 100644 --- a/crates/sui-move/src/disassemble.rs +++ b/crates/sui-move/src/disassemble.rs @@ -1,16 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{ + fs::File, + io::{BufReader, Read}, + path::{Path, PathBuf}, +}; + use clap::Parser; use move_binary_format::{binary_views::BinaryIndexedView, CompiledModule}; use move_cli::base; use move_disassembler::disassembler::Disassembler; use move_ir_types::location::Spanned; use move_package::BuildConfig; -use std::fs::File; -use std::io::{BufReader, Read}; -use std::path::Path; -use std::path::PathBuf; #[derive(Parser)] #[group(id = "sui-move-disassemmble")] @@ -31,7 +33,8 @@ impl Disassemble { build_config: BuildConfig, ) -> anyhow::Result<()> { if base::reroot_path(Some(self.module_path.clone())).is_ok() { - // disassembling bytecode inside the source package that produced it--use the source info + // disassembling bytecode inside the source package that produced it--use the + // source info let module_name = self .module_path .file_stem() @@ -58,8 +61,9 @@ impl Disassemble { let mut bytes = Vec::new(); let mut file = BufReader::new(File::open(self.module_path)?); file.read_to_end(&mut bytes)?; - // this deserialized a module to the max version of the bytecode but it's OK here because - // it's not run as part of the deterministic replicated state machine. + // this deserialized a module to the max version of the bytecode but it's OK + // here because it's not run as part of the deterministic replicated + // state machine. let module = CompiledModule::deserialize_with_defaults(&bytes)?; if self.debug { diff --git a/crates/sui-move/src/lib.rs b/crates/sui-move/src/lib.rs index 18f9f8444fd..6e2aed8fcbc 100644 --- a/crates/sui-move/src/lib.rs +++ b/crates/sui-move/src/lib.rs @@ -1,11 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::path::PathBuf; + use clap::Parser; #[cfg(feature = "unit_test")] use move_cli::base::test::UnitTestResult; use move_package::BuildConfig; -use std::path::PathBuf; use sui_move_build::set_sui_flavor; #[cfg(feature = "build")] diff --git a/crates/sui-move/src/main.rs b/crates/sui-move/src/main.rs index 674278ad0f8..fe56307d978 100644 --- a/crates/sui-move/src/main.rs +++ b/crates/sui-move/src/main.rs @@ -39,7 +39,8 @@ struct Args { /// Path to a package which the command should be run with respect to. 
#[clap(long = "path", short = 'p', global = true)] pub package_path: Option, - /// If true, run the Move bytecode verifier on the bytecode from a successful build + /// If true, run the Move bytecode verifier on the bytecode from a + /// successful build #[clap(long = "path", short = 'p', global = true)] pub run_bytecode_verifier: bool, /// If true, print build diagnostics to stderr--no printing if false diff --git a/crates/sui-move/src/manage_package.rs b/crates/sui-move/src/manage_package.rs index 5dc087b8cc3..2ac1498bd66 100644 --- a/crates/sui-move/src/manage_package.rs +++ b/crates/sui-move/src/manage_package.rs @@ -1,12 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::path::PathBuf; + use clap::Parser; use move_package::BuildConfig; -use std::path::PathBuf; use sui_types::base_types::ObjectID; -/// Record addresses (Object IDs) for where this package is published on chain (this command sets variables in Move.lock). +/// Record addresses (Object IDs) for where this package is published on chain +/// (this command sets variables in Move.lock). #[derive(Parser)] #[group(id = "sui-move-manage-package")] pub struct ManagePackage { @@ -17,10 +19,15 @@ pub struct ManagePackage { /// The original address (Object ID) where this package is published. pub original_id: ObjectID, #[clap(long = "latest-id", value_parser = ObjectID::from_hex_literal)] - /// The most recent address (Object ID) where this package is published. It is the same as 'original-id' if the package is immutable and published once. It is different from 'original-id' if the package has been upgraded to a different address. + /// The most recent address (Object ID) where this package is published. It + /// is the same as 'original-id' if the package is immutable and published + /// once. It is different from 'original-id' if the package has been + /// upgraded to a different address. pub latest_id: ObjectID, #[clap(long = "version-number")] - /// The version number of the published package. It is '1' if the package is immutable and published once. It is some number greater than '1' if the package has been upgraded once or more. + /// The version number of the published package. It is '1' if the package is + /// immutable and published once. It is some number greater than '1' if the + /// package has been upgraded once or more. pub version_number: u64, } diff --git a/crates/sui-move/src/migrate.rs b/crates/sui-move/src/migrate.rs index 63e88e07d96..b959e3a12a3 100644 --- a/crates/sui-move/src/migrate.rs +++ b/crates/sui-move/src/migrate.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::path::PathBuf; + use clap::Parser; use move_cli::base::migrate; use move_package::BuildConfig as MoveBuildConfig; -use std::path::PathBuf; #[derive(Parser)] #[group(id = "sui-move-migrate")] diff --git a/crates/sui-move/src/new.rs b/crates/sui-move/src/new.rs index ac8d13e4daa..d836387696d 100644 --- a/crates/sui-move/src/new.rs +++ b/crates/sui-move/src/new.rs @@ -1,18 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use clap::Parser; -use move_cli::base::new; -use move_package::source_package::layout::SourcePackageLayout; use std::{ fs::create_dir_all, io::Write, path::{Path, PathBuf}, }; +use clap::Parser; +use move_cli::base::new; +use move_package::source_package::layout::SourcePackageLayout; + const SUI_PKG_NAME: &str = "Sui"; -// Use testnet by default. 
Probably want to add options to make this configurable later +// Use testnet by default. Probably want to add options to make this +// configurable later const SUI_PKG_PATH: &str = "{ git = \"https://github.com/MystenLabs/sui.git\", subdir = \"crates/sui-framework/packages/sui-framework\", rev = \"framework/testnet\" }"; #[derive(Parser)] diff --git a/crates/sui-move/src/unit_test.rs b/crates/sui-move/src/unit_test.rs index 05cd1841151..4492ac2d92c 100644 --- a/crates/sui-move/src/unit_test.rs +++ b/crates/sui-move/src/unit_test.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::{collections::BTreeMap, path::PathBuf, sync::Arc}; + use clap::Parser; use move_cli::base::{ self, @@ -10,7 +12,6 @@ use move_package::BuildConfig; use move_unit_test::{extensions::set_extension_hook, UnitTestingConfig}; use move_vm_runtime::native_extensions::NativeContextExtensions; use once_cell::sync::Lazy; -use std::{collections::BTreeMap, path::PathBuf, sync::Arc}; use sui_move_build::decorate_warnings; use sui_move_natives::{object_runtime::ObjectRuntime, NativesCostTable}; use sui_protocol_config::ProtocolConfig; @@ -23,7 +24,8 @@ use sui_types::{ storage::ChildObjectResolver, }; -// Move unit tests will halt after executing this many steps. This is a protection to avoid divergence +// Move unit tests will halt after executing this many steps. This is a +// protection to avoid divergence const MAX_UNIT_TEST_INSTRUCTIONS: u64 = 1_000_000; #[derive(Parser)] @@ -45,7 +47,8 @@ impl Test { "The --coverage flag is currently supported only in debug builds. Please build the Sui CLI from source in debug mode." )); } - // find manifest file directory from a given path or (if missing) from current dir + // find manifest file directory from a given path or (if missing) from current + // dir let rerooted_path = base::reroot_path(path)?; let unit_test_config = self.test.unit_test_config(); run_move_unit_tests( @@ -84,8 +87,9 @@ static TEST_STORE: Lazy<DummyChildObjectStore> = Lazy::new(|| DummyChildObjectSt static SET_EXTENSION_HOOK: Lazy<()> = Lazy::new(|| set_extension_hook(Box::new(new_testing_object_and_natives_cost_runtime))); -/// This function returns a result of UnitTestResult. The outer result indicates whether it -/// successfully started running the test, and the inner result indicatests whether all tests pass. +/// This function returns a result of UnitTestResult. The outer result indicates +/// whether it successfully started running the test, and the inner result +/// indicates whether all tests pass. pub fn run_move_unit_tests( path: PathBuf, build_config: BuildConfig, diff --git a/crates/sui-network/build.rs b/crates/sui-network/build.rs index 34bc3c387a7..84693cd993f 100644 --- a/crates/sui-network/build.rs +++ b/crates/sui-network/build.rs @@ -5,6 +5,7 @@ use std::{ env, path::{Path, PathBuf}, }; + use tonic_build::manual::{Builder, Method, Service}; type Result<T> = ::std::result::Result<T, Box<dyn std::error::Error>>; diff --git a/crates/sui-network/src/discovery/builder.rs b/crates/sui-network/src/discovery/builder.rs index 71ce0641540..a957c5d3f69 100644 --- a/crates/sui-network/src/discovery/builder.rs +++ b/crates/sui-network/src/discovery/builder.rs @@ -1,16 +1,13 @@ // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use super::{ - metrics::Metrics, server::Server, Discovery, DiscoveryEventLoop, DiscoveryServer, State, -}; -use crate::discovery::TrustedPeerChangeEvent; -use anemo::codegen::InboundRequestLayer; -use anemo_tower::rate_limit; use std::{ collections::HashMap, sync::{Arc, RwLock}, }; + +use anemo::codegen::InboundRequestLayer; +use anemo_tower::rate_limit; use sui_config::p2p::P2pConfig; use tap::Pipe; use tokio::{ @@ -18,6 +15,11 @@ use tokio::{ task::JoinSet, }; +use super::{ + metrics::Metrics, server::Server, Discovery, DiscoveryEventLoop, DiscoveryServer, State, +}; +use crate::discovery::TrustedPeerChangeEvent; + /// Discovery Service Builder. pub struct Builder { config: Option, @@ -166,8 +168,8 @@ impl UnstartedDiscovery { } } -/// A Handle to the Discovery subsystem. The Discovery system will be shutdown once its Handle has -/// been dropped. +/// A Handle to the Discovery subsystem. The Discovery system will be shutdown +/// once its Handle has been dropped. pub struct Handle { _shutdown_handle: Arc>, } diff --git a/crates/sui-network/src/discovery/metrics.rs b/crates/sui-network/src/discovery/metrics.rs index edddfac68c1..d2ca05c5601 100644 --- a/crates/sui-network/src/discovery/metrics.rs +++ b/crates/sui-network/src/discovery/metrics.rs @@ -1,8 +1,9 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; use std::sync::Arc; + +use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; use tap::Pipe; #[derive(Clone)] diff --git a/crates/sui-network/src/discovery/mod.rs b/crates/sui-network/src/discovery/mod.rs index 093bb2ffb49..00586067668 100644 --- a/crates/sui-network/src/discovery/mod.rs +++ b/crates/sui-network/src/discovery/mod.rs @@ -1,22 +1,23 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anemo::types::PeerInfo; -use anemo::{types::PeerEvent, Network, Peer, PeerId, Request, Response}; -use futures::StreamExt; -use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, sync::{Arc, RwLock}, time::Duration, }; + +use anemo::{ + types::{PeerEvent, PeerInfo}, + Network, Peer, PeerId, Request, Response, +}; +use futures::StreamExt; +use serde::{Deserialize, Serialize}; use sui_config::p2p::{AccessType, DiscoveryConfig, P2pConfig, SeedPeer}; use sui_types::multiaddr::Multiaddr; use tap::{Pipe, TapFallible}; -use tokio::sync::broadcast::error::RecvError; -use tokio::sync::watch; use tokio::{ - sync::oneshot, + sync::{broadcast::error::RecvError, oneshot, watch}, task::{AbortHandle, JoinSet}, }; use tracing::{debug, info, trace}; @@ -42,7 +43,8 @@ pub use server::GetKnownPeersResponse; use self::metrics::Metrics; -/// The internal discovery state shared between the main event loop and the request handler +/// The internal discovery state shared between the main event loop and the +/// request handler struct State { our_info: Option, connected_peers: HashMap, @@ -51,8 +53,8 @@ struct State { /// The information necessary to dial another peer. /// -/// `NodeInfo` contains all the information that is shared with other nodes via the discovery -/// service to advertise how a node can be reached. +/// `NodeInfo` contains all the information that is shared with other nodes via +/// the discovery service to advertise how a node can be reached. 
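The split described above, one event loop writing discovery state while a request handler serves reads, hinges on sharing `State` behind `Arc<RwLock<..>>`. A minimal sketch of that pattern with stand-in types (plain threads instead of tokio tasks, for brevity):

use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
    thread,
};

#[derive(Default)]
struct State {
    known_peers: HashMap<u64, String>, // simplified stand-in for NodeInfo
}

fn main() {
    let state = Arc::new(RwLock::new(State::default()));

    // "Event loop": updates the shared state as peers are discovered.
    let writer = Arc::clone(&state);
    let event_loop = thread::spawn(move || {
        writer.write().unwrap().known_peers.insert(1, "/dns/peer1".into());
    });
    event_loop.join().unwrap();

    // "Request handler": serves reads of the same state.
    let peers = state.read().unwrap();
    assert_eq!(peers.known_peers.len(), 1);
}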
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub struct NodeInfo { pub peer_id: PeerId, @@ -60,7 +62,8 @@ /// Creation time. /// - /// This is used to determine which of two NodeInfo's from the same PeerId should be retained. + /// This is used to determine which of two NodeInfo's from the same PeerId + /// should be retained. pub timestamp_ms: u64, /// See docstring for `AccessType`. @@ -180,8 +183,8 @@ impl DiscoveryEventLoop { None }; - // TODO: once we have `PeerAffinity::Allowlisted` we should update allowlisted peers' - // affinity. + // TODO: once we have `PeerAffinity::Allowlisted` we should update allowlisted + // peers' affinity. let peer_info = anemo::types::PeerInfo { peer_id, affinity: anemo::types::PeerAffinity::High, @@ -197,8 +200,8 @@ } } - // TODO: we don't boot out old committee member yets, however we may want to do this - // in the future along with other network management work. + // TODO: we don't boot out old committee members yet, however we may want to do + // this in the future along with other network management work. fn handle_trusted_peer_change_event( &mut self, trusted_peer_change_event: TrustedPeerChangeEvent, @@ -305,8 +308,8 @@ self.pending_dials.insert(*peer_id, abort_handle); } - // If we aren't connected to anything and we aren't presently trying to connect to anyone - // we need to try the seed peers + // If we aren't connected to anything and we aren't presently trying to connect + // to anyone we need to try the seed peers if self.dial_seed_peers_task.is_none() && state.connected_peers.is_empty() && self.pending_dials.is_empty() diff --git a/crates/sui-network/src/discovery/server.rs b/crates/sui-network/src/discovery/server.rs index 9bf0314ff3a..4b868e0cd11 100644 --- a/crates/sui-network/src/discovery/server.rs +++ b/crates/sui-network/src/discovery/server.rs @@ -1,10 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::{Discovery, NodeInfo, State}; +use std::sync::{Arc, RwLock}; + use anemo::{Request, Response}; use serde::{Deserialize, Serialize}; -use std::sync::{Arc, RwLock}; + +use super::{Discovery, NodeInfo, State}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct GetKnownPeersResponse { diff --git a/crates/sui-network/src/discovery/tests.rs b/crates/sui-network/src/discovery/tests.rs index 7d5fcbcdbfa..5ec355c8c79 100644 --- a/crates/sui-network/src/discovery/tests.rs +++ b/crates/sui-network/src/discovery/tests.rs @@ -1,16 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::*; -use crate::utils::{build_network, build_network_with_anemo_config}; -use anemo::types::PeerAffinity; -use anemo::Result; +use std::collections::HashSet; + +use anemo::{types::PeerAffinity, Result}; use fastcrypto::ed25519::Ed25519PublicKey; use futures::stream::FuturesUnordered; -use std::collections::HashSet; use sui_config::p2p::AllowlistedPeer; use tokio::time::timeout; +use super::*; +use crate::utils::{build_network, build_network_with_anemo_config}; + #[tokio::test] async fn get_known_peers() -> Result<()> { let config = P2pConfig::default(); @@ -251,20 +252,21 @@ async fn peers_are_added_from_reocnfig_channel() -> Result<()> { #[tokio::test] async fn test_access_types() { - // This test case constructs a mesh graph of 11 nodes, with the following topology. - // For allowlisted nodes, `+` means the peer is allowlisted with an address, otherwise not.
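The timestamp rule documented above, that of two `NodeInfo` records for the same `PeerId` the newer one is retained, is a one-liner in practice. A sketch with simplified types (not the real discovery structs):

// Toy NodeInfo: only the fields the retention rule needs.
#[derive(Clone, Debug, PartialEq)]
struct NodeInfo {
    peer_id: u64,
    timestamp_ms: u64,
}

// Keep the record with the newer creation time; ties favor what we hold.
fn retain_newer(current: Option<NodeInfo>, incoming: NodeInfo) -> NodeInfo {
    match current {
        Some(cur) if cur.timestamp_ms >= incoming.timestamp_ms => cur,
        _ => incoming,
    }
}

fn main() {
    let old = NodeInfo { peer_id: 1, timestamp_ms: 1_000 };
    let newer = NodeInfo { peer_id: 1, timestamp_ms: 2_000 };
    assert_eq!(retain_newer(Some(old), newer.clone()), newer);
}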
- // An allowlisted peer with address will be proactively connected in anemo network. + // This test case constructs a mesh graph of 11 nodes, with the following + // topology. For allowlisted nodes, `+` means the peer is allowlisted with + // an address, otherwise not. An allowlisted peer with address will be + // proactively connected in anemo network. // // // The topology: - // ------------ 11 (private, seed: 1, allowed: 7, 8) - // / + // ------------ 11 (private, seed: 1, + // allowed: 7, 8) / // ------ 1 (public) ------ // / \ - // 2 (public, seed: 1, allowed: 7, 8) 3 (private, seed: 1, allowed: 4+, 5+) - // | / \ - // | 4 (private, allowed: 3+, 5, 6) 5 (private, allowed: 3, 4+) - // | \ + // 2 (public, seed: 1, allowed: 7, 8) 3 (private, seed: 1, allowed: + // 4+, 5+) | / \ + // | 4 (private, allowed: 3+, 5, 6) 5 (private, + // allowed: 3, 4+) | \ // | 6 (private, allowed: 4+) // 7 (private, allowed: 2+, 8+) // | @@ -491,7 +493,8 @@ async fn test_access_types() { ]), ); - // Node 1 is connected to everyone. But it does not "know" private nodes except the allowlisted ones 7 and 8. + // Node 1 is connected to everyone. But it does not "know" private nodes except + // the allowlisted ones 7 and 8. assert_peers( "Node 2", &network_2, @@ -554,7 +557,8 @@ async fn test_access_types() { HashSet::from_iter(vec![peer_id_1, peer_id_2, peer_id_4, peer_id_9]), ); - // Node 11 finds Node 7 via Node 2, and invites Node 7 to connect. Node 7 says yes. + // Node 11 finds Node 7 via Node 2, and invites Node 7 to connect. Node 7 says + // yes. assert_peers( "Node 7", &network_7, @@ -565,8 +569,8 @@ async fn test_access_types() { HashSet::from_iter(vec![peer_id_1, peer_id_2, peer_id_8, peer_id_9, peer_id_11]), ); - // Node 11 finds Node 8 via Node 2, and invites Node 8 to connect. Node 8 said No - // because its `max_concurrent_connections` is 0. + // Node 11 finds Node 8 via Node 2, and invites Node 8 to connect. Node 8 said + // No because its `max_concurrent_connections` is 0. assert_peers( "Node 8", &network_8, diff --git a/crates/sui-network/src/lib.rs b/crates/sui-network/src/lib.rs index cd421047179..1b79970c1f9 100644 --- a/crates/sui-network/src/lib.rs +++ b/crates/sui-network/src/lib.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use mysten_network::config::Config; use std::time::Duration; +use mysten_network::config::Config; + pub mod api; pub mod discovery; pub mod randomness; diff --git a/crates/sui-network/src/randomness/auth.rs b/crates/sui-network/src/randomness/auth.rs index c8951c6c159..9b008e06a04 100644 --- a/crates/sui-network/src/randomness/auth.rs +++ b/crates/sui-network/src/randomness/auth.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::{collections::HashSet, sync::Arc}; + use anemo_tower::auth::AuthorizeRequest; use arc_swap::ArcSwap; use bytes::Bytes; -use std::{collections::HashSet, sync::Arc}; #[derive(Clone, Debug)] pub(crate) struct AllowedPeersUpdatable { diff --git a/crates/sui-network/src/randomness/builder.rs b/crates/sui-network/src/randomness/builder.rs index 957feda1ce1..9efcb526fbf 100644 --- a/crates/sui-network/src/randomness/builder.rs +++ b/crates/sui-network/src/randomness/builder.rs @@ -6,16 +6,17 @@ use std::{ sync::Arc, }; -use super::{ - auth::AllowedPeersUpdatable, metrics::Metrics, server::Server, Handle, RandomnessEventLoop, - RandomnessMessage, RandomnessServer, -}; use anemo::codegen::InboundRequestLayer; use anemo_tower::{auth::RequireAuthorizationLayer, inflight_limit}; use sui_config::p2p::RandomnessConfig; use sui_types::{base_types::AuthorityName, committee::EpochId, crypto::RandomnessRound}; use tokio::sync::mpsc; +use super::{ + auth::AllowedPeersUpdatable, metrics::Metrics, server::Server, Handle, RandomnessEventLoop, + RandomnessMessage, RandomnessServer, +}; + /// Randomness Service Builder. pub struct Builder { name: AuthorityName, diff --git a/crates/sui-network/src/randomness/metrics.rs b/crates/sui-network/src/randomness/metrics.rs index 971327ee04c..d8d834389ff 100644 --- a/crates/sui-network/src/randomness/metrics.rs +++ b/crates/sui-network/src/randomness/metrics.rs @@ -1,11 +1,12 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + use prometheus::{ register_histogram_with_registry, register_int_gauge_with_registry, Histogram, IntGauge, Registry, }; -use std::sync::Arc; use sui_types::{committee::EpochId, crypto::RandomnessRound}; use tap::Pipe; diff --git a/crates/sui-network/src/randomness/mod.rs b/crates/sui-network/src/randomness/mod.rs index a98038586ab..4a2c4a6aa8a 100644 --- a/crates/sui-network/src/randomness/mod.rs +++ b/crates/sui-network/src/randomness/mod.rs @@ -1,7 +1,13 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use self::{auth::AllowedPeersUpdatable, metrics::Metrics}; +use std::{ + collections::{btree_map::BTreeMap, BTreeSet, HashMap}, + ops::Bound, + sync::Arc, + time::{self, Duration}, +}; + use anemo::PeerId; use anyhow::Result; use fastcrypto::groups::bls12381; @@ -9,12 +15,6 @@ use fastcrypto_tbls::{dkg, nodes::PartyId, tbls::ThresholdBls, types::ThresholdB use mysten_metrics::spawn_monitored_task; use mysten_network::anemo_ext::NetworkExt; use serde::{Deserialize, Serialize}; -use std::{ - collections::{btree_map::BTreeMap, BTreeSet, HashMap}, - ops::Bound, - sync::Arc, - time::{self, Duration}, -}; use sui_config::p2p::RandomnessConfig; use sui_types::{ base_types::AuthorityName, @@ -24,6 +24,8 @@ use sui_types::{ use tokio::sync::mpsc; use tracing::{debug, error, info, instrument, warn}; +use self::{auth::AllowedPeersUpdatable, metrics::Metrics}; + mod auth; mod builder; mod generated { @@ -54,16 +56,17 @@ pub struct SendSignaturesRequest { /// A handle to the Randomness network subsystem. /// -/// This handle can be cloned and shared. Once all copies of a Randomness system's Handle have been -/// dropped, the Randomness system will be gracefully shutdown. +/// This handle can be cloned and shared. Once all copies of a Randomness +/// system's Handle have been dropped, the Randomness system will be gracefully +/// shutdown. 
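The drop-to-shutdown behavior documented for `Handle` above is the standard tokio mpsc idiom: the event loop owns the receiver and exits once every cloned sender is gone. A runnable sketch under that assumption (only tokio is required; the message type is a placeholder):

use tokio::sync::mpsc;

#[derive(Clone)]
struct Handle {
    sender: mpsc::Sender<&'static str>,
}

#[tokio::main]
async fn main() {
    let (sender, mut receiver) = mpsc::channel(16);
    let handle = Handle { sender };

    let event_loop = tokio::spawn(async move {
        // recv() yields None only once every sender clone has been dropped.
        while let Some(msg) = receiver.recv().await {
            println!("event: {msg}");
        }
        println!("all handles dropped; shutting down gracefully");
    });

    let clone = handle.clone();
    clone.sender.send("update_epoch").await.unwrap();
    drop(clone);
    drop(handle); // last Handle gone: the loop drains and exits
    event_loop.await.unwrap();
}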
#[derive(Clone, Debug)] pub struct Handle { sender: mpsc::Sender, } impl Handle { - /// Transitions the Randomness system to a new epoch. Cancels all partial signature sends for - /// prior epochs. + /// Transitions the Randomness system to a new epoch. Cancels all partial + /// signature sends for prior epochs. pub fn update_epoch( &self, new_epoch: EpochId, @@ -81,14 +84,16 @@ impl Handle { .expect("RandomnessEventLoop mailbox should not overflow or be closed") } - /// Begins transmitting partial signatures for the given epoch and round until completed. + /// Begins transmitting partial signatures for the given epoch and round + /// until completed. pub fn send_partial_signatures(&self, epoch: EpochId, round: RandomnessRound) { self.sender .try_send(RandomnessMessage::SendPartialSignatures(epoch, round)) .expect("RandomnessEventLoop mailbox should not overflow or be closed") } - /// Records the given round as complete, stopping any partial signature sends. + /// Records the given round as complete, stopping any partial signature + /// sends. pub fn complete_round(&self, epoch: EpochId, round: RandomnessRound) { self.sender .try_send(RandomnessMessage::CompleteRound(epoch, round)) @@ -253,8 +258,13 @@ impl RandomnessEventLoop { let mut aggregate_rounds = BTreeSet::new(); for (epoch, round, _) in self.received_partial_sigs.keys() { if *epoch < new_epoch { - error!("BUG: received partial sigs for old epoch still present after attempting to remove them"); - debug_assert!(false, "received partial sigs for old epoch still present after attempting to remove them"); + error!( + "BUG: received partial sigs for old epoch still present after attempting to remove them" + ); + debug_assert!( + false, + "received partial sigs for old epoch still present after attempting to remove them" + ); continue; } if *epoch > new_epoch { @@ -350,8 +360,9 @@ impl RandomnessEventLoop { return; }; if sig_bytes.len() != *expected_share_count as usize { - // No need to verify share IDs here as well, since if we receive incorrect IDs, we - // will catch it later when aggregating/verifying the partial sigs. + // No need to verify share IDs here as well, since if we receive incorrect IDs, + // we will catch it later when aggregating/verifying the partial + // sigs. debug!( "received partial sigs with wrong share count: expected {expected_share_count}, got {}", sig_bytes.len(), @@ -372,14 +383,14 @@ impl RandomnessEventLoop { .saturating_add(self.config.max_partial_sigs_rounds_ahead()) { debug!( - "skipping received partial sigs, most recent round we completed was only {last_completed_round}", - ); + "skipping received partial sigs, most recent round we completed was only {last_completed_round}", + ); return; } if epoch > last_completed_epoch && round.0 >= self.config.max_partial_sigs_rounds_ahead() { debug!( - "skipping received partial sigs, most recent epoch we completed was only {last_completed_epoch}", - ); + "skipping received partial sigs, most recent epoch we completed was only {last_completed_epoch}", + ); return; } @@ -419,11 +430,14 @@ impl RandomnessEventLoop { if !(self.send_tasks.contains_key(&(epoch, round)) || self.pending_tasks.contains(&(epoch, round))) { - // We have to wait here, because even if we have enough information from other nodes - // to complete the signature, local shared object versions are not set until consensus - // finishes processing the corresponding commit. 
This function will be called again + // after maybe_start_pending_tasks begins this round locally. - debug!("waiting to aggregate randomness partial signatures until local consensus catches up"); + debug!( + "waiting to aggregate randomness partial signatures until local consensus catches up" + ); return; } @@ -445,20 +459,24 @@ .received_partial_sigs .range(sig_bounds) .flat_map(|(_, sigs)| sigs); - let mut sig = - match ThresholdBls12381MinSig::aggregate(self.aggregation_threshold, sig_range) { - Ok(sig) => sig, - Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => return, // wait for more input - Err(e) => { - error!("error while aggregating randomness partial signatures: {e:?}"); - return; - } - }; + let mut sig = match ThresholdBls12381MinSig::aggregate( + self.aggregation_threshold, + sig_range, + ) { + Ok(sig) => sig, + Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => return, // wait for more + // input + Err(e) => { + error!("error while aggregating randomness partial signatures: {e:?}"); + return; + } + }; - // Try to verify the aggregated signature all at once. (Should work in the happy path.) + // Try to verify the aggregated signature all at once. (Should work in the happy + // path.) if ThresholdBls12381MinSig::verify(vss_pk.c0(), &round.signature_message(), &sig).is_err() { - // If verifiation fails, some of the inputs must be invalid. We have to go through - // one-by-one to find which. + // If verification fails, some of the inputs must be invalid. We have to go + // through one-by-one to find which. // TODO: add test for individual sig verification. self.received_partial_sigs .retain(|&(e, r, peer_id), partial_sigs| { @@ -487,7 +505,7 @@ .flat_map(|(_, sigs)| sigs); sig = match ThresholdBls12381MinSig::aggregate(self.aggregation_threshold, sig_range) { Ok(sig) => sig, - Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => return, // wait for more input + Err(fastcrypto::error::FastCryptoError::NotEnoughInputs) => return, /* wait for more input */ Err(e) => { error!("error while aggregating randomness partial signatures: {e:?}"); return; @@ -496,8 +514,13 @@ if let Err(e) = ThresholdBls12381MinSig::verify(vss_pk.c0(), &round.signature_message(), &sig) { - error!("error while verifying randomness partial signatures after removing invalid partials: {e:?}"); - debug_assert!(false, "error while verifying randomness partial signatures after removing invalid partials"); + error!( + "error while verifying randomness partial signatures after removing invalid partials: {e:?}" + ); + debug_assert!( + false, + "error while verifying randomness partial signatures after removing invalid partials" + ); return; } } @@ -517,7 +540,8 @@ .map(|(key, _)| *key) .collect(); for key in keys_to_remove { - // Have to remove keys one-by-one because BTreeMap does not support range-removal. + // Have to remove keys one-by-one because BTreeMap does not support + // range-removal. self.received_partial_sigs.remove(&key); } @@ -616,8 +640,8 @@ } self.update_rounds_pending_metric(); - // After starting a round, we have generated our own partial sigs.
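The recovery strategy in the hunks above, aggregate everything first, and only on verification failure weed out bad partials one by one before re-aggregating, can be shown with a toy "signature" scheme standing in for threshold BLS (the verify and aggregate functions below are deliberately fake):

// Stand-in predicate for real partial-signature verification.
fn verify(partial: &i64) -> bool {
    *partial >= 0
}

// Stand-in for threshold-BLS aggregation.
fn aggregate(parts: &[i64]) -> i64 {
    parts.iter().sum()
}

fn aggregate_with_recovery(mut parts: Vec<i64>, expected: i64) -> Option<i64> {
    let sig = aggregate(&parts);
    if sig == expected {
        return Some(sig); // happy path: one aggregate + one verify
    }
    // Slow path: drop contributions that fail individual verification, retry.
    parts.retain(|p| verify(p));
    let sig = aggregate(&parts);
    (sig == expected).then_some(sig)
}

fn main() {
    // Five honest contributions plus one invalid (-3) that the filter removes.
    let parts = vec![1, 2, 3, 4, 5, -3];
    assert_eq!(aggregate_with_recovery(parts, 15), Some(15));
}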
Check if that's - // enough for us to aggregate already. + // After starting a round, we have generated our own partial sigs. Check if + // that's enough for us to aggregate already. for (epoch, round) in rounds_to_aggregate { self.maybe_aggregate_partial_signatures(epoch, round); } diff --git a/crates/sui-network/src/randomness/server.rs b/crates/sui-network/src/randomness/server.rs index be60bd5e04f..dc493a562f1 100644 --- a/crates/sui-network/src/randomness/server.rs +++ b/crates/sui-network/src/randomness/server.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::{Randomness, RandomnessMessage, SendSignaturesRequest}; use anemo::{Request, Response}; use tokio::sync::mpsc; +use super::{Randomness, RandomnessMessage, SendSignaturesRequest}; + pub(super) struct Server { pub(super) sender: mpsc::WeakSender, } diff --git a/crates/sui-network/src/randomness/tests.rs b/crates/sui-network/src/randomness/tests.rs index b8f40a13fed..9c7ec25032e 100644 --- a/crates/sui-network/src/randomness/tests.rs +++ b/crates/sui-network/src/randomness/tests.rs @@ -1,7 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{randomness::*, utils}; use fastcrypto::{groups::bls12381, serde_helpers::ToFromByteArray}; use fastcrypto_tbls::{mocked_dkg, nodes}; use sui_swarm_config::test_utils::CommitteeFixture; @@ -12,6 +11,8 @@ use sui_types::{ }; use tracing::Instrument; +use crate::{randomness::*, utils}; + type PkG = bls12381::G2Element; type EncG = bls12381::G2Element; @@ -174,9 +175,9 @@ async fn test_record_own_partial_sigs() { let nodes = nodes::Nodes::new(nodes).unwrap(); - // Only send partial sigs from authorities 0 and 1. They should still be able to reach - // the threshold to generate full signatures, only if they are correctly recording and using - // their own partial signatures as well. + // Only send partial sigs from authorities 0 and 1. They should still be able to + // reach the threshold to generate full signatures, only if they are + // correctly recording and using their own partial signatures as well. for (authority, handle) in handles.iter().take(2) { let mock_dkg_output = mocked_dkg::generate_mocked_output::( nodes.clone(), diff --git a/crates/sui-network/src/state_sync/builder.rs b/crates/sui-network/src/state_sync/builder.rs index 38e06deb3bc..e43fd31c4c0 100644 --- a/crates/sui-network/src/state_sync/builder.rs +++ b/crates/sui-network/src/state_sync/builder.rs @@ -1,16 +1,17 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use anemo::codegen::InboundRequestLayer; -use anemo_tower::{inflight_limit, rate_limit}; use std::{ collections::HashMap, sync::{Arc, RwLock}, time::Duration, }; + +use anemo::codegen::InboundRequestLayer; +use anemo_tower::{inflight_limit, rate_limit}; use sui_archival::reader::ArchiveReaderBalancer; use sui_config::p2p::StateSyncConfig; -use sui_types::messages_checkpoint::VerifiedCheckpoint; +use sui_types::{messages_checkpoint::VerifiedCheckpoint, storage::WriteStore}; use tap::Pipe; use tokio::{ sync::{broadcast, mpsc}, @@ -22,7 +23,6 @@ use super::{ server::{CheckpointContentsDownloadLimitLayer, Server}, Handle, PeerHeights, StateSync, StateSyncEventLoop, StateSyncMessage, StateSyncServer, }; -use sui_types::storage::WriteStore; pub struct Builder { store: Option, diff --git a/crates/sui-network/src/state_sync/metrics.rs b/crates/sui-network/src/state_sync/metrics.rs index 3d3e5b0ccd5..17e98e038fb 100644 --- a/crates/sui-network/src/state_sync/metrics.rs +++ b/crates/sui-network/src/state_sync/metrics.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + use mysten_metrics::histogram::Histogram; use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; -use std::sync::Arc; use sui_types::messages_checkpoint::CheckpointSequenceNumber; use tap::Pipe; diff --git a/crates/sui-network/src/state_sync/mod.rs b/crates/sui-network/src/state_sync/mod.rs index f5618354b68..cf3b72bc1d2 100644 --- a/crates/sui-network/src/state_sync/mod.rs +++ b/crates/sui-network/src/state_sync/mod.rs @@ -3,59 +3,72 @@ //! Peer-to-peer data synchronization of checkpoints. //! -//! This StateSync module is responsible for the synchronization and dissemination of checkpoints -//! and the transactions, and their effects, contained within. This module is *not* responsible for -//! the execution of the transactions included in a checkpoint, that process is left to another +//! This StateSync module is responsible for the synchronization and +//! dissemination of checkpoints and the transactions, and their effects, +//! contained within. This module is *not* responsible for the execution of the +//! transactions included in a checkpoint, that process is left to another //! component in the system. //! //! # High-level Overview of StateSync //! //! StateSync discovers new checkpoints via a few different sources: -//! 1. If this node is a Validator, checkpoints will be produced via consensus at which point -//! consensus can notify state-sync of the new checkpoint via [Handle::send_checkpoint]. -//! 2. A peer notifies us of the latest checkpoint which they have synchronized. State-Sync will -//! also periodically query its peers to discover what their latest checkpoint is. +//! 1. If this node is a Validator, checkpoints will be produced via consensus +//! at which point consensus can notify state-sync of the new checkpoint via +//! [Handle::send_checkpoint]. +//! 2. A peer notifies us of the latest checkpoint which they have synchronized. +//! State-Sync will also periodically query its peers to discover what their +//! latest checkpoint is. //! //! We keep track of two different watermarks: -//! * highest_verified_checkpoint - This is the highest checkpoint header that we've locally -//! verified. This indicated that we have in our persistent store (and have verified) all -//! checkpoint headers up to and including this value. -//! 
* highest_synced_checkpoint - This is the highest checkpoint that we've fully synchronized, -//! meaning we've downloaded and have in our persistent stores all of the transactions, and their -//! effects (but not the objects), for all checkpoints up to and including this point. This is -//! the watermark that is shared with other peers, either via notification or when they query for -//! our latest checkpoint, and is intended to be used as a guarantee of data availability. +//! * highest_verified_checkpoint - This is the highest checkpoint header that +//! we've locally verified. This indicates that we have in our persistent +//! store (and have verified) all checkpoint headers up to and including this +//! value. +//! * highest_synced_checkpoint - This is the highest checkpoint that we've +//! fully synchronized, meaning we've downloaded and have in our persistent +//! stores all of the transactions, and their effects (but not the objects), +//! for all checkpoints up to and including this point. This is the watermark +//! that is shared with other peers, either via notification or when they +//! query for our latest checkpoint, and is intended to be used as a guarantee +//! of data availability. //! -//! The `PeerHeights` struct is used to track the highest_synced_checkpoint watermark for all of -//! our peers. +//! The `PeerHeights` struct is used to track the highest_synced_checkpoint +//! watermark for all of our peers. //! -//! When a new checkpoint is discovered, and we've determined that it is higher than our -//! highest_verified_checkpoint, then StateSync will kick off a task to synchronize and verify all -//! checkpoints between our highest_synced_checkpoint and the newly discovered checkpoint. This -//! process is done by querying one of our peers for the checkpoints we're missing (using the -//! `PeerHeights` struct as a way to intelligently select which peers have the data available for -//! us to query) at which point we will locally verify the signatures on the checkpoint header with -//! the appropriate committee (based on the epoch). As checkpoints are verified, the -//! highest_synced_checkpoint watermark will be ratcheted up. +//! When a new checkpoint is discovered, and we've determined that it is higher +//! than our highest_verified_checkpoint, then StateSync will kick off a task to +//! synchronize and verify all checkpoints between our highest_synced_checkpoint +//! and the newly discovered checkpoint. This process is done by querying one of +//! our peers for the checkpoints we're missing (using the `PeerHeights` struct +//! as a way to intelligently select which peers have the data available for +//! us to query) at which point we will locally verify the signatures on the +//! checkpoint header with the appropriate committee (based on the epoch). As +//! checkpoints are verified, the highest_verified_checkpoint watermark will be +//! ratcheted up. //! -//! Once we've ratcheted up our highest_verified_checkpoint, and if it is higher than -//! highest_synced_checkpoint, StateSync will then kick off a task to synchronize the contents of -//! all of the checkpoints from highest_synced_checkpoint..=highest_verified_checkpoint. After the +//! Once we've ratcheted up our highest_verified_checkpoint, and if it is higher +//! than highest_synced_checkpoint, StateSync will then kick off a task to +//! synchronize the contents of all of the checkpoints from +//! highest_synced_checkpoint..=highest_verified_checkpoint. After the //!
contents of each checkpoint is fully downloaded, StateSync will update our -//! highest_synced_checkpoint watermark and send out a notification on a broadcast channel -//! indicating that a new checkpoint has been fully downloaded. Notifications on this broadcast -//! channel will always be made in order. StateSync will also send out a notification to its peers -//! of the newly synchronized checkpoint so that it can help other peers synchronize. +//! highest_synced_checkpoint watermark and send out a notification on a +//! broadcast channel indicating that a new checkpoint has been fully +//! downloaded. Notifications on this broadcast channel will always be made in +//! order. StateSync will also send out a notification to its peers of the newly +//! synchronized checkpoint so that it can help other peers synchronize. -use anemo::{types::PeerEvent, PeerId, Request, Response, Result}; -use futures::{stream::FuturesOrdered, FutureExt, StreamExt}; -use rand::Rng; -use std::sync::atomic::{AtomicU64, Ordering}; use std::{ collections::{HashMap, VecDeque}, - sync::{Arc, RwLock}, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, RwLock, + }, time::Duration, }; + +use anemo::{types::PeerEvent, PeerId, Request, Response, Result}; +use futures::{stream::FuturesOrdered, FutureExt, StreamExt}; +use rand::Rng; use sui_config::p2p::StateSyncConfig; use sui_types::{ committee::Committee, @@ -67,9 +80,8 @@ use sui_types::{ storage::WriteStore, }; use tap::{Pipe, TapFallible, TapOptional}; -use tokio::sync::oneshot; use tokio::{ - sync::{broadcast, mpsc, watch}, + sync::{broadcast, mpsc, oneshot, watch}, task::{AbortHandle, JoinSet}, }; use tracing::{debug, info, instrument, trace, warn}; @@ -88,8 +100,7 @@ pub use generated::{ state_sync_client::StateSyncClient, state_sync_server::{StateSync, StateSyncServer}, }; -pub use server::GetCheckpointAvailabilityResponse; -pub use server::GetCheckpointSummaryRequest; +pub use server::{GetCheckpointAvailabilityResponse, GetCheckpointSummaryRequest}; use sui_archival::reader::ArchiveReaderBalancer; use sui_storage::verify_checkpoint; @@ -97,8 +108,9 @@ use self::{metrics::Metrics, server::CheckpointContentsDownloadLimitLayer}; /// A handle to the StateSync subsystem. /// -/// This handle can be cloned and shared. Once all copies of a StateSync system's Handle have been -/// dropped, the StateSync system will be gracefully shutdown. +/// This handle can be cloned and shared. Once all copies of a StateSync +/// system's Handle have been dropped, the StateSync system will be gracefully +/// shutdown. #[derive(Clone, Debug)] pub struct Handle { sender: mpsc::Sender, @@ -106,14 +118,15 @@ pub struct Handle { } impl Handle { - /// Send a newly minted checkpoint from Consensus to StateSync so that it can be disseminated - /// to other nodes on the network. + /// Send a newly minted checkpoint from Consensus to StateSync so that it + /// can be disseminated to other nodes on the network. /// /// # Invariant /// - /// Consensus must only notify StateSync of new checkpoints that have been fully committed to - /// persistent storage. This includes CheckpointContents and all Transactions and - /// TransactionEffects included therein. + /// Consensus must only notify StateSync of new checkpoints that have been + /// fully committed to persistent storage. This includes + /// CheckpointContents and all Transactions and TransactionEffects + /// included therein. 
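[Editorial aside, not part of the patch: the ordered-notification guarantee documented above is easiest to see from the consumer side. A minimal sketch, assuming this module's `Handle` and its `subscribe_to_synced_checkpoints` method; `follow_synced_checkpoints` is a hypothetical name.]

```rust
// Illustrative sketch only, not part of this diff. Assumes this module's
// `Handle`; `follow_synced_checkpoints` is a hypothetical consumer.
use tokio::sync::broadcast::error::RecvError;

async fn follow_synced_checkpoints(handle: Handle) {
    let mut rx = handle.subscribe_to_synced_checkpoints();
    loop {
        match rx.recv().await {
            // Per the module docs above, notifications always arrive in order.
            Ok(checkpoint) => {
                tracing::info!("fully synced checkpoint {}", checkpoint.sequence_number());
            }
            // A slow receiver can lag behind the bounded broadcast channel;
            // a real consumer would resync from its last seen watermark.
            Err(RecvError::Lagged(missed)) => tracing::warn!("missed {missed} notifications"),
            Err(RecvError::Closed) => break,
        }
    }
}
```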
pub async fn send_checkpoint(&self, checkpoint: VerifiedCheckpoint) { self.sender .send(StateSyncMessage::VerifiedCheckpoint(Box::new(checkpoint))) @@ -121,7 +134,8 @@ impl Handle { .unwrap() } - /// Subscribe to the stream of checkpoints that have been fully synchronized and downloaded. + /// Subscribe to the stream of checkpoints that have been fully synchronized + /// and downloaded. pub fn subscribe_to_synced_checkpoints(&self) -> broadcast::Receiver { self.checkpoint_event_sender.subscribe() } @@ -172,8 +186,8 @@ impl PeerHeights { // Returns a bool that indicates if the update was done successfully. // - // This will return false if the given peer doesn't have an entry or is not on the same chain - // as us + // This will return false if the given peer doesn't have an entry or is not on + // the same chain as us pub fn update_peer_info( &mut self, peer_id: PeerId, @@ -199,8 +213,9 @@ impl PeerHeights { match self.peers.entry(peer_id) { Entry::Occupied(mut entry) => { - // If there's already an entry and the genesis checkpoint digests match then update - // the maximum height. Otherwise we'll use the more recent one + // If there's already an entry and the genesis checkpoint digests match then + // update the maximum height. Otherwise we'll use the more + // recent one let entry = entry.get_mut(); if entry.genesis_checkpoint_digest == info.genesis_checkpoint_digest { entry.height = std::cmp::max(entry.height, info.height); @@ -227,7 +242,8 @@ impl PeerHeights { .retain(|&s, _digest| s > sequence_number); } - // TODO: also record who gives this checkpoint info for peer quality measurement? + // TODO: also record who gives this checkpoint info for peer quality + // measurement? pub fn insert_checkpoint(&mut self, checkpoint: Checkpoint) { let digest = *checkpoint.digest(); let sequence_number = *checkpoint.sequence_number(); @@ -266,7 +282,8 @@ impl PeerHeights { } } -// PeerBalancer is an Iterator that selects peers based on RTT with some added randomness. +// PeerBalancer is an Iterator that selects peers based on RTT with some added +// randomness. #[derive(Clone)] struct PeerBalancer { peers: VecDeque<(anemo::Peer, PeerStateSyncInfo)>, @@ -340,8 +357,8 @@ impl Iterator for PeerBalancer { #[derive(Clone, Debug)] enum StateSyncMessage { StartSyncJob, - // Validators will send this to the StateSyncEventLoop in order to kick off notifying our peers - // of the new checkpoint. + // Validators will send this to the StateSyncEventLoop in order to kick off notifying our + // peers of the new checkpoint. VerifiedCheckpoint(Box), // Notification that the checkpoint content sync task will send to the event loop in the event // it was able to successfully sync a checkpoint's contents. If multiple checkpoints were @@ -375,10 +392,11 @@ impl StateSyncEventLoop where S: WriteStore + Clone + Send + Sync + 'static, { - // Note: A great deal of care is taken to ensure that all event handlers are non-asynchronous - // and that the only "await" points are from the select macro picking which event to handle. - // This ensures that the event loop is able to process events at a high speed and reduce the - // chance for building up a backlog of events to process. + // Note: A great deal of care is taken to ensure that all event handlers are + // non-asynchronous and that the only "await" points are from the select + // macro picking which event to handle. 
This ensures that the event loop is + // able to process events at a high speed and reduce the chance for building + // up a backlog of events to process. pub async fn start(mut self) { info!("State-Synchronizer started"); @@ -423,10 +441,11 @@ where // Start archive based checkpoint content sync loop. // TODO: Consider switching to sync from archive only on startup. // Right now because the peer set is fixed at startup, a node may eventually - // end up with peers who have all purged their local state. In such a scenario it will be - // stuck until restart when it ends up with a different set of peers. Once the discovery - // mechanism can dynamically identify and connect to other peers on the network, we will rely - // on sync from archive as a fall back. + // end up with peers who have all purged their local state. In such a scenario + // it will be stuck until restart when it ends up with a different set + // of peers. Once the discovery mechanism can dynamically identify and + // connect to other peers on the network, we will rely on sync from + // archive as a fall back. let task = sync_checkpoint_contents_from_archive( self.network.clone(), self.archive_readers.clone(), @@ -516,7 +535,12 @@ where .unwrap_or_else(|| panic!("Got checkpoint {} from consensus but cannot find checkpoint {} in certified_checkpoints", checkpoint.sequence_number(), checkpoint.sequence_number() - 1)) .digest(); if checkpoint.previous_digest != Some(prev_digest) { - panic!("Checkpoint {} from consensus has mismatched previous_digest, expected: {:?}, actual: {:?}", checkpoint.sequence_number(), Some(prev_digest), checkpoint.previous_digest); + panic!( + "Checkpoint {} from consensus has mismatched previous_digest, expected: {:?}, actual: {:?}", + checkpoint.sequence_number(), + Some(prev_digest), + checkpoint.previous_digest + ); } let latest_checkpoint = self @@ -540,7 +564,8 @@ where } // Because checkpoint from consensus sends in order, when we have checkpoint n, - // we must have all of the checkpoints before n from either state sync or consensus. + // we must have all of the checkpoints before n from either state sync or + // consensus. #[cfg(debug_assertions)] { let _ = (next_sequence_number..=*checkpoint.sequence_number()) @@ -784,8 +809,8 @@ async fn get_latest_from_peer( } else { // TODO do we want to create a new API just for querying a node's chainid? // - // We need to query this node's genesis checkpoint to see if they're on the same chain - // as us + // We need to query this node's genesis checkpoint to see if they're on the same + // chain as us let request = Request::new(GetCheckpointSummaryRequest::BySequenceNumber(0)) .with_timeout(timeout); let response = client @@ -837,7 +862,8 @@ async fn get_latest_from_peer( .update_peer_info(peer_id, highest_checkpoint, low_watermark); } -/// Queries a peer for their highest_synced_checkpoint and low checkpoint watermark +/// Queries a peer for their highest_synced_checkpoint and low checkpoint +/// watermark async fn query_peer_for_latest_info( client: &mut StateSyncClient, timeout: Duration, @@ -1078,8 +1104,8 @@ where } current = checkpoint.clone(); - // Insert the newly verified checkpoint into our store, which will bump our highest - // verified checkpoint watermark as well. + // Insert the newly verified checkpoint into our store, which will bump our + // highest verified checkpoint watermark as well. 
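[Editorial aside, not part of the patch: the `previous_digest` panic above enforces a simple chain-linking invariant. A self-contained sketch of that invariant; `Summary` and `is_linked` are stand-in names modeling only the fields used here.]

```rust
// Stand-in for CheckpointSummary, modeling only the linking fields.
struct Summary {
    digest: [u8; 32],
    previous_digest: Option<[u8; 32]>,
}

// A run of checkpoints is well-linked iff every checkpoint's
// `previous_digest` equals the digest of its immediate predecessor.
fn is_linked(chain: &[Summary]) -> bool {
    chain
        .windows(2)
        .all(|pair| pair[1].previous_digest == Some(pair[0].digest))
}
```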
store .insert_checkpoint(&checkpoint) .expect("store operation should not fail"); @@ -1146,7 +1172,11 @@ async fn sync_checkpoint_contents_from_archive( { warn!("State sync from archive failed with error: {:?}", err); } else { - info!("State sync from archive is complete. Checkpoints downloaded = {:?}, Txns downloaded = {:?}", checkpoint_counter.load(Ordering::Relaxed), txn_counter.load(Ordering::Relaxed)); + info!( + "State sync from archive is complete. Checkpoints downloaded = {:?}, Txns downloaded = {:?}", + checkpoint_counter.load(Ordering::Relaxed), + txn_counter.load(Ordering::Relaxed) + ); } } else { warn!("Failed to find an archive reader to complete the state sync request"); @@ -1264,7 +1294,8 @@ async fn sync_checkpoint_contents( if highest_synced.sequence_number() % checkpoint_content_download_concurrency as u64 == 0 || checkpoint_contents_tasks.is_empty() { - // Periodically notify event loop to notify our peers that we've synced to a new checkpoint height + // Periodically notify event loop to notify our peers that we've synced to a new + // checkpoint height if let Some(sender) = sender.upgrade() { let message = StateSyncMessage::SyncedCheckpoint(Box::new(highest_synced.clone())); let _ = sender.send(message).await; @@ -1286,8 +1317,8 @@ where { debug!("syncing checkpoint contents"); - // Check if we already have produced this checkpoint locally. If so, we don't need - // to get it from peers anymore. + // Check if we already have produced this checkpoint locally. If so, we don't + // need to get it from peers anymore. if store .get_highest_synced_checkpoint() .expect("store operation should not fail") @@ -1307,7 +1338,8 @@ where .with_checkpoint(*checkpoint.sequence_number()); let Some(_contents) = get_full_checkpoint_contents(peers, &store, &checkpoint, timeout).await else { - // Delay completion in case of error so we don't hammer the network with retries. + // Delay completion in case of error so we don't hammer the network with + // retries. let duration = peer_heights .read() .unwrap() @@ -1344,8 +1376,8 @@ where return Some(contents); } - // Iterate through our selected peers trying each one in turn until we're able to - // successfully get the target checkpoint + // Iterate through our selected peers trying each one in turn until we're able + // to successfully get the target checkpoint for mut peer in peers { debug!( ?timeout, diff --git a/crates/sui-network/src/state_sync/server.rs b/crates/sui-network/src/state_sync/server.rs index 03288d04ba1..bec7ca78733 100644 --- a/crates/sui-network/src/state_sync/server.rs +++ b/crates/sui-network/src/state_sync/server.rs @@ -1,13 +1,15 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::{PeerHeights, StateSync, StateSyncMessage}; +use std::{ + sync::{Arc, RwLock}, + task::{Context, Poll}, +}; + use anemo::{rpc::Status, types::response::StatusCode, Request, Response, Result}; use dashmap::DashMap; use futures::future::BoxFuture; use serde::{Deserialize, Serialize}; -use std::sync::{Arc, RwLock}; -use std::task::{Context, Poll}; use sui_types::{ digests::{CheckpointContentsDigest, CheckpointDigest}, messages_checkpoint::{ @@ -18,6 +20,8 @@ use sui_types::{ }; use tokio::sync::{mpsc, OwnedSemaphorePermit, Semaphore}; +use super::{PeerHeights, StateSync, StateSyncMessage}; + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub enum GetCheckpointSummaryRequest { Latest, @@ -131,8 +135,8 @@ where } } -/// [`Layer`] for adding a per-checkpoint limit to the number of inflight GetCheckpointContent -/// requests. +/// [`Layer`] for adding a per-checkpoint limit to the number of inflight +/// GetCheckpointContent requests. #[derive(Clone)] pub(super) struct CheckpointContentsDownloadLimitLayer { inflight_per_checkpoint: Arc>>, @@ -169,8 +173,8 @@ impl tower::layer::Layer for CheckpointContentsDownloadLimitLayer { } } -/// Middleware for adding a per-checkpoint limit to the number of inflight GetCheckpointContent -/// requests. +/// Middleware for adding a per-checkpoint limit to the number of inflight +/// GetCheckpointContent requests. #[derive(Clone)] pub(super) struct CheckpointContentsDownloadLimit { inner: S, diff --git a/crates/sui-network/src/state_sync/tests.rs b/crates/sui-network/src/state_sync/tests.rs index c605d282fcb..9354a5f33e2 100644 --- a/crates/sui-network/src/state_sync/tests.rs +++ b/crates/sui-network/src/state_sync/tests.rs @@ -1,22 +1,16 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - state_sync::{ - Builder, GetCheckpointSummaryRequest, PeerStateSyncInfo, StateSync, StateSyncMessage, - UnstartedStateSync, - }, - utils::build_network, -}; +use std::{collections::HashMap, num::NonZeroUsize, time::Duration}; + use anemo::{PeerId, Request}; use anyhow::anyhow; use prometheus::Registry; -use std::num::NonZeroUsize; -use std::{collections::HashMap, time::Duration}; -use sui_archival::reader::ArchiveReaderBalancer; -use sui_archival::writer::ArchiveWriter; -use sui_config::node::ArchiveReaderConfig; -use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; +use sui_archival::{reader::ArchiveReaderBalancer, writer::ArchiveWriter}; +use sui_config::{ + node::ArchiveReaderConfig, + object_storage_config::{ObjectStoreConfig, ObjectStoreType}, +}; use sui_storage::{FileCompression, StorageFormat}; use sui_swarm_config::test_utils::{empty_contents, CommitteeFixture}; use sui_types::{ @@ -26,6 +20,14 @@ use sui_types::{ use tempfile::tempdir; use tokio::time::{timeout, Instant}; +use crate::{ + state_sync::{ + Builder, GetCheckpointSummaryRequest, PeerStateSyncInfo, StateSync, StateSyncMessage, + UnstartedStateSync, + }, + utils::build_network, +}; + #[tokio::test] async fn server_push_checkpoint() { let committee = CommitteeFixture::generate(rand::rngs::OsRng, 0, 4); @@ -301,9 +303,10 @@ async fn test_state_sync_using_archive() -> anyhow::Result<()> { empty_contents(), committee.committee().to_owned(), ); - // We ensure that only a part of the data exists in the archive store (and no new checkpoints after - // sequence number >= 50 are written to the archive store). 
This is to test the fact that a node
-    // can download latest checkpoints from a peer and back fill missing older data from archive
+    // We ensure that only a part of the data exists in the archive store (and no
+    // new checkpoints after sequence number >= 50 are written to the archive
+    // store). This is to test the fact that a node can download latest
+    // checkpoints from a peer and backfill missing older data from the archive
     for checkpoint in &ordered_checkpoints[0..50] {
         test_store.inner_mut().insert_checkpoint(checkpoint);
     }
@@ -330,8 +333,9 @@ async fn test_state_sync_using_archive() -> anyhow::Result<()> {
         }
         tokio::time::sleep(Duration::from_secs(1)).await;
     }
-    // Build and connect two nodes where Node 1 will be given access to an archive store
-    // Node 2 will prune older checkpoints, so Node 1 is forced to backfill from the archive
+    // Build and connect two nodes where Node 1 will be given access to an archive
+    // store. Node 2 will prune older checkpoints, so Node 1 is forced to
+    // backfill from the archive
     let (builder, server) = Builder::new()
         .store(SharedInMemoryStore::default())
         .archive_readers(archive_readers)
@@ -813,8 +817,9 @@ async fn sync_with_checkpoints_watermark() {
     // Now set Peer 1's low watermark back to 0
     store_1.inner_mut().set_lowest_available_checkpoint(0);

-    // Peer 2 and Peer 3 will know about this change by `get_checkpoint_availability`
-    // Soon we expect them to have all checkpoints's content.
+    // Peer 2 and Peer 3 will know about this change by
+    // `get_checkpoint_availability`. Soon we expect them to have all
+    // checkpoints' content.
     timeout(Duration::from_secs(6), async {
         for (checkpoint, contents) in ordered_checkpoints[2..]
             .iter()
diff --git a/crates/sui-node/src/admin.rs b/crates/sui-node/src/admin.rs
index 9968c4ff4b5..219d1a911cd 100644
--- a/crates/sui-node/src/admin.rs
+++ b/crates/sui-node/src/admin.rs
@@ -1,7 +1,11 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0

-use crate::SuiNode;
+use std::{
+    net::{IpAddr, Ipv4Addr, SocketAddr},
+    sync::Arc,
+};
+
 use axum::{
     extract::{Query, State},
     http::StatusCode,
@@ -10,12 +14,12 @@ use axum::{
 };
 use humantime::parse_duration;
 use serde::Deserialize;
-use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-use std::sync::Arc;
 use sui_types::error::SuiError;
 use telemetry_subscribers::TracingHandle;
 use tracing::info;

+use crate::SuiNode;
+
 // Example commands:
 //
 // Set buffer stake for current epoch 2 to 1500 basis points:
@@ -31,7 +35,8 @@ use tracing::info;
 //
 // $ curl -X POST 'http://127.0.0.1:1337/force-close-epoch?epoch=2'
 //
-// View current all capabilities from all authorities that have been received by this node:
+// View all current capabilities from all authorities that have been received
+// by this node:
 //
 // $ curl 'http://127.0.0.1:1337/capabilities'
 //
@@ -39,8 +44,8 @@ use tracing::info;
 //
 // $ curl 'http://127.0.0.1:1337/node-config'
 //
-// Set a time-limited tracing config. After the duration expires, tracing will be disabled
-// automatically.
+// Set a time-limited tracing config. After the duration expires, tracing will
+// be disabled automatically.
 //
 // $ curl -X POST 'http://127.0.0.1:1337/enable-tracing?filter=info&duration=10s'
 //
diff --git a/crates/sui-node/src/handle.rs b/crates/sui-node/src/handle.rs
index f2084633f48..bbd9a8f0959 100644
--- a/crates/sui-node/src/handle.rs
+++ b/crates/sui-node/src/handle.rs
@@ -3,8 +3,9 @@
 //! SuiNodeHandle wraps SuiNode in a way suitable for access by test code.
 //!
-//! 
When starting a SuiNode directly, in a test (as opposed to using Swarm), the node may be
-//! running inside of a simulator node. It is therefore a mistake to do something like:
+//! When starting a SuiNode directly, in a test (as opposed to using Swarm), the
+//! node may be running inside of a simulator node. It is therefore a mistake to
+//! do something like:
 //!
 //! ```ignore
 //! use test_utils::authority::{start_node, spawn_checkpoint_processes};
 //!
@@ -13,8 +14,9 @@
 //!     spawn_checkpoint_processes(config, &[node]).await;
 //! ```
 //!
-//! Because this would cause the checkpointing processes to be running inside the current
-//! simulator node rather than the node in which the SuiNode is running.
+//! Because this would cause the checkpointing processes to be running inside
+//! the current simulator node rather than the node in which the SuiNode is
+//! running.
 //!
 //! SuiNodeHandle provides an easy way to do the right thing here:
 //!
@@ -25,12 +27,13 @@
 //! });
 //! ```
 //!
-//! Code executed inside of with or with_async will run in the context of the simulator node.
-//! This allows tests to break the simulator abstraction and magically mutate or inspect state that
-//! is conceptually running on a different "machine", but without producing extremely confusing
-//! behavior that might result otherwise. (For instance, any network connection that is initiated
-//! from a task spawned from within a with or with_async will appear to originate from the correct
-//! simulator node.
+//! Code executed inside of with or with_async will run in the context of the
+//! simulator node. This allows tests to break the simulator abstraction and
+//! magically mutate or inspect state that is conceptually running on a
+//! different "machine", but without producing extremely confusing behavior that
+//! might result otherwise. (For instance, any network connection that is
+//! initiated from a task spawned from within a with or with_async will appear
+//! to originate from the correct simulator node.)
 //!
 //! It is possible to exfiltrate state:
 //!
@@ -40,13 +43,15 @@
 //!     do_stuff_with_state(state)
 //! ```
 //!
-//! We can't prevent this completely, but we can at least make the right way the easy way.
+//! We can't prevent this completely, but we can at least make the right way the
+//! easy way.
+
+use std::{future::Future, sync::Arc};

-use super::SuiNode;
-use std::future::Future;
-use std::sync::Arc;
 use sui_core::authority::AuthorityState;

+use super::SuiNode;
+
 /// Wrap SuiNode to allow correct access to SuiNode in simulator tests.
 pub struct SuiNodeHandle {
     node: Option<Arc<SuiNode>>,
diff --git a/crates/sui-node/src/lib.rs b/crates/sui-node/src/lib.rs
index ef171c13d06..8540c4431d9 100644
--- a/crates/sui-node/src/lib.rs
+++ b/crates/sui-node/src/lib.rs
@@ -1,134 +1,122 @@
 // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use anemo::Network; -use anemo_tower::callback::CallbackLayer; -use anemo_tower::trace::DefaultMakeSpan; -use anemo_tower::trace::DefaultOnFailure; -use anemo_tower::trace::TraceLayer; -use anyhow::anyhow; -use anyhow::Result; -use arc_swap::ArcSwap; -use fastcrypto_zkp::bn254::zk_login::JwkId; -use fastcrypto_zkp::bn254::zk_login::OIDCProvider; -use futures::TryFutureExt; -use narwhal_worker::LazyNarwhalClient; -use prometheus::Registry; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::fmt; -use std::path::PathBuf; -use std::str::FromStr; #[cfg(msim)] use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; -use sui_core::authority::RandomnessRoundReceiver; -use sui_core::authority::CHAIN_IDENTIFIER; -use sui_core::consensus_adapter::SubmitToConsensus; -use sui_core::epoch::randomness::RandomnessManager; -use sui_core::execution_cache::ExecutionCacheMetrics; -use sui_core::execution_cache::NotifyReadWrapper; -use sui_json_rpc::ServerType; -use sui_json_rpc_api::JsonRpcMetrics; -use sui_network::randomness; -use sui_protocol_config::ProtocolVersion; -use sui_types::base_types::ConciseableName; -use sui_types::crypto::RandomnessRound; -use sui_types::digests::ChainIdentifier; -use sui_types::message_envelope::get_google_jwk_bytes; -use sui_types::sui_system_state::SuiSystemState; -use tap::tap::TapFallible; -use tokio::runtime::Handle; -use tokio::sync::broadcast; -use tokio::sync::mpsc; -use tokio::sync::{watch, Mutex}; -use tokio::task::JoinHandle; -use tower::ServiceBuilder; -use tracing::{debug, error, warn}; -use tracing::{error_span, info, Instrument}; +use std::{ + collections::{BTreeSet, HashMap, HashSet}, + fmt, + path::PathBuf, + str::FromStr, + sync::Arc, + time::Duration, +}; -use fastcrypto_zkp::bn254::zk_login::JWK; +use anemo::Network; +use anemo_tower::{ + callback::CallbackLayer, + trace::{DefaultMakeSpan, DefaultOnFailure, TraceLayer}, +}; +use anyhow::{anyhow, Result}; +use arc_swap::ArcSwap; +use fastcrypto_zkp::bn254::zk_login::{JwkId, OIDCProvider, JWK}; +use futures::TryFutureExt; pub use handle::SuiNodeHandle; use mysten_metrics::{spawn_monitored_task, RegistryService}; use mysten_network::server::ServerBuilder; -use narwhal_network::metrics::MetricsMakeCallbackHandler; -use narwhal_network::metrics::{NetworkConnectionMetrics, NetworkMetrics}; -use sui_archival::reader::ArchiveReaderBalancer; -use sui_archival::writer::ArchiveWriter; -use sui_config::node::{ConsensusProtocol, DBCheckpointConfig, RunWithRange}; -use sui_config::node_config_metrics::NodeConfigMetrics; -use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; -use sui_config::{ConsensusConfig, NodeConfig}; -use sui_core::authority::authority_per_epoch_store::AuthorityPerEpochStore; -use sui_core::authority::authority_store_tables::AuthorityPerpetualTables; -use sui_core::authority::epoch_start_configuration::EpochStartConfigTrait; -use sui_core::authority::epoch_start_configuration::EpochStartConfiguration; -use sui_core::authority_aggregator::AuthorityAggregator; -use sui_core::authority_server::{ValidatorService, ValidatorServiceMetrics}; -use sui_core::checkpoints::checkpoint_executor::{CheckpointExecutor, StopReason}; -use sui_core::checkpoints::{ - CheckpointMetrics, CheckpointService, CheckpointStore, SendCheckpointToStateSync, - SubmitCheckpointToConsensus, +use narwhal_network::metrics::{ + MetricsMakeCallbackHandler, NetworkConnectionMetrics, NetworkMetrics, }; -use sui_core::consensus_adapter::{ - 
CheckConnection, ConnectionMonitorStatus, ConsensusAdapter, ConsensusAdapterMetrics, -}; -use sui_core::consensus_manager::{ConsensusManager, ConsensusManagerTrait}; -use sui_core::consensus_throughput_calculator::{ - ConsensusThroughputCalculator, ConsensusThroughputProfiler, ThroughputProfileRanges, +use narwhal_worker::LazyNarwhalClient; +use prometheus::Registry; +use sui_archival::{reader::ArchiveReaderBalancer, writer::ArchiveWriter}; +use sui_config::{ + node::{ConsensusProtocol, DBCheckpointConfig, RunWithRange}, + node_config_metrics::NodeConfigMetrics, + object_storage_config::{ObjectStoreConfig, ObjectStoreType}, + ConsensusConfig, NodeConfig, }; -use sui_core::consensus_validator::{SuiTxValidator, SuiTxValidatorMetrics}; -use sui_core::db_checkpoint_handler::DBCheckpointHandler; -use sui_core::epoch::committee_store::CommitteeStore; -use sui_core::epoch::data_removal::EpochDataRemover; -use sui_core::epoch::epoch_metrics::EpochMetrics; -use sui_core::epoch::reconfiguration::ReconfigurationInitiator; -use sui_core::execution_cache::{ExecutionCache, ExecutionCacheReconfigAPI}; -use sui_core::module_cache_metrics::ResolverMetrics; -use sui_core::overload_monitor::overload_monitor; -use sui_core::signature_verifier::SignatureVerifierMetrics; -use sui_core::state_accumulator::StateAccumulator; -use sui_core::storage::RocksDbStore; -use sui_core::transaction_orchestrator::TransactiondOrchestrator; use sui_core::{ - authority::{AuthorityState, AuthorityStore}, + authority::{ + authority_per_epoch_store::AuthorityPerEpochStore, + authority_store_tables::AuthorityPerpetualTables, + epoch_start_configuration::{EpochStartConfigTrait, EpochStartConfiguration}, + AuthorityState, AuthorityStore, RandomnessRoundReceiver, CHAIN_IDENTIFIER, + }, + authority_aggregator::AuthorityAggregator, authority_client::NetworkAuthorityClient, + authority_server::{ValidatorService, ValidatorServiceMetrics}, + checkpoints::{ + checkpoint_executor::{CheckpointExecutor, StopReason}, + CheckpointMetrics, CheckpointService, CheckpointStore, SendCheckpointToStateSync, + SubmitCheckpointToConsensus, + }, + consensus_adapter::{ + CheckConnection, ConnectionMonitorStatus, ConsensusAdapter, ConsensusAdapterMetrics, + SubmitToConsensus, + }, + consensus_manager::{ConsensusManager, ConsensusManagerTrait}, + consensus_throughput_calculator::{ + ConsensusThroughputCalculator, ConsensusThroughputProfiler, ThroughputProfileRanges, + }, + consensus_validator::{SuiTxValidator, SuiTxValidatorMetrics}, + db_checkpoint_handler::DBCheckpointHandler, + epoch::{ + committee_store::CommitteeStore, data_removal::EpochDataRemover, + epoch_metrics::EpochMetrics, randomness::RandomnessManager, + reconfiguration::ReconfigurationInitiator, + }, + execution_cache::{ + ExecutionCache, ExecutionCacheMetrics, ExecutionCacheReconfigAPI, NotifyReadWrapper, + }, + module_cache_metrics::ResolverMetrics, + overload_monitor::overload_monitor, + signature_verifier::SignatureVerifierMetrics, + state_accumulator::StateAccumulator, + storage::RocksDbStore, + transaction_orchestrator::TransactiondOrchestrator, +}; +use sui_json_rpc::{ + coin_api::CoinReadApi, governance_api::GovernanceReadApi, indexer_api::IndexerApi, + move_utils::MoveUtils, read_api::ReadApi, transaction_builder_api::TransactionBuilderApi, + transaction_execution_api::TransactionExecutionApi, JsonRpcServerBuilder, ServerType, }; -use sui_json_rpc::coin_api::CoinReadApi; -use sui_json_rpc::governance_api::GovernanceReadApi; -use sui_json_rpc::indexer_api::IndexerApi; -use 
sui_json_rpc::move_utils::MoveUtils; -use sui_json_rpc::read_api::ReadApi; -use sui_json_rpc::transaction_builder_api::TransactionBuilderApi; -use sui_json_rpc::transaction_execution_api::TransactionExecutionApi; -use sui_json_rpc::JsonRpcServerBuilder; -use sui_macros::fail_point; -use sui_macros::{fail_point_async, replay_log}; -use sui_network::api::ValidatorServer; -use sui_network::discovery; -use sui_network::discovery::TrustedPeerChangeEvent; -use sui_network::state_sync; -use sui_protocol_config::{Chain, ProtocolConfig, SupportedProtocolVersions}; +use sui_json_rpc_api::JsonRpcMetrics; +use sui_macros::{fail_point, fail_point_async, replay_log}; +use sui_network::{ + api::ValidatorServer, discovery, discovery::TrustedPeerChangeEvent, randomness, state_sync, +}; +use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion, SupportedProtocolVersions}; use sui_snapshot::uploader::StateSnapshotUploader; use sui_storage::{ http_key_value_store::HttpKVStore, key_value_store::{FallbackTransactionKVStore, TransactionKeyValueStore}, key_value_store_metrics::KeyValueStoreMetrics, + FileCompression, IndexStore, StorageFormat, }; -use sui_storage::{FileCompression, IndexStore, StorageFormat}; -use sui_types::base_types::{AuthorityName, EpochId}; -use sui_types::committee::Committee; -use sui_types::crypto::KeypairTraits; -use sui_types::error::{SuiError, SuiResult}; -use sui_types::messages_consensus::{ - check_total_jwk_size, AuthorityCapabilities, ConsensusTransaction, +use sui_types::{ + base_types::{AuthorityName, ConciseableName, EpochId}, + committee::Committee, + crypto::{KeypairTraits, RandomnessRound}, + digests::ChainIdentifier, + error::{SuiError, SuiResult}, + message_envelope::get_google_jwk_bytes, + messages_consensus::{check_total_jwk_size, AuthorityCapabilities, ConsensusTransaction}, + quorum_driver_types::QuorumDriverEffectsQueueResult, + sui_system_state::{ + epoch_start_sui_system_state::{EpochStartSystemState, EpochStartSystemStateTrait}, + SuiSystemState, SuiSystemStateTrait, + }, +}; +use tap::tap::TapFallible; +use tokio::{ + runtime::Handle, + sync::{broadcast, mpsc, watch, Mutex}, + task::JoinHandle, }; -use sui_types::quorum_driver_types::QuorumDriverEffectsQueueResult; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemState; -use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait; -use sui_types::sui_system_state::SuiSystemStateTrait; -use typed_store::rocks::default_db_options; -use typed_store::DBMetrics; +use tower::ServiceBuilder; +use tracing::{debug, error, error_span, info, warn, Instrument}; +use typed_store::{rocks::default_db_options, DBMetrics}; use crate::metrics::{GrpcMetrics, SuiNodeMetrics}; @@ -152,8 +140,9 @@ pub struct ValidatorComponents { #[cfg(msim)] mod simulator { - use super::*; use std::sync::atomic::AtomicBool; + + use super::*; pub(super) struct SimState { pub sim_node: sui_simulator::runtime::NodeHandle, pub sim_safe_mode_expected: AtomicBool, @@ -201,19 +190,20 @@ mod simulator { } } -#[cfg(msim)] -use simulator::*; - #[cfg(msim)] pub use simulator::set_jwk_injector; -use sui_core::consensus_handler::ConsensusHandlerInitializer; -use sui_core::mysticeti_adapter::LazyMysticetiClient; +#[cfg(msim)] +use simulator::*; +use sui_core::{ + consensus_handler::ConsensusHandlerInitializer, mysticeti_adapter::LazyMysticetiClient, +}; use sui_types::execution_config_utils::to_binary_config; pub struct SuiNode { config: NodeConfig, validator_components: Mutex>, - /// The http 
server responsible for serving JSON-RPC as well as the experimental rest service + /// The http server responsible for serving JSON-RPC as well as the + /// experimental rest service _http_server: Option>, state: Arc, transaction_orchestrator: Option>>, @@ -543,8 +533,9 @@ impl SuiNode { let _ = CHAIN_IDENTIFIER.set(chain_identifier); // Create network - // TODO only configure validators as seed/preferred peers for validators and not for - // fullnodes once we've had a chance to re-work fullnode configuration generation. + // TODO only configure validators as seed/preferred peers for validators and not + // for fullnodes once we've had a chance to re-work fullnode + // configuration generation. let archive_readers = ArchiveReaderBalancer::new(config.archive_reader_config(), &prometheus_registry)?; let (trusted_peer_change_tx, trusted_peer_change_rx) = watch::channel(Default::default()); @@ -567,8 +558,8 @@ impl SuiNode { &prometheus_registry, )?; - // We must explicitly send this instead of relying on the initial value to trigger - // watch value change, so that state-sync is able to process it. + // We must explicitly send this instead of relying on the initial value to + // trigger watch value change, so that state-sync is able to process it. send_trusted_peer_change( &config, &trusted_peer_change_tx, @@ -641,7 +632,8 @@ impl SuiNode { .unwrap(); } - // Start the loop that receives new randomness and generates transactions for it. + // Start the loop that receives new randomness and generates transactions for + // it. RandomnessRoundReceiver::spawn(state.clone(), randomness_rx); if config @@ -1000,8 +992,8 @@ impl SuiNode { .into_inner(); let mut anemo_config = config.p2p_config.anemo_config.clone().unwrap_or_default(); - // Set the max_frame_size to be 1 GB to work around the issue of there being too many - // staking events in the epoch change txn. + // Set the max_frame_size to be 1 GB to work around the issue of there being too + // many staking events in the epoch change txn. anemo_config.max_frame_size = Some(1 << 30); // Set a higher default value for socket send/receive buffers if not already @@ -1084,8 +1076,8 @@ impl SuiNode { .as_mut() .ok_or_else(|| anyhow!("Validator is missing consensus config"))?; - // Only allow overriding the consensus protocol, if the protocol version supports - // fields needed by Mysticeti. + // Only allow overriding the consensus protocol, if the protocol version + // supports fields needed by Mysticeti. if epoch_store.protocol_config().version >= ProtocolVersion::new(36) { if let Ok(consensus_choice) = std::env::var("CONSENSUS") { let consensus_protocol = match consensus_choice.as_str() { @@ -1100,7 +1092,9 @@ impl SuiNode { } _ => { let consensus = consensus_config.protocol.clone(); - warn!("Consensus env var was set to an invalid choice, using default consensus protocol {consensus:?}"); + warn!( + "Consensus env var was set to an invalid choice, using default consensus protocol {consensus:?}" + ); consensus } }; @@ -1152,7 +1146,8 @@ impl SuiNode { let mut consensus_epoch_data_remover = EpochDataRemover::new(consensus_manager.get_storage_base_path()); - // This only gets started up once, not on every epoch. (Make call to remove every epoch.) + // This only gets started up once, not on every epoch. (Make call to remove + // every epoch.) 
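[Editorial aside, not part of the patch: the `CONSENSUS` environment-variable override above follows a common read-env-or-fall-back shape. A self-contained sketch under assumed names; `Protocol` and `choose_protocol` are stand-ins, not items from this diff.]

```rust
// Stand-in for the configured consensus protocol enum.
#[derive(Clone, Debug)]
enum Protocol {
    Narwhal,
    Mysticeti,
}

// Returns the override from the CONSENSUS env var when it names a known
// protocol; otherwise falls back to the configured default, warning on
// unrecognized values (mirroring the warn! in the hunk above).
fn choose_protocol(configured: Protocol) -> Protocol {
    match std::env::var("CONSENSUS").as_deref() {
        Ok("narwhal") => Protocol::Narwhal,
        Ok("mysticeti") => Protocol::Mysticeti,
        Ok(other) => {
            eprintln!("invalid CONSENSUS choice {other:?}, using default {configured:?}");
            configured
        }
        Err(_) => configured,
    }
}
```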
consensus_epoch_data_remover.run().await;

        let checkpoint_metrics = CheckpointMetrics::new(&registry_service.default_registry());
@@ -1233,9 +1228,10 @@ impl SuiNode {
             checkpoint_metrics.clone(),
         );

-        // create a new map that gets injected into both the consensus handler and the consensus adapter
-        // the consensus handler will write values forwarded from consensus, and the consensus adapter
-        // will read the values to make decisions about which validator submits a transaction to consensus
+        // create a new map that gets injected into both the consensus handler and the
+        // consensus adapter. The consensus handler will write values forwarded
+        // from consensus, and the consensus adapter will read the values to
+        // make decisions about which validator submits a transaction to consensus
         let low_scoring_authorities = Arc::new(ArcSwap::new(Arc::new(HashMap::new())));

         consensus_adapter.swap_low_scoring_authorities(low_scoring_authorities.clone());
@@ -1371,7 +1367,8 @@ impl SuiNode {
         consensus_client: Arc,
     ) -> ConsensusAdapter {
         let ca_metrics = ConsensusAdapterMetrics::new(prometheus_registry);
-        // The consensus adapter allows the authority to send user certificates through consensus.
+        // The consensus adapter allows the authority to send user certificates through
+        // consensus.

         ConsensusAdapter::new(
             consensus_client,
@@ -1430,11 +1427,9 @@ impl SuiNode {
         self.state.committee_store().clone()
     }

-    /*
-    pub fn clone_authority_store(&self) -> Arc {
-        self.state.db()
-    }
-    */
+    // pub fn clone_authority_store(&self) -> Arc {
+    //     self.state.db()
+    // }

     /// Clone an AuthorityAggregator currently used in this node's
     /// QuorumDriver, if the node is a fullnode. After reconfig,
@@ -1471,8 +1466,9 @@ impl SuiNode {
             .ok_or_else(|| anyhow::anyhow!("Transaction Orchestrator is not enabled in this node."))
     }

-    /// This function awaits the completion of checkpoint execution of the current epoch,
-    /// after which it iniitiates reconfiguration of the entire system.
+    /// This function awaits the completion of checkpoint execution of the
+    /// current epoch, after which it initiates reconfiguration of the
+    /// entire system.
     pub async fn monitor_reconfiguration(self: Arc<Self>) -> Result<()> {
         let mut checkpoint_executor = CheckpointExecutor::new(
             self.state_sync_handle.subscribe_to_synced_checkpoints(),
@@ -1563,10 +1559,10 @@ impl SuiNode {
         fail_point_async!("reconfig_delay");

-        // We save the connection monitor status map regardless of validator / fullnode status
-        // so that we don't need to restart the connection monitor every epoch.
-        // Update the mappings that will be used by the consensus adapter if it exists or is
-        // about to be created.
+        // We save the connection monitor status map regardless of validator / fullnode
+        // status so that we don't need to restart the connection monitor
+        // every epoch. Update the mappings that will be used by the
+        // consensus adapter if it exists or is about to be created.
         let authority_names_to_peer_ids = new_epoch_start_state.get_authority_names_to_peer_ids();

         self.connection_monitor_status
diff --git a/crates/sui-node/src/main.rs b/crates/sui-node/src/main.rs
index 0a72297a48b..735d7ce2f67 100644
--- a/crates/sui-node/src/main.rs
+++ b/crates/sui-node/src/main.rs
@@ -1,24 +1,20 @@
 // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use clap::{ArgGroup, Parser}; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::broadcast; -use tokio::time::sleep; -use tracing::{error, info}; +use std::{path::PathBuf, sync::Arc, time::Duration}; +use clap::{ArgGroup, Parser}; use mysten_common::sync::async_once_cell::AsyncOnceCell; -use sui_config::node::RunWithRange; -use sui_config::{Config, NodeConfig}; +use sui_config::{node::RunWithRange, Config, NodeConfig}; use sui_core::runtime::SuiRuntimes; use sui_node::metrics; use sui_protocol_config::SupportedProtocolVersions; use sui_telemetry::send_telemetry_event; -use sui_types::committee::EpochId; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; -use sui_types::multiaddr::Multiaddr; +use sui_types::{ + committee::EpochId, messages_checkpoint::CheckpointSequenceNumber, multiaddr::Multiaddr, +}; +use tokio::{sync::broadcast, time::sleep}; +use tracing::{error, info}; const GIT_REVISION: &str = { if let Some(revision) = option_env!("GIT_REVISION") { @@ -57,8 +53,9 @@ struct Args { } fn main() { - // Ensure that a validator never calls get_for_min_version/get_for_max_version_UNSAFE. - // TODO: re-enable after we figure out how to eliminate crashes in prod because of this. + // Ensure that a validator never calls + // get_for_min_version/get_for_max_version_UNSAFE. TODO: re-enable after we + // figure out how to eliminate crashes in prod because of this. // ProtocolConfig::poison_get_for_min_version(); move_vm_profiler::gas_profiler_feature_enabled! { @@ -75,8 +72,8 @@ fn main() { // match run_with_range args // this means that we always modify the config used to start the node - // for run_with_range. i.e if this is set in the config, it is ignored. only the cli args - // enable/disable run_with_range + // for run_with_range. i.e if this is set in the config, it is ignored. only the + // cli args enable/disable run_with_range match (args.run_with_range_epoch, args.run_with_range_checkpoint) { (None, Some(checkpoint)) => { config.run_with_range = Some(RunWithRange::Checkpoint(checkpoint)) @@ -122,8 +119,8 @@ fn main() { let admin_interface_port = config.admin_interface_port; - // Run node in a separate runtime so that admin/monitoring functions continue to work - // if it deadlocks. + // Run node in a separate runtime so that admin/monitoring functions continue to + // work if it deadlocks. let node_once_cell = Arc::new(AsyncOnceCell::>::new()); let node_once_cell_clone = node_once_cell.clone(); let rpc_runtime = runtimes.json_rpc.handle().clone(); diff --git a/crates/sui-node/src/metrics.rs b/crates/sui-node/src/metrics.rs index bf51e21e089..27d1fe44286 100644 --- a/crates/sui-node/src/metrics.rs +++ b/crates/sui-node/src/metrics.rs @@ -1,17 +1,16 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + use axum::http::header; +use mysten_metrics::RegistryService; use mysten_network::metrics::MetricsCallbackProvider; use prometheus::{ register_histogram_vec_with_registry, register_int_counter_vec_with_registry, register_int_gauge_vec_with_registry, Encoder, HistogramVec, IntCounterVec, IntGaugeVec, Registry, PROTOBUF_FORMAT, }; - -use std::time::{Duration, SystemTime, UNIX_EPOCH}; use sui_network::tonic::Code; - -use mysten_metrics::RegistryService; use tracing::error; pub struct MetricsPushClient { @@ -47,8 +46,8 @@ impl MetricsPushClient { } } -/// Starts a task to periodically push metrics to a configured endpoint if a metrics push endpoint -/// is configured. +/// Starts a task to periodically push metrics to a configured endpoint if a +/// metrics push endpoint is configured. pub fn start_metrics_push_task(config: &sui_config::NodeConfig, registry: RegistryService) { use fastcrypto::traits::KeyPair; use sui_config::node::MetricsConfig; @@ -69,7 +68,8 @@ pub fn start_metrics_push_task(config: &sui_config::NodeConfig, registry: Regist _ => return, }; - // make a copy so we can make a new client later when we hit errors posting metrics + // make a copy so we can make a new client later when we hit errors posting + // metrics let config_copy = config.clone(); let mut client = MetricsPushClient::new(config_copy.network_key_pair().copy()); @@ -78,7 +78,8 @@ pub fn start_metrics_push_task(config: &sui_config::NodeConfig, registry: Regist url: &reqwest::Url, registry: &RegistryService, ) -> Result<(), anyhow::Error> { - // now represents a collection timestamp for all of the metrics we send to the proxy + // now represents a collection timestamp for all of the metrics we send to the + // proxy let now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() @@ -261,9 +262,10 @@ impl MetricsCallbackProvider for GrpcMetrics { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use mysten_metrics::start_prometheus_server; use prometheus::{IntCounter, Registry}; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; #[tokio::test] pub async fn test_metrics_endpoint_with_multiple_registries_add_remove() { diff --git a/crates/sui-open-rpc-macros/src/lib.rs b/crates/sui-open-rpc-macros/src/lib.rs index 467fde233f4..6532fa43f69 100644 --- a/crates/sui-open-rpc-macros/src/lib.rs +++ b/crates/sui-open-rpc-macros/src/lib.rs @@ -1,33 +1,34 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0

-use proc_macro::TokenStream;
-
 use derive_syn_parse::Parse;
 use itertools::Itertools;
-use proc_macro2::{Ident, TokenTree};
-use proc_macro2::{Span, TokenStream as TokenStream2};
+use proc_macro::TokenStream;
+use proc_macro2::{Ident, Span, TokenStream as TokenStream2, TokenTree};
 use quote::{quote, ToTokens, TokenStreamExt};
-use syn::parse::{Parse, ParseStream};
-use syn::punctuated::Punctuated;
-use syn::spanned::Spanned;
-use syn::token::{Comma, Paren};
 use syn::{
-    parse, parse_macro_input, Attribute, GenericArgument, LitStr, PatType, Path, PathArguments,
-    Token, TraitItem, Type,
+    parse,
+    parse::{Parse, ParseStream},
+    parse_macro_input,
+    punctuated::Punctuated,
+    spanned::Spanned,
+    token::{Comma, Paren},
+    Attribute, GenericArgument, LitStr, PatType, Path, PathArguments, Token, TraitItem, Type,
 };
 use unescape::unescape;

 const SUI_RPC_ATTRS: [&str; 2] = ["deprecated", "version"];

-/// Add a [Service name]OpenRpc struct and implementation providing access to Open RPC doc builder.
-/// This proc macro must be use in conjunction with `jsonrpsee_proc_macro::rpc`
+/// Add a [Service name]OpenRpc struct and implementation providing access to
+/// Open RPC doc builder. This proc macro must be used in conjunction with
+/// `jsonrpsee_proc_macro::rpc`
 ///
 /// The generated method `open_rpc` is added to [Service name]OpenRpc,
-/// ideally we want to add this to the trait generated by jsonrpsee framework, creating a new struct
-/// to provide access to the method is a workaround.
+/// ideally we would add this to the trait generated by the jsonrpsee framework;
+/// creating a new struct to provide access to the method is a workaround.
 ///
-/// TODO: consider contributing the open rpc doc macro to jsonrpsee to simplify the logics.
+/// TODO: consider contributing the open rpc doc macro to jsonrpsee to simplify
+/// the logic.
 #[proc_macro_attribute]
 pub fn open_rpc(attr: TokenStream, item: TokenStream) -> TokenStream {
     let attr: OpenRpcAttributes = parse_macro_input!(attr);
diff --git a/crates/sui-open-rpc/src/examples.rs b/crates/sui-open-rpc/src/examples.rs
index c6d48a33a25..eb93293f866 100644
--- a/crates/sui-open-rpc/src/examples.rs
+++ b/crates/sui-open-rpc/src/examples.rs
@@ -1,69 +1,65 @@
 // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::collections::BTreeMap; -use std::collections::HashMap; -use std::ops::Range; -use std::str::FromStr; +use std::{ + collections::{BTreeMap, HashMap}, + ops::Range, + str::FromStr, +}; use fastcrypto::traits::EncodeDecodeBase64; -use move_core_types::annotated_value::MoveStructLayout; -use move_core_types::identifier::Identifier; -use move_core_types::language_storage::ModuleId; -use move_core_types::language_storage::{StructTag, TypeTag}; -use move_core_types::resolver::ModuleResolver; -use rand::rngs::StdRng; -use rand::{Rng, SeedableRng}; +use move_core_types::{ + annotated_value::MoveStructLayout, + identifier::Identifier, + language_storage::{ModuleId, StructTag, TypeTag}, + resolver::ModuleResolver, +}; +use rand::{rngs::StdRng, Rng, SeedableRng}; use serde_json::json; - use sui_json::SuiJsonValue; use sui_json_rpc::error::Error; -use sui_json_rpc_types::DevInspectArgs; use sui_json_rpc_types::{ Balance, Checkpoint, CheckpointId, CheckpointPage, Coin, CoinPage, DelegatedStake, - DevInspectResults, DynamicFieldPage, EventFilter, EventPage, MoveCallParams, - MoveFunctionArgType, ObjectChange, ObjectValueKind::ByImmutableReference, - ObjectValueKind::ByMutableReference, ObjectValueKind::ByValue, ObjectsPage, OwnedObjectRef, - Page, ProtocolConfigResponse, RPCTransactionRequestParams, Stake, StakeStatus, SuiCoinMetadata, - SuiCommittee, SuiData, SuiEvent, SuiExecutionStatus, SuiGetPastObjectRequest, - SuiLoadedChildObject, SuiLoadedChildObjectsResponse, SuiMoveAbility, SuiMoveAbilitySet, - SuiMoveNormalizedFunction, SuiMoveNormalizedModule, SuiMoveNormalizedStruct, + DevInspectArgs, DevInspectResults, DynamicFieldPage, EventFilter, EventPage, MoveCallParams, + MoveFunctionArgType, ObjectChange, + ObjectValueKind::{ByImmutableReference, ByMutableReference, ByValue}, + ObjectsPage, OwnedObjectRef, Page, ProtocolConfigResponse, RPCTransactionRequestParams, Stake, + StakeStatus, SuiCoinMetadata, SuiCommittee, SuiData, SuiEvent, SuiExecutionStatus, + SuiGetPastObjectRequest, SuiLoadedChildObject, SuiLoadedChildObjectsResponse, SuiMoveAbility, + SuiMoveAbilitySet, SuiMoveNormalizedFunction, SuiMoveNormalizedModule, SuiMoveNormalizedStruct, SuiMoveNormalizedType, SuiMoveVisibility, SuiObjectData, SuiObjectDataFilter, SuiObjectDataOptions, SuiObjectRef, SuiObjectResponse, SuiObjectResponseQuery, SuiParsedData, SuiPastObjectResponse, SuiTransactionBlock, SuiTransactionBlockData, SuiTransactionBlockEffects, SuiTransactionBlockEffectsV1, SuiTransactionBlockEvents, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, - SuiTransactionBlockResponseQuery, TransactionBlockBytes, TransactionBlocksPage, - TransactionFilter, TransferObjectParams, + SuiTransactionBlockResponseQuery, SuiTypeTag, TransactionBlockBytes, TransactionBlocksPage, + TransactionFilter, TransferObjectParams, ValidatorApy, ValidatorApys, }; -use sui_json_rpc_types::{SuiTypeTag, ValidatorApy, ValidatorApys}; use sui_open_rpc::ExamplePairing; -use sui_protocol_config::Chain; -use sui_protocol_config::ProtocolConfig; -use sui_types::balance::Supply; -use sui_types::base_types::random_object_ref; -use sui_types::base_types::{ - MoveObjectType, ObjectDigest, ObjectID, ObjectType, SequenceNumber, SuiAddress, - TransactionDigest, +use sui_protocol_config::{Chain, ProtocolConfig}; +use sui_types::{ + balance::Supply, + base_types::{ + random_object_ref, MoveObjectType, ObjectDigest, ObjectID, ObjectType, SequenceNumber, + SuiAddress, TransactionDigest, + }, + committee::Committee, + 
crypto::{get_key_pair_from_rng, AccountKeyPair, AggregateAuthoritySignature},
+    digests::TransactionEventsDigest,
+    dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType},
+    event::EventID,
+    gas::GasCostSummary,
+    gas_coin::GasCoin,
+    messages_checkpoint::CheckpointDigest,
+    object::{MoveObject, Owner},
+    parse_sui_struct_tag,
+    programmable_transaction_builder::ProgrammableTransactionBuilder,
+    quorum_driver_types::ExecuteTransactionRequestType,
+    signature::GenericSignature,
+    transaction::{CallArg, ObjectArg, TransactionData, TEST_ONLY_GAS_UNIT_FOR_TRANSFER},
+    utils::to_sender_signed_transaction,
+    SUI_FRAMEWORK_PACKAGE_ID,
+};
-use sui_types::committee::Committee;
-use sui_types::crypto::{get_key_pair_from_rng, AccountKeyPair, AggregateAuthoritySignature};
-use sui_types::digests::TransactionEventsDigest;
-use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldName, DynamicFieldType};
-use sui_types::event::EventID;
-use sui_types::gas::GasCostSummary;
-use sui_types::gas_coin::GasCoin;
-use sui_types::messages_checkpoint::CheckpointDigest;
-use sui_types::object::MoveObject;
-use sui_types::object::Owner;
-use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder;
-use sui_types::quorum_driver_types::ExecuteTransactionRequestType;
-use sui_types::signature::GenericSignature;
-use sui_types::transaction::ObjectArg;
-use sui_types::transaction::TEST_ONLY_GAS_UNIT_FOR_TRANSFER;
-use sui_types::transaction::{CallArg, TransactionData};
-use sui_types::utils::to_sender_signed_transaction;
-use sui_types::{parse_sui_struct_tag, SUI_FRAMEWORK_PACKAGE_ID};
 
 struct Examples {
     function_name: String,
@@ -243,10 +239,12 @@ impl RpcExampleProvider {
                 ("tx_bytes", json!(tx_bytes.tx_bytes)),
                 (
                     "signatures",
-                    json!(signatures
-                        .into_iter()
-                        .map(|sig| sig.encode_base64())
-                        .collect::<Vec<_>>()),
+                    json!(
+                        signatures
+                            .into_iter()
+                            .map(|sig| sig.encode_base64())
+                            .collect::<Vec<_>>()
+                    ),
                 ),
                 (
                     "options",
@@ -270,9 +268,7 @@ impl RpcExampleProvider {
             "sui_dryRunTransactionBlock",
             vec![ExamplePairing::new(
                 "Dry runs a transaction block to get back estimated gas fees and other potential effects.",
-                vec![
-                    ("tx_bytes", json!(tx_bytes.tx_bytes)),
-                ],
+                vec![("tx_bytes", json!(tx_bytes.tx_bytes))],
                 json!(result),
             )],
         )
@@ -296,7 +292,10 @@ impl RpcExampleProvider {
             vec![ExamplePairing::new(
                 "Runs the transaction in dev-inspect mode. Which allows for nearly any transaction (or Move call) with any arguments. Detailed results are provided, including both the transaction effects and any return values.",
                 vec![
-                    ("sender_address", json!(SuiAddress::from(ObjectID::new(self.rng.gen())))),
+                    (
+                        "sender_address",
+                        json!(SuiAddress::from(ObjectID::new(self.rng.gen()))),
+                    ),
                     ("tx_bytes", json!(tx_bytes.tx_bytes)),
                     ("gas_price", json!(1000)),
                     ("epoch", json!(8888)),
@@ -466,16 +465,10 @@ impl RpcExampleProvider {
             "sui_getCheckpoints",
             vec![ExamplePairing::new(
                 "Gets a paginated list in descending order of all checkpoints starting at the provided cursor. Each page of results has a maximum number of checkpoints set by the provided limit.",
-                vec![(
-                    "cursor", json!(seq.to_string()),
-                ),
-                (
-                    "limit", json!(limit),
-                ),
-                (
-                    "descending_order",
-                    json!(descending_order),
-                ),
+                vec![
+                    ("cursor", json!(seq.to_string())),
+                    ("limit", json!(limit)),
+                    ("descending_order", json!(descending_order)),
                 ],
                 json!(result),
             )],
@@ -549,10 +542,12 @@ impl RpcExampleProvider {
                 ("digest", json!(result.digest)),
                 (
                     "options",
-                    json!(SuiTransactionBlockResponseOptions::new()
-                        .with_input()
-                        .with_effects()
-                        .with_events()),
+                    json!(
+                        SuiTransactionBlockResponseOptions::new()
+                            .with_input()
+                            .with_effects()
+                            .with_events()
+                    ),
                 ),
             ],
             json!(result),
@@ -611,10 +606,12 @@ impl RpcExampleProvider {
                 ("digests", json!(digests)),
                 (
                     "options",
-                    json!(SuiTransactionBlockResponseOptions::new()
-                        .with_input()
-                        .with_effects()
-                        .with_events()),
+                    json!(
+                        SuiTransactionBlockResponseOptions::new()
+                            .with_input()
+                            .with_effects()
+                            .with_events()
+                    ),
                 ),
             ],
             json!(data),
@@ -645,9 +642,7 @@ impl RpcExampleProvider {
             "sui_getProtocolConfig",
             vec![ExamplePairing::new(
                 "Returns the protocol config for the given protocol version. If none is specified, the node uses the version of the latest epoch it has processed",
-                vec![
-                    ("version", json!(version)),
-                ],
+                vec![("version", json!(version))],
                 json!(Self::get_protocol_config_impl(version)),
             )],
         )
@@ -868,7 +863,7 @@ impl RpcExampleProvider {
                 version: SequenceNumber::from_u64(103626),
                 digest: ObjectDigest::new(self.rng.gen()),
                 balance: 200000000,
-                //locked_until_epoch: None,
+                // locked_until_epoch: None,
                 previous_transaction: TransactionDigest::new(self.rng.gen()),
             })
             .collect::<Vec<_>>();
@@ -957,7 +952,7 @@ impl RpcExampleProvider {
                 version: SequenceNumber::from_u64(103626),
                 digest: ObjectDigest::new(self.rng.gen()),
                 balance: 200000000,
-                //locked_until_epoch: None,
+                // locked_until_epoch: None,
                 previous_transaction: TransactionDigest::new(self.rng.gen()),
             })
             .collect::<Vec<_>>();
@@ -1117,9 +1112,7 @@ impl RpcExampleProvider {
             "sui_getNormalizedMoveModulesByPackage",
             vec![ExamplePairing::new(
                 "Gets structured representations of all the modules for the package in the request.",
-                vec![
-                    ("package", json!(ObjectID::new(self.rng.gen()))),
-                ],
+                vec![("package", json!(ObjectID::new(self.rng.gen())))],
                 json!(result),
             )],
         )
@@ -1205,16 +1198,18 @@ impl RpcExampleProvider {
             has_next_page: true,
         };
 
-        Examples::new("suix_getDynamicFields",
-        vec![ExamplePairing::new(
-            "Gets dynamic fields for the object the request provides in a paginated list of `limit` dynamic field results per page. The default limit is 50.",
-            vec![
-                ("parent_object_id", json!(object_id)),
-                ("cursor", json!(ObjectID::new(self.rng.gen()))),
-                ("limit", json!(3)),
-            ],
-            json!(page),
-        )],)
+        Examples::new(
+            "suix_getDynamicFields",
+            vec![ExamplePairing::new(
+                "Gets dynamic fields for the object the request provides in a paginated list of `limit` dynamic field results per page. The default limit is 50.",
+                vec![
+                    ("parent_object_id", json!(object_id)),
+                    ("cursor", json!(ObjectID::new(self.rng.gen()))),
+                    ("limit", json!(3)),
+                ],
+                json!(page),
+            )],
+        )
     }
 
     fn suix_get_dynamic_field_object(&mut self) -> Examples {
@@ -1323,7 +1318,7 @@ impl RpcExampleProvider {
                     ("address", json!(owner)),
                     ("query", json!(query)),
                     ("cursor", json!(object_id)),
-                    ("limit", json!(3))
+                    ("limit", json!(3)),
                 ],
                 json!(result),
             )],
diff --git a/crates/sui-open-rpc/src/generate_json_rpc_spec.rs b/crates/sui-open-rpc/src/generate_json_rpc_spec.rs
index 7b2c43ece04..5de4ba73d39 100644
--- a/crates/sui-open-rpc/src/generate_json_rpc_spec.rs
+++ b/crates/sui-open-rpc/src/generate_json_rpc_spec.rs
@@ -1,23 +1,21 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::examples::RpcExampleProvider;
-use clap::Parser;
-use clap::ValueEnum;
+use std::{fs::File, io::Write};
+
+use clap::{Parser, ValueEnum};
 use pretty_assertions::assert_str_eq;
-use std::fs::File;
-use std::io::Write;
-//temporarily remove api ref content for indexer methods
-//use sui_json_rpc::api::ExtendedApiOpenRpc;
+// temporarily remove api ref content for indexer methods
+// use sui_json_rpc::api::ExtendedApiOpenRpc;
 use sui_json_rpc::coin_api::CoinReadApi;
-use sui_json_rpc::governance_api::GovernanceReadApi;
-use sui_json_rpc::read_api::ReadApi;
-use sui_json_rpc::sui_rpc_doc;
-use sui_json_rpc::transaction_builder_api::TransactionBuilderApi;
-use sui_json_rpc::transaction_execution_api::TransactionExecutionApi;
-use sui_json_rpc::SuiRpcModule;
-use sui_json_rpc_api::IndexerApiOpenRpc;
-use sui_json_rpc_api::MoveUtilsOpenRpc;
+use sui_json_rpc::{
+    governance_api::GovernanceReadApi, read_api::ReadApi, sui_rpc_doc,
+    transaction_builder_api::TransactionBuilderApi,
+    transaction_execution_api::TransactionExecutionApi, SuiRpcModule,
+};
+use sui_json_rpc_api::{IndexerApiOpenRpc, MoveUtilsOpenRpc};
+
+use crate::examples::RpcExampleProvider;
 
 mod examples;
 
@@ -54,8 +52,8 @@ async fn main() {
     open_rpc.add_module(TransactionExecutionApi::rpc_doc_module());
     open_rpc.add_module(TransactionBuilderApi::rpc_doc_module());
     open_rpc.add_module(GovernanceReadApi::rpc_doc_module());
-    //temporarily remove api ref content for indexer methods
-    //open_rpc.add_module(ExtendedApiOpenRpc::module_doc());
+    // temporarily remove api ref content for indexer methods
+    // open_rpc.add_module(ExtendedApiOpenRpc::module_doc());
     open_rpc.add_module(MoveUtilsOpenRpc::module_doc());
 
     open_rpc.add_examples(RpcExampleProvider::new().examples());
diff --git a/crates/sui-open-rpc/src/lib.rs b/crates/sui-open-rpc/src/lib.rs
index c3e54e0e0cc..06305550e11 100644
--- a/crates/sui-open-rpc/src/lib.rs
+++ b/crates/sui-open-rpc/src/lib.rs
@@ -3,19 +3,20 @@
 
 extern crate core;
 
-use std::collections::btree_map::Entry::Occupied;
-use std::collections::{BTreeMap, HashMap};
+use std::collections::{btree_map::Entry::Occupied, BTreeMap, HashMap};
 
-use schemars::gen::{SchemaGenerator, SchemaSettings};
-use schemars::schema::SchemaObject;
-use schemars::JsonSchema;
+use schemars::{
+    gen::{SchemaGenerator, SchemaSettings},
+    schema::SchemaObject,
+    JsonSchema,
+};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 use versions::Versioning;
 
 /// OPEN-RPC documentation following the OpenRPC specification
-/// The implementation is partial, only required fields and subset of optional fields
-/// in the specification are implemented catered to Sui's need.
+/// The implementation is partial; only required fields and a subset of
+/// optional fields in the specification are implemented, catering to Sui's
+/// needs.
 #[derive(Serialize, Deserialize, Clone)]
 pub struct Project {
     openrpc: String,
diff --git a/crates/sui-open-rpc/tests/generate-spec.rs b/crates/sui-open-rpc/tests/generate-spec.rs
index 24eb71d1613..88be94a828b 100644
--- a/crates/sui-open-rpc/tests/generate-spec.rs
+++ b/crates/sui-open-rpc/tests/generate-spec.rs
@@ -4,8 +4,9 @@
 #[test]
 #[cfg_attr(msim, ignore)]
 fn test_json_rpc_spec() {
-    // If this test breaks and you intended a json rpc schema change, you need to run to get the fresh schema:
-    // # cargo -q run --example generate-json-rpc-spec -- record
+    // If this test breaks and you intended a json rpc schema change, you need
+    // to run the following to get the fresh schema:
+    // # cargo -q run --example generate-json-rpc-spec -- record
     let status = std::process::Command::new("cargo")
         .current_dir("..")
         .args(["run", "--example", "generate-json-rpc-spec", "--"])
diff --git a/crates/sui-oracle/src/config.rs b/crates/sui-oracle/src/config.rs
index a8947b19f65..158328dcabd 100644
--- a/crates/sui-oracle/src/config.rs
+++ b/crates/sui-oracle/src/config.rs
@@ -1,10 +1,9 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use std::{collections::HashMap, net::SocketAddr, time::Duration};
+
 use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-use std::net::SocketAddr;
-use std::time::Duration;
 use sui_config::Config;
 use sui_types::base_types::ObjectID;
diff --git a/crates/sui-oracle/src/lib.rs b/crates/sui-oracle/src/lib.rs
index 3f649466280..f673d795971 100644
--- a/crates/sui-oracle/src/lib.rs
+++ b/crates/sui-oracle/src/lib.rs
@@ -1,41 +1,38 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use std::{
+    collections::HashMap,
+    ops::Add,
+    str::FromStr,
+    sync::Arc,
+    time::{Duration, Instant, SystemTime},
+};
+
 use chrono::{DateTime, Utc};
 use config::{DownloadFeedConfigs, UploadFeedConfig, UploadParameters};
 use metrics::OracleMetrics;
 use mysten_metrics::monitored_scope;
 use once_cell::sync::OnceCell;
 use prometheus::Registry;
-use std::ops::Add;
-use std::str::FromStr;
-use std::sync::Arc;
-use std::time::{Duration, SystemTime};
-use std::{collections::HashMap, time::Instant};
-use sui_json_rpc_types::SuiTransactionBlockResponse;
 use sui_json_rpc_types::{
     SuiObjectDataOptions, SuiTransactionBlockEffects, SuiTransactionBlockEffectsAPI,
-    SuiTransactionBlockResponseOptions,
+    SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions,
+};
+use sui_sdk::{
+    apis::ReadApi, rpc_types::SuiObjectResponse, wallet_context::WalletContext, SuiClient,
 };
-use sui_sdk::apis::ReadApi;
-use sui_sdk::rpc_types::SuiObjectResponse;
-use sui_sdk::SuiClient;
-use sui_types::error::UserInputError;
-use sui_types::object::{Object, Owner};
-use sui_types::parse_sui_type_tag;
-use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder;
-use sui_types::quorum_driver_types::NON_RECOVERABLE_ERROR_MSG;
-use sui_types::transaction::{Argument, Transaction};
-use sui_types::transaction::{Command, ObjectArg};
-use sui_types::Identifier;
 use sui_types::{
-    base_types::SuiAddress,
-    transaction::{CallArg, TransactionData},
+    base_types::{random_object_ref, ObjectID, ObjectRef, SuiAddress},
+    error::UserInputError,
+    object::{Object, Owner},
+    parse_sui_type_tag,
+    programmable_transaction_builder::ProgrammableTransactionBuilder,
+    quorum_driver_types::NON_RECOVERABLE_ERROR_MSG,
+    transaction::{Argument, CallArg, Command, ObjectArg, Transaction, TransactionData},
+    Identifier,
 };
 use tap::tap::TapFallible;
-
-use sui_sdk::wallet_context::WalletContext;
-use sui_types::base_types::{random_object_ref, ObjectID, ObjectRef};
 use tracing::{debug, error, info, warn};
 
 pub mod config;
 mod metrics;
@@ -360,7 +357,9 @@ impl OnChainDataUploader {
         let data_points = self.collect().await;
         if !data_points.is_empty() {
             if let Err(err) = self.upload(data_points).await {
-                error!("Upload failure: {err}. About to resting for {UPLOAD_FAILURE_RECOVER_SEC} sec.");
+                error!(
+                    "Upload failure: {err}. About to rest for {UPLOAD_FAILURE_RECOVER_SEC} sec."
+                );
                 tokio::time::sleep(Duration::from_secs(UPLOAD_FAILURE_RECOVER_SEC)).await;
                 self.gas_obj_ref = get_gas_obj_ref(
                     self.client.read_api(),
diff --git a/crates/sui-oracle/src/main.rs b/crates/sui-oracle/src/main.rs
index ac1e9803434..a3fb31feba8 100644
--- a/crates/sui-oracle/src/main.rs
+++ b/crates/sui-oracle/src/main.rs
@@ -1,10 +1,10 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use std::{path::PathBuf, time::Duration};
+
 use clap::Parser;
 use mysten_metrics::start_prometheus_server;
-use std::path::PathBuf;
-use std::time::Duration;
 use sui_config::Config;
 use sui_oracle::{config::OracleNodeConfig, OracleNode};
 use sui_sdk::wallet_context::WalletContext;
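The uploader hunk above shows the oracle's failure handling: log, rest for UPLOAD_FAILURE_RECOVER_SEC, then refresh the gas object reference before retrying. A minimal sketch of that recover-then-retry shape, with `try_upload` and `refresh_gas` as hypothetical stand-ins for the real helpers:

// Editor's sketch, not part of the patch.
use std::time::Duration;

const UPLOAD_FAILURE_RECOVER_SEC: u64 = 10; // assumed value for illustration

async fn upload_with_recovery<F, G>(mut try_upload: F, mut refresh_gas: G)
where
    F: FnMut() -> Result<(), String>,
    G: FnMut(),
{
    loop {
        match try_upload() {
            Ok(()) => break,
            Err(err) => {
                eprintln!("Upload failure: {err}. Resting for {UPLOAD_FAILURE_RECOVER_SEC} sec.");
                tokio::time::sleep(Duration::from_secs(UPLOAD_FAILURE_RECOVER_SEC)).await;
                // Re-fetch the gas object reference so the retry does not
                // reuse a stale object version after the failed attempt.
                refresh_gas();
            }
        }
    }
}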
diff --git a/crates/sui-oracle/src/metrics.rs b/crates/sui-oracle/src/metrics.rs
index 552e68ae6da..02da8f14e45 100644
--- a/crates/sui-oracle/src/metrics.rs
+++ b/crates/sui-oracle/src/metrics.rs
@@ -1,13 +1,12 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
+use mysten_metrics::histogram::{Histogram, HistogramVec};
 use prometheus::{
     register_int_counter_vec_with_registry, register_int_counter_with_registry, IntCounter,
     IntCounterVec, Registry,
 };
 
-use mysten_metrics::histogram::{Histogram, HistogramVec};
-
 #[derive(Clone)]
 pub struct OracleMetrics {
     pub(crate) data_source_successes: IntCounterVec,
diff --git a/crates/sui-oracle/tests/integration_tests.rs b/crates/sui-oracle/tests/integration_tests.rs
index 5a2dc658007..e33fb8ad9f6 100644
--- a/crates/sui-oracle/tests/integration_tests.rs
+++ b/crates/sui-oracle/tests/integration_tests.rs
@@ -1,25 +1,30 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use std::path::PathBuf;
-use std::str::FromStr;
+use std::{path::PathBuf, str::FromStr};
 
 use shared_crypto::intent::Intent;
-use sui_json_rpc_types::SuiTransactionBlockEffectsAPI;
-use sui_json_rpc_types::{ObjectChange, SuiExecutionStatus};
+use sui_json_rpc_types::{ObjectChange, SuiExecutionStatus, SuiTransactionBlockEffectsAPI};
 use sui_keys::keystore::{AccountKeystore, FileBasedKeystore, Keystore};
 use sui_move_build::BuildConfig;
-use sui_sdk::rpc_types::SuiTransactionBlockResponseOptions;
-use sui_sdk::types::base_types::{ObjectID, SuiAddress};
-use sui_sdk::types::programmable_transaction_builder::ProgrammableTransactionBuilder;
-use sui_sdk::types::quorum_driver_types::ExecuteTransactionRequestType;
-use sui_sdk::types::transaction::{CallArg, ObjectArg, Transaction, TransactionData};
-use sui_sdk::types::Identifier;
-use sui_sdk::{SuiClient, SuiClientBuilder};
-use sui_types::base_types::{ObjectRef, SequenceNumber};
-use sui_types::{parse_sui_type_tag, TypeTag};
-
-// Integration tests for SUI Oracle, these test can be run manually on local or remote testnet.
+use sui_sdk::{
+    rpc_types::SuiTransactionBlockResponseOptions,
+    types::{
+        base_types::{ObjectID, SuiAddress},
+        programmable_transaction_builder::ProgrammableTransactionBuilder,
+        quorum_driver_types::ExecuteTransactionRequestType,
+        transaction::{CallArg, ObjectArg, Transaction, TransactionData},
+        Identifier,
+    },
+    SuiClient, SuiClientBuilder,
+};
+use sui_types::{
+    base_types::{ObjectRef, SequenceNumber},
+    parse_sui_type_tag, TypeTag,
+};
+
+// Integration tests for SUI Oracle; these tests can be run manually on a
+// local or remote testnet.
 #[ignore]
 #[tokio::test]
 async fn test_publish_primitive() {
diff --git a/crates/sui-package-resolver/src/lib.rs b/crates/sui-package-resolver/src/lib.rs
index ba6d03e28fe..5313c717d9f 100644
--- a/crates/sui-package-resolver/src/lib.rs
+++ b/crates/sui-package-resolver/src/lib.rs
@@ -1,27 +1,22 @@
 // Copyright (c) Mysten Labs, Inc.
 // SPDX-License-Identifier: Apache-2.0
 
-use async_trait::async_trait;
-use lru::LruCache;
-use move_binary_format::file_format::{
-    AbilitySet, FunctionDefinitionIndex, Signature as MoveSignature, SignatureIndex,
-    StructTypeParameter, Visibility,
+use std::{
+    borrow::Cow,
+    collections::{btree_map::Entry, BTreeMap, BTreeSet},
+    num::NonZeroUsize,
+    sync::{Arc, Mutex},
 };
-use std::collections::btree_map::Entry;
-use std::collections::BTreeSet;
-use std::num::NonZeroUsize;
-use std::sync::{Arc, Mutex};
-use std::{borrow::Cow, collections::BTreeMap};
-use sui_types::base_types::is_primitive_type_tag;
-use sui_types::transaction::{Argument, CallArg, Command, ProgrammableTransaction};
 
-use crate::error::Error;
-use move_binary_format::errors::Location;
+use async_trait::async_trait;
+use lru::LruCache;
 use move_binary_format::{
     access::ModuleAccess,
+    errors::Location,
     file_format::{
+        AbilitySet, FunctionDefinitionIndex, Signature as MoveSignature, SignatureIndex,
         SignatureToken, StructDefinitionIndex, StructFieldInformation, StructHandleIndex,
-        TableIndex,
+        StructTypeParameter, TableIndex, Visibility,
     },
     CompiledModule,
 };
@@ -30,9 +25,16 @@ use move_core_types::{
     annotated_value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout},
     language_storage::{StructTag, TypeTag},
 };
-use sui_types::move_package::TypeOrigin;
-use sui_types::object::Object;
-use sui_types::{base_types::SequenceNumber, is_system_package, Identifier};
+use sui_types::{
+    base_types::{is_primitive_type_tag, SequenceNumber},
+    is_system_package,
+    move_package::TypeOrigin,
+    object::Object,
+    transaction::{Argument, CallArg, Command, ProgrammableTransaction},
+    Identifier,
+};
+
+use crate::error::Error;
 
 pub mod error;
 
@@ -42,16 +44,17 @@ const PACKAGE_CACHE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(10) };
 
 pub type Result<T> = std::result::Result<T, Error>;
 
-/// The Resolver is responsible for providing information about types. It relies on its internal
-/// `package_store` to load packages and then type definitions from those packages.
+/// The Resolver is responsible for providing information about types. It relies
+/// on its internal `package_store` to load packages and then type definitions
+/// from those packages.
 #[derive(Debug)]
 pub struct Resolver<S> {
     package_store: S,
     limits: Option<Limits>,
 }
 
-/// Optional configuration that imposes limits on the work that the resolver can do for each
-/// request.
+/// Optional configuration that imposes limits on the work that the resolver can
+/// do for each request.
 #[derive(Debug)]
 pub struct Limits {
     /// Maximum recursion depth through type parameters.
@@ -64,9 +67,10 @@ pub struct Limits {
     pub max_move_value_depth: usize,
 }
 
-/// Store which fetches package for the given address from the backend db and caches it
-/// locally in an lru cache. On every call to `fetch` it checks backend db and if package
-/// version is stale locally, it updates the local state before returning to the user
+/// Store which fetches package for the given address from the backend db and
+/// caches it locally in an lru cache. On every call to `fetch` it checks
+/// backend db and if package version is stale locally, it updates the local
+/// state before returning to the user
 pub struct PackageStoreWithLruCache<T> {
     pub(crate) packages: Mutex<LruCache<AccountAddress, Arc<Package>>>,
     pub(crate) inner: T,
 }
@@ -77,16 +81,18 @@ pub struct Package {
     /// The ID this package was loaded from on-chain.
     storage_id: AccountAddress,
 
-    /// The ID that this package is associated with at runtime. Bytecode in other packages refers
-    /// to types and functions from this package using this ID.
+    /// The ID that this package is associated with at runtime. Bytecode in
+    /// other packages refers to types and functions from this package using
+    /// this ID.
     runtime_id: AccountAddress,
 
-    /// The package's transitive dependencies as a mapping from the package's runtime ID (the ID it
-    /// is referred to by in other packages) to its storage ID (the ID it is loaded from on chain).
+    /// The package's transitive dependencies as a mapping from the package's
+    /// runtime ID (the ID it is referred to by in other packages) to its
+    /// storage ID (the ID it is loaded from on chain).
     linkage: Linkage,
 
-    /// The version this package was loaded at -- necessary for cache invalidation of system
-    /// packages.
+    /// The version this package was loaded at -- necessary for cache
+    /// invalidation of system packages.
     version: SequenceNumber,
 
     modules: BTreeMap<String, Module>,
@@ -98,12 +104,12 @@ type Linkage = BTreeMap<AccountAddress, AccountAddress>;
 
 pub struct Module {
     bytecode: CompiledModule,
 
-    /// Index mapping struct names to their defining ID, and the index for their definition in the
-    /// bytecode, to speed up definition lookups.
+    /// Index mapping struct names to their defining ID, and the index for their
+    /// definition in the bytecode, to speed up definition lookups.
     struct_index: BTreeMap<String, (AccountAddress, StructDefinitionIndex)>,
 
-    /// Index mapping function names to the index for their definition in the bytecode, to speed up
-    /// definition lookups.
+    /// Index mapping function names to the index for their definition in the
+    /// bytecode, to speed up definition lookups.
     function_index: BTreeMap<String, FunctionDefinitionIndex>,
 }
 
@@ -119,8 +125,9 @@ pub struct StructDef {
     /// Ability constraints and phantom status for type parameters
     pub type_params: Vec<StructTypeParameter>,
 
-    /// Serialized representation of fields (names and deserialized signatures). Signatures refer to
-    /// packages at their runtime IDs (not their storage ID or defining ID).
+    /// Serialized representation of fields (names and deserialized signatures).
+    /// Signatures refer to packages at their runtime IDs (not their storage
+    /// ID or defining ID).
     pub fields: Vec<(String, OpenSignatureBody)>,
 }
 
@@ -143,9 +150,9 @@ pub struct FunctionDef {
     pub return_: Vec<Signature>,
 }
 
-/// Fully qualified struct identifier. Uses copy-on-write strings so that when it is used as a key
-/// to a map, an instance can be created to query the map without having to allocate strings on the
-/// heap.
+/// Fully qualified struct identifier. Uses copy-on-write strings so that when
+/// it is used as a key to a map, an instance can be created to query the map
+/// without having to allocate strings on the heap.
 #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
 pub struct DatatypeRef<'m, 'n> {
     pub package: AccountAddress,
@@ -162,22 +169,24 @@ pub enum Reference {
     Mutable,
 }
 
-/// A function parameter or return signature, with its type parameters instantiated.
+/// A function parameter or return signature, with its type parameters
+/// instantiated.
 #[derive(Clone, Debug)]
 pub struct Signature {
     pub ref_: Option<Reference>,
     pub body: TypeTag,
 }
 
-/// Deserialized representation of a type signature that could appear as a function parameter or
-/// return.
+/// Deserialized representation of a type signature that could appear as a
+/// function parameter or return.
 #[derive(Clone, Debug)]
 pub struct OpenSignature {
     pub ref_: Option<Reference>,
     pub body: OpenSignatureBody,
 }
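The `linkage` table documented above maps a dependency's runtime ID to its storage ID, and `Package::relocate` later in this file performs that translation, special-casing the current package because it has no entry in its own table. A self-contained sketch of that rule, with illustrative stand-in types rather than the crate's own:

// Editor's sketch, not part of the patch.
use std::collections::BTreeMap;

type Address = [u8; 32]; // stand-in for AccountAddress

struct PackageInfo {
    storage_id: Address,
    runtime_id: Address,
    linkage: BTreeMap<Address, Address>, // runtime ID -> storage ID
}

impl PackageInfo {
    fn relocate(&self, runtime_id: Address) -> Option<Address> {
        // The current package does not appear in its own linkage table, so it
        // is special-cased before consulting the table.
        if runtime_id == self.runtime_id {
            return Some(self.storage_id);
        }
        self.linkage.get(&runtime_id).copied()
    }
}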
-/// Deserialized representation of a type signature that could appear as a field type for a struct.
+/// Deserialized representation of a type signature that could appear as a field
+/// type for a struct.
 #[derive(Clone, Debug)]
 pub enum OpenSignatureBody {
     Address,
@@ -196,20 +205,21 @@ pub enum OpenSignatureBody {
 
 /// Information necessary to convert a type tag into a type layout.
 #[derive(Debug, Default)]
 struct ResolutionContext<'l> {
-    /// Definitions (field information) for structs referred to by types added to this context.
+    /// Definitions (field information) for structs referred to by types added
+    /// to this context.
     structs: BTreeMap<DatatypeRef<'static, 'static>, StructDef>,
     /// Limits configuration from the calling resolver.
     limits: Option<&'l Limits>,
 }
 
-/// Interface to abstract over access to a store of live packages. Used to override the default
-/// store during testing.
+/// Interface to abstract over access to a store of live packages. Used to
+/// override the default store during testing.
 #[async_trait]
 pub trait PackageStore: Send + Sync + 'static {
     /// Latest version of the object at `id`.
     async fn version(&self, id: AccountAddress) -> Result<SequenceNumber>;
-    /// Read package contents. Fails if `id` is not an object, not a package, or is malformed in
-    /// some way.
+    /// Read package contents. Fails if `id` is not an object, not a package, or
+    /// is malformed in some way.
     async fn fetch(&self, id: AccountAddress) -> Result<Arc<Package>>;
 }
 
@@ -230,8 +240,8 @@ macro_rules! as_ref_impl {
 as_ref_impl!(Arc<dyn PackageStore>);
 as_ref_impl!(Box<dyn PackageStore>);
 
-/// Check $value does not exceed $limit in config, if the limit config exists, returning an error
-/// containing the max value and actual value otherwise.
+/// Check $value does not exceed $limit in config, if the limit config exists,
+/// returning an error containing the max value and actual value otherwise.
 macro_rules! check_max_limit {
     ($err:ident, $config:expr; $limit:ident $op:tt $value:expr) => {
         if let Some(l) = $config {
@@ -269,20 +279,22 @@ impl<S> Resolver<S> {
     }
 }
 
 impl<S: PackageStore> Resolver<S> {
-    /// Return the type layout corresponding to the given type tag. The layout always refers to
-    /// structs in terms of their defining ID (i.e. their package ID always points to the first
-    /// package that introduced them).
+    /// Return the type layout corresponding to the given type tag. The layout
+    /// always refers to structs in terms of their defining ID (i.e. their
+    /// package ID always points to the first package that introduced them).
     pub async fn type_layout(&self, mut tag: TypeTag) -> Result<MoveTypeLayout> {
         let mut context = ResolutionContext::new(self.limits.as_ref());
 
-        // (1). Fetch all the information from this store that is necessary to resolve types
-        // referenced by this tag.
+        // (1). Fetch all the information from this store that is necessary to resolve
+        // types referenced by this tag.
         context
             .add_type_tag(
                 &mut tag,
                 &self.package_store,
-                /* visit_fields */ true,
-                /* visit_phantoms */ true,
+                // visit_fields
+                true,
+                // visit_phantoms
+                true,
             )
             .await?;
 
@@ -295,22 +307,25 @@ impl<S: PackageStore> Resolver<S> {
         Ok(context.resolve_type_layout(&tag, max_depth)?.0)
     }
 
-    /// Return the abilities of a concrete type, based on the abilities in its type definition, and
-    /// the abilities of its concrete type parameters: An instance of a generic type has `store`,
-    /// `copy, or `drop` if its definition has the ability, and all its non-phantom type parameters
-    /// have the ability as well. Similar rules apply for `key` except that it requires its type
-    /// parameters to have `store`.
+    /// Return the abilities of a concrete type, based on the abilities in its
+    /// type definition, and the abilities of its concrete type parameters:
+    /// An instance of a generic type has `store`, `copy`, or `drop` if its
+    /// definition has the ability, and all its non-phantom type parameters
+    /// have the ability as well. Similar rules apply for `key` except that it
+    /// requires its type parameters to have `store`.
     pub async fn abilities(&self, mut tag: TypeTag) -> Result<AbilitySet> {
         let mut context = ResolutionContext::new(self.limits.as_ref());
 
-        // (1). Fetch all the information from this store that is necessary to resolve types
-        // referenced by this tag.
+        // (1). Fetch all the information from this store that is necessary to resolve
+        // types referenced by this tag.
         context
             .add_type_tag(
                 &mut tag,
                 &self.package_store,
-                /* visit_fields */ false,
-                /* visit_phantoms */ false,
+                // visit_fields
+                false,
+                // visit_phantoms
+                false,
             )
             .await?;
 
@@ -318,8 +333,8 @@ impl<S: PackageStore> Resolver<S> {
         context.resolve_abilities(&tag)
     }
 
-    /// Returns the signatures of parameters to function `pkg::module::function` in the package
-    /// store, assuming the function exists.
+    /// Returns the signatures of parameters to function `pkg::module::function`
+    /// in the package store, assuming the function exists.
     pub async fn function_parameters(
        &self,
        pkg: AccountAddress,
@@ -339,15 +354,16 @@ impl<S: PackageStore> Resolver<S> {
         let mut sigs = def.parameters.clone();
 
-        // (1). Fetch all the information from this store that is necessary to resolve types
-        // referenced by this tag.
+        // (1). Fetch all the information from this store that is necessary to resolve
+        // types referenced by this tag.
         for sig in &sigs {
             context
                 .add_signature(
                     sig.body.clone(),
                     &self.package_store,
                     package.as_ref(),
-                    /* visit_fields */ false,
+                    // visit_fields
+                    false,
                 )
                 .await?;
         }
@@ -360,15 +376,17 @@ impl<S: PackageStore> Resolver<S> {
         Ok(sigs)
     }
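The derivation rule documented for `abilities` above can be stated compactly: `copy`, `drop`, and `store` require the ability on the definition and on every non-phantom type argument, while `key` instead demands `store` of the arguments. A self-contained sketch of that rule, with illustrative types rather than the crate's `AbilitySet`:

// Editor's sketch, not part of the patch.
#[derive(Clone, Copy, PartialEq)]
enum Ability { Copy, Drop, Store, Key }

/// `declared`: abilities on the struct definition.
/// `params`: for each type argument, (is_phantom, its abilities).
fn derived_abilities(declared: &[Ability], params: &[(bool, Vec<Ability>)]) -> Vec<Ability> {
    let mut out = Vec::new();
    for &ability in declared {
        // `key` requires every non-phantom argument to have `store`; the
        // other abilities require the argument to have the same ability.
        let required = match ability {
            Ability::Key => Ability::Store,
            other => other,
        };
        let ok = params
            .iter()
            .filter(|(phantom, _)| !phantom)
            .all(|(_, abilities)| abilities.contains(&required));
        if ok {
            out.push(ability);
        }
    }
    out
}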
-    /// Attempts to infer the type layouts for pure inputs to the programmable transaction.
+    /// Attempts to infer the type layouts for pure inputs to the programmable
+    /// transaction.
     ///
-    /// The returned vector contains an element for each input to `tx`. Elements corresponding to
-    /// pure inputs that are used as arguments to transaction commands will contain `Some(layout)`.
-    /// Elements for other inputs (non-pure inputs, and unused pure inputs) will be `None`.
+    /// The returned vector contains an element for each input to `tx`. Elements
+    /// corresponding to pure inputs that are used as arguments to
+    /// transaction commands will contain `Some(layout)`. Elements for other
+    /// inputs (non-pure inputs, and unused pure inputs) will be `None`.
     ///
-    /// Layout resolution can fail if a type/module/package doesn't exist, if layout resolution hits
-    /// a limit, or if a pure input is somehow used in multiple conflicting occasions (with
-    /// different types).
+    /// Layout resolution can fail if a type/module/package doesn't exist, if
+    /// layout resolution hits a limit, or if a pure input is somehow used
+    /// in multiple conflicting occasions (with different types).
     pub async fn pure_input_layouts(
         &self,
         tx: &ProgrammableTransaction,
@@ -432,8 +450,9 @@ impl<S: PackageStore> Resolver<S> {
             }
         }
 
-        // (2). Gather all the unique type tags to convert into layouts. There are relatively few
-        // primitive types so this is worth doing to avoid redundant work.
+        // (2). Gather all the unique type tags to convert into layouts. There are
+        // relatively few primitive types so this is worth doing to avoid
+        // redundant work.
         let unique_tags: BTreeSet<_> = tags.iter().filter_map(|t| t.clone()).collect();
 
         // (3). Convert the type tags into layouts.
@@ -479,10 +498,11 @@ impl<T: PackageStore> PackageStore for PackageStoreWithLruCache<T> {
         }
 
         let package = self.inner.fetch(id).await?;
 
-        // Try and insert the package into the cache, accounting for races. In most cases the
-        // racing fetches will produce the same package, but for system packages, they may not, so
-        // favour the package that has the newer version, or if they are the same, the package that
-        // is already in the cache.
+        // Try and insert the package into the cache, accounting for races. In most
+        // cases the racing fetches will produce the same package, but for
+        // system packages, they may not, so favour the package that has the
+        // newer version, or if they are the same, the package that is already
+        // in the cache.
         let mut packages = self.packages.lock().unwrap();
 
         Ok(match packages.peek(&id) {
@@ -578,11 +598,12 @@ impl Package {
         Ok(struct_def)
     }
 
-    /// Translate the `runtime_id` of a package to a specific storage ID using this package's
-    /// linkage table. Returns an error if the package in question is not present in the linkage
-    /// table.
+    /// Translate the `runtime_id` of a package to a specific storage ID using
+    /// this package's linkage table. Returns an error if the package in
+    /// question is not present in the linkage table.
     fn relocate(&self, runtime_id: AccountAddress) -> Result<AccountAddress> {
-        // Special case the current package, because it doesn't get an entry in the linkage table.
+        // Special case the current package, because it doesn't get an entry in the
+        // linkage table.
         if runtime_id == self.runtime_id {
             return Ok(self.storage_id);
         }
@@ -595,9 +616,10 @@ impl Package {
 }
 
 impl Module {
-    /// Deserialize a module from its bytecode, and a table containing the origins of its structs.
-    /// Fails if the origin table is missing an entry for one of its types, returning the name of
-    /// the type in that case.
+    /// Deserialize a module from its bytecode, and a table containing the
+    /// origins of its structs. Fails if the origin table is missing an
+    /// entry for one of its types, returning the name of the type in that
+    /// case.
     fn read(
         bytecode: CompiledModule,
         mut origins: BTreeMap<String, AccountAddress>,
@@ -642,8 +664,8 @@ impl Module {
             .as_str()
     }
 
-    /// Iterate over the structs with names strictly after `after` (or from the beginning), and
-    /// strictly before `before` (or to the end).
+    /// Iterate over the structs with names strictly after `after` (or from the
+    /// beginning), and strictly before `before` (or to the end).
     pub fn structs(
         &self,
         after: Option<&str>,
@@ -658,9 +680,10 @@ impl Module {
             .map(|(name, _)| name.as_str())
     }
 
-    /// Get the struct definition corresponding to the struct with name `name` in this module.
-    /// Returns `Ok(None)` if the struct cannot be found in this module, `Err(...)` if there was an
-    /// error deserializing it, and `Ok(Some(def))` on success.
+    /// Get the struct definition corresponding to the struct with name `name`
+    /// in this module. Returns `Ok(None)` if the struct cannot be found in
+    /// this module, `Err(...)` if there was an error deserializing it, and
+    /// `Ok(Some(def))` on success.
     pub fn struct_def(&self, name: &str) -> Result<Option<StructDef>> {
         let Some(&(defining_id, index)) = self.struct_index.get(name) else {
             return Ok(None);
         };
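The race-handling rule in `PackageStoreWithLruCache::fetch` above (prefer the newer version, and on a tie keep what is already cached) can be sketched independently of the LRU details:

// Editor's sketch, not part of the patch.
use std::sync::Arc;

struct Pkg { version: u64 }

fn resolve_race(cached: Option<Arc<Pkg>>, fetched: Arc<Pkg>) -> Arc<Pkg> {
    match cached {
        // Racing fetches of a system package may observe different versions,
        // so keep the newer copy; the tie goes to the cached one.
        Some(existing) if existing.version >= fetched.version => existing,
        _ => fetched,
    }
}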
@@ -692,8 +715,8 @@ impl Module {
         }))
     }
 
-    /// Iterate over the functions with names strictly after `after` (or from the beginning), and
-    /// strictly before `before` (or to the end).
+    /// Iterate over the functions with names strictly after `after` (or from
+    /// the beginning), and strictly before `before` (or to the end).
     pub fn functions(
         &self,
         after: Option<&str>,
@@ -708,9 +731,10 @@ impl Module {
             .map(|(name, _)| name.as_str())
     }
 
-    /// Get the function definition corresponding to the function with name `name` in this module.
-    /// Returns `Ok(None)` if the function cannot be found in this module, `Err(...)` if there was
-    /// an error deserializing it, and `Ok(Some(def))` on success.
+    /// Get the function definition corresponding to the function with name
+    /// `name` in this module. Returns `Ok(None)` if the function cannot be
+    /// found in this module, `Err(...)` if there was an error deserializing
+    /// it, and `Ok(Some(def))` on success.
     pub fn function_def(&self, name: &str) -> Result<Option<FunctionDef>> {
         let Some(&index) = self.function_index.get(name) else {
             return Ok(None);
         };
@@ -750,10 +774,12 @@ impl OpenSignature {
         })
     }
 
-    /// Return a specific instantiation of this signature, with `type_params` as the actual type
-    /// parameters. This function does not check that the supplied type parameters are valid (meet
-    /// the ability constraints of the struct or function this signature is part of), but will
-    /// produce an error if the signature references a type parameter that is out of bounds.
+    /// Return a specific instantiation of this signature, with `type_params` as
+    /// the actual type parameters. This function does not check that the
+    /// supplied type parameters are valid (meet the ability constraints of
+    /// the struct or function this signature is part of), but will
+    /// produce an error if the signature references a type parameter that is
+    /// out of bounds.
     pub fn instantiate(&self, type_params: &[TypeTag]) -> Result<Signature> {
         Ok(Signature {
             ref_: self.ref_,
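Both `structs` and `functions` above page through a name-keyed index with exclusive bounds on each end. Over a `BTreeMap` index that is a `range` with `Bound::Excluded`; a sketch (callers are expected to pass `after` strictly below `before`):

// Editor's sketch, not part of the patch.
use std::collections::BTreeMap;
use std::ops::Bound;

fn names_between<'m, V>(
    index: &'m BTreeMap<String, V>,
    after: Option<&str>,
    before: Option<&str>,
) -> impl Iterator<Item = &'m str> {
    // Strictly after `after` (or from the beginning), strictly before
    // `before` (or to the end).
    let lo = after.map_or(Bound::Unbounded, |a| Bound::Excluded(a.to_string()));
    let hi = before.map_or(Bound::Unbounded, |b| Bound::Excluded(b.to_string()));
    index.range((lo, hi)).map(|(name, _)| name.as_str())
}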
@@ -865,18 +891,20 @@ impl<'l> ResolutionContext<'l> {
         }
     }
 
-    /// Gather definitions for types that contribute to the definition of `tag` into this resolution
-    /// context, fetching data from the `store` as necessary. Also updates package addresses in
-    /// `tag` to point to runtime IDs instead of storage IDs to ensure queries made using these
-    /// addresses during the subsequent resolution phase find the relevant type information in the
-    /// context.
+    /// Gather definitions for types that contribute to the definition of `tag`
+    /// into this resolution context, fetching data from the `store` as
+    /// necessary. Also updates package addresses in `tag` to point to
+    /// runtime IDs instead of storage IDs to ensure queries made using these
+    /// addresses during the subsequent resolution phase find the relevant type
+    /// information in the context.
     ///
-    /// The `visit_fields` flag controls whether the traversal looks inside types at their fields
-    /// (which is necessary for layout resolution) or not (only explores the outer type and any type
-    /// parameters).
+    /// The `visit_fields` flag controls whether the traversal looks inside
+    /// types at their fields (which is necessary for layout resolution) or
+    /// not (only explores the outer type and any type parameters).
     ///
-    /// The `visit_phantoms` flag controls whether the traversal recurses through phantom type
-    /// parameters (which is also necessary for type resolution) or not.
+    /// The `visit_phantoms` flag controls whether the traversal recurses
+    /// through phantom type parameters (which is also necessary for type
+    /// resolution) or not.
     async fn add_type_tag(
         &mut self,
         tag: &mut TypeTag,
@@ -974,8 +1002,8 @@ impl<'l> ResolutionContext<'l> {
         Ok(())
     }
 
-    // Like `add_type_tag` but for type signatures. Needs a linkage table to translate runtime IDs
-    // into storage IDs.
+    // Like `add_type_tag` but for type signatures. Needs a linkage table to
+    // translate runtime IDs into storage IDs.
     async fn add_signature(
         &mut self,
         sig: OpenSignatureBody,
@@ -1046,12 +1074,14 @@ impl<'l> ResolutionContext<'l> {
         Ok(())
     }
 
-    /// Translate a type `tag` into its layout using only the information contained in this context.
-    /// Requires that the necessary information was added to the context through calls to
-    /// `add_type_tag` and `add_signature` before being called.
+    /// Translate a type `tag` into its layout using only the information
+    /// contained in this context. Requires that the necessary information
+    /// was added to the context through calls to `add_type_tag` and
+    /// `add_signature` before being called.
     ///
-    /// `max_depth` controls how deep the layout is allowed to grow to. The actual depth reached is
-    /// returned alongside the layout (assuming it does not exceed `max_depth`).
+    /// `max_depth` controls how deep the layout is allowed to grow to. The
+    /// actual depth reached is returned alongside the layout (assuming it
+    /// does not exceed `max_depth`).
     fn resolve_type_layout(
         &self,
         tag: &TypeTag,
@@ -1088,8 +1118,8 @@ impl<'l> ResolutionContext<'l> {
                 // resolution. Relevant entries in that cache would need to be gathered in the
                 // ResolutionContext as it is built, and then used here to avoid the recursive
                 // exploration. This optimisation is complicated by the fact that in the cache,
-                // these layouts are naturally keyed based on defining ID, but during resolution,
-                // they are keyed by runtime IDs.
+                // these layouts are naturally keyed based on defining ID, but during
+                // resolution, they are keyed by runtime IDs.
 
                 // SAFETY: `add_type_tag` ensures `structs` has an element with this key.
                 let key = DatatypeRef::from(s.as_ref());
@@ -1102,10 +1132,11 @@ impl<'l> ResolutionContext<'l> {
                     ..
                 } = s.as_ref();
 
-                // TODO (optimization): This could be made more efficient by only generating layouts
-                // for non-phantom types. This efficiency could be extended to the exploration
-                // phase (i.e. only explore layouts of non-phantom types). But this optimisation is
-                // complicated by the fact that we still need to create a correct type tag for a
+                // TODO (optimization): This could be made more efficient by only generating
+                // layouts for non-phantom types. This efficiency could be
+                // extended to the exploration phase (i.e. only explore layouts
+                // of non-phantom types). But this optimisation is complicated
+                // by the fact that we still need to create a correct type tag for a
                 // phantom parameter, which is currently done by converting a type layout into a
                 // tag.
                 let param_layouts = type_params
@@ -1115,9 +1146,10 @@ impl<'l> ResolutionContext<'l> {
                     .map(|tag| self.resolve_type_layout(tag, max_depth - 1))
                     .collect::<Result<Vec<_>>>()?;
 
-                // SAFETY: `param_layouts` contains `MoveTypeLayout`-s that are generated by this
-                // `ResolutionContext`, which guarantees that struct layouts come with types, which
-                // is necessary to avoid errors when converting layouts into type tags.
+                // SAFETY: `param_layouts` contains `MoveTypeLayout`-s that are generated by
+                // this `ResolutionContext`, which guarantees that struct
+                // layouts come with types, which is necessary to avoid errors
+                // when converting layouts into type tags.
                 let type_params = param_layouts.iter().map(|l| TypeTag::from(&l.0)).collect();
 
                 let type_ = StructTag {
@@ -1149,11 +1181,13 @@ impl<'l> ResolutionContext<'l> {
         })
     }
 
-    /// Like `resolve_type_tag` but for signatures. Needs to be provided the layouts of type
-    /// parameters which are substituted when a type parameter is encountered.
+    /// Like `resolve_type_tag` but for signatures. Needs to be provided the
+    /// layouts of type parameters which are substituted when a type
+    /// parameter is encountered.
     ///
-    /// `max_depth` controls how deep the layout is allowed to grow to. The actual depth reached is
-    /// returned alongside the layout (assuming it does not exceed `max_depth`).
+    /// `max_depth` controls how deep the layout is allowed to grow to. The
+    /// actual depth reached is returned alongside the layout (assuming it
+    /// does not exceed `max_depth`).
     fn resolve_signature_layout(
         &self,
         sig: &OpenSignatureBody,
@@ -1186,8 +1220,8 @@ impl<'l> ResolutionContext<'l> {
                     .cloned()?;
 
                 // We need to re-check the type parameter before we use it because it might have
-                // been fine when it was created, but result in too deep a layout when we use it at
-                // this position.
+                // been fine when it was created, but result in too deep a layout when we use it
+                // at this position.
                 if depth > max_depth {
                     return Err(Error::ValueNesting(
                         self.limits.map_or(0, |l| l.max_move_value_depth),
@@ -1213,9 +1247,10 @@ impl<'l> ResolutionContext<'l> {
                     .map(|sig| self.resolve_signature_layout(sig, param_layouts, max_depth - 1))
                     .collect::<Result<Vec<_>>>()?;
 
-                // SAFETY: `param_layouts` contains `MoveTypeLayout`-s that are generated by this
-                // `ResolutionContext`, which guarantees that struct layouts come with types, which
-                // is necessary to avoid errors when converting layouts into type tags.
+                // SAFETY: `param_layouts` contains `MoveTypeLayout`-s that are generated by
+                // this `ResolutionContext`, which guarantees that struct
+                // layouts come with types, which is necessary to avoid errors
+                // when converting layouts into type tags.
                 let type_params = param_layouts.iter().map(|l| TypeTag::from(&l.0)).collect();
 
                 let type_ = StructTag {
@@ -1246,8 +1281,9 @@ impl<'l> ResolutionContext<'l> {
         })
     }
 
-    /// Calculate the abilities for a concrete type `tag`. Requires that the necessary information
-    /// was added to the context through calls to `add_type_tag` before being called.
+    /// Calculate the abilities for a concrete type `tag`. Requires that the
+    /// necessary information was added to the context through calls to
+    /// `add_type_tag` before being called.
     fn resolve_abilities(&self, tag: &TypeTag) -> Result<AbilitySet> {
         use TypeTag as T;
 
         Ok(match tag {
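The `max_depth` bookkeeping threaded through the two layout resolvers above is a plain depth-limited recursion: each level consumes one unit of budget, and a sub-layout that was fine where it was computed can still overflow at a deeper position. A sketch with a toy type:

// Editor's sketch, not part of the patch.
enum Ty {
    Leaf,
    Vector(Box<Ty>),
}

/// Returns the depth actually reached, or an error once `max_depth` is
/// exhausted, mirroring the ValueNesting failure above.
fn check_depth(ty: &Ty, max_depth: usize) -> Result<usize, String> {
    if max_depth == 0 {
        return Err("value nesting exceeds configured maximum".to_string());
    }
    Ok(match ty {
        Ty::Leaf => 1,
        // A wrapper contributes one level on top of its element's depth.
        Ty::Vector(elem) => 1 + check_depth(elem, max_depth - 1)?,
    })
}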
     fn relocate_signature(&self, sig: &mut OpenSignatureBody) -> Result<()> {
         use OpenSignatureBody as O;
 
         match sig {
             O::Address | O::Bool | O::U8 | O::U16 | O::U32 | O::U64 | O::U128 | O::U256 => {
-                /* nop */
+                // nop
             }
 
             O::TypeParameter(_) => { /* nop */ }
@@ -1336,13 +1373,14 @@ impl<'s> From<&'s StructTag> for DatatypeRef<'s, 's> {
     }
 }
 
-/// Translate a string into an `Identifier`, but translating errors into this module's error type.
+/// Translate a string into an `Identifier`, but translating errors into this
+/// module's error type.
 fn ident(s: &str) -> Result<Identifier> {
     Identifier::new(s).map_err(|_| Error::NotAnIdentifier(s.to_string()))
 }
 
-/// Read and deserialize a signature index (from function parameter or return types) into a vector
-/// of signatures.
+/// Read and deserialize a signature index (from function parameter or return
+/// types) into a vector of signatures.
 fn read_signature(idx: SignatureIndex, bytecode: &CompiledModule) -> Result<Vec<OpenSignature>> {
     let MoveSignature(tokens) = bytecode.signature_at(idx);
     let mut sigs = Vec::with_capacity(tokens.len());
@@ -1356,20 +1394,26 @@ fn read_signature(idx: SignatureIndex, bytecode: &CompiledModule) -> Result<Vec<OpenSignature>> {
             .type_layout(type_("0xd0::m::O>>, u8>"))
             .await
@@ -1560,9 +1607,10 @@ mod tests {
             .await
             .unwrap();
 
-        // But this one fails, even though the big layout is for a phantom type parameter. This may
-        // change in future if we optimise the way we handle phantom type parameters to not
-        // calculate their full layout, just their type tag.
+        // But this one fails, even though the big layout is for a phantom type
+        // parameter. This may change in future if we optimise the way we handle
+        // phantom type parameters to not calculate their full layout, just
+        // their type tag.
         let err = resolver
             .type_layout(type_("0xd0::m::O>>"))
             .await
@@ -1587,8 +1635,9 @@ mod tests {
             },
         );
 
-        // Make sure that even if all type parameters individually meet the depth requirements,
-        // that we correctly fail if they extend the layout's depth on application.
+        // Make sure that even if all type parameters individually meet the depth
+        // requirements, that we correctly fail if they extend the layout's
+        // depth on application.
         let err = resolver
             .type_layout(type_("0xd0::m::O, u8>"))
             .await
@@ -1634,8 +1683,8 @@ mod tests {
         // Load A0.
         assert_eq!(stats(&inner), (1, 0));
 
-        // Layouts are the same, no need to reload the package. Not a system package, so no version
-        // check needed.
+        // Layouts are the same, no need to reload the package. Not a system package,
+        // so no version check needed.
         let l1 = resolver.type_layout(type_("0xa0::m::T0")).await.unwrap();
         assert_eq!(format!("{l0}"), format!("{l1}"));
         assert_eq!(stats(&inner), (1, 0));
@@ -1645,8 +1694,9 @@ mod tests {
         assert_ne!(format!("{l0}"), format!("{l2}"));
         assert_eq!(stats(&inner), (1, 0));
 
-        // New package to load. It's a system package, which would need a version check if it
-        // already existed in the cache, but it doesn't yet, so we only see a fetch.
+        // New package to load. It's a system package, which would need a version check
+        // if it already existed in the cache, but it doesn't yet, so we only
+        // see a fetch.
         let l3 = resolver.type_layout(type_("0x1::m::T0")).await.unwrap();
         assert_eq!(stats(&inner), (2, 0));
 
@@ -1661,9 +1711,10 @@ mod tests {
             cached_package(2, BTreeMap::new(), &build_package("s1"), &s1_types()),
         );
 
-        // Reload the same system type again. The version check fails and the system package is
-        // refetched (even though the type is the same as before). This usage pattern (layouts for
-        // system types) is why a layout cache would be particularly helpful (future optimisation).
+        // Reload the same system type again. The version check fails and the system
+        // package is refetched (even though the type is the same as before).
+        // This usage pattern (layouts for system types) is why a layout cache
+        // would be particularly helpful (future optimisation).
         let l5 = resolver.type_layout(type_("0x1::m::T0")).await.unwrap();
         assert_eq!(format!("{l4}"), format!("{l5}"));
         assert_eq!(stats(&inner), (3, 2));
@@ -1935,8 +1986,8 @@ mod tests {
         assert_eq!(a1, S::EMPTY | A::Copy | A::Drop);
     }
 
-    /// Key is different from other abilities in that it requires fields to have `store`, rather
-    /// than itself.
+    /// Key is different from other abilities in that it requires fields to have
+    /// `store`, rather than itself.
     #[tokio::test]
     async fn test_key_abilities() {
         use Ability as A;
@@ -1960,8 +2011,8 @@ mod tests {
             .unwrap();
         assert_eq!(a2, S::EMPTY | A::Key | A::Store);
 
-        // We would not be able to get an instance of this type, but in case the question is asked,
-        // its abilities would be empty.
+        // We would not be able to get an instance of this type, but in case the
+        // question is asked, its abilities would be empty.
         let a3 = resolver
             .abilities(type_("0xd0::m::O<0xd0::m::R, u64>"))
             .await
@@ -2071,8 +2122,8 @@ mod tests {
             },
         );
 
-        // This request is OK, because one of O's type parameters is phantom, so we can avoid
-        // loading its definition.
+        // This request is OK, because one of O's type parameters is phantom, so we can
+        // avoid loading its definition.
         let a1 = resolver
             .abilities(type_("0xd0::m::O<0xd0::m::S, 0xd0::m::Q>"))
             .await
@@ -2107,8 +2158,8 @@ mod tests {
             },
         );
 
-        // This request is OK, because one of O's type parameters is phantom, so we can avoid
-        // loading its definition.
+        // This request is OK, because one of O's type parameters is phantom, so we can
+        // avoid loading its definition.
         let a1 = resolver
             .abilities(type_(
                 "0xd0::m::O<0xd0::m::S, 0xd0::m::T, vector>>",
             ))
             .await
@@ -2247,7 +2298,9 @@ mod tests {
         );
     }
 
-    /***** Test Helpers ***************************************************************************/
+    /***** Test Helpers *********************************************************/
 
     type TypeOriginTable = Vec<StructKey>;
 
@@ -2319,9 +2372,11 @@ mod tests {
         ]
     }
 
-    /// Build an in-memory package cache from locally compiled packages. Assumes that all packages
-    /// in `packages` are published (all modules have a non-zero package address and all packages
-    /// have a 'published-at' address), and their transitive dependencies are also in `packages`.
+    /// Build an in-memory package cache from locally compiled packages.
+    /// Assumes that all packages in `packages` are published (all modules
+    /// have a non-zero package address and all packages have a
+    /// 'published-at' address), and their transitive dependencies are also
+    /// in `packages`.
     fn package_cache(
         packages: impl IntoIterator<Item = (AccountAddress, CompiledPackage)>,
    ) -> (Arc<Mutex<InnerStore>>, Box<dyn PackageStore>) {
@@ -2455,8 +2510,8 @@ mod tests {
     }
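The in-memory store that follows backs `PackageStore` with a probe-able map. Against the trait as declared earlier in this file (an async `version` probe plus an async `fetch`), a minimal implementation looks roughly like this; the map layout and the `Error::PackageNotFound` variant are assumptions for illustration:

// Editor's sketch, not part of the patch.
use std::{
    collections::BTreeMap,
    sync::{Arc, Mutex},
};

use async_trait::async_trait;

struct MapStore {
    // Assumed layout: latest version alongside the package contents.
    packages: Mutex<BTreeMap<AccountAddress, (SequenceNumber, Arc<Package>)>>,
}

#[async_trait]
impl PackageStore for MapStore {
    async fn version(&self, id: AccountAddress) -> Result<SequenceNumber> {
        self.packages
            .lock()
            .unwrap()
            .get(&id)
            .map(|(v, _)| *v)
            .ok_or_else(|| Error::PackageNotFound(id)) // assumed error variant
    }

    async fn fetch(&self, id: AccountAddress) -> Result<Arc<Package>> {
        self.packages
            .lock()
            .unwrap()
            .get(&id)
            .map(|(_, p)| p.clone())
            .ok_or_else(|| Error::PackageNotFound(id)) // assumed error variant
    }
}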
     struct InMemoryPackageStore {
-        /// All the contents are stored in an `InnerStore` that can be probed and queried from
-        /// outside.
+        /// All the contents are stored in an `InnerStore` that can be probed
+        /// and queried from outside.
         inner: Arc<Mutex<InnerStore>>,
     }
diff --git a/crates/sui-proc-macros/src/lib.rs b/crates/sui-proc-macros/src/lib.rs
index b7689cbc904..38a93cbee43 100644
--- a/crates/sui-proc-macros/src/lib.rs
+++ b/crates/sui-proc-macros/src/lib.rs
@@ -153,7 +153,8 @@ pub fn init_static_initializers(_args: TokenStream, item: TokenStream) -> TokenStream {
 /// The sui_test macro will invoke either `#[msim::test]` or `#[tokio::test]`,
 /// depending on whether the simulator config var is enabled.
 ///
-/// This should be used for tests that can meaningfully run in either environment.
+/// This should be used for tests that can meaningfully run in either
+/// environment.
 #[proc_macro_attribute]
 pub fn sui_test(args: TokenStream, item: TokenStream) -> TokenStream {
     let input = parse_macro_input!(item as syn::ItemFn);
@@ -179,12 +180,14 @@ pub fn sui_test(args: TokenStream, item: TokenStream) -> TokenStream {
     result.into()
 }
 
-/// The sim_test macro will invoke `#[msim::test]` if the simulator config var is enabled.
+/// The sim_test macro will invoke `#[msim::test]` if the simulator config var
+/// is enabled.
 ///
-/// Otherwise, it will emit an ignored test - if forcibly run, the ignored test will panic.
+/// Otherwise, it will emit an ignored test - if forcibly run, the ignored test
+/// will panic.
 ///
-/// This macro must be used in order to pass any simulator-specific arguments, such as
-/// `check_determinism`, which is not understood by tokio.
+/// This macro must be used in order to pass any simulator-specific arguments,
+/// such as `check_determinism`, which is not understood by tokio.
 #[proc_macro_attribute]
 pub fn sim_test(args: TokenStream, item: TokenStream) -> TokenStream {
     let input = parse_macro_input!(item as syn::ItemFn);
@@ -319,7 +322,7 @@ impl CheckArithmetic {
         let Ok(exprs) = parser.parse(tokens.clone().into()) else {
             return Err(syn::Error::new_spanned(
                 tokens,
-                "could not process macro contents - use #[skip_checked_arithmetic] to skip this macro"
+                "could not process macro contents - use #[skip_checked_arithmetic] to skip this macro",
             ));
         };
 
@@ -537,10 +540,10 @@ impl Fold for CheckArithmetic {
     }
 }
 
-/// This proc macro generates a function `order_to_variant_map` which returns a map
-/// of the position of each variant to the name of the variant.
-/// It is intended to catch changes in enum order when backward compat is required.
+/// This proc macro generates a function `order_to_variant_map` which returns a
+/// map of the position of each variant to the name of the variant.
+/// It is intended to catch changes in enum order when backward compat is
+/// required.
 /// ```rust,ignore
 ///    /// Example for this enum
 ///    #[derive(EnumVariantOrder)]
 ///    pub enum MyEnum {
diff --git a/crates/sui-protocol-config-macros/src/lib.rs b/crates/sui-protocol-config-macros/src/lib.rs
index 82484b76b67..5a2135fb438 100644
--- a/crates/sui-protocol-config-macros/src/lib.rs
+++ b/crates/sui-protocol-config-macros/src/lib.rs
@@ -7,31 +7,34 @@ use proc_macro::TokenStream;
 use quote::quote;
 use syn::{parse_macro_input, Data, DeriveInput, Fields, Type};
 
-/// This proc macro generates getters, attribute lookup, etc for protocol config fields of type `Option<T>`
-/// and for the feature flags
-/// Example for a field: `new_constant: Option<u64>`, and for feature flags `feature: bool`, we derive
+/// This proc macro generates getters, attribute lookup, etc. for protocol
+/// config fields of type `Option<T>` and for the feature flags.
+/// For a field `new_constant: Option<u64>`, and for a feature flag
+/// `feature: bool`, we derive:
 /// ```rust,ignore
-/// /// Returns the value of the field if exists at the given version, otherise panic
+/// /// Returns the value of the field if it exists at the given version,
+/// /// otherwise panics.
 /// pub fn new_constant(&self) -> u64 {
 ///     self.new_constant.expect(Self::CONSTANT_ERR_MSG)
 /// }
-/// /// Returns the value of the field if exists at the given version, otherise None.
+/// /// Returns the value of the field if it exists at the given version,
+/// /// otherwise None.
 /// pub fn new_constant_as_option(&self) -> Option<u64> {
 ///     self.new_constant
 /// }
-/// // We auto derive an enum such that the variants are all the types of the fields
+/// // We auto derive an enum such that the variants are all the types of
+/// // the fields. It is used to return field values so that the type is
+/// // also encoded in the response.
 /// pub enum ProtocolConfigValue {
 ///    u32(u32),
 ///    u64(u64),
 ///    ..............
 /// }
-/// // This enum is used to return field values so that the type is also encoded in the response
 ///
-/// /// Returns the value of the field if exists at the given version, otherise None
+/// /// Returns the value of the field if it exists at the given version,
+/// /// otherwise None.
 /// pub fn lookup_attr(&self, value: String) -> Option<ProtocolConfigValue>;
 ///
 /// /// Returns a map of all configs to values
 /// pub fn attr_map(&self) -> std::collections::BTreeMap<String, Option<ProtocolConfigValue>>;
 ///
 /// /// Returns a feature by the string name or None if it doesn't exist
 /// pub fn lookup_feature(&self, value: String) -> Option<bool>;
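Given the derived accessors described in the doc comment above, call sites read a config value either as a hard requirement or as an option. A usage sketch, assuming a `ProtocolConfig` with a `new_constant: Option<u64>` field as in that doc comment:

// Editor's sketch, not part of the patch.
fn use_config(config: &ProtocolConfig) {
    // Panics if the field is not defined at this protocol version.
    let required: u64 = config.new_constant();

    // Returns None instead of panicking, for version-gated reads.
    if let Some(value) = config.new_constant_as_option() {
        println!("new_constant = {value}");
    }

    // String-keyed lookup returns the value with its type encoded.
    let _attr = config.lookup_attr("new_constant".to_string());
}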
// SPDX-License-Identifier: Apache-2.0 +use std::{ + cell::RefCell, + collections::BTreeSet, + sync::atomic::{AtomicBool, Ordering}, +}; + use clap::*; use serde::{Deserialize, Serialize}; use serde_with::skip_serializing_none; -use std::cell::RefCell; -use std::collections::BTreeSet; -use std::sync::atomic::{AtomicBool, Ordering}; use sui_protocol_config_macros::{ProtocolConfigAccessors, ProtocolConfigFeatureFlagsGetters}; use tracing::{info, warn}; @@ -17,84 +20,88 @@ const MAX_PROTOCOL_VERSION: u64 = 42; // Record history of protocol version allocations here: // // Version 1: Original version. -// Version 2: Framework changes, including advancing epoch_start_time in safemode. -// Version 3: gas model v2, including all sui conservation fixes. Fix for loaded child object -// changes, enable package upgrades, add limits on `max_size_written_objects`, -// `max_size_written_objects_system_tx` -// Version 4: New reward slashing rate. Framework changes to skip stake susbidy when the epoch -// length is short. -// Version 5: Package upgrade compatibility error fix. New gas cost table. New scoring decision -// mechanism that includes up to f scoring authorities. -// Version 6: Change to how bytes are charged in the gas meter, increase buffer stake to 0.5f -// Version 7: Disallow adding new abilities to types during package upgrades, -// disable_invariant_violation_check_in_swap_loc, -// disable init functions becoming entry, -// hash module bytes individually before computing package digest. -// Version 8: Disallow changing abilities and type constraints for type parameters in structs -// during upgrades. +// Version 2: Framework changes, including advancing epoch_start_time in +// safemode. Version 3: gas model v2, including all sui conservation fixes. Fix +// for loaded child object changes, enable package upgrades, add +// limits on `max_size_written_objects`, +// `max_size_written_objects_system_tx` Version 4: New reward slashing rate. +// Framework changes to skip stake susbidy when the epoch length is +// short. Version 5: Package upgrade compatibility error fix. New gas cost +// table. New scoring decision mechanism that includes up to f +// scoring authorities. Version 6: Change to how bytes are charged in the gas +// meter, increase buffer stake to 0.5f Version 7: Disallow adding new abilities +// to types during package upgrades, +// disable_invariant_violation_check_in_swap_loc, disable init +// functions becoming entry, hash module bytes individually before +// computing package digest. Version 8: Disallow changing abilities and type +// constraints for type parameters in structs during upgrades. // Version 9: Limit the length of Move idenfitiers to 128. // Disallow extraneous module bytes, // advance_to_highest_supported_protocol_version, -// Version 10:increase bytecode verifier `max_verifier_meter_ticks_per_function` and -// `max_meter_ticks_per_module` limits each from 6_000_000 to 16_000_000. sui-system -// framework changes. -// Version 11: Introduce `std::type_name::get_with_original_ids` to the system frameworks. Bound max depth of values within the VM. -// Version 12: Changes to deepbook in framework to add API for querying marketplace. -// Change NW Batch to use versioned metadata field. -// Changes to sui-system package to add PTB-friendly unstake function, and minor cleanup. -// Version 13: System package change deprecating `0xdee9::clob` and `0xdee9::custodian`, replaced by -// `0xdee9::clob_v2` and `0xdee9::custodian_v2`. 
-// Version 14: Introduce a config variable to allow charging of computation to be either -// bucket base or rounding up. The presence of `gas_rounding_step` (or `None`) -// decides whether rounding is applied or not. -// Version 15: Add reordering of user transactions by gas price after consensus. -// Add `sui::table_vec::drop` to the framework via a system package upgrade. -// Version 16: Enabled simplified_unwrap_then_delete feature flag, which allows the execution engine -// to no longer consult the object store when generating unwrapped_then_deleted in the -// effects; this also allows us to stop including wrapped tombstones in accumulator. -// Add self-matching prevention for deepbook. +// Version 10:increase bytecode verifier `max_verifier_meter_ticks_per_function` +// and `max_meter_ticks_per_module` limits each from 6_000_000 to +// 16_000_000. sui-system framework changes. +// Version 11: Introduce `std::type_name::get_with_original_ids` to the system +// frameworks. Bound max depth of values within the VM. Version 12: Changes to +// deepbook in framework to add API for querying marketplace. Change +// NW Batch to use versioned metadata field. Changes to sui-system +// package to add PTB-friendly unstake function, and minor cleanup. Version 13: +// System package change deprecating `0xdee9::clob` and `0xdee9::custodian`, +// replaced by `0xdee9::clob_v2` and `0xdee9::custodian_v2`. +// Version 14: Introduce a config variable to allow charging of computation to +// be either bucket base or rounding up. The presence of +// `gas_rounding_step` (or `None`) decides whether rounding is +// applied or not. Version 15: Add reordering of user transactions by gas price +// after consensus. Add `sui::table_vec::drop` to the framework via +// a system package upgrade. Version 16: Enabled simplified_unwrap_then_delete +// feature flag, which allows the execution engine to no longer +// consult the object store when generating unwrapped_then_deleted in the +// effects; this also allows us to stop including wrapped tombstones +// in accumulator. Add self-matching prevention for deepbook. // Version 17: Enable upgraded multisig support. -// Version 18: Introduce execution layer versioning, preserve all existing behaviour in v0. -// Gas minimum charges moved to be a multiplier over the reference gas price. In this -// protocol version the multiplier is the same as the lowest bucket of computation -// such that the minimum transaction cost is the same as the minimum computation +// Version 18: Introduce execution layer versioning, preserve all existing +// behaviour in v0. Gas minimum charges moved to be a multiplier +// over the reference gas price. In this protocol version the +// multiplier is the same as the lowest bucket of computation such +// that the minimum transaction cost is the same as the minimum computation // bucket. -// Add a feature flag to indicate the changes semantics of `base_tx_cost_fixed`. -// Version 19: Changes to sui-system package to enable liquid staking. -// Add limit for total size of events. +// Add a feature flag to indicate the changes semantics of +// `base_tx_cost_fixed`. Version 19: Changes to sui-system package to enable +// liquid staking. Add limit for total size of events. // Increase limit for number of events emitted to 1024. -// Version 20: Enables the flag `narwhal_new_leader_election_schedule` for the new narwhal leader -// schedule algorithm for enhanced fault tolerance and sets the bad node stake threshold -// value. 
-//             Both values are set for all the environments except mainnet.
-// Version 21: ZKLogin known providers.
-// Version 22: Child object format change.
-// Version 23: Enabling the flag `narwhal_new_leader_election_schedule` for the new narwhal leader
-//             schedule algorithm for enhanced fault tolerance and sets the bad node stake threshold
-//             value for mainnet.
-// Version 24: Re-enable simple gas conservation checks.
+// Version 20: Enables the flag `narwhal_new_leader_election_schedule` for the
+//             new narwhal leader schedule algorithm for enhanced fault
+//             tolerance and sets the bad node stake threshold value. Both
+//             values are set for all the environments except mainnet.
+// Version 21: ZKLogin known providers.
+// Version 22: Child object format change.
+// Version 23: Enabling the flag `narwhal_new_leader_election_schedule` for the
+//             new narwhal leader schedule algorithm for enhanced fault
+//             tolerance and sets the bad node stake threshold value for
+//             mainnet.
+// Version 24: Re-enable simple gas conservation checks.
 //             Package publish/upgrade number in a single transaction limited.
 //             JWK / authenticator state flags.
-// Version 25: Add sui::table_vec::swap and sui::table_vec::swap_remove to system packages.
-// Version 26: New gas model version.
-//             Add support for receiving objects off of other objects in devnet only.
-// Version 28: Add sui::zklogin::verify_zklogin_id and related functions to sui framework.
-//             Enable transaction effects v2 in devnet.
-// Version 29: Add verify_legacy_zklogin_address flag to sui framework, this add ability to verify
-//             transactions from a legacy zklogin address.
+// Version 25: Add sui::table_vec::swap and sui::table_vec::swap_remove to
+//             system packages.
+// Version 26: New gas model version.
+//             Add support for receiving objects off of other objects in devnet
+//             only.
+// Version 28: Add sui::zklogin::verify_zklogin_id and related functions to sui
+//             framework. Enable transaction effects v2 in devnet.
+// Version 29: Add verify_legacy_zklogin_address flag to sui framework; this
+//             adds the ability to verify transactions from a legacy zklogin
+//             address.
 // Version 30: Enable Narwhal CertificateV2
 //             Add support for random beacon.
 //             Enable transaction effects v2 in testnet.
-//             Deprecate supported oauth providers from protocol config and rely on node config
-//             instead.
-//             In execution, has_public_transfer is recomputed when loading the object.
-//             Add support for shared obj deletion and receiving objects off of other objects in devnet only.
-// Version 31: Add support for shared object deletion in devnet only.
-//             Add support for getting object ID referenced by receiving object in sui framework.
-//             Create new execution layer version, and preserve previous behavior in v1.
-//             Update semantics of `sui::transfer::receive` and add `sui::transfer::public_receive`.
-// Version 32: Add delete functions for VerifiedID and VerifiedIssuer.
-//             Add sui::token module to sui framework.
-//             Enable transfer to object in testnet.
+//             Deprecate supported oauth providers from protocol config and
+//             rely on node config instead.
+//             In execution, has_public_transfer is recomputed when loading the
+//             object. Add support for shared obj deletion and receiving
+//             objects off of other objects in devnet only.
+// Version 31: Add support for shared object deletion in devnet only.
+//             Add support for getting object ID referenced by receiving object
+//             in sui framework. Create new execution layer version, and
+//             preserve previous behavior in v1.
+//             Update semantics of `sui::transfer::receive` and add
+//             `sui::transfer::public_receive`.
+// Version 32: Add delete functions for VerifiedID and VerifiedIssuer.
+//             Add sui::token module to sui framework.
+//             Enable transfer to object in testnet.
 //             Enable Narwhal CertificateV2 on mainnet
 //             Make critbit tree and order getters public in deepbook.
 // Version 33: Add support for `receiving_object_id` function in framework
@@ -107,21 +114,23 @@ const MAX_PROTOCOL_VERSION: u64 = 42;
 //             Enable coin deny list.
 // Version 36: Enable group operations native functions in devnet.
 //             Enable shared object deletion in mainnet.
-//             Set the consensus accepted transaction size and the included transactions size in the proposed block.
-// Version 37: Reject entry functions with mutable Random.
-// Version 38: Introduce limits for binary tables size.
+//             Set the consensus accepted transaction size and the included
+//             transactions size in the proposed block.
+// Version 37: Reject entry functions with mutable Random.
+// Version 38: Introduce limits for binary tables size.
 // Version 39: Allow skipped epochs for randomness updates.
 //             Extra version to fix `test_upgrade_compatibility` simtest.
 // Version 40:
-// Version 41: Enable group operations native functions in testnet and mainnet (without msm).
-// Version 42: Migrate sui framework and related code to Move 2024
+// Version 41: Enable group operations native functions in testnet and mainnet
+//             (without msm).
+// Version 42: Migrate sui framework and related code to Move 2024.
 #[derive(Copy, Clone, Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
 pub struct ProtocolVersion(u64);

 impl ProtocolVersion {
-    // The minimum and maximum protocol version supported by this binary. Counterintuitively, this constant may
-    // change over time as support for old protocol versions is removed from the source. This
-    // ensures that when a new network (such as a testnet) is created, its genesis committee will
+    // The minimum and maximum protocol version supported by this binary.
+    // Counterintuitively, this constant may change over time as support for old
+    // protocol versions is removed from the source. This ensures that when a
+    // new network (such as a testnet) is created, its genesis committee will
     // use a protocol version that is actually supported by the binary.
     pub const MIN: Self = Self(MIN_PROTOCOL_VERSION);

@@ -130,7 +139,8 @@ impl ProtocolVersion {
     #[cfg(not(msim))]
     const MAX_ALLOWED: Self = Self::MAX;

-    // We create one additional "fake" version in simulator builds so that we can test upgrades.
+    // We create one additional "fake" version in simulator builds so that we
+    // can test upgrades.
     #[cfg(msim)]
     pub const MAX_ALLOWED: Self = Self(MAX_PROTOCOL_VERSION + 1);

@@ -142,8 +152,8 @@ impl ProtocolVersion {
         self.0
     }

-    // For serde deserialization - we don't define a Default impl because there isn't a single
-    // universally appropriate default value.
+    // For serde deserialization - we don't define a Default impl because there
+    // isn't a single universally appropriate default value.
    pub fn max() -> Self {
        Self::MAX
    }

@@ -170,8 +180,8 @@ impl std::ops::Add<u64> for ProtocolVersion {
 }

 /// Models the set of protocol versions supported by a validator.
-/// The `sui-node` binary will always use the SYSTEM_DEFAULT constant, but for testing we need
-/// to be able to inject arbitrary versions into SuiNode.
+/// The `sui-node` binary will always use the SYSTEM_DEFAULT constant, but for +/// testing we need to be able to inject arbitrary versions into SuiNode. #[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct SupportedProtocolVersions { pub min: ProtocolVersion, @@ -184,8 +194,8 @@ impl SupportedProtocolVersions { max: ProtocolVersion::MAX, }; - /// Use by VersionedProtocolMessage implementors to describe in which range of versions a - /// message variant is supported. + /// Use by VersionedProtocolMessage implementors to describe in which range + /// of versions a message variant is supported. pub fn new_for_message(min: u64, max: u64) -> Self { let min = ProtocolVersion::new(min); let max = ProtocolVersion::new(max); @@ -256,8 +266,8 @@ struct FeatureFlags { // Disables unnecessary invariant check in the Move VM when swapping the value out of a local #[serde(skip_serializing_if = "is_false")] disable_invariant_violation_check_in_swap_loc: bool, - // advance to highest supported protocol version at epoch change, instead of the next consecutive - // protocol version. + // advance to highest supported protocol version at epoch change, instead of the next + // consecutive protocol version. #[serde(skip_serializing_if = "is_false")] advance_to_highest_supported_protocol_version: bool, // If true, disallow entry modifiers on entry functions @@ -283,10 +293,10 @@ struct FeatureFlags { #[serde(skip_serializing_if = "ConsensusTransactionOrdering::is_none")] consensus_transaction_ordering: ConsensusTransactionOrdering, - // Previously, the unwrapped_then_deleted field in TransactionEffects makes a distinction between - // whether an object has existed in the store previously (i.e. whether there is a tombstone). - // Such dependency makes effects generation inefficient, and requires us to include wrapped - // tombstone in state root hash. + // Previously, the unwrapped_then_deleted field in TransactionEffects makes a distinction + // between whether an object has existed in the store previously (i.e. whether there is a + // tombstone). Such dependency makes effects generation inefficient, and requires us to + // include wrapped tombstone in state root hash. // To prepare for effects V2, with this flag set to true, we simplify the definition of // unwrapped_then_deleted to always include unwrapped then deleted objects, // regardless of their previous state in the store. @@ -406,7 +416,8 @@ fn is_empty(b: &BTreeSet) -> bool { /// Ordering mechanism for transactions in one Narwhal consensus output. #[derive(Default, Copy, Clone, PartialEq, Eq, Serialize, Debug)] pub enum ConsensusTransactionOrdering { - /// No ordering. Transactions are processed in the order they appear in the consensus output. + /// No ordering. Transactions are processed in the order they appear in the + /// consensus output. #[default] None, /// Order transactions by gas price, highest first. @@ -421,9 +432,9 @@ impl ConsensusTransactionOrdering { /// Constants that change the behavior of the protocol. /// -/// The value of each constant here must be fixed for a given protocol version. To change the value -/// of a constant, advance the protocol version, and add support for it in `get_for_version` under -/// the new version number. +/// The value of each constant here must be fixed for a given protocol version. +/// To change the value of a constant, advance the protocol version, and add +/// support for it in `get_for_version` under the new version number. /// (below). 
/// /// To add a new field to this struct, use the following procedure: @@ -432,8 +443,9 @@ impl ConsensusTransactionOrdering { /// - Initialize the field to `None` in prior protocol versions. /// - Initialize the field to `Some(val)` for your new protocol version. /// - Add a public getter that simply unwraps the field. -/// - Two public getters of the form `field(&self) -> field_type` -/// and `field_as_option(&self) -> Option` will be automatically generated for you. +/// - Two public getters of the form `field(&self) -> field_type` and +/// `field_as_option(&self) -> Option` will be automatically +/// generated for you. /// Example for a field: `new_constant: Option` /// ```rust,ignore /// pub fn new_constant(&self) -> u64 { @@ -443,12 +455,13 @@ impl ConsensusTransactionOrdering { /// self.new_constant.expect(Self::CONSTANT_ERR_MSG) /// } /// ``` -/// With `pub fn new_constant(&self) -> u64`, if the constant is accessed in a protocol version -/// in which it is not defined, the validator will crash. (Crashing is necessary because -/// this type of error would almost always result in forking if not prevented here). -/// If you don't want the validator to crash, you can use the -/// `pub fn new_constant_as_option(&self) -> Option` getter, which will -/// return `None` if the field is not defined at that version. +/// With `pub fn new_constant(&self) -> u64`, if the constant is accessed in a +/// protocol version in which it is not defined, the validator will crash. +/// (Crashing is necessary because this type of error would almost always result +/// in forking if not prevented here). If you don't want the validator to crash, +/// you can use the `pub fn new_constant_as_option(&self) -> Option` +/// getter, which will return `None` if the field is not defined at that +/// version. /// - If you want a customized getter, you can add a method in the impl. #[skip_serializing_none] #[derive(Clone, Serialize, Debug, ProtocolConfigAccessors)] @@ -461,15 +474,18 @@ pub struct ProtocolConfig { /// Maximum serialized size of a transaction (in bytes). max_tx_size_bytes: Option, - /// Maximum number of input objects to a transaction. Enforced by the transaction input checker + /// Maximum number of input objects to a transaction. Enforced by the + /// transaction input checker max_input_objects: Option, - /// Max size of objects a transaction can write to disk after completion. Enforce by the Sui adapter. - /// This is the sum of the serialized size of all objects written to disk. - /// The max size of individual objects on the other hand is `max_move_object_size`. + /// Max size of objects a transaction can write to disk after completion. + /// Enforce by the Sui adapter. This is the sum of the serialized size + /// of all objects written to disk. The max size of individual objects + /// on the other hand is `max_move_object_size`. max_size_written_objects: Option, - /// Max size of objects a system transaction can write to disk after completion. Enforce by the Sui adapter. - /// Similar to `max_size_written_objects` but for system transactions. + /// Max size of objects a system transaction can write to disk after + /// completion. Enforce by the Sui adapter. Similar to + /// `max_size_written_objects` but for system transactions. max_size_written_objects_system_tx: Option, /// Maximum size of serialized transaction effects. @@ -487,8 +503,8 @@ pub struct ProtocolConfig { /// Maximum number of transitive dependencies in a package when publishing. 
max_package_dependencies: Option, - /// Maximum number of arguments in a move call or a ProgrammableTransaction's - /// TransferObjects command. + /// Maximum number of arguments in a move call or a + /// ProgrammableTransaction's TransferObjects command. max_arguments: Option, /// Maximum number of total type arguments, computed recursively. @@ -504,7 +520,8 @@ pub struct ProtocolConfig { max_programmable_tx_commands: Option, // ==== Move VM, Move bytecode verifier, and execution limits === - /// Maximum Move bytecode version the VM understands. All older versions are accepted. + /// Maximum Move bytecode version the VM understands. All older versions are + /// accepted. move_binary_format_version: Option, /// Configuration controlling binary tables size. binary_module_handles: Option, @@ -522,23 +539,30 @@ pub struct ProtocolConfig { binary_field_instantiations: Option, binary_friend_decls: Option, - /// Maximum size of the `contents` part of an object, in bytes. Enforced by the Sui adapter when effects are produced. + /// Maximum size of the `contents` part of an object, in bytes. Enforced by + /// the Sui adapter when effects are produced. max_move_object_size: Option, - // TODO: Option 500 KB exceeds the max computation gas cost - /// Maximum size of a Move package object, in bytes. Enforced by the Sui adapter at the end of a publish transaction. + // TODO: Option 500 KB exceeds the max + // computation gas cost + /// Maximum size of a Move package object, in bytes. Enforced by the Sui + /// adapter at the end of a publish transaction. max_move_package_size: Option, - /// Max number of publish or upgrade commands allowed in a programmable transaction block. + /// Max number of publish or upgrade commands allowed in a programmable + /// transaction block. max_publish_or_upgrade_per_ptb: Option, - /// Maximum number of gas units that a single MoveCall transaction can use. Enforced by the Sui adapter. + /// Maximum number of gas units that a single MoveCall transaction can use. + /// Enforced by the Sui adapter. max_tx_gas: Option, - /// Maximum amount of the proposed gas price in MIST (defined in the transaction). + /// Maximum amount of the proposed gas price in MIST (defined in the + /// transaction). max_gas_price: Option, - /// The max computation bucket for gas. This is the max that can be charged for computation. + /// The max computation bucket for gas. This is the max that can be charged + /// for computation. max_gas_computation_bucket: Option, // Define the value used to round up computation gas charges @@ -547,55 +571,72 @@ pub struct ProtocolConfig { /// Maximum number of nested loops. Enforced by the Move bytecode verifier. max_loop_depth: Option, - /// Maximum number of type arguments that can be bound to generic type parameters. Enforced by the Move bytecode verifier. + /// Maximum number of type arguments that can be bound to generic type + /// parameters. Enforced by the Move bytecode verifier. max_generic_instantiation_length: Option, - /// Maximum number of parameters that a Move function can have. Enforced by the Move bytecode verifier. + /// Maximum number of parameters that a Move function can have. Enforced by + /// the Move bytecode verifier. max_function_parameters: Option, - /// Maximum number of basic blocks that a Move function can have. Enforced by the Move bytecode verifier. + /// Maximum number of basic blocks that a Move function can have. Enforced + /// by the Move bytecode verifier. max_basic_blocks: Option, /// Maximum stack size value. 
Enforced by the Move bytecode verifier. max_value_stack_size: Option, - /// Maximum number of "type nodes", a metric for how big a SignatureToken will be when expanded into a fully qualified type. Enforced by the Move bytecode verifier. + /// Maximum number of "type nodes", a metric for how big a SignatureToken + /// will be when expanded into a fully qualified type. Enforced by the Move + /// bytecode verifier. max_type_nodes: Option, - /// Maximum number of push instructions in one function. Enforced by the Move bytecode verifier. + /// Maximum number of push instructions in one function. Enforced by the + /// Move bytecode verifier. max_push_size: Option, - /// Maximum number of struct definitions in a module. Enforced by the Move bytecode verifier. + /// Maximum number of struct definitions in a module. Enforced by the Move + /// bytecode verifier. max_struct_definitions: Option, - /// Maximum number of function definitions in a module. Enforced by the Move bytecode verifier. + /// Maximum number of function definitions in a module. Enforced by the Move + /// bytecode verifier. max_function_definitions: Option, - /// Maximum number of fields allowed in a struct definition. Enforced by the Move bytecode verifier. + /// Maximum number of fields allowed in a struct definition. Enforced by the + /// Move bytecode verifier. max_fields_in_struct: Option, - /// Maximum dependency depth. Enforced by the Move linker when loading dependent modules. + /// Maximum dependency depth. Enforced by the Move linker when loading + /// dependent modules. max_dependency_depth: Option, - /// Maximum number of Move events that a single transaction can emit. Enforced by the VM during execution. + /// Maximum number of Move events that a single transaction can emit. + /// Enforced by the VM during execution. max_num_event_emit: Option, - /// Maximum number of new IDs that a single transaction can create. Enforced by the VM during execution. + /// Maximum number of new IDs that a single transaction can create. Enforced + /// by the VM during execution. max_num_new_move_object_ids: Option, - /// Maximum number of new IDs that a single system transaction can create. Enforced by the VM during execution. + /// Maximum number of new IDs that a single system transaction can create. + /// Enforced by the VM during execution. max_num_new_move_object_ids_system_tx: Option, - /// Maximum number of IDs that a single transaction can delete. Enforced by the VM during execution. + /// Maximum number of IDs that a single transaction can delete. Enforced by + /// the VM during execution. max_num_deleted_move_object_ids: Option, - /// Maximum number of IDs that a single system transaction can delete. Enforced by the VM during execution. + /// Maximum number of IDs that a single system transaction can delete. + /// Enforced by the VM during execution. max_num_deleted_move_object_ids_system_tx: Option, - /// Maximum number of IDs that a single transaction can transfer. Enforced by the VM during execution. + /// Maximum number of IDs that a single transaction can transfer. Enforced + /// by the VM during execution. max_num_transferred_move_object_ids: Option, - /// Maximum number of IDs that a single system transaction can transfer. Enforced by the VM during execution. + /// Maximum number of IDs that a single system transaction can transfer. + /// Enforced by the VM during execution. max_num_transferred_move_object_ids_system_tx: Option, /// Maximum size of a Move user event. Enforced by the VM during execution. 
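The `Option`-typed limits above are read through the two generated getters described earlier: a panicking `field()` and a non-panicking `field_as_option()`. Below is a minimal standalone sketch of that pattern; `Limits`, its field, and the fallback value are illustrative stand-ins, not the real `ProtocolConfigAccessors` output.

```rust
// Hypothetical miniature of the generated-accessor pattern; `Limits` and its
// field are stand-ins, not types from this crate.
struct Limits {
    max_function_parameters: Option<u64>,
}

impl Limits {
    const ERR: &'static str = "limit not defined for this protocol version";

    // Panicking getter: crashing is preferable to forking when a constant is
    // read in a version where it was never defined.
    fn max_function_parameters(&self) -> u64 {
        self.max_function_parameters.expect(Self::ERR)
    }

    // Non-panicking getter for callers that can tolerate absence.
    fn max_function_parameters_as_option(&self) -> Option<u64> {
        self.max_function_parameters
    }
}

fn main() {
    // A config snapshot from a version that predates the constant.
    let old = Limits { max_function_parameters: None };
    // Version-safe read: fall back instead of crashing.
    let effective = old.max_function_parameters_as_option().unwrap_or(128);
    println!("effective limit: {effective}");
    // `old.max_function_parameters()` would panic here, by design.
}
```

The fallback of 128 exists only for the sketch; real callers should gate behavior on the option rather than inventing defaults.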
@@ -604,39 +645,49 @@ pub struct ProtocolConfig { /// Maximum size of a Move user event. Enforced by the VM during execution. max_event_emit_size_total: Option, - /// Maximum length of a vector in Move. Enforced by the VM during execution, and for constants, by the verifier. + /// Maximum length of a vector in Move. Enforced by the VM during execution, + /// and for constants, by the verifier. max_move_vector_len: Option, - /// Maximum length of an `Identifier` in Move. Enforced by the bytecode verifier at signing. + /// Maximum length of an `Identifier` in Move. Enforced by the bytecode + /// verifier at signing. max_move_identifier_len: Option, /// Maximum depth of a Move value within the VM. max_move_value_depth: Option, - /// Maximum number of back edges in Move function. Enforced by the bytecode verifier at signing. + /// Maximum number of back edges in Move function. Enforced by the bytecode + /// verifier at signing. max_back_edges_per_function: Option, - /// Maximum number of back edges in Move module. Enforced by the bytecode verifier at signing. + /// Maximum number of back edges in Move module. Enforced by the bytecode + /// verifier at signing. max_back_edges_per_module: Option, - /// Maximum number of meter `ticks` spent verifying a Move function. Enforced by the bytecode verifier at signing. + /// Maximum number of meter `ticks` spent verifying a Move function. + /// Enforced by the bytecode verifier at signing. max_verifier_meter_ticks_per_function: Option, - /// Maximum number of meter `ticks` spent verifying a Move function. Enforced by the bytecode verifier at signing. + /// Maximum number of meter `ticks` spent verifying a Move function. + /// Enforced by the bytecode verifier at signing. max_meter_ticks_per_module: Option, // === Object runtime internal operation limits ==== // These affect dynamic fields - /// Maximum number of cached objects in the object runtime ObjectStore. Enforced by object runtime during execution + /// Maximum number of cached objects in the object runtime ObjectStore. + /// Enforced by object runtime during execution object_runtime_max_num_cached_objects: Option, - /// Maximum number of cached objects in the object runtime ObjectStore in system transaction. Enforced by object runtime during execution + /// Maximum number of cached objects in the object runtime ObjectStore in + /// system transaction. Enforced by object runtime during execution object_runtime_max_num_cached_objects_system_tx: Option, - /// Maximum number of stored objects accessed by object runtime ObjectStore. Enforced by object runtime during execution + /// Maximum number of stored objects accessed by object runtime ObjectStore. + /// Enforced by object runtime during execution object_runtime_max_num_store_entries: Option, - /// Maximum number of stored objects accessed by object runtime ObjectStore in system transaction. Enforced by object runtime during execution + /// Maximum number of stored objects accessed by object runtime ObjectStore + /// in system transaction. 
Enforced by object runtime during execution object_runtime_max_num_store_entries_system_tx: Option, // === Execution gas costs ==== @@ -644,11 +695,13 @@ pub struct ProtocolConfig { base_tx_cost_fixed: Option, /// Additional cost for a transaction that publishes a package - /// i.e., the base cost of such a transaction is base_tx_cost_fixed + package_publish_cost_fixed + /// i.e., the base cost of such a transaction is base_tx_cost_fixed + + /// package_publish_cost_fixed package_publish_cost_fixed: Option, /// Cost per byte of a Move call transaction - /// i.e., the cost of such a transaction is base_cost + (base_tx_cost_per_byte * size) + /// i.e., the cost of such a transaction is base_cost + + /// (base_tx_cost_per_byte * size) base_tx_cost_per_byte: Option, /// Cost per byte for a transaction that publishes a package @@ -681,23 +734,24 @@ pub struct ProtocolConfig { /// === Storage gas costs === - /// Per-byte cost of storing an object in the Sui global object store. Some of this cost may be refundable if the object is later freed + /// Per-byte cost of storing an object in the Sui global object store. Some + /// of this cost may be refundable if the object is later freed obj_data_cost_refundable: Option, - // Per-byte cost of storing an object in the Sui transaction log (e.g., in CertifiedTransactionEffects) - // This depends on the size of various fields including the effects - // TODO: Option, /// === Tokenomics === // TODO: Option, - /// 5% of the storage fund's share of rewards are reinvested into the storage fund. - /// In basis point. + /// 5% of the storage fund's share of rewards are reinvested into the + /// storage fund. In basis point. storage_fund_reinvest_rate: Option, /// The share of rewards that will be slashed and redistributed is 50%. @@ -710,18 +764,19 @@ pub struct ProtocolConfig { /// === Core Protocol === /// Max number of transactions per checkpoint. - /// Note that this is a protocol constant and not a config as validators must have this set to - /// the same value, otherwise they *will* fork. + /// Note that this is a protocol constant and not a config as validators + /// must have this set to the same value, otherwise they *will* fork. max_transactions_per_checkpoint: Option, /// Max size of a checkpoint in bytes. - /// Note that this is a protocol constant and not a config as validators must have this set to - /// the same value, otherwise they *will* fork. + /// Note that this is a protocol constant and not a config as validators + /// must have this set to the same value, otherwise they *will* fork. max_checkpoint_size_bytes: Option, - /// A protocol upgrade always requires 2f+1 stake to agree. We support a buffer of additional - /// stake (as a fraction of f, expressed in basis points) that is required before an upgrade - /// can happen automatically. 10000bps would indicate that complete unanimity is required (all + /// A protocol upgrade always requires 2f+1 stake to agree. We support a + /// buffer of additional stake (as a fraction of f, expressed in basis + /// points) that is required before an upgrade can happen automatically. + /// 10000bps would indicate that complete unanimity is required (all /// 3f+1 must vote), while 0bps would indicate that 2f+1 is sufficient. 
buffer_stake_for_protocol_upgrade_bps: Option, @@ -736,27 +791,33 @@ pub struct ProtocolConfig { address_from_u256_cost_base: Option, // `dynamic_field` module - // Cost params for the Move native function `hash_type_and_key(parent: address, k: K): address` + // Cost params for the Move native function `hash_type_and_key(parent: + // address, k: K): address` dynamic_field_hash_type_and_key_cost_base: Option, dynamic_field_hash_type_and_key_type_cost_per_byte: Option, dynamic_field_hash_type_and_key_value_cost_per_byte: Option, dynamic_field_hash_type_and_key_type_tag_cost_per_byte: Option, - // Cost params for the Move native function `add_child_object(parent: address, child: Child)` + // Cost params for the Move native function `add_child_object(parent: address, + // child: Child)` dynamic_field_add_child_object_cost_base: Option, dynamic_field_add_child_object_type_cost_per_byte: Option, dynamic_field_add_child_object_value_cost_per_byte: Option, dynamic_field_add_child_object_struct_tag_cost_per_byte: Option, - // Cost params for the Move native function `borrow_child_object_mut(parent: &mut UID, id: address): &mut Child` + // Cost params for the Move native function `borrow_child_object_mut(parent: &mut + // UID, id: address): &mut Child` dynamic_field_borrow_child_object_cost_base: Option, dynamic_field_borrow_child_object_child_ref_cost_per_byte: Option, dynamic_field_borrow_child_object_type_cost_per_byte: Option, - // Cost params for the Move native function `remove_child_object(parent: address, id: address): Child` + // Cost params for the Move native function `remove_child_object(parent: address, + // id: address): Child` dynamic_field_remove_child_object_cost_base: Option, dynamic_field_remove_child_object_child_cost_per_byte: Option, dynamic_field_remove_child_object_type_cost_per_byte: Option, - // Cost params for the Move native function `has_child_object(parent: address, id: address): bool` + // Cost params for the Move native function `has_child_object(parent: address, id: address): + // bool` dynamic_field_has_child_object_cost_base: Option, - // Cost params for the Move native function `has_child_object_with_ty(parent: address, id: address): bool` + // Cost params for the Move native function `has_child_object_with_ty(parent: + // address, id: address): bool` dynamic_field_has_child_object_with_ty_cost_base: Option, dynamic_field_has_child_object_with_ty_type_cost_per_byte: Option, dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: Option, @@ -936,8 +997,8 @@ pub struct ProtocolConfig { execution_version: Option, // Dictates the threshold (percentage of stake) that is used to calculate the "bad" nodes to be - // swapped when creating the consensus schedule. The values should be of the range [0 - 33]. Anything - // above 33 (f) will not be allowed. + // swapped when creating the consensus schedule. The values should be of the range [0 - 33]. + // Anything above 33 (f) will not be allowed. consensus_bad_nodes_stake_threshold: Option, max_jwk_votes_per_validator_per_epoch: Option, @@ -948,20 +1009,21 @@ pub struct ProtocolConfig { /// === random beacon === - /// Maximum allowed precision loss when reducing voting weights for the random beacon - /// protocol. + /// Maximum allowed precision loss when reducing voting weights for the + /// random beacon protocol. random_beacon_reduction_allowed_delta: Option, - /// Minimum number of shares below which voting weights will not be reduced for the - /// random beacon protocol. 
+ /// Minimum number of shares below which voting weights will not be reduced + /// for the random beacon protocol. random_beacon_reduction_lower_bound: Option, - /// Consensus Round after which DKG should be aborted and randomness disabled for - /// the epoch, if it hasn't already completed. + /// Consensus Round after which DKG should be aborted and randomness + /// disabled for the epoch, if it hasn't already completed. random_beacon_dkg_timeout_round: Option, - /// The maximum serialised transaction size (in bytes) accepted by consensus. That should be bigger than the - /// `max_tx_size_bytes` with some additional headroom. + /// The maximum serialised transaction size (in bytes) accepted by + /// consensus. That should be bigger than the `max_tx_size_bytes` with + /// some additional headroom. consensus_max_transaction_size_bytes: Option, /// The maximum size of transactions included in a consensus proposed block consensus_max_transactions_in_block_bytes: Option, @@ -1199,7 +1261,8 @@ thread_local! { // Instantiations for each protocol version. impl ProtocolConfig { - /// Get the value ProtocolConfig that are in effect during the given protocol version. + /// Get the value ProtocolConfig that are in effect during the given + /// protocol version. pub fn get_for_version(version: ProtocolVersion, chain: Chain) -> Self { // ProtocolVersion can be deserialized so we need to check it here as well. assert!( @@ -1230,8 +1293,8 @@ impl ProtocolConfig { }) } - /// Get the value ProtocolConfig that are in effect during the given protocol version. - /// Or none if the version is not supported. + /// Get the value ProtocolConfig that are in effect during the given + /// protocol version. Or none if the version is not supported. pub fn get_for_version_if_supported(version: ProtocolVersion, chain: Chain) -> Option { if version.0 >= ProtocolVersion::MIN.0 && version.0 <= ProtocolVersion::MAX_ALLOWED.0 { let mut ret = Self::get_for_version_impl(version, chain); @@ -1262,8 +1325,9 @@ impl ProtocolConfig { POISON_VERSION_METHODS.with(|p| p.load(Ordering::Relaxed)) } - /// Convenience to get the constants at the current minimum supported version. - /// Mainly used by client code that may not yet be protocol-version aware. + /// Convenience to get the constants at the current minimum supported + /// version. Mainly used by client code that may not yet be + /// protocol-version aware. pub fn get_for_min_version() -> Self { if Self::load_poison_get_for_min_version() { panic!("get_for_min_version called on validator"); @@ -1273,13 +1337,14 @@ impl ProtocolConfig { /// CAREFUL! - You probably want to use `get_for_version` instead. /// - /// Convenience to get the constants at the current maximum supported version. - /// Mainly used by genesis. Note well that this function uses the max version - /// supported locally by the node, which is not necessarily the current version - /// of the network. ALSO, this function disregards chain specific config (by - /// using Chain::Unknown), thereby potentially returning a protocol config that - /// is incorrect for some feature flags. Definitely safe for testing and for - /// protocol version 11 and prior. + /// Convenience to get the constants at the current maximum supported + /// version. Mainly used by genesis. Note well that this function uses + /// the max version supported locally by the node, which is not + /// necessarily the current version of the network. 
ALSO, this function + /// disregards chain specific config (by using Chain::Unknown), thereby + /// potentially returning a protocol config that is incorrect for some + /// feature flags. Definitely safe for testing and for protocol version + /// 11 and prior. #[allow(non_snake_case)] pub fn get_for_max_version_UNSAFE() -> Self { if Self::load_poison_get_for_min_version() { @@ -1299,8 +1364,9 @@ impl ProtocolConfig { } } - // IMPORTANT: Never modify the value of any constant for a pre-existing protocol version. - // To change the values here you must create a new protocol version with the new values! + // IMPORTANT: Never modify the value of any constant for a pre-existing protocol + // version. To change the values here you must create a new protocol + // version with the new values! let mut cfg = Self { // will be overwritten before being returned version, @@ -1309,7 +1375,8 @@ impl ProtocolConfig { feature_flags: Default::default(), max_tx_size_bytes: Some(128 * 1024), - // We need this number to be at least 100x less than `max_serialized_tx_effects_size_bytes`otherwise effects can be huge + // We need this number to be at least 100x less than + // `max_serialized_tx_effects_size_bytes`otherwise effects can be huge max_input_objects: Some(2048), max_serialized_tx_effects_size_bytes: Some(512 * 1024), max_serialized_tx_effects_size_bytes_system_tx: Some(512 * 1024 * 16), @@ -1411,27 +1478,33 @@ impl ProtocolConfig { address_from_u256_cost_base: Some(52), // `dynamic_field` module - // Cost params for the Move native function `hash_type_and_key(parent: address, k: K): address` + // Cost params for the Move native function `hash_type_and_key(parent: address, k: K): address` dynamic_field_hash_type_and_key_cost_base: Some(100), dynamic_field_hash_type_and_key_type_cost_per_byte: Some(2), dynamic_field_hash_type_and_key_value_cost_per_byte: Some(2), dynamic_field_hash_type_and_key_type_tag_cost_per_byte: Some(2), - // Cost params for the Move native function `add_child_object(parent: address, child: Child)` + // Cost params for the Move native function `add_child_object(parent: + // address, child: Child)` dynamic_field_add_child_object_cost_base: Some(100), dynamic_field_add_child_object_type_cost_per_byte: Some(10), dynamic_field_add_child_object_value_cost_per_byte: Some(10), dynamic_field_add_child_object_struct_tag_cost_per_byte: Some(10), - // Cost params for the Move native function `borrow_child_object_mut(parent: &mut UID, id: address): &mut Child` + // Cost params for the Move native function `borrow_child_object_mut(parent: + // &mut UID, id: address): &mut Child` dynamic_field_borrow_child_object_cost_base: Some(100), dynamic_field_borrow_child_object_child_ref_cost_per_byte: Some(10), dynamic_field_borrow_child_object_type_cost_per_byte: Some(10), - // Cost params for the Move native function `remove_child_object(parent: address, id: address): Child` + // Cost params for the Move native function `remove_child_object(parent: + // address, id: address): Child` dynamic_field_remove_child_object_cost_base: Some(100), dynamic_field_remove_child_object_child_cost_per_byte: Some(2), dynamic_field_remove_child_object_type_cost_per_byte: Some(2), - // Cost params for the Move native function `has_child_object(parent: address, id: address): bool` + // Cost params for the Move native function `has_child_object(parent: address, id: + // address): bool` dynamic_field_has_child_object_cost_base: Some(100), - // Cost params for the Move native function `has_child_object_with_ty(parent: 
address, id: address): bool` + // Cost params for the Move native function `has_child_object_with_ty(parent: address, id: address): bool` dynamic_field_has_child_object_with_ty_cost_base: Some(100), dynamic_field_has_child_object_with_ty_type_cost_per_byte: Some(2), dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: Some(2), @@ -1452,7 +1525,8 @@ impl ProtocolConfig { object_record_new_uid_cost_base: Some(52), // `transfer` module - // Cost params for the Move native function `transfer_impl(obj: T, recipient: address)` + // Cost params for the Move native function `transfer_impl(obj: T, recipient: + // address)` transfer_transfer_internal_cost_base: Some(52), // Cost params for the Move native function `freeze_object(obj: T)` transfer_freeze_object_cost_base: Some(52), @@ -1461,7 +1535,8 @@ impl ProtocolConfig { transfer_receive_object_cost_base: None, // `tx_context` module - // Cost params for the Move native function `transfer_impl(obj: T, recipient: address)` + // Cost params for the Move native function `transfer_impl(obj: T, recipient: + // address)` tx_context_derive_id_cost_base: Some(52), // `types` module @@ -1471,7 +1546,8 @@ impl ProtocolConfig { types_is_one_time_witness_type_cost_per_byte: Some(2), // `validator` module - // Cost params for the Move native function `validate_metadata_bcs(metadata: vector)` + // Cost params for the Move native function `validate_metadata_bcs(metadata: + // vector)` validator_validate_metadata_cost_base: Some(52), validator_validate_metadata_data_cost_per_byte: Some(2), @@ -1730,9 +1806,9 @@ impl ProtocolConfig { } 18 => { cfg.execution_version = Some(1); - // Following flags are implied by this execution version. Once support for earlier - // protocol versions is dropped, these flags can be removed: - // cfg.feature_flags.package_upgrades = true; + // Following flags are implied by this execution version. Once support for + // earlier protocol versions is dropped, these flags can be + // removed: cfg.feature_flags.package_upgrades = true; // cfg.feature_flags.disallow_adding_abilities_on_upgrade = true; // cfg.feature_flags.disallow_change_struct_type_params_on_upgrade = true; // cfg.feature_flags.loaded_child_objects_fixed = true; @@ -1747,7 +1823,7 @@ impl ProtocolConfig { // We maintain the same total size limit for events, but increase the number of // events that can be emitted. cfg.max_event_emit_size_total = Some( - 256 /* former event count limit */ * 250 * 1024, /* size limit per event */ + 256 /* former event count limit */ * 250 * 1024, // size limit per event ); } 20 => { @@ -1775,9 +1851,10 @@ impl ProtocolConfig { cfg.feature_flags.loaded_child_object_format_type = true; cfg.feature_flags.narwhal_new_leader_election_schedule = true; // Taking a baby step approach, we consider only 20% by stake as bad nodes so we - // have a 80% by stake of nodes participating in the leader committee. That allow - // us for more redundancy in case we have validators under performing - since the - // responsibility is shared amongst more nodes. We can increase that once we do have + // have a 80% by stake of nodes participating in the leader committee. That + // allow us for more redundancy in case we have validators + // under performing - since the responsibility is shared + // amongst more nodes. We can increase that once we do have // higher confidence. 
cfg.consensus_bad_nodes_stake_threshold = Some(20); } @@ -1914,7 +1991,8 @@ impl ProtocolConfig { if chain != Chain::Mainnet && chain != Chain::Testnet { cfg.feature_flags.enable_group_ops_native_functions = true; cfg.feature_flags.enable_group_ops_native_function_msm = true; - // Next values are arbitrary in a similar way as the other crypto native functions. + // Next values are arbitrary in a similar way as the other crypto native + // functions. cfg.group_ops_bls12381_decode_scalar_cost = Some(52); cfg.group_ops_bls12381_decode_g1_cost = Some(52); cfg.group_ops_bls12381_decode_g2_cost = Some(52); @@ -1986,13 +2064,15 @@ impl ProtocolConfig { cfg.execution_version = Some(3); } 39 => { - // It is important that we keep this protocol version blank due to an issue with random.move. + // It is important that we keep this protocol version blank + // due to an issue with random.move. } 40 => {} 41 => { // Enable group ops and all networks (but not msm) cfg.feature_flags.enable_group_ops_native_functions = true; - // Next values are arbitrary in a similar way as the other crypto native functions. + // Next values are arbitrary in a similar way as the other crypto native + // functions. cfg.group_ops_bls12381_decode_scalar_cost = Some(52); cfg.group_ops_bls12381_decode_g1_cost = Some(52); cfg.group_ops_bls12381_decode_g2_cost = Some(52); @@ -2042,8 +2122,9 @@ impl ProtocolConfig { } /// Override one or more settings in the config, for testing. - /// This must be called at the beginning of the test, before get_for_(min|max)_version is - /// called, since those functions cache their return value. + /// This must be called at the beginning of the test, before + /// get_for_(min|max)_version is called, since those functions cache + /// their return value. pub fn apply_overrides_for_testing( override_fn: impl Fn(ProtocolVersion, Self) -> Self + Send + 'static, ) -> OverrideGuard { @@ -2087,7 +2168,8 @@ impl ProtocolConfig { pub fn set_simplified_unwrap_then_delete(&mut self, val: bool) { self.feature_flags.simplified_unwrap_then_delete = val; if val == false { - // Given that we will never enable effect V2 before turning on simplified_unwrap_then_delete, we also need to disable effect V2 here. + // Given that we will never enable effect V2 before turning on + // simplified_unwrap_then_delete, we also need to disable effect V2 here. self.set_enable_effects_v2(false); } } @@ -2141,7 +2223,8 @@ impl Drop for OverrideGuard { } /// Defines which limit got crossed. -/// The value which crossed the limit and value of the limit crossed are embedded +/// The value which crossed the limit and value of the limit crossed are +/// embedded #[derive(PartialEq, Eq)] pub enum LimitThresholdCrossed { None, @@ -2161,8 +2244,8 @@ pub fn check_limit_in_range, U: Into, V: PartialOrd + Into>( debug_assert!(soft_limit <= hard_limit); - // It is important to preserve this comparison order because if soft_limit == hard_limit - // we want LimitThresholdCrossed::Hard + // It is important to preserve this comparison order because if soft_limit == + // hard_limit we want LimitThresholdCrossed::Hard if x >= hard_limit { LimitThresholdCrossed::Hard(x.into(), hard_limit.into()) } else if x < soft_limit { @@ -2212,9 +2295,10 @@ macro_rules! 
check_limit_by_meter { #[cfg(all(test, not(msim)))] mod test { - use super::*; use insta::assert_yaml_snapshot; + use super::*; + #[test] fn snapshot_tests() { println!("\n============================================================================"); @@ -2223,9 +2307,10 @@ mod test { println!("! !"); println!("============================================================================\n"); for chain_id in &[Chain::Unknown, Chain::Mainnet, Chain::Testnet] { - // make Chain::Unknown snapshots compatible with pre-chain-id snapshots so that we - // don't break the release-time compatibility tests. Once Chain Id configs have been - // released everywhere, we can remove this and only test Mainnet and Testnet + // make Chain::Unknown snapshots compatible with pre-chain-id snapshots so that + // we don't break the release-time compatibility tests. Once Chain + // Id configs have been released everywhere, we can remove this and + // only test Mainnet and Testnet let chain_str = match chain_id { Chain::Unknown => "".to_string(), _ => format!("{:?}_", chain_id), @@ -2280,9 +2365,10 @@ mod test { ); // We didnt have this in version 1 - assert!(prot - .lookup_attr("max_move_identifier_len".to_string()) - .is_none()); + assert!( + prot.lookup_attr("max_move_identifier_len".to_string()) + .is_none() + ); // But we did in version 9 let prot: ProtocolConfig = @@ -2295,11 +2381,12 @@ mod test { let prot: ProtocolConfig = ProtocolConfig::get_for_version(ProtocolVersion::new(1), Chain::Unknown); // We didnt have this in version 1 - assert!(prot - .attr_map() - .get("max_move_identifier_len") - .unwrap() - .is_none()); + assert!( + prot.attr_map() + .get("max_move_identifier_len") + .unwrap() + .is_none() + ); // We had this in version 1 assert!( prot.attr_map().get("max_arguments").unwrap() @@ -2310,15 +2397,17 @@ mod test { let prot: ProtocolConfig = ProtocolConfig::get_for_version(ProtocolVersion::new(1), Chain::Unknown); // Does not exist - assert!(prot - .feature_flags - .lookup_attr("some random string".to_owned()) - .is_none()); - assert!(prot - .feature_flags - .attr_map() - .get("some random string") - .is_none()); + assert!( + prot.feature_flags + .lookup_attr("some random string".to_owned()) + .is_none() + ); + assert!( + prot.feature_flags + .attr_map() + .get("some random string") + .is_none() + ); // Was false in v1 assert!( @@ -2361,9 +2450,10 @@ mod test { LimitThresholdCrossed::Soft(255u128, 100) )); // This wont compile because lossy - //assert!(check_limit!(100000000u128, low, high) == LimitThresholdCrossed::None); - // This wont compile because lossy - //assert!(check_limit!(100000000usize, low, high) == LimitThresholdCrossed::None); + // assert!(check_limit!(100000000u128, low, high) == + // LimitThresholdCrossed::None); This wont compile because lossy + // assert!(check_limit!(100000000usize, low, high) == + // LimitThresholdCrossed::None); assert!(matches!( check_limit!(2550000u64, low, high), diff --git a/crates/sui-proxy/build.rs b/crates/sui-proxy/build.rs index 3edaa174aa5..133ea834e5c 100644 --- a/crates/sui-proxy/build.rs +++ b/crates/sui-proxy/build.rs @@ -5,7 +5,8 @@ fn main() -> Result<()> { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-env-changed=BUILD_REMOTE_WRITE"); - // add this env var to build. you'll need protoc installed locally and a copy of the proto files + // add this env var to build. 
you'll need protoc installed locally and a copy of + // the proto files if option_env!("BUILD_REMOTE_WRITE").is_some() { prost_build::compile_protos( &["protobufs/remote.proto", "protobufs/types.proto"], diff --git a/crates/sui-proxy/src/admin.rs b/crates/sui-proxy/src/admin.rs index f6bc2999e31..252ee90d29f 100644 --- a/crates/sui-proxy/src/admin.rs +++ b/crates/sui-proxy/src/admin.rs @@ -1,24 +1,19 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::config::{DynamicPeerValidationConfig, RemoteWriteConfig, StaticPeerValidationConfig}; -use crate::handlers::publish_metrics; -use crate::histogram_relay::HistogramRelay; -use crate::ip::{is_private, to_multiaddr}; -use crate::middleware::{ - expect_content_length, expect_mysten_proxy_header, expect_valid_public_key, +use std::{ + fs, + io::BufReader, + net::{IpAddr, SocketAddr}, + sync::Arc, + time::Duration, }; -use crate::peers::{SuiNodeProvider, SuiPeer}; -use crate::var; -use anyhow::Error; -use anyhow::Result; + +use anyhow::{Error, Result}; use axum::{extract::DefaultBodyLimit, middleware, routing::post, Extension, Router}; -use fastcrypto::ed25519::{Ed25519KeyPair, Ed25519PublicKey}; -use fastcrypto::traits::{KeyPair, ToFromBytes}; -use std::fs; -use std::io::BufReader; -use std::net::{IpAddr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; +use fastcrypto::{ + ed25519::{Ed25519KeyPair, Ed25519PublicKey}, + traits::{KeyPair, ToFromBytes}, +}; use sui_tls::{rustls::ServerConfig, AllowAll, CertVerifier, SelfSignedCertificate, TlsAcceptor}; use tokio::signal; use tower::ServiceBuilder; @@ -28,6 +23,16 @@ use tower_http::{ }; use tracing::{error, info, Level}; +use crate::{ + config::{DynamicPeerValidationConfig, RemoteWriteConfig, StaticPeerValidationConfig}, + handlers::publish_metrics, + histogram_relay::HistogramRelay, + ip::{is_private, to_multiaddr}, + middleware::{expect_content_length, expect_mysten_proxy_header, expect_valid_public_key}, + peers::{SuiNodeProvider, SuiPeer}, + var, +}; + /// Configure our graceful shutdown scenarios pub async fn shutdown_signal(h: axum_server::Handle) { let ctrl_c = async { @@ -196,7 +201,8 @@ fn load_private_key(filename: &str) -> rustls::PrivateKey { ); } -/// load the static keys we'll use to allow external non-validator nodes to push metrics +/// load the static keys we'll use to allow external non-validator nodes to push +/// metrics fn load_static_peers( static_peers: Option, ) -> Result, Error> { @@ -226,7 +232,8 @@ fn load_static_peers( Ok(static_keys) } -/// Default allow mode for server, we don't verify clients, everything is accepted +/// Default allow mode for server, we don't verify clients, everything is +/// accepted pub fn create_server_cert_default_allow( hostname: String, ) -> Result { @@ -238,8 +245,8 @@ pub fn create_server_cert_default_allow( ) } -/// Verify clients against sui blockchain, clients that are not found in sui_getValidators -/// will be rejected +/// Verify clients against sui blockchain, clients that are not found in +/// sui_getValidators will be rejected pub fn create_server_cert_enforce_peer( dynamic_peers: DynamicPeerValidationConfig, static_peers: Option, diff --git a/crates/sui-proxy/src/config.rs b/crates/sui-proxy/src/config.rs index f3aad0e955f..ef129c886b6 100644 --- a/crates/sui-proxy/src/config.rs +++ b/crates/sui-proxy/src/config.rs @@ -1,10 +1,11 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0

-use anyhow::{Context, Result};
 use core::time::Duration;
+use std::net::SocketAddr;
+
+use anyhow::{Context, Result};
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use serde_with::{serde_as, DurationSeconds};
-use std::net::SocketAddr;
 use tracing::debug;

 #[serde_as]
@@ -38,9 +39,10 @@ pub struct RemoteWriteConfig {
     pub pool_max_idle_per_host: usize,
 }

-/// DynamicPeerValidationConfig controls what sui-node binaries that are functioning as a validator that we'll speak with.
-/// Peer in this case is peers within the consensus committee, for each epoch. This membership is determined dynamically
-/// for each epoch via json-rpc calls to a full node.
+/// DynamicPeerValidationConfig controls which sui-node binaries, functioning
+/// as validators, we'll speak with. A peer in this case is a peer within the
+/// consensus committee for each epoch. This membership is determined
+/// dynamically for each epoch via json-rpc calls to a full node.
 #[serde_as]
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
@@ -49,21 +51,22 @@ pub struct DynamicPeerValidationConfig {
     pub url: String,
     #[serde_as(as = "DurationSeconds")]
     pub interval: Duration,
-    /// if certificate_file and private_key are not provided, we'll create a self-signed
-    /// cert using this hostname
+    /// if certificate_file and private_key are not provided, we'll create a
+    /// self-signed cert using this hostname
     #[serde(default = "hostname_default")]
     pub hostname: Option<String>,
-    /// incoming client connections to this proxy will be presented with this pub key
-    /// please use an absolute path
+    /// incoming client connections to this proxy will be presented with this
+    /// pub key; please use an absolute path
     pub certificate_file: Option<String>,
     /// private key for tls
     /// please use an absolute path
     pub private_key: Option<String>,
 }

-/// StaticPeerValidationConfig, unlike the DynamicPeerValidationConfig, is not determined dynamically from rpc
-/// calls. It instead searches a local directory for pub keys that we will add to an allow list.
+/// StaticPeerValidationConfig, unlike the DynamicPeerValidationConfig, is not
+/// determined dynamically from rpc calls. It instead searches a local
+/// directory for pub keys that we will add to an allow list.
 #[serde_as]
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
@@ -71,8 +74,9 @@ pub struct StaticPeerValidationConfig {
     pub pub_keys: Vec<StaticPubKey>,
 }

-/// StaticPubKey holds a human friendly name, ip and the key file for the pub key
-/// if you don't have a valid public routable ip, use an ip from 169.254.0.0/16.
+/// StaticPubKey holds a human friendly name, ip and the key file for the pub
+/// key. If you don't have a valid public routable ip, use an ip from
+/// 169.254.0.0/16.
 #[serde_as]
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
diff --git a/crates/sui-proxy/src/consumer.rs b/crates/sui-proxy/src/consumer.rs
index 7135c5eaa67..53e231171ff 100644
--- a/crates/sui-proxy/src/consumer.rs
+++ b/crates/sui-proxy/src/consumer.rs
@@ -1,24 +1,25 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

-use crate::admin::ReqwestClient;
-use crate::prom_to_mimir::Mimir;
-use crate::remote_write::WriteRequest;
+use std::io::Read;
+
 use anyhow::Result;
-use axum::body::Bytes;
-use axum::http::StatusCode;
+use axum::{body::Bytes, http::StatusCode};
 use bytes::buf::Reader;
 use fastcrypto::ed25519::Ed25519PublicKey;
 use multiaddr::Multiaddr;
 use once_cell::sync::Lazy;
-use prometheus::proto::{self, MetricFamily};
-use prometheus::{register_counter, register_counter_vec, register_histogram_vec};
-use prometheus::{Counter, CounterVec, HistogramVec};
+use prometheus::{
+    proto::{self, MetricFamily},
+    register_counter, register_counter_vec, register_histogram_vec, Counter, CounterVec,
+    HistogramVec,
+};
 use prost::Message;
 use protobuf::CodedInputStream;
-use std::io::Read;
 use tracing::{debug, error};

+use crate::{admin::ReqwestClient, prom_to_mimir::Mimir, remote_write::WriteRequest};
+
 static CONSUMER_OPS_SUBMITTED: Lazy<Counter> = Lazy::new(|| {
     register_counter!(
         "consumer_operations_submitted",
@@ -74,8 +75,8 @@ pub struct NodeMetric {
     pub data: Vec<u8>, // decoded protobuf of prometheus data
 }

-/// The ProtobufDecoder will decode message delimited protobuf messages from prom_model.proto types
-/// They are delimited by size, eg a format is such:
-/// []byte{size, data, size, data, size, data}, etc etc
+/// The ProtobufDecoder will decode message-delimited protobuf messages of the
+/// prom_model.proto types. They are delimited by size, e.g. the format is:
+/// []byte{size, data, size, data, size, data}, etc.
 pub struct ProtobufDecoder {
     buf: Reader<Bytes>,
 }

 impl ProtobufDecoder {
     pub fn new(buf: Reader<Bytes>) -> Self {
         Self { buf }
     }
-    /// parse a delimited buffer of protobufs. this is used to consume data sent from a sui-node
+    /// parse a delimited buffer of protobufs. this is used to consume data
+    /// sent from a sui-node
     pub fn parse<T: protobuf::Message>(&mut self) -> Result<Vec<T>> {
         let timer = CONSUMER_OPERATION_DURATION
             .with_label_values(&["decode_len_delim_protobuf"])
             .start_timer();
@@ -264,12 +266,13 @@ async fn convert(
     Ok(result)
 }

-/// convert_to_remote_write is an expensive method due to the time it takes to submit to mimir.
-/// other operations here are optimized for async, within reason. The post process uses a single
-/// connection to mimir and thus incurs the seriliaztion delay for each metric family sent. Possible
-/// future optimizations would be to use multiple tcp connections to mimir, within reason. Nevertheless
-/// we await on each post of each metric family so it shouldn't block any other async work in a
-/// significant way.
+/// convert_to_remote_write is an expensive method due to the time it takes to
+/// submit to mimir. Other operations here are optimized for async, within
+/// reason. The post process uses a single connection to mimir and thus incurs
+/// the serialization delay for each metric family sent. A possible future
+/// optimization would be to use multiple tcp connections to mimir, within
+/// reason. Nevertheless, we await on each post of each metric family, so it
+/// shouldn't block any other async work in a significant way.
 pub async fn convert_to_remote_write(
     rc: ReqwestClient,
     node_metric: NodeMetric,
diff --git a/crates/sui-proxy/src/handlers.rs b/crates/sui-proxy/src/handlers.rs
index 03587e4225c..42809fd7dcf 100644
--- a/crates/sui-proxy/src/handlers.rs
+++ b/crates/sui-proxy/src/handlers.rs
@@ -1,19 +1,22 @@
 // Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0 -use crate::admin::{Labels, ReqwestClient}; -use crate::consumer::{convert_to_remote_write, populate_labels, NodeMetric}; -use crate::histogram_relay::HistogramRelay; -use crate::middleware::LenDelimProtobuf; -use crate::peers::SuiPeer; +use std::net::SocketAddr; + use axum::{ extract::{ConnectInfo, Extension}, http::StatusCode, }; use multiaddr::Multiaddr; use once_cell::sync::Lazy; -use prometheus::{register_counter_vec, register_histogram_vec}; -use prometheus::{CounterVec, HistogramVec}; -use std::net::SocketAddr; +use prometheus::{register_counter_vec, register_histogram_vec, CounterVec, HistogramVec}; + +use crate::{ + admin::{Labels, ReqwestClient}, + consumer::{convert_to_remote_write, populate_labels, NodeMetric}, + histogram_relay::HistogramRelay, + middleware::LenDelimProtobuf, + peers::SuiPeer, +}; static HANDLER_HITS: Lazy<CounterVec> = Lazy::new(|| { register_counter_vec!( @@ -37,10 +40,11 @@ static HTTP_HANDLER_DURATION: Lazy<HistogramVec> = Lazy::new(|| { .unwrap() }); -/// Publish handler which receives metrics from nodes. Nodes will call us at this endpoint -/// and we relay them to the upstream tsdb +/// Publish handler which receives metrics from nodes. Nodes will call us at +/// this endpoint and we relay them to the upstream tsdb /// -/// Clients will receive a response after successfully relaying the metrics upstream +/// Clients will receive a response after the metrics are successfully relayed +/// upstream pub async fn publish_metrics( Extension(labels): Extension<Labels>, Extension(client): Extension<ReqwestClient>, diff --git a/crates/sui-proxy/src/histogram_relay.rs b/crates/sui-proxy/src/histogram_relay.rs index 5ed3e302cce..f88d3f122d7 100644 --- a/crates/sui-proxy/src/histogram_relay.rs +++ b/crates/sui-proxy/src/histogram_relay.rs @@ -1,20 +1,24 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anyhow::{bail, Result}; -use axum::{extract::Extension, http::StatusCode, routing::get, Router}; -use once_cell::sync::Lazy; -use prometheus::proto::{Metric, MetricFamily}; -use prometheus::{register_counter_vec, register_histogram_vec}; -use prometheus::{CounterVec, HistogramVec}; -use std::net::TcpListener; -use std::time::{SystemTime, UNIX_EPOCH}; use std::{ collections::VecDeque, + net::TcpListener, sync::{Arc, Mutex}, + time::{SystemTime, UNIX_EPOCH}, +}; + +use anyhow::{bail, Result}; +use axum::{extract::Extension, http::StatusCode, routing::get, Router}; +use once_cell::sync::Lazy; +use prometheus::{ + proto::{Metric, MetricFamily}, + register_counter_vec, register_histogram_vec, CounterVec, HistogramVec, }; use tower::ServiceBuilder; -use tower_http::trace::{DefaultOnResponse, TraceLayer}; -use tower_http::LatencyUnit; +use tower_http::{ + trace::{DefaultOnResponse, TraceLayer}, + LatencyUnit, +}; use tracing::{info, Level}; use crate::var; @@ -44,7 +48,8 @@ static RELAY_DURATION: Lazy<HistogramVec> = Lazy::new(|| { // Creates a new http server that has as its sole purpose to expose // an endpoint that a prometheus agent can use to poll for the metrics. -// A RegistryService is returned that can be used to get access in prometheus Registries. +// A HistogramRelay is returned that can be used to access the relayed +// metrics.
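The Lazy<CounterVec> registration pattern used by HANDLER_HITS and the other statics in this section looks roughly like the following self-contained sketch; the metric name and labels here are illustrative, not the crate's.

use once_cell::sync::Lazy;
use prometheus::{register_counter_vec, CounterVec};

// Registered once on first access, then cheap to increment per request.
static EXAMPLE_HITS: Lazy<CounterVec> = Lazy::new(|| {
    register_counter_vec!(
        "example_handler_hits",
        "Number of HTTP requests made.",
        &["handler", "remote"]
    )
    .unwrap()
});

fn record_hit(handler: &str, remote: &str) {
    EXAMPLE_HITS.with_label_values(&[handler, remote]).inc();
}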
pub fn start_prometheus_server(addr: TcpListener) -> HistogramRelay { let relay = HistogramRelay::new(); let app = Router::new() @@ -95,8 +100,9 @@ impl HistogramRelay { Self::default() } /// submit will take metric family submissions and store them for scraping - /// in doing so, it will also wrap each entry in a timestamp which will be use - /// for pruning old entires on each submission call. this may not be ideal long term. + /// in doing so, it will also wrap each entry in a timestamp which will be + /// used for pruning old entries on each submission call. this may not be + /// ideal long term. pub fn submit(&self, data: Vec<MetricFamily>) { RELAY_PRESSURE.with_label_values(&["submit"]).inc(); let timer = RELAY_DURATION.with_label_values(&["submit"]).start_timer(); @@ -129,7 +135,8 @@ impl HistogramRelay { pub fn export(&self) -> Result<String> { RELAY_PRESSURE.with_label_values(&["export"]).inc(); let timer = RELAY_DURATION.with_label_values(&["export"]).start_timer(); - // totally drain all metrics whenever we get a scrape request from the metrics handler + // totally drain all metrics whenever we get a scrape request from the metrics + // handler let mut queue = self .0 .lock() diff --git a/crates/sui-proxy/src/ip.rs b/crates/sui-proxy/src/ip.rs index cdfa65bcb64..2e5625551e7 100644 --- a/crates/sui-proxy/src/ip.rs +++ b/crates/sui-proxy/src/ip.rs @@ -1,9 +1,10 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use ipnetwork::IpNetwork; use multiaddr::Multiaddr; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; pub fn to_multiaddr(addr: IpAddr) -> Multiaddr { match addr { @@ -12,7 +13,8 @@ pub fn to_multiaddr(addr: IpAddr) -> Multiaddr { } } -/// is_private makes a decent guess at determining of an addr is publicly routable. +/// is_private makes a decent guess at determining if an addr is publicly +/// routable. pub fn is_private(addr: IpAddr) -> bool { match addr { IpAddr::V4(a) => is_private_v4(a), @@ -20,7 +22,8 @@ pub fn is_private(addr: IpAddr) -> bool { } } -/// is_private_v4 will say just that, is it private? we ignore 169.254.0.0/16 in this consideration +/// is_private_v4 will say just that, is it private? we ignore 169.254.0.0/16 in +/// this consideration fn is_private_v4(addr: Ipv4Addr) -> bool { // special case we will allow let allowed_private: IpNetwork = "169.254.0.0/16".parse().unwrap(); @@ -31,8 +34,8 @@ fn is_private_v4(addr: Ipv4Addr) -> bool { addr.is_private() } -/// is_private_v6 and the funcs below are based on an unstable const fn in core. yoinked it. -/// taken from https://github.com/rust-lang/rust/blob/340bb19fea20fd5f9357bbfac542fad84fc7ea2b/library/core/src/net/ip_addr.rs#L691-L783 +/// is_private_v6 and the funcs below are based on an unstable const fn in core. +/// yoinked it.
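The 169.254.0.0/16 carve-out described above amounts to the following standalone sketch, written under the assumption that ipnetwork's contains method takes an IpAddr:

use std::net::{IpAddr, Ipv4Addr};

use ipnetwork::IpNetwork;

// Treat link-local addresses as "not private" so they pass the filter.
fn is_private_v4_sketch(addr: Ipv4Addr) -> bool {
    let allowed_private: IpNetwork = "169.254.0.0/16".parse().unwrap();
    if allowed_private.contains(IpAddr::V4(addr)) {
        return false; // special case we will allow
    }
    addr.is_private()
}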
+/// taken from https://github.com/rust-lang/rust/blob/340bb19fea20fd5f9357bbfac542fad84fc7ea2b/library/core/src/net/ip_addr.rs#L691-L783 #[allow(clippy::manual_range_contains)] fn is_private_v6(addr: Ipv6Addr) -> bool { addr.is_unspecified() diff --git a/crates/sui-proxy/src/lib.rs b/crates/sui-proxy/src/lib.rs index 989ae5ff6b1..a9da073c68b 100644 --- a/crates/sui-proxy/src/lib.rs +++ b/crates/sui-proxy/src/lib.rs @@ -13,9 +13,9 @@ pub mod prom_to_mimir; pub mod remote_write; /// var extracts environment variables at runtime with a default fallback value -/// if a default is not provided, the value is simply an empty string if not found -/// This function will return the provided default if env::var cannot find the key -/// or if the key is somehow malformed. +/// if a default is not provided, the value is simply an empty string if not +/// found. This function will return the provided default if env::var cannot +/// find the key or if the key is somehow malformed. #[macro_export] macro_rules! var { ($key:expr) => { @@ -34,23 +34,27 @@ macro_rules! var { #[cfg(test)] mod tests { - use super::*; - use crate::admin::Labels; - use crate::histogram_relay::HistogramRelay; - use crate::prom_to_mimir::tests::*; - - use crate::{admin::CertKeyPair, config::RemoteWriteConfig, peers::SuiNodeProvider}; - use axum::http::{header, StatusCode}; - use axum::routing::post; - use axum::Router; + use std::{net::TcpListener, time::Duration}; + + use axum::{ + http::{header, StatusCode}, + routing::post, + Router, + }; use multiaddr::Multiaddr; - use prometheus::Encoder; - use prometheus::PROTOBUF_FORMAT; + use prometheus::{Encoder, PROTOBUF_FORMAT}; use protobuf::RepeatedField; - use std::net::TcpListener; - use std::time::Duration; use sui_tls::{CertVerifier, TlsAcceptor}; + use super::*; + use crate::{ + admin::{CertKeyPair, Labels}, + config::RemoteWriteConfig, + histogram_relay::HistogramRelay, + peers::SuiNodeProvider, + prom_to_mimir::tests::*, + }; + async fn run_dummy_remote_write(listener: TcpListener) { /// i accept everything, send me the trash async fn handler() -> StatusCode { ... } .unwrap(); } - /// axum_acceptor is a basic e2e test that creates a mock remote_write post endpoint and has a simple - /// sui-node client that posts data to the proxy using the protobuf format. The server processes this - /// data and sends it to the mock remote_write which accepts everything. Future work is to make this more - /// robust and expand the scope of coverage, probabaly moving this test elsewhere and renaming it. + /// axum_acceptor is a basic e2e test that creates a mock remote_write post + /// endpoint and has a simple sui-node client that posts data to the + /// proxy using the protobuf format. The server processes this data and + /// sends it to the mock remote_write which accepts everything. Future work + /// is to make this more robust and expand the scope of coverage, + /// probably moving this test elsewhere and renaming it.
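A minimal sketch of the fallback behavior the var! docs describe; this illustrates the pattern only and is not the macro's exact expansion.

// Hypothetical macro: read an env var, parse it, fall back to a default.
macro_rules! env_or {
    ($key:expr, $default:expr) => {
        std::env::var($key)
            .ok()
            .and_then(|v| v.parse().ok())
            .unwrap_or($default)
    };
}

fn example_sample_size() -> usize {
    // yields 500 when MIMIR_MAX_SAMPLE_SIZE is unset or malformed
    env_or!("MIMIR_MAX_SAMPLE_SIZE", 500)
}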
#[tokio::test] async fn axum_acceptor() { // generate self-signed certificates @@ -141,7 +147,8 @@ mod tests { // Client request is rejected because it isn't in the allowlist client.get(&server_url).send().await.unwrap_err(); - // Insert the client's public key into the allowlist and verify the request is successful + // Insert the client's public key into the allowlist and verify the request is + // successful allower.get_mut().write().unwrap().insert( client_pub_key.to_owned(), peers::SuiPeer { diff --git a/crates/sui-proxy/src/main.rs b/crates/sui-proxy/src/main.rs index db0698150a0..cfe32e7aa6c 100644 --- a/crates/sui-proxy/src/main.rs +++ b/crates/sui-proxy/src/main.rs @@ -1,15 +1,15 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::env; + use anyhow::Result; use clap::Parser; -use std::env; -use sui_proxy::config::ProxyConfig; use sui_proxy::{ admin::{ app, create_server_cert_default_allow, create_server_cert_enforce_peer, make_reqwest_client, server, Labels, }, - config::load, + config::{load, ProxyConfig}, histogram_relay, metrics, }; use sui_tls::TlsAcceptor; @@ -18,8 +19,9 @@ use tracing::info; // WARNING!!! // -// Do not move or use similar logic to generate git revision information outside of a binary entry -// point (e.g. main.rs). Placing the below logic into a library can result in unessesary builds. +// Do not move or use similar logic to generate git revision information outside +// of a binary entry point (e.g. main.rs). Placing the below logic into a +// library can result in unnecessary builds. const GIT_REVISION: &str = { if let Some(revision) = option_env!("GIT_REVISION") { revision diff --git a/crates/sui-proxy/src/metrics.rs b/crates/sui-proxy/src/metrics.rs index 5ee5110e9db..9f2b65f7d7c 100644 --- a/crates/sui-proxy/src/metrics.rs +++ b/crates/sui-proxy/src/metrics.rs @@ -1,19 +1,23 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use std::net::TcpListener; + use axum::{extract::Extension, http::StatusCode, routing::get, Router}; use mysten_metrics::RegistryService; use prometheus::{Registry, TextEncoder}; -use std::net::TcpListener; use tower::ServiceBuilder; -use tower_http::trace::{DefaultOnResponse, TraceLayer}; -use tower_http::LatencyUnit; +use tower_http::{ + trace::{DefaultOnResponse, TraceLayer}, + LatencyUnit, +}; use tracing::Level; const METRICS_ROUTE: &str = "/metrics"; // Creates a new http server that has as its sole purpose to expose // an endpoint that a prometheus agent can use to poll for the metrics. -// A RegistryService is returned that can be used to get access in prometheus Registries. +// A RegistryService is returned that can be used to access the prometheus +// Registries.
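The allowlist step exercised by the test above boils down to a keyed lookup guarded by a lock; the following is a simplified sketch with stand-in types (the real code keys SuiPeer values by Ed25519PublicKey):

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Stand-in allow list: raw key bytes -> human friendly peer name.
#[derive(Default, Clone)]
struct AllowListSketch(Arc<RwLock<HashMap<Vec<u8>, String>>>);

impl AllowListSketch {
    fn insert(&self, pub_key_bytes: Vec<u8>, name: String) {
        self.0.write().unwrap().insert(pub_key_bytes, name);
    }
    fn allowed(&self, pub_key_bytes: &[u8]) -> bool {
        self.0.read().unwrap().contains_key(pub_key_bytes)
    }
}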
pub fn start_prometheus_server(addr: TcpListener) -> RegistryService { let registry = Registry::new(); @@ -43,7 +47,8 @@ pub fn start_prometheus_server(addr: TcpListener) -> RegistryService { registry_service } -// DO NOT remove this handler, it is not compatible with the mysten_metrics::metric equivalent +// DO NOT remove this handler; it is not compatible with the +// mysten_metrics::metric equivalent async fn metrics(Extension(registry_service): Extension<RegistryService>) -> (StatusCode, String) { let mut metric_families = registry_service.gather_all(); metric_families.extend(prometheus::gather()); diff --git a/crates/sui-proxy/src/middleware.rs b/crates/sui-proxy/src/middleware.rs index 6af09d11ed6..86d523f6a1a 100644 --- a/crates/sui-proxy/src/middleware.rs +++ b/crates/sui-proxy/src/middleware.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{consumer::ProtobufDecoder, peers::SuiNodeProvider}; +use std::sync::Arc; + use axum::{ async_trait, body::Bytes, @@ -15,10 +16,11 @@ use bytes::Buf; use hyper::header::CONTENT_ENCODING; use once_cell::sync::Lazy; use prometheus::{proto::MetricFamily, register_counter_vec, CounterVec}; -use std::sync::Arc; use sui_tls::TlsConnectionInfo; use tracing::error; +use crate::{consumer::ProtobufDecoder, peers::SuiNodeProvider}; + static MIDDLEWARE_OPS: Lazy<CounterVec> = Lazy::new(|| { register_counter_vec!( "middleware_operations", diff --git a/crates/sui-proxy/src/peers.rs b/crates/sui-proxy/src/peers.rs index 250ba4b3bcc..7b8bcf5f13b 100644 --- a/crates/sui-proxy/src/peers.rs +++ b/crates/sui-proxy/src/peers.rs @@ -1,18 +1,17 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use anyhow::{bail, Context, Result}; -use fastcrypto::ed25519::Ed25519PublicKey; -use fastcrypto::traits::ToFromBytes; -use multiaddr::Multiaddr; -use once_cell::sync::Lazy; -use prometheus::{register_counter_vec, register_histogram_vec}; -use prometheus::{CounterVec, HistogramVec}; -use serde::Deserialize; -use std::time::Duration; use std::{ collections::HashMap, sync::{Arc, RwLock}, + time::Duration, }; + +use anyhow::{bail, Context, Result}; +use fastcrypto::{ed25519::Ed25519PublicKey, traits::ToFromBytes}; +use multiaddr::Multiaddr; +use once_cell::sync::Lazy; +use prometheus::{register_counter_vec, register_histogram_vec, CounterVec, HistogramVec}; +use serde::Deserialize; use sui_tls::Allower; use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; use tracing::{debug, error, info}; @@ -49,10 +48,12 @@ pub struct SuiPeer { pub public_key: Ed25519PublicKey, } -/// SuiNodeProvider queries the sui blockchain and keeps a record of known validators based on the response from -/// sui_getValidators. The node name, public key and other info is extracted from the chain and stored in this -/// data structure. We pass this struct to the tls verifier and it depends on the state contained within. -/// Handlers also use this data in an Extractor extension to check incoming clients on the http api against known keys. +/// SuiNodeProvider queries the sui blockchain and keeps a record of known +/// validators based on the response from sui_getValidators. The node name, +/// public key and other info are extracted from the chain and stored in this +/// data structure. We pass this struct to the tls verifier and it depends on +/// the state contained within. Handlers also use this data in an Extractor +/// extension to check incoming clients on the http api against known keys.
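The metrics handler above gathers metric families and renders them for the scraper; the encode step, in isolation, looks roughly like this sketch using a plain prometheus::Registry:

use prometheus::{Encoder, Registry, TextEncoder};

// Render every family registered so far in the text exposition format.
fn render_metrics(registry: &Registry) -> String {
    let mut buf = Vec::new();
    let encoder = TextEncoder::new();
    encoder.encode(&registry.gather(), &mut buf).unwrap();
    String::from_utf8(buf).unwrap()
}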
#[derive(Debug, Clone)] pub struct SuiNodeProvider { nodes: SuiPeers, @@ -70,7 +71,8 @@ impl Allower for SuiNodeProvider { impl SuiNodeProvider { pub fn new(rpc_url: String, rpc_poll_interval: Duration, static_peers: Vec<SuiPeer>) -> Self { - // build our hashmap with the static pub keys. we only do this one time at binary startup. + // build our hashmap with the static pub keys. we only do this one time at + // binary startup. let static_nodes: HashMap<Ed25519PublicKey, SuiPeer> = static_peers .into_iter() .map(|v| (v.public_key.clone(), v)) @@ -218,9 +220,10 @@ impl SuiNodeProvider { } } -/// extract will get the network pubkey bytes from a SuiValidatorSummary type. This type comes from a -/// full node rpc result. See get_validators for details. The key here, if extracted successfully, will -/// ultimately be stored in the allow list and let us communicate with those actual peers via tls. +/// extract will get the network pubkey bytes from a SuiValidatorSummary type. +/// This type comes from a full node rpc result. See get_validators for +/// details. The key here, if extracted successfully, will ultimately be stored +/// in the allow list and let us communicate with those actual peers via tls. fn extract(summary: SuiSystemStateSummary) -> impl Iterator<Item = (Ed25519PublicKey, SuiPeer)> { summary.active_validators.into_iter().filter_map(|vm| { match Ed25519PublicKey::from_bytes(&vm.network_pubkey_bytes) { @@ -258,15 +261,17 @@ fn extract(summary: SuiSystemStateSummary) -> impl Iterator<Item = (Ed25519PublicKey, SuiPeer)> diff --git a/crates/sui-proxy/src/prom_to_mimir.rs b/crates/sui-proxy/src/prom_to_mimir.rs --- a/crates/sui-proxy/src/prom_to_mimir.rs +++ b/crates/sui-proxy/src/prom_to_mimir.rs pub struct Mimir<S> { state: S, @@ -117,7 +117,8 @@ impl From<Vec<remote_write::TimeSeries>> for Mimir<Vec<remote_write::WriteRequest>> { Self { state: timeseries .into_iter() - // the upstream remote_write should have a max sample size per request set to this number + // the upstream remote_write should have a max sample size per request set to this + // number .chunks(var!("MIMIR_MAX_SAMPLE_SIZE", 500)) .into_iter() .map(|ts| remote_write::WriteRequest { @@ -160,9 +161,10 @@ impl From<proto::MetricFamily> for Mimir<Vec<remote_write::TimeSeries>> { ts.labels .extend(Mimir::<Vec<remote_write::Label>>::from(metric)); - // assumption here is that since a MetricFamily will have one MetricType, we'll only need - // to look for one of these types. Setting two different types on Metric at the same time - // in a way that is conflicting with the MetricFamily type will result in undefined mimir + // assumption here is that since a MetricFamily will have one MetricType, we'll + // only need to look for one of these types. Setting two different + // types on Metric at the same time in a way that is conflicting + // with the MetricFamily type will result in undefined mimir // behavior, probably an error. if metric.has_counter() { let mut s = Mimir::<remote_write::Sample>::from(metric.get_counter()).sample(); @@ -175,7 +177,8 @@ impl From<proto::MetricFamily> for Mimir<Vec<remote_write::TimeSeries>> { } else if metric.has_histogram() { // TODO implement // ts.mut_histograms() - // .push(Mimir::<remote_write::Histogram>::from(metric.get_histogram()).histogram()); + // .push(Mimir::<remote_write::Histogram>::from(metric.
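The chunking above keeps each upstream request under the MIMIR_MAX_SAMPLE_SIZE limit; stripped of the Mimir wrapper, the batching idea is just this sketch (plain slice chunks instead of itertools):

// Split a flat list of timeseries into bounded batches, one per request.
fn batch<T: Clone>(series: Vec<T>, max_per_request: usize) -> Vec<Vec<T>> {
    series
        .chunks(max_per_request.max(1)) // guard against a zero limit
        .map(|chunk| chunk.to_vec())
        .collect()
}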
+ // get_histogram()).histogram()); } else if metric.has_summary() { // TODO implement error!("summary is not implemented for a metric type"); @@ -194,11 +197,11 @@ impl Mimir { #[cfg(test)] pub mod tests { - use crate::prom_to_mimir::Mimir; - use crate::remote_write; use prometheus::proto; use protobuf::RepeatedField; + use crate::{prom_to_mimir::Mimir, remote_write}; + // protobuf stuff pub fn create_metric_family( name: &str, diff --git a/crates/sui-proxy/src/remote_write.rs b/crates/sui-proxy/src/remote_write.rs index eab672c4362..99d60db7f61 100644 --- a/crates/sui-proxy/src/remote_write.rs +++ b/crates/sui-proxy/src/remote_write.rs @@ -29,10 +29,12 @@ pub mod metric_metadata { Stateset = 7, } impl MetricType { - /// String value of the enum field names used in the ProtoBuf definition. + /// String value of the enum field names used in the ProtoBuf + /// definition. /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. + /// The values are not transformed in any way and thus are considered + /// stable (if the ProtoBuf definition does not change) and safe + /// for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { MetricType::Unknown => "UNKNOWN", @@ -115,7 +117,8 @@ pub struct Histogram { /// regular histograms with integer counts, the latter for float /// histograms. /// - /// Count delta of each bucket compared to previous one (or to zero for 1st bucket). + /// Count delta of each bucket compared to previous one (or to zero for 1st + /// bucket). #[prost(sint64, repeated, tag = "9")] pub negative_deltas: ::prost::alloc::vec::Vec<i64>, /// Absolute count of each bucket. @@ -128,7 +131,8 @@ /// regular histograms with integer counts, the latter for float /// histograms. /// - /// Count delta of each bucket compared to previous one (or to zero for 1st bucket). + /// Count delta of each bucket compared to previous one (or to zero for 1st + /// bucket). #[prost(sint64, repeated, tag = "12")] pub positive_deltas: ::prost::alloc::vec::Vec<i64>, /// Absolute count of each bucket. @@ -162,10 +166,12 @@ pub mod histogram { Gauge = 3, } impl ResetHint { - /// String value of the enum field names used in the ProtoBuf definition. + /// String value of the enum field names used in the ProtoBuf + /// definition. /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. + /// The values are not transformed in any way and thus are considered + /// stable (if the ProtoBuf definition does not change) and safe + /// for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { ResetHint::Unknown => "UNKNOWN", @@ -212,7 +218,8 @@ pub mod histogram { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BucketSpan { - /// Gap to previous span, or starting point for 1st span (which can be negative). + /// Gap to previous span, or starting point for 1st span (which can be + /// negative). #[prost(sint32, tag = "1")] pub offset: i32, /// Length of consecutive buckets. @@ -224,7 +231,8 @@ pub struct TimeSeries { /// For a timeseries to be valid, and for the samples and exemplars - /// to be ingested by the remote system properly, the labels field is required.
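The bucket-delta encoding documented above stores each bucket as a difference from its predecessor, with the first bucket compared against zero; a small worked sketch:

// [3, 5, 5, 9] as absolute counts encodes to deltas [3, 2, 0, 4].
fn to_deltas(counts: &[i64]) -> Vec<i64> {
    let mut prev = 0;
    counts
        .iter()
        .map(|&c| {
            let delta = c - prev;
            prev = c;
            delta
        })
        .collect()
}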
+ /// to be ingested by the remote system properly, the labels field is + /// required. #[prost(message, repeated, tag = "1")] pub labels: ::prost::alloc::vec::Vec<Label>