From 1744e9efd74d83aeb15b384a8174949dbe753172 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 9 Apr 2024 18:12:26 -0500 Subject: [PATCH 01/13] BankingStage Forwarding Filter (#685) * add PacketFlags::FROM_STAKED_NODE * Only forward packets from staked node * fix local-cluster test forwarding * review comment * tpu_votes get marked as from_staked_node --- bench-streamer/src/main.rs | 1 + core/src/banking_stage/forwarder.rs | 1 + core/src/fetch_stage.rs | 3 ++ core/src/repair/ancestor_hashes_service.rs | 2 ++ core/src/repair/serve_repair_service.rs | 1 + core/src/shred_fetch_stage.rs | 1 + gossip/src/gossip_service.rs | 1 + local-cluster/tests/local_cluster.rs | 42 ++++++++++++++++------ sdk/src/packet.rs | 12 +++++++ streamer/src/nonblocking/quic.rs | 1 + streamer/src/streamer.rs | 9 ++++- 11 files changed, 63 insertions(+), 11 deletions(-) diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index de300345ebad42..2d6998f298f3b8 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -117,6 +117,7 @@ fn main() -> Result<()> { Duration::from_millis(1), // coalesce true, None, + false, )); } diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index e1c2bdc3049621..0af6c5f8519ca5 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -161,6 +161,7 @@ impl Forwarder { self.update_data_budget(); let packet_vec: Vec<_> = forwardable_packets .filter(|p| !p.meta().forwarded()) + .filter(|p| p.meta().is_from_staked_node()) .filter(|p| self.data_budget.take(p.meta().size)) .filter_map(|p| p.data(..).map(|data| data.to_vec())) .collect(); diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 5e972e626166ce..1f668a6ec02964 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -171,6 +171,7 @@ impl FetchStage { coalesce, true, in_vote_only_mode.clone(), + false, // unstaked connections ) }) .collect() @@ -194,6 +195,7 @@ impl 
FetchStage { coalesce, true, in_vote_only_mode.clone(), + false, // unstaked connections ) }) .collect() @@ -216,6 +218,7 @@ impl FetchStage { coalesce, true, None, + true, // only staked connections should be voting ) }) .collect(); diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 8f455cbd6a1c19..0b7846f8115fea 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -171,6 +171,7 @@ impl AncestorHashesService { Duration::from_millis(1), // coalesce false, // use_pinned_memory None, // in_vote_only_mode + false, // is_staked_service ); let (quic_endpoint_response_sender, quic_endpoint_response_receiver) = unbounded(); @@ -1304,6 +1305,7 @@ mod test { Duration::from_millis(1), // coalesce false, None, + false, ); let (remote_request_sender, remote_request_receiver) = unbounded(); let t_packet_adapter = Builder::new() diff --git a/core/src/repair/serve_repair_service.rs b/core/src/repair/serve_repair_service.rs index 3fe424d0768a85..2be1a6712045de 100644 --- a/core/src/repair/serve_repair_service.rs +++ b/core/src/repair/serve_repair_service.rs @@ -47,6 +47,7 @@ impl ServeRepairService { Duration::from_millis(1), // coalesce false, // use_pinned_memory None, // in_vote_only_mode + false, // is_staked_service ); let t_packet_adapter = Builder::new() .name(String::from("solServRAdapt")) diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 84f1520e649822..a0a88102698a1f 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -175,6 +175,7 @@ impl ShredFetchStage { PACKET_COALESCE_DURATION, true, // use_pinned_memory None, // in_vote_only_mode + false, ) }) .collect(); diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 76ab14f27a6b4a..006c633e602947 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -63,6 +63,7 @@ impl GossipService { 
Duration::from_millis(1), // coalesce false, None, + false, ); let (consume_sender, listen_receiver) = unbounded(); let t_socket_consume = cluster_info.clone().start_socket_consume_thread( diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 44032aeeb4d38b..ed95bf85d6c056 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -9,7 +9,7 @@ use { solana_accounts_db::{ hardened_unpack::open_genesis_config, utils::create_accounts_run_and_snapshot_dirs, }, - solana_client::thin_client::ThinClient, + solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient}, solana_core::{ consensus::{ tower_storage::FileTowerStorage, Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH, @@ -56,12 +56,9 @@ use { response::RpcSignatureResult, }, solana_runtime::{ - commitment::VOTE_THRESHOLD_SIZE, - snapshot_archive_info::SnapshotArchiveInfoGetter, - snapshot_bank_utils, - snapshot_config::SnapshotConfig, - snapshot_package::SnapshotKind, - snapshot_utils::{self}, + commitment::VOTE_THRESHOLD_SIZE, snapshot_archive_info::SnapshotArchiveInfoGetter, + snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_package::SnapshotKind, + snapshot_utils, }, solana_sdk::{ account::AccountSharedData, @@ -78,7 +75,7 @@ use { system_program, system_transaction, vote::state::VoteStateUpdate, }, - solana_streamer::socket::SocketAddrSpace, + solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, solana_turbine::broadcast_stage::{ broadcast_duplicates_run::{BroadcastDuplicatesConfig, ClusterPartition}, BroadcastStageType, @@ -90,11 +87,12 @@ use { fs, io::Read, iter, + net::{IpAddr, Ipv4Addr}, num::NonZeroUsize, path::Path, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, Mutex, + Arc, Mutex, RwLock, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -363,6 +361,13 @@ fn test_forwarding() { ), ..ClusterConfig::default() }; + + let client_keypair = 
Keypair::new(); + let mut overrides = HashMap::new(); + let stake = DEFAULT_NODE_STAKE * 10; + let total_stake = stake + config.node_stakes.iter().sum::(); + overrides.insert(client_keypair.pubkey(), stake); + config.validator_configs[1].staked_nodes_overrides = Arc::new(RwLock::new(overrides)); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster_nodes = discover_cluster( @@ -380,11 +385,28 @@ fn test_forwarding() { .find(|c| c.pubkey() != &leader_pubkey) .unwrap(); + let stakes = HashMap::from([ + (client_keypair.pubkey(), stake), + (Pubkey::new_unique(), total_stake - stake), + ]); + let staked_nodes = Arc::new(RwLock::new(StakedNodes::new( + Arc::new(stakes), + HashMap::::default(), // overrides + ))); + + let client_connection_cache = Arc::new(ConnectionCache::new_with_client_options( + "client-connection-cache", + 1, + None, + Some((&client_keypair, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))), + Some((&staked_nodes, &client_keypair.pubkey())), + )); + // Confirm that transactions were forwarded to and processed by the leader. cluster_tests::send_many_transactions( validator_info, &cluster.funding_keypair, - &cluster.connection_cache, + &client_connection_cache, 10, 20, ); diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index 8300b57218c696..661e8acee6855a 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -35,6 +35,8 @@ bitflags! 
{ const ROUND_COMPUTE_UNIT_PRICE = 0b0010_0000; /// For tracking performance const PERF_TRACK_PACKET = 0b0100_0000; + /// For marking packets from staked nodes + const FROM_STAKED_NODE = 0b1000_0000; } } @@ -215,6 +217,11 @@ impl Meta { self.port = socket_addr.port(); } + pub fn set_from_staked_node(&mut self, from_staked_node: bool) { + self.flags + .set(PacketFlags::FROM_STAKED_NODE, from_staked_node); + } + #[inline] pub fn discard(&self) -> bool { self.flags.contains(PacketFlags::DISCARD) @@ -278,6 +285,11 @@ impl Meta { pub fn round_compute_unit_price(&self) -> bool { self.flags.contains(PacketFlags::ROUND_COMPUTE_UNIT_PRICE) } + + #[inline] + pub fn is_from_staked_node(&self) -> bool { + self.flags.contains(PacketFlags::FROM_STAKED_NODE) + } } impl Default for Meta { diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 63a657d9113f07..ddf4005c432e3d 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -924,6 +924,7 @@ async fn handle_chunk( if packet_accum.is_none() { let mut meta = Meta::default(); meta.set_socket_addr(remote_addr); + meta.set_from_staked_node(matches!(peer_type, ConnectionPeerType::Staked(_))); *packet_accum = Some(PacketAccumulator { meta, chunks: Vec::new(), diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 7b68619082fe15..a79445c3b8e7ac 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -110,6 +110,7 @@ fn recv_loop( coalesce: Duration, use_pinned_memory: bool, in_vote_only_mode: Option>, + is_staked_service: bool, ) -> Result<()> { loop { let mut packet_batch = if use_pinned_memory { @@ -147,7 +148,9 @@ fn recv_loop( if len == PACKETS_PER_BATCH { full_packet_batches_count.fetch_add(1, Ordering::Relaxed); } - + packet_batch + .iter_mut() + .for_each(|p| p.meta_mut().set_from_staked_node(is_staked_service)); packet_batch_sender.send(packet_batch)?; } break; @@ -156,6 +159,7 @@ fn recv_loop( } } 
+#[allow(clippy::too_many_arguments)] pub fn receiver( thread_name: String, socket: Arc, @@ -166,6 +170,7 @@ pub fn receiver( coalesce: Duration, use_pinned_memory: bool, in_vote_only_mode: Option>, + is_staked_service: bool, ) -> JoinHandle<()> { let res = socket.set_read_timeout(Some(Duration::new(1, 0))); assert!(res.is_ok(), "streamer::receiver set_read_timeout error"); @@ -181,6 +186,7 @@ pub fn receiver( coalesce, use_pinned_memory, in_vote_only_mode, + is_staked_service, ); }) .unwrap() @@ -490,6 +496,7 @@ mod test { Duration::from_millis(1), // coalesce true, None, + false, ); const NUM_PACKETS: usize = 5; let t_responder = { From 2470b454686fb50699d355d03fa0e1c5cb674fd6 Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 9 Apr 2024 17:29:50 -0600 Subject: [PATCH 02/13] Check EpochRewards::active within the Stake Program (#617) * Add new StakeError variant * Add closure to return error if EpochRewards::active * Use error_during_epoch_rewards for Instructions that mutate stake accounts * Use try instead of manually matching Ok/Err * Consolidate error_during_epoch_rewards check * Add new test helper * Add test demonstrating which ix return StakeError::EpochRewardsActive * Remove single-use fn --- programs/stake/src/stake_instruction.rs | 258 +++++++++++++++++++++--- sdk/program/src/stake/instruction.rs | 3 + 2 files changed, 235 insertions(+), 26 deletions(-) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 02196bfd647a35..1ffe4bb20478c4 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -13,7 +13,7 @@ use { program_utils::limited_deserialize, pubkey::Pubkey, stake::{ - instruction::{LockupArgs, StakeInstruction}, + instruction::{LockupArgs, StakeError, StakeInstruction}, program::id, state::{Authorized, Lockup}, }, @@ -64,14 +64,29 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(me) }; + // The EpochRewards sysvar only 
exists after the + // enable_partitioned_epoch_reward feature is activated. If it exists, check + // the `active` field + let epoch_rewards_active = invoke_context + .get_sysvar_cache() + .get_epoch_rewards() + .map(|epoch_rewards| epoch_rewards.active) + .unwrap_or(false); + let signers = instruction_context.get_signers(transaction_context)?; - match limited_deserialize(data) { - Ok(StakeInstruction::Initialize(authorized, lockup)) => { + + let stake_instruction: StakeInstruction = limited_deserialize(data)?; + if epoch_rewards_active && !matches!(stake_instruction, StakeInstruction::GetMinimumDelegation) + { + return Err(StakeError::EpochRewardsActive.into()); + } + match stake_instruction { + StakeInstruction::Initialize(authorized, lockup) => { let mut me = get_stake_account()?; let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; initialize(&mut me, &authorized, &lockup, &rent) } - Ok(StakeInstruction::Authorize(authorized_pubkey, stake_authorize)) => { + StakeInstruction::Authorize(authorized_pubkey, stake_authorize) => { let mut me = get_stake_account()?; let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; @@ -88,7 +103,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| custodian_pubkey, ) } - Ok(StakeInstruction::AuthorizeWithSeed(args)) => { + StakeInstruction::AuthorizeWithSeed(args) => { let mut me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; let clock = @@ -109,7 +124,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| custodian_pubkey, ) } - Ok(StakeInstruction::DelegateStake) => { + StakeInstruction::DelegateStake => { let me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; let clock = @@ -133,7 +148,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &invoke_context.feature_set, ) } - 
Ok(StakeInstruction::Split(lamports)) => { + StakeInstruction::Split(lamports) => { let me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; drop(me); @@ -147,7 +162,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &signers, ) } - Ok(StakeInstruction::Merge) => { + StakeInstruction::Merge => { let me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; let clock = @@ -169,7 +184,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &signers, ) } - Ok(StakeInstruction::Withdraw(lamports)) => { + StakeInstruction::Withdraw(lamports) => { let me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; let clock = @@ -198,18 +213,18 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| new_warmup_cooldown_rate_epoch(invoke_context), ) } - Ok(StakeInstruction::Deactivate) => { + StakeInstruction::Deactivate => { let mut me = get_stake_account()?; let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; deactivate(invoke_context, &mut me, &clock, &signers) } - Ok(StakeInstruction::SetLockup(lockup)) => { + StakeInstruction::SetLockup(lockup) => { let mut me = get_stake_account()?; let clock = invoke_context.get_sysvar_cache().get_clock()?; set_lockup(&mut me, &lockup, &signers, &clock) } - Ok(StakeInstruction::InitializeChecked) => { + StakeInstruction::InitializeChecked => { let mut me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(4)?; let staker_pubkey = transaction_context.get_key_of_account_at_index( @@ -230,7 +245,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; initialize(&mut me, &authorized, &Lockup::default(), &rent) } - Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) 
=> { + StakeInstruction::AuthorizeChecked(stake_authorize) => { let mut me = get_stake_account()?; let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; @@ -253,7 +268,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| custodian_pubkey, ) } - Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { + StakeInstruction::AuthorizeCheckedWithSeed(args) => { let mut me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; let clock = @@ -281,7 +296,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| custodian_pubkey, ) } - Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { + StakeInstruction::SetLockupChecked(lockup_checked) => { let mut me = get_stake_account()?; let custodian_pubkey = get_optional_pubkey(transaction_context, instruction_context, 2, true)?; @@ -294,7 +309,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let clock = invoke_context.get_sysvar_cache().get_clock()?; set_lockup(&mut me, &lockup, &signers, &clock) } - Ok(StakeInstruction::GetMinimumDelegation) => { + StakeInstruction::GetMinimumDelegation => { let feature_set = invoke_context.feature_set.as_ref(); let minimum_delegation = crate::get_minimum_delegation(feature_set); let minimum_delegation = Vec::from(minimum_delegation.to_le_bytes()); @@ -302,7 +317,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| .transaction_context .set_return_data(id(), minimum_delegation) } - Ok(StakeInstruction::DeactivateDelinquent) => { + StakeInstruction::DeactivateDelinquent => { let mut me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(3)?; @@ -317,7 +332,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| clock.epoch, ) } - Ok(StakeInstruction::Redelegate) => { + StakeInstruction::Redelegate => { let mut me = 
get_stake_account()?; if invoke_context .feature_set @@ -337,7 +352,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Err(InstructionError::InvalidInstructionData) } } - Err(err) => Err(err), } }); @@ -382,7 +396,11 @@ mod tests { }, stake_history::{StakeHistory, StakeHistoryEntry}, system_program, - sysvar::{clock, epoch_schedule, rent, rewards, stake_history}, + sysvar::{ + clock, + epoch_rewards::{self, EpochRewards}, + epoch_schedule, rent, rewards, stake_history, + }, }, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{collections::HashSet, str::FromStr, sync::Arc}, @@ -448,11 +466,9 @@ mod tests { ) } - fn process_instruction_as_one_arg( - feature_set: Arc, + fn get_default_transaction_accounts( instruction: &Instruction, - expected_result: Result<(), InstructionError>, - ) -> Vec { + ) -> Vec<(Pubkey, AccountSharedData)> { let mut pubkeys: HashSet = instruction .accounts .iter() @@ -461,7 +477,7 @@ mod tests { pubkeys.insert(clock::id()); pubkeys.insert(epoch_schedule::id()); #[allow(deprecated)] - let transaction_accounts = pubkeys + pubkeys .iter() .map(|pubkey| { ( @@ -489,7 +505,15 @@ mod tests { }, ) }) - .collect(); + .collect() + } + + fn process_instruction_as_one_arg( + feature_set: Arc, + instruction: &Instruction, + expected_result: Result<(), InstructionError>, + ) -> Vec { + let transaction_accounts = get_default_transaction_accounts(instruction); process_instruction( Arc::clone(&feature_set), &instruction.data, @@ -7830,4 +7854,186 @@ mod tests { Err(StakeError::RedelegateToSameVoteAccount.into()), ); } + + #[test] + fn test_stake_process_instruction_with_epoch_rewards_active() { + let feature_set = feature_set_all_enabled(); + + let process_instruction_as_one_arg = |feature_set: Arc, + instruction: &Instruction, + expected_result: Result<(), InstructionError>| + -> Vec { + let mut transaction_accounts = get_default_transaction_accounts(instruction); + + // Initialize EpochRewards 
sysvar account + let epoch_rewards_sysvar = EpochRewards { + active: true, + ..EpochRewards::default() + }; + transaction_accounts.push(( + epoch_rewards::id(), + create_account_shared_data_for_test(&epoch_rewards_sysvar), + )); + + process_instruction( + Arc::clone(&feature_set), + &instruction.data, + transaction_accounts, + instruction.accounts.clone(), + expected_result, + ) + }; + + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::initialize( + &Pubkey::new_unique(), + &Authorized::default(), + &Lockup::default(), + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::authorize( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + StakeAuthorize::Staker, + None, + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::delegate_stake( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &invalid_vote_state_pubkey(), + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::split( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 100, + &invalid_stake_state_pubkey(), + )[2], + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::withdraw( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 100, + None, + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::deactivate_stake(&Pubkey::new_unique(), &Pubkey::new_unique()), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::set_lockup( + &Pubkey::new_unique(), + &LockupArgs::default(), + &Pubkey::new_unique(), + ), + Err(StakeError::EpochRewardsActive.into()), + ); + 
process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::merge( + &Pubkey::new_unique(), + &invalid_stake_state_pubkey(), + &Pubkey::new_unique(), + )[0], + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::authorize_with_seed( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + "seed".to_string(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + StakeAuthorize::Staker, + None, + ), + Err(StakeError::EpochRewardsActive.into()), + ); + + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::initialize_checked(&Pubkey::new_unique(), &Authorized::default()), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::authorize_checked( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + StakeAuthorize::Staker, + None, + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::authorize_checked_with_seed( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + "seed".to_string(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + StakeAuthorize::Staker, + None, + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::set_lockup_checked( + &Pubkey::new_unique(), + &LockupArgs::default(), + &Pubkey::new_unique(), + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::deactivate_delinquent_stake( + &Pubkey::new_unique(), + &invalid_vote_state_pubkey(), + &Pubkey::new_unique(), + ), + Err(StakeError::EpochRewardsActive.into()), + ); + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::redelegate( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + )[2], + 
Err(StakeError::EpochRewardsActive.into()), + ); + + // Only GetMinimumDelegation should not return StakeError::EpochRewardsActive + process_instruction_as_one_arg( + Arc::clone(&feature_set), + &instruction::get_minimum_delegation(), + Ok(()), + ); + } } diff --git a/sdk/program/src/stake/instruction.rs b/sdk/program/src/stake/instruction.rs index 0bc80be6643de1..ff2e51d4491b5c 100644 --- a/sdk/program/src/stake/instruction.rs +++ b/sdk/program/src/stake/instruction.rs @@ -70,6 +70,9 @@ pub enum StakeError { #[error("redelegated stake must be fully activated before deactivation")] RedelegatedStakeMustFullyActivateBeforeDeactivationIsPermitted, + + #[error("stake action is not permitted while the epoch rewards period is active")] + EpochRewardsActive, } impl DecodeError for StakeError { From 70c4cb0ba1fbef2b3aa5796420ed285c46f5c4f6 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 9 Apr 2024 16:39:57 -0700 Subject: [PATCH 03/13] consensus: add dev-context-only-utils to tower (#687) --- Cargo.lock | 1 + accounts-cluster-bench/Cargo.toml | 2 +- bench-tps/Cargo.toml | 7 +++++-- core/src/consensus.rs | 1 + dos/Cargo.toml | 5 +++++ local-cluster/Cargo.toml | 5 +++++ scripts/check-dev-context-only-utils.sh | 1 + transaction-dos/Cargo.toml | 2 ++ 8 files changed, 21 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b55f3b0ba5b8e..60055dabdf6674 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6078,6 +6078,7 @@ dependencies = [ "solana-quic-client", "solana-rpc", "solana-rpc-client", + "solana-runtime", "solana-sdk", "solana-streamer", "solana-tpu-client", diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index 3d8c8c721ca375..8ff7cc12b26276 100644 --- a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -33,7 +33,7 @@ spl-token = { workspace = true, features = ["no-entrypoint"] } [dev-dependencies] solana-accounts-db = { workspace = true } -solana-core = { workspace = true } +solana-core = 
{ workspace = true, features = ["dev-context-only-utils"] } solana-local-cluster = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-test-validator = { workspace = true } diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 80a09fc8048ccd..afa927073dc006 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -23,7 +23,7 @@ solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } solana-client = { workspace = true } solana-connection-cache = { workspace = true } -solana-core = { workspace = true } +solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-faucet = { workspace = true } solana-genesis = { workspace = true } solana-gossip = { workspace = true } @@ -36,7 +36,7 @@ solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-rpc-client-nonce-utils = { workspace = true } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } @@ -54,3 +54,6 @@ tempfile = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[features] +dev-context-only-utils = [] diff --git a/core/src/consensus.rs b/core/src/consensus.rs index b9a65160328d56..c5c0bd90eb7d4e 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -642,6 +642,7 @@ impl Tower { } } + #[cfg(feature = "dev-context-only-utils")] pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option { self.record_bank_vote_and_update_lockouts(slot, hash) } diff --git a/dos/Cargo.toml b/dos/Cargo.toml index 0d7c76b007c4ea..344f1bbc1dabad 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -38,4 +38,9 @@ solana-version = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] +solana-core = { 
workspace = true, features = ["dev-context-only-utils"] } solana-local-cluster = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } + +[features] +dev-context-only-utils = [] diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 07b30030295e52..0d72a867c60ac1 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -45,8 +45,13 @@ assert_matches = { workspace = true } fs_extra = { workspace = true } gag = { workspace = true } serial_test = { workspace = true } +solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-download-utils = { workspace = true } solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[features] +dev-context-only-utils = [] diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 6a4f798c633e26..7f54b38c8ef7cd 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -32,6 +32,7 @@ declare tainted_packages=( solana-accounts-bench solana-banking-bench agave-ledger-tool + solana-bench-tps ) # convert to comma separeted (ref: https://stackoverflow.com/a/53839433) diff --git a/transaction-dos/Cargo.toml b/transaction-dos/Cargo.toml index 58821a1b953fc5..1a112b2ff34e4a 100644 --- a/transaction-dos/Cargo.toml +++ b/transaction-dos/Cargo.toml @@ -31,7 +31,9 @@ solana-transaction-status = { workspace = true } solana-version = { workspace = true } [dev-dependencies] +solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-local-cluster = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] From 0c5d56bd3376babdc8d00540d24a036b6896ab3a Mon Sep 17 00:00:00 2001 
From: "Jeff Washington (jwash)" Date: Tue, 9 Apr 2024 22:13:10 -0500 Subject: [PATCH 04/13] pass &LoadedAccount scan_account_storage (#693) --- accounts-db/src/accounts.rs | 6 +++--- accounts-db/src/accounts_db.rs | 35 +++++++++++++++++----------------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index bfacc83b69a9e1..78400f66a6318e 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -201,16 +201,16 @@ impl Accounts { /// returns only the latest/current version of B for this slot pub fn scan_slot(&self, slot: Slot, func: F) -> Vec where - F: Fn(LoadedAccount) -> Option + Send + Sync, + F: Fn(&LoadedAccount) -> Option + Send + Sync, B: Sync + Send + Default + std::cmp::Eq, { let scan_result = self.accounts_db.scan_account_storage( slot, - |loaded_account: LoadedAccount| { + |loaded_account: &LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning func(loaded_account) }, - |accum: &DashMap, loaded_account: LoadedAccount| { + |accum: &DashMap, loaded_account: &LoadedAccount| { let loaded_account_pubkey = *loaded_account.pubkey(); if let Some(val) = func(loaded_account) { accum.insert(loaded_account_pubkey, val); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index bfc201e99b2015..cfc3b57f01f620 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -919,7 +919,7 @@ impl<'a> LoadedAccount<'a> { } } - pub fn take_account(self) -> AccountSharedData { + pub fn take_account(&self) -> AccountSharedData { match self { LoadedAccount::Stored(stored_account_meta) => { stored_account_meta.to_account_shared_data() @@ -4847,8 +4847,8 @@ impl AccountsDb { pub fn scan_account_storage( &self, slot: Slot, - cache_map_func: impl Fn(LoadedAccount) -> Option + Sync, - storage_scan_func: impl Fn(&B, LoadedAccount) + Sync, + cache_map_func: impl Fn(&LoadedAccount) -> Option + Sync, + 
storage_scan_func: impl Fn(&B, &LoadedAccount) + Sync, ) -> ScanStorageResult where R: Send, @@ -4862,7 +4862,7 @@ impl AccountsDb { slot_cache .par_iter() .filter_map(|cached_account| { - cache_map_func(LoadedAccount::Cached(Cow::Borrowed( + cache_map_func(&LoadedAccount::Cached(Cow::Borrowed( cached_account.value(), ))) }) @@ -4873,7 +4873,7 @@ impl AccountsDb { slot_cache .iter() .filter_map(|cached_account| { - cache_map_func(LoadedAccount::Cached(Cow::Borrowed( + cache_map_func(&LoadedAccount::Cached(Cow::Borrowed( cached_account.value(), ))) }) @@ -4898,10 +4898,9 @@ impl AccountsDb { .storage .get_slot_storage_entry_shrinking_in_progress_ok(slot) { - storage - .accounts - .account_iter() - .for_each(|account| storage_scan_func(&retval, LoadedAccount::Stored(account))); + storage.accounts.account_iter().for_each(|account| { + storage_scan_func(&retval, &LoadedAccount::Stored(account)) + }); } ScanStorageResult::Stored(retval) @@ -7569,11 +7568,11 @@ impl AccountsDb { let scan_result: ScanStorageResult<(Pubkey, AccountHash), DashMap> = self.scan_account_storage( slot, - |loaded_account: LoadedAccount| { + |loaded_account: &LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning Some((*loaded_account.pubkey(), loaded_account.loaded_hash())) }, - |accum: &DashMap, loaded_account: LoadedAccount| { + |accum: &DashMap, loaded_account: &LoadedAccount| { let mut loaded_hash = loaded_account.loaded_hash(); if loaded_hash == AccountHash(Hash::default()) { loaded_hash = Self::hash_account_data( @@ -7605,7 +7604,7 @@ impl AccountsDb { ScanStorageResult>; let scan_result: ScanResult = self.scan_account_storage( slot, - |loaded_account: LoadedAccount| { + |loaded_account: &LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning Some(PubkeyHashAccount { pubkey: *loaded_account.pubkey(), @@ -7614,7 +7613,7 @@ impl AccountsDb { }) }, |accum: &DashMap, - loaded_account: LoadedAccount| { + loaded_account: 
&LoadedAccount| { // Storage may have duplicates so only keep the latest version for each key let mut loaded_hash = loaded_account.loaded_hash(); let key = *loaded_account.pubkey(); @@ -14186,7 +14185,7 @@ pub mod tests { if let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage( *slot as Slot, |_| Some(0), - |slot_accounts: &DashSet, loaded_account: LoadedAccount| { + |slot_accounts: &DashSet, loaded_account: &LoadedAccount| { slot_accounts.insert(*loaded_account.pubkey()); }, ) { @@ -14226,7 +14225,7 @@ pub mod tests { if let ScanStorageResult::Stored(slot_account) = accounts_db.scan_account_storage( *slot as Slot, |_| Some(0), - |slot_account: &Arc>, loaded_account: LoadedAccount| { + |slot_account: &Arc>, loaded_account: &LoadedAccount| { *slot_account.write().unwrap() = *loaded_account.pubkey(); }, ) { @@ -14291,7 +14290,7 @@ pub mod tests { for slot in &slots { let slot_accounts = accounts_db.scan_account_storage( *slot as Slot, - |loaded_account: LoadedAccount| { + |loaded_account: &LoadedAccount| { assert!( !is_cache_at_limit, "When cache is at limit, all roots should have been flushed to storage" @@ -14301,7 +14300,7 @@ pub mod tests { assert!(*slot > requested_flush_root); Some(*loaded_account.pubkey()) }, - |slot_accounts: &DashSet, loaded_account: LoadedAccount| { + |slot_accounts: &DashSet, loaded_account: &LoadedAccount| { slot_accounts.insert(*loaded_account.pubkey()); if !is_cache_at_limit { // Only true when the limit hasn't been reached and there are still @@ -14419,7 +14418,7 @@ pub mod tests { .scan_account_storage( *slot as Slot, |_| Some(0), - |slot_account: &DashSet, loaded_account: LoadedAccount| { + |slot_account: &DashSet, loaded_account: &LoadedAccount| { slot_account.insert(*loaded_account.pubkey()); }, ) { From 4535ea60a93feabba1fc956c86bad7e421aa9696 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 9 Apr 2024 22:14:52 -0500 Subject: [PATCH 05/13] remove ReadableAccount trait from 
LoadedAccount (#692) --- accounts-db/src/accounts_db.rs | 37 +++++++++++++++++----------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index cfc3b57f01f620..1ab6e9bc6f5e59 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -82,7 +82,7 @@ use { solana_nohash_hasher::{IntMap, IntSet}, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, + account::{Account, AccountSharedData, ReadableAccount}, clock::{BankId, Epoch, Slot}, epoch_schedule::EpochSchedule, genesis_config::{ClusterType, GenesisConfig}, @@ -942,9 +942,7 @@ impl<'a> LoadedAccount<'a> { pub fn data_len(&self) -> usize { self.data().len() } -} -impl<'a> ReadableAccount for LoadedAccount<'a> { fn lamports(&self) -> u64 { match self { LoadedAccount::Stored(stored_account_meta) => stored_account_meta.lamports(), @@ -958,7 +956,7 @@ impl<'a> ReadableAccount for LoadedAccount<'a> { LoadedAccount::Cached(cached_account) => cached_account.account.data(), } } - fn owner(&self) -> &Pubkey { + pub(crate) fn owner(&self) -> &Pubkey { match self { LoadedAccount::Stored(stored_account_meta) => stored_account_meta.owner(), LoadedAccount::Cached(cached_account) => cached_account.account.owner(), @@ -976,19 +974,6 @@ impl<'a> ReadableAccount for LoadedAccount<'a> { LoadedAccount::Cached(cached_account) => cached_account.account.rent_epoch(), } } - fn to_account_shared_data(&self) -> AccountSharedData { - match self { - LoadedAccount::Stored(_stored_account_meta) => AccountSharedData::create( - self.lamports(), - self.data().to_vec(), - *self.owner(), - self.executable(), - self.rent_epoch(), - ), - // clone here to prevent data copy - LoadedAccount::Cached(cached_account) => cached_account.account.clone(), - } - } } #[derive(Debug)] @@ -2266,7 +2251,14 @@ impl<'a> AppendVecScan for ScanState<'a> { let hash_is_missing = 
loaded_hash == AccountHash(Hash::default()); if self.config.check_hash || hash_is_missing { - let computed_hash = AccountsDb::hash_account(loaded_account, loaded_account.pubkey()); + let computed_hash = AccountsDb::hash_account_data( + loaded_account.lamports(), + loaded_account.owner(), + loaded_account.executable(), + loaded_account.rent_epoch(), + loaded_account.data(), + loaded_account.pubkey(), + ); if hash_is_missing { loaded_hash = computed_hash; } else if self.config.check_hash && computed_hash != loaded_hash { @@ -6624,7 +6616,14 @@ impl AccountsDb { let balance = loaded_account.lamports(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); if config.check_hash || hash_is_missing { - let computed_hash = AccountsDb::hash_account(&loaded_account, loaded_account.pubkey()); + let computed_hash = Self::hash_account_data( + loaded_account.lamports(), + loaded_account.owner(), + loaded_account.executable(), + loaded_account.rent_epoch(), + loaded_account.data(), + loaded_account.pubkey(), + ); if hash_is_missing { loaded_hash = computed_hash; } From dd8e1f4c7311eae0e8864ea87fa2a72b2f2fbdcd Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 9 Apr 2024 21:55:45 -0600 Subject: [PATCH 06/13] Remove overly restrictive check_account_access for partitioned epoch rewards (#631) * Remove rewards-interval-related check_account_access implementation * Move RewardsInterval to tests module * Update test to new StakeProgram functionality --- runtime/src/bank.rs | 23 +--- .../src/bank/partitioned_epoch_rewards/mod.rs | 123 +++++++++++++----- svm/src/account_loader.rs | 2 - 3 files changed, 88 insertions(+), 60 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b7e8217adafc55..2216219ae4a99a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -42,8 +42,7 @@ use { builtins::{BuiltinPrototype, BUILTINS}, metrics::*, partitioned_epoch_rewards::{ - EpochRewardCalculateParamInfo, EpochRewardStatus, RewardInterval, StakeRewards, - 
VoteRewardsAccounts, + EpochRewardCalculateParamInfo, EpochRewardStatus, StakeRewards, VoteRewardsAccounts, }, }, bank_forks::BankForks, @@ -6862,26 +6861,6 @@ impl TransactionProcessingCallback for Bank { self.feature_set.clone() } - fn check_account_access( - &self, - message: &SanitizedMessage, - account_index: usize, - account: &AccountSharedData, - error_counters: &mut TransactionErrorMetrics, - ) -> Result<()> { - if self.get_reward_interval() == RewardInterval::InsideInterval - && message.is_writable(account_index) - && solana_stake_program::check_id(account.owner()) - { - error_counters.program_execution_temporarily_restricted += 1; - Err(TransactionError::ProgramExecutionTemporarilyRestricted { - account_index: account_index as u8, - }) - } else { - Ok(()) - } - } - fn get_program_match_criteria(&self, program: &Pubkey) -> LoadedProgramMatchCriteria { if self.check_program_modification_slot { self.transaction_processor diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index c4c4ef4b0257b7..9c1f16559cf0cf 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -18,14 +18,6 @@ use { std::sync::Arc, }; -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub(super) enum RewardInterval { - /// the slot within the epoch is INSIDE the reward distribution interval - InsideInterval, - /// the slot within the epoch is OUTSIDE the reward distribution interval - OutsideInterval, -} - #[derive(AbiExample, Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) struct StartBlockHeightAndRewards { /// the block height of the slot at which rewards distribution began @@ -155,15 +147,6 @@ impl Bank { } } - /// Return `RewardInterval` enum for current bank - pub(super) fn get_reward_interval(&self) -> RewardInterval { - if matches!(self.epoch_reward_status, EpochRewardStatus::Active(_)) { - RewardInterval::InsideInterval - } else { - 
RewardInterval::OutsideInterval - } - } - /// true if it is ok to run partitioned rewards code. /// This means the feature is activated or certain testing situations. pub(super) fn is_partitioned_rewards_code_enabled(&self) -> bool { @@ -206,16 +189,37 @@ mod tests { partitioned_rewards::TestPartitionedEpochRewards, }, solana_sdk::{ + account::Account, epoch_schedule::EpochSchedule, native_token::LAMPORTS_PER_SOL, signature::Signer, + signer::keypair::Keypair, + stake::instruction::StakeError, system_transaction, + transaction::Transaction, vote::state::{VoteStateVersions, MAX_LOCKOUT_HISTORY}, }, solana_vote_program::{vote_state, vote_transaction}, }; + #[derive(Debug, PartialEq, Eq, Copy, Clone)] + enum RewardInterval { + /// the slot within the epoch is INSIDE the reward distribution interval + InsideInterval, + /// the slot within the epoch is OUTSIDE the reward distribution interval + OutsideInterval, + } + impl Bank { + /// Return `RewardInterval` enum for current bank + fn get_reward_interval(&self) -> RewardInterval { + if matches!(self.epoch_reward_status, EpochRewardStatus::Active(_)) { + RewardInterval::InsideInterval + } else { + RewardInterval::OutsideInterval + } + } + /// Return the total number of blocks in reward interval (including both calculation and crediting). pub(in crate::bank) fn get_reward_total_num_blocks(&self, rewards: &StakeRewards) -> u64 { self.get_reward_calculation_num_blocks() @@ -642,28 +646,54 @@ mod tests { } } - /// Test that program execution that involves stake accounts should fail during reward period. - /// Any programs, which result in stake account changes, will throw `ProgramExecutionTemporarilyRestricted` error when - /// in reward period. + /// Test that program execution that attempts to mutate a stake account + incorrectly should fail during reward period. A credit should succeed, + but a withdrawal should fail.
#[test] fn test_program_execution_restricted_for_stake_account_in_reward_period() { - use solana_sdk::transaction::TransactionError::ProgramExecutionTemporarilyRestricted; + use solana_sdk::transaction::TransactionError::InstructionError; let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand(); let validator_keypairs = vec![&validator_vote_keypairs]; - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + .. + } = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, vec![1_000_000_000; 1], ); - let node_key = &validator_keypairs[0].node_keypair; - let stake_key = &validator_keypairs[0].stake_keypair; + // Add stake account to try to mutate + let vote_key = validator_keypairs[0].vote_keypair.pubkey(); + let vote_account = genesis_config + .accounts + .iter() + .find(|(&address, _)| address == vote_key) + .map(|(_, account)| account) + .unwrap() + .clone(); + + let new_stake_signer = Keypair::new(); + let new_stake_address = new_stake_signer.pubkey(); + let new_stake_account = Account::from(solana_stake_program::stake_state::create_account( + &new_stake_address, + &vote_key, + &vote_account.into(), + &genesis_config.rent, + 2_000_000_000, + )); + genesis_config + .accounts + .extend(vec![(new_stake_address, new_stake_account)]); let (mut previous_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let num_slots_in_epoch = previous_bank.get_slots_in_epoch(previous_bank.epoch()); assert_eq!(num_slots_in_epoch, 32); + let transfer_amount = 5_000; + for slot in 1..=num_slots_in_epoch + 2 { let bank = new_bank_from_parent_with_bank_forks( bank_forks.as_ref(), @@ -685,25 +715,46 @@ mod tests { ); bank.process_transaction(&vote).unwrap(); - // Insert a transfer transaction from node account to stake account - let tx = system_transaction::transfer( - node_key, - &stake_key.pubkey(), - 1, + // Insert a transfer transaction 
from the mint to new stake account + let system_tx = system_transaction::transfer( + &mint_keypair, + &new_stake_address, + transfer_amount, + bank.last_blockhash(), + ); + let system_result = bank.process_transaction(&system_tx); + + // Credits should always succeed + assert!(system_result.is_ok()); + + // Attempt to withdraw from new stake account to the mint + let stake_ix = solana_sdk::stake::instruction::withdraw( + &new_stake_address, + &new_stake_address, + &mint_keypair.pubkey(), + transfer_amount, + None, + ); + let stake_tx = Transaction::new_signed_with_payer( + &[stake_ix], + Some(&mint_keypair.pubkey()), + &[&mint_keypair, &new_stake_signer], bank.last_blockhash(), ); - let r = bank.process_transaction(&tx); + let stake_result = bank.process_transaction(&stake_tx); if slot == num_slots_in_epoch { - // When the bank is at the beginning of the new epoch, i.e. slot 32, - // ProgramExecutionTemporarilyRestricted should be thrown for the transfer transaction. + // When the bank is at the beginning of the new epoch, i.e. slot + // 32, StakeError::EpochRewardsActive should be thrown for + // actions like StakeInstruction::Withdraw assert_eq!( - r, - Err(ProgramExecutionTemporarilyRestricted { account_index: 1 }) + stake_result, + Err(InstructionError(0, StakeError::EpochRewardsActive.into())) ); } else { - // When the bank is outside of reward interval, the transfer transaction should not be affected and will succeed. - assert!(r.is_ok()); + // When the bank is outside of reward interval, the withdraw + // transaction should not be affected and will succeed. 
+ assert!(stake_result.is_ok()); } // Push a dummy blockhash, so that the latest_blockhash() for the transfer transaction in each diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index dd47da3c6092cc..e2d547126fd462 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -298,8 +298,6 @@ fn load_transaction_accounts( validated_fee_payer = true; } - callbacks.check_account_access(message, i, &account, error_counters)?; - tx_rent += rent; rent_debits.insert(key, rent, account.lamports()); From 72ee2709436b2282c2fd9a4db71934bffbd3353f Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 10 Apr 2024 14:29:48 +0800 Subject: [PATCH 07/13] Update cargo metadata to Anza (#635) --- CONTRIBUTING.md | 6 +++--- Cargo.toml | 6 +++--- programs/sbf/Cargo.toml | 11 ++++------- scripts/reserve-cratesio-package-name.sh | 6 +++--- sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml | 6 +++--- sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml | 6 +++--- 6 files changed, 19 insertions(+), 22 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f5cbd4f5e09ce7..35ea55e913f1af 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -255,9 +255,9 @@ before the PR can be merged. 
Here are the steps: name = "solana-" version = "0.0.1" description = "" -authors = ["Solana Labs Maintainers "] -repository = "https://github.com/solana-labs/solana" -homepage = "https://solana.com/" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" +homepage = "https://anza.xyz" documentation = "https://docs.rs/solana-" license = "Apache-2.0" edition = "2021" diff --git a/Cargo.toml b/Cargo.toml index b675929fa22bdb..3eaf79f3b7b7b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -130,9 +130,9 @@ resolver = "2" [workspace.package] version = "2.0.0" -authors = ["Solana Labs Maintainers "] -repository = "https://github.com/solana-labs/solana" -homepage = "https://solanalabs.com/" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" +homepage = "https://anza.xyz/" license = "Apache-2.0" edition = "2021" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index c8f2b431f28e7a..830b57d8e5359b 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -1,9 +1,9 @@ [workspace.package] version = "2.0.0" description = "Solana SBF test program written in Rust" -authors = ["Solana Labs Maintainers "] -repository = "https://github.com/solana-labs/solana" -homepage = "https://solanalabs.com/" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" +homepage = "https://anza.xyz" license = "Apache-2.0" edition = "2021" @@ -69,10 +69,7 @@ edition = { workspace = true } [features] sbf_c = [] sbf_rust = [] -dummy-for-ci-check = [ - "sbf_c", - "sbf_rust", -] +dummy-for-ci-check = ["sbf_c", "sbf_rust"] [build-dependencies] walkdir = "2" diff --git a/scripts/reserve-cratesio-package-name.sh b/scripts/reserve-cratesio-package-name.sh index 2e751163cefbb9..e172e1640094d0 100755 --- a/scripts/reserve-cratesio-package-name.sh +++ b/scripts/reserve-cratesio-package-name.sh @@ -103,10 +103,10 @@ if pushd "${tmpdir}" &>/dev/null; then name = "${package_name}" version = "0.0.0" 
description = "reserved for future use" -authors = ["Solana Labs Maintainers "] -repository = "https://github.com/solana-labs/solana" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" license = "Apache-2.0" -homepage = "https://solanalabs.com" +homepage = "https://anza.xyz" documentation = "https://docs.rs/${package_name}" edition = "2021" EOF diff --git a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index 7dc085d721af50..bcb610f7e38c47 100644 --- a/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -2,10 +2,10 @@ name = "fail" version = "2.0.0" description = "Solana SBF test program written in Rust" -authors = ["Solana Labs Maintainers "] -repository = "https://github.com/solana-labs/solana" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" license = "Apache-2.0" -homepage = "https://solanalabs.com/" +homepage = "https://anza.xyz" edition = "2021" publish = false diff --git a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index 3d3946decdb6ab..0dad397a2d7b00 100644 --- a/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -2,10 +2,10 @@ name = "noop" version = "2.0.0" description = "Solana SBF test program written in Rust" -authors = ["Solana Labs Maintainers "] -repository = "https://github.com/solana-labs/solana" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" license = "Apache-2.0" -homepage = "https://solanalabs.com/" +homepage = "https://anza.xyz" edition = "2021" publish = false From 9778e489b098c067745b664717f133e69bb80d8b Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 10 Apr 2024 09:22:37 -0500 Subject: [PATCH 08/13] add ScanAccountStorageData (#694) --- accounts-db/src/accounts.rs | 7 +++--- 
accounts-db/src/accounts_db.rs | 39 ++++++++++++++++++++++++++-------- 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 78400f66a6318e..ce066f6bbf7b5a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -1,8 +1,8 @@ use { crate::{ accounts_db::{ - AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanStorageResult, - VerifyAccountsHashAndLamportsConfig, + AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData, + ScanStorageResult, VerifyAccountsHashAndLamportsConfig, }, accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport}, ancestors::Ancestors, @@ -210,12 +210,13 @@ impl Accounts { // Cache only has one version per key, don't need to worry about versioning func(loaded_account) }, - |accum: &DashMap, loaded_account: &LoadedAccount| { + |accum: &DashMap, loaded_account: &LoadedAccount, _data| { let loaded_account_pubkey = *loaded_account.pubkey(); if let Some(val) = func(loaded_account) { accum.insert(loaded_account_pubkey, val); } }, + ScanAccountStorageData::NoData, ); match scan_result { diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1ab6e9bc6f5e59..877d122565d88b 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -170,6 +170,15 @@ enum ScanAccountStorageResult { CacheFileNeedsToBeCreated((String, Range)), } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum ScanAccountStorageData { + /// callback for accounts in storage will not include `data` + NoData, + /// return data (&[u8]) for each account. + /// This can be expensive to get and is not necessary for many scan operations. 
+ DataRefForStorage, +} + #[derive(Default, Debug)] /// hold alive accounts /// alive means in the accounts index @@ -4836,11 +4845,12 @@ impl AccountsDb { } /// Scan a specific slot through all the account storage - pub fn scan_account_storage( + pub(crate) fn scan_account_storage( &self, slot: Slot, cache_map_func: impl Fn(&LoadedAccount) -> Option + Sync, - storage_scan_func: impl Fn(&B, &LoadedAccount) + Sync, + storage_scan_func: impl Fn(&B, &LoadedAccount, Option<&[u8]>) + Sync, + scan_account_storage_data: ScanAccountStorageData, ) -> ScanStorageResult where R: Send, @@ -4891,7 +4901,11 @@ impl AccountsDb { .get_slot_storage_entry_shrinking_in_progress_ok(slot) { storage.accounts.account_iter().for_each(|account| { - storage_scan_func(&retval, &LoadedAccount::Stored(account)) + let loaded_account = LoadedAccount::Stored(account); + let data = (scan_account_storage_data + == ScanAccountStorageData::DataRefForStorage) + .then_some(loaded_account.data()); + storage_scan_func(&retval, &loaded_account, data) }); } @@ -7571,7 +7585,7 @@ impl AccountsDb { // Cache only has one version per key, don't need to worry about versioning Some((*loaded_account.pubkey(), loaded_account.loaded_hash())) }, - |accum: &DashMap, loaded_account: &LoadedAccount| { + |accum: &DashMap, loaded_account: &LoadedAccount, _data| { let mut loaded_hash = loaded_account.loaded_hash(); if loaded_hash == AccountHash(Hash::default()) { loaded_hash = Self::hash_account_data( @@ -7585,6 +7599,7 @@ impl AccountsDb { } accum.insert(*loaded_account.pubkey(), loaded_hash); }, + ScanAccountStorageData::NoData, ); scan.stop(); @@ -7612,7 +7627,8 @@ impl AccountsDb { }) }, |accum: &DashMap, - loaded_account: &LoadedAccount| { + loaded_account: &LoadedAccount, + _data| { // Storage may have duplicates so only keep the latest version for each key let mut loaded_hash = loaded_account.loaded_hash(); let key = *loaded_account.pubkey(); @@ -7622,6 +7638,7 @@ impl AccountsDb { } accum.insert(key, (loaded_hash, 
account)); }, + ScanAccountStorageData::NoData, ); match scan_result { @@ -14184,9 +14201,10 @@ pub mod tests { if let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage( *slot as Slot, |_| Some(0), - |slot_accounts: &DashSet, loaded_account: &LoadedAccount| { + |slot_accounts: &DashSet, loaded_account: &LoadedAccount, _data| { slot_accounts.insert(*loaded_account.pubkey()); }, + ScanAccountStorageData::NoData, ) { if *slot == alive_slot { assert_eq!(slot_accounts.len(), keys.len()); @@ -14224,9 +14242,10 @@ pub mod tests { if let ScanStorageResult::Stored(slot_account) = accounts_db.scan_account_storage( *slot as Slot, |_| Some(0), - |slot_account: &Arc>, loaded_account: &LoadedAccount| { + |slot_account: &Arc>, loaded_account: &LoadedAccount, _data| { *slot_account.write().unwrap() = *loaded_account.pubkey(); }, + ScanAccountStorageData::NoData, ) { assert_eq!(*slot_account.read().unwrap(), keys[*slot as usize]); } else { @@ -14299,7 +14318,7 @@ pub mod tests { assert!(*slot > requested_flush_root); Some(*loaded_account.pubkey()) }, - |slot_accounts: &DashSet, loaded_account: &LoadedAccount| { + |slot_accounts: &DashSet, loaded_account: &LoadedAccount, _data| { slot_accounts.insert(*loaded_account.pubkey()); if !is_cache_at_limit { // Only true when the limit hasn't been reached and there are still @@ -14307,6 +14326,7 @@ pub mod tests { assert!(*slot <= requested_flush_root); } }, + ScanAccountStorageData::NoData, ); let slot_accounts = match slot_accounts { @@ -14417,9 +14437,10 @@ pub mod tests { .scan_account_storage( *slot as Slot, |_| Some(0), - |slot_account: &DashSet, loaded_account: &LoadedAccount| { + |slot_account: &DashSet, loaded_account: &LoadedAccount, _data| { slot_account.insert(*loaded_account.pubkey()); }, + ScanAccountStorageData::NoData, ) { slot_accounts.into_iter().collect::>() } else { From d308acdc84afed2cd1402875217966d01a11c635 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 10 Apr 2024 14:28:34 +0000 
Subject: [PATCH 09/13] removes feature gated code for dropping legacy shreds (#677) --- core/src/shred_fetch_stage.rs | 176 ------------- ledger/src/shred.rs | 447 ++++++++++++++++++++-------------- ledger/src/shred/stats.rs | 24 +- 3 files changed, 278 insertions(+), 369 deletions(-) diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index a0a88102698a1f..94a2d1c6f5e961 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -104,14 +104,6 @@ impl ShredFetchStage { // Limit shreds to 2 epochs away. let max_slot = last_slot + 2 * slots_per_epoch; - let should_drop_legacy_shreds = |shred_slot| { - check_feature_activation( - &feature_set::drop_legacy_shreds::id(), - shred_slot, - &feature_set, - &epoch_schedule, - ) - }; let enable_chained_merkle_shreds = |shred_slot| { check_feature_activation( &feature_set::enable_chained_merkle_shreds::id(), @@ -128,7 +120,6 @@ impl ShredFetchStage { last_root, max_slot, shred_version, - should_drop_legacy_shreds, enable_chained_merkle_shreds, &mut stats, ) @@ -435,170 +426,3 @@ fn check_feature_activation( } } } - -#[cfg(test)] -mod tests { - use { - super::*, - solana_ledger::{ - blockstore::MAX_DATA_SHREDS_PER_SLOT, - shred::{ReedSolomonCache, Shred, ShredFlags}, - }, - solana_sdk::packet::Packet, - }; - - #[test] - fn test_data_code_same_index() { - solana_logger::setup(); - let mut packet = Packet::default(); - let mut stats = ShredFetchStats::default(); - - let slot = 2; - let shred_version = 45189; - let shred = Shred::new_from_data( - slot, - 3, // shred index - 1, // parent offset - &[], // data - ShredFlags::LAST_SHRED_IN_SLOT, - 0, // reference_tick - shred_version, - 3, // fec_set_index - ); - shred.copy_to_packet(&mut packet); - - let last_root = 0; - let last_slot = 100; - let slots_per_epoch = 10; - let max_slot = last_slot + 2 * slots_per_epoch; - assert!(!should_discard_shred( - &packet, - last_root, - max_slot, - shred_version, - |_| false, // 
should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - let coding = solana_ledger::shred::Shredder::generate_coding_shreds( - &[shred], - 3, // next_code_index - &ReedSolomonCache::default(), - ); - coding[0].copy_to_packet(&mut packet); - assert!(!should_discard_shred( - &packet, - last_root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - } - - #[test] - fn test_shred_filter() { - solana_logger::setup(); - let mut packet = Packet::default(); - let mut stats = ShredFetchStats::default(); - let last_root = 0; - let last_slot = 100; - let slots_per_epoch = 10; - let shred_version = 59445; - let max_slot = last_slot + 2 * slots_per_epoch; - - // packet size is 0, so cannot get index - assert!(should_discard_shred( - &packet, - last_root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - assert_eq!(stats.index_overrun, 1); - let shred = Shred::new_from_data( - 2, // slot - 3, // index - 1, // parent_offset - &[], // data - ShredFlags::LAST_SHRED_IN_SLOT, - 0, // reference_tick - shred_version, - 0, // fec_set_index - ); - shred.copy_to_packet(&mut packet); - - // rejected slot is 2, root is 3 - assert!(should_discard_shred( - &packet, - 3, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - assert_eq!(stats.slot_out_of_range, 1); - - assert!(should_discard_shred( - &packet, - last_root, - max_slot, - 345, // shred_version - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - assert_eq!(stats.shred_version_mismatch, 1); - - // Accepted for 1,3 - assert!(!should_discard_shred( - &packet, - last_root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut 
stats, - )); - - let shred = Shred::new_from_data( - 1_000_000, - 3, - 0, - &[], - ShredFlags::LAST_SHRED_IN_SLOT, - 0, - 0, - 0, - ); - shred.copy_to_packet(&mut packet); - - // Slot 1 million is too high - assert!(should_discard_shred( - &packet, - last_root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - - let index = MAX_DATA_SHREDS_PER_SLOT as u32; - let shred = Shred::new_from_data(5, index, 0, &[], ShredFlags::LAST_SHRED_IN_SLOT, 0, 0, 0); - shred.copy_to_packet(&mut packet); - assert!(should_discard_shred( - &packet, - last_root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats, - )); - } -} diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 2b6f6f136784c2..bb93b7628eed37 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -1035,7 +1035,6 @@ pub fn should_discard_shred( root: Slot, max_slot: Slot, shred_version: u16, - should_drop_legacy_shreds: impl Fn(Slot) -> bool, enable_chained_merkle_shreds: impl Fn(Slot) -> bool, stats: &mut ShredFetchStats, ) -> bool { @@ -1112,9 +1111,7 @@ pub fn should_discard_shred( } match shred_variant { ShredVariant::LegacyCode | ShredVariant::LegacyData => { - if should_drop_legacy_shreds(slot) { - return true; - } + return true; } ShredVariant::MerkleCode { chained: false, .. 
} => { stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1); @@ -1199,7 +1196,9 @@ mod tests { bincode::serialized_size, rand::Rng, rand_chacha::{rand_core::SeedableRng, ChaChaRng}, + rayon::ThreadPoolBuilder, solana_sdk::{shred_version, signature::Signer, signer::keypair::keypair_from_seed}, + std::io::{Cursor, Seek, SeekFrom, Write}, test_case::test_case, }; @@ -1317,185 +1316,271 @@ mod tests { ); } - #[test] - fn test_should_discard_shred() { + #[test_case(false, false)] + #[test_case(false, true)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_should_discard_shred(chained: bool, is_last_in_slot: bool) { solana_logger::setup(); - let mut packet = Packet::default(); - let root = 1; - let shred_version = 798; - let max_slot = 16; - let shred = Shred::new_from_data( - 2, // slot - 3, // index - 1, // parent_offset - &[], // data - ShredFlags::LAST_SHRED_IN_SLOT, - 0, // reference_tick - shred_version, - 0, // fec_set_index - ); - shred.copy_to_packet(&mut packet); - let mut stats = ShredFetchStats::default(); - assert!(!should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(stats, ShredFetchStats::default()); - - packet.meta_mut().size = OFFSET_OF_SHRED_VARIANT; - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(stats.index_overrun, 1); - - packet.meta_mut().size = OFFSET_OF_SHRED_INDEX; - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(stats.index_overrun, 2); - - packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + 1; - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| 
false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(stats.index_overrun, 3); - - packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX - 1; - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(stats.index_overrun, 4); - - packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + 2; - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(stats.bad_parent_offset, 1); - - let shred = Shred::new_from_parity_shard( - 8, // slot - 2, // index - &[], // parity_shard - 10, // fec_set_index - 30, // num_data - 4, // num_code - 1, // position - shred_version, - ); - shred.copy_to_packet(&mut packet); - assert!(!should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - - let shred = Shred::new_from_data( - 2, // slot - std::u32::MAX - 10, // index - 1, // parent_offset - &[], // data - ShredFlags::LAST_SHRED_IN_SLOT, - 0, // reference_tick - shred_version, - 0, // fec_set_index - ); - shred.copy_to_packet(&mut packet); - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(1, stats.index_out_of_bounds); - - let shred = Shred::new_from_parity_shard( - 8, // slot - 2, // index - &[], // parity_shard - 10, // fec_set_index - 30, // num_data_shreds - 4, // num_coding_shreds - 3, // position - shred_version, - ); - shred.copy_to_packet(&mut packet); - assert!(!should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // 
should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - packet.buffer_mut()[OFFSET_OF_SHRED_VARIANT] = u8::MAX; - - assert!(should_discard_shred( - &packet, - root, - max_slot, - shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(1, stats.bad_shred_type); - assert_eq!(stats.shred_version_mismatch, 0); - - packet.buffer_mut()[OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + 1] = u8::MAX; - assert!(should_discard_shred( - &packet, - root, - max_slot, + let mut rng = rand::thread_rng(); + let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); + let reed_solomon_cache = ReedSolomonCache::default(); + let keypair = Keypair::new(); + let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); + let slot = 18_291; + let parent_slot = rng.gen_range(1..slot); + let shred_version = rng.gen(); + let reference_tick = rng.gen_range(1..64); + let next_shred_index = rng.gen_range(0..671); + let next_code_index = rng.gen_range(0..781); + let mut data = vec![0u8; 1200 * 5]; + rng.fill(&mut data[..]); + let shreds = merkle::make_shreds_from_data( + &thread_pool, + &keypair, + chained_merkle_root, + &data[..], + slot, + parent_slot, shred_version, - |_| false, // should_drop_legacy_shreds - |_| true, // enable_chained_merkle_shreds - &mut stats - )); - assert_eq!(1, stats.bad_shred_type); - assert_eq!(stats.shred_version_mismatch, 1); + reference_tick, + is_last_in_slot, + next_shred_index, + next_code_index, + &reed_solomon_cache, + &mut ProcessShredsStats::default(), + ) + .unwrap(); + assert_eq!(shreds.len(), 1); + let shreds: Vec<_> = shreds.into_iter().flatten().map(Shred::from).collect(); + + let root = rng.gen_range(0..parent_slot); + let max_slot = slot + rng.gen_range(1..65536); + let mut packet = Packet::default(); + + // Data shred sanity checks! 
+ { + let shred = shreds.first().unwrap(); + assert_eq!(shred.shred_type(), ShredType::Data); + shred.copy_to_packet(&mut packet); + let mut stats = ShredFetchStats::default(); + assert!(!should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + } + { + let mut packet = packet.clone(); + let mut stats = ShredFetchStats::default(); + packet.meta_mut().size = OFFSET_OF_SHRED_VARIANT; + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.index_overrun, 1); + + packet.meta_mut().size = OFFSET_OF_SHRED_INDEX; + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.index_overrun, 2); + + packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + 1; + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.index_overrun, 3); + + packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX - 1; + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.index_overrun, 4); + + packet.meta_mut().size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + 2; + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.bad_parent_offset, 1); + } + { + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version.wrapping_add(1), + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.shred_version_mismatch, 1); + } + { + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + 
parent_slot + 1, // root + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.slot_out_of_range, 1); + } + { + let parent_offset = 0u16; + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor.seek(SeekFrom::Start(83)).unwrap(); + cursor.write_all(&parent_offset.to_le_bytes()).unwrap(); + } + assert_eq!( + layout::get_parent_offset(packet.data(..).unwrap()), + Some(parent_offset) + ); + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.slot_out_of_range, 1); + } + { + let parent_offset = u16::try_from(slot + 1).unwrap(); + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor.seek(SeekFrom::Start(83)).unwrap(); + cursor.write_all(&parent_offset.to_le_bytes()).unwrap(); + } + assert_eq!( + layout::get_parent_offset(packet.data(..).unwrap()), + Some(parent_offset) + ); + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.bad_parent_offset, 1); + } + { + let index = std::u32::MAX - 10; + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor + .seek(SeekFrom::Start(OFFSET_OF_SHRED_INDEX as u64)) + .unwrap(); + cursor.write_all(&index.to_le_bytes()).unwrap(); + } + assert_eq!(layout::get_index(packet.data(..).unwrap()), Some(index)); + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.index_out_of_bounds, 1); + } + + // Coding shred sanity checks! 
+ { + let shred = shreds.last().unwrap(); + assert_eq!(shred.shred_type(), ShredType::Code); + shreds.last().unwrap().copy_to_packet(&mut packet); + let mut stats = ShredFetchStats::default(); + assert!(!should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + } + { + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version.wrapping_add(1), + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.shred_version_mismatch, 1); + } + { + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + slot, // root + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.slot_out_of_range, 1); + } + { + let index = u32::try_from(MAX_CODE_SHREDS_PER_SLOT).unwrap(); + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor + .seek(SeekFrom::Start(OFFSET_OF_SHRED_INDEX as u64)) + .unwrap(); + cursor.write_all(&index.to_le_bytes()).unwrap(); + } + assert_eq!(layout::get_index(packet.data(..).unwrap()), Some(index)); + let mut stats = ShredFetchStats::default(); + assert!(should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| true, // enable_chained_merkle_shreds + &mut stats + )); + assert_eq!(stats.index_out_of_bounds, 1); + } } // Asserts that ShredType is backward compatible with u8. 
diff --git a/ledger/src/shred/stats.rs b/ledger/src/shred/stats.rs index 60dfa9a79859c2..696d75b3a7b240 100644 --- a/ledger/src/shred/stats.rs +++ b/ledger/src/shred/stats.rs @@ -32,21 +32,21 @@ pub struct ProcessShredsStats { #[derive(Default, Debug, Eq, PartialEq)] pub struct ShredFetchStats { - pub index_overrun: usize, + pub(super) index_overrun: usize, pub shred_count: usize, - pub(crate) num_shreds_merkle_code: usize, - pub(crate) num_shreds_merkle_code_chained: usize, - pub(crate) num_shreds_merkle_data: usize, - pub(crate) num_shreds_merkle_data_chained: usize, + pub(super) num_shreds_merkle_code: usize, + pub(super) num_shreds_merkle_code_chained: usize, + pub(super) num_shreds_merkle_data: usize, + pub(super) num_shreds_merkle_data_chained: usize, pub ping_count: usize, pub ping_err_verify_count: usize, - pub(crate) index_bad_deserialize: usize, - pub(crate) index_out_of_bounds: usize, - pub(crate) slot_bad_deserialize: usize, - pub slot_out_of_range: usize, - pub(crate) bad_shred_type: usize, - pub shred_version_mismatch: usize, - pub(crate) bad_parent_offset: usize, + pub(super) index_bad_deserialize: usize, + pub(super) index_out_of_bounds: usize, + pub(super) slot_bad_deserialize: usize, + pub(super) slot_out_of_range: usize, + pub(super) bad_shred_type: usize, + pub(super) shred_version_mismatch: usize, + pub(super) bad_parent_offset: usize, since: Option, } From d06e3e95cb418f52b0e2697453931607770a65b3 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 10 Apr 2024 16:22:28 +0100 Subject: [PATCH 10/13] program-test: move test-case to dev-dependencies (#710) --- program-test/Cargo.toml | 2 +- programs/sbf/Cargo.lock | 36 ------------------------------------ 2 files changed, 1 insertion(+), 37 deletions(-) diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 1456d0dcca8018..2852b20ad43bc9 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -30,9 +30,9 @@ solana-sdk = { workspace = true } solana-svm = { workspace = 
true } solana-vote-program = { workspace = true } solana_rbpf = { workspace = true } -test-case = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } [dev-dependencies] solana-stake-program = { workspace = true } +test-case = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ada0f1b2ee5062..b6762a94288184 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5480,7 +5480,6 @@ dependencies = [ "solana-svm", "solana-vote-program", "solana_rbpf", - "test-case", "thiserror", "tokio", ] @@ -7132,41 +7131,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" -[[package]] -name = "test-case" -version = "3.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" -dependencies = [ - "test-case-macros", -] - -[[package]] -name = "test-case-core" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462" -dependencies = [ - "cfg-if 1.0.0", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.52", -] - -[[package]] -name = "test-case-macros" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37cfd7bbc88a0104e304229fba519bdc45501a30b760fb72240342f1289ad257" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.52", - "test-case-core", -] - [[package]] name = "textwrap" version = "0.11.0" From ff8a25b97f8d44cd4fbb31e8cfe83ad607b15480 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 10 Apr 2024 16:22:51 +0100 Subject: [PATCH 11/13] svm: move bpf-loader-program to dev-dependencies (#711) --- programs/sbf/Cargo.lock | 1 - svm/Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 2 
deletions(-) diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b6762a94288184..dc64ef979db218 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6350,7 +6350,6 @@ dependencies = [ "percentage", "rustc_version", "serde", - "solana-bpf-loader-program", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-loader-v4-program", diff --git a/svm/Cargo.toml b/svm/Cargo.toml index ba5286e984c114..2135dd6aacd7f8 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -14,7 +14,6 @@ itertools = { workspace = true } log = { workspace = true } percentage = { workspace = true } serde = { workspace = true, features = ["derive", "rc"] } -solana-bpf-loader-program = { workspace = true } solana-frozen-abi = { workspace = true } solana-frozen-abi-macro = { workspace = true } solana-loader-v4-program = { workspace = true } @@ -32,6 +31,7 @@ name = "solana_svm" bincode = { workspace = true } libsecp256k1 = { workspace = true } rand = { workspace = true } +solana-bpf-loader-program = { workspace = true } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } From 51f99727b9217a34aeb595d093c43e0aefd9feaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 23:49:56 +0800 Subject: [PATCH 12/13] build(deps): bump quote from 1.0.35 to 1.0.36 (#706) * build(deps): bump quote from 1.0.35 to 1.0.36 Bumps [quote](https://github.com/dtolnay/quote) from 1.0.35 to 1.0.36. - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.35...1.0.36) --- updated-dependencies: - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60055dabdf6674..f4ceeb86f87b2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4364,9 +4364,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dc64ef979db218..9b03914665eaa2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3793,9 +3793,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] From e91a5e27444af47c63946a971244c1075bbc99bf Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 10 Apr 2024 15:33:07 -0500 Subject: [PATCH 13/13] default staked client in LocalCluster (#716) * default staked client in LocalCluster * fix underflow --- local-cluster/src/local_cluster.rs | 56 ++++++++++++++++++++++------ local-cluster/tests/local_cluster.rs | 32 ++-------------- 2 files changed, 48 insertions(+), 40 deletions(-) diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index fe27a4a1aed21c..8e43f1875ecf22 100644 --- a/local-cluster/src/local_cluster.rs +++ 
b/local-cluster/src/local_cluster.rs @@ -2,6 +2,7 @@ use { crate::{ cluster::{Cluster, ClusterValidatorInfo, QuicTpuClient, ValidatorInfo}, cluster_tests, + integration_tests::DEFAULT_NODE_STAKE, validator_configs::*, }, itertools::izip, @@ -47,7 +48,7 @@ use { transaction::Transaction, }, solana_stake_program::stake_state, - solana_streamer::socket::SocketAddrSpace, + solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, solana_tpu_client::tpu_client::{ TpuClient, TpuClientConfig, DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC, @@ -60,7 +61,7 @@ use { collections::HashMap, io::{Error, ErrorKind, Result}, iter, - net::UdpSocket, + net::{IpAddr, Ipv4Addr, UdpSocket}, path::{Path, PathBuf}, sync::{Arc, RwLock}, }, @@ -189,6 +190,46 @@ impl LocalCluster { pub fn new(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self { assert_eq!(config.validator_configs.len(), config.node_stakes.len()); + let connection_cache = match config.tpu_use_quic { + true => { + let client_keypair = Keypair::new(); + let stake = DEFAULT_NODE_STAKE; + + for validator_config in config.validator_configs.iter_mut() { + let mut overrides = HashMap::new(); + overrides.insert(client_keypair.pubkey(), stake); + validator_config.staked_nodes_overrides = Arc::new(RwLock::new(overrides)); + } + + assert!( + config.tpu_use_quic, + "no support for staked override forwarding without quic" + ); + + let total_stake = config.node_stakes.iter().sum::(); + let stakes = HashMap::from([ + (client_keypair.pubkey(), stake), + (Pubkey::new_unique(), total_stake.saturating_sub(stake)), + ]); + let staked_nodes = Arc::new(RwLock::new(StakedNodes::new( + Arc::new(stakes), + HashMap::::default(), // overrides + ))); + + Arc::new(ConnectionCache::new_with_client_options( + "connection_cache_local_cluster_quic_staked", + config.tpu_connection_pool_size, + None, + Some((&client_keypair, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))), + Some((&staked_nodes, 
&client_keypair.pubkey())), + )) + } + false => Arc::new(ConnectionCache::with_udp( + "connection_cache_local_cluster_udp", + config.tpu_connection_pool_size, + )), + }; + let mut validator_keys = { if let Some(ref keys) = config.validator_keys { assert_eq!(config.validator_configs.len(), keys.len()); @@ -321,16 +362,7 @@ impl LocalCluster { entry_point_info: leader_contact_info, validators, genesis_config, - connection_cache: match config.tpu_use_quic { - true => Arc::new(ConnectionCache::new_quic( - "connection_cache_local_cluster_quic", - config.tpu_connection_pool_size, - )), - false => Arc::new(ConnectionCache::with_udp( - "connection_cache_local_cluster_udp", - config.tpu_connection_pool_size, - )), - }, + connection_cache, }; let node_pubkey_to_vote_key: HashMap> = keys_in_genesis diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index ed95bf85d6c056..804c0db7884371 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -9,7 +9,7 @@ use { solana_accounts_db::{ hardened_unpack::open_genesis_config, utils::create_accounts_run_and_snapshot_dirs, }, - solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient}, + solana_client::thin_client::ThinClient, solana_core::{ consensus::{ tower_storage::FileTowerStorage, Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH, @@ -75,7 +75,7 @@ use { system_program, system_transaction, vote::state::VoteStateUpdate, }, - solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, + solana_streamer::socket::SocketAddrSpace, solana_turbine::broadcast_stage::{ broadcast_duplicates_run::{BroadcastDuplicatesConfig, ClusterPartition}, BroadcastStageType, @@ -87,12 +87,11 @@ use { fs, io::Read, iter, - net::{IpAddr, Ipv4Addr}, num::NonZeroUsize, path::Path, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, Mutex, RwLock, + Arc, Mutex, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -362,12 +361,6 @@ 
fn test_forwarding() { ..ClusterConfig::default() }; - let client_keypair = Keypair::new(); - let mut overrides = HashMap::new(); - let stake = DEFAULT_NODE_STAKE * 10; - let total_stake = stake + config.node_stakes.iter().sum::(); - overrides.insert(client_keypair.pubkey(), stake); - config.validator_configs[1].staked_nodes_overrides = Arc::new(RwLock::new(overrides)); let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let cluster_nodes = discover_cluster( @@ -385,28 +378,11 @@ fn test_forwarding() { .find(|c| c.pubkey() != &leader_pubkey) .unwrap(); - let stakes = HashMap::from([ - (client_keypair.pubkey(), stake), - (Pubkey::new_unique(), total_stake - stake), - ]); - let staked_nodes = Arc::new(RwLock::new(StakedNodes::new( - Arc::new(stakes), - HashMap::::default(), // overrides - ))); - - let client_connection_cache = Arc::new(ConnectionCache::new_with_client_options( - "client-connection-cache", - 1, - None, - Some((&client_keypair, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))), - Some((&staked_nodes, &client_keypair.pubkey())), - )); - // Confirm that transactions were forwarded to and processed by the leader. cluster_tests::send_many_transactions( validator_info, &cluster.funding_keypair, - &client_connection_cache, + &cluster.connection_cache, 10, 20, );