diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs
index c331ac6943e..300d005568a 100644
--- a/chain/client/src/client.rs
+++ b/chain/client/src/client.rs
@@ -207,10 +207,6 @@ impl Client {
             .produce_chunk_add_transactions_time_limit
             .update(update_client_config.produce_chunk_add_transactions_time_limit);
     }
-
-    pub(crate) fn update_validator_signer(&self, signer: Arc<ValidatorSigner>) -> bool {
-        self.validator_signer.update(Some(signer))
-    }
 }
 
 // Debug information about the upcoming block.
@@ -403,7 +399,7 @@ impl Client {
             shards_manager_adapter,
             sharded_tx_pool,
             network_adapter,
-            validator_signer: MutableConfigValue::new(validator_signer, "validator_signer"),
+            validator_signer,
             pending_approvals: lru::LruCache::new(num_block_producer_seats),
             catchup_state_syncs: HashMap::new(),
             epoch_sync,
diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs
index 98ffe1e4ee7..673e9d52465 100644
--- a/chain/client/src/client_actor.rs
+++ b/chain/client/src/client_actor.rs
@@ -1159,17 +1159,12 @@ impl ClientActorInner {
     /// Returns the delay before the next time `check_triggers` should be called, which is
     /// min(time until the closest trigger, 1 second).
     pub(crate) fn check_triggers(&mut self, ctx: &mut dyn DelayedActionRunner<Self>) -> Duration {
-        let _span = tracing::debug_span!(target: "client", "check_triggers").entered();
+        let _span: tracing::span::EnteredSpan =
+            tracing::debug_span!(target: "client", "check_triggers").entered();
         if let Some(config_updater) = &mut self.config_updater {
-            config_updater.try_update(
-                &|updateable_client_config| {
-                    self.client.update_client_config(updateable_client_config)
-                },
-                &|validator_signer| self.client.update_validator_signer(validator_signer),
-            );
-            if config_updater.was_validator_signer_updated() {
-                self.network_adapter.send(PeerManagerMessageRequest::AdvertiseTier1Proxies);
-            }
+            config_updater.try_update(&|updateable_client_config| {
+                self.client.update_client_config(updateable_client_config)
+            });
         }
 
         // Check block height to trigger expected shutdown
diff --git a/chain/client/src/config_updater.rs b/chain/client/src/config_updater.rs
index 08f9ce26a32..1f584b08cc4 100644
--- a/chain/client/src/config_updater.rs
+++ b/chain/client/src/config_updater.rs
@@ -1,6 +1,5 @@
 use near_chain_configs::UpdateableClientConfig;
 use near_dyn_configs::{UpdateableConfigLoaderError, UpdateableConfigs};
-use near_primitives::validator_signer::ValidatorSigner;
 use std::sync::Arc;
 use tokio::sync::broadcast::Receiver;
 
@@ -10,29 +9,18 @@ pub struct ConfigUpdater {
     rx_config_update: Receiver<Result<UpdateableConfigs, Arc<UpdateableConfigLoaderError>>>,
     /// Represents the latest Error of reading the dynamically reloadable configs.
     updateable_configs_error: Option<Arc<UpdateableConfigLoaderError>>,
-    /// Represents whether validator key was updated during the last reload.
-    validator_signer_updated: bool,
 }
 
 impl ConfigUpdater {
     pub fn new(
         rx_config_update: Receiver<Result<UpdateableConfigs, Arc<UpdateableConfigLoaderError>>>,
     ) -> Self {
-        Self { rx_config_update, updateable_configs_error: None, validator_signer_updated: false }
-    }
-
-    pub fn was_validator_signer_updated(&self) -> bool {
-        self.validator_signer_updated
+        Self { rx_config_update, updateable_configs_error: None }
     }
 
     /// Check if any of the configs were updated.
     /// If they did, the receiver (rx_config_update) will contain a clone of the new configs.
-    pub fn try_update(
-        &mut self,
-        update_client_config_fn: &dyn Fn(UpdateableClientConfig),
-        update_validator_signer_fn: &dyn Fn(Arc<ValidatorSigner>) -> bool,
-    ) {
-        self.validator_signer_updated = false;
+    pub fn try_update(&mut self, update_client_config_fn: &dyn Fn(UpdateableClientConfig)) {
         while let Ok(maybe_updateable_configs) = self.rx_config_update.try_recv() {
             match maybe_updateable_configs {
                 Ok(updateable_configs) => {
@@ -40,12 +28,6 @@ impl ConfigUpdater {
                         update_client_config_fn(client_config);
                         tracing::info!(target: "config", "Updated ClientConfig");
                     }
-                    if let Some(validator_signer) = updateable_configs.validator_signer {
-                        if update_validator_signer_fn(validator_signer) {
-                            self.validator_signer_updated = true;
-                        }
-                        tracing::info!(target: "config", "Updated validator key");
-                    }
                     self.updateable_configs_error = None;
                 }
                 Err(err) => {
diff --git a/chain/network/src/peer_manager/peer_manager_actor.rs b/chain/network/src/peer_manager/peer_manager_actor.rs
index d21d23dc2da..07f8dbc4bed 100644
--- a/chain/network/src/peer_manager/peer_manager_actor.rs
+++ b/chain/network/src/peer_manager/peer_manager_actor.rs
@@ -1026,14 +1026,6 @@ impl PeerManagerActor {
                     self.handle_msg_network_requests(msg, ctx),
                 )
             }
-            PeerManagerMessageRequest::AdvertiseTier1Proxies => {
-                let state = self.state.clone();
-                let clock = self.clock.clone();
-                ctx.spawn(wrap_future(async move {
-                    state.tier1_advertise_proxies(&clock).await;
-                }));
-                PeerManagerMessageResponse::AdvertiseTier1Proxies
-            }
             PeerManagerMessageRequest::OutboundTcpConnect(stream) => {
                 let peer_addr = stream.peer_addr;
                 if let Err(err) =
diff --git a/chain/network/src/types.rs b/chain/network/src/types.rs
index 50c37311ce0..befa5cf5248 100644
--- a/chain/network/src/types.rs
+++ b/chain/network/src/types.rs
@@ -162,9 +162,6 @@ pub struct SetChainInfo(pub ChainInfo);
 #[rtype(result = "PeerManagerMessageResponse")]
 pub enum PeerManagerMessageRequest {
     NetworkRequests(NetworkRequests),
-    /// Request PeerManager to advertise tier 1 proxies.
-    /// Used internally.
-    AdvertiseTier1Proxies,
     /// Request PeerManager to connect to the given peer.
     /// Used in tests and internally by PeerManager.
     /// TODO: replace it with AsyncContext::spawn/run_later for internal use.
@@ -196,7 +193,6 @@ impl PeerManagerMessageRequest {
 #[derive(actix::MessageResponse, Debug)]
 pub enum PeerManagerMessageResponse {
     NetworkResponses(NetworkResponses),
-    AdvertiseTier1Proxies,
     /// TEST-ONLY
     OutboundTcpConnect,
     FetchRoutingTable(RoutingTableInfo),
diff --git a/core/dyn-configs/src/lib.rs b/core/dyn-configs/src/lib.rs
index 834eed57a2b..17330e21676 100644
--- a/core/dyn-configs/src/lib.rs
+++ b/core/dyn-configs/src/lib.rs
@@ -3,7 +3,6 @@
 use near_async::time::Clock;
 use near_chain_configs::UpdateableClientConfig;
 use near_o11y::log_config::LogConfig;
-use near_primitives::validator_signer::ValidatorSigner;
 use serde::{Deserialize, Serialize};
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -18,8 +17,6 @@ pub struct UpdateableConfigs {
     pub log_config: Option<LogConfig>,
     /// Contents of the `config.json` corresponding to the mutable fields of `ClientConfig`.
     pub client_config: Option<UpdateableClientConfig>,
-    /// Validator key hot loaded from file.
-    pub validator_signer: Option<Arc<ValidatorSigner>>,
 }
 
 /// Pushes the updates to listeners.
@@ -38,8 +35,6 @@ pub enum UpdateableConfigLoaderError {
     OpenAndRead { file: PathBuf, err: std::io::Error },
     #[error("Can't open or read the config file {file:?}: {err:?}")]
     ConfigFileError { file: PathBuf, err: anyhow::Error },
-    #[error("Can't open or read the validator key file {file:?}: {err:?}")]
-    ValidatorKeyFileError { file: PathBuf, err: anyhow::Error },
     #[error("One or multiple dynamic config files reload errors {0:?}")]
     Errors(Vec<UpdateableConfigLoaderError>),
     #[error("No home dir set")]
diff --git a/nearcore/src/dyn_config.rs b/nearcore/src/dyn_config.rs
index c2582ba4606..0daf5ed1a94 100644
--- a/nearcore/src/dyn_config.rs
+++ b/nearcore/src/dyn_config.rs
@@ -2,12 +2,8 @@ use crate::config::Config;
 use near_chain_configs::UpdateableClientConfig;
 use near_dyn_configs::{UpdateableConfigLoaderError, UpdateableConfigs};
 use near_o11y::log_config::LogConfig;
-use near_primitives::validator_signer::ValidatorSigner;
 use serde::Deserialize;
-use std::{
-    path::{Path, PathBuf},
-    sync::Arc,
-};
+use std::path::{Path, PathBuf};
 
 pub const LOG_CONFIG_FILENAME: &str = "log_config.json";
 
@@ -35,22 +31,9 @@ pub fn read_updateable_configs(
     };
     let updateable_client_config = config.as_ref().map(get_updateable_client_config);
 
-    let validator_signer = if let Some(config) = config {
-        read_validator_key(home_dir, &config).unwrap_or_else(|err| {
-            errs.push(err);
-            None
-        })
-    } else {
-        None
-    };
-
     if errs.is_empty() {
         crate::metrics::CONFIG_CORRECT.set(1);
-        Ok(UpdateableConfigs {
-            log_config,
-            client_config: updateable_client_config,
-            validator_signer,
-        })
+        Ok(UpdateableConfigs { log_config, client_config: updateable_client_config })
     } else {
         tracing::warn!(target: "neard", "Dynamically updateable configs are not valid. Please fix this ASAP otherwise the node will be unable to restart: {:?}", &errs);
         crate::metrics::CONFIG_CORRECT.set(0);
@@ -103,23 +86,3 @@ where
         },
     }
 }
-
-fn read_validator_key(
-    home_dir: &Path,
-    config: &Config,
-) -> Result<Option<Arc<ValidatorSigner>>, UpdateableConfigLoaderError> {
-    let validator_file: PathBuf = home_dir.join(&config.validator_key_file);
-    match crate::config::load_validator_key(&validator_file) {
-        Ok(Some(validator_signer)) => {
-            tracing::info!(target: "neard", "Hot loading validator key {}.", validator_file.display());
-            Ok(Some(validator_signer))
-        }
-        Ok(None) => {
-            tracing::info!(target: "neard", "No validator key {}.", validator_file.display());
-            Ok(None)
-        }
-        Err(err) => {
-            Err(UpdateableConfigLoaderError::ValidatorKeyFileError { file: validator_file, err })
-        }
-    }
-}
diff --git a/nightly/pytest-sanity.txt b/nightly/pytest-sanity.txt
index 5668f049449..af343e4bc7c 100644
--- a/nightly/pytest-sanity.txt
+++ b/nightly/pytest-sanity.txt
@@ -112,8 +112,6 @@ pytest --timeout=240 sanity/switch_node_key.py
 pytest --timeout=240 sanity/switch_node_key.py --features nightly
 pytest --timeout=240 sanity/validator_switch_key.py
 pytest --timeout=240 sanity/validator_switch_key.py --features nightly
-pytest --timeout=120 sanity/validator_switch_key_quick.py
-pytest --timeout=120 sanity/validator_switch_key_quick.py --features nightly
 pytest sanity/proxy_simple.py
 pytest sanity/proxy_simple.py --features nightly
 pytest sanity/proxy_restart.py
diff --git a/pytest/lib/cluster.py b/pytest/lib/cluster.py
index f2a53abcc62..76c2df1d324 100644
--- a/pytest/lib/cluster.py
+++ b/pytest/lib/cluster.py
@@ -536,11 +536,6 @@ def kill(self, *, gentle=False):
             self._process.wait(5)
         self._process = None
 
-    def reload_updateable_config(self):
-        logger.info(f"Reloading updateable config for node {self.ordinal}.")
-        """Sends SIGHUP signal to the process in order to trigger updateable config reload."""
-        self._process.send_signal(signal.SIGHUP)
-
     def reset_data(self):
         shutil.rmtree(os.path.join(self.node_dir, "data"))
diff --git a/pytest/tests/sanity/validator_switch_key_quick.py b/pytest/tests/sanity/validator_switch_key_quick.py
deleted file mode 100755
index 70f91583074..00000000000
--- a/pytest/tests/sanity/validator_switch_key_quick.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python3
-# Starts three validating nodes and one non-validating node
-# Set a new validator key that has the same account id as one of
-# the validating nodes. Stake that account with the new key
-# and make sure that the network doesn't stall even after
-# the non-validating node becomes a validator.
-
-import sys, time
-import pathlib
-
-sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))
-
-from cluster import start_cluster
-
-EPOCH_LENGTH = 20
-TIMEOUT = 100
-NUM_VALIDATORS = 2
-
-client_config = {
-    "tracked_shards": [0],  # Track all shards
-    "state_sync_enabled": True,
-    "store.state_snapshot_enabled": True
-}
-config_map = {i: client_config for i in range(NUM_VALIDATORS + 1)}
-nodes = start_cluster(
-    NUM_VALIDATORS, 1, 1, None,
-    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
-     ["chunk_producer_kickout_threshold", 10]], config_map)
-time.sleep(2)
-
-nodes[NUM_VALIDATORS].reset_validator_key(nodes[0].validator_key)
-nodes[0].kill()
-nodes[NUM_VALIDATORS].reload_updateable_config()
-nodes[NUM_VALIDATORS].stop_checking_store()
-time.sleep(2)
-
-block = nodes[1].get_latest_block()
-max_height = block.height + 4 * EPOCH_LENGTH
-start_time = time.time()
-
-while True:
-    assert time.time() - start_time < TIMEOUT, 'Validators got stuck'
-    old_validator_height = nodes[1].get_latest_block().height
-    new_validator_height = nodes[NUM_VALIDATORS].get_latest_block().height
-    if old_validator_height > max_height and new_validator_height > max_height:
-        break
-    info = nodes[1].json_rpc('validators', 'latest')
-    next_validators = info['result']['next_validators']
-    account_ids = [v['account_id'] for v in next_validators]
-    print(account_ids)
-    assert len(account_ids) == NUM_VALIDATORS, 'Number of validators do not match'
-
-    # We copied over 'test0' validator key, along with validator account ID.
-    # Therefore, despite nodes[0] being stopped, 'test0' still figures as active validator.
-    assert sorted(account_ids)[0] == 'test0'
-    statuses = sorted([(node_idx, nodes[node_idx].get_latest_block()) for node_idx in range(1, NUM_VALIDATORS + 1)],
-                      key=lambda element: element[1].height)
-    print(statuses)
-    last = statuses.pop()
-    cur_height = last[1].height
-    node = nodes[last[0]]
-    succeed = True
-    for _, block in statuses:
-        try:
-            node.get_block(block.hash)
-        except Exception:
-            succeed = False
-            break
-    print('Succeed', succeed)
-    if statuses[0][1].height > max_height - EPOCH_LENGTH // 2 and succeed:
-        sys.exit(0)
-    time.sleep(1)
-
-assert False, 'Nodes are not synced'
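
Note on the mechanism being unwired above: `Client.validator_signer` was held in a `MutableConfigValue`, and the removed `update_validator_signer` swapped its contents, returning a bool that told the caller whether the key actually changed (the trigger for re-advertising TIER1 proxies). The following is a minimal sketch of such a hot-swappable cell for orientation only; it is not nearcore's actual `MutableConfigValue` API, and the `Mutex`-backed storage and `PartialEq`-based change detection here are assumptions.

use std::sync::Mutex;

/// Hypothetical stand-in for a hot-swappable config cell.
pub struct MutableValue<T: Clone + PartialEq> {
    value: Mutex<T>,
}

impl<T: Clone + PartialEq> MutableValue<T> {
    pub fn new(value: T) -> Self {
        Self { value: Mutex::new(value) }
    }

    /// Returns a clone of the currently stored value.
    pub fn get(&self) -> T {
        self.value.lock().unwrap().clone()
    }

    /// Swaps in `new_value`, returning true iff it differs from the old one.
    /// This mirrors the bool that the deleted `update_validator_signer` bubbled
    /// up through `ConfigUpdater::was_validator_signer_updated`.
    pub fn update(&self, new_value: T) -> bool {
        let mut guard = self.value.lock().unwrap();
        if *guard == new_value {
            false
        } else {
            *guard = new_value;
            true
        }
    }
}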