From c60c94ffba76a549e5b45c41cf4e30940c5dadf6 Mon Sep 17 00:00:00 2001
From: benthecarman
Date: Wed, 21 Jun 2023 14:27:32 -0500
Subject: [PATCH] Response to review

---
 mutiny-core/src/ldkstorage.rs  |  99 ++++++++++++++++++------------
 mutiny-core/src/node.rs        | 104 ++++++++++++++++++++------------
 mutiny-core/src/nodemanager.rs | 107 ++++++++++++++++++++-----------
 mutiny-core/src/scb/mod.rs     |   2 +
 mutiny-wasm/src/lib.rs         |  11 +++-
 5 files changed, 212 insertions(+), 111 deletions(-)

diff --git a/mutiny-core/src/ldkstorage.rs b/mutiny-core/src/ldkstorage.rs
index 6e4daea0b..9929f99ba 100644
--- a/mutiny-core/src/ldkstorage.rs
+++ b/mutiny-core/src/ldkstorage.rs
@@ -177,7 +177,7 @@ impl<S: MutinyStorage> MutinyNodePersister<S> {
         let read_args = ChannelManagerReadArgs::new(
             keys_manager.clone(),
             keys_manager.clone(),
-            keys_manager.clone(),
+            keys_manager,
             fee_estimator,
             chain_monitor,
             mutiny_chain,
@@ -199,48 +199,73 @@ impl<S: MutinyStorage> MutinyNodePersister<S> {
             Err(_) => {
                 // no key manager stored, start a new one

-                // if regtest, we don't need to get the tip hash and can
-                // just use genesis, this also lets us use regtest in tests
-                let best_block = if network == Network::Regtest {
-                    BestBlock::from_network(network)
-                } else {
-                    let height_future = esplora
-                        .get_height()
-                        .map_err(|_| MutinyError::ChainAccessFailed);
-                    let hash_future = esplora
-                        .get_tip_hash()
-                        .map_err(|_| MutinyError::ChainAccessFailed);
-                    let (height, hash) = try_join!(height_future, hash_future)?;
-                    BestBlock::new(hash, height)
-                };
-                let chain_params = ChainParameters {
+                Self::create_new_channel_manager(
                     network,
-                    best_block,
-                };
-
-                let fresh_channel_manager: PhantomChannelManager<S> =
-                    channelmanager::ChannelManager::new(
-                        fee_estimator,
-                        chain_monitor,
-                        mutiny_chain,
-                        router,
-                        mutiny_logger,
-                        keys_manager.clone(),
-                        keys_manager.clone(),
-                        keys_manager,
-                        default_user_config(),
-                        chain_params,
-                    );
-
-                Ok(ReadChannelManager {
-                    channel_manager: fresh_channel_manager,
-                    is_restarting: false,
+                    chain_monitor,
+                    mutiny_chain,
+                    fee_estimator,
+                    mutiny_logger,
+                    keys_manager,
+                    router,
                     channel_monitors,
-                })
+                    esplora,
+                )
+                .await
             }
         }
     }

+    #[allow(clippy::too_many_arguments)]
+    pub(crate) async fn create_new_channel_manager(
+        network: Network,
+        chain_monitor: Arc<ChainMonitor<S>>,
+        mutiny_chain: Arc<MutinyChain<S>>,
+        fee_estimator: Arc<MutinyFeeEstimator<S>>,
+        mutiny_logger: Arc<MutinyLogger>,
+        keys_manager: Arc<PhantomKeysManager<S>>,
+        router: Arc<Router>,
+        channel_monitors: Vec<(BlockHash, ChannelMonitor)>,
+        esplora: Arc<AsyncClient>,
+    ) -> Result<ReadChannelManager<S>, MutinyError> {
+        // if regtest, we don't need to get the tip hash and can
+        // just use genesis, this also lets us use regtest in tests
+        let best_block = if network == Network::Regtest {
+            BestBlock::from_network(network)
+        } else {
+            let height_future = esplora
+                .get_height()
+                .map_err(|_| MutinyError::ChainAccessFailed);
+            let hash_future = esplora
+                .get_tip_hash()
+                .map_err(|_| MutinyError::ChainAccessFailed);
+            let (height, hash) = try_join!(height_future, hash_future)?;
+            BestBlock::new(hash, height)
+        };
+        let chain_params = ChainParameters {
+            network,
+            best_block,
+        };
+
+        let fresh_channel_manager: PhantomChannelManager<S> = channelmanager::ChannelManager::new(
+            fee_estimator,
+            chain_monitor,
+            mutiny_chain,
+            router,
+            mutiny_logger,
+            keys_manager.clone(),
+            keys_manager.clone(),
+            keys_manager,
+            default_user_config(),
+            chain_params,
+        );
+
+        Ok(ReadChannelManager {
+            channel_manager: fresh_channel_manager,
+            is_restarting: false,
+            channel_monitors,
+        })
+    }
+
     pub(crate) fn persist_payment_info(
         &self,
         payment_hash: &PaymentHash,
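The extracted helper keeps the concurrent tip lookup the old inline code used: both esplora requests start together and try_join! fails fast if either errors. The pattern in isolation, a minimal sketch assuming the async esplora_client API, with a hypothetical ChainAccessFailed error standing in for MutinyError::ChainAccessFailed:

    use bitcoin::BlockHash;
    use esplora_client::AsyncClient;
    use futures::{try_join, TryFutureExt};

    // Hypothetical stand-in for MutinyError::ChainAccessFailed.
    #[derive(Debug)]
    struct ChainAccessFailed;

    // Start both tip requests concurrently; try_join! polls them together
    // and returns the first error if either request fails.
    async fn fetch_tip(esplora: &AsyncClient) -> Result<(u32, BlockHash), ChainAccessFailed> {
        let height = esplora.get_height().map_err(|_| ChainAccessFailed);
        let hash = esplora.get_tip_hash().map_err(|_| ChainAccessFailed);
        try_join!(height, hash)
    }
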
diff --git a/mutiny-core/src/node.rs b/mutiny-core/src/node.rs
index 5817923c5..86680b4c4 100644
--- a/mutiny-core/src/node.rs
+++ b/mutiny-core/src/node.rs
@@ -171,6 +171,7 @@ impl<S: MutinyStorage> Node<S> {
         lsp_clients: &[LspClient],
         logger: Arc<MutinyLogger>,
         do_not_connect_peers: bool,
+        empty_state: bool,
         #[cfg(target_arch = "wasm32")] websocket_proxy_addr: String,
     ) -> Result<Self, MutinyError> {
         log_info!(logger, "initializing a new node: {uuid}");
@@ -203,11 +204,17 @@ impl<S: MutinyStorage> Node<S> {
         ));

         // read channelmonitor state from disk
-        let channel_monitors = persister
-            .read_channel_monitors(keys_manager.clone())
-            .map_err(|e| MutinyError::ReadError {
-                source: MutinyStorageError::Other(anyhow!("failed to read channel monitors: {e}")),
-            })?;
+        let channel_monitors = if empty_state {
+            vec![]
+        } else {
+            persister
+                .read_channel_monitors(keys_manager.clone())
+                .map_err(|e| MutinyError::ReadError {
+                    source: MutinyStorageError::Other(anyhow!(
+                        "failed to read channel monitors: {e}"
+                    )),
+                })?
+        };

         let network_graph = gossip_sync.network_graph().clone();

@@ -219,8 +226,8 @@ impl<S: MutinyStorage> Node<S> {
         ));

         // init channel manager
-        let mut read_channel_manager = persister
-            .read_channel_manager(
+        let mut read_channel_manager = if empty_state {
+            MutinyNodePersister::create_new_channel_manager(
                 network,
                 chain_monitor.clone(),
                 chain.clone(),
@@ -231,7 +238,22 @@ impl<S: MutinyStorage> Node<S> {
                 fee_estimator.clone(),
                 logger.clone(),
                 keys_manager.clone(),
                 router.clone(),
                 channel_monitors,
                 esplora,
             )
-            .await?;
+            .await?
+        } else {
+            persister
+                .read_channel_manager(
+                    network,
+                    chain_monitor.clone(),
+                    chain.clone(),
+                    fee_estimator.clone(),
+                    logger.clone(),
+                    keys_manager.clone(),
+                    router.clone(),
+                    channel_monitors,
+                    esplora,
+                )
+                .await?
+        };

         let channel_manager: Arc<PhantomChannelManager<S>> =
             Arc::new(read_channel_manager.channel_manager);
@@ -352,34 +374,36 @@ impl<S: MutinyStorage> Node<S> {
         // processor so we prevent any race conditions.
         // if we fail to read the spendable outputs, just log a warning and
         // continue
-        let retry_spendable_outputs = persister
-            .get_failed_spendable_outputs()
-            .map_err(|e| MutinyError::ReadError {
-                source: MutinyStorageError::Other(anyhow!(
-                    "failed to read retry spendable outputs: {e}"
-                )),
-            })
-            .unwrap_or_else(|e| {
-                log_warn!(logger, "Failed to read retry spendable outputs: {e}");
-                vec![]
-            });
-
-        if !retry_spendable_outputs.is_empty() {
-            log_info!(
-                logger,
-                "Retrying {} spendable outputs",
-                retry_spendable_outputs.len()
-            );
-
-            match event_handler
-                .handle_spendable_outputs(&retry_spendable_outputs)
-                .await
-            {
-                Ok(_) => {
-                    log_info!(logger, "Successfully retried spendable outputs");
-                    persister.clear_failed_spendable_outputs()?;
+        if !empty_state {
+            let retry_spendable_outputs = persister
+                .get_failed_spendable_outputs()
+                .map_err(|e| MutinyError::ReadError {
+                    source: MutinyStorageError::Other(anyhow!(
+                        "failed to read retry spendable outputs: {e}"
+                    )),
+                })
+                .unwrap_or_else(|e| {
+                    log_warn!(logger, "Failed to read retry spendable outputs: {e}");
+                    vec![]
+                });
+
+            if !retry_spendable_outputs.is_empty() {
+                log_info!(
+                    logger,
+                    "Retrying {} spendable outputs",
+                    retry_spendable_outputs.len()
+                );
+
+                match event_handler
+                    .handle_spendable_outputs(&retry_spendable_outputs)
+                    .await
+                {
+                    Ok(_) => {
+                        log_info!(logger, "Successfully retried spendable outputs");
+                        persister.clear_failed_spendable_outputs()?;
+                    }
+                    Err(e) => log_warn!(logger, "Failed to retry spendable outputs {e}"),
                 }
-                Err(e) => log_warn!(logger, "Failed to retry spendable outputs {e}"),
             }
         }

@@ -1349,15 +1373,19 @@ impl<S: MutinyStorage> Node<S> {
         self.await_chan_funding_tx(init, &pubkey, timeout).await
     }

-    pub fn create_static_channel_backup(&self) -> StaticChannelBackup {
+    pub fn create_static_channel_backup(&self) -> Result<StaticChannelBackup, MutinyError> {
        let mut monitors = HashMap::new();
         for outpoint in self.chain_monitor.list_monitors() {
-            let monitor = self.chain_monitor.get_monitor(outpoint).unwrap();
+            let monitor = self
+                .chain_monitor
+                .get_monitor(outpoint)
+                .map_err(|_| MutinyError::Other(anyhow!("Failed to get channel monitor")))?;
+
             let monitor_bytes = monitor.encode();
             monitors.insert(outpoint.into_bitcoin_outpoint(), monitor_bytes);
         }

-        StaticChannelBackup { monitors }
+        Ok(StaticChannelBackup { monitors })
     }

     pub async fn recover_from_static_channel_backup(
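The new empty_state flag makes Node::new skip every read of persisted channel state, so a node rebuilt during SCB recovery starts from a clean slate instead of whatever is on disk; the same flag also skips the failed-spendable-output retry at startup. The shape of that gate reduced to a standalone sketch, where the closure is a hypothetical stand-in for the persister calls:

    // When empty_state is set (fresh node during SCB recovery), skip the
    // disk read entirely and start with no channel monitors; otherwise
    // fall through to whatever state is persisted.
    fn load_monitors<M, E>(
        empty_state: bool,
        read_from_disk: impl FnOnce() -> Result<Vec<M>, E>,
    ) -> Result<Vec<M>, E> {
        if empty_state {
            Ok(vec![])
        } else {
            read_from_disk()
        }
    }
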
diff --git a/mutiny-core/src/nodemanager.rs b/mutiny-core/src/nodemanager.rs
index e358f4709..e9af2f04a 100644
--- a/mutiny-core/src/nodemanager.rs
+++ b/mutiny-core/src/nodemanager.rs
@@ -7,7 +7,10 @@ use crate::gossip::*;
 use crate::labels::LabelStorage;
 use crate::logging::LOGGING_KEY;
 use crate::redshift::{RedshiftManager, RedshiftStatus, RedshiftStorage};
-use crate::scb::{EncryptedSCB, StaticChannelBackup, StaticChannelBackupStorage};
+use crate::scb::{
+    EncryptedSCB, StaticChannelBackup, StaticChannelBackupStorage,
+    SCB_ENCRYPTION_KEY_DERIVATION_PATH,
+};
 use crate::storage::{MutinyStorage, KEYCHAIN_STORE_KEY};
 use crate::utils::sleep;
 use crate::{
@@ -621,6 +624,7 @@ impl<S: MutinyStorage> NodeManager<S> {
             &lsp_clients,
             logger.clone(),
             c.do_not_connect_peers,
+            false,
             #[cfg(target_arch = "wasm32")]
             websocket_proxy_addr.clone(),
         )
@@ -1924,18 +1928,19 @@ impl<S: MutinyStorage> NodeManager<S> {
     fn get_scb_key(&self) -> SecretKey {
         let seed = self.mnemonic.to_seed("");
         let xprivkey = ExtendedPrivKey::new_master(self.network, &seed).unwrap();
-        let path = DerivationPath::from_str("m/444'/444'/444'").unwrap();
+        let path = DerivationPath::from_str(SCB_ENCRYPTION_KEY_DERIVATION_PATH).unwrap();
         let context = Secp256k1::new();
         xprivkey.derive_priv(&context, &path).unwrap().private_key
     }

-    // todo add docs
-    pub async fn create_static_channel_backup(&self) -> EncryptedSCB {
+    /// Creates a static channel backup for all the nodes in the node manager.
+    /// The backup is encrypted with the SCB key.
+    pub async fn create_static_channel_backup(&self) -> Result<EncryptedSCB, MutinyError> {
         let nodes = self.nodes.lock().await;
         let mut backups: HashMap<PublicKey, (NodeIndex, StaticChannelBackup)> = HashMap::new();
         for (_, node) in nodes.iter() {
-            let scb = node.create_static_channel_backup();
+            let scb = node.create_static_channel_backup()?;
             backups.insert(node.pubkey, (node.node_index(), scb));
         }
@@ -1953,10 +1958,17 @@ impl<S: MutinyStorage> NodeManager<S> {

         // encrypt
         let encryption_key = self.get_scb_key();
-        scb.encrypt(&encryption_key)
+        let scb = scb.encrypt(&encryption_key);
+        log_debug!(
+            self.logger,
+            "Created SCB with a size of {} bytes",
+            scb.encode().len()
+        );
+        Ok(scb)
     }

-    // todo add docs
+    /// Takes an encrypted static channel backup and recovers the channels from it.
+    /// If the backup is encrypted with a different key than the current key, it will fail.
     pub async fn recover_from_static_channel_backup(
         &self,
         scb: EncryptedSCB,
@@ -1965,37 +1977,61 @@ impl<S: MutinyStorage> NodeManager<S> {
         let encryption_key = self.get_scb_key();
         let scb = scb.decrypt(&encryption_key)?;

-        let max_index = scb
-            .backups
-            .values()
-            .map(|(node_index, _)| node_index.child_index)
-            .max()
-            .unwrap_or(0);
-
-        let current_index = self
-            .node_storage
-            .lock()
-            .await
-            .nodes
-            .values()
-            .map(|n| n.child_index)
-            .max()
-            .unwrap_or(0);
-
-        let needed_nodes = max_index - current_index;
-        if needed_nodes > 0 {
-            log_info!(
-                self.logger,
-                "Creating {needed_nodes} new nodes to recover from backup"
-            );
-            for _ in 0..needed_nodes {
-                self.new_node().await?;
-            }
+        // stop all nodes, todo stop in parallel
+        for node in self.nodes.lock().await.values() {
+            node.stop().await?;
         }

-        // fixme: run these futures in parallel
         for (pubkey, (node_index, backup)) in scb.backups {
-            match self.get_node(&pubkey).await {
+            // find the uuid if we have it, otherwise create a new one and save it
+            let uuid = {
+                let mut node_mutex = self.node_storage.lock().await;
+                let current = node_mutex
+                    .nodes
+                    .iter()
+                    .find(|(_, n)| *n == &node_index)
+                    .map(|(uuid, _)| uuid.clone());
+
+                match current {
+                    Some(uuid) => uuid,
+                    None => {
+                        let mut existing_nodes = self.storage.get_nodes()?;
+                        let new_uuid = Uuid::new_v4().to_string();
+                        existing_nodes
+                            .nodes
+                            .insert(new_uuid.clone(), node_index.clone());
+
+                        self.storage.insert_nodes(existing_nodes.clone())?;
+                        node_mutex.nodes = existing_nodes.nodes.clone();
+
+                        new_uuid
+                    }
+                }
+            };
+
+            // create a fresh instance of each node
+            let new_node = Node::new(
+                uuid,
+                &node_index,
+                &self.mnemonic,
+                self.storage.clone(),
+                self.gossip_sync.clone(),
+                self.scorer.clone(),
+                self.chain.clone(),
+                self.fee_estimator.clone(),
+                self.wallet.clone(),
+                self.network,
+                self.esplora.clone(),
+                &self.lsp_clients,
+                self.logger.clone(),
+                true,
+                true,
+                #[cfg(target_arch = "wasm32")]
+                self.websocket_proxy_addr.clone(),
+            )
+            .await;
+
+            match new_node {
                 Ok(node) => {
                     debug_assert!(node.node_index().child_index == node_index.child_index);
                     log_info!(
@@ -2284,6 +2320,7 @@ pub(crate) async fn create_new_node_from_node_manager(
         &node_manager.lsp_clients,
         node_manager.logger.clone(),
         node_manager.do_not_connect_peers,
+        false,
         #[cfg(target_arch = "wasm32")]
         node_manager.websocket_proxy_addr.clone(),
     )
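Recovery no longer counts child indices and spins up filler nodes; it stops every running node and rebuilds each one from its backup entry, reusing the stored uuid when a node with a matching NodeIndex already exists. The lookup-or-create step in isolation, assuming the uuid crate with the v4 feature and a hypothetical two-field NodeIndex (the real type carries more):

    use std::collections::HashMap;
    use uuid::Uuid;

    // Hypothetical minimal stand-in for mutiny-core's NodeIndex.
    #[derive(Clone, PartialEq)]
    struct NodeIndex {
        child_index: u32,
    }

    // Reuse the uuid of an existing node with a matching index; otherwise
    // mint a new uuid and register the index under it.
    fn uuid_for_index(nodes: &mut HashMap<String, NodeIndex>, index: &NodeIndex) -> String {
        let existing = nodes
            .iter()
            .find(|(_, n)| *n == index)
            .map(|(uuid, _)| uuid.clone());
        match existing {
            Some(uuid) => uuid,
            None => {
                let new_uuid = Uuid::new_v4().to_string();
                nodes.insert(new_uuid.clone(), index.clone());
                new_uuid
            }
        }
    }
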
diff --git a/mutiny-core/src/scb/mod.rs b/mutiny-core/src/scb/mod.rs
index becfa83f0..e2353fff0 100644
--- a/mutiny-core/src/scb/mod.rs
+++ b/mutiny-core/src/scb/mod.rs
@@ -20,6 +20,8 @@ use std::str::FromStr;
 type Aes256CbcEnc = Encryptor<Aes256>;
 type Aes256CbcDec = Decryptor<Aes256>;

+pub const SCB_ENCRYPTION_KEY_DERIVATION_PATH: &str = "m/444'/444'/444'";
+
 /// A static channel backup is a backup for the channels for a given node.
 /// These are backups of the channel monitors, which store the necessary
 /// information to recover the channel in case of a failure.
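Hoisting the path into scb/mod.rs gives encryption and decryption a single source of truth for key derivation. The derivation mirrored from NodeManager::get_scb_key, assuming the pre-0.30 bitcoin crate paths this diff itself uses:

    use std::str::FromStr;

    use bitcoin::secp256k1::{Secp256k1, SecretKey};
    use bitcoin::util::bip32::{DerivationPath, ExtendedPrivKey};
    use bitcoin::Network;

    const SCB_ENCRYPTION_KEY_DERIVATION_PATH: &str = "m/444'/444'/444'";

    // Derive the SCB encryption key from the wallet seed at the fixed
    // hardened path; the same seed always yields the same key, so a backup
    // made earlier stays decryptable later.
    fn scb_key(seed: &[u8], network: Network) -> SecretKey {
        let xprivkey = ExtendedPrivKey::new_master(network, seed).expect("valid seed");
        let path = DerivationPath::from_str(SCB_ENCRYPTION_KEY_DERIVATION_PATH).expect("valid path");
        xprivkey
            .derive_priv(&Secp256k1::new(), &path)
            .expect("derivation")
            .private_key
    }
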
diff --git a/mutiny-wasm/src/lib.rs b/mutiny-wasm/src/lib.rs
index a85187ceb..8ed83010b 100644
--- a/mutiny-wasm/src/lib.rs
+++ b/mutiny-wasm/src/lib.rs
@@ -728,6 +728,8 @@ impl MutinyWallet {
         )?)
     }

+    /// Takes an encrypted static channel backup and recovers the channels from it.
+    /// If the backup is encrypted with a different key than the current key, it will fail.
     #[wasm_bindgen]
     pub async fn recover_from_static_channel_backup(
         &self,
@@ -741,8 +743,15 @@ impl MutinyWallet {
         Ok(())
     }

+    /// Creates a static channel backup for all the nodes in the node manager.
+    /// The backup is encrypted with the SCB key.
     #[wasm_bindgen]
     pub async fn create_static_channel_backup(&self) -> Result<String, MutinyJsError> {
-        let scb = self.inner.node_manager.create_static_channel_backup().await;
+        let scb = self
+            .inner
+            .node_manager
+            .create_static_channel_backup()
+            .await?;
         Ok(scb.to_string())
     }
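
With both wasm endpoints now fallible, a caller can round-trip a backup through its string form. A hypothetical usage sketch against these bindings, assuming the recover endpoint accepts the same string encoding the create endpoint returns:

    // Hypothetical round trip: export the encrypted SCB as a string for
    // off-device storage, then feed it back later to restore channels.
    // Both calls now surface errors instead of panicking.
    async fn scb_round_trip(wallet: &MutinyWallet) -> Result<(), MutinyJsError> {
        let encrypted: String = wallet.create_static_channel_backup().await?;
        // ...persist `encrypted` somewhere durable, then on a fresh wallet:
        wallet.recover_from_static_channel_backup(encrypted).await?;
        Ok(())
    }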