Commit

Response to review
benthecarman committed Jun 23, 2023
1 parent a1186c9 commit c60c94f
Showing 5 changed files with 212 additions and 111 deletions.
99 changes: 62 additions & 37 deletions mutiny-core/src/ldkstorage.rs
@@ -177,7 +177,7 @@ impl<S: MutinyStorage> MutinyNodePersister<S> {
         let read_args = ChannelManagerReadArgs::new(
             keys_manager.clone(),
             keys_manager.clone(),
-            keys_manager.clone(),
+            keys_manager,
             fee_estimator,
             chain_monitor,
             mutiny_chain,
@@ -199,48 +199,73 @@ impl<S: MutinyStorage> MutinyNodePersister<S> {
             Err(_) => {
                 // no key manager stored, start a new one

-                // if regtest, we don't need to get the tip hash and can
-                // just use genesis, this also lets us use regtest in tests
-                let best_block = if network == Network::Regtest {
-                    BestBlock::from_network(network)
-                } else {
-                    let height_future = esplora
-                        .get_height()
-                        .map_err(|_| MutinyError::ChainAccessFailed);
-                    let hash_future = esplora
-                        .get_tip_hash()
-                        .map_err(|_| MutinyError::ChainAccessFailed);
-                    let (height, hash) = try_join!(height_future, hash_future)?;
-                    BestBlock::new(hash, height)
-                };
-                let chain_params = ChainParameters {
-                    network,
-                    best_block,
-                };
-
-                let fresh_channel_manager: PhantomChannelManager<S> =
-                    channelmanager::ChannelManager::new(
-                        fee_estimator,
-                        chain_monitor,
-                        mutiny_chain,
-                        router,
-                        mutiny_logger,
-                        keys_manager.clone(),
-                        keys_manager.clone(),
-                        keys_manager,
-                        default_user_config(),
-                        chain_params,
-                    );
-
-                Ok(ReadChannelManager {
-                    channel_manager: fresh_channel_manager,
-                    is_restarting: false,
-                    channel_monitors,
-                })
+                Self::create_new_channel_manager(
+                    network,
+                    chain_monitor,
+                    mutiny_chain,
+                    fee_estimator,
+                    mutiny_logger,
+                    keys_manager,
+                    router,
+                    channel_monitors,
+                    esplora,
+                )
+                .await
             }
         }
     }

+    #[allow(clippy::too_many_arguments)]
+    pub(crate) async fn create_new_channel_manager(
+        network: Network,
+        chain_monitor: Arc<ChainMonitor<S>>,
+        mutiny_chain: Arc<MutinyChain<S>>,
+        fee_estimator: Arc<MutinyFeeEstimator<S>>,
+        mutiny_logger: Arc<MutinyLogger>,
+        keys_manager: Arc<PhantomKeysManager<S>>,
+        router: Arc<Router>,
+        channel_monitors: Vec<(BlockHash, ChannelMonitor<InMemorySigner>)>,
+        esplora: Arc<AsyncClient>,
+    ) -> Result<ReadChannelManager<S>, MutinyError> {
+        // if regtest, we don't need to get the tip hash and can
+        // just use genesis, this also lets us use regtest in tests
+        let best_block = if network == Network::Regtest {
+            BestBlock::from_network(network)
+        } else {
+            let height_future = esplora
+                .get_height()
+                .map_err(|_| MutinyError::ChainAccessFailed);
+            let hash_future = esplora
+                .get_tip_hash()
+                .map_err(|_| MutinyError::ChainAccessFailed);
+            let (height, hash) = try_join!(height_future, hash_future)?;
+            BestBlock::new(hash, height)
+        };
+        let chain_params = ChainParameters {
+            network,
+            best_block,
+        };
+
+        let fresh_channel_manager: PhantomChannelManager<S> = channelmanager::ChannelManager::new(
+            fee_estimator,
+            chain_monitor,
+            mutiny_chain,
+            router,
+            mutiny_logger,
+            keys_manager.clone(),
+            keys_manager.clone(),
+            keys_manager,
+            default_user_config(),
+            chain_params,
+        );
+
+        Ok(ReadChannelManager {
+            channel_manager: fresh_channel_manager,
+            is_restarting: false,
+            channel_monitors,
+        })
+    }

pub(crate) fn persist_payment_info(
&self,
payment_hash: &PaymentHash,
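The new create_new_channel_manager helper fetches the chain tip height and hash concurrently with try_join!, mapping either failure to MutinyError::ChainAccessFailed, and only falls back to the genesis block on regtest. Below is a minimal, self-contained sketch of that try_join! pattern using only the futures crate; the stubbed get_height/get_tip_hash functions and the String error type are placeholders, not the esplora client API.

use futures::try_join;

// Stand-ins for the esplora calls; the real code maps their errors to ChainAccessFailed.
async fn get_height() -> Result<u32, String> {
    Ok(800_000)
}

async fn get_tip_hash() -> Result<String, String> {
    Ok("tip_hash_hex".to_string())
}

async fn best_block() -> Result<(u32, String), String> {
    // Run both lookups concurrently; fail if either one fails.
    let (height, hash) = try_join!(get_height(), get_tip_hash())?;
    Ok((height, hash))
}

fn main() {
    println!("{:?}", futures::executor::block_on(best_block()));
}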
104 changes: 66 additions & 38 deletions mutiny-core/src/node.rs
@@ -171,6 +171,7 @@ impl<S: MutinyStorage> Node<S> {
         lsp_clients: &[LspClient],
         logger: Arc<MutinyLogger>,
         do_not_connect_peers: bool,
+        empty_state: bool,
         #[cfg(target_arch = "wasm32")] websocket_proxy_addr: String,
     ) -> Result<Self, MutinyError> {
         log_info!(logger, "initializing a new node: {uuid}");
@@ -203,11 +204,17 @@ impl<S: MutinyStorage> Node<S> {
         ));

         // read channelmonitor state from disk
-        let channel_monitors = persister
-            .read_channel_monitors(keys_manager.clone())
-            .map_err(|e| MutinyError::ReadError {
-                source: MutinyStorageError::Other(anyhow!("failed to read channel monitors: {e}")),
-            })?;
+        let channel_monitors = if empty_state {
+            vec![]
+        } else {
+            persister
+                .read_channel_monitors(keys_manager.clone())
+                .map_err(|e| MutinyError::ReadError {
+                    source: MutinyStorageError::Other(anyhow!(
+                        "failed to read channel monitors: {e}"
+                    )),
+                })?
+        };

         let network_graph = gossip_sync.network_graph().clone();

@@ -219,8 +226,8 @@ impl<S: MutinyStorage> Node<S> {
         ));

         // init channel manager
-        let mut read_channel_manager = persister
-            .read_channel_manager(
+        let mut read_channel_manager = if empty_state {
+            MutinyNodePersister::create_new_channel_manager(
                 network,
                 chain_monitor.clone(),
                 chain.clone(),
@@ -231,7 +238,22 @@ impl<S: MutinyStorage> Node<S> {
                 channel_monitors,
                 esplora,
             )
-            .await?;
+            .await?
+        } else {
+            persister
+                .read_channel_manager(
+                    network,
+                    chain_monitor.clone(),
+                    chain.clone(),
+                    fee_estimator.clone(),
+                    logger.clone(),
+                    keys_manager.clone(),
+                    router.clone(),
+                    channel_monitors,
+                    esplora,
+                )
+                .await?
+        };

         let channel_manager: Arc<PhantomChannelManager<S>> =
             Arc::new(read_channel_manager.channel_manager);
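Both hunks above gate reads of persisted state behind the new empty_state flag: when it is set, the node starts from an empty monitor list and a freshly created channel manager instead of reading from storage. A rough, self-contained sketch of that branching pattern follows; the types and functions (PersistedState, load_or_create, read_from_disk) are made-up stand-ins, not anything from mutiny-core.

#[derive(Debug, Default)]
struct PersistedState {
    channel_monitors: Vec<String>,
}

// Stand-in for persister.read_channel_monitors / read_channel_manager.
fn read_from_disk() -> Result<PersistedState, String> {
    Ok(PersistedState {
        channel_monitors: vec!["monitor_a".to_string()],
    })
}

fn load_or_create(empty_state: bool) -> Result<PersistedState, String> {
    if empty_state {
        // Skip storage entirely and start with fresh, empty state.
        Ok(PersistedState::default())
    } else {
        read_from_disk()
    }
}

fn main() {
    println!("fresh: {:?}", load_or_create(true));
    println!("restored: {:?}", load_or_create(false));
}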
@@ -352,34 +374,36 @@ impl<S: MutinyStorage> Node<S> {
         // processor so we prevent any race conditions.
         // if we fail to read the spendable outputs, just log a warning and
         // continue
-        let retry_spendable_outputs = persister
-            .get_failed_spendable_outputs()
-            .map_err(|e| MutinyError::ReadError {
-                source: MutinyStorageError::Other(anyhow!(
-                    "failed to read retry spendable outputs: {e}"
-                )),
-            })
-            .unwrap_or_else(|e| {
-                log_warn!(logger, "Failed to read retry spendable outputs: {e}");
-                vec![]
-            });
-
-        if !retry_spendable_outputs.is_empty() {
-            log_info!(
-                logger,
-                "Retrying {} spendable outputs",
-                retry_spendable_outputs.len()
-            );
-
-            match event_handler
-                .handle_spendable_outputs(&retry_spendable_outputs)
-                .await
-            {
-                Ok(_) => {
-                    log_info!(logger, "Successfully retried spendable outputs");
-                    persister.clear_failed_spendable_outputs()?;
-                }
-                Err(e) => log_warn!(logger, "Failed to retry spendable outputs {e}"),
-            }
-        }
+        if !empty_state {
+            let retry_spendable_outputs = persister
+                .get_failed_spendable_outputs()
+                .map_err(|e| MutinyError::ReadError {
+                    source: MutinyStorageError::Other(anyhow!(
+                        "failed to read retry spendable outputs: {e}"
+                    )),
+                })
+                .unwrap_or_else(|e| {
+                    log_warn!(logger, "Failed to read retry spendable outputs: {e}");
+                    vec![]
+                });
+
+            if !retry_spendable_outputs.is_empty() {
+                log_info!(
+                    logger,
+                    "Retrying {} spendable outputs",
+                    retry_spendable_outputs.len()
+                );
+
+                match event_handler
+                    .handle_spendable_outputs(&retry_spendable_outputs)
+                    .await
+                {
+                    Ok(_) => {
+                        log_info!(logger, "Successfully retried spendable outputs");
+                        persister.clear_failed_spendable_outputs()?;
+                    }
+                    Err(e) => log_warn!(logger, "Failed to retry spendable outputs {e}"),
+                }
+            }
+        }

@@ -1349,15 +1373,19 @@ impl<S: MutinyStorage> Node<S> {
         self.await_chan_funding_tx(init, &pubkey, timeout).await
     }

-    pub fn create_static_channel_backup(&self) -> StaticChannelBackup {
+    pub fn create_static_channel_backup(&self) -> Result<StaticChannelBackup, MutinyError> {
         let mut monitors = HashMap::new();
         for outpoint in self.chain_monitor.list_monitors() {
-            let monitor = self.chain_monitor.get_monitor(outpoint).unwrap();
+            let monitor = self
+                .chain_monitor
+                .get_monitor(outpoint)
+                .map_err(|_| MutinyError::Other(anyhow!("Failed to get channel monitor")))?;

             let monitor_bytes = monitor.encode();
             monitors.insert(outpoint.into_bitcoin_outpoint(), monitor_bytes);
         }

-        StaticChannelBackup { monitors }
+        Ok(StaticChannelBackup { monitors })
     }

     pub async fn recover_from_static_channel_backup(
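With create_static_channel_backup now returning a Result, a missing channel monitor surfaces as a MutinyError instead of a panic from unwrap(). A hypothetical caller-side sketch (not part of this commit, and assuming the relevant mutiny-core items are in scope) would simply propagate the error with ?:

// Hypothetical caller, not from this commit: propagate the error rather than panic.
fn backup_node<S: MutinyStorage>(node: &Node<S>) -> Result<StaticChannelBackup, MutinyError> {
    let backup = node.create_static_channel_backup()?;
    Ok(backup)
}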
(Diffs for the remaining three changed files did not load.)

0 comments on commit c60c94f
