diff --git a/Cargo.lock b/Cargo.lock index 9d4c36fd49..ff608d7186 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4781,6 +4781,7 @@ name = "tari_wallet" version = "0.10.1" dependencies = [ "aes-gcm 0.8.0", + "async-trait", "bincode", "blake2", "chrono", @@ -4820,7 +4821,7 @@ dependencies = [ [[package]] name = "tari_wallet_ffi" -version = "0.18.8" +version = "0.19.0" dependencies = [ "chrono", "env_logger 0.7.1", diff --git a/applications/daily_tests/cron_jobs.js b/applications/daily_tests/cron_jobs.js index 826897007b..80f44b992c 100644 --- a/applications/daily_tests/cron_jobs.js +++ b/applications/daily_tests/cron_jobs.js @@ -52,10 +52,10 @@ async function runWalletRecoveryTest(instances) { scannedRate, recoveredAmount, } = await walletRecoveryTest({ - seedWords: - "spare man patrol essay divide hollow trip visual actress sadness country hungry toy blouse body club depend capital sleep aim high recycle crystal abandon", - log: LOG_FILE, - numWallets: instances, + seedWords: + "spare man patrol essay divide hollow trip visual actress sadness country hungry toy blouse body club depend capital sleep aim high recycle crystal abandon", + log: LOG_FILE, + numWallets: instances, baseDir, }); diff --git a/applications/ffi_client/index.js b/applications/ffi_client/index.js index aa9473740b..0c1b750df5 100644 --- a/applications/ffi_client/index.js +++ b/applications/ffi_client/index.js @@ -195,9 +195,9 @@ try { let id = lib.wallet_start_transaction_validation(wallet, err); console.log("tx validation request id", id); - console.log("start utxo validation"); - id = lib.wallet_start_utxo_validation(wallet, err); - console.log("utxo validation request id", id); + console.log("start txo validation"); + id = lib.wallet_start_txo_validation(wallet, err); + console.log("txo validation request id", id); } catch (e) { console.error("validation error: ", e); } diff --git a/applications/ffi_client/lib/index.js b/applications/ffi_client/lib/index.js index db77ed0a1f..fb4d4ef50c 100644 --- 
a/applications/ffi_client/lib/index.js +++ b/applications/ffi_client/lib/index.js @@ -74,7 +74,7 @@ const libWallet = ffi.Library("./libtari_wallet_ffi.dylib", { wallet_get_num_confirmations_required: [u64, [walletRef, errPtr]], wallet_set_num_confirmations_required: ["void", [walletRef, u64, errPtr]], wallet_start_transaction_validation: [u64, [walletRef, errPtr]], - wallet_start_utxo_validation: [u64, [walletRef, errPtr]], + wallet_start_txo_validation: [u64, [walletRef, errPtr]], wallet_start_recovery: [bool, [walletRef, publicKeyRef, fn, errPtr]], }); diff --git a/applications/tari_app_utilities/src/utilities.rs b/applications/tari_app_utilities/src/utilities.rs index 23a1ebf9ab..cc682bf8bd 100644 --- a/applications/tari_app_utilities/src/utilities.rs +++ b/applications/tari_app_utilities/src/utilities.rs @@ -76,6 +76,8 @@ pub enum ExitCodes { NoPassword, #[error("Tor connection is offline")] TorOffline, + #[error("Database is in inconsistent state: {0}")] + DbInconsistentState(String), } impl ExitCodes { @@ -94,6 +96,29 @@ impl ExitCodes { Self::ConversionError(_) => 111, Self::IncorrectPassword | Self::NoPassword => 112, Self::TorOffline => 113, + Self::DbInconsistentState(_) => 115, + } + } + + pub fn eprint_details(&self) { + use ExitCodes::*; + match self { + TorOffline => { + eprintln!("Unable to connect to the Tor control port."); + eprintln!( + "Please check that you have the Tor proxy running and that access to the Tor control port is \ + turned on.", + ); + eprintln!("If you are unsure of what to do, use the following command to start the Tor proxy:"); + eprintln!( + "tor --allow-missing-torrc --ignore-missing-torrc --clientonly 1 --socksport 9050 --controlport \ + 127.0.0.1:9051 --log \"notice stdout\" --clientuseipv6 1", + ); + }, + + e => { + eprintln!("{}", e); + }, } } } diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index aa01c908fa..23a15bbf60 100644 --- a/applications/tari_base_node/src/main.rs 
+++ b/applications/tari_base_node/src/main.rs @@ -115,6 +115,7 @@ use tari_app_utilities::{ }; use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap, GlobalConfig}; use tari_comms::{peer_manager::PeerFeatures, tor::HiddenServiceControllerError}; +use tari_core::chain_storage::ChainStorageError; use tari_shutdown::{Shutdown, ShutdownSignal}; use tokio::{ runtime, @@ -128,7 +129,7 @@ const LOG_TARGET: &str = "base_node::app"; /// Application entry point fn main() { if let Err(exit_code) = main_inner() { - eprintln!("{:?}", exit_code); + exit_code.eprint_details(); error!( target: LOG_TARGET, "Exiting with code ({}): {:?}", @@ -205,21 +206,15 @@ async fn run_node(node_config: Arc, bootstrap: ConfigBootstrap) -> .await .map_err(|err| { for boxed_error in err.chain() { - if let Some(HiddenServiceControllerError::TorControlPortOffline) = - boxed_error.downcast_ref::() - { - println!("Unable to connect to the Tor control port."); - println!( - "Please check that you have the Tor proxy running and that access to the Tor control port is \ - turned on.", - ); - println!("If you are unsure of what to do, use the following command to start the Tor proxy:"); - println!( - "tor --allow-missing-torrc --ignore-missing-torrc --clientonly 1 --socksport 9050 --controlport \ - 127.0.0.1:9051 --log \"notice stdout\" --clientuseipv6 1", - ); + if let Some(HiddenServiceControllerError::TorControlPortOffline) = boxed_error.downcast_ref() { return ExitCodes::TorOffline; } + if let Some(ChainStorageError::DatabaseResyncRequired(reason)) = boxed_error.downcast_ref() { + return ExitCodes::DbInconsistentState(format!( + "You may need to resync your database because {}", + reason + )); + } // todo: find a better way to do this if boxed_error.to_string().contains("Invalid force sync peer") { diff --git a/applications/tari_console_wallet/Cargo.toml b/applications/tari_console_wallet/Cargo.toml index 703d6693e6..ced9e4ca02 100644 --- 
a/applications/tari_console_wallet/Cargo.toml +++ b/applications/tari_console_wallet/Cargo.toml @@ -56,4 +56,4 @@ default-features = false features = ["crossterm"] [features] -avx2 = [] \ No newline at end of file +avx2 = [] diff --git a/applications/tari_console_wallet/src/automation/commands.rs b/applications/tari_console_wallet/src/automation/commands.rs index 7bbdf8da44..c30dad8a7c 100644 --- a/applications/tari_console_wallet/src/automation/commands.rs +++ b/applications/tari_console_wallet/src/automation/commands.rs @@ -500,14 +500,21 @@ pub async fn monitor_transactions( } } }, - TransactionEvent::TransactionMinedUnconfirmed(id, confirmations) if tx_ids.contains(id) => { + TransactionEvent::TransactionMinedUnconfirmed { + tx_id, + num_confirmations, + is_valid, + } if tx_ids.contains(tx_id) => { debug!( target: LOG_TARGET, - "tx mined unconfirmed event for tx_id: {}, confirmations: {}", *id, confirmations + "tx mined unconfirmed event for tx_id: {}, confirmations: {}, is_valid: {}", + *tx_id, + num_confirmations, + is_valid ); if wait_stage == TransactionStage::MinedUnconfirmed { results.push(SentTransaction { - id: *id, + id: *tx_id, stage: TransactionStage::MinedUnconfirmed, }); if results.len() == tx_ids.len() { @@ -515,11 +522,14 @@ pub async fn monitor_transactions( } } }, - TransactionEvent::TransactionMined(id) if tx_ids.contains(id) => { - debug!(target: LOG_TARGET, "tx mined confirmed event for tx_id: {}", *id); + TransactionEvent::TransactionMined { tx_id, is_valid } if tx_ids.contains(tx_id) => { + debug!( + target: LOG_TARGET, + "tx mined confirmed event for tx_id: {}, is_valid:{}", *tx_id, is_valid + ); if wait_stage == TransactionStage::Mined { results.push(SentTransaction { - id: *id, + id: *tx_id, stage: TransactionStage::Mined, }); if results.len() == tx_ids.len() { diff --git a/applications/tari_console_wallet/src/init/mod.rs b/applications/tari_console_wallet/src/init/mod.rs index a38a5ead41..e99289be5d 100644 --- 
a/applications/tari_console_wallet/src/init/mod.rs +++ b/applications/tari_console_wallet/src/init/mod.rs @@ -47,13 +47,9 @@ use tari_shutdown::ShutdownSignal; use tari_wallet::{ base_node_service::config::BaseNodeServiceConfig, error::{WalletError, WalletStorageError}, - output_manager_service::{config::OutputManagerServiceConfig, TxoValidationType}, + output_manager_service::config::OutputManagerServiceConfig, storage::{database::WalletDatabase, sqlite_utilities::initialize_sqlite_database_backends}, - transaction_service::{ - config::{TransactionRoutingMechanism, TransactionServiceConfig}, - tasks::start_transaction_validation_and_broadcast_protocols::start_transaction_validation_and_broadcast_protocols, - }, - types::ValidationRetryStrategy, + transaction_service::config::{TransactionRoutingMechanism, TransactionServiceConfig}, Wallet, WalletConfig, WalletSqlite, @@ -390,7 +386,7 @@ pub async fn init_wallet( base_node_query_timeout: config.base_node_query_timeout, prevent_fee_gt_amount: config.prevent_fee_gt_amount, event_channel_size: config.output_manager_event_channel_size, - base_node_update_publisher_channel_size: config.base_node_update_publisher_channel_size, + num_confirmations_required: config.transaction_num_confirmations_required, ..Default::default() }), config.network.into(), @@ -500,12 +496,7 @@ pub async fn start_wallet( if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await { error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e); } - if let Err(e) = start_transaction_validation_and_broadcast_protocols( - wallet.transaction_service.clone(), - ValidationRetryStrategy::UntilSuccess, - ) - .await - { + if let Err(e) = wallet.transaction_service.validate_transactions().await { error!( target: LOG_TARGET, "Problem validating and restarting transaction protocols: {}", e @@ -521,37 +512,12 @@ pub async fn start_wallet( async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitCodes> { 
debug!(target: LOG_TARGET, "Starting TXO validations."); - // Unspent TXOs - wallet - .output_manager_service - .validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::UntilSuccess) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); - ExitCodes::WalletError(e.to_string()) - })?; - - // Spent TXOs - wallet - .output_manager_service - .validate_txos(TxoValidationType::Spent, ValidationRetryStrategy::UntilSuccess) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error validating Spent TXOs: {}", e); - ExitCodes::WalletError(e.to_string()) - })?; - - // Invalid TXOs - wallet - .output_manager_service - .validate_txos(TxoValidationType::Invalid, ValidationRetryStrategy::UntilSuccess) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error validating Invalid TXOs: {}", e); - ExitCodes::WalletError(e.to_string()) - })?; + wallet.output_manager_service.validate_txos().await.map_err(|e| { + error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); + ExitCodes::WalletError(e.to_string()) + })?; - debug!(target: LOG_TARGET, "TXO validations completed."); + debug!(target: LOG_TARGET, "TXO validations started."); Ok(()) } diff --git a/applications/tari_console_wallet/src/ui/components/base_node.rs b/applications/tari_console_wallet/src/ui/components/base_node.rs index ade233b90c..cd5cb0a56f 100644 --- a/applications/tari_console_wallet/src/ui/components/base_node.rs +++ b/applications/tari_console_wallet/src/ui/components/base_node.rs @@ -21,7 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::ui::{components::Component, state::AppState}; -use tari_wallet::connectivity_service::OnlineStatus; +use tari_wallet::connectivity_service::{OnlineStatus, WalletConnectivityInterface}; use tui::{ backend::Backend, layout::Rect, diff --git a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs index f50dc0b7b9..1baf29065f 100644 --- a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs @@ -208,7 +208,7 @@ impl TransactionsTab { format!("{}", local_time.format("%Y-%m-%d %H:%M:%S")), Style::default().fg(text_color), ))); - let status = if t.cancelled && t.status == TransactionStatus::Coinbase { + let status = if (t.cancelled || !t.valid) && t.status == TransactionStatus::Coinbase { "Abandoned".to_string() } else if t.cancelled { "Cancelled".to_string() diff --git a/applications/tari_console_wallet/src/ui/state/app_state.rs b/applications/tari_console_wallet/src/ui/state/app_state.rs index 449ea4ffd6..e3a61b10db 100644 --- a/applications/tari_console_wallet/src/ui/state/app_state.rs +++ b/applications/tari_console_wallet/src/ui/state/app_state.rs @@ -52,12 +52,11 @@ use tari_wallet::{ base_node_service::{handle::BaseNodeEventReceiver, service::BaseNodeState}, connectivity_service::WalletConnectivityHandle, contacts_service::storage::database::Contact, - output_manager_service::{handle::OutputManagerEventReceiver, service::Balance, TxId, TxoValidationType}, + output_manager_service::{handle::OutputManagerEventReceiver, service::Balance, TxId}, transaction_service::{ handle::TransactionEventReceiver, storage::models::{CompletedTransaction, TransactionStatus}, }, - types::ValidationRetryStrategy, WalletSqlite, }; @@ -351,7 +350,7 @@ impl AppState { self.cached_data .completed_txs .iter() - .filter(|tx| !(tx.cancelled && tx.status == TransactionStatus::Coinbase)) + .filter(|tx| 
!((tx.cancelled || !tx.valid) && tx.status == TransactionStatus::Coinbase)) .collect() } else { self.cached_data.completed_txs.iter().collect() @@ -815,33 +814,13 @@ impl AppStateInner { let mut output_manager_service = self.wallet.output_manager_service.clone(); task::spawn(async move { - if let Err(e) = txn_service - .validate_transactions(ValidationRetryStrategy::UntilSuccess) - .await - { + if let Err(e) = txn_service.validate_transactions().await { error!(target: LOG_TARGET, "Problem validating transactions: {}", e); } - if let Err(e) = output_manager_service - .validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::UntilSuccess) - .await - { + if let Err(e) = output_manager_service.validate_txos().await { error!(target: LOG_TARGET, "Problem validating UTXOs: {}", e); } - - if let Err(e) = output_manager_service - .validate_txos(TxoValidationType::Spent, ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating STXOs: {}", e); - } - - if let Err(e) = output_manager_service - .validate_txos(TxoValidationType::Invalid, ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating Invalid TXOs: {}", e); - } }); } diff --git a/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs b/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs index 48ba73cd4a..ed42ceaeb0 100644 --- a/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs +++ b/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs @@ -26,6 +26,7 @@ use std::sync::Arc; use tari_comms::{connectivity::ConnectivityEvent, peer_manager::Peer}; use tari_wallet::{ base_node_service::{handle::BaseNodeEvent, service::BaseNodeState}, + connectivity_service::WalletConnectivityInterface, output_manager_service::{handle::OutputManagerEvent, TxId}, transaction_service::handle::TransactionEvent, }; @@ -62,6 +63,7 @@ impl WalletEventMonitor { let mut 
connectivity_events = self.app_state_inner.read().await.get_connectivity_event_stream(); let wallet_connectivity = self.app_state_inner.read().await.get_wallet_connectivity(); let mut connectivity_status = wallet_connectivity.get_connectivity_status_watch(); + let mut base_node_changed = wallet_connectivity.get_current_base_node_watcher(); let mut base_node_events = self.app_state_inner.read().await.get_base_node_event_stream(); let mut software_update_notif = self @@ -85,13 +87,13 @@ impl WalletEventMonitor { self.trigger_balance_refresh(); notifier.transaction_received(tx_id); }, - TransactionEvent::TransactionMinedUnconfirmed(tx_id, confirmations) => { - self.trigger_confirmations_refresh(tx_id, confirmations).await; + TransactionEvent::TransactionMinedUnconfirmed{tx_id, num_confirmations, is_valid: _} => { + self.trigger_confirmations_refresh(tx_id, num_confirmations).await; self.trigger_tx_state_refresh(tx_id).await; self.trigger_balance_refresh(); - notifier.transaction_mined_unconfirmed(tx_id, confirmations); + notifier.transaction_mined_unconfirmed(tx_id, num_confirmations); }, - TransactionEvent::TransactionMined(tx_id) => { + TransactionEvent::TransactionMined{tx_id, is_valid: _} => { self.trigger_confirmations_cleanup(tx_id).await; self.trigger_tx_state_refresh(tx_id).await; self.trigger_balance_refresh(); @@ -166,6 +168,13 @@ impl WalletEventMonitor { Err(broadcast::error::RecvError::Closed) => {} } }, + _ = base_node_changed.changed() => { + let peer = base_node_changed.borrow().as_ref().cloned(); + if let Some(peer) = peer { + self.trigger_base_node_peer_refresh(peer).await; + self.trigger_balance_refresh(); + } + } result = base_node_events.recv() => { match result { Ok(msg) => { @@ -174,10 +183,6 @@ impl WalletEventMonitor { BaseNodeEvent::BaseNodeStateChanged(state) => { self.trigger_base_node_state_refresh(state).await; } - BaseNodeEvent::BaseNodePeerSet(peer) => { - self.trigger_base_node_peer_refresh(*peer).await; - 
self.trigger_balance_refresh(); - } } }, Err(broadcast::error::RecvError::Lagged(n)) => { @@ -190,7 +195,7 @@ impl WalletEventMonitor { match result { Ok(msg) => { trace!(target: LOG_TARGET, "Output Manager Service Callback Handler event {:?}", msg); - if let OutputManagerEvent::TxoValidationSuccess(_,_) = &*msg { + if let OutputManagerEvent::TxoValidationSuccess(_) = &*msg { self.trigger_balance_refresh(); } }, diff --git a/applications/tari_console_wallet/src/wallet_modes.rs b/applications/tari_console_wallet/src/wallet_modes.rs index 23b1ee6330..0fbc183cbb 100644 --- a/applications/tari_console_wallet/src/wallet_modes.rs +++ b/applications/tari_console_wallet/src/wallet_modes.rs @@ -223,7 +223,7 @@ pub fn tui_mode(config: WalletModeConfig, mut wallet: WalletSqlite) -> Result<() base_node_config.base_node_custom = base_node_custom.clone(); if let Some(peer) = base_node_custom { base_node_selected = peer; - } else if let Some(peer) = handle.block_on(wallet.get_base_node_peer())? { + } else if let Some(peer) = handle.block_on(wallet.get_base_node_peer()) { base_node_selected = peer; } diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.proto b/base_layer/core/src/base_node/proto/wallet_rpc.proto index d67750f9b5..96a1613df8 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.proto +++ b/base_layer/core/src/base_node/proto/wallet_rpc.proto @@ -43,11 +43,14 @@ message TxQueryBatchResponse { TxLocation location = 2; google.protobuf.BytesValue block_hash = 3; uint64 confirmations = 4; + uint64 block_height = 5; } message TxQueryBatchResponses { repeated TxQueryBatchResponse responses = 1; bool is_synced = 2; + google.protobuf.BytesValue tip_hash = 3; + uint64 height_of_longest_chain = 4; } @@ -60,6 +63,41 @@ message FetchUtxosResponse { bool is_synced = 2; } + +message QueryDeletedRequest{ + repeated uint64 mmr_positions = 1; + google.protobuf.BytesValue chain_must_include_header = 2; + bool include_deleted_block_data = 3; +} + +message 
QueryDeletedResponse { + repeated uint64 deleted_positions = 1; + repeated uint64 not_deleted_positions = 2; + bytes best_block = 3; + uint64 height_of_longest_chain = 4; + repeated bytes blocks_deleted_in = 5; + repeated uint64 heights_deleted_at = 6; +} + +message UtxoQueryRequest{ + repeated bytes output_hashes =1; +} + +message UtxoQueryResponses { + repeated UtxoQueryResponse responses =1; + bytes best_block = 3; + uint64 height_of_longest_chain = 4; +} + +message UtxoQueryResponse { + tari.types.TransactionOutput output = 1; + uint64 mmr_position = 2; + uint64 mined_height =3; + bytes mined_in_block = 4; + bytes output_hash = 5; + +} + message TipInfoResponse { ChainMetadata metadata = 1; bool is_synced = 2; diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.rs b/base_layer/core/src/base_node/proto/wallet_rpc.rs index 60b7c82e37..6daaa9e736 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.rs +++ b/base_layer/core/src/base_node/proto/wallet_rpc.rs @@ -133,6 +133,7 @@ pub struct TxQueryBatchResponse { pub location: TxLocation, pub block_hash: Option, pub confirmations: u64, + pub block_height: u64, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -223,6 +224,7 @@ impl TryFrom for TxQueryBatchResponse { .ok_or_else(|| "Invalid or unrecognised `TxLocation` enum".to_string())?, )?, block_hash: proto_response.block_hash, + block_height: proto_response.block_height, confirmations: proto_response.confirmations, }) } diff --git a/base_layer/core/src/base_node/rpc/mod.rs b/base_layer/core/src/base_node/rpc/mod.rs index 44cdefdbc3..d0a54d4320 100644 --- a/base_layer/core/src/base_node/rpc/mod.rs +++ b/base_layer/core/src/base_node/rpc/mod.rs @@ -48,6 +48,7 @@ use crate::{ }, }; +use crate::proto::base_node::{QueryDeletedRequest, QueryDeletedResponse, UtxoQueryRequest, UtxoQueryResponses}; use tari_comms::protocol::rpc::{Request, Response, RpcStatus}; use tari_comms_rpc_macros::tari_rpc; @@ -79,6 +80,21 @@ pub trait 
BaseNodeWalletService: Send + Sync + 'static { #[rpc(method = 6)] async fn get_header(&self, request: Request) -> Result, RpcStatus>; + + #[rpc(method = 7)] + async fn utxo_query(&self, request: Request) -> Result, RpcStatus>; + + #[rpc(method = 8)] + async fn query_deleted( + &self, + request: Request, + ) -> Result, RpcStatus>; + + #[rpc(method = 9)] + async fn get_header_by_height( + &self, + request: Request, + ) -> Result, RpcStatus>; } #[cfg(feature = "base_node")] diff --git a/base_layer/core/src/base_node/rpc/service.rs b/base_layer/core/src/base_node/rpc/service.rs index dbd1b141e4..7edca0efa9 100644 --- a/base_layer/core/src/base_node/rpc/service.rs +++ b/base_layer/core/src/base_node/rpc/service.rs @@ -19,16 +19,17 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- use crate::{ base_node::{rpc::BaseNodeWalletService, state_machine_service::states::StateInfo, StateMachineHandle}, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, PrunedOutput}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, PrunedOutput, UtxoMinedInfo}, mempool::{service::MempoolHandle, TxStorageResponse}, proto, proto::{ base_node::{ FetchMatchingUtxos, FetchUtxosResponse, + QueryDeletedRequest, + QueryDeletedResponse, Signatures as SignaturesProto, TipInfoResponse, TxLocation, @@ -37,6 +38,9 @@ use crate::{ TxQueryResponse, TxSubmissionRejectionReason, TxSubmissionResponse, + UtxoQueryRequest, + UtxoQueryResponse, + UtxoQueryResponses, }, types::{Signature as SignatureProto, Transaction as TransactionProto}, }, @@ -261,6 +265,12 @@ impl BaseNodeWalletService for BaseNodeWalletRpc let mut responses: Vec = Vec::new(); + let metadata = self + .db + .get_chain_metadata() + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + for sig in message.sigs { let signature = Signature::try_from(sig).map_err(|_| RpcStatus::bad_request("Signature was invalid"))?; let response: TxQueryResponse = self.fetch_kernel(signature.clone()).await?; @@ -269,9 +279,15 @@ impl BaseNodeWalletService for BaseNodeWalletRpc location: response.location, block_hash: response.block_hash, confirmations: response.confirmations, + block_height: response.height_of_longest_chain - response.confirmations, }); } - Ok(Response::new(TxQueryBatchResponses { responses, is_synced })) + Ok(Response::new(TxQueryBatchResponses { + responses, + is_synced, + tip_hash: Some(metadata.best_block().clone()), + height_of_longest_chain: metadata.height_of_longest_chain(), + })) } async fn fetch_matching_utxos( @@ -290,13 +306,13 @@ impl BaseNodeWalletService for BaseNodeWalletRpc let db = self.db(); let mut res = Vec::with_capacity(message.output_hashes.len()); - for (pruned_output, spent) in (db + let utxos = db .fetch_utxos(message.output_hashes) .await - 
.map_err(RpcStatus::log_internal_error(LOG_TARGET))?) - .into_iter() - .flatten() - { + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? + .into_iter() + .flatten(); + for (pruned_output, spent) in utxos { if let PrunedOutput::NotPruned { output } = pruned_output { if !spent { res.push(output); @@ -310,6 +326,130 @@ impl BaseNodeWalletService for BaseNodeWalletRpc })) } + async fn utxo_query(&self, request: Request) -> Result, RpcStatus> { + let message = request.into_message(); + let db = self.db(); + let mut res = Vec::with_capacity(message.output_hashes.len()); + for UtxoMinedInfo { + output, + mmr_position, + mined_height: height, + header_hash, + } in (db + .fetch_utxos_and_mined_info(message.output_hashes) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?) + .into_iter() + .flatten() + { + res.push((output, mmr_position, height, header_hash)); + } + + let metadata = self + .db + .get_chain_metadata() + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + + Ok(Response::new(UtxoQueryResponses { + height_of_longest_chain: metadata.height_of_longest_chain(), + best_block: metadata.best_block().clone(), + responses: res + .into_iter() + .map( + |(output, mmr_position, mined_height, mined_in_block)| UtxoQueryResponse { + mmr_position: mmr_position.into(), + mined_height, + mined_in_block, + output_hash: output.hash(), + output: match output { + PrunedOutput::Pruned { .. 
} => None, + PrunedOutput::NotPruned { output } => Some(output.into()), + }, + }, + ) + .collect(), + })) + } + + /// Currently the wallet cannot use the deleted bitmap because it can't compile croaring + /// at some point in the future, it might be better to send the wallet the actual bitmap so + /// it can check itself + async fn query_deleted( + &self, + request: Request, + ) -> Result, RpcStatus> { + let message = request.into_message(); + + if let Some(chain_must_include_header) = message.chain_must_include_header { + if self + .db + .fetch_header_by_block_hash(chain_must_include_header) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? + .is_none() + { + return Err(RpcStatus::not_found( + "Chain does not include header. It might have been reorged out", + )); + } + } + + let deleted_bitmap = self + .db + .fetch_deleted_bitmap_at_tip() + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + + let mut deleted_positions = vec![]; + let mut not_deleted_positions = vec![]; + + for position in message.mmr_positions { + if position > u32::MAX as u64 { + // TODO: in future, bitmap may support higher than u32 + return Err(RpcStatus::bad_request("position must fit into a u32")); + } + let position = position as u32; + if deleted_bitmap.bitmap().contains(position) { + deleted_positions.push(position); + } else { + not_deleted_positions.push(position); + } + } + + let mut blocks_deleted_in = Vec::new(); + let mut heights_deleted_at = Vec::new(); + if message.include_deleted_block_data { + let headers = self + .db + .fetch_header_hash_by_deleted_mmr_positions(deleted_positions.clone()) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + + heights_deleted_at.reserve(headers.len()); + blocks_deleted_in.reserve(headers.len()); + for (height, hash) in headers.into_iter().flatten() { + heights_deleted_at.push(height); + blocks_deleted_in.push(hash); + } + } + + let metadata = self + .db + .get_chain_metadata() + .await + 
.map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + + Ok(Response::new(QueryDeletedResponse { + height_of_longest_chain: metadata.height_of_longest_chain(), + best_block: metadata.best_block().clone(), + deleted_positions: deleted_positions.into_iter().map(|v| v as u64).collect(), + not_deleted_positions: not_deleted_positions.into_iter().map(|v| v as u64).collect(), + blocks_deleted_in, + heights_deleted_at, + })) + } + async fn get_tip_info(&self, _request: Request<()>) -> Result, RpcStatus> { let state_machine = self.state_machine(); let status_watch = state_machine.get_status_info_watch(); @@ -341,4 +481,19 @@ impl BaseNodeWalletService for BaseNodeWalletRpc Ok(Response::new(header.into())) } + + async fn get_header_by_height( + &self, + request: Request, + ) -> Result, RpcStatus> { + let height = request.into_message(); + let header = self + .db() + .fetch_header(height) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? + .ok_or_else(|| RpcStatus::not_found(format!("Header not found at height {}", height)))?; + + Ok(Response::new(header.into())) + } } diff --git a/base_layer/core/src/chain_storage/accumulated_data.rs b/base_layer/core/src/chain_storage/accumulated_data.rs index eb6d50e499..5c050290d4 100644 --- a/base_layer/core/src/chain_storage/accumulated_data.rs +++ b/base_layer/core/src/chain_storage/accumulated_data.rs @@ -227,6 +227,10 @@ impl CompleteDeletedBitmap { pub fn dissolve(self) -> (Bitmap, u64, HashOutput) { (self.deleted, self.height, self.hash) } + + pub fn into_bytes(self) -> Vec { + self.deleted.serialize() + } } pub struct BlockHeaderAccumulatedDataBuilder<'a> { diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index f95d44489c..7ec8c153e2 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -25,6 +25,7 @@ use crate::{ chain_storage::{ accumulated_data::BlockHeaderAccumulatedData, 
blockchain_database::MmrRoots, + utxo_mined_info::UtxoMinedInfo, BlockAccumulatedData, BlockAddResult, BlockchainBackend, @@ -36,6 +37,7 @@ use crate::{ DbBasicStats, DbTotalSizeStats, DbTransaction, + DeletedBitmap, HistoricalBlock, HorizonData, MmrTree, @@ -146,6 +148,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_utxos(hashes: Vec) -> Vec>, "fetch_utxos"); + make_async_fn!(fetch_utxos_and_mined_info(hashes: Vec) -> Vec>, "fetch_utxos_and_mined_info"); + make_async_fn!(fetch_utxos_by_mmr_position(start: u64, end: u64, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_by_mmr_position"); //---------------------------------- Kernel --------------------------------------------// @@ -232,6 +236,10 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_complete_deleted_bitmap_at(hash: HashOutput) -> CompleteDeletedBitmap, "fetch_deleted_bitmap"); + make_async_fn!(fetch_deleted_bitmap_at_tip() -> DeletedBitmap, "fetch_deleted_bitmap_at_tip"); + + make_async_fn!(fetch_header_hash_by_deleted_mmr_positions(mmr_positions: Vec) -> Vec>, "fetch_headers_of_deleted_positions"); + make_async_fn!(get_stats() -> DbBasicStats, "get_stats"); make_async_fn!(fetch_total_size_stats() -> DbTotalSizeStats, "fetch_total_size_stats"); diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index c1f57c09d3..86794ab4d3 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -3,6 +3,7 @@ use crate::{ chain_storage::{ accumulated_data::DeletedBitmap, pruned_output::PrunedOutput, + utxo_mined_info::UtxoMinedInfo, BlockAccumulatedData, BlockHeaderAccumulatedData, ChainBlock, @@ -105,7 +106,7 @@ pub trait BlockchainBackend: Send + Sync { ) -> Result<(Vec, Bitmap), ChainStorageError>; /// Fetch a specific output. 
Returns the output and the leaf index in the output MMR - fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError>; + fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError>; /// Returns the unspent TransactionOutput output that matches the given commitment if it exists in the current UTXO /// set, otherwise None is returned. @@ -167,4 +168,10 @@ pub trait BlockchainBackend: Send + Sync { /// Returns total size information about each internal database. This call may be very slow and will obtain a read /// lock for the duration. fn fetch_total_size_stats(&self) -> Result; + + /// Returns a (block height/hash) tuple for each mmr position of the height it was spent, or None if it is not spent + fn fetch_header_hash_by_deleted_mmr_positions( + &self, + mmr_positions: Vec, + ) -> Result>, ChainStorageError>; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 652f57e371..9fdaad9851 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -31,12 +31,14 @@ use crate::{ db_transaction::{DbKey, DbTransaction, DbValue}, error::ChainStorageError, pruned_output::PrunedOutput, + utxo_mined_info::UtxoMinedInfo, BlockAddResult, BlockchainBackend, ChainBlock, ChainHeader, DbBasicStats, DbTotalSizeStats, + DeletedBitmap, HistoricalBlock, HorizonData, MmrTree, @@ -293,7 +295,7 @@ where B: BlockchainBackend // Fetch the utxo pub fn fetch_utxo(&self, hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; - Ok(db.fetch_output(&hash)?.map(|(out, _index, _)| out)) + Ok(db.fetch_output(&hash)?.map(|mined_info| mined_info.output)) } pub fn fetch_unspent_output_by_commitment( @@ -314,7 +316,22 @@ where B: BlockchainBackend let mut result = Vec::with_capacity(hashes.len()); for hash in hashes { let output = db.fetch_output(&hash)?; - 
result.push(output.map(|(out, mmr_index, _)| (out, deleted.bitmap().contains(mmr_index)))); + result + .push(output.map(|mined_info| (mined_info.output, deleted.bitmap().contains(mined_info.mmr_position)))); + } + Ok(result) + } + + pub fn fetch_utxos_and_mined_info( + &self, + hashes: Vec, + ) -> Result>, ChainStorageError> { + let db = self.db_read_access()?; + + let mut result = Vec::with_capacity(hashes.len()); + for hash in hashes { + let output = db.fetch_output(&hash)?; + result.push(output); } Ok(result) } @@ -929,6 +946,23 @@ where B: BlockchainBackend )) } + pub fn fetch_deleted_bitmap_at_tip(&self) -> Result { + let db = self.db_read_access()?; + db.fetch_deleted_bitmap() + } + + pub fn fetch_header_hash_by_deleted_mmr_positions( + &self, + mmr_positions: Vec, + ) -> Result>, ChainStorageError> { + if mmr_positions.is_empty() { + return Ok(Vec::new()); + } + + let db = self.db_read_access()?; + db.fetch_header_hash_by_deleted_mmr_positions(mmr_positions) + } + pub fn get_stats(&self) -> Result { let lock = self.db_read_access()?; lock.get_stats() @@ -1307,19 +1341,10 @@ fn fetch_block_with_utxo( db: &T, commitment: Commitment, ) -> Result, ChainStorageError> { - match db.fetch_output(&commitment.to_vec()) { - Ok(output) => match output { - Some((_output, leaf, _height)) => { - let header = db.fetch_header_containing_utxo_mmr(leaf as u64)?; - fetch_block_by_hash(db, header.hash().to_owned()) - }, - None => Ok(None), - }, - Err(_) => Err(ChainStorageError::ValueNotFound { - entity: "Output", - field: "Commitment", - value: commitment.to_hex(), - }), + let output = db.fetch_output(&commitment.to_vec())?; + match output { + Some(mined_info) => fetch_block_by_hash(db, mined_info.header_hash), + None => Ok(None), } } diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index e6182a570b..ac9208025e 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -114,6 
+114,8 @@ pub enum ChainStorageError { DbResizeRequired, #[error("DB transaction was too large ({0} operations)")] DbTransactionTooLarge(usize), + #[error("DB needs to be resynced: {0}")] + DatabaseResyncRequired(&'static str), } impl ChainStorageError { diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index c30b97e684..f58eea66be 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -51,27 +51,9 @@ use crate::{ TransactionInputRowData, TransactionKernelRowData, TransactionOutputRowData, - LMDB_DB_BLOCK_ACCUMULATED_DATA, - LMDB_DB_BLOCK_HASHES, - LMDB_DB_HEADERS, - LMDB_DB_HEADER_ACCUMULATED_DATA, - LMDB_DB_INPUTS, - LMDB_DB_KERNELS, - LMDB_DB_KERNEL_EXCESS_INDEX, - LMDB_DB_KERNEL_EXCESS_SIG_INDEX, - LMDB_DB_KERNEL_MMR_SIZE_INDEX, - LMDB_DB_METADATA, - LMDB_DB_MONERO_SEED_HEIGHT, - LMDB_DB_ORPHANS, - LMDB_DB_ORPHAN_CHAIN_TIPS, - LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, - LMDB_DB_ORPHAN_PARENT_MAP_INDEX, - LMDB_DB_TXOS_HASH_TO_INDEX, - LMDB_DB_UTXOS, - LMDB_DB_UTXO_COMMITMENT_INDEX, - LMDB_DB_UTXO_MMR_SIZE_INDEX, }, stats::DbTotalSizeStats, + utxo_mined_info::UtxoMinedInfo, BlockchainBackend, ChainBlock, ChainHeader, @@ -105,22 +87,60 @@ type DatabaseRef = Arc>; pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb_db"; -struct OutputKey<'a> { - header_hash: &'a [u8], - mmr_position: u32, -} +const LMDB_DB_METADATA: &str = "metadata"; +const LMDB_DB_HEADERS: &str = "headers"; +const LMDB_DB_HEADER_ACCUMULATED_DATA: &str = "header_accumulated_data"; +const LMDB_DB_BLOCK_ACCUMULATED_DATA: &str = "mmr_peak_data"; +const LMDB_DB_BLOCK_HASHES: &str = "block_hashes"; +const LMDB_DB_UTXOS: &str = "utxos"; +const LMDB_DB_INPUTS: &str = "inputs"; +const LMDB_DB_TXOS_HASH_TO_INDEX: &str = "txos_hash_to_index"; +const LMDB_DB_KERNELS: &str = "kernels"; +const LMDB_DB_KERNEL_EXCESS_INDEX: &str = "kernel_excess_index"; +const 
LMDB_DB_KERNEL_EXCESS_SIG_INDEX: &str = "kernel_excess_sig_index"; +const LMDB_DB_KERNEL_MMR_SIZE_INDEX: &str = "kernel_mmr_size_index"; +const LMDB_DB_UTXO_MMR_SIZE_INDEX: &str = "utxo_mmr_size_index"; +const LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX: &str = "deleted_txo_mmr_position_to_height_index"; +const LMDB_DB_UTXO_COMMITMENT_INDEX: &str = "utxo_commitment_index"; +const LMDB_DB_ORPHANS: &str = "orphans"; +const LMDB_DB_MONERO_SEED_HEIGHT: &str = "monero_seed_height"; +const LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA: &str = "orphan_accumulated_data"; +const LMDB_DB_ORPHAN_CHAIN_TIPS: &str = "orphan_chain_tips"; +const LMDB_DB_ORPHAN_PARENT_MAP_INDEX: &str = "orphan_parent_map_index"; -impl<'a> OutputKey<'a> { - pub fn new(header_hash: &'a [u8], mmr_position: u32) -> OutputKey { - OutputKey { - header_hash, - mmr_position, - } - } +pub fn create_lmdb_database>(path: P, config: LMDBConfig) -> Result { + let flags = db::CREATE; + let _ = std::fs::create_dir_all(&path); - pub fn get_key(&self) -> String { - format!("{}-{:010}", to_hex(self.header_hash), self.mmr_position) - } + let file_lock = acquire_exclusive_file_lock(&path.as_ref().to_path_buf())?; + + let lmdb_store = LMDBBuilder::new() + .set_path(path) + .set_env_config(config) + .set_max_number_of_databases(20) + .add_database(LMDB_DB_METADATA, flags | db::INTEGERKEY) + .add_database(LMDB_DB_HEADERS, flags | db::INTEGERKEY) + .add_database(LMDB_DB_HEADER_ACCUMULATED_DATA, flags | db::INTEGERKEY) + .add_database(LMDB_DB_BLOCK_ACCUMULATED_DATA, flags | db::INTEGERKEY) + .add_database(LMDB_DB_BLOCK_HASHES, flags) + .add_database(LMDB_DB_UTXOS, flags) + .add_database(LMDB_DB_INPUTS, flags) + .add_database(LMDB_DB_TXOS_HASH_TO_INDEX, flags) + .add_database(LMDB_DB_KERNELS, flags) + .add_database(LMDB_DB_KERNEL_EXCESS_INDEX, flags) + .add_database(LMDB_DB_KERNEL_EXCESS_SIG_INDEX, flags) + .add_database(LMDB_DB_KERNEL_MMR_SIZE_INDEX, flags) + .add_database(LMDB_DB_UTXO_MMR_SIZE_INDEX, flags) + 
.add_database(LMDB_DB_UTXO_COMMITMENT_INDEX, flags) + .add_database(LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX, flags | db::INTEGERKEY) + .add_database(LMDB_DB_ORPHANS, flags) + .add_database(LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, flags) + .add_database(LMDB_DB_MONERO_SEED_HEIGHT, flags) + .add_database(LMDB_DB_ORPHAN_CHAIN_TIPS, flags) + .add_database(LMDB_DB_ORPHAN_PARENT_MAP_INDEX, flags | db::DUPSORT) + .build() + .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; + LMDBDatabase::new(lmdb_store, file_lock) } /// This is a lmdb-based blockchain database for persistent storage of the chain state. @@ -141,6 +161,7 @@ pub struct LMDBDatabase { kernel_mmr_size_index: DatabaseRef, output_mmr_size_index: DatabaseRef, utxo_commitment_index: DatabaseRef, + deleted_txo_mmr_position_to_height_index: DatabaseRef, orphans_db: DatabaseRef, monero_seed_height_db: DatabaseRef, orphan_header_accumulated_data_db: DatabaseRef, @@ -153,7 +174,7 @@ impl LMDBDatabase { pub fn new(store: LMDBStore, file_lock: File) -> Result { let env = store.env(); - let res = Self { + let db = Self { metadata_db: get_database(&store, LMDB_DB_METADATA)?, headers_db: get_database(&store, LMDB_DB_HEADERS)?, header_accumulated_data_db: get_database(&store, LMDB_DB_HEADER_ACCUMULATED_DATA)?, @@ -168,6 +189,10 @@ impl LMDBDatabase { kernel_mmr_size_index: get_database(&store, LMDB_DB_KERNEL_MMR_SIZE_INDEX)?, output_mmr_size_index: get_database(&store, LMDB_DB_UTXO_MMR_SIZE_INDEX)?, utxo_commitment_index: get_database(&store, LMDB_DB_UTXO_COMMITMENT_INDEX)?, + deleted_txo_mmr_position_to_height_index: get_database( + &store, + LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX, + )?, orphans_db: get_database(&store, LMDB_DB_ORPHANS)?, orphan_header_accumulated_data_db: get_database(&store, LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA)?, monero_seed_height_db: get_database(&store, LMDB_DB_MONERO_SEED_HEIGHT)?, @@ -178,7 +203,20 @@ impl LMDBDatabase { 
_file_lock: Arc::new(file_lock), }; - Ok(res) + db.build_indexes()?; + + Ok(db) + } + + fn build_indexes(&self) -> Result<(), ChainStorageError> { + let txn = self.read_transaction()?; + if lmdb_len(&txn, &self.deleted_txo_mmr_position_to_height_index)? == 0 && lmdb_len(&txn, &self.inputs_db)? > 0 + { + return Err(ChainStorageError::DatabaseResyncRequired( + "deleted_txo_mmr_position_to_height_index is needs to be built", + )); + } + Ok(()) } /// Try to establish a read lock on the LMDB database. If an exclusive write lock has been previously acquired, this @@ -376,7 +414,7 @@ impl LMDBDatabase { Ok(()) } - fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 19] { + fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 20] { [ ("metadata_db", &self.metadata_db), ("headers_db", &self.headers_db), @@ -392,6 +430,10 @@ impl LMDBDatabase { ("kernel_mmr_size_index", &self.kernel_mmr_size_index), ("output_mmr_size_index", &self.output_mmr_size_index), ("utxo_commitment_index", &self.utxo_commitment_index), + ( + "deleted_txo_mmr_position_to_height_index", + &self.deleted_txo_mmr_position_to_height_index, + ), ("orphans_db", &self.orphans_db), ( "orphan_header_accumulated_data_db", @@ -528,7 +570,7 @@ impl LMDBDatabase { "kernel_excess_index", )?; - let mut excess_sig_key = Vec::::new(); + let mut excess_sig_key = Vec::::with_capacity(32 * 2); excess_sig_key.extend(kernel.excess_sig.get_public_nonce().as_bytes()); excess_sig_key.extend(kernel.excess_sig.get_signature().as_bytes()); lmdb_insert( @@ -556,6 +598,7 @@ impl LMDBDatabase { fn insert_input( &self, txn: &WriteTransaction<'_>, + height: u64, header_hash: HashOutput, input: TransactionInput, mmr_position: u32, @@ -566,6 +609,13 @@ impl LMDBDatabase { input.commitment().as_bytes(), "utxo_commitment_index", )?; + lmdb_insert( + txn, + &self.deleted_txo_mmr_position_to_height_index, + &mmr_position, + &(height, &header_hash), + "deleted_txo_mmr_position_to_height_index", + )?; let hash = input.hash(); let key = 
format!("{}-{:010}-{}", header_hash.to_hex(), mmr_position, hash.to_hex()); @@ -859,6 +909,12 @@ impl LMDBDatabase { &row.input.output_hash(), "utxo_commitment_index", )?; + lmdb_delete( + txn, + &self.deleted_txo_mmr_position_to_height_index, + &row.mmr_position, + "deleted_txo_mmr_position_to_height_index", + )?; } Ok(()) } @@ -986,8 +1042,8 @@ impl LMDBDatabase { })? }; - let mut total_kernel_sum = Commitment::from_bytes(&[0u8; 32]).expect("Could not create commitment"); - let mut total_utxo_sum = Commitment::from_bytes(&[0u8; 32]).expect("Could not create commitment"); + let mut total_kernel_sum = Commitment::default(); + let mut total_utxo_sum = Commitment::default(); let BlockAccumulatedData { kernels: pruned_kernel_set, outputs: pruned_output_set, @@ -1036,14 +1092,14 @@ impl LMDBDatabase { ))); } debug!(target: LOG_TARGET, "Inserting input `{}`", input.commitment.to_hex()); - self.insert_input(txn, block_hash.clone(), input, index)?; + self.insert_input(txn, current_header_at_height.height, block_hash.clone(), input, index)?; } // Merge current deletions with the tip bitmap - let deleted = output_mmr.deleted().clone(); + let deleted_at_current_height = output_mmr.deleted().clone(); // Merge the new indexes with the blockchain deleted bitmap let mut deleted_bitmap = self.load_deleted_bitmap_model(txn)?; - deleted_bitmap.merge(&deleted)?; + deleted_bitmap.merge(&deleted_at_current_height)?; // Set the output MMR to the complete map so that the complete state can be committed to in the final MR output_mmr.set_deleted(deleted_bitmap.get().clone().into_bitmap()); @@ -1059,7 +1115,7 @@ impl LMDBDatabase { kernel_mmr.get_pruned_hash_set()?, output_mmr.mmr().get_pruned_hash_set()?, witness_mmr.get_pruned_hash_set()?, - deleted, + deleted_at_current_height, total_kernel_sum, ), )?; @@ -1251,40 +1307,6 @@ impl LMDBDatabase { } } -pub fn create_lmdb_database>(path: P, config: LMDBConfig) -> Result { - let flags = db::CREATE; - let _ = std::fs::create_dir_all(&path); - 
- let file_lock = acquire_exclusive_file_lock(&path.as_ref().to_path_buf())?; - - let lmdb_store = LMDBBuilder::new() - .set_path(path) - .set_env_config(config) - .set_max_number_of_databases(20) - .add_database(LMDB_DB_METADATA, flags | db::INTEGERKEY) - .add_database(LMDB_DB_HEADERS, flags | db::INTEGERKEY) - .add_database(LMDB_DB_HEADER_ACCUMULATED_DATA, flags | db::INTEGERKEY) - .add_database(LMDB_DB_BLOCK_ACCUMULATED_DATA, flags | db::INTEGERKEY) - .add_database(LMDB_DB_BLOCK_HASHES, flags) - .add_database(LMDB_DB_UTXOS, flags) - .add_database(LMDB_DB_INPUTS, flags) - .add_database(LMDB_DB_TXOS_HASH_TO_INDEX, flags) - .add_database(LMDB_DB_KERNELS, flags) - .add_database(LMDB_DB_KERNEL_EXCESS_INDEX, flags) - .add_database(LMDB_DB_KERNEL_EXCESS_SIG_INDEX, flags) - .add_database(LMDB_DB_KERNEL_MMR_SIZE_INDEX, flags) - .add_database(LMDB_DB_UTXO_MMR_SIZE_INDEX, flags) - .add_database(LMDB_DB_UTXO_COMMITMENT_INDEX, flags) - .add_database(LMDB_DB_ORPHANS, flags) - .add_database(LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, flags) - .add_database(LMDB_DB_MONERO_SEED_HEIGHT, flags) - .add_database(LMDB_DB_ORPHAN_CHAIN_TIPS, flags) - .add_database(LMDB_DB_ORPHAN_PARENT_MAP_INDEX, flags | db::DUPSORT) - .build() - .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; - LMDBDatabase::new(lmdb_store, file_lock) -} - pub fn create_recovery_lmdb_database>(path: P) -> Result<(), ChainStorageError> { let new_path = path.as_ref().join("temp_recovery"); let _ = fs::create_dir_all(&new_path); @@ -1796,7 +1818,7 @@ impl BlockchainBackend for LMDBDatabase { Ok((result, difference_bitmap)) } - fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { + fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { debug!(target: LOG_TARGET, "Fetch output: {}", output_hash.to_hex()); let txn = self.read_transaction()?; if let Some((index, key)) = @@ -1814,27 +1836,31 @@ impl BlockchainBackend for 
LMDBDatabase { output: Some(o), mmr_position, mined_height, + header_hash, .. - }) => Ok(Some(( - PrunedOutput::NotPruned { output: o }, + }) => Ok(Some(UtxoMinedInfo { + output: PrunedOutput::NotPruned { output: o }, mmr_position, mined_height, - ))), + header_hash, + })), Some(TransactionOutputRowData { output: None, mmr_position, mined_height, hash, witness_hash, + header_hash, .. - }) => Ok(Some(( - PrunedOutput::Pruned { + }) => Ok(Some(UtxoMinedInfo { + output: PrunedOutput::Pruned { output_hash: hash, witness_hash, }, mmr_position, mined_height, - ))), + header_hash, + })), _ => Ok(None), } } else { @@ -2042,6 +2068,18 @@ impl BlockchainBackend for LMDBDatabase { Ok(deleted_bitmap.get().clone()) } + fn fetch_header_hash_by_deleted_mmr_positions( + &self, + mmr_positions: Vec, + ) -> Result>, ChainStorageError> { + let txn = self.read_transaction()?; + + mmr_positions + .iter() + .map(|pos| lmdb_get(&txn, &self.deleted_txo_mmr_position_to_height_index, pos)) + .collect() + } + fn delete_oldest_orphans( &mut self, horizon_height: u64, @@ -2357,3 +2395,21 @@ impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { Ok(()) } } + +struct OutputKey<'a> { + header_hash: &'a [u8], + mmr_position: u32, +} + +impl<'a> OutputKey<'a> { + pub fn new(header_hash: &'a [u8], mmr_position: u32) -> OutputKey { + OutputKey { + header_hash, + mmr_position, + } + } + + pub fn get_key(&self) -> String { + format!("{}-{:010}", to_hex(self.header_hash), self.mmr_position) + } +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/mod.rs b/base_layer/core/src/chain_storage/lmdb_db/mod.rs index f97c1c4878..7947da342d 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/mod.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/mod.rs @@ -29,26 +29,6 @@ pub use lmdb_db::{create_lmdb_database, create_recovery_lmdb_database, LMDBDatab use serde::{Deserialize, Serialize}; use tari_common_types::types::HashOutput; -pub const LMDB_DB_METADATA: &str = "metadata"; -pub const 
LMDB_DB_HEADERS: &str = "headers"; -pub const LMDB_DB_HEADER_ACCUMULATED_DATA: &str = "header_accumulated_data"; -pub const LMDB_DB_BLOCK_ACCUMULATED_DATA: &str = "mmr_peak_data"; -pub const LMDB_DB_BLOCK_HASHES: &str = "block_hashes"; -pub const LMDB_DB_UTXOS: &str = "utxos"; -pub const LMDB_DB_INPUTS: &str = "inputs"; -pub const LMDB_DB_TXOS_HASH_TO_INDEX: &str = "txos_hash_to_index"; -pub const LMDB_DB_KERNELS: &str = "kernels"; -pub const LMDB_DB_KERNEL_EXCESS_INDEX: &str = "kernel_excess_index"; -pub const LMDB_DB_KERNEL_EXCESS_SIG_INDEX: &str = "kernel_excess_sig_index"; -pub const LMDB_DB_KERNEL_MMR_SIZE_INDEX: &str = "kernel_mmr_size_index"; -pub const LMDB_DB_UTXO_MMR_SIZE_INDEX: &str = "utxo_mmr_size_index"; -pub const LMDB_DB_UTXO_COMMITMENT_INDEX: &str = "utxo_commitment_index"; -pub const LMDB_DB_ORPHANS: &str = "orphans"; -pub const LMDB_DB_MONERO_SEED_HEIGHT: &str = "monero_seed_height"; -pub const LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA: &str = "orphan_accumulated_data"; -pub const LMDB_DB_ORPHAN_CHAIN_TIPS: &str = "orphan_chain_tips"; -pub const LMDB_DB_ORPHAN_PARENT_MAP_INDEX: &str = "orphan_parent_map_index"; - #[derive(Serialize, Deserialize, Debug)] pub(crate) struct TransactionOutputRowData { pub output: Option, diff --git a/base_layer/core/src/chain_storage/mod.rs b/base_layer/core/src/chain_storage/mod.rs index 2b5a9a3ee9..6e616e9640 100644 --- a/base_layer/core/src/chain_storage/mod.rs +++ b/base_layer/core/src/chain_storage/mod.rs @@ -80,21 +80,13 @@ mod pruned_output; pub use pruned_output::PrunedOutput; mod lmdb_db; -pub use lmdb_db::{ - create_lmdb_database, - create_recovery_lmdb_database, - LMDBDatabase, - LMDB_DB_BLOCK_HASHES, - LMDB_DB_HEADERS, - LMDB_DB_KERNELS, - LMDB_DB_METADATA, - LMDB_DB_MONERO_SEED_HEIGHT, - LMDB_DB_ORPHANS, - LMDB_DB_UTXOS, -}; +pub use lmdb_db::{create_lmdb_database, create_recovery_lmdb_database, LMDBDatabase}; mod stats; pub use stats::{DbBasicStats, DbSize, DbStat, DbTotalSizeStats}; mod 
target_difficulties; +mod utxo_mined_info; +pub use utxo_mined_info::*; + pub use target_difficulties::TargetDifficulties; diff --git a/base_layer/core/src/chain_storage/pruned_output.rs b/base_layer/core/src/chain_storage/pruned_output.rs index 8c753f30a5..20f54c37ef 100644 --- a/base_layer/core/src/chain_storage/pruned_output.rs +++ b/base_layer/core/src/chain_storage/pruned_output.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::transactions::transaction::TransactionOutput; use tari_common_types::types::HashOutput; +use tari_crypto::tari_utilities::Hashable; #[allow(clippy::large_enum_variant)] #[derive(Debug, PartialEq)] @@ -38,4 +39,14 @@ impl PrunedOutput { pub fn is_pruned(&self) -> bool { matches!(self, PrunedOutput::Pruned { .. }) } + + pub fn hash(&self) -> Vec { + match self { + PrunedOutput::Pruned { + output_hash, + witness_hash: _, + } => output_hash.clone(), + PrunedOutput::NotPruned { output } => output.hash(), + } + } } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 517df77915..f0eabd8a48 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -429,6 +429,6 @@ mod fetch_total_size_stats { let db = setup(); let stats = db.fetch_total_size_stats().unwrap(); // Returns one per db - assert_eq!(stats.sizes().len(), 19); + assert_eq!(stats.sizes().len(), 20); } } diff --git a/base_layer/core/src/chain_storage/utxo_mined_info.rs b/base_layer/core/src/chain_storage/utxo_mined_info.rs new file mode 100644 index 0000000000..1105ceb981 --- /dev/null +++ b/base_layer/core/src/chain_storage/utxo_mined_info.rs @@ -0,0 +1,31 @@ +// Copyright 2021. 
The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use crate::chain_storage::PrunedOutput; +use tari_common_types::types::BlockHash; + +pub struct UtxoMinedInfo { + pub output: PrunedOutput, + pub mmr_position: u32, + pub mined_height: u64, + pub header_hash: BlockHash, +} diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 6110ba9648..c6e0589666 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -43,6 +43,7 @@ use crate::{ LMDBDatabase, MmrTree, PrunedOutput, + UtxoMinedInfo, Validators, }, consensus::{chain_strength_comparer::ChainStrengthComparerBuilder, ConsensusConstantsBuilder, ConsensusManager}, @@ -272,7 +273,7 @@ impl BlockchainBackend for TempDatabase { .fetch_utxos_by_mmr_position(start, end, deleted) } - fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { + fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { self.db.as_ref().unwrap().fetch_output(output_hash) } @@ -368,6 +369,16 @@ impl BlockchainBackend for TempDatabase { fn fetch_total_size_stats(&self) -> Result { self.db.as_ref().unwrap().fetch_total_size_stats() } + + fn fetch_header_hash_by_deleted_mmr_positions( + &self, + mmr_positions: Vec, + ) -> Result>, ChainStorageError> { + self.db + .as_ref() + .unwrap() + .fetch_header_hash_by_deleted_mmr_positions(mmr_positions) + } } pub fn create_chained_blocks( diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 99c7213ec8..a7c16f4dd3 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -1759,3 +1759,97 @@ fn input_malleability() { let mod_block_hash = mod_block.hash(); assert_ne!(*block_hash, mod_block_hash); } + +#[allow(clippy::identity_op)] +#[test] +fn fetch_deleted_position_block_hash() { + // Create Main Chain + let network = 
Network::LocalNet; + let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); + // Block 1 + let txs = vec![txn_schema!( + from: vec![outputs[0][0].clone()], + to: vec![11 * T, 12 * T, 13 * T, 14 * T] + )]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(1), + &consensus_manager, + ) + .unwrap() + .assert_added(); + // Block 2 + let txs = vec![txn_schema!(from: vec![outputs[1][3].clone()], to: vec![6 * T])]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(3), + &consensus_manager, + ) + .unwrap() + .assert_added(); + // Blocks 3 - 12 so we can test the search in the bottom and top half + for i in 0..10 { + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + vec![], + Difficulty::from(4 + i), + &consensus_manager, + ) + .unwrap() + .assert_added(); + } + // Block 13 + let txs = vec![txn_schema!(from: vec![outputs[2][0].clone()], to: vec![2 * T])]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(30), + &consensus_manager, + ) + .unwrap() + .assert_added(); + // Block 14 + let txs = vec![txn_schema!(from: vec![outputs[13][0].clone()], to: vec![1 * T])]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(50), + &consensus_manager, + ) + .unwrap() + .assert_added(); + + let block1_hash = store.fetch_header(1).unwrap().unwrap().hash(); + let block2_hash = store.fetch_header(2).unwrap().unwrap().hash(); + let block13_hash = store.fetch_header(13).unwrap().unwrap().hash(); + let block14_hash = store.fetch_header(14).unwrap().unwrap().hash(); + + let deleted_positions = store + .fetch_complete_deleted_bitmap_at(block14_hash.clone()) + .unwrap() + .bitmap() + .to_vec(); + + let headers = store + 
.fetch_header_hash_by_deleted_mmr_positions(deleted_positions) + .unwrap(); + let mut headers = headers.into_iter().map(Option::unwrap).collect::>(); + headers.sort_by(|(a, _), (b, _)| a.cmp(b)); + + assert_eq!(headers[3], (14, block14_hash)); + assert_eq!(headers[2], (13, block13_hash)); + assert_eq!(headers[1], (2, block2_hash)); + assert_eq!(headers[0], (1, block1_hash)); +} diff --git a/base_layer/wallet/Cargo.toml b/base_layer/wallet/Cargo.toml index 05122dafe7..301ca45307 100644 --- a/base_layer/wallet/Cargo.toml +++ b/base_layer/wallet/Cargo.toml @@ -19,27 +19,28 @@ tari_shutdown = { version = "^0.10", path = "../../infrastructure/shutdown" } tari_storage = { version = "^0.10", path = "../../infrastructure/storage" } aes-gcm = "^0.8" +async-trait = "0.1.50" +bincode = "1.3.1" blake2 = "0.9.0" chrono = { version = "0.4.6", features = ["serde"] } crossbeam-channel = "0.3.8" -digest = "0.9.0" diesel = { version = "1.4.7", features = ["sqlite", "serde_json", "chrono"] } diesel_migrations = "1.4.0" -libsqlite3-sys = { version = ">=0.8.0, <0.13.0", features = ["bundled"], optional = true } +digest = "0.9.0" fs2 = "0.3.0" futures = { version = "^0.3.1", features = ["compat", "std"] } +libsqlite3-sys = { version = ">=0.8.0, <0.13.0", features = ["bundled"], optional = true } +lmdb-zero = "0.4.4" log = "0.4.6" log4rs = { version = "1.0.0", features = ["console_appender", "file_appender", "yaml_format"] } -lmdb-zero = "0.4.4" rand = "0.8" serde = { version = "1.0.89", features = ["derive"] } serde_json = "1.0.39" -tokio = { version = "1.11", features = ["sync", "macros"] } -tower = "0.3.0-alpha.2" tempfile = "3.1.0" -time = { version = "0.1.39" } thiserror = "1.0.26" -bincode = "1.3.1" +time = { version = "0.1.39" } +tokio = { version = "1.11", features = ["sync", "macros"] } +tower = "0.3.0-alpha.2" [dependencies.tari_core] path = "../../base_layer/core" diff --git a/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/down.sql 
b/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/down.sql index f6a37517fa..c1938c169c 100644 --- a/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/down.sql +++ b/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/down.sql @@ -1,13 +1,16 @@ -- Rename the master_key column to master_seed -PRAGMA foreign_keys=off; -ALTER TABLE key_manager_states RENAME TO key_manager_states_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE key_manager_states + RENAME TO key_manager_states_old; CREATE TABLE key_manager_states ( - id INTEGER PRIMARY KEY, - master_seed BLOB NOT NULL, - branch_seed TEXT NOT NULL, - primary_key_index INTEGER NOT NULL, - timestamp DATETIME NOT NULL + id INTEGER PRIMARY KEY, + master_seed BLOB NOT NULL, + branch_seed TEXT NOT NULL, + primary_key_index INTEGER NOT NULL, + timestamp DATETIME NOT NULL ); -INSERT INTO key_manager_states (id, master_seed, branch_seed, primary_key_index, timestamp) SELECT id, master_key, branch_seed, primary_key_index, timestamp FROM key_manager_states_old; +INSERT INTO key_manager_states (id, master_seed, branch_seed, primary_key_index, timestamp) +SELECT id, master_key, branch_seed, primary_key_index, timestamp +FROM key_manager_states_old; DROP TABLE key_manager_states_old; -PRAGMA foreign_keys=on; \ No newline at end of file +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/up.sql b/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/up.sql index b03a9d1bda..50e7955fe6 100644 --- a/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/up.sql +++ b/base_layer/wallet/migrations/2019-10-30-084148_output_manager_service/up.sql @@ -1,22 +1,22 @@ CREATE TABLE outputs ( spending_key BLOB PRIMARY KEY NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL + value INTEGER NOT NULL, + flags INTEGER NOT 
NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL ); CREATE TABLE pending_transaction_outputs ( - tx_id INTEGER PRIMARY KEY NOT NULL, - short_term INTEGER NOT NULL, - timestamp DATETIME NOT NULL + tx_id BIGINT PRIMARY KEY NOT NULL, + short_term INTEGER NOT NULL, + timestamp DATETIME NOT NULL ); CREATE TABLE key_manager_states ( - id INTEGER PRIMARY KEY, - master_seed BLOB NOT NULL, - branch_seed TEXT NOT NULL, - primary_key_index INTEGER NOT NULL, - timestamp DATETIME NOT NULL -); \ No newline at end of file + id INTEGER PRIMARY KEY, + master_seed BLOB NOT NULL, + branch_seed TEXT NOT NULL, + primary_key_index INTEGER NOT NULL, + timestamp DATETIME NOT NULL +); diff --git a/base_layer/wallet/migrations/2019-11-20-090620_transaction_service/up.sql b/base_layer/wallet/migrations/2019-11-20-090620_transaction_service/up.sql index 29eb8b0935..d275563e06 100644 --- a/base_layer/wallet/migrations/2019-11-20-090620_transaction_service/up.sql +++ b/base_layer/wallet/migrations/2019-11-20-090620_transaction_service/up.sql @@ -1,37 +1,37 @@ CREATE TABLE outbound_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - sender_protocol TEXT NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL + tx_id BIGINT PRIMARY KEY NOT NULL, + destination_public_key BLOB NOT NULL, + amount BIGINT NOT NULL, + fee BIGINT NOT NULL, + sender_protocol TEXT NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL ); CREATE TABLE inbound_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - receiver_protocol TEXT NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL + tx_id BIGINT PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + amount BIGINT NOT NULL, + receiver_protocol TEXT NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL ); CREATE TABLE 
coinbase_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - amount INTEGER NOT NULL, - commitment BLOB NOT NULL, - timestamp DATETIME NOT NULL + tx_id BIGINT PRIMARY KEY NOT NULL, + amount BIGINT NOT NULL, + commitment BLOB NOT NULL, + timestamp DATETIME NOT NULL ); CREATE TABLE completed_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - transaction_protocol TEXT NOT NULL, - status INTEGER NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL -); \ No newline at end of file + tx_id BIGINT PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + destination_public_key BLOB NOT NULL, + amount BIGINT NOT NULL, + fee BIGINT NOT NULL, + transaction_protocol TEXT NOT NULL, + status INTEGER NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL +); diff --git a/base_layer/wallet/migrations/2019-11-26-105357_contacts/up.sql b/base_layer/wallet/migrations/2019-11-26-105357_contacts/up.sql index ddb867a673..214289670a 100644 --- a/base_layer/wallet/migrations/2019-11-26-105357_contacts/up.sql +++ b/base_layer/wallet/migrations/2019-11-26-105357_contacts/up.sql @@ -1,4 +1,4 @@ CREATE TABLE contacts ( public_key BLOB PRIMARY KEY NOT NULL UNIQUE, - alias TEXT NOT NULL -); \ No newline at end of file + alias TEXT NOT NULL +); diff --git a/base_layer/wallet/migrations/2019-11-26-120903_peers/up.sql b/base_layer/wallet/migrations/2019-11-26-120903_peers/up.sql index 4a95468993..17a0212dd4 100644 --- a/base_layer/wallet/migrations/2019-11-26-120903_peers/up.sql +++ b/base_layer/wallet/migrations/2019-11-26-120903_peers/up.sql @@ -1,4 +1,4 @@ CREATE TABLE peers ( public_key BLOB PRIMARY KEY NOT NULL UNIQUE, - peer TEXT NOT NULL -); \ No newline at end of file + peer TEXT NOT NULL +); diff --git a/base_layer/wallet/migrations/2020-05-05-122254_add_canceled_flag/up.sql 
b/base_layer/wallet/migrations/2020-05-05-122254_add_canceled_flag/up.sql index bb86d7580c..6c7ad2e09a 100644 --- a/base_layer/wallet/migrations/2020-05-05-122254_add_canceled_flag/up.sql +++ b/base_layer/wallet/migrations/2020-05-05-122254_add_canceled_flag/up.sql @@ -8,5 +8,6 @@ ALTER TABLE outbound_transactions ADD COLUMN cancelled INTEGER NOT NULL DEFAULT 0; UPDATE completed_transactions -SET cancelled = 1, status = 1 -WHERE status = 5; \ No newline at end of file +SET cancelled = 1, + status = 1 +WHERE status = 5; diff --git a/base_layer/wallet/migrations/2020-05-11-124646_remove_coinbase_table/down.sql b/base_layer/wallet/migrations/2020-05-11-124646_remove_coinbase_table/down.sql index 66d45bf275..53f41a9ffa 100644 --- a/base_layer/wallet/migrations/2020-05-11-124646_remove_coinbase_table/down.sql +++ b/base_layer/wallet/migrations/2020-05-11-124646_remove_coinbase_table/down.sql @@ -1,6 +1,6 @@ CREATE TABLE coinbase_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - amount INTEGER NOT NULL, - commitment BLOB NOT NULL, - timestamp DATETIME NOT NULL -); \ No newline at end of file + tx_id INTEGER PRIMARY KEY NOT NULL, + amount INTEGER NOT NULL, + commitment BLOB NOT NULL, + timestamp DATETIME NOT NULL +); diff --git a/base_layer/wallet/migrations/2020-06-15-084821_add_wallet_settings/up.sql b/base_layer/wallet/migrations/2020-06-15-084821_add_wallet_settings/up.sql index 1829820865..b2233a2ebd 100644 --- a/base_layer/wallet/migrations/2020-06-15-084821_add_wallet_settings/up.sql +++ b/base_layer/wallet/migrations/2020-06-15-084821_add_wallet_settings/up.sql @@ -1,4 +1,4 @@ CREATE TABLE wallet_settings ( - key TEXT PRIMARY KEY NOT NULL, - value TEXT NOT NULL -); \ No newline at end of file + key TEXT PRIMARY KEY NOT NULL, + value TEXT NOT NULL +); diff --git a/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/down.sql b/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/down.sql index 
d2bbd93e64..e68095cee3 100644 --- a/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/down.sql +++ b/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/down.sql @@ -1,14 +1,17 @@ -PRAGMA foreign_keys=off; -ALTER TABLE outputs RENAME TO outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; CREATE TABLE outputs ( - spending_key BLOB PRIMARY KEY NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NULL + spending_key BLOB PRIMARY KEY NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NULL ); -INSERT INTO outputs (spending_key, value, flags, maturity, status, tx_id, hash) SELECT spending_key, value, flags, maturity, status, tx_id, hash FROM outputs_old; +INSERT INTO outputs (spending_key, value, flags, maturity, status, tx_id, hash) +SELECT spending_key, value, flags, maturity, status, tx_id, hash +FROM outputs_old; DROP TABLE outputs_old; -PRAGMA foreign_keys=on; \ No newline at end of file +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/up.sql b/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/up.sql index e6818e92eb..856088c794 100644 --- a/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/up.sql +++ b/base_layer/wallet/migrations/2020-06-29-130334_add_id_primary_key_to_outputs/up.sql @@ -1,16 +1,19 @@ -PRAGMA foreign_keys=off; -ALTER TABLE outputs RENAME TO outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id 
INTEGER NULL, - hash BLOB NULL + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NULL, + spending_key BLOB NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NULL ); -INSERT INTO outputs (spending_key, value, flags, maturity, status, tx_id, hash) SELECT spending_key, value, flags, maturity, status, tx_id, hash FROM outputs_old; +INSERT INTO outputs (spending_key, value, flags, maturity, status, tx_id, hash) +SELECT spending_key, value, flags, maturity, status, tx_id, hash +FROM outputs_old; DROP TABLE outputs_old; -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2020-07-08-083612_remove_peer_table/down.sql b/base_layer/wallet/migrations/2020-07-08-083612_remove_peer_table/down.sql index 4a95468993..17a0212dd4 100644 --- a/base_layer/wallet/migrations/2020-07-08-083612_remove_peer_table/down.sql +++ b/base_layer/wallet/migrations/2020-07-08-083612_remove_peer_table/down.sql @@ -1,4 +1,4 @@ CREATE TABLE peers ( public_key BLOB PRIMARY KEY NOT NULL UNIQUE, - peer TEXT NOT NULL -); \ No newline at end of file + peer TEXT NOT NULL +); diff --git a/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/down.sql b/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/down.sql index 959e03eeb1..069db33c5f 100644 --- a/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/down.sql +++ b/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/down.sql @@ -1,50 +1,66 @@ -- This file should undo anything in `up.sql` -PRAGMA foreign_keys=off; -ALTER TABLE key_manager_states RENAME TO key_manager_states_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE key_manager_states + RENAME TO key_manager_states_old; CREATE TABLE key_manager_states ( - id INTEGER PRIMARY KEY, - master_seed BLOB NOT NULL, - branch_seed TEXT NOT NULL, - primary_key_index INTEGER NOT NULL, - timestamp DATETIME NOT 
NULL + id INTEGER PRIMARY KEY, + master_seed BLOB NOT NULL, + branch_seed TEXT NOT NULL, + primary_key_index INTEGER NOT NULL, + timestamp DATETIME NOT NULL ); INSERT INTO key_manager_states (id, master_seed, branch_seed, primary_key_index, timestamp) SELECT id, master_key, branch_seed, primary_key_index, timestamp FROM key_manager_states_old; DROP TABLE key_manager_states_old; -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; -PRAGMA foreign_keys=off; -ALTER TABLE pending_transaction_outputs RENAME TO pending_transaction_outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE pending_transaction_outputs + RENAME TO pending_transaction_outputs_old; CREATE TABLE pending_transaction_outputs ( - tx_id INTEGER PRIMARY KEY NOT NULL, - short_term INTEGER NOT NULL, - timestamp DATETIME NOT NULL + tx_id INTEGER PRIMARY KEY NOT NULL, + short_term INTEGER NOT NULL, + timestamp DATETIME NOT NULL ); -INSERT INTO pending_transaction_outputs (tx_id, short_term, timestamp) SELECT tx_id, short_term, timestamp FROM pending_transaction_outputs_old; +INSERT INTO pending_transaction_outputs (tx_id, short_term, timestamp) +SELECT tx_id, short_term, timestamp +FROM pending_transaction_outputs_old; DROP TABLE pending_transaction_outputs_old; -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; -PRAGMA foreign_keys=off; -ALTER TABLE completed_transactions RENAME TO completed_transactions_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE completed_transactions + RENAME TO completed_transactions_old; CREATE TABLE completed_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - transaction_protocol TEXT NOT NULL, - status INTEGER NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direction INTEGER NULL + tx_id INTEGER PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + destination_public_key BLOB NOT NULL, + amount 
INTEGER NOT NULL, + fee INTEGER NOT NULL, + transaction_protocol TEXT NOT NULL, + status INTEGER NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direction INTEGER NULL ); -INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction) -SELECT tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction +INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, + status, message, timestamp, cancelled, direction) +SELECT tx_id, + source_public_key, + destination_public_key, + amount, + fee, + transaction_protocol, + status, + message, + timestamp, + cancelled, + direction FROM completed_transactions_old; DROP TABLE completed_transactions_old; -PRAGMA foreign_keys=on; \ No newline at end of file +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/up.sql b/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/up.sql index 14d4297d0c..fcf8aa3ca0 100644 --- a/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/up.sql +++ b/base_layer/wallet/migrations/2020-07-20-084915_add_coinbase_handling/up.sql @@ -1,20 +1,23 @@ -- Rename the master_seed column to master_key -PRAGMA foreign_keys=off; -ALTER TABLE key_manager_states RENAME TO key_manager_states_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE key_manager_states + RENAME TO key_manager_states_old; CREATE TABLE key_manager_states ( - id INTEGER PRIMARY KEY, - master_key BLOB NOT NULL, - branch_seed TEXT NOT NULL, - primary_key_index INTEGER NOT NULL, - timestamp DATETIME NOT NULL + id INTEGER PRIMARY KEY NOT NULL, + master_key BLOB NOT NULL, + branch_seed TEXT NOT NULL, + primary_key_index BIGINT NOT NULL, + timestamp DATETIME NOT NULL ); 
INSERT INTO key_manager_states (id, master_key, branch_seed, primary_key_index, timestamp) SELECT id, master_seed, branch_seed, primary_key_index, timestamp - FROM key_manager_states_old; +FROM key_manager_states_old; DROP TABLE key_manager_states_old; -PRAGMA foreign_keys=on; -ALTER TABLE pending_transaction_outputs ADD COLUMN coinbase_block_height INTEGER NULL; +PRAGMA foreign_keys=ON; +ALTER TABLE pending_transaction_outputs + ADD COLUMN coinbase_block_height BIGINT NULL; -ALTER TABLE completed_transactions ADD COLUMN coinbase_block_height INTEGER NULL; +ALTER TABLE completed_transactions + ADD COLUMN coinbase_block_height BIGINT NULL; diff --git a/base_layer/wallet/migrations/2020-08-17-141407_add_resend_count_and_timestamp_add_status_to_pending_txs/down.sql b/base_layer/wallet/migrations/2020-08-17-141407_add_resend_count_and_timestamp_add_status_to_pending_txs/down.sql index 08b4292c51..07b5741e70 100644 --- a/base_layer/wallet/migrations/2020-08-17-141407_add_resend_count_and_timestamp_add_status_to_pending_txs/down.sql +++ b/base_layer/wallet/migrations/2020-08-17-141407_add_resend_count_and_timestamp_add_status_to_pending_txs/down.sql @@ -1,58 +1,90 @@ -PRAGMA foreign_keys=off; -ALTER TABLE completed_transactions RENAME TO completed_transactions_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE completed_transactions + RENAME TO completed_transactions_old; CREATE TABLE completed_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - transaction_protocol TEXT NOT NULL, - status INTEGER NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direction INTEGER NULL, - coinbase_block_height INTEGER NULL + tx_id INTEGER PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + destination_public_key BLOB NOT NULL, + amount INTEGER NOT NULL, + fee INTEGER NOT NULL, + transaction_protocol TEXT 
NOT NULL, + status INTEGER NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direction INTEGER NULL, + coinbase_block_height INTEGER NULL ); -INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height) -SELECT tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height +INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, + status, message, timestamp, cancelled, direction, coinbase_block_height) +SELECT tx_id, + source_public_key, + destination_public_key, + amount, + fee, + transaction_protocol, + status, + message, + timestamp, + cancelled, + direction, + coinbase_block_height FROM completed_transactions_old; DROP TABLE completed_transactions_old; -ALTER TABLE inbound_transactions RENAME TO inbound_transactions_old; +ALTER TABLE inbound_transactions + RENAME TO inbound_transactions_old; CREATE TABLE inbound_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - receiver_protocol TEXT NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direct_send_success INTEGER NOT NULL DEFAULT 0 + tx_id INTEGER PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + amount INTEGER NOT NULL, + receiver_protocol TEXT NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direct_send_success INTEGER NOT NULL DEFAULT 0 ); -INSERT INTO inbound_transactions (tx_id, source_public_key, amount, receiver_protocol, message, timestamp, cancelled, direct_send_success) -SELECT tx_id, source_public_key, amount, receiver_protocol, message, timestamp, cancelled, 
direct_send_success +INSERT INTO inbound_transactions (tx_id, source_public_key, amount, receiver_protocol, message, timestamp, cancelled, + direct_send_success) +SELECT tx_id, + source_public_key, + amount, + receiver_protocol, + message, + timestamp, + cancelled, + direct_send_success FROM inbound_transactions_old; DROP TABLE inbound_transactions_old; -ALTER TABLE outbound_transactions RENAME TO outbound_transactions_old; +ALTER TABLE outbound_transactions + RENAME TO outbound_transactions_old; CREATE TABLE outbound_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - sender_protocol TEXT NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direct_send_success INTEGER NOT NULL DEFAULT 0 + tx_id INTEGER PRIMARY KEY NOT NULL, + destination_public_key BLOB NOT NULL, + amount INTEGER NOT NULL, + fee INTEGER NOT NULL, + sender_protocol TEXT NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direct_send_success INTEGER NOT NULL DEFAULT 0 ); -INSERT INTO outbound_transactions (tx_id, destination_public_key, amount, fee, sender_protocol, message, timestamp, cancelled, direct_send_success) -SELECT tx_id, destination_public_key, amount, fee, sender_protocol, message, timestamp, cancelled, direct_send_success +INSERT INTO outbound_transactions (tx_id, destination_public_key, amount, fee, sender_protocol, message, timestamp, + cancelled, direct_send_success) +SELECT tx_id, + destination_public_key, + amount, + fee, + sender_protocol, + message, + timestamp, + cancelled, + direct_send_success FROM outbound_transactions_old; DROP TABLE outbound_transactions_old; -PRAGMA foreign_keys=on; \ No newline at end of file +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2020-10-20-094420_add_client_key_value_store/up.sql 
b/base_layer/wallet/migrations/2020-10-20-094420_add_client_key_value_store/up.sql index bb15e74966..a01f2132e7 100644 --- a/base_layer/wallet/migrations/2020-10-20-094420_add_client_key_value_store/up.sql +++ b/base_layer/wallet/migrations/2020-10-20-094420_add_client_key_value_store/up.sql @@ -1,4 +1,4 @@ CREATE TABLE client_key_values ( - key TEXT PRIMARY KEY NOT NULL, - value TEXT NOT NULL -); \ No newline at end of file + key TEXT PRIMARY KEY NOT NULL, + value TEXT NOT NULL +); diff --git a/base_layer/wallet/migrations/2021-02-03-074953_add_valid_flag_to_transaction/down.sql b/base_layer/wallet/migrations/2021-02-03-074953_add_valid_flag_to_transaction/down.sql index b903ee9d24..94f890addd 100644 --- a/base_layer/wallet/migrations/2021-02-03-074953_add_valid_flag_to_transaction/down.sql +++ b/base_layer/wallet/migrations/2021-02-03-074953_add_valid_flag_to_transaction/down.sql @@ -1,21 +1,37 @@ -PRAGMA foreign_keys=off; -ALTER TABLE completed_transactions RENAME TO completed_transactions_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE completed_transactions + RENAME TO completed_transactions_old; CREATE TABLE completed_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - transaction_protocol TEXT NOT NULL, - status INTEGER NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direction INTEGER NULL, - coinbase_block_height INTEGER NULL, - send_count INTEGER NOT NULL DEFAULT 0, - last_send_timestamp DATETIME NULL, + tx_id INTEGER PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + destination_public_key BLOB NOT NULL, + amount INTEGER NOT NULL, + fee INTEGER NOT NULL, + transaction_protocol TEXT NOT NULL, + status INTEGER NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direction INTEGER NULL, + coinbase_block_height 
INTEGER NULL, + send_count INTEGER NOT NULL DEFAULT 0, + last_send_timestamp DATETIME NULL ); -INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, last_send_timestamp) -SELECT tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, last_send_timestamp -FROM completed_transactions_old; \ No newline at end of file +INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, + status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, + last_send_timestamp) +SELECT tx_id, + source_public_key, + destination_public_key, + amount, + fee, + transaction_protocol, + status, + message, + timestamp, + cancelled, + direction, + coinbase_block_height, + send_count, + last_send_timestamp +FROM completed_transactions_old; diff --git a/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/down.sql b/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/down.sql index f580495c34..f25583bae2 100644 --- a/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/down.sql +++ b/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/down.sql @@ -1,16 +1,19 @@ -PRAGMA foreign_keys=off; -ALTER TABLE outputs RENAME TO outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NULL, + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NULL, + spending_key BLOB NOT NULL, + value
INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NULL ); -INSERT INTO outputs SELECT * FROM outputs_old; +INSERT INTO outputs +SELECT * +FROM outputs_old; DROP TABLE outputs_old; -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/up.sql b/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/up.sql index 401f453184..6b3ed366e5 100644 --- a/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/up.sql +++ b/base_layer/wallet/migrations/2021-02-15-084900_add_outputs_unique_commitment/up.sql @@ -1,17 +1,20 @@ -PRAGMA foreign_keys=off; -ALTER TABLE outputs RENAME TO outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NULL, + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NULL, + spending_key BLOB NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NULL, CONSTRAINT unique_commitment UNIQUE (commitment) ); -INSERT INTO outputs SELECT * FROM outputs_old; +INSERT INTO outputs +SELECT * +FROM outputs_old; DROP TABLE outputs_old; -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2021-03-23-082938_update-outputs-for-tari-script/up.sql b/base_layer/wallet/migrations/2021-03-23-082938_update-outputs-for-tari-script/up.sql index bd458f5dee..7109f61364 100644 --- a/base_layer/wallet/migrations/2021-03-23-082938_update-outputs-for-tari-script/up.sql +++ b/base_layer/wallet/migrations/2021-03-23-082938_update-outputs-for-tari-script/up.sql
@@ -1,23 +1,23 @@ -- This migration is part of a testnet reset and should not be used on db's with existing old data in them -- thus this migration does not accommodate db's with existing rows. -PRAGMA foreign_keys=off; +PRAGMA foreign_keys=OFF; DROP TABLE outputs; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NOT NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NOT NULL, - script BLOB NOT NULL, - input_data BLOB NOT NULL, - height INTEGER NOT NULL, - script_private_key BLOB NOT NULL, - sender_offset_public_key BLOB NOT NULL, + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NOT NULL, + spending_key BLOB NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NOT NULL, + script BLOB NOT NULL, + input_data BLOB NOT NULL, + height INTEGER NOT NULL, + script_private_key BLOB NOT NULL, + sender_offset_public_key BLOB NOT NULL, CONSTRAINT unique_commitment UNIQUE (commitment) ); -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/down.sql b/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/down.sql index 6a4ba25ecc..ea69cb57ca 100644 --- a/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/down.sql +++ b/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/down.sql @@ -1,22 +1,39 @@ -PRAGMA foreign_keys=off; -ALTER TABLE completed_transactions RENAME TO completed_transactions_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE completed_transactions + RENAME TO completed_transactions_old; CREATE TABLE completed_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, 
- fee INTEGER NOT NULL, - transaction_protocol TEXT NOT NULL, - status INTEGER NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direction INTEGER NULL DEFAULT NULL, - coinbase_block_height INTEGER NULL DEFAULT NULL, - send_count INTEGER NOT NULL DEFAULT 0, - last_send_timestamp DATETIME NULL DEFAULT NULL, - valid INTEGER NOT NULL DEFAULT 0, + tx_id INTEGER PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + destination_public_key BLOB NOT NULL, + amount INTEGER NOT NULL, + fee INTEGER NOT NULL, + transaction_protocol TEXT NOT NULL, + status INTEGER NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direction INTEGER NULL DEFAULT NULL, + coinbase_block_height INTEGER NULL DEFAULT NULL, + send_count INTEGER NOT NULL DEFAULT 0, + last_send_timestamp DATETIME NULL DEFAULT NULL, + valid INTEGER NOT NULL DEFAULT 0 ); -INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, last_send_timestamp, valid) -SELECT tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, last_send_timestamp, valid -FROM completed_transactions_old; \ No newline at end of file +INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, + status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, + last_send_timestamp, valid) +SELECT tx_id, + source_public_key, + destination_public_key, + amount, + fee, + transaction_protocol, + status, + message, + timestamp, + cancelled, + direction, + coinbase_block_height, + send_count, + last_send_timestamp, + valid +FROM completed_transactions_old; diff --git
a/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/up.sql b/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/up.sql index 0f583618a9..c88bd991f1 100644 --- a/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/up.sql +++ b/base_layer/wallet/migrations/2021-04-01-081220_add_transaction_confirmations/up.sql @@ -1,2 +1,2 @@ ALTER TABLE completed_transactions - ADD COLUMN confirmations INTEGER NULL DEFAULT NULL; \ No newline at end of file + ADD COLUMN confirmations BIGINT NULL DEFAULT NULL; diff --git a/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/down.sql b/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/down.sql index f017eb0d18..0babec895d 100644 --- a/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/down.sql +++ b/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/down.sql @@ -1,23 +1,41 @@ -PRAGMA foreign_keys=off; -ALTER TABLE completed_transactions RENAME TO completed_transactions_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE completed_transactions + RENAME TO completed_transactions_old; CREATE TABLE completed_transactions ( - tx_id INTEGER PRIMARY KEY NOT NULL, - source_public_key BLOB NOT NULL, - destination_public_key BLOB NOT NULL, - amount INTEGER NOT NULL, - fee INTEGER NOT NULL, - transaction_protocol TEXT NOT NULL, - status INTEGER NOT NULL, - message TEXT NOT NULL, - timestamp DATETIME NOT NULL, - cancelled INTEGER NOT NULL DEFAULT 0, - direction INTEGER NULL DEFAULT NULL, - coinbase_block_height INTEGER NULL DEFAULT NULL, - send_count INTEGER NOT NULL DEFAULT 0, - last_send_timestamp DATETIME NULL DEFAULT NULL, - valid INTEGER NOT NULL DEFAULT 0, - confirmations INTEGER NULL DEFAULT NULL + tx_id INTEGER PRIMARY KEY NOT NULL, + source_public_key BLOB NOT NULL, + destination_public_key BLOB NOT NULL, + amount 
INTEGER NOT NULL, + fee INTEGER NOT NULL, + transaction_protocol TEXT NOT NULL, + status INTEGER NOT NULL, + message TEXT NOT NULL, + timestamp DATETIME NOT NULL, + cancelled INTEGER NOT NULL DEFAULT 0, + direction INTEGER NULL DEFAULT NULL, + coinbase_block_height INTEGER NULL DEFAULT NULL, + send_count INTEGER NOT NULL DEFAULT 0, + last_send_timestamp DATETIME NULL DEFAULT NULL, + valid INTEGER NOT NULL DEFAULT 0, + confirmations INTEGER NULL DEFAULT NULL ); -INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, last_send_timestamp, valid, confirmations) -SELECT tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, last_send_timestamp, valid, confirmations -FROM completed_transactions_old; \ No newline at end of file +INSERT INTO completed_transactions (tx_id, source_public_key, destination_public_key, amount, fee, transaction_protocol, + status, message, timestamp, cancelled, direction, coinbase_block_height, send_count, + last_send_timestamp, valid, confirmations) +SELECT tx_id, + source_public_key, + destination_public_key, + amount, + fee, + transaction_protocol, + status, + message, + timestamp, + cancelled, + direction, + coinbase_block_height, + send_count, + last_send_timestamp, + valid, + confirmations +FROM completed_transactions_old; diff --git a/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/up.sql b/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/up.sql index 5ec7109251..393d990670 100644 --- a/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/up.sql +++ b/base_layer/wallet/migrations/2021-04-19-085137_add_mined_height_to_completed_transaction/up.sql @@ -1,2 +1,2 @@ 
ALTER TABLE completed_transactions - ADD COLUMN mined_height INTEGER NULL; \ No newline at end of file + ADD COLUMN mined_height BIGINT NULL; diff --git a/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/down.sql b/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/down.sql index 441416be3e..1d0edbc2d1 100644 --- a/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/down.sql +++ b/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/down.sql @@ -1 +1 @@ - DROP TABLE IF EXISTS known_one_sided_payment_scripts; +DROP TABLE IF EXISTS known_one_sided_payment_scripts; diff --git a/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/up.sql b/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/up.sql index ed02ad35ea..4c9e28ff3d 100644 --- a/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/up.sql +++ b/base_layer/wallet/migrations/2021-04-29-125155_known_scripts/up.sql @@ -1,6 +1,6 @@ - CREATE TABLE known_one_sided_payment_scripts ( +CREATE TABLE known_one_sided_payment_scripts ( script_hash BLOB PRIMARY KEY NOT NULL, - private_key BLOB NOT NULL, - script BLOB NOT NULL, - input BLOB NOT NULL - ); + private_key BLOB NOT NULL, + script BLOB NOT NULL, + input BLOB NOT NULL +); diff --git a/base_layer/wallet/migrations/2021-06-22-143855_sender_meta_signature/up.sql b/base_layer/wallet/migrations/2021-06-22-143855_sender_meta_signature/up.sql index 846a2e4fa4..6e3d673b6a 100644 --- a/base_layer/wallet/migrations/2021-06-22-143855_sender_meta_signature/up.sql +++ b/base_layer/wallet/migrations/2021-06-22-143855_sender_meta_signature/up.sql @@ -1,25 +1,25 @@ -- This migration is part of a testnet reset and should not be used on db's with existing old data in them -- thus this migration does not accommodate db's with existing rows. 
-PRAGMA foreign_keys=off; +PRAGMA foreign_keys=OFF; DROP TABLE outputs; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NOT NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NOT NULL, - script BLOB NOT NULL, - input_data BLOB NOT NULL, - height INTEGER NOT NULL, - script_private_key BLOB NOT NULL, - script_offset_public_key BLOB NOT NULL, - sender_metadata_signature_key BLOB NOT NULL, - sender_metadata_signature_nonce BLOB NOT NULL, + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NOT NULL, + spending_key BLOB NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NOT NULL, + script BLOB NOT NULL, + input_data BLOB NOT NULL, + height INTEGER NOT NULL, + script_private_key BLOB NOT NULL, + script_offset_public_key BLOB NOT NULL, + sender_metadata_signature_key BLOB NOT NULL, + sender_metadata_signature_nonce BLOB NOT NULL, CONSTRAINT unique_commitment UNIQUE (commitment) ); -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/down.sql b/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/down.sql index 9cca4de384..aa13da0bd7 100644 --- a/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/down.sql +++ b/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/down.sql @@ -1,27 +1,45 @@ -PRAGMA foreign_keys=off; -ALTER TABLE outputs RENAME TO outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NOT NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash 
BLOB NOT NULL, - script BLOB NOT NULL, - input_data BLOB NOT NULL, - height INTEGER NOT NULL, - script_private_key BLOB NOT NULL, - script_offset_public_key BLOB NOT NULL, - sender_metadata_signature_key BLOB NOT NULL, - sender_metadata_signature_nonce BLOB NOT NULL, - CONSTRAINT unique_commitment UNIQUE (commitment) + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NOT NULL, + spending_key BLOB NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NOT NULL, + script BLOB NOT NULL, + input_data BLOB NOT NULL, + height INTEGER NOT NULL, + script_private_key BLOB NOT NULL, + script_offset_public_key BLOB NOT NULL, + sender_metadata_signature_key BLOB NOT NULL, + sender_metadata_signature_nonce BLOB NOT NULL, + CONSTRAINT unique_commitment UNIQUE (commitment) ); -INSERT INTO outputs (id, commitment, spending_key, value, flags, maturity, status, tx_id, hash, script, input_data, height, script_private_key, script_offset_public_key, sender_metadata_signature_key, sender_metadata_signature_nonce) -SELECT id, commitment, spending_key, value, flags, maturity, status, tx_id, hash, script, input_data, 0, script_private_key, script_offset_public_key, sender_metadata_signature_key, sender_metadata_signature_nonce +INSERT INTO outputs (id, commitment, spending_key, value, flags, maturity, status, tx_id, hash, script, input_data, + height, script_private_key, script_offset_public_key, sender_metadata_signature_key, + sender_metadata_signature_nonce) +SELECT id, + commitment, + spending_key, + value, + flags, + maturity, + status, + tx_id, + hash, + script, + input_data, + 0, + script_private_key, + script_offset_public_key, + sender_metadata_signature_key, + sender_metadata_signature_nonce FROM outputs_old; DROP TABLE outputs_old; -PRAGMA foreign_keys=on; \ No newline at end of file +PRAGMA foreign_keys=ON; diff --git 
a/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/up.sql b/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/up.sql index 4a06483da7..9402ababc7 100644 --- a/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/up.sql +++ b/base_layer/wallet/migrations/2021-07-02-090239_remove_height_from_output/up.sql @@ -1,26 +1,43 @@ -PRAGMA foreign_keys=off; -ALTER TABLE outputs RENAME TO outputs_old; +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NOT NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NOT NULL, - script BLOB NOT NULL, - input_data BLOB NOT NULL, - script_private_key BLOB NOT NULL, - script_offset_public_key BLOB NOT NULL, - sender_metadata_signature_key BLOB NOT NULL, - sender_metadata_signature_nonce BLOB NOT NULL, - CONSTRAINT unique_commitment UNIQUE (commitment) + id INTEGER NOT NULL PRIMARY KEY, + commitment BLOB NOT NULL, + spending_key BLOB NOT NULL, + value INTEGER NOT NULL, + flags INTEGER NOT NULL, + maturity INTEGER NOT NULL, + status INTEGER NOT NULL, + tx_id INTEGER NULL, + hash BLOB NOT NULL, + script BLOB NOT NULL, + input_data BLOB NOT NULL, + script_private_key BLOB NOT NULL, + script_offset_public_key BLOB NOT NULL, + sender_metadata_signature_key BLOB NOT NULL, + sender_metadata_signature_nonce BLOB NOT NULL, + CONSTRAINT unique_commitment UNIQUE (commitment) ); -INSERT INTO outputs (id, commitment, spending_key, value, flags, maturity, status, tx_id, hash, script, input_data, script_private_key, script_offset_public_key, sender_metadata_signature_key, sender_metadata_signature_nonce) -SELECT id, commitment, spending_key, value, flags, maturity, status, tx_id, hash, script, input_data, script_private_key, script_offset_public_key, 
sender_metadata_signature_key, sender_metadata_signature_nonce +INSERT INTO outputs (id, commitment, spending_key, value, flags, maturity, status, tx_id, hash, script, input_data, + script_private_key, script_offset_public_key, sender_metadata_signature_key, + sender_metadata_signature_nonce) +SELECT id, + commitment, + spending_key, + value, + flags, + maturity, + status, + tx_id, + hash, + script, + input_data, + script_private_key, + script_offset_public_key, + sender_metadata_signature_key, + sender_metadata_signature_nonce FROM outputs_old; DROP TABLE outputs_old; -PRAGMA foreign_keys=on; \ No newline at end of file +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2021-07-05-13201407_metadata_signature/up.sql b/base_layer/wallet/migrations/2021-07-05-13201407_metadata_signature/up.sql index 50656c8686..145be56e60 100644 --- a/base_layer/wallet/migrations/2021-07-05-13201407_metadata_signature/up.sql +++ b/base_layer/wallet/migrations/2021-07-05-13201407_metadata_signature/up.sql @@ -1,25 +1,25 @@ -- This migration is part of a testnet reset and should not be used on db's with existing old data in them -- thus this migration does not accommodate db's with existing rows. 
-PRAGMA foreign_keys=off; +PRAGMA foreign_keys=OFF; DROP TABLE outputs; CREATE TABLE outputs ( - id INTEGER NOT NULL PRIMARY KEY, - commitment BLOB NOT NULL, - spending_key BLOB NOT NULL, - value INTEGER NOT NULL, - flags INTEGER NOT NULL, - maturity INTEGER NOT NULL, - status INTEGER NOT NULL, - tx_id INTEGER NULL, - hash BLOB NOT NULL, - script BLOB NOT NULL, - input_data BLOB NOT NULL, - script_private_key BLOB NOT NULL, - sender_offset_public_key BLOB NOT NULL, - metadata_signature_nonce BLOB NOT NULL, - metadata_signature_u_key BLOB NOT NULL, - metadata_signature_v_key BLOB NOT NULL, + id INTEGER NOT NULL PRIMARY KEY, --auto inc, + commitment BLOB NULL, + spending_key BLOB NOT NULL, + value BIGINT NOT NULL, + flags INTEGER NOT NULL, + maturity BIGINT NOT NULL, + status INTEGER NOT NULL, + tx_id BIGINT NULL, + hash BLOB NULL, + script BLOB NOT NULL, + input_data BLOB NOT NULL, + script_private_key BLOB NOT NULL, + sender_offset_public_key BLOB NOT NULL, + metadata_signature_nonce BLOB NOT NULL, + metadata_signature_u_key BLOB NOT NULL, + metadata_signature_v_key BLOB NOT NULL, CONSTRAINT unique_commitment UNIQUE (commitment) ); -PRAGMA foreign_keys=on; +PRAGMA foreign_keys=ON; diff --git a/base_layer/wallet/migrations/2021-07-28-120000_add_mined_in_block/down.sql b/base_layer/wallet/migrations/2021-07-28-120000_add_mined_in_block/down.sql new file mode 100644 index 0000000000..6472d3ab75 --- /dev/null +++ b/base_layer/wallet/migrations/2021-07-28-120000_add_mined_in_block/down.sql @@ -0,0 +1 @@ +-- not supported diff --git a/base_layer/wallet/migrations/2021-07-28-120000_add_mined_in_block/up.sql b/base_layer/wallet/migrations/2021-07-28-120000_add_mined_in_block/up.sql new file mode 100644 index 0000000000..06ff11c0f6 --- /dev/null +++ b/base_layer/wallet/migrations/2021-07-28-120000_add_mined_in_block/up.sql @@ -0,0 +1,33 @@ +-- Copyright 2021. 
The Tari Project +-- +-- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +-- following conditions are met: +-- +-- 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +-- disclaimer. +-- +-- 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +-- following disclaimer in the documentation and/or other materials provided with the distribution. +-- +-- 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +-- products derived from this software without specific prior written permission. +-- +-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +-- INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +-- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +ALTER TABLE completed_transactions + ADD mined_in_block BLOB NULL; + +ALTER TABLE outputs + ADD mined_height UNSIGNED BIGINT NULL; + +ALTER TABLE outputs + ADD mined_in_block BLOB NULL; + +ALTER TABLE outputs + ADD mined_mmr_position BIGINT NULL; diff --git a/base_layer/wallet/migrations/2021-08-03-123456_update_outputs_mined/down.sql b/base_layer/wallet/migrations/2021-08-03-123456_update_outputs_mined/down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/base_layer/wallet/migrations/2021-08-03-123456_update_outputs_mined/up.sql b/base_layer/wallet/migrations/2021-08-03-123456_update_outputs_mined/up.sql new file mode 100644 index 0000000000..e21d85bd9d --- /dev/null +++ b/base_layer/wallet/migrations/2021-08-03-123456_update_outputs_mined/up.sql @@ -0,0 +1,32 @@ +-- Copyright 2021. The Tari Project +-- +-- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +-- following conditions are met: +-- +-- 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +-- disclaimer. +-- +-- 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +-- following disclaimer in the documentation and/or other materials provided with the distribution. +-- +-- 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +-- products derived from this software without specific prior written permission. +-- +-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +-- INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-- DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +-- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ALTER TABLE outputs + ADD marked_deleted_at_height BIGINT; +ALTER TABLE outputs + ADD marked_deleted_in_block BLOB; +ALTER TABLE outputs + ADD received_in_tx_id BIGINT; +ALTER TABLE outputs + ADD spent_in_tx_id BIGINT; +UPDATE outputs +SET received_in_tx_id = tx_id; diff --git a/base_layer/wallet/migrations/2021-09-07-145830_remove_pending_transactions_table_and_tx_id_in_outputs/down.sql b/base_layer/wallet/migrations/2021-09-07-145830_remove_pending_transactions_table_and_tx_id_in_outputs/down.sql new file mode 100644 index 0000000000..291a97c5ce --- /dev/null +++ b/base_layer/wallet/migrations/2021-09-07-145830_remove_pending_transactions_table_and_tx_id_in_outputs/down.sql @@ -0,0 +1 @@ +-- This file should undo anything in `up.sql` \ No newline at end of file diff --git a/base_layer/wallet/migrations/2021-09-07-145830_remove_pending_transactions_table_and_tx_id_in_outputs/up.sql b/base_layer/wallet/migrations/2021-09-07-145830_remove_pending_transactions_table_and_tx_id_in_outputs/up.sql new file mode 100644 index 0000000000..ea4cb4a772 --- /dev/null +++ b/base_layer/wallet/migrations/2021-09-07-145830_remove_pending_transactions_table_and_tx_id_in_outputs/up.sql @@ -0,0 +1,66 @@ +DROP TABLE IF EXISTS pending_transaction_outputs; + +-- Remove tx_id column +PRAGMA foreign_keys=OFF; +ALTER TABLE outputs + RENAME TO outputs_old; +CREATE TABLE outputs ( + id INTEGER NOT NULL PRIMARY KEY, --auto inc, + commitment BLOB NULL, + spending_key 
BLOB NOT NULL, + value BIGINT NOT NULL, + flags INTEGER NOT NULL, + maturity BIGINT NOT NULL, + status INTEGER NOT NULL, + hash BLOB NULL, + script BLOB NOT NULL, + input_data BLOB NOT NULL, + script_private_key BLOB NOT NULL, + sender_offset_public_key BLOB NOT NULL, + metadata_signature_nonce BLOB NOT NULL, + metadata_signature_u_key BLOB NOT NULL, + metadata_signature_v_key BLOB NOT NULL, + mined_height UNSIGNED BIGINT NULL, + mined_in_block BLOB NULL, + mined_mmr_position BIGINT NULL, + marked_deleted_at_height BIGINT, + marked_deleted_in_block BLOB, + received_in_tx_id BIGINT, + spent_in_tx_id BIGINT, + coinbase_block_height UNSIGNED BIGINT NULL, + CONSTRAINT unique_commitment UNIQUE (commitment) +); +PRAGMA foreign_keys=ON; + +INSERT INTO outputs (id, commitment, spending_key, value, flags, maturity, status, hash, script, input_data, + script_private_key, sender_offset_public_key, metadata_signature_nonce, metadata_signature_u_key, + metadata_signature_v_key, mined_height, mined_in_block, mined_mmr_position, marked_deleted_at_height, + marked_deleted_in_block, received_in_tx_id, spent_in_tx_id) +SELECT id, + commitment, + spending_key, + value, + flags, + maturity, + status, + hash, + script, + input_data, + script_private_key, + sender_offset_public_key, + metadata_signature_nonce, + metadata_signature_u_key, + metadata_signature_v_key, + mined_height, + mined_in_block, + mined_mmr_position, + marked_deleted_at_height, + marked_deleted_in_block, + received_in_tx_id, + spent_in_tx_id +FROM outputs_old; + +DROP TABLE outputs_old; +PRAGMA foreign_keys=ON; + + diff --git a/base_layer/wallet/migrations/2021-10-01-053552_clear_mined_height/down.sql b/base_layer/wallet/migrations/2021-10-01-053552_clear_mined_height/down.sql new file mode 100644 index 0000000000..291a97c5ce --- /dev/null +++ b/base_layer/wallet/migrations/2021-10-01-053552_clear_mined_height/down.sql @@ -0,0 +1 @@ +-- This file should undo anything in `up.sql` \ No newline at end of file diff --git 
a/base_layer/wallet/migrations/2021-10-01-053552_clear_mined_height/up.sql b/base_layer/wallet/migrations/2021-10-01-053552_clear_mined_height/up.sql new file mode 100644 index 0000000000..1bea3b8d28 --- /dev/null +++ b/base_layer/wallet/migrations/2021-10-01-053552_clear_mined_height/up.sql @@ -0,0 +1,5 @@ +-- mined_height and mined_in_block should always be set together, since mined_in_block is NULL we set mined_height to NULL +-- so that the transactions can be revalidated. +UPDATE completed_transactions +SET mined_height = NULL +WHERE mined_height IS NOT NULL AND mined_in_block IS NULL; \ No newline at end of file diff --git a/base_layer/wallet/src/base_node_service/handle.rs b/base_layer/wallet/src/base_node_service/handle.rs index f495479778..7e318b4a8b 100644 --- a/base_layer/wallet/src/base_node_service/handle.rs +++ b/base_layer/wallet/src/base_node_service/handle.rs @@ -23,7 +23,6 @@ use super::{error::BaseNodeServiceError, service::BaseNodeState}; use std::{sync::Arc, time::Duration}; use tari_common_types::chain_metadata::ChainMetadata; -use tari_comms::peer_manager::Peer; use tari_service_framework::reply_channel::SenderService; use tokio::sync::broadcast; use tower::Service; @@ -34,22 +33,17 @@ pub type BaseNodeEventReceiver = broadcast::Receiver>; #[derive(Debug)] pub enum BaseNodeServiceRequest { GetChainMetadata, - SetBaseNodePeer(Box), - GetBaseNodePeer, GetBaseNodeLatency, } /// API Response enum #[derive(Debug)] pub enum BaseNodeServiceResponse { ChainMetadata(Option), - BaseNodePeerSet, - BaseNodePeer(Option>), Latency(Option), } #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub enum BaseNodeEvent { BaseNodeStateChanged(BaseNodeState), - BaseNodePeerSet(Box), } /// The Base Node Service Handle is a struct that contains the interfaces used to communicate with a running @@ -82,24 +76,6 @@ impl BaseNodeServiceHandle { } } - pub async fn set_base_node_peer(&mut self, peer: Peer) -> Result<(), BaseNodeServiceError> { - match self - .handle - 
.call(BaseNodeServiceRequest::SetBaseNodePeer(Box::new(peer))) - .await?? - { - BaseNodeServiceResponse::BaseNodePeerSet => Ok(()), - _ => Err(BaseNodeServiceError::UnexpectedApiResponse), - } - } - - pub async fn get_base_node_peer(&mut self) -> Result, BaseNodeServiceError> { - match self.handle.call(BaseNodeServiceRequest::GetBaseNodePeer).await?? { - BaseNodeServiceResponse::BaseNodePeer(peer) => Ok(peer.map(|p| *p)), - _ => Err(BaseNodeServiceError::UnexpectedApiResponse), - } - } - pub async fn get_base_node_latency(&mut self) -> Result, BaseNodeServiceError> { match self.handle.call(BaseNodeServiceRequest::GetBaseNodeLatency).await?? { BaseNodeServiceResponse::Latency(latency) => Ok(latency), diff --git a/base_layer/wallet/src/base_node_service/mock_base_node_service.rs b/base_layer/wallet/src/base_node_service/mock_base_node_service.rs index 9aa981150d..ff12309823 100644 --- a/base_layer/wallet/src/base_node_service/mock_base_node_service.rs +++ b/base_layer/wallet/src/base_node_service/mock_base_node_service.rs @@ -103,24 +103,12 @@ impl MockBaseNodeService { } } - fn set_base_node_peer(&mut self, peer: Peer) { - self.base_node_peer = Some(peer); - } - /// This handler is called when requests arrive from the various streams fn handle_request( &mut self, request: BaseNodeServiceRequest, ) -> Result { match request { - BaseNodeServiceRequest::SetBaseNodePeer(peer) => { - self.set_base_node_peer(*peer); - Ok(BaseNodeServiceResponse::BaseNodePeerSet) - }, - BaseNodeServiceRequest::GetBaseNodePeer => { - let peer = self.base_node_peer.clone(); - Ok(BaseNodeServiceResponse::BaseNodePeer(peer.map(Box::new))) - }, BaseNodeServiceRequest::GetChainMetadata => Ok(BaseNodeServiceResponse::ChainMetadata( self.state.chain_metadata.clone(), )), diff --git a/base_layer/wallet/src/base_node_service/monitor.rs b/base_layer/wallet/src/base_node_service/monitor.rs index b12f18341c..ba46b481ef 100644 --- a/base_layer/wallet/src/base_node_service/monitor.rs +++ 
b/base_layer/wallet/src/base_node_service/monitor.rs @@ -25,7 +25,7 @@ use crate::{ handle::{BaseNodeEvent, BaseNodeEventSender}, service::BaseNodeState, }, - connectivity_service::WalletConnectivityHandle, + connectivity_service::WalletConnectivityInterface, error::WalletStorageError, storage::database::{WalletBackend, WalletDatabase}, }; @@ -42,20 +42,24 @@ use tokio::{sync::RwLock, time}; const LOG_TARGET: &str = "wallet::base_node_service::chain_metadata_monitor"; -pub struct BaseNodeMonitor { +pub struct BaseNodeMonitor { interval: Duration, state: Arc>, - db: WalletDatabase, - wallet_connectivity: WalletConnectivityHandle, + db: WalletDatabase, + wallet_connectivity: TWalletConnectivity, event_publisher: BaseNodeEventSender, } -impl BaseNodeMonitor { +impl BaseNodeMonitor +where + TBackend: WalletBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, +{ pub fn new( interval: Duration, state: Arc>, - db: WalletDatabase, - wallet_connectivity: WalletConnectivityHandle, + db: WalletDatabase, + wallet_connectivity: TWalletConnectivity, event_publisher: BaseNodeEventSender, ) -> Self { Self { diff --git a/base_layer/wallet/src/base_node_service/service.rs b/base_layer/wallet/src/base_node_service/service.rs index eb2b91ebda..416ba1eab5 100644 --- a/base_layer/wallet/src/base_node_service/service.rs +++ b/base_layer/wallet/src/base_node_service/service.rs @@ -23,7 +23,7 @@ use super::{ config::BaseNodeServiceConfig, error::BaseNodeServiceError, - handle::{BaseNodeEvent, BaseNodeEventSender, BaseNodeServiceRequest, BaseNodeServiceResponse}, + handle::{BaseNodeEventSender, BaseNodeServiceRequest, BaseNodeServiceResponse}, }; use crate::{ base_node_service::monitor::BaseNodeMonitor, @@ -35,7 +35,6 @@ use futures::{future, StreamExt}; use log::*; use std::{sync::Arc, time::Duration}; use tari_common_types::chain_metadata::ChainMetadata; -use tari_comms::peer_manager::Peer; use tari_service_framework::reply_channel::Receiver; use 
tari_shutdown::ShutdownSignal; use tokio::sync::RwLock; @@ -153,12 +152,6 @@ where T: WalletBackend + 'static Ok(()) } - async fn set_base_node_peer(&mut self, peer: Peer) -> Result<(), BaseNodeServiceError> { - self.wallet_connectivity.set_base_node(peer.clone()).await?; - self.publish_event(BaseNodeEvent::BaseNodePeerSet(Box::new(peer))); - Ok(()) - } - /// This handler is called when requests arrive from the various streams async fn handle_request( &mut self, @@ -169,14 +162,6 @@ where T: WalletBackend + 'static "Handling Wallet Base Node Service Request: {:?}", request ); match request { - BaseNodeServiceRequest::SetBaseNodePeer(peer) => { - self.set_base_node_peer(*peer).await?; - Ok(BaseNodeServiceResponse::BaseNodePeerSet) - }, - BaseNodeServiceRequest::GetBaseNodePeer => { - let peer = self.wallet_connectivity.get_current_base_node_peer().map(Box::new); - Ok(BaseNodeServiceResponse::BaseNodePeer(peer)) - }, BaseNodeServiceRequest::GetChainMetadata => match self.get_state().await.chain_metadata.clone() { Some(metadata) => Ok(BaseNodeServiceResponse::ChainMetadata(Some(metadata))), None => { @@ -190,14 +175,4 @@ where T: WalletBackend + 'static }, } } - - fn publish_event(&self, event: BaseNodeEvent) { - trace!(target: LOG_TARGET, "Publishing event: {:?}", event); - let _ = self.event_publisher.send(Arc::new(event)).map_err(|_| { - trace!( - target: LOG_TARGET, - "Could not publish BaseNodeEvent as there are no subscribers" - ) - }); - } } diff --git a/base_layer/wallet/src/connectivity_service/handle.rs b/base_layer/wallet/src/connectivity_service/handle.rs index 5a35696e14..77ef2ddab3 100644 --- a/base_layer/wallet/src/connectivity_service/handle.rs +++ b/base_layer/wallet/src/connectivity_service/handle.rs @@ -21,10 +21,11 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use super::service::OnlineStatus; -use crate::connectivity_service::{error::WalletConnectivityError, watch::Watch}; +use crate::{connectivity_service::WalletConnectivityInterface, util::watch::Watch}; use tari_comms::{ peer_manager::{NodeId, Peer}, protocol::rpc::RpcClientLease, + types::CommsPublicKey, }; use tari_core::base_node::{rpc::BaseNodeWalletRpcClient, sync::rpc::BaseNodeSyncRpcClient}; use tokio::sync::{mpsc, oneshot, watch}; @@ -53,10 +54,16 @@ impl WalletConnectivityHandle { online_status_rx, } } +} + +#[async_trait::async_trait] +impl WalletConnectivityInterface for WalletConnectivityHandle { + fn set_base_node(&mut self, base_node_peer: Peer) { + self.base_node_watch.send(Some(base_node_peer)); + } - pub async fn set_base_node(&mut self, base_node_peer: Peer) -> Result<(), WalletConnectivityError> { - self.base_node_watch.broadcast(Some(base_node_peer)); - Ok(()) + fn get_current_base_node_watcher(&self) -> watch::Receiver> { + self.base_node_watch.get_receiver() } /// Obtain a BaseNodeWalletRpcClient. @@ -65,7 +72,7 @@ impl WalletConnectivityHandle { /// node/nodes. It will block until this happens. The ONLY other time it will return is if the node is /// shutting down, where it will return None. Use this function whenever no work can be done without a /// BaseNodeWalletRpcClient RPC session. - pub async fn obtain_base_node_wallet_rpc_client(&mut self) -> Option> { + async fn obtain_base_node_wallet_rpc_client(&mut self) -> Option> { let (reply_tx, reply_rx) = oneshot::channel(); // Under what conditions do the (1) mpsc channel and (2) oneshot channel error? // (1) when the receiver has been dropped @@ -88,7 +95,7 @@ impl WalletConnectivityHandle { /// node/nodes. It will block until this happens. The ONLY other time it will return is if the node is /// shutting down, where it will return None. Use this function whenever no work can be done without a /// BaseNodeSyncRpcClient RPC session. 
- pub async fn obtain_base_node_sync_rpc_client(&mut self) -> Option> { + async fn obtain_base_node_sync_rpc_client(&mut self) -> Option> { let (reply_tx, reply_rx) = oneshot::channel(); self.sender .send(WalletConnectivityRequest::ObtainBaseNodeSyncRpcClient(reply_tx)) @@ -98,19 +105,27 @@ impl WalletConnectivityHandle { reply_rx.await.ok() } - pub fn get_connectivity_status(&mut self) -> OnlineStatus { + fn get_connectivity_status(&mut self) -> OnlineStatus { *self.online_status_rx.borrow() } - pub fn get_connectivity_status_watch(&self) -> watch::Receiver { + fn get_connectivity_status_watch(&self) -> watch::Receiver { self.online_status_rx.clone() } - pub fn get_current_base_node_peer(&self) -> Option { + fn get_current_base_node_peer(&self) -> Option { self.base_node_watch.borrow().clone() } - pub fn get_current_base_node_id(&self) -> Option { + fn get_current_base_node_peer_public_key(&self) -> Option { + self.base_node_watch.borrow().as_ref().map(|p| p.public_key.clone()) + } + + fn get_current_base_node_id(&self) -> Option { self.base_node_watch.borrow().as_ref().map(|p| p.node_id.clone()) } + + fn is_base_node_set(&self) -> bool { + self.base_node_watch.borrow().is_some() + } } diff --git a/base_layer/wallet/src/connectivity_service/initializer.rs b/base_layer/wallet/src/connectivity_service/initializer.rs index 1610a834e3..d8a512c75a 100644 --- a/base_layer/wallet/src/connectivity_service/initializer.rs +++ b/base_layer/wallet/src/connectivity_service/initializer.rs @@ -28,8 +28,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use super::{handle::WalletConnectivityHandle, service::WalletConnectivityService, watch::Watch}; -use crate::{base_node_service::config::BaseNodeServiceConfig, connectivity_service::service::OnlineStatus}; +use super::{handle::WalletConnectivityHandle, service::WalletConnectivityService}; +use crate::{ + base_node_service::config::BaseNodeServiceConfig, + connectivity_service::service::OnlineStatus, + util::watch::Watch, +}; use tari_service_framework::{async_trait, ServiceInitializationError, ServiceInitializer, ServiceInitializerContext}; use tokio::sync::mpsc; diff --git a/base_layer/wallet/src/connectivity_service/interface.rs b/base_layer/wallet/src/connectivity_service/interface.rs new file mode 100644 index 0000000000..5dc382ec62 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/interface.rs @@ -0,0 +1,65 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::connectivity_service::OnlineStatus; +use tari_comms::{ + peer_manager::{NodeId, Peer}, + protocol::rpc::RpcClientLease, + types::CommsPublicKey, +}; +use tari_core::base_node::{rpc::BaseNodeWalletRpcClient, sync::rpc::BaseNodeSyncRpcClient}; +use tokio::sync::watch; + +#[async_trait::async_trait] +pub trait WalletConnectivityInterface: Clone + Send + Sync + 'static { + fn set_base_node(&mut self, base_node_peer: Peer); + + fn get_current_base_node_watcher(&self) -> watch::Receiver>; + + /// Obtain a BaseNodeWalletRpcClient. + /// + /// This can be relied on to obtain a pooled BaseNodeWalletRpcClient rpc session from a currently selected base + /// node/nodes. It will block until this happens. The ONLY other time it will return is if the node is + /// shutting down, where it will return None. Use this function whenever no work can be done without a + /// BaseNodeWalletRpcClient RPC session. + async fn obtain_base_node_wallet_rpc_client(&mut self) -> Option>; + + /// Obtain a BaseNodeSyncRpcClient. + /// + /// This can be relied on to obtain a pooled BaseNodeSyncRpcClient rpc session from a currently selected base + /// node/nodes. It will block until this happens. The ONLY other time it will return is if the node is + /// shutting down, where it will return None. Use this function whenever no work can be done without a + /// BaseNodeSyncRpcClient RPC session. 
+ async fn obtain_base_node_sync_rpc_client(&mut self) -> Option>; + + fn get_connectivity_status(&mut self) -> OnlineStatus; + + fn get_connectivity_status_watch(&self) -> watch::Receiver; + + fn get_current_base_node_peer(&self) -> Option; + + fn get_current_base_node_peer_public_key(&self) -> Option; + + fn get_current_base_node_id(&self) -> Option; + + fn is_base_node_set(&self) -> bool; +} diff --git a/base_layer/wallet/src/connectivity_service/mock.rs b/base_layer/wallet/src/connectivity_service/mock.rs new file mode 100644 index 0000000000..b693535c71 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/mock.rs @@ -0,0 +1,132 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::{ + connectivity_service::{OnlineStatus, WalletConnectivityInterface}, + util::watch::Watch, +}; +use tari_comms::{ + peer_manager::{NodeId, Peer}, + protocol::rpc::RpcClientLease, + types::CommsPublicKey, +}; +use tari_core::base_node::{rpc::BaseNodeWalletRpcClient, sync::rpc::BaseNodeSyncRpcClient}; +use tokio::sync::watch::Receiver; + +pub fn create() -> WalletConnectivityMock { + WalletConnectivityMock::new() +} + +#[derive(Clone)] +pub struct WalletConnectivityMock { + online_status_watch: Watch, + base_node_watch: Watch>, + base_node_wallet_rpc_client: Watch>>, + base_node_sync_rpc_client: Watch>>, +} + +impl WalletConnectivityMock { + pub(self) fn new() -> Self { + Self { + online_status_watch: Watch::new(OnlineStatus::Offline), + base_node_watch: Watch::new(None), + base_node_wallet_rpc_client: Watch::new(None), + base_node_sync_rpc_client: Watch::new(None), + } + } +} + +impl WalletConnectivityMock { + pub fn set_base_node_wallet_rpc_client(&self, client: BaseNodeWalletRpcClient) { + self.base_node_wallet_rpc_client.send(Some(RpcClientLease::new(client))); + } + + pub fn set_base_node_sync_rpc_client(&self, client: BaseNodeSyncRpcClient) { + self.base_node_sync_rpc_client.send(Some(RpcClientLease::new(client))); + } + + pub fn notify_base_node_set(&self, base_node_peer: Peer) { + self.base_node_watch.send(Some(base_node_peer)); + } + + pub fn send_shutdown(&self) { + self.base_node_wallet_rpc_client.send(None); + 
self.base_node_sync_rpc_client.send(None); + } +} + +#[async_trait::async_trait] +impl WalletConnectivityInterface for WalletConnectivityMock { + fn set_base_node(&mut self, base_node_peer: Peer) { + self.notify_base_node_set(base_node_peer); + } + + fn get_current_base_node_watcher(&self) -> Receiver> { + self.base_node_watch.get_receiver() + } + + async fn obtain_base_node_wallet_rpc_client(&mut self) -> Option> { + let mut receiver = self.base_node_wallet_rpc_client.get_receiver(); + if let Some(client) = receiver.borrow().as_ref() { + return Some(client.clone()); + } + + receiver.changed().await.unwrap(); + let borrow = receiver.borrow(); + borrow.as_ref().cloned() + } + + async fn obtain_base_node_sync_rpc_client(&mut self) -> Option> { + let mut receiver = self.base_node_sync_rpc_client.get_receiver(); + if let Some(client) = receiver.borrow().as_ref() { + return Some(client.clone()); + } + + receiver.changed().await.unwrap(); + let borrow = receiver.borrow(); + borrow.as_ref().cloned() + } + + fn get_connectivity_status(&mut self) -> OnlineStatus { + *self.online_status_watch.borrow() + } + + fn get_connectivity_status_watch(&self) -> Receiver { + self.online_status_watch.get_receiver() + } + + fn get_current_base_node_peer(&self) -> Option { + self.base_node_watch.borrow().as_ref().cloned() + } + + fn get_current_base_node_peer_public_key(&self) -> Option { + self.base_node_watch.borrow().as_ref().map(|p| p.public_key.clone()) + } + + fn get_current_base_node_id(&self) -> Option { + self.base_node_watch.borrow().as_ref().map(|p| p.node_id.clone()) + } + + fn is_base_node_set(&self) -> bool { + self.base_node_watch.borrow().is_some() + } +} diff --git a/base_layer/wallet/src/connectivity_service/mod.rs b/base_layer/wallet/src/connectivity_service/mod.rs index 035bd34d64..75b99ed330 100644 --- a/base_layer/wallet/src/connectivity_service/mod.rs +++ b/base_layer/wallet/src/connectivity_service/mod.rs @@ -32,7 +32,11 @@ pub use 
initializer::WalletConnectivityInitializer; mod service; pub use service::OnlineStatus; -mod watch; - #[cfg(test)] mod test; + +mod mock; +pub use mock::{create as create_wallet_connectivity_mock, WalletConnectivityMock}; + +mod interface; +pub use interface::WalletConnectivityInterface; diff --git a/base_layer/wallet/src/connectivity_service/service.rs b/base_layer/wallet/src/connectivity_service/service.rs index cffb51ac9b..a3df1874b4 100644 --- a/base_layer/wallet/src/connectivity_service/service.rs +++ b/base_layer/wallet/src/connectivity_service/service.rs @@ -22,7 +22,8 @@ use crate::{ base_node_service::config::BaseNodeServiceConfig, - connectivity_service::{error::WalletConnectivityError, handle::WalletConnectivityRequest, watch::Watch}, + connectivity_service::{error::WalletConnectivityError, handle::WalletConnectivityRequest}, + util::watch::Watch, }; use log::*; use std::{mem, time::Duration}; @@ -51,7 +52,7 @@ pub enum OnlineStatus { pub struct WalletConnectivityService { config: BaseNodeServiceConfig, - request_stream: mpsc::Receiver, + request_receiver: mpsc::Receiver, connectivity: ConnectivityRequester, base_node_watch: watch::Receiver>, pools: Option, @@ -67,14 +68,14 @@ struct ClientPoolContainer { impl WalletConnectivityService { pub(super) fn new( config: BaseNodeServiceConfig, - request_stream: mpsc::Receiver, + request_receiver: mpsc::Receiver, base_node_watch: watch::Receiver>, online_status_watch: Watch, connectivity: ConnectivityRequester, ) -> Self { Self { config, - request_stream, + request_receiver, connectivity, base_node_watch, pools: None, @@ -100,7 +101,7 @@ impl WalletConnectivityService { } }, - Some(req) = self.request_stream.recv() => { + Some(req) = self.request_receiver.recv() => { self.handle_request(req).await; }, @@ -247,7 +248,7 @@ impl WalletConnectivityService { } fn set_online_status(&self, status: OnlineStatus) { - let _ = self.online_status_watch.broadcast(status); + let _ = self.online_status_watch.send(status); } 
async fn try_setup_rpc_pool(&mut self, peer: NodeId) -> Result { diff --git a/base_layer/wallet/src/connectivity_service/test.rs b/base_layer/wallet/src/connectivity_service/test.rs index 1ed50ce5cf..2db5d187d5 100644 --- a/base_layer/wallet/src/connectivity_service/test.rs +++ b/base_layer/wallet/src/connectivity_service/test.rs @@ -21,7 +21,10 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use super::service::WalletConnectivityService; -use crate::connectivity_service::{watch::Watch, OnlineStatus, WalletConnectivityHandle}; +use crate::{ + connectivity_service::{OnlineStatus, WalletConnectivityHandle, WalletConnectivityInterface}, + util::watch::Watch, +}; use core::convert; use futures::future; use std::{iter, sync::Arc}; @@ -81,7 +84,7 @@ async fn it_dials_peer_when_base_node_is_set() { // Set the mock to defer returning a result for the peer connection mock_state.set_pending_connection(base_node_peer.node_id()).await; // Initiate a connection to the base node - handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + handle.set_base_node(base_node_peer.to_peer()); // Wait for connection request mock_state.await_call_count(1).await; @@ -104,7 +107,7 @@ async fn it_resolves_many_pending_rpc_session_requests() { mock_state.set_pending_connection(base_node_peer.node_id()).await; // Initiate a connection to the base node - handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + handle.set_base_node(base_node_peer.to_peer()); let pending_requests = iter::repeat_with(|| { let mut handle = handle.clone(); @@ -136,7 +139,7 @@ async fn it_changes_to_a_new_base_node() { mock_state.add_active_connection(conn2).await; // Initiate a connection to the base node - handle.set_base_node(base_node_peer1.to_peer()).await.unwrap(); + handle.set_base_node(base_node_peer1.to_peer()); mock_state.await_call_count(1).await; mock_state.expect_dial_peer(base_node_peer1.node_id()).await; @@ -147,7 +150,7 @@ async fn 
it_changes_to_a_new_base_node() { assert!(rpc_client.is_connected()); // Initiate a connection to the base node - handle.set_base_node(base_node_peer2.to_peer()).await.unwrap(); + handle.set_base_node(base_node_peer2.to_peer()); mock_state.await_call_count(1).await; mock_state.expect_dial_peer(base_node_peer2.node_id()).await; @@ -166,7 +169,7 @@ async fn it_gracefully_handles_connect_fail_reconnect() { mock_state.set_pending_connection(base_node_peer.node_id()).await; // Initiate a connection to the base node - handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + handle.set_base_node(base_node_peer.to_peer()); // Now a connection will given to the service mock_state.add_active_connection(conn.clone()).await; @@ -206,7 +209,7 @@ async fn it_gracefully_handles_multiple_connection_failures() { let conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; // Initiate a connection to the base node - handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + handle.set_base_node(base_node_peer.to_peer()); // Now a connection will given to the service mock_state.add_active_connection(conn.clone()).await; diff --git a/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs b/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs index b32f798777..b5f22c48bb 100644 --- a/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/contacts_service/storage/sqlite_db.rs @@ -27,6 +27,7 @@ use crate::{ }, schema::contacts, storage::sqlite_utilities::WalletDbConnection, + util::diesel_ext::ExpectedRowsExtension, }; use diesel::{prelude::*, result::Error as DieselError, SqliteConnection}; use std::convert::TryFrom; @@ -141,15 +142,10 @@ impl ContactSql { updated_contact: UpdateContact, conn: &SqliteConnection, ) -> Result { - let num_updated = diesel::update(contacts::table.filter(contacts::public_key.eq(&self.public_key))) + 
diesel::update(contacts::table.filter(contacts::public_key.eq(&self.public_key))) .set(updated_contact) - .execute(conn)?; - - if num_updated == 0 { - return Err(ContactsServiceStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } + .execute(conn) + .num_rows_affected_or_not_found(1)?; ContactSql::find(&self.public_key, conn) } diff --git a/base_layer/wallet/src/lib.rs b/base_layer/wallet/src/lib.rs index 22bce8bfdb..463a83154b 100644 --- a/base_layer/wallet/src/lib.rs +++ b/base_layer/wallet/src/lib.rs @@ -6,6 +6,10 @@ #![deny(unreachable_patterns)] #![deny(unknown_lints)] #![recursion_limit = "2048"] +// Some functions have a large amount of dependencies (e.g. services) and historically this warning +// has lead to bundling of dependencies into a resources struct, which is then overused and is the +// wrong abstraction +#![allow(clippy::too_many_arguments)] #[macro_use] mod macros; diff --git a/base_layer/wallet/src/output_manager_service/config.rs b/base_layer/wallet/src/output_manager_service/config.rs index ece576c97c..f033024c69 100644 --- a/base_layer/wallet/src/output_manager_service/config.rs +++ b/base_layer/wallet/src/output_manager_service/config.rs @@ -31,7 +31,8 @@ pub struct OutputManagerServiceConfig { pub peer_dial_retry_timeout: Duration, pub seed_word_language: MnemonicLanguage, pub event_channel_size: usize, - pub base_node_update_publisher_channel_size: usize, + pub num_confirmations_required: u64, + pub tx_validator_batch_size: usize, } impl Default for OutputManagerServiceConfig { @@ -43,7 +44,8 @@ impl Default for OutputManagerServiceConfig { peer_dial_retry_timeout: Duration::from_secs(20), seed_word_language: MnemonicLanguage::English, event_channel_size: 250, - base_node_update_publisher_channel_size: 50, + num_confirmations_required: 3, + tx_validator_batch_size: 100, } } } diff --git a/base_layer/wallet/src/output_manager_service/error.rs b/base_layer/wallet/src/output_manager_service/error.rs index 
1dfd3e969e..87c87f7bae 100644 --- a/base_layer/wallet/src/output_manager_service/error.rs +++ b/base_layer/wallet/src/output_manager_service/error.rs @@ -22,7 +22,7 @@ use crate::base_node_service::error::BaseNodeServiceError; use diesel::result::Error as DieselError; -use tari_comms::{peer_manager::node_id::NodeIdError, protocol::rpc::RpcError}; +use tari_comms::{connectivity::ConnectivityError, peer_manager::node_id::NodeIdError, protocol::rpc::RpcError}; use tari_comms_dht::outbound::DhtOutboundError; use tari_core::transactions::{ transaction::TransactionError, @@ -61,6 +61,8 @@ pub enum OutputManagerError { ConversionError(String), #[error("Not all the transaction inputs and outputs are present to be confirmed: {0}")] IncompleteTransaction(&'static str), + #[error("Inconsistent data found: {0}")] + InconsistentDataError(&'static str), #[error("Not enough funds to fulfil transaction")] NotEnoughFunds, #[error("Funds are still pending. Unable to fulfil transaction right now.")] @@ -109,6 +111,13 @@ pub enum OutputManagerError { MasterSecretKeyMismatch, #[error("Private Key is not found in the current Key Chain")] KeyNotFoundInKeyChain, + #[error("Connectivity error: {source}")] + ConnectivityError { + #[from] + source: ConnectivityError, + }, + #[error("Invalid message received:{0}")] + InvalidMessageError(String), } #[derive(Debug, Error, PartialEq)] @@ -178,3 +187,16 @@ impl From for OutputManagerError { tspe.error } } + +pub trait OutputManagerProtocolErrorExt { + fn for_protocol(self, id: u64) -> Result; +} + +impl> OutputManagerProtocolErrorExt for Result { + fn for_protocol(self, id: u64) -> Result { + match self { + Ok(r) => Ok(r), + Err(e) => Err(OutputManagerProtocolError::new(id, e.into())), + } + } +} diff --git a/base_layer/wallet/src/output_manager_service/handle.rs b/base_layer/wallet/src/output_manager_service/handle.rs index 54b082c900..4c9fd40498 100644 --- a/base_layer/wallet/src/output_manager_service/handle.rs +++ 
b/base_layer/wallet/src/output_manager_service/handle.rs @@ -20,23 +20,18 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use crate::{ - output_manager_service::{ - error::OutputManagerError, - service::Balance, - storage::{database::PendingTransactionOutputs, models::KnownOneSidedPaymentScript}, - tasks::TxoValidationType, - TxId, - }, - types::ValidationRetryStrategy, +use crate::output_manager_service::{ + error::OutputManagerError, + service::Balance, + storage::models::KnownOneSidedPaymentScript, + TxId, }; use aes_gcm::Aes256Gcm; -use std::{collections::HashMap, fmt, sync::Arc, time::Duration}; +use std::{fmt, sync::Arc}; use tari_common_types::types::PublicKey; -use tari_comms::types::CommsPublicKey; use tari_core::transactions::{ tari_amount::MicroTari, - transaction::{Transaction, TransactionInput, TransactionOutput, UnblindedOutput}, + transaction::{Transaction, TransactionOutput, UnblindedOutput}, transaction_protocol::sender::TransactionSenderMessage, ReceiverTransactionProtocol, SenderTransactionProtocol, @@ -55,18 +50,14 @@ pub enum OutputManagerRequest { GetRecipientTransaction(TransactionSenderMessage), GetCoinbaseTransaction((u64, MicroTari, MicroTari, u64)), ConfirmPendingTransaction(u64), - ConfirmTransaction((u64, Vec, Vec)), PrepareToSendTransaction((TxId, MicroTari, MicroTari, Option, String, TariScript)), CreatePayToSelfTransaction((TxId, MicroTari, MicroTari, Option, String)), CancelTransaction(u64), - TimeoutTransactions(Duration), - GetPendingTransactions, GetSpentOutputs, GetUnspentOutputs, GetInvalidOutputs, GetSeedWords, - SetBaseNodePublicKey(CommsPublicKey), - ValidateUtxos(TxoValidationType, ValidationRetryStrategy), + ValidateUtxos, CreateCoinSplit((MicroTari, usize, MicroTari, Option)), ApplyEncryption(Box), RemoveEncryption, @@ -76,6 +67,7 @@ pub enum OutputManagerRequest { 
ScanOutputs(Vec), AddKnownOneSidedPaymentScript(KnownOneSidedPaymentScript), ReinstateCancelledInboundTx(TxId), + SetCoinbaseAbandoned(TxId, bool), } impl fmt::Display for OutputManagerRequest { @@ -93,19 +85,15 @@ impl fmt::Display for OutputManagerRequest { v.metadata_signature.v().to_hex() ), GetRecipientTransaction(_) => write!(f, "GetRecipientTransaction"), - ConfirmTransaction(v) => write!(f, "ConfirmTransaction ({})", v.0), ConfirmPendingTransaction(v) => write!(f, "ConfirmPendingTransaction ({})", v), PrepareToSendTransaction((_, _, _, _, msg, _)) => write!(f, "PrepareToSendTransaction ({})", msg), CreatePayToSelfTransaction((_, _, _, _, msg)) => write!(f, "CreatePayToSelfTransaction ({})", msg), CancelTransaction(v) => write!(f, "CancelTransaction ({})", v), - TimeoutTransactions(d) => write!(f, "TimeoutTransactions ({}s)", d.as_secs()), - GetPendingTransactions => write!(f, "GetPendingTransactions"), GetSpentOutputs => write!(f, "GetSpentOutputs"), GetUnspentOutputs => write!(f, "GetUnspentOutputs"), GetInvalidOutputs => write!(f, "GetInvalidOutputs"), GetSeedWords => write!(f, "GetSeedWords"), - SetBaseNodePublicKey(k) => write!(f, "SetBaseNodePublicKey ({})", k), - ValidateUtxos(validation_type, retry) => write!(f, "{} ({:?})", validation_type, retry), + ValidateUtxos => write!(f, "ValidateUtxos"), CreateCoinSplit(v) => write!(f, "CreateCoinSplit ({})", v.0), ApplyEncryption(_) => write!(f, "ApplyEncryption"), RemoveEncryption => write!(f, "RemoveEncryption"), @@ -116,6 +104,7 @@ impl fmt::Display for OutputManagerRequest { ScanOutputs(_) => write!(f, "ScanOutputs"), AddKnownOneSidedPaymentScript(_) => write!(f, "AddKnownOneSidedPaymentScript"), ReinstateCancelledInboundTx(_) => write!(f, "ReinstateCancelledInboundTx"), + SetCoinbaseAbandoned(_, _) => write!(f, "SetCoinbaseAbandoned"), } } } @@ -131,17 +120,14 @@ pub enum OutputManagerResponse { OutputConfirmed, PendingTransactionConfirmed, PayToSelfTransaction((MicroTari, Transaction)), - 
TransactionConfirmed, TransactionToSend(SenderTransactionProtocol), TransactionCancelled, - TransactionsTimedOut, - PendingTransactions(HashMap), SpentOutputs(Vec), UnspentOutputs(Vec), InvalidOutputs(Vec), SeedWords(Vec), BaseNodePublicKeySet, - UtxoValidationStarted(u64), + TxoValidationStarted(u64), Transaction((u64, Transaction, MicroTari, MicroTari)), EncryptionApplied, EncryptionRemoved, @@ -151,6 +137,7 @@ pub enum OutputManagerResponse { ScanOutputs(Vec), AddKnownOneSidedPaymentScript, ReinstatedCancelledInboundTx, + CoinbaseAbandonedSet, } pub type OutputManagerEventSender = broadcast::Sender>; @@ -159,11 +146,11 @@ pub type OutputManagerEventReceiver = broadcast::Receiver, - received_outputs: Vec, - ) -> Result<(), OutputManagerError> { - match self - .handle - .call(OutputManagerRequest::ConfirmTransaction(( - tx_id, - spent_outputs, - received_outputs, - ))) - .await?? - { - OutputManagerResponse::TransactionConfirmed => Ok(()), - _ => Err(OutputManagerError::UnexpectedApiResponse), - } - } - pub async fn cancel_transaction(&mut self, tx_id: u64) -> Result<(), OutputManagerError> { match self .handle @@ -369,26 +336,6 @@ impl OutputManagerHandle { } } - pub async fn timeout_transactions(&mut self, period: Duration) -> Result<(), OutputManagerError> { - match self - .handle - .call(OutputManagerRequest::TimeoutTransactions(period)) - .await?? - { - OutputManagerResponse::TransactionsTimedOut => Ok(()), - _ => Err(OutputManagerError::UnexpectedApiResponse), - } - } - - pub async fn get_pending_transactions( - &mut self, - ) -> Result, OutputManagerError> { - match self.handle.call(OutputManagerRequest::GetPendingTransactions).await?? { - OutputManagerResponse::PendingTransactions(p) => Ok(p), - _ => Err(OutputManagerError::UnexpectedApiResponse), - } - } - pub async fn get_spent_outputs(&mut self) -> Result, OutputManagerError> { match self.handle.call(OutputManagerRequest::GetSpentOutputs).await?? 
{ OutputManagerResponse::SpentOutputs(s) => Ok(s), @@ -425,28 +372,9 @@ impl OutputManagerHandle { } } - pub async fn set_base_node_public_key(&mut self, public_key: CommsPublicKey) -> Result<(), OutputManagerError> { - match self - .handle - .call(OutputManagerRequest::SetBaseNodePublicKey(public_key)) - .await?? - { - OutputManagerResponse::BaseNodePublicKeySet => Ok(()), - _ => Err(OutputManagerError::UnexpectedApiResponse), - } - } - - pub async fn validate_txos( - &mut self, - validation_type: TxoValidationType, - retries: ValidationRetryStrategy, - ) -> Result { - match self - .handle - .call(OutputManagerRequest::ValidateUtxos(validation_type, retries)) - .await?? - { - OutputManagerResponse::UtxoValidationStarted(request_key) => Ok(request_key), + pub async fn validate_txos(&mut self) -> Result { + match self.handle.call(OutputManagerRequest::ValidateUtxos).await?? { + OutputManagerResponse::TxoValidationStarted(request_key) => Ok(request_key), _ => Err(OutputManagerError::UnexpectedApiResponse), } } @@ -552,7 +480,10 @@ impl OutputManagerHandle { } } - pub async fn reinstate_cancelled_inbound_transaction(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> { + pub async fn reinstate_cancelled_inbound_transaction_outputs( + &mut self, + tx_id: TxId, + ) -> Result<(), OutputManagerError> { match self .handle .call(OutputManagerRequest::ReinstateCancelledInboundTx(tx_id)) @@ -562,4 +493,15 @@ impl OutputManagerHandle { _ => Err(OutputManagerError::UnexpectedApiResponse), } } + + pub async fn set_coinbase_abandoned(&mut self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerError> { + match self + .handle + .call(OutputManagerRequest::SetCoinbaseAbandoned(tx_id, abandoned)) + .await?? 
+ { + OutputManagerResponse::CoinbaseAbandonedSet => Ok(()), + _ => Err(OutputManagerError::UnexpectedApiResponse), + } + } } diff --git a/base_layer/wallet/src/output_manager_service/master_key_manager.rs b/base_layer/wallet/src/output_manager_service/master_key_manager.rs index 4f33a909cf..38e5ed66bf 100644 --- a/base_layer/wallet/src/output_manager_service/master_key_manager.rs +++ b/base_layer/wallet/src/output_manager_service/master_key_manager.rs @@ -47,9 +47,7 @@ const KEY_MANAGER_RECOVERY_VIEWONLY_BRANCH_KEY: &str = "recovery_viewonly"; const KEY_MANAGER_RECOVERY_BLINDING_BRANCH_KEY: &str = "recovery_blinding"; const KEY_MANAGER_MAX_SEARCH_DEPTH: u64 = 1_000_000; -pub(crate) struct MasterKeyManager -where TBackend: OutputManagerBackend + 'static -{ +pub(crate) struct MasterKeyManager { utxo_key_manager: Mutex>, utxo_script_key_manager: Mutex>, coinbase_key_manager: Mutex>, diff --git a/base_layer/wallet/src/output_manager_service/mod.rs b/base_layer/wallet/src/output_manager_service/mod.rs index 80f02f2445..cd13c3ecb1 100644 --- a/base_layer/wallet/src/output_manager_service/mod.rs +++ b/base_layer/wallet/src/output_manager_service/mod.rs @@ -20,12 +20,21 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use crate::{ + base_node_service::handle::BaseNodeServiceHandle, + connectivity_service::WalletConnectivityHandle, + output_manager_service::{ + config::OutputManagerServiceConfig, + handle::OutputManagerHandle, + service::OutputManagerService, + storage::database::{OutputManagerBackend, OutputManagerDatabase}, + }, + transaction_service::handle::TransactionServiceHandle, +}; use futures::future; use log::*; -use tokio::sync::broadcast; - pub(crate) use master_key_manager::MasterKeyManager; -use tari_comms::{connectivity::ConnectivityRequester, types::CommsSecretKey}; +use tari_comms::types::CommsSecretKey; use tari_core::{ consensus::{ConsensusConstantsBuilder, NetworkConsensus}, transactions::CryptoFactories, @@ -37,18 +46,7 @@ use tari_service_framework::{ ServiceInitializer, ServiceInitializerContext, }; -pub use tasks::TxoValidationType; - -use crate::{ - base_node_service::handle::BaseNodeServiceHandle, - output_manager_service::{ - config::OutputManagerServiceConfig, - handle::OutputManagerHandle, - service::OutputManagerService, - storage::database::{OutputManagerBackend, OutputManagerDatabase}, - }, - transaction_service::handle::TransactionServiceHandle, -}; +use tokio::sync::broadcast; pub mod config; pub mod error; @@ -124,7 +122,7 @@ where T: OutputManagerBackend + 'static context.spawn_when_ready(move |handles| async move { let transaction_service = handles.expect_handle::(); let base_node_service_handle = handles.expect_handle::(); - let connectivity_manager = handles.expect_handle::(); + let connectivity = handles.expect_handle::(); let service = OutputManagerService::new( config, @@ -136,7 +134,7 @@ where T: OutputManagerBackend + 'static constants, handles.get_shutdown_signal(), base_node_service_handle, - connectivity_manager, + connectivity, master_secret_key, ) .await diff --git a/base_layer/wallet/src/output_manager_service/resources.rs b/base_layer/wallet/src/output_manager_service/resources.rs index f094b0b79c..d3cc7de414 100644 --- 
a/base_layer/wallet/src/output_manager_service/resources.rs +++ b/base_layer/wallet/src/output_manager_service/resources.rs @@ -20,35 +20,29 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::sync::Arc; - -use tari_comms::{connectivity::ConnectivityRequester, types::CommsPublicKey}; -use tari_core::{consensus::ConsensusConstants, transactions::CryptoFactories}; -use tari_shutdown::ShutdownSignal; - use crate::{ output_manager_service::{ config::OutputManagerServiceConfig, handle::OutputManagerEventSender, - storage::database::{OutputManagerBackend, OutputManagerDatabase}, + storage::database::OutputManagerDatabase, MasterKeyManager, }, transaction_service::handle::TransactionServiceHandle, }; +use std::sync::Arc; +use tari_core::{consensus::ConsensusConstants, transactions::CryptoFactories}; +use tari_shutdown::ShutdownSignal; /// This struct is a collection of the common resources that a async task in the service requires. 
#[derive(Clone)] -pub(crate) struct OutputManagerResources -where TBackend: OutputManagerBackend + 'static -{ +pub(crate) struct OutputManagerResources { pub config: OutputManagerServiceConfig, pub db: OutputManagerDatabase, pub transaction_service: TransactionServiceHandle, pub factories: CryptoFactories, - pub base_node_public_key: Option, pub event_publisher: OutputManagerEventSender, pub master_key_manager: Arc>, pub consensus_constants: ConsensusConstants, - pub connectivity_manager: ConnectivityRequester, + pub connectivity: TWalletConnectivity, pub shutdown_signal: ShutdownSignal, } diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index 05d781a754..07d1bb4c8b 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -20,47 +20,44 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{ - cmp::Ordering, - collections::HashMap, - fmt::{self, Display}, - sync::Arc, - time::Duration, +use crate::{ + base_node_service::handle::{BaseNodeEvent, BaseNodeServiceHandle}, + connectivity_service::WalletConnectivityInterface, + output_manager_service::{ + config::OutputManagerServiceConfig, + error::{OutputManagerError, OutputManagerProtocolError, OutputManagerStorageError}, + handle::{OutputManagerEventSender, OutputManagerRequest, OutputManagerResponse}, + recovery::StandardUtxoRecoverer, + resources::OutputManagerResources, + storage::{ + database::{OutputManagerBackend, OutputManagerDatabase}, + models::{DbUnblindedOutput, KnownOneSidedPaymentScript}, + }, + tasks::TxoValidationTask, + MasterKeyManager, + TxId, + }, + transaction_service::handle::TransactionServiceHandle, + types::HashDigest, }; - use blake2::Digest; -use chrono::Utc; use diesel::result::{DatabaseErrorKind, Error as DieselError}; use futures::{pin_mut, StreamExt}; use log::*; use rand::{rngs::OsRng, RngCore}; -use tari_crypto::{ - inputs, - keys::{DiffieHellmanSharedSecret, PublicKey as PublicKeyTrait, SecretKey}, - script, - script::TariScript, - tari_utilities::{hex::Hex, ByteArray}, +use std::{ + cmp::Ordering, + fmt::{self, Display}, + sync::Arc, }; -use tokio::sync::broadcast; - use tari_common_types::types::{PrivateKey, PublicKey}; -use tari_comms::{ - connectivity::ConnectivityRequester, - types::{CommsPublicKey, CommsSecretKey}, -}; +use tari_comms::types::{CommsPublicKey, CommsSecretKey}; use tari_core::{ consensus::ConsensusConstants, transactions::{ fee::Fee, tari_amount::MicroTari, - transaction::{ - KernelFeatures, - OutputFeatures, - Transaction, - TransactionInput, - TransactionOutput, - UnblindedOutput, - }, + transaction::{KernelFeatures, OutputFeatures, Transaction, TransactionOutput, UnblindedOutput}, transaction_protocol::sender::TransactionSenderMessage, CoinbaseBuilder, CryptoFactories, @@ -68,29 +65,16 @@ use tari_core::{ SenderTransactionProtocol, }, }; 
+use tari_crypto::{ + inputs, + keys::{DiffieHellmanSharedSecret, PublicKey as PublicKeyTrait, SecretKey}, + script, + script::TariScript, + tari_utilities::{hex::Hex, ByteArray}, +}; use tari_service_framework::reply_channel; use tari_shutdown::ShutdownSignal; -use crate::{ - base_node_service::handle::BaseNodeServiceHandle, - output_manager_service::{ - config::OutputManagerServiceConfig, - error::{OutputManagerError, OutputManagerProtocolError, OutputManagerStorageError}, - handle::{OutputManagerEventSender, OutputManagerRequest, OutputManagerResponse}, - recovery::StandardUtxoRecoverer, - resources::OutputManagerResources, - storage::{ - database::{OutputManagerBackend, OutputManagerDatabase, PendingTransactionOutputs}, - models::{DbUnblindedOutput, KnownOneSidedPaymentScript}, - }, - tasks::{TxoValidationTask, TxoValidationType}, - MasterKeyManager, - TxId, - }, - transaction_service::handle::TransactionServiceHandle, - types::{HashDigest, ValidationRetryStrategy}, -}; - const LOG_TARGET: &str = "wallet::output_manager_service"; const LOG_TARGET_STRESS: &str = "stress_test::output_manager_service"; @@ -98,18 +82,18 @@ const LOG_TARGET_STRESS: &str = "stress_test::output_manager_service"; /// The service will assemble transactions to be sent from the wallets available outputs and provide keys to receive /// outputs. When the outputs are detected on the blockchain the Transaction service will call this Service to confirm /// them to be moved to the spent and unspent output lists respectively. 
-pub struct OutputManagerService -where TBackend: OutputManagerBackend + 'static -{ - resources: OutputManagerResources, +pub struct OutputManagerService { + resources: OutputManagerResources, request_stream: Option>>, - base_node_update_publisher: broadcast::Sender, base_node_service: BaseNodeServiceHandle, + last_seen_tip_height: Option, } -impl OutputManagerService -where TBackend: OutputManagerBackend + 'static +impl OutputManagerService +where + TBackend: OutputManagerBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, { #[allow(clippy::too_many_arguments)] pub async fn new( @@ -125,9 +109,9 @@ where TBackend: OutputManagerBackend + 'static consensus_constants: ConsensusConstants, shutdown_signal: ShutdownSignal, base_node_service: BaseNodeServiceHandle, - connectivity_manager: ConnectivityRequester, + connectivity: TWalletConnectivity, master_secret_key: CommsSecretKey, - ) -> Result, OutputManagerError> { + ) -> Result { // Clear any encumberances for transactions that were being negotiated but did not complete to become official // Pending Transactions. 
db.clear_short_term_encumberances().await?; @@ -139,22 +123,18 @@ where TBackend: OutputManagerBackend + 'static db, transaction_service, factories, - base_node_public_key: None, + connectivity, event_publisher, master_key_manager: Arc::new(master_key_manager), consensus_constants, - connectivity_manager, shutdown_signal, }; - let (base_node_update_publisher, _) = - broadcast::channel(resources.config.base_node_update_publisher_channel_size); - - Ok(OutputManagerService { + Ok(Self { resources, request_stream: Some(request_stream), - base_node_update_publisher, base_node_service, + last_seen_tip_height: None, }) } @@ -168,11 +148,19 @@ where TBackend: OutputManagerBackend + 'static let mut shutdown = self.resources.shutdown_signal.clone(); + let mut base_node_service_event_stream = self.base_node_service.get_event_stream(); + info!(target: LOG_TARGET, "Output Manager Service started"); loop { tokio::select! { + event = base_node_service_event_stream.recv() => { + match event { + Ok(msg) => self.handle_base_node_service_event(msg), + Err(e) => debug!(target: LOG_TARGET, "Lagging read on base node event broadcast channel: {}", e), + } + }, Some(request_context) = request_stream.next() => { - trace!(target: LOG_TARGET, "Handling Service API Request"); + trace!(target: LOG_TARGET, "Handling Service API Request"); let (request, reply_tx) = request_context.split(); let response = self.handle_request(request).await.map_err(|e| { warn!(target: LOG_TARGET, "Error handling request: {:?}", e); @@ -253,22 +241,10 @@ where TBackend: OutputManagerBackend + 'static .confirm_encumberance(tx_id) .await .map(|_| OutputManagerResponse::PendingTransactionConfirmed), - OutputManagerRequest::ConfirmTransaction((tx_id, spent_outputs, received_outputs)) => self - .confirm_transaction(tx_id, &spent_outputs, &received_outputs) - .await - .map(|_| OutputManagerResponse::TransactionConfirmed), OutputManagerRequest::CancelTransaction(tx_id) => self .cancel_transaction(tx_id) .await .map(|_| 
OutputManagerResponse::TransactionCancelled), - OutputManagerRequest::TimeoutTransactions(period) => self - .timeout_pending_transactions(period) - .await - .map(|_| OutputManagerResponse::TransactionsTimedOut), - OutputManagerRequest::GetPendingTransactions => self - .fetch_pending_transaction_outputs() - .await - .map(OutputManagerResponse::PendingTransactions), OutputManagerRequest::GetSpentOutputs => { let outputs = self .fetch_spent_outputs() @@ -293,13 +269,9 @@ where TBackend: OutputManagerBackend + 'static .get_seed_words(&self.resources.config.seed_word_language) .await .map(OutputManagerResponse::SeedWords), - OutputManagerRequest::SetBaseNodePublicKey(pk) => self - .set_base_node_public_key(pk) - .await - .map(|_| OutputManagerResponse::BaseNodePublicKeySet), - OutputManagerRequest::ValidateUtxos(validation_type, retries) => self - .validate_outputs(validation_type, retries) - .map(OutputManagerResponse::UtxoValidationStarted), + OutputManagerRequest::ValidateUtxos => { + self.validate_outputs().map(OutputManagerResponse::TxoValidationStarted) + }, OutputManagerRequest::GetInvalidOutputs => { let outputs = self .fetch_invalid_outputs() @@ -348,53 +320,69 @@ where TBackend: OutputManagerBackend + 'static .await .map(|_| OutputManagerResponse::AddKnownOneSidedPaymentScript), OutputManagerRequest::ReinstateCancelledInboundTx(tx_id) => self - .reinstate_cancelled_inbound_transaction(tx_id) + .reinstate_cancelled_inbound_transaction_outputs(tx_id) .await .map(|_| OutputManagerResponse::ReinstatedCancelledInboundTx), + OutputManagerRequest::SetCoinbaseAbandoned(tx_id, abandoned) => self + .set_coinbase_abandoned(tx_id, abandoned) + .await + .map(|_| OutputManagerResponse::CoinbaseAbandonedSet), } } - fn validate_outputs( - &mut self, - validation_type: TxoValidationType, - retry_strategy: ValidationRetryStrategy, - ) -> Result { - match self.resources.base_node_public_key.as_ref() { - None => Err(OutputManagerError::NoBaseNodeKeysProvided), - Some(pk) => { - 
let id = OsRng.next_u64(); - - let utxo_validation_task = TxoValidationTask::new( - id, - validation_type, - retry_strategy, - self.resources.clone(), - pk.clone(), - self.base_node_update_publisher.subscribe(), - ); - - tokio::spawn(async move { - match utxo_validation_task.execute().await { - Ok(id) => { - info!( - target: LOG_TARGET, - "UTXO Validation Protocol (Id: {}) completed successfully", id - ); - }, - Err(OutputManagerProtocolError { id, error }) => { - warn!( - target: LOG_TARGET, - "Error completing UTXO Validation Protocol (Id: {}): {:?}", id, error - ); - }, - } - }); - - Ok(id) + fn handle_base_node_service_event(&mut self, event: Arc) { + match (*event).clone() { + BaseNodeEvent::BaseNodeStateChanged(state) => { + let trigger_validation = match (self.last_seen_tip_height, state.chain_metadata.clone()) { + (Some(last_seen_tip_height), Some(cm)) => last_seen_tip_height != cm.height_of_longest_chain(), + (None, _) => true, + _ => false, + }; + if trigger_validation { + let _ = self.validate_outputs().map_err(|e| { + warn!(target: LOG_TARGET, "Error validating txos: {:?}", e); + e + }); + } + self.last_seen_tip_height = state.chain_metadata.map(|cm| cm.height_of_longest_chain()); }, } } + fn validate_outputs(&mut self) -> Result { + if !self.resources.connectivity.is_base_node_set() { + return Err(OutputManagerError::NoBaseNodeKeysProvided); + } + let id = OsRng.next_u64(); + let utxo_validation = TxoValidationTask::new( + id, + self.resources.db.clone(), + self.resources.connectivity.clone(), + self.resources.event_publisher.clone(), + self.resources.config.clone(), + ); + + let shutdown = self.resources.shutdown_signal.clone(); + + tokio::spawn(async move { + match utxo_validation.execute(shutdown).await { + Ok(id) => { + info!( + target: LOG_TARGET, + "UTXO Validation Protocol (Id: {}) completed successfully", id + ); + }, + Err(OutputManagerProtocolError { id, error }) => { + warn!( + target: LOG_TARGET, + "Error completing UTXO Validation Protocol 
(Id: {}): {:?}", id, error + ); + }, + } + }); + Ok(id) + } + /// Add an unblinded output to the unspent outputs list pub async fn add_output(&mut self, tx_id: Option, output: UnblindedOutput) -> Result<(), OutputManagerError> { debug!( @@ -470,11 +458,9 @@ where TBackend: OutputManagerBackend + 'static self.resources .db - .accept_incoming_pending_transaction(single_round_sender_data.tx_id, output, None) + .add_output_to_be_received(single_round_sender_data.tx_id, output, None) .await?; - self.confirm_encumberance(single_round_sender_data.tx_id).await?; - let nonce = PrivateKey::random(&mut OsRng); let rtp = ReceiverTransactionProtocol::new_with_rewindable_output( @@ -489,46 +475,6 @@ where TBackend: OutputManagerBackend + 'static Ok(rtp) } - /// Confirm the reception of an expected transaction output. This will be called by the Transaction Service when it - /// detects the output on the blockchain - pub async fn confirm_received_transaction_output( - &mut self, - tx_id: u64, - received_output: &TransactionOutput, - ) -> Result<(), OutputManagerError> { - let pending_transaction = self.resources.db.fetch_pending_transaction_outputs(tx_id).await?; - - // Assumption: We are only allowing a single output per receiver in the current transaction protocols. - if pending_transaction.outputs_to_be_received.len() != 1 { - return Err(OutputManagerError::IncompleteTransaction( - "unexpected number of outputs to be received, exactly one is expected", - )); - } - - if pending_transaction.outputs_to_be_received[0] - .unblinded_output - .as_transaction_input(&self.resources.factories.commitment)? 
- .commitment != - received_output.commitment - { - return Err(OutputManagerError::IncompleteTransaction( - "unexpected commitment received", - )); - } - - self.resources - .db - .confirm_pending_transaction_outputs(pending_transaction.tx_id) - .await?; - - debug!( - target: LOG_TARGET, - "Confirm received transaction outputs for TxId: {}", tx_id - ); - - Ok(()) - } - /// Get a fee estimate for an amount of MicroTari, at a specified fee per gram and given number of kernels and /// outputs. async fn fee_estimate( @@ -696,11 +642,23 @@ where TBackend: OutputManagerBackend + 'static let output = DbUnblindedOutput::from_unblinded_output(unblinded_output, &self.resources.factories)?; - // Clear any existing pending coinbase transactions for this blockheight - self.resources + // Clear any existing pending coinbase transactions for this blockheight if they exist + if let Err(e) = self + .resources .db - .cancel_pending_transaction_at_block_height(block_height) - .await?; + .clear_pending_coinbase_transaction_at_block_height(block_height) + .await + { + match e { + OutputManagerStorageError::DieselError(DieselError::NotFound) => { + debug!( + target: LOG_TARGET, + "An existing pending coinbase was cleared for block height {}", block_height + ) + }, + _ => return Err(OutputManagerError::from(e)), + } + }; // Clear any matching outputs for this commitment. Even if the older output is valid // we are losing no information as this output has the same commitment. @@ -717,7 +675,7 @@ where TBackend: OutputManagerBackend + 'static self.resources .db - .accept_incoming_pending_transaction(tx_id, output, Some(block_height)) + .add_output_to_be_received(tx_id, output, Some(block_height)) .await?; self.confirm_encumberance(tx_id).await?; @@ -847,56 +805,6 @@ where TBackend: OutputManagerBackend + 'static Ok(()) } - /// Confirm that a received or sent transaction and its outputs have been detected on the base chain. 
The inputs and - /// outputs are checked to see that they match what the stored PendingTransaction contains. This will - /// be called by the Transaction Service which monitors the base chain. - async fn confirm_transaction( - &mut self, - tx_id: u64, - inputs: &[TransactionInput], - outputs: &[TransactionOutput], - ) -> Result<(), OutputManagerError> { - let pending_transaction = self.resources.db.fetch_pending_transaction_outputs(tx_id).await?; - - // Check that outputs to be spent can all be found in the provided transaction inputs - for output_to_spend in pending_transaction.outputs_to_be_spent.iter() { - let input_to_check = output_to_spend - .unblinded_output - .as_transaction_input(&self.resources.factories.commitment)?; - - if inputs.iter().all(|input| input.commitment != input_to_check.commitment) { - return Err(OutputManagerError::IncompleteTransaction( - "outputs to spend are missing", - )); - } - } - - // Check that outputs to be received can all be found in the provided transaction outputs - for output_to_receive in pending_transaction.outputs_to_be_received.iter() { - let output_to_check = output_to_receive - .unblinded_output - .as_transaction_input(&self.resources.factories.commitment)?; - - if outputs - .iter() - .all(|output| output.commitment != output_to_check.commitment) - { - return Err(OutputManagerError::IncompleteTransaction( - "outputs to receive are missing", - )); - } - } - - self.resources - .db - .confirm_pending_transaction_outputs(pending_transaction.tx_id) - .await?; - - trace!(target: LOG_TARGET, "Confirm transaction (TxId: {})", tx_id); - - Ok(()) - } - /// Cancel a pending transaction and place the encumbered outputs back into the unspent pool pub async fn cancel_transaction(&mut self, tx_id: u64) -> Result<(), OutputManagerError> { debug!( @@ -908,30 +816,12 @@ where TBackend: OutputManagerBackend + 'static /// Restore the pending transaction encumberance and output for an inbound transaction that was previously /// cancelled. 
- async fn reinstate_cancelled_inbound_transaction(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> { - self.resources.db.reinstate_inbound_output(tx_id).await?; - - self.resources - .db - .add_pending_transaction_outputs(PendingTransactionOutputs { - tx_id, - outputs_to_be_spent: Vec::new(), - outputs_to_be_received: Vec::new(), - timestamp: Utc::now().naive_utc(), - coinbase_block_height: None, - }) - .await?; - - self.confirm_encumberance(tx_id).await?; + async fn reinstate_cancelled_inbound_transaction_outputs(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> { + self.resources.db.reinstate_cancelled_inbound_output(tx_id).await?; Ok(()) } - /// Go through the pending transaction and if any have existed longer than the specified duration, cancel them - async fn timeout_pending_transactions(&mut self, period: Duration) -> Result<(), OutputManagerError> { - Ok(self.resources.db.timeout_pending_transaction_outputs(period).await?) - } - /// Select which unspent transaction outputs to use to send a transaction of the specified amount. Use the specified /// selection strategy to choose the outputs. It also determines if a change output is required. async fn select_utxos( @@ -1061,35 +951,6 @@ where TBackend: OutputManagerBackend + 'static Ok((utxos, require_change_output, utxos_total_value)) } - /// Set the base node public key to the list that will be used to check the status of UTXO's on the base chain. If - /// this is the first time the base node public key is set do the UTXO queries. 
- async fn set_base_node_public_key( - &mut self, - base_node_public_key: CommsPublicKey, - ) -> Result<(), OutputManagerError> { - info!( - target: LOG_TARGET, - "Setting base node public key {} for service", base_node_public_key - ); - - self.resources.base_node_public_key = Some(base_node_public_key.clone()); - if let Err(e) = self.base_node_update_publisher.send(base_node_public_key) { - trace!( - target: LOG_TARGET, - "No subscribers to receive base node public key update: {:?}", - e - ); - } - - Ok(()) - } - - pub async fn fetch_pending_transaction_outputs( - &self, - ) -> Result, OutputManagerError> { - Ok(self.resources.db.fetch_all_pending_transaction_outputs().await?) - } - pub async fn fetch_spent_outputs(&self) -> Result, OutputManagerError> { Ok(self.resources.db.fetch_spent_outputs().await?) } @@ -1103,6 +964,11 @@ where TBackend: OutputManagerBackend + 'static Ok(self.resources.db.get_invalid_outputs().await?) } + pub async fn set_coinbase_abandoned(&self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerError> { + self.resources.db.set_coinbase_abandoned(tx_id, abandoned).await?; + Ok(()) + } + async fn create_coin_split( &mut self, amount_per_split: MicroTari, diff --git a/base_layer/wallet/src/output_manager_service/storage/database.rs b/base_layer/wallet/src/output_manager_service/storage/database.rs index 70badb5709..efc5e78d8f 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database.rs @@ -27,15 +27,12 @@ use crate::output_manager_service::{ TxId, }; use aes_gcm::Aes256Gcm; -use chrono::{NaiveDateTime, Utc}; use log::*; use std::{ - collections::HashMap, fmt::{Display, Error, Formatter}, sync::Arc, - time::Duration, }; -use tari_common_types::types::{BlindingFactor, Commitment, PrivateKey}; +use tari_common_types::types::{BlindingFactor, Commitment, HashOutput, PrivateKey}; use tari_core::transactions::{tari_amount::MicroTari, 
transaction::TransactionOutput}; const LOG_TARGET: &str = "wallet::output_manager_service::database"; @@ -47,12 +44,35 @@ const LOG_TARGET: &str = "wallet::output_manager_service::database"; pub trait OutputManagerBackend: Send + Sync + Clone { /// Retrieve the record associated with the provided DbKey fn fetch(&self, key: &DbKey) -> Result, OutputManagerStorageError>; + /// Retrieve outputs that have been mined but not spent yet (have not been deleted) + fn fetch_mined_unspent_outputs(&self) -> Result, OutputManagerStorageError>; + /// Retrieve outputs that have not been found or confirmed in the block chain yet + fn fetch_unconfirmed_outputs(&self) -> Result, OutputManagerStorageError>; /// Modify the state the of the backend with a write operation fn write(&self, op: WriteOperation) -> Result, OutputManagerStorageError>; - /// This method is called when a pending transaction is to be confirmed. It must move the `outputs_to_be_spent` and - /// `outputs_to_be_received` from a `PendingTransactionOutputs` record into the `unspent_outputs` and - /// `spent_outputs` collections. - fn confirm_transaction(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError>; + fn fetch_pending_incoming_outputs(&self) -> Result, OutputManagerStorageError>; + fn fetch_pending_outgoing_outputs(&self) -> Result, OutputManagerStorageError>; + + fn set_received_output_mined_height( + &self, + hash: Vec, + mined_height: u64, + mined_in_block: Vec, + mmr_position: u64, + confirmed: bool, + ) -> Result<(), OutputManagerStorageError>; + + fn set_output_to_unmined(&self, hash: Vec) -> Result<(), OutputManagerStorageError>; + + fn mark_output_as_spent( + &self, + hash: Vec, + mark_deleted_at_height: u64, + mark_deleted_in_block: Vec, + confirmed: bool, + ) -> Result<(), OutputManagerStorageError>; + + fn mark_output_as_unspent(&self, hash: Vec) -> Result<(), OutputManagerStorageError>; /// This method encumbers the specified outputs into a `PendingTransactionOutputs` record. 
This is a short term /// encumberance in case the app is closed or crashes before transaction neogtiation is complete. These will be /// cleared on startup of the service. @@ -72,43 +92,33 @@ pub trait OutputManagerBackend: Send + Sync + Clone { /// `UnspentOutputs` pool. The `outputs_to_be_received`'` will be marked as cancelled inbound outputs in case they /// need to be recovered. fn cancel_pending_transaction(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError>; - /// This method must run through all the `PendingTransactionOutputs` and test if any have existed for longer that - /// the specified duration. If they have they should be cancelled. - fn timeout_pending_transactions(&self, period: Duration) -> Result<(), OutputManagerStorageError>; /// This method will increment the currently stored key index for the key manager config. Increment this after each /// key is generated fn increment_key_index(&self) -> Result<(), OutputManagerStorageError>; /// This method will set the currently stored key index for the key manager fn set_key_index(&self, index: u64) -> Result<(), OutputManagerStorageError>; - /// If an unspent output is detected as invalid (i.e. not available on the blockchain) then it should be moved to - /// the invalid outputs collection. The function will return the last recorded TxId associated with this output. 
- fn invalidate_unspent_output(&self, output: &DbUnblindedOutput) -> Result, OutputManagerStorageError>; /// This method will update an output's metadata signature, akin to 'finalize output' fn update_output_metadata_signature(&self, output: &TransactionOutput) -> Result<(), OutputManagerStorageError>; /// If an invalid output is found to be valid this function will turn it back into an unspent output fn revalidate_unspent_output(&self, spending_key: &Commitment) -> Result<(), OutputManagerStorageError>; - /// Check to see if there exist any pending transaction with a blockheight equal that provided and cancel those - /// pending transaction outputs. - fn cancel_pending_transaction_at_block_height(&self, block_height: u64) -> Result<(), OutputManagerStorageError>; /// Apply encryption to the backend. fn apply_encryption(&self, cipher: Aes256Gcm) -> Result<(), OutputManagerStorageError>; /// Remove encryption from the backend. fn remove_encryption(&self) -> Result<(), OutputManagerStorageError>; - /// Update a Spent output to be Unspent - fn update_spent_output_to_unspent( - &self, - commitment: &Commitment, - ) -> Result; -} -/// Holds the outputs that have been selected for a given pending transaction waiting for confirmation -#[derive(Debug, Clone, PartialEq)] -pub struct PendingTransactionOutputs { - pub tx_id: u64, - pub outputs_to_be_spent: Vec, - pub outputs_to_be_received: Vec, - pub timestamp: NaiveDateTime, - pub coinbase_block_height: Option, + /// Get the output that was most recently mined, ordered descending by mined height + fn get_last_mined_output(&self) -> Result, OutputManagerStorageError>; + /// Get the output that was most recently spent, ordered descending by mined height + fn get_last_spent_output(&self) -> Result, OutputManagerStorageError>; + /// Check if there is a pending coinbase transaction at this block height, if there is clear it. 
+ fn clear_pending_coinbase_transaction_at_block_height( + &self, + block_height: u64, + ) -> Result<(), OutputManagerStorageError>; + /// Set if a coinbase output is abandoned or not + fn set_coinbase_abandoned(&self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerStorageError>; + /// Reinstate a cancelled inbound output + fn reinstate_cancelled_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError>; } /// Holds the state of the KeyManager being used by the Output Manager Service @@ -124,11 +134,9 @@ pub enum DbKey { SpentOutput(BlindingFactor), UnspentOutput(BlindingFactor), AnyOutputByCommitment(Commitment), - PendingTransactionOutputs(TxId), TimeLockedUnspentOutputs(u64), UnspentOutputs, SpentOutputs, - AllPendingTransactionOutputs, KeyManagerState, InvalidOutputs, KnownOneSidedPaymentScripts, @@ -139,11 +147,9 @@ pub enum DbKey { pub enum DbValue { SpentOutput(Box), UnspentOutput(Box), - PendingTransactionOutputs(Box), UnspentOutputs(Vec), SpentOutputs(Vec), InvalidOutputs(Vec), - AllPendingTransactionOutputs(HashMap), KeyManagerState(KeyManagerState), KnownOneSidedPaymentScripts(Vec), AnyOutput(Box), @@ -151,13 +157,11 @@ pub enum DbValue { } pub enum DbKeyValuePair { - SpentOutput(Commitment, Box), UnspentOutput(Commitment, Box), UnspentOutputWithTxId(Commitment, (TxId, Box)), - PendingTransactionOutputs(TxId, Box), + OutputToBeReceived(Commitment, (TxId, Box, Option)), KeyManagerState(KeyManagerState), KnownOneSidedPaymentScripts(KnownOneSidedPaymentScript), - UpdateOutputStatus(Commitment, OutputStatus), } pub enum WriteOperation { @@ -165,25 +169,10 @@ pub enum WriteOperation { Remove(DbKey), } -// Private macro that pulls out all the boiler plate of extracting a DB query result from its variants -macro_rules! 
fetch { - ($db:ident, $key_val:expr, $key_var:ident) => {{ - let key = DbKey::$key_var($key_val); - match $db.fetch(&key) { - Ok(None) => Err(OutputManagerStorageError::ValueNotFound), - Ok(Some(DbValue::$key_var(k))) => Ok(*k), - Ok(Some(other)) => unexpected_result(key, other), - Err(e) => log_error(key, e), - } - }}; -} - /// This structure holds an inner type that implements the `OutputManagerBackend` trait and contains the more complex /// data access logic required by the module built onto the functionality defined by the trait #[derive(Clone)] -pub struct OutputManagerDatabase -where T: OutputManagerBackend + 'static -{ +pub struct OutputManagerDatabase { db: Arc, } @@ -266,93 +255,17 @@ where T: OutputManagerBackend + 'static Ok(()) } - pub async fn get_balance(&self, current_chain_tip: Option) -> Result { - let db_clone = self.db.clone(); - let db_clone2 = self.db.clone(); - let db_clone3 = self.db.clone(); - - let pending_txs = tokio::task::spawn_blocking(move || { - db_clone.fetch(&DbKey::AllPendingTransactionOutputs)?.ok_or_else(|| { - OutputManagerStorageError::UnexpectedResult( - "Pending Transaction Outputs cannot be retrieved".to_string(), - ) - }) - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - - let unspent_outputs = tokio::task::spawn_blocking(move || { - db_clone2.fetch(&DbKey::UnspentOutputs)?.ok_or_else(|| { - OutputManagerStorageError::UnexpectedResult("Unspent Outputs cannot be retrieved".to_string()) - }) - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - - if let DbValue::UnspentOutputs(uo) = unspent_outputs { - if let DbValue::AllPendingTransactionOutputs(pto) = pending_txs { - let available_balance = uo - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); - let time_locked_balance = if let Some(tip) = current_chain_tip { - let time_locked_outputs = tokio::task::spawn_blocking(move || { - 
db_clone3.fetch(&DbKey::TimeLockedUnspentOutputs(tip))?.ok_or_else(|| { - OutputManagerStorageError::UnexpectedResult( - "Time-locked Outputs cannot be retrieved".to_string(), - ) - }) - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - if let DbValue::UnspentOutputs(time_locked_uo) = time_locked_outputs { - Some( - time_locked_uo - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value), - ) - } else { - None - } - } else { - None - }; - let mut pending_incoming = MicroTari::from(0); - let mut pending_outgoing = MicroTari::from(0); - - for v in pto.values() { - pending_incoming += v - .outputs_to_be_received - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); - pending_outgoing += v - .outputs_to_be_spent - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); - } - - return Ok(Balance { - available_balance, - time_locked_balance, - pending_incoming_balance: pending_incoming, - pending_outgoing_balance: pending_outgoing, - }); - } - } - - Err(OutputManagerStorageError::UnexpectedResult( - "Unexpected result from database backend".to_string(), - )) - } - - pub async fn add_pending_transaction_outputs( + pub async fn add_output_to_be_received( &self, - pending_transaction_outputs: PendingTransactionOutputs, + tx_id: TxId, + output: DbUnblindedOutput, + coinbase_block_height: Option, ) -> Result<(), OutputManagerStorageError> { let db_clone = self.db.clone(); tokio::task::spawn_blocking(move || { - db_clone.write(WriteOperation::Insert(DbKeyValuePair::PendingTransactionOutputs( - pending_transaction_outputs.tx_id, - Box::new(pending_transaction_outputs), + db_clone.write(WriteOperation::Insert(DbKeyValuePair::OutputToBeReceived( + output.commitment.clone(), + (tx_id, Box::new(output), coinbase_block_height), ))) }) .await @@ -361,53 +274,71 @@ where T: OutputManagerBackend + 'static Ok(()) } - pub async fn fetch_pending_transaction_outputs( - 
&self, - tx_id: TxId, - ) -> Result { + pub async fn get_balance(&self, current_chain_tip: Option) -> Result { let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || fetch!(db_clone, tx_id, PendingTransactionOutputs)) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) - .and_then(|inner_result| inner_result) - } + let db_clone2 = self.db.clone(); + let db_clone3 = self.db.clone(); + let db_clone4 = self.db.clone(); - /// This method is called when a pending transaction is confirmed. It moves the `outputs_to_be_spent` and - /// `outputs_to_be_received` from a `PendingTransactionOutputs` record into the `unspent_outputs` and - /// `spent_outputs` collections. - pub async fn confirm_pending_transaction_outputs(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { - let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.confirm_transaction(tx_id)) + let unspent_outputs = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::UnspentOutputs) { + Ok(None) => log_error( + DbKey::UnspentOutputs, + OutputManagerStorageError::UnexpectedResult("Could not retrieve unspent outputs".to_string()), + ), + Ok(Some(DbValue::UnspentOutputs(uo))) => Ok(uo), + Ok(Some(other)) => unexpected_result(DbKey::UnspentOutputs, other), + Err(e) => log_error(DbKey::UnspentOutputs, e), + }) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + + let pending_incoming_outputs = tokio::task::spawn_blocking(move || db_clone2.fetch_pending_incoming_outputs()) .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) - .and_then(|inner_result| inner_result) - } + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - /// This method accepts and stores a pending inbound transaction and creates the `output_to_be_received` from the - /// amount and provided spending key. 
- pub async fn accept_incoming_pending_transaction( - &self, - tx_id: TxId, - output: DbUnblindedOutput, - coinbase_block_height: Option, - ) -> Result<(), OutputManagerStorageError> { - let db_clone = self.db.clone(); + let pending_outgoing_outputs = tokio::task::spawn_blocking(move || db_clone3.fetch_pending_outgoing_outputs()) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - tokio::task::spawn_blocking(move || { - db_clone.write(WriteOperation::Insert(DbKeyValuePair::PendingTransactionOutputs( - tx_id, - Box::new(PendingTransactionOutputs { - tx_id, - outputs_to_be_spent: Vec::new(), - outputs_to_be_received: vec![output], - timestamp: Utc::now().naive_utc(), - coinbase_block_height, - }), - ))) + let time_locked_balance = if let Some(tip) = current_chain_tip { + let time_locked_outputs = tokio::task::spawn_blocking(move || { + db_clone4.fetch(&DbKey::TimeLockedUnspentOutputs(tip))?.ok_or_else(|| { + OutputManagerStorageError::UnexpectedResult("Time-locked Outputs cannot be retrieved".to_string()) + }) + }) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + if let DbValue::UnspentOutputs(time_locked_uo) = time_locked_outputs { + Some( + time_locked_uo + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value), + ) + } else { + None + } + } else { + None + }; + + let available_balance = unspent_outputs + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + + let pending_incoming = pending_incoming_outputs + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + + let pending_outgoing = pending_outgoing_outputs + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + + Ok(Balance { + available_balance, + time_locked_balance, + pending_incoming_balance: pending_incoming, + pending_outgoing_balance: pending_outgoing, }) - .await - .map_err(|err| 
OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(()) } /// This method is called when a transaction is built to be sent. It will encumber unspent outputs against a pending @@ -457,11 +388,13 @@ where T: OutputManagerBackend + 'static .and_then(|inner_result| inner_result) } - /// This method is check all pending transactions to see if any are older that the provided duration. If they are - /// they will be cancelled. - pub async fn timeout_pending_transaction_outputs(&self, period: Duration) -> Result<(), OutputManagerStorageError> { + /// Check if there is a pending coinbase transaction at this block height, if there is clear it. + pub async fn clear_pending_coinbase_transaction_at_block_height( + &self, + block_height: u64, + ) -> Result<(), OutputManagerStorageError> { let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.timeout_pending_transactions(period)) + tokio::task::spawn_blocking(move || db_clone.clear_pending_coinbase_transaction_at_block_height(block_height)) .await .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) .and_then(|inner_result| inner_result) @@ -504,59 +437,20 @@ where T: OutputManagerBackend + 'static Ok(uo) } - pub async fn fetch_all_pending_transaction_outputs( - &self, - ) -> Result, OutputManagerStorageError> { - let db_clone = self.db.clone(); - - let uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::AllPendingTransactionOutputs) { - Ok(None) => log_error( - DbKey::AllPendingTransactionOutputs, - OutputManagerStorageError::UnexpectedResult( - "Could not retrieve pending transaction outputs".to_string(), - ), - ), - Ok(Some(DbValue::AllPendingTransactionOutputs(pt))) => Ok(pt), - Ok(Some(other)) => unexpected_result(DbKey::AllPendingTransactionOutputs, other), - Err(e) => log_error(DbKey::AllPendingTransactionOutputs, e), - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(uo) 
- } - - pub async fn get_unspent_outputs(&self) -> Result, OutputManagerStorageError> { + pub async fn fetch_unconfirmed_outputs(&self) -> Result, OutputManagerStorageError> { let db_clone = self.db.clone(); - - let uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::UnspentOutputs) { - Ok(None) => log_error( - DbKey::UnspentOutputs, - OutputManagerStorageError::UnexpectedResult("Could not retrieve unspent outputs".to_string()), - ), - Ok(Some(DbValue::UnspentOutputs(uo))) => Ok(uo), - Ok(Some(other)) => unexpected_result(DbKey::UnspentOutputs, other), - Err(e) => log_error(DbKey::UnspentOutputs, e), - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(uo) + let utxos = tokio::task::spawn_blocking(move || db_clone.fetch_unconfirmed_outputs()) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(utxos) } - pub async fn get_spent_outputs(&self) -> Result, OutputManagerStorageError> { + pub async fn fetch_mined_unspent_outputs(&self) -> Result, OutputManagerStorageError> { let db_clone = self.db.clone(); - - let uo = tokio::task::spawn_blocking(move || match db_clone.fetch(&DbKey::SpentOutputs) { - Ok(None) => log_error( - DbKey::SpentOutputs, - OutputManagerStorageError::UnexpectedResult("Could not retrieve spent outputs".to_string()), - ), - Ok(Some(DbValue::SpentOutputs(uo))) => Ok(uo), - Ok(Some(other)) => unexpected_result(DbKey::SpentOutputs, other), - Err(e) => log_error(DbKey::SpentOutputs, e), - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(uo) + let utxos = tokio::task::spawn_blocking(move || db_clone.fetch_mined_unspent_outputs()) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(utxos) } pub async fn get_timelocked_outputs(&self, tip: u64) -> Result, OutputManagerStorageError> { @@ -593,17 +487,6 @@ where T: OutputManagerBackend 
+ 'static Ok(uo) } - pub async fn invalidate_output( - &self, - output: DbUnblindedOutput, - ) -> Result, OutputManagerStorageError> { - let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.invalidate_unspent_output(&output)) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) - .and_then(|inner_result| inner_result) - } - pub async fn update_output_metadata_signature( &self, output: TransactionOutput, @@ -623,23 +506,9 @@ where T: OutputManagerBackend + 'static .and_then(|inner_result| inner_result) } - pub async fn update_spent_output_to_unspent( - &self, - commitment: Commitment, - ) -> Result { - let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.update_spent_output_to_unspent(&commitment)) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) - .and_then(|inner_result| inner_result) - } - - pub async fn cancel_pending_transaction_at_block_height( - &self, - block_height: u64, - ) -> Result<(), OutputManagerStorageError> { + pub async fn reinstate_cancelled_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.cancel_pending_transaction_at_block_height(block_height)) + tokio::task::spawn_blocking(move || db_clone.reinstate_cancelled_inbound_output(tx_id)) .await .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) .and_then(|inner_result| inner_result) @@ -680,6 +549,14 @@ where T: OutputManagerBackend + 'static Ok(scripts) } + pub async fn get_last_mined_output(&self) -> Result, OutputManagerStorageError> { + self.db.get_last_mined_output() + } + + pub async fn get_last_spent_output(&self) -> Result, OutputManagerStorageError> { + self.db.get_last_spent_output() + } + pub async fn add_known_script( &self, known_script: KnownOneSidedPaymentScript, @@ -711,45 +588,58 @@ where T: 
OutputManagerBackend + 'static Ok(()) } - /// Check if a single cancelled inbound output exists that matches this TxID, if it does then return its status to - /// EncumberedToBeReceived - pub async fn reinstate_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { - let db_clone = self.db.clone(); - let outputs = tokio::task::spawn_blocking(move || { - match db_clone.fetch(&DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound)) { - Ok(None) => Err(OutputManagerStorageError::ValueNotFound), - Ok(Some(DbValue::AnyOutputs(o))) => Ok(o), - Ok(Some(other)) => unexpected_result( - DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound), - other, - ), - Err(e) => log_error(DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound), e), - } - }) - .await - .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) - .and_then(|inner_result| inner_result)?; - - if outputs.len() != 1 { - return Err(OutputManagerStorageError::UnexpectedResult( - "There should be only 1 output for a cancelled inbound transaction but more were found".to_string(), - )); - } - let db_clone2 = self.db.clone(); - + pub async fn set_received_output_mined_height( + &self, + hash: HashOutput, + mined_height: u64, + mined_in_block: HashOutput, + mmr_position: u64, + confirmed: bool, + ) -> Result<(), OutputManagerStorageError> { + let db = self.db.clone(); tokio::task::spawn_blocking(move || { - db_clone2.write(WriteOperation::Insert(DbKeyValuePair::UpdateOutputStatus( - outputs - .first() - .expect("Must be only one element in outputs") - .commitment - .clone(), - OutputStatus::EncumberedToBeReceived, - ))) + db.set_received_output_mined_height(hash, mined_height, mined_in_block, mmr_position, confirmed) }) .await .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(()) + } + + pub async fn set_output_to_unmined(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> { 
+ let db = self.db.clone(); + tokio::task::spawn_blocking(move || db.set_output_to_unmined(hash)) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(()) + } + pub async fn mark_output_as_spent( + &self, + hash: HashOutput, + deleted_height: u64, + deleted_in_block: HashOutput, + confirmed: bool, + ) -> Result<(), OutputManagerStorageError> { + let db = self.db.clone(); + tokio::task::spawn_blocking(move || db.mark_output_as_spent(hash, deleted_height, deleted_in_block, confirmed)) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(()) + } + + pub async fn mark_output_as_unspent(&self, hash: HashOutput) -> Result<(), OutputManagerStorageError> { + let db = self.db.clone(); + tokio::task::spawn_blocking(move || db.mark_output_as_unspent(hash)) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(()) + } + + pub async fn set_coinbase_abandoned(&self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerStorageError> { + let db = self.db.clone(); + tokio::task::spawn_blocking(move || db.set_coinbase_abandoned(tx_id, abandoned)) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; Ok(()) } } @@ -765,12 +655,8 @@ impl Display for DbKey { match self { DbKey::SpentOutput(_) => f.write_str(&"Spent Output Key".to_string()), DbKey::UnspentOutput(_) => f.write_str(&"Unspent Output Key".to_string()), - DbKey::PendingTransactionOutputs(tx_id) => { - f.write_str(&format!("Pending Transaction Outputs TX_ID: {}", tx_id)) - }, DbKey::UnspentOutputs => f.write_str(&"Unspent Outputs Key".to_string()), DbKey::SpentOutputs => f.write_str(&"Spent Outputs Key".to_string()), - DbKey::AllPendingTransactionOutputs => f.write_str(&"All Pending Transaction Outputs".to_string()), DbKey::KeyManagerState => f.write_str(&"Key Manager State".to_string()), DbKey::InvalidOutputs => f.write_str("Invalid 
Outputs Key"), DbKey::TimeLockedUnspentOutputs(_t) => f.write_str("Timelocked Outputs"), @@ -786,10 +672,8 @@ impl Display for DbValue { match self { DbValue::SpentOutput(_) => f.write_str("Spent Output"), DbValue::UnspentOutput(_) => f.write_str("Unspent Output"), - DbValue::PendingTransactionOutputs(_) => f.write_str("Pending Transaction Outputs"), DbValue::UnspentOutputs(_) => f.write_str("Unspent Outputs"), DbValue::SpentOutputs(_) => f.write_str("Spent Outputs"), - DbValue::AllPendingTransactionOutputs(_) => f.write_str("All Pending Transaction Outputs"), DbValue::KeyManagerState(_) => f.write_str("Key Manager State"), DbValue::InvalidOutputs(_) => f.write_str("Invalid Outputs"), DbValue::KnownOneSidedPaymentScripts(_) => f.write_str("Known claiming scripts"), diff --git a/base_layer/wallet/src/output_manager_service/storage/models.rs b/base_layer/wallet/src/output_manager_service/storage/models.rs index e0f00a0569..198ad10054 100644 --- a/base_layer/wallet/src/output_manager_service/storage/models.rs +++ b/base_layer/wallet/src/output_manager_service/storage/models.rs @@ -20,23 +20,25 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use crate::output_manager_service::error::OutputManagerStorageError; use std::cmp::Ordering; - -use tari_crypto::script::{ExecutionStack, TariScript}; - -use tari_common_types::types::{Commitment, HashOutput, PrivateKey}; +use tari_common_types::types::{BlockHash, Commitment, HashOutput, PrivateKey}; use tari_core::{ tari_utilities::hash::Hashable, transactions::{transaction::UnblindedOutput, transaction_protocol::RewindData, CryptoFactories}, }; - -use crate::output_manager_service::error::OutputManagerStorageError; +use tari_crypto::script::{ExecutionStack, TariScript}; #[derive(Debug, Clone)] pub struct DbUnblindedOutput { pub commitment: Commitment, pub unblinded_output: UnblindedOutput, pub hash: HashOutput, + pub mined_height: Option, + pub mined_in_block: Option, + pub mined_mmr_position: Option, + pub marked_deleted_at_height: Option, + pub marked_deleted_in_block: Option, } impl DbUnblindedOutput { @@ -49,6 +51,11 @@ impl DbUnblindedOutput { hash: tx_out.hash(), commitment: tx_out.commitment, unblinded_output: output, + mined_height: None, + mined_in_block: None, + mined_mmr_position: None, + marked_deleted_at_height: None, + marked_deleted_in_block: None, }) } @@ -62,6 +69,11 @@ impl DbUnblindedOutput { hash: tx_out.hash(), commitment: tx_out.commitment, unblinded_output: output, + mined_height: None, + mined_in_block: None, + mined_mmr_position: None, + marked_deleted_at_height: None, + marked_deleted_in_block: None, }) } } @@ -115,4 +127,9 @@ pub enum OutputStatus { EncumberedToBeSpent, Invalid, CancelledInbound, + UnspentMinedUnconfirmed, + ShortTermEncumberedToBeReceived, + ShortTermEncumberedToBeSpent, + SpentMinedUnconfirmed, + AbandonedCoinbase, } diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs index 9898030c26..e71187118e 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs +++ 
b/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs @@ -20,27 +20,31 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{ - collections::HashMap, - convert::TryFrom, - str::from_utf8, - sync::{Arc, RwLock}, - time::Duration, +use crate::{ + output_manager_service::{ + error::OutputManagerStorageError, + storage::{ + database::{DbKey, DbKeyValuePair, DbValue, KeyManagerState, OutputManagerBackend, WriteOperation}, + models::{DbUnblindedOutput, KnownOneSidedPaymentScript, OutputStatus}, + }, + TxId, + }, + schema::{key_manager_states, known_one_sided_payment_scripts, outputs}, + storage::sqlite_utilities::WalletDbConnection, + util::{ + diesel_ext::ExpectedRowsExtension, + encryption::{decrypt_bytes_integral_nonce, encrypt_bytes_integral_nonce, Encryptable}, + }, }; - use aes_gcm::{aead::Error as AeadError, Aes256Gcm, Error}; -use chrono::{Duration as ChronoDuration, NaiveDateTime, Utc}; +use chrono::{NaiveDateTime, Utc}; use diesel::{prelude::*, result::Error as DieselError, SqliteConnection}; use log::*; -use tari_crypto::{ - commitment::HomomorphicCommitmentFactory, - script::{ExecutionStack, TariScript}, - tari_utilities::{ - hex::{from_hex, Hex}, - ByteArray, - }, +use std::{ + convert::{TryFrom, TryInto}, + str::from_utf8, + sync::{Arc, RwLock}, }; - use tari_common_types::types::{ComSignature, Commitment, PrivateKey, PublicKey}; use tari_core::{ tari_utilities::hash::Hashable, @@ -50,27 +54,13 @@ use tari_core::{ CryptoFactories, }, }; - -use crate::{ - output_manager_service::{ - error::OutputManagerStorageError, - storage::{ - database::{ - DbKey, - DbKeyValuePair, - DbValue, - KeyManagerState, - OutputManagerBackend, - PendingTransactionOutputs, - WriteOperation, - }, - models::{DbUnblindedOutput, KnownOneSidedPaymentScript, OutputStatus}, - }, - TxId, +use tari_crypto::{ + 
commitment::HomomorphicCommitmentFactory, + script::{ExecutionStack, TariScript}, + tari_utilities::{ + hex::{from_hex, Hex}, + ByteArray, }, - schema::{key_manager_states, known_one_sided_payment_scripts, outputs, pending_transaction_outputs}, - storage::sqlite_utilities::WalletDbConnection, - util::encryption::{decrypt_bytes_integral_nonce, encrypt_bytes_integral_nonce, Encryptable}, }; const LOG_TARGET: &str = "wallet::output_manager_service::database::sqlite_db"; @@ -107,7 +97,53 @@ impl OutputManagerSqliteDatabase { } Ok(()) } + + fn insert(&self, key_value_pair: DbKeyValuePair, conn: &SqliteConnection) -> Result<(), OutputManagerStorageError> { + match key_value_pair { + DbKeyValuePair::UnspentOutput(c, o) => { + if OutputSql::find_by_commitment_and_cancelled(&c.to_vec(), false, &(*conn)).is_ok() { + return Err(OutputManagerStorageError::DuplicateOutput); + } + let mut new_output = NewOutputSql::new(*o, OutputStatus::Unspent, None, None)?; + self.encrypt_if_necessary(&mut new_output)?; + new_output.commit(&(*conn))? + }, + DbKeyValuePair::UnspentOutputWithTxId(c, (tx_id, o)) => { + if OutputSql::find_by_commitment_and_cancelled(&c.to_vec(), false, &(*conn)).is_ok() { + return Err(OutputManagerStorageError::DuplicateOutput); + } + let mut new_output = NewOutputSql::new(*o, OutputStatus::Unspent, Some(tx_id), None)?; + self.encrypt_if_necessary(&mut new_output)?; + new_output.commit(&(*conn))? + }, + DbKeyValuePair::OutputToBeReceived(c, (tx_id, o, coinbase_block_height)) => { + if OutputSql::find_by_commitment_and_cancelled(&c.to_vec(), false, &(*conn)).is_ok() { + return Err(OutputManagerStorageError::DuplicateOutput); + } + let mut new_output = NewOutputSql::new( + *o, + OutputStatus::EncumberedToBeReceived, + Some(tx_id), + coinbase_block_height, + )?; + self.encrypt_if_necessary(&mut new_output)?; + new_output.commit(&(*conn))? 
+ }, + DbKeyValuePair::KeyManagerState(km) => { + let mut km_sql = NewKeyManagerStateSql::from(km); + self.encrypt_if_necessary(&mut km_sql)?; + km_sql.commit(&(*conn))? + }, + DbKeyValuePair::KnownOneSidedPaymentScripts(script) => { + let mut script_sql = KnownOneSidedPaymentScriptSql::from(script); + self.encrypt_if_necessary(&mut script_sql)?; + script_sql.commit(&(*conn))? + }, + } + Ok(()) + } } + impl OutputManagerBackend for OutputManagerSqliteDatabase { #[allow(clippy::cognitive_complexity)] fn fetch(&self, key: &DbKey) -> Result, OutputManagerStorageError> { @@ -156,29 +192,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { }, } }, - DbKey::PendingTransactionOutputs(tx_id) => match PendingTransactionOutputSql::find(*tx_id, &(*conn)) { - Ok(p) => { - let mut outputs = OutputSql::find_by_tx_id_and_encumbered(*tx_id, &(*conn))?; - for o in outputs.iter_mut() { - self.decrypt_if_necessary(o)?; - } - Some(DbValue::PendingTransactionOutputs(Box::new( - pending_transaction_outputs_from_sql_outputs( - p.tx_id as u64, - &p.timestamp, - outputs, - p.coinbase_block_height.map(|h| h as u64), - )?, - ))) - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - None - }, - }, DbKey::OutputsByTxIdAndStatus(tx_id, status) => { let mut outputs = OutputSql::find_by_tx_id_and_status(*tx_id, *status, &(*conn))?; for o in outputs.iter_mut() { @@ -230,33 +243,15 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { .collect::, _>>()?, )) }, - DbKey::AllPendingTransactionOutputs => { - let pending_sql_txs = PendingTransactionOutputSql::index(&(*conn))?; - let mut pending_txs = HashMap::new(); - for p_tx in pending_sql_txs { - let mut outputs = OutputSql::find_by_tx_id_and_encumbered(p_tx.tx_id as u64, &(*conn))?; - - for o in outputs.iter_mut() { - self.decrypt_if_necessary(o)?; - } - - pending_txs.insert( - p_tx.tx_id as u64, - pending_transaction_outputs_from_sql_outputs( - 
p_tx.tx_id as u64, - &p_tx.timestamp, - outputs, - p_tx.coinbase_block_height.map(|h| h as u64), - )?, - ); - } - Some(DbValue::AllPendingTransactionOutputs(pending_txs)) - }, DbKey::KeyManagerState => match KeyManagerStateSql::get_state(&(*conn)).ok() { None => None, Some(mut km) => { self.decrypt_if_necessary(&mut km)?; + // TODO: This is a problem because the keymanager state does not have an index + // meaning that update round trips to the database can't be found again. + // I would suggest changing this to a different pattern for retrieval, perhaps + // only returning the columns that are needed. Some(DbValue::KeyManagerState(KeyManagerState::try_from(km)?)) }, }, @@ -291,115 +286,80 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { Ok(result) } - #[allow(clippy::cognitive_complexity)] - fn write(&self, op: WriteOperation) -> Result, OutputManagerStorageError> { + fn fetch_mined_unspent_outputs(&self) -> Result, OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); + let mut outputs = OutputSql::index_marked_deleted_in_block_is_null(&(*conn))?; + for output in outputs.iter_mut() { + self.decrypt_if_necessary(output)?; + } - match op { - WriteOperation::Insert(kvp) => match kvp { - DbKeyValuePair::SpentOutput(c, o) => { - if OutputSql::find_by_commitment_and_cancelled(&c.to_vec(), false, &(*conn)).is_ok() { - return Err(OutputManagerStorageError::DuplicateOutput); - } - let mut new_output = NewOutputSql::new(*o, OutputStatus::Spent, None)?; + outputs + .into_iter() + .map(DbUnblindedOutput::try_from) + .collect::, _>>() + } - self.encrypt_if_necessary(&mut new_output)?; + fn fetch_unconfirmed_outputs(&self) -> Result, OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + let mut outputs = OutputSql::index_unconfirmed(&(*conn))?; + for output in outputs.iter_mut() { + self.decrypt_if_necessary(output)?; + } - new_output.commit(&(*conn))? 
- }, - DbKeyValuePair::UnspentOutput(c, o) => { - if OutputSql::find_by_commitment_and_cancelled(&c.to_vec(), false, &(*conn)).is_ok() { - return Err(OutputManagerStorageError::DuplicateOutput); - } - let mut new_output = NewOutputSql::new(*o, OutputStatus::Unspent, None)?; - self.encrypt_if_necessary(&mut new_output)?; - new_output.commit(&(*conn))? - }, - DbKeyValuePair::UnspentOutputWithTxId(c, (tx_id, o)) => { - if OutputSql::find_by_commitment_and_cancelled(&c.to_vec(), false, &(*conn)).is_ok() { - return Err(OutputManagerStorageError::DuplicateOutput); - } - let mut new_output = NewOutputSql::new(*o, OutputStatus::Unspent, Some(tx_id))?; - self.encrypt_if_necessary(&mut new_output)?; - new_output.commit(&(*conn))? - }, - DbKeyValuePair::PendingTransactionOutputs(tx_id, p) => { - if PendingTransactionOutputSql::find(tx_id, &(*conn)).is_ok() { - return Err(OutputManagerStorageError::DuplicateTransaction); - } + outputs + .into_iter() + .map(DbUnblindedOutput::try_from) + .collect::, _>>() + } - PendingTransactionOutputSql::new( - p.tx_id, - true, - p.timestamp, - p.coinbase_block_height.map(|h| h as i64), - ) - .commit(&(*conn))?; - for o in p.outputs_to_be_spent { - let mut new_output = NewOutputSql::new(o, OutputStatus::EncumberedToBeSpent, Some(p.tx_id))?; - self.encrypt_if_necessary(&mut new_output)?; - new_output.commit(&(*conn))?; - } - for o in p.outputs_to_be_received { - let mut new_output = NewOutputSql::new(o, OutputStatus::EncumberedToBeReceived, Some(p.tx_id))?; - self.encrypt_if_necessary(&mut new_output)?; - new_output.commit(&(*conn))?; - } - }, - DbKeyValuePair::KeyManagerState(km) => { - let mut km_sql = KeyManagerStateSql::from(km); - self.encrypt_if_necessary(&mut km_sql)?; - km_sql.set_state(&(*conn))? - }, - DbKeyValuePair::KnownOneSidedPaymentScripts(script) => { - let mut script_sql = KnownOneSidedPaymentScriptSql::from(script); - self.encrypt_if_necessary(&mut script_sql)?; - script_sql.commit(&(*conn))? 
- }, - DbKeyValuePair::UpdateOutputStatus(commitment, status) => { - let output = OutputSql::find_by_commitment(&commitment.to_vec(), &(*conn))?; - output.update( - UpdateOutput { - status: Some(status), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - }, - }, + fn fetch_pending_incoming_outputs(&self) -> Result, OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + + let mut outputs = OutputSql::index_status(OutputStatus::EncumberedToBeReceived, &conn)?; + outputs.extend(OutputSql::index_status( + OutputStatus::ShortTermEncumberedToBeReceived, + &conn, + )?); + outputs.extend(OutputSql::index_status(OutputStatus::UnspentMinedUnconfirmed, &conn)?); + for o in outputs.iter_mut() { + self.decrypt_if_necessary(o)?; + } + outputs + .iter() + .map(|o| DbUnblindedOutput::try_from(o.clone())) + .collect::, _>>() + } + + fn fetch_pending_outgoing_outputs(&self) -> Result, OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + + let mut outputs = OutputSql::index_status(OutputStatus::EncumberedToBeSpent, &conn)?; + outputs.extend(OutputSql::index_status( + OutputStatus::ShortTermEncumberedToBeSpent, + &conn, + )?); + outputs.extend(OutputSql::index_status(OutputStatus::SpentMinedUnconfirmed, &conn)?); + for o in outputs.iter_mut() { + self.decrypt_if_necessary(o)?; + } + outputs + .iter() + .map(|o| DbUnblindedOutput::try_from(o.clone())) + .collect::, _>>() + } + + fn write(&self, op: WriteOperation) -> Result, OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + + match op { + WriteOperation::Insert(kvp) => self.insert(kvp, &conn)?, WriteOperation::Remove(k) => match k { - DbKey::SpentOutput(s) => match OutputSql::find_status(&s.to_vec(), OutputStatus::Spent, &(*conn)) { - Ok(o) => { - o.delete(&(*conn))?; - return 
Ok(Some(DbValue::SpentOutput(Box::new(DbUnblindedOutput::try_from(o)?)))); - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - }, - }, - DbKey::UnspentOutput(k) => match OutputSql::find_status(&k.to_vec(), OutputStatus::Unspent, &(*conn)) { - Ok(o) => { - o.delete(&(*conn))?; - return Ok(Some(DbValue::UnspentOutput(Box::new(DbUnblindedOutput::try_from(o)?)))); - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - }, - }, DbKey::AnyOutputByCommitment(commitment) => { + // Used by coinbase when mining. match OutputSql::find_by_commitment(&commitment.to_vec(), &(*conn)) { - Ok(o) => { + Ok(mut o) => { o.delete(&(*conn))?; + self.decrypt_if_necessary(&mut o)?; return Ok(Some(DbValue::AnyOutput(Box::new(DbUnblindedOutput::try_from(o)?)))); }, Err(e) => { @@ -410,34 +370,10 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { }, } }, - DbKey::PendingTransactionOutputs(tx_id) => match PendingTransactionOutputSql::find(tx_id, &(*conn)) { - Ok(p) => { - let mut outputs = OutputSql::find_by_tx_id_and_encumbered(p.tx_id as u64, &(*conn))?; - - for o in outputs.iter_mut() { - self.decrypt_if_necessary(o)?; - } - - p.delete(&(*conn))?; - return Ok(Some(DbValue::PendingTransactionOutputs(Box::new( - pending_transaction_outputs_from_sql_outputs( - p.tx_id as u64, - &p.timestamp, - outputs, - p.coinbase_block_height.map(|h| h as u64), - )?, - )))); - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - }, - }, + DbKey::SpentOutput(_s) => return Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutput(_k) => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::UnspentOutputs => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::SpentOutputs => return 
Err(OutputManagerStorageError::OperationNotSupported), - DbKey::AllPendingTransactionOutputs => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::KeyManagerState => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::InvalidOutputs => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::TimeLockedUnspentOutputs(_) => return Err(OutputManagerStorageError::OperationNotSupported), @@ -449,50 +385,141 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { Ok(None) } - fn confirm_transaction(&self, tx_id: u64) -> Result<(), OutputManagerStorageError> { + fn set_output_to_unmined(&self, hash: Vec) -> Result<(), OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); + // Only allow updating of non-deleted utxos + diesel::update(outputs::table.filter(outputs::hash.eq(hash).and(outputs::marked_deleted_at_height.is_null()))) + .set(( + outputs::mined_height.eq::>(None), + outputs::mined_in_block.eq::>>(None), + outputs::mined_mmr_position.eq::>(None), + outputs::status.eq(OutputStatus::Invalid as i32), + )) + .execute(&(*conn)) + .num_rows_affected_or_not_found(1)?; - match PendingTransactionOutputSql::find(tx_id, &(*conn)) { - Ok(p) => { - let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &(*conn))?; - - for o in outputs { - if o.status == (OutputStatus::EncumberedToBeReceived as i32) { - o.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - } else if o.status == (OutputStatus::EncumberedToBeSpent as i32) { - o.update( - UpdateOutput { - status: Some(OutputStatus::Spent), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - } - } + Ok(()) + } - p.delete(&(*conn))?; - }, - Err(e) => { - match e { - 
OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - }, - } + fn set_received_output_mined_height( + &self, + hash: Vec, + mined_height: u64, + mined_in_block: Vec, + mmr_position: u64, + confirmed: bool, + ) -> Result<(), OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + let status = if confirmed { + OutputStatus::Unspent as i32 + } else { + OutputStatus::UnspentMinedUnconfirmed as i32 + }; + error!( + target: LOG_TARGET, + "`set_received_output_mined_height` status: {}", status + ); + // Only allow updating of non-deleted utxos + diesel::update(outputs::table.filter(outputs::hash.eq(hash).and(outputs::marked_deleted_at_height.is_null()))) + .set(( + outputs::mined_height.eq(mined_height as i64), + outputs::mined_in_block.eq(mined_in_block), + outputs::mined_mmr_position.eq(mmr_position as i64), + outputs::status.eq(status), + )) + .execute(&(*conn)) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + fn mark_output_as_spent( + &self, + hash: Vec, + mark_deleted_at_height: u64, + mark_deleted_in_block: Vec, + confirmed: bool, + ) -> Result<(), OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + let status = if confirmed { + OutputStatus::Spent as i32 + } else { + OutputStatus::SpentMinedUnconfirmed as i32 + }; + // Only allow updating of non-deleted utxos + diesel::update( + outputs::table.filter( + outputs::hash.eq(hash).and( + outputs::marked_deleted_in_block + .is_null() + .or(outputs::status.eq(OutputStatus::SpentMinedUnconfirmed as i32)), + ), + ), + ) + .set(( + outputs::marked_deleted_at_height.eq(mark_deleted_at_height as i64), + outputs::marked_deleted_in_block.eq(mark_deleted_in_block), + outputs::status.eq(status), + )) + .execute(&(*conn)) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + fn mark_output_as_unspent(&self, hash: Vec) -> Result<(), OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); 
+ + debug!(target: LOG_TARGET, "mark_output_as_unspent({})", hash.to_hex()); + diesel::update( + outputs::table.filter( + outputs::hash + .eq(hash) + .and(outputs::marked_deleted_at_height.is_not_null()) + .and(outputs::mined_height.is_not_null()), + ), + ) + .set(( + outputs::marked_deleted_at_height.eq::>(None), + outputs::marked_deleted_in_block.eq::>>(None), + outputs::status.eq(OutputStatus::Unspent as i32), + )) + .execute(&(*conn)) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + fn set_coinbase_abandoned(&self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + + if abandoned { + debug!( + target: LOG_TARGET, + "set_coinbase_abandoned(TxID: {}) as {}", tx_id, abandoned + ); + diesel::update( + outputs::table.filter( + outputs::received_in_tx_id + .eq(Some(tx_id as i64)) + .and(outputs::coinbase_block_height.is_not_null()), + ), + ) + .set((outputs::status.eq(OutputStatus::AbandonedCoinbase as i32),)) + .execute(&(*conn)) + .num_rows_affected_or_not_found(1)?; + } else { + let output = OutputSql::find_by_tx_id_and_status(tx_id, OutputStatus::AbandonedCoinbase, &conn)?; + for o in output.into_iter() { + o.update( + UpdateOutput { + status: Some(OutputStatus::EncumberedToBeReceived), + ..Default::default() + }, + &conn, + )?; + } + }; Ok(()) } @@ -508,30 +535,30 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let mut outputs_to_be_spent = Vec::with_capacity(outputs_to_send.len()); for i in outputs_to_send { let output = OutputSql::find_by_commitment_and_cancelled(i.commitment.as_bytes(), false, &(*conn))?; - if output.status == (OutputStatus::Spent as i32) { + if output.status != (OutputStatus::Unspent as i32) { return Err(OutputManagerStorageError::OutputAlreadySpent); } outputs_to_be_spent.push(output); } - PendingTransactionOutputSql::new(tx_id, true, Utc::now().naive_utc(), None).commit(&(*conn))?; - for o in outputs_to_be_spent { o.update( UpdateOutput { 
- status: Some(OutputStatus::EncumberedToBeSpent), - tx_id: Some(tx_id), - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, + status: Some(OutputStatus::ShortTermEncumberedToBeSpent), + spent_in_tx_id: Some(Some(tx_id)), + ..Default::default() }, &(*conn), )?; } for co in outputs_to_receive { - let mut new_output = NewOutputSql::new(co.clone(), OutputStatus::EncumberedToBeReceived, Some(tx_id))?; + let mut new_output = NewOutputSql::new( + co.clone(), + OutputStatus::ShortTermEncumberedToBeReceived, + Some(tx_id), + None, + )?; self.encrypt_if_necessary(&mut new_output)?; new_output.commit(&(*conn))?; } @@ -542,18 +569,28 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { fn confirm_encumbered_outputs(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); - match PendingTransactionOutputSql::find(tx_id, &(*conn)) { - Ok(p) => { - p.clear_short_term(&(*conn))?; - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => { - return Err(OutputManagerStorageError::ValueNotFound) - }, - e => return Err(e), - }; - }, + let outputs_to_be_received = + OutputSql::find_by_tx_id_and_status(tx_id, OutputStatus::ShortTermEncumberedToBeReceived, &conn)?; + for o in outputs_to_be_received.iter() { + o.update( + UpdateOutput { + status: Some(OutputStatus::EncumberedToBeReceived), + ..Default::default() + }, + &(*conn), + )?; + } + + let outputs_to_be_spent = + OutputSql::find_by_tx_id_and_status(tx_id, OutputStatus::ShortTermEncumberedToBeSpent, &conn)?; + for o in outputs_to_be_spent.iter() { + o.update( + UpdateOutput { + status: Some(OutputStatus::EncumberedToBeSpent), + ..Default::default() + }, + &(*conn), + )?; } Ok(()) @@ -562,78 +599,100 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { fn clear_short_term_encumberances(&self) -> Result<(), OutputManagerStorageError> { let conn = 
self.database_connection.acquire_lock(); - let pending_transaction_outputs = PendingTransactionOutputSql::index_short_term(&(*conn))?; - drop(conn); + let outputs_to_be_received = OutputSql::index_status(OutputStatus::ShortTermEncumberedToBeReceived, &conn)?; + for o in outputs_to_be_received.iter() { + o.update( + UpdateOutput { + status: Some(OutputStatus::CancelledInbound), + ..Default::default() + }, + &(*conn), + )?; + } - for pto in pending_transaction_outputs.iter() { - self.cancel_pending_transaction(pto.tx_id as u64)?; + let outputs_to_be_spent = OutputSql::index_status(OutputStatus::ShortTermEncumberedToBeSpent, &conn)?; + for o in outputs_to_be_spent.iter() { + o.update( + UpdateOutput { + status: Some(OutputStatus::Unspent), + ..Default::default() + }, + &(*conn), + )?; } Ok(()) } - fn cancel_pending_transaction(&self, tx_id: u64) -> Result<(), OutputManagerStorageError> { + fn get_last_mined_output(&self) -> Result, OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); - match PendingTransactionOutputSql::find(tx_id, &(*conn)) { - Ok(p) => { - let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &(*conn))?; - - for o in outputs { - if o.status == (OutputStatus::EncumberedToBeReceived as i32) { - o.update( - UpdateOutput { - status: Some(OutputStatus::CancelledInbound), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - } else if o.status == (OutputStatus::EncumberedToBeSpent as i32) { - o.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - o.update_null(NullOutputSql { tx_id: None }, &(*conn))?; - } - } + let output = OutputSql::first_by_mined_height_desc(&(*conn))?; + match output { + Some(mut o) => { + self.decrypt_if_necessary(&mut o)?; + 
Ok(Some(o.try_into()?)) + }, + None => Ok(None), + } + } - p.delete(&(*conn))?; + fn get_last_spent_output(&self) -> Result, OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + + let output = OutputSql::first_by_marked_deleted_height_desc(&(*conn))?; + match output { + Some(mut o) => { + self.decrypt_if_necessary(&mut o)?; + Ok(Some(o.try_into()?)) }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => { - return Err(OutputManagerStorageError::ValueNotFound) + None => Ok(None), + } + } + + fn cancel_pending_transaction(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { + let conn = self.database_connection.acquire_lock(); + + let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &conn)?; + + if outputs.is_empty() { + return Err(OutputManagerStorageError::ValueNotFound); + } + + for output in outputs.iter() { + if output.received_in_tx_id == Some(tx_id as i64) { + output.update( + UpdateOutput { + status: Some(OutputStatus::CancelledInbound), + ..Default::default() }, - e => return Err(e), - }; - }, + &(*conn), + )?; + } else if output.spent_in_tx_id == Some(tx_id as i64) { + output.update( + UpdateOutput { + status: Some(OutputStatus::Unspent), + spent_in_tx_id: Some(None), + ..Default::default() + }, + &(*conn), + )?; + } } Ok(()) } - fn timeout_pending_transactions(&self, period: Duration) -> Result<(), OutputManagerStorageError> { + fn clear_pending_coinbase_transaction_at_block_height( + &self, + block_height: u64, + ) -> Result<(), OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); - let older_pending_txs = PendingTransactionOutputSql::index_older( - Utc::now().naive_utc() - ChronoDuration::from_std(period)?, - &(*conn), - )?; - drop(conn); - for ptx in older_pending_txs { - self.cancel_pending_transaction(ptx.tx_id as u64)?; - } + let output = OutputSql::find_pending_coinbase_at_block_height(block_height, &conn)?; + + 
output.delete(&conn)?; + Ok(()) } @@ -653,36 +712,14 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { Ok(()) } - fn invalidate_unspent_output(&self, output: &DbUnblindedOutput) -> Result, OutputManagerStorageError> { - let conn = self.database_connection.acquire_lock(); - let output = OutputSql::find_by_commitment_and_cancelled(&output.commitment.to_vec(), false, &conn)?; - let tx_id = output.tx_id.map(|id| id as u64); - output.update( - UpdateOutput { - status: Some(OutputStatus::Invalid), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - - Ok(tx_id) - } - fn update_output_metadata_signature(&self, output: &TransactionOutput) -> Result<(), OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); let db_output = OutputSql::find_by_commitment_and_cancelled(&output.commitment.to_vec(), false, &conn)?; db_output.update( UpdateOutput { - status: None, - tx_id: None, - spending_key: None, - script_private_key: None, metadata_signature_nonce: Some(output.metadata_signature.public_nonce().to_vec()), metadata_signature_u_key: Some(output.metadata_signature.u().to_vec()), + ..Default::default() }, &(*conn), )?; @@ -700,52 +737,25 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { output.update( UpdateOutput { status: Some(OutputStatus::Unspent), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, + ..Default::default() }, &(*conn), )?; Ok(()) } - fn update_spent_output_to_unspent( - &self, - commitment: &Commitment, - ) -> Result { + fn reinstate_cancelled_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); - let output = OutputSql::find_by_commitment_and_cancelled(&commitment.to_vec(), false, &conn)?; - - if OutputStatus::try_from(output.status)? 
!= OutputStatus::Spent { - return Err(OutputManagerStorageError::ValuesNotFound); - } - - let mut o = output.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - tx_id: None, - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, - }, - &(*conn), - )?; - self.decrypt_if_necessary(&mut o)?; + let outputs = OutputSql::find_by_tx_id_and_status(tx_id, OutputStatus::CancelledInbound, &conn)?; - DbUnblindedOutput::try_from(o) - } - - fn cancel_pending_transaction_at_block_height(&self, block_height: u64) -> Result<(), OutputManagerStorageError> { - let pending_txs; - { - let conn = self.database_connection.acquire_lock(); - pending_txs = PendingTransactionOutputSql::index_block_height(block_height as i64, &conn)?; - } - for p in pending_txs { - self.cancel_pending_transaction(p.tx_id as u64)?; + for o in outputs { + o.update( + UpdateOutput { + status: Some(OutputStatus::EncumberedToBeReceived), + ..Default::default() + }, + &(*conn), + )?; } Ok(()) } @@ -848,32 +858,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { } } -/// A utility function to construct a PendingTransactionOutputs structure for a TxId, set of Outputs and a Timestamp -fn pending_transaction_outputs_from_sql_outputs( - tx_id: TxId, - timestamp: &NaiveDateTime, - outputs: Vec, - coinbase_block_height: Option, -) -> Result { - let mut outputs_to_be_spent = Vec::new(); - let mut outputs_to_be_received = Vec::new(); - for o in outputs { - if o.status == (OutputStatus::EncumberedToBeReceived as i32) { - outputs_to_be_received.push(DbUnblindedOutput::try_from(o.clone())?); - } else if o.status == (OutputStatus::EncumberedToBeSpent as i32) { - outputs_to_be_spent.push(DbUnblindedOutput::try_from(o.clone())?); - } - } - - Ok(PendingTransactionOutputs { - tx_id, - outputs_to_be_spent, - outputs_to_be_received, - timestamp: *timestamp, - coinbase_block_height, - }) -} - impl TryFrom for OutputStatus { type Error = 
OutputManagerStorageError; @@ -885,6 +869,10 @@ impl TryFrom for OutputStatus { 3 => Ok(OutputStatus::EncumberedToBeSpent), 4 => Ok(OutputStatus::Invalid), 5 => Ok(OutputStatus::CancelledInbound), + 6 => Ok(OutputStatus::UnspentMinedUnconfirmed), + 7 => Ok(OutputStatus::SpentMinedUnconfirmed), + 8 => Ok(OutputStatus::ShortTermEncumberedToBeSpent), + 9 => Ok(OutputStatus::ShortTermEncumberedToBeReceived), _ => Err(OutputManagerStorageError::ConversionError), } } @@ -901,7 +889,6 @@ struct NewOutputSql { flags: i32, maturity: i64, status: i32, - tx_id: Option, hash: Option>, script: Vec, input_data: Vec, @@ -910,13 +897,16 @@ struct NewOutputSql { metadata_signature_nonce: Vec, metadata_signature_u_key: Vec, metadata_signature_v_key: Vec, + received_in_tx_id: Option, + coinbase_block_height: Option, } impl NewOutputSql { pub fn new( output: DbUnblindedOutput, status: OutputStatus, - tx_id: Option, + received_in_tx_id: Option, + coinbase_block_height: Option, ) -> Result { Ok(Self { commitment: Some(output.commitment.to_vec()), @@ -925,7 +915,7 @@ impl NewOutputSql { flags: output.unblinded_output.features.flags.bits() as i32, maturity: output.unblinded_output.features.maturity as i64, status: status as i32, - tx_id: tx_id.map(|i| i as i64), + received_in_tx_id: received_in_tx_id.map(|i| i as i64), hash: Some(output.hash), script: output.unblinded_output.script.as_bytes(), input_data: output.unblinded_output.input_data.as_bytes(), @@ -934,6 +924,7 @@ impl NewOutputSql { metadata_signature_nonce: output.unblinded_output.metadata_signature.public_nonce().to_vec(), metadata_signature_u_key: output.unblinded_output.metadata_signature.u().to_vec(), metadata_signature_v_key: output.unblinded_output.metadata_signature.v().to_vec(), + coinbase_block_height: coinbase_block_height.map(|bh| bh as i64), }) } @@ -961,14 +952,13 @@ impl Encryptable for NewOutputSql { #[derive(Clone, Debug, Queryable, Identifiable, PartialEq)] #[table_name = "outputs"] struct OutputSql { - id: i32, 
+ id: i32, // Auto inc primary key commitment: Option>, spending_key: Vec, value: i64, flags: i32, maturity: i64, status: i32, - tx_id: Option, hash: Option>, script: Vec, input_data: Vec, @@ -977,6 +967,14 @@ struct OutputSql { metadata_signature_nonce: Vec, metadata_signature_u_key: Vec, metadata_signature_v_key: Vec, + mined_height: Option, + mined_in_block: Option>, + mined_mmr_position: Option, + marked_deleted_at_height: Option, + marked_deleted_in_block: Option>, + received_in_tx_id: Option, + spent_in_tx_id: Option, + coinbase_block_height: Option, } impl OutputSql { @@ -1001,6 +999,47 @@ impl OutputSql { .load(conn)?) } + pub fn index_unconfirmed(conn: &SqliteConnection) -> Result, OutputManagerStorageError> { + Ok(outputs::table + .filter( + outputs::status + .eq(OutputStatus::UnspentMinedUnconfirmed as i32) + .or(outputs::mined_in_block.is_null()), + ) + .order(outputs::id.asc()) + .load(conn)?) + } + + pub fn index_marked_deleted_in_block_is_null( + conn: &SqliteConnection, + ) -> Result, OutputManagerStorageError> { + Ok(outputs::table + // Return outputs not marked as deleted or confirmed + .filter(outputs::marked_deleted_in_block.is_null().or(outputs::status.eq(OutputStatus::SpentMinedUnconfirmed as i32))) + // Only return mined + .filter(outputs::mined_in_block.is_not_null()) + .order(outputs::id.asc()) + .load(conn)?) + } + + pub fn first_by_mined_height_desc(conn: &SqliteConnection) -> Result, OutputManagerStorageError> { + Ok(outputs::table + .filter(outputs::mined_height.is_not_null()) + .order(outputs::mined_height.desc()) + .first(conn) + .optional()?) + } + + pub fn first_by_marked_deleted_height_desc( + conn: &SqliteConnection, + ) -> Result, OutputManagerStorageError> { + Ok(outputs::table + .filter(outputs::marked_deleted_at_height.is_not_null()) + .order(outputs::marked_deleted_at_height.desc()) + .first(conn) + .optional()?) 
+ } + /// Find a particular Output, if it exists pub fn find(spending_key: &[u8], conn: &SqliteConnection) -> Result { Ok(outputs::table @@ -1040,7 +1079,11 @@ impl OutputSql { conn: &SqliteConnection, ) -> Result, OutputManagerStorageError> { Ok(outputs::table - .filter(outputs::tx_id.eq(Some(tx_id as i64))) + .filter( + outputs::received_in_tx_id + .eq(Some(tx_id as i64)) + .or(outputs::spent_in_tx_id.eq(Some(tx_id as i64))), + ) .filter(outputs::status.eq(status as i32)) .load(conn)?) } @@ -1051,11 +1094,17 @@ impl OutputSql { conn: &SqliteConnection, ) -> Result, OutputManagerStorageError> { Ok(outputs::table - .filter(outputs::tx_id.eq(Some(tx_id as i64))) + .filter( + outputs::received_in_tx_id + .eq(Some(tx_id as i64)) + .or(outputs::spent_in_tx_id.eq(Some(tx_id as i64))), + ) .filter( outputs::status .eq(OutputStatus::EncumberedToBeReceived as i32) - .or(outputs::status.eq(OutputStatus::EncumberedToBeSpent as i32)), + .or(outputs::status.eq(OutputStatus::EncumberedToBeSpent as i32)) + .or(outputs::status.eq(OutputStatus::ShortTermEncumberedToBeReceived as i32)) + .or(outputs::status.eq(OutputStatus::ShortTermEncumberedToBeSpent as i32)), ) .load(conn)?) } @@ -1072,6 +1121,17 @@ impl OutputSql { .first::(conn)?) } + /// Find a particular Output, if it exists and is in the specified Spent state + pub fn find_pending_coinbase_at_block_height( + block_height: u64, + conn: &SqliteConnection, + ) -> Result { + Ok(outputs::table + .filter(outputs::status.ne(OutputStatus::Unspent as i32)) + .filter(outputs::coinbase_block_height.eq(block_height as i64)) + .first::(conn)?) 
+ } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), OutputManagerStorageError> { let num_deleted = diesel::delete(outputs::table.filter(outputs::spending_key.eq(&self.spending_key))).execute(conn)?; @@ -1088,34 +1148,10 @@ impl OutputSql { updated_output: UpdateOutput, conn: &SqliteConnection, ) -> Result { - let num_updated = diesel::update(outputs::table.filter(outputs::id.eq(&self.id))) + diesel::update(outputs::table.filter(outputs::id.eq(&self.id))) .set(UpdateOutputSql::from(updated_output)) - .execute(conn)?; - - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } - - OutputSql::find(&self.spending_key, conn) - } - - /// This function is used to update an existing record to set fields to null - pub fn update_null( - &self, - updated_null: NullOutputSql, - conn: &SqliteConnection, - ) -> Result { - let num_updated = diesel::update(outputs::table.filter(outputs::spending_key.eq(&self.spending_key))) - .set(updated_null) - .execute(conn)?; - - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } + .execute(conn) + .num_rows_affected_or_not_found(1)?; OutputSql::find(&self.spending_key, conn) } @@ -1124,12 +1160,9 @@ impl OutputSql { pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), OutputManagerStorageError> { let _ = self.update( UpdateOutput { - status: None, - tx_id: None, spending_key: Some(self.spending_key.clone()), script_private_key: Some(self.script_private_key.clone()), - metadata_signature_nonce: None, - metadata_signature_u_key: None, + ..Default::default() }, conn, )?; @@ -1217,6 +1250,11 @@ impl TryFrom for DbUnblindedOutput { commitment, unblinded_output, hash, + mined_height: o.mined_height.map(|mh| mh as u64), + mined_in_block: o.mined_in_block, + mined_mmr_position: o.mined_mmr_position.map(|mp| mp as u64), + marked_deleted_at_height: 
o.marked_deleted_at_height.map(|d| d as u64), + marked_deleted_in_block: o.marked_deleted_in_block, }) } } @@ -1244,7 +1282,6 @@ impl From for NewOutputSql { flags: o.flags, maturity: o.maturity, status: o.status, - tx_id: o.tx_id, hash: o.hash, script: o.script, input_data: o.input_data, @@ -1253,6 +1290,8 @@ impl From for NewOutputSql { metadata_signature_nonce: o.metadata_signature_nonce, metadata_signature_u_key: o.metadata_signature_u_key, metadata_signature_v_key: o.metadata_signature_v_key, + received_in_tx_id: o.received_in_tx_id, + coinbase_block_height: o.coinbase_block_height, } } } @@ -1264,9 +1303,11 @@ impl PartialEq for OutputSql { } /// These are the fields that can be updated for an Output +#[derive(Default)] pub struct UpdateOutput { status: Option, - tx_id: Option, + received_in_tx_id: Option>, + spent_in_tx_id: Option>, spending_key: Option>, script_private_key: Option>, metadata_signature_nonce: Option>, @@ -1277,161 +1318,51 @@ pub struct UpdateOutput { #[table_name = "outputs"] pub struct UpdateOutputSql { status: Option, - tx_id: Option, + received_in_tx_id: Option>, + spent_in_tx_id: Option>, spending_key: Option>, script_private_key: Option>, metadata_signature_nonce: Option>, metadata_signature_u_key: Option>, } -#[derive(AsChangeset)] -#[table_name = "outputs"] -#[changeset_options(treat_none_as_null = "true")] -/// This struct is used to set the contained field to null -pub struct NullOutputSql { - tx_id: Option, -} - /// Map a Rust friendly UpdateOutput to the Sql data type form impl From for UpdateOutputSql { fn from(u: UpdateOutput) -> Self { Self { status: u.status.map(|t| t as i32), - tx_id: u.tx_id.map(|t| t as i64), spending_key: u.spending_key, script_private_key: u.script_private_key, metadata_signature_nonce: u.metadata_signature_nonce, metadata_signature_u_key: u.metadata_signature_u_key, + received_in_tx_id: u.received_in_tx_id.map(|o| o.map(|t| t as i64)), + spent_in_tx_id: u.spent_in_tx_id.map(|o| o.map(|t| t as i64)), } } 
} -/// This struct represents a PendingTransactionOutputs in the Sql database. A distinct struct is required to define the -/// Sql friendly equivalent datatypes for the members. -#[derive(Debug, Clone, Queryable, Insertable)] -#[table_name = "pending_transaction_outputs"] -struct PendingTransactionOutputSql { - tx_id: i64, - short_term: i32, +#[derive(Clone, Debug, Queryable, Identifiable)] +#[table_name = "key_manager_states"] +struct KeyManagerStateSql { + id: i32, + master_key: Vec, + branch_seed: String, + primary_key_index: i64, timestamp: NaiveDateTime, - coinbase_block_height: Option, } -impl PendingTransactionOutputSql { - pub fn new(tx_id: TxId, short_term: bool, timestamp: NaiveDateTime, coinbase_block_height: Option) -> Self { - Self { - tx_id: tx_id as i64, - short_term: short_term as i32, - timestamp, - coinbase_block_height, - } - } - - pub fn commit(&self, conn: &SqliteConnection) -> Result<(), OutputManagerStorageError> { - diesel::insert_into(pending_transaction_outputs::table) - .values(self.clone()) - .execute(conn)?; - Ok(()) - } - - pub fn find( - tx_id: TxId, - conn: &SqliteConnection, - ) -> Result { - Ok(pending_transaction_outputs::table - .filter(pending_transaction_outputs::tx_id.eq(tx_id as i64)) - .first::(conn)?) - } - - pub fn index(conn: &SqliteConnection) -> Result, OutputManagerStorageError> { - Ok(pending_transaction_outputs::table.load::(conn)?) - } - - pub fn index_short_term( - conn: &SqliteConnection, - ) -> Result, OutputManagerStorageError> { - Ok(pending_transaction_outputs::table - .filter(pending_transaction_outputs::short_term.eq(1i32)) - .load::(conn)?) - } - - pub fn index_older( - timestamp: NaiveDateTime, - conn: &SqliteConnection, - ) -> Result, OutputManagerStorageError> { - Ok(pending_transaction_outputs::table - .filter(pending_transaction_outputs::timestamp.lt(timestamp)) - .load::(conn)?) 
- } - /// Find pending transaction outputs with specified block_height - pub fn index_block_height( - block_height: i64, - conn: &SqliteConnection, - ) -> Result, OutputManagerStorageError> { - Ok(pending_transaction_outputs::table - .filter(pending_transaction_outputs::coinbase_block_height.eq(block_height)) - .load::(conn)?) - } - - pub fn delete(&self, conn: &SqliteConnection) -> Result<(), OutputManagerStorageError> { - let num_deleted = diesel::delete( - pending_transaction_outputs::table.filter(pending_transaction_outputs::tx_id.eq(&self.tx_id)), - ) - .execute(conn)?; - - if num_deleted == 0 { - return Err(OutputManagerStorageError::ValuesNotFound); - } - - let outputs = OutputSql::find_by_tx_id_and_encumbered(self.tx_id as u64, &(*conn))?; - for o in outputs { - o.delete(&(*conn))?; - } - - Ok(()) - } - - /// This function is used to update an existing record to set fields to null - pub fn clear_short_term( - &self, - conn: &SqliteConnection, - ) -> Result { - let num_updated = diesel::update( - pending_transaction_outputs::table.filter(pending_transaction_outputs::tx_id.eq(&self.tx_id)), - ) - .set(UpdatePendingTransactionOutputSql { short_term: Some(0i32) }) - .execute(conn)?; - - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } - - PendingTransactionOutputSql::find(self.tx_id as u64, conn) - } -} - -#[derive(AsChangeset)] -#[table_name = "pending_transaction_outputs"] -pub struct UpdatePendingTransactionOutputSql { - short_term: Option, -} - -#[derive(Clone, Debug, Queryable, Insertable)] +#[derive(Clone, Debug, Insertable)] #[table_name = "key_manager_states"] -struct KeyManagerStateSql { - id: Option, +struct NewKeyManagerStateSql { master_key: Vec, branch_seed: String, primary_key_index: i64, timestamp: NaiveDateTime, } -impl From for KeyManagerStateSql { +impl From for NewKeyManagerStateSql { fn from(km: KeyManagerState) -> Self { Self { - id: None, master_key: 
km.master_key.to_vec(), branch_seed: km.branch_seed, primary_key_index: km.primary_key_index as i64, @@ -1439,7 +1370,6 @@ impl From for KeyManagerStateSql { } } } - impl TryFrom for KeyManagerState { type Error = OutputManagerStorageError; @@ -1452,14 +1382,16 @@ impl TryFrom for KeyManagerState { } } -impl KeyManagerStateSql { +impl NewKeyManagerStateSql { fn commit(&self, conn: &SqliteConnection) -> Result<(), OutputManagerStorageError> { diesel::insert_into(key_manager_states::table) .values(self.clone()) .execute(conn)?; Ok(()) } +} +impl KeyManagerStateSql { pub fn get_state(conn: &SqliteConnection) -> Result { key_manager_states::table .first::(conn) @@ -1475,16 +1407,20 @@ impl KeyManagerStateSql { primary_key_index: Some(self.primary_key_index), }; - let num_updated = diesel::update(key_manager_states::table.filter(key_manager_states::id.eq(&km.id))) + diesel::update(key_manager_states::table.filter(key_manager_states::id.eq(&km.id))) .set(update) - .execute(conn)?; - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } + .execute(conn) + .num_rows_affected_or_not_found(1)?; + }, + Err(_) => { + let inserter = NewKeyManagerStateSql { + master_key: self.master_key.clone(), + branch_seed: self.branch_seed.clone(), + primary_key_index: self.primary_key_index, + timestamp: self.timestamp, + }; + inserter.commit(conn)?; }, - Err(_) => self.commit(conn)?, } Ok(()) } @@ -1498,14 +1434,10 @@ impl KeyManagerStateSql { branch_seed: None, primary_key_index: Some(current_index), }; - let num_updated = diesel::update(key_manager_states::table.filter(key_manager_states::id.eq(&km.id))) + diesel::update(key_manager_states::table.filter(key_manager_states::id.eq(&km.id))) .set(update) - .execute(conn)?; - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } + .execute(conn) + .num_rows_affected_or_not_found(1)?; 
current_index }, Err(_) => return Err(OutputManagerStorageError::KeyManagerNotInitialized), @@ -1520,14 +1452,10 @@ impl KeyManagerStateSql { branch_seed: None, primary_key_index: Some(index as i64), }; - let num_updated = diesel::update(key_manager_states::table.filter(key_manager_states::id.eq(&km.id))) + diesel::update(key_manager_states::table.filter(key_manager_states::id.eq(&km.id))) .set(update) - .execute(conn)?; - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } + .execute(conn) + .num_rows_affected_or_not_found(1)?; Ok(()) }, Err(_) => Err(OutputManagerStorageError::KeyManagerNotInitialized), @@ -1564,6 +1492,28 @@ impl Encryptable for KeyManagerStateSql { } } +impl Encryptable for NewKeyManagerStateSql { + fn encrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), Error> { + let encrypted_master_key = encrypt_bytes_integral_nonce(cipher, self.master_key.clone())?; + let encrypted_branch_seed = encrypt_bytes_integral_nonce(cipher, self.branch_seed.clone().as_bytes().to_vec())?; + self.master_key = encrypted_master_key; + self.branch_seed = encrypted_branch_seed.to_hex(); + Ok(()) + } + + fn decrypt(&mut self, _cipher: &Aes256Gcm) -> Result<(), Error> { + unimplemented!("Not supported") + // let decrypted_master_key = decrypt_bytes_integral_nonce(&cipher, self.master_key.clone())?; + // let decrypted_branch_seed = + // decrypt_bytes_integral_nonce(&cipher, from_hex(self.branch_seed.as_str()).map_err(|_| Error)?)?; + // self.master_key = decrypted_master_key; + // self.branch_seed = from_utf8(decrypted_branch_seed.as_slice()) + // .map_err(|_| Error)? 
+ // .to_string(); + // Ok(()) + } +} + #[derive(Clone, Debug, Queryable, Insertable, Identifiable, PartialEq, AsChangeset)] #[table_name = "known_one_sided_payment_scripts"] #[primary_key(script_hash)] @@ -1627,18 +1577,13 @@ impl KnownOneSidedPaymentScriptSql { updated_known_script: UpdateKnownOneSidedPaymentScript, conn: &SqliteConnection, ) -> Result { - let num_updated = diesel::update( + diesel::update( known_one_sided_payment_scripts::table .filter(known_one_sided_payment_scripts::script_hash.eq(&self.script_hash)), ) .set(updated_known_script) - .execute(conn)?; - - if num_updated == 0 { - return Err(OutputManagerStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } + .execute(conn) + .num_rows_affected_or_not_found(1)?; KnownOneSidedPaymentScriptSql::find(&self.script_hash, conn) } @@ -1717,13 +1662,12 @@ impl Encryptable for KnownOneSidedPaymentScriptSql { #[cfg(test)] mod test { - use std::{convert::TryFrom, time::Duration}; + use std::convert::TryFrom; use aes_gcm::{ aead::{generic_array::GenericArray, NewAead}, Aes256Gcm, }; - use chrono::{Duration as ChronoDuration, Utc}; use diesel::{Connection, SqliteConnection}; use rand::{rngs::OsRng, RngCore}; use tari_crypto::{keys::SecretKey, script}; @@ -1744,11 +1688,11 @@ mod test { models::DbUnblindedOutput, sqlite_db::{ KeyManagerStateSql, + NewKeyManagerStateSql, NewOutputSql, OutputManagerSqliteDatabase, OutputSql, OutputStatus, - PendingTransactionOutputSql, UpdateOutput, }, }, @@ -1789,7 +1733,7 @@ mod test { for _i in 0..2 { let (_, uo) = make_input(MicroTari::from(100 + OsRng.next_u64() % 1000)); let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - let o = NewOutputSql::new(uo, OutputStatus::Unspent, None).unwrap(); + let o = NewOutputSql::new(uo, OutputStatus::Unspent, None, None).unwrap(); outputs.push(o.clone()); outputs_unspent.push(o.clone()); o.commit(&conn).unwrap(); @@ -1798,7 +1742,7 @@ mod test { for _i in 0..3 { let (_, uo) = 
make_input(MicroTari::from(100 + OsRng.next_u64() % 1000)); let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - let o = NewOutputSql::new(uo, OutputStatus::Spent, None).unwrap(); + let o = NewOutputSql::new(uo, OutputStatus::Spent, None, None).unwrap(); outputs.push(o.clone()); outputs_spent.push(o.clone()); o.commit(&conn).unwrap(); @@ -1832,34 +1776,13 @@ mod test { assert_eq!(OutputSql::index(&conn).unwrap().len(), 4); - let tx_id = 44u64; - - PendingTransactionOutputSql::new(tx_id, true, Utc::now().naive_utc(), Some(1)) - .commit(&conn) - .unwrap(); - - PendingTransactionOutputSql::new(11u64, true, Utc::now().naive_utc(), Some(2)) - .commit(&conn) - .unwrap(); - - let pt = PendingTransactionOutputSql::find(tx_id, &conn).unwrap(); - - assert_eq!(pt.tx_id as u64, tx_id); - - let pts = PendingTransactionOutputSql::index(&conn).unwrap(); - - assert_eq!(pts.len(), 2); - let _updated1 = OutputSql::find(&outputs[0].spending_key, &conn) .unwrap() .update( UpdateOutput { status: Some(OutputStatus::Unspent), - tx_id: Some(44u64), - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, + received_in_tx_id: Some(Some(44u64)), + ..Default::default() }, &conn, ) @@ -1870,11 +1793,8 @@ mod test { .update( UpdateOutput { status: Some(OutputStatus::EncumberedToBeReceived), - tx_id: Some(44u64), - spending_key: None, - script_private_key: None, - metadata_signature_nonce: None, - metadata_signature_u_key: None, + received_in_tx_id: Some(Some(44u64)), + ..Default::default() }, &conn, ) @@ -1883,34 +1803,6 @@ mod test { let result = OutputSql::find_by_tx_id_and_encumbered(44u64, &conn).unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].spending_key, outputs[1].spending_key); - - PendingTransactionOutputSql::new( - 12u64, - true, - Utc::now().naive_utc() - ChronoDuration::from_std(Duration::from_millis(600_000)).unwrap(), - Some(3), - ) - .commit(&conn) - .unwrap(); - - let 
pending_older1 = PendingTransactionOutputSql::index_older(Utc::now().naive_utc(), &conn).unwrap(); - assert_eq!(pending_older1.len(), 3); - - let pending_older2 = PendingTransactionOutputSql::index_older( - Utc::now().naive_utc() - ChronoDuration::from_std(Duration::from_millis(200_000)).unwrap(), - &conn, - ) - .unwrap(); - assert_eq!(pending_older2.len(), 1); - - PendingTransactionOutputSql::new(13u64, true, Utc::now().naive_utc(), None) - .commit(&conn) - .unwrap(); - - let pending_block_height = PendingTransactionOutputSql::index_block_height(2, &conn).unwrap(); - - assert_eq!(pending_block_height.len(), 1); - assert!(pending_block_height.iter().any(|p| p.tx_id == 11)); } #[test] @@ -1935,23 +1827,11 @@ mod test { primary_key_index: 0, }; - KeyManagerStateSql::from(state1.clone()).set_state(&conn).unwrap(); + NewKeyManagerStateSql::from(state1.clone()).commit(&conn).unwrap(); let state1_read = KeyManagerStateSql::get_state(&conn).unwrap(); assert_eq!(state1, KeyManagerState::try_from(state1_read).unwrap()); - let state2 = KeyManagerState { - master_key: PrivateKey::random(&mut OsRng), - branch_seed: random::string(8), - primary_key_index: 0, - }; - - KeyManagerStateSql::from(state2.clone()).set_state(&conn).unwrap(); - - let state2_read = KeyManagerStateSql::get_state(&conn).unwrap(); - - assert_eq!(state2, KeyManagerState::try_from(state2_read).unwrap()); - KeyManagerStateSql::increment_index(&conn).unwrap(); KeyManagerStateSql::increment_index(&conn).unwrap(); @@ -1977,7 +1857,7 @@ mod test { let (_, uo) = make_input(MicroTari::from(100 + OsRng.next_u64() % 1000)); let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - let output = NewOutputSql::new(uo, OutputStatus::Unspent, None).unwrap(); + let output = NewOutputSql::new(uo, OutputStatus::Unspent, None, None).unwrap(); let key = GenericArray::from_slice(b"an example very very secret key."); let cipher = Aes256Gcm::new(key); @@ -2040,8 +1920,11 @@ mod test { primary_key_index: 1, }; - 
let state_sql = KeyManagerStateSql::from(starting_state.clone()); - state_sql.set_state(&conn).unwrap(); + NewKeyManagerStateSql::from(starting_state.clone()) + .commit(&conn) + .unwrap(); + + let state_sql = KeyManagerStateSql::get_state(&conn).unwrap(); let mut encrypted_state = state_sql; encrypted_state.encrypt(&cipher).unwrap(); @@ -2080,17 +1963,16 @@ mod test { primary_key_index: 1, }; - let state_sql = KeyManagerStateSql::from(starting_state); - state_sql.set_state(&conn).unwrap(); + let _state_sql = NewKeyManagerStateSql::from(starting_state).commit(&conn).unwrap(); let (_, uo) = make_input(MicroTari::from(100 + OsRng.next_u64() % 1000)); let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - let output = NewOutputSql::new(uo, OutputStatus::Unspent, None).unwrap(); + let output = NewOutputSql::new(uo, OutputStatus::Unspent, None, None).unwrap(); output.commit(&conn).unwrap(); let (_, uo2) = make_input(MicroTari::from(100 + OsRng.next_u64() % 1000)); let uo2 = DbUnblindedOutput::from_unblinded_output(uo2, &factories).unwrap(); - let output2 = NewOutputSql::new(uo2, OutputStatus::Unspent, None).unwrap(); + let output2 = NewOutputSql::new(uo2, OutputStatus::Unspent, None, None).unwrap(); output2.commit(&conn).unwrap(); let key = GenericArray::from_slice(b"an example very very secret key."); diff --git a/base_layer/wallet/src/output_manager_service/tasks/mod.rs b/base_layer/wallet/src/output_manager_service/tasks/mod.rs index 0c28ca2c90..35968afb1e 100644 --- a/base_layer/wallet/src/output_manager_service/tasks/mod.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/mod.rs @@ -22,4 +22,4 @@ mod txo_validation_task; -pub use txo_validation_task::{TxoValidationTask, TxoValidationType}; +pub use txo_validation_task::TxoValidationTask; diff --git a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs index e08059e16b..c4680cfe45 100644 
--- a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs @@ -1,4 +1,4 @@ -// Copyright 2020. The Tari Project +// Copyright 2021. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: @@ -19,634 +19,434 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - use crate::{ + connectivity_service::WalletConnectivityInterface, output_manager_service::{ - error::{OutputManagerError, OutputManagerProtocolError}, - handle::OutputManagerEvent, - resources::OutputManagerResources, - storage::{database::OutputManagerBackend, models::DbUnblindedOutput}, + config::OutputManagerServiceConfig, + error::{OutputManagerError, OutputManagerProtocolError, OutputManagerProtocolErrorExt}, + handle::{OutputManagerEvent, OutputManagerEventSender}, + storage::{ + database::{OutputManagerBackend, OutputManagerDatabase}, + models::DbUnblindedOutput, + }, }, - transaction_service::storage::models::TransactionStatus, - types::ValidationRetryStrategy, }; -use futures::FutureExt; use log::*; -use std::{cmp, collections::HashMap, convert::TryFrom, fmt, sync::Arc, time::Duration}; -use tari_common_types::types::Signature; -use tari_comms::{peer_manager::NodeId, types::CommsPublicKey, PeerConnection}; +use std::{collections::HashMap, convert::TryInto, sync::Arc}; +use tari_common_types::types::BlockHash; +use tari_comms::protocol::rpc::{RpcError::RequestFailed, RpcStatusCode::NotFound}; use tari_core::{ base_node::rpc::BaseNodeWalletRpcClient, - proto::base_node::FetchMatchingUtxos, - transactions::transaction::TransactionOutput, 
+ blocks::BlockHeader, + proto::base_node::{QueryDeletedRequest, UtxoQueryRequest}, }; -use tari_crypto::tari_utilities::{hash::Hashable, hex::Hex}; -use tokio::{sync::broadcast, time::sleep}; - -const LOG_TARGET: &str = "wallet::output_manager_service::utxo_validation_task"; +use tari_crypto::tari_utilities::{hex::Hex, Hashable}; +use tari_shutdown::ShutdownSignal; -const MAX_RETRY_DELAY: Duration = Duration::from_secs(300); +const LOG_TARGET: &str = "wallet::output_service::txo_validation_task"; -pub struct TxoValidationTask -where TBackend: OutputManagerBackend + 'static -{ - id: u64, - validation_type: TxoValidationType, - retry_strategy: ValidationRetryStrategy, - resources: OutputManagerResources, - base_node_public_key: CommsPublicKey, - retry_delay: Duration, - base_node_update_receiver: Option>, - base_node_synced: bool, +pub struct TxoValidationTask { + operation_id: u64, + db: OutputManagerDatabase, + connectivity: TWalletConnectivity, + event_publisher: OutputManagerEventSender, + config: OutputManagerServiceConfig, } -/// This protocol defines the process of submitting our current UTXO set to the Base Node to validate it. 
-impl TxoValidationTask -where TBackend: OutputManagerBackend + 'static +impl TxoValidationTask +where + TBackend: OutputManagerBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, { - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - id: u64, - validation_type: TxoValidationType, - retry_strategy: ValidationRetryStrategy, - resources: OutputManagerResources, - base_node_public_key: CommsPublicKey, - base_node_update_receiver: broadcast::Receiver, + pub fn new( + operation_id: u64, + db: OutputManagerDatabase, + connectivity: TWalletConnectivity, + event_publisher: OutputManagerEventSender, + config: OutputManagerServiceConfig, ) -> Self { - let retry_delay = resources.config.base_node_query_timeout; Self { - id, - validation_type, - retry_strategy, - resources, - base_node_public_key, - retry_delay, - base_node_update_receiver: Some(base_node_update_receiver), - base_node_synced: true, + operation_id, + db, + connectivity, + event_publisher, + config, } } - /// The task that defines the execution of the protocol. 
- pub async fn execute(mut self) -> Result { - let mut base_node_update_receiver = self.base_node_update_receiver.take().ok_or_else(|| { - OutputManagerProtocolError::new( - self.id, - OutputManagerError::ServiceError("A Base Node Update receiver was not provided".to_string()), - ) - })?; - - let mut shutdown = self.resources.shutdown_signal.clone(); - - let total_retries_str = match self.retry_strategy { - ValidationRetryStrategy::Limited(n) => n.to_string(), - ValidationRetryStrategy::UntilSuccess => "∞".to_string(), - }; + pub async fn execute(mut self, _shutdown: ShutdownSignal) -> Result { + let mut base_node_client = self + .connectivity + .obtain_base_node_wallet_rpc_client() + .await + .ok_or(OutputManagerError::Shutdown) + .for_protocol(self.operation_id)?; info!( target: LOG_TARGET, - "Starting TXO validation protocol (Id: {}) for {} with {} retries", - self.id, - self.validation_type, - total_retries_str + "Starting TXO validation protocol (Id: {})", self.operation_id, ); - let mut output_batches_to_query: Vec>> = self.get_output_batches().await?; + let last_mined_header = self.check_for_reorgs(&mut base_node_client).await?; + + self.update_unconfirmed_outputs(&mut base_node_client).await?; + + self.update_spent_outputs(&mut base_node_client, last_mined_header) + .await?; + self.publish_event(OutputManagerEvent::TxoValidationSuccess(self.operation_id)); + Ok(self.operation_id) + } + + async fn update_spent_outputs( + &self, + wallet_client: &mut BaseNodeWalletRpcClient, + last_mined_header_hash: Option, + ) -> Result<(), OutputManagerProtocolError> { + let mined_outputs = self + .db + .fetch_mined_unspent_outputs() + .await + .for_protocol(self.operation_id)?; + + if mined_outputs.is_empty() { + return Ok(()); + } - if output_batches_to_query.is_empty() { + for batch in mined_outputs.chunks(self.config.tx_validator_batch_size) { debug!( target: LOG_TARGET, - "TXO validation protocol (Id: {}) has no outputs to validate", self.id, + "Asking base node for 
status of {} mmr_positions", + batch.len() ); - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationSuccess( - self.id, - self.validation_type, - ))) - .map_err(|e| { - trace!( + + // We have to send positions to the base node because if the base node cannot find the hash of the output + // we can't tell if the output ever existed, as opposed to existing and was spent. + // This assumes that the base node has not reorged since the last time we asked. + let deleted_bitmap_response = wallet_client + .query_deleted(QueryDeletedRequest { + chain_must_include_header: last_mined_header_hash.clone(), + mmr_positions: batch.iter().filter_map(|ub| ub.mined_mmr_position).collect(), + include_deleted_block_data: true, + }) + .await + .for_protocol(self.operation_id)?; + + for output in batch { + let mined_mmr_position = output + .mined_mmr_position + .ok_or(OutputManagerError::InconsistentDataError( + "Mined Unspent output should have `mined_mmr_position`", + )) + .for_protocol(self.operation_id)?; + + if deleted_bitmap_response.deleted_positions.len() != deleted_bitmap_response.blocks_deleted_in.len() || + deleted_bitmap_response.deleted_positions.len() != + deleted_bitmap_response.heights_deleted_at.len() + { + return Err(OutputManagerProtocolError::new( + self.operation_id, + OutputManagerError::InconsistentDataError( + "`deleted_positions`, `blocks_deleted_in` and `heights_deleted_at` should be the same \ + length", + ), + )); + } + + if deleted_bitmap_response.deleted_positions.contains(&mined_mmr_position) { + let position = deleted_bitmap_response + .deleted_positions + .iter() + .position(|dp| dp == &mined_mmr_position) + .ok_or(OutputManagerError::InconsistentDataError( + "Deleted positions should include the `mined_mmr_position`", + )) + .for_protocol(self.operation_id)?; + + let deleted_height = deleted_bitmap_response.heights_deleted_at[position]; + let deleted_block = 
deleted_bitmap_response.blocks_deleted_in[position].clone(); + + let confirmed = (deleted_bitmap_response.height_of_longest_chain - deleted_height) >= + self.config.num_confirmations_required; + + self.db + .mark_output_as_spent(output.hash.clone(), deleted_height, deleted_block, confirmed) + .await + .for_protocol(self.operation_id)?; + info!( target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 + "Updating output comm:{}: hash {} as spent at tip height {}", + output.commitment.to_hex(), + output.hash.to_hex(), + deleted_bitmap_response.height_of_longest_chain ); - e - }); - return Ok(self.id); - } - - let mut retries = 0; - let batch_total = output_batches_to_query.len(); + } - 'main: loop { - if let ValidationRetryStrategy::Limited(max_retries) = self.retry_strategy { - if retries > max_retries { + if deleted_bitmap_response.not_deleted_positions.contains( + &output + .mined_mmr_position + .ok_or(OutputManagerError::InconsistentDataError( + "Mined Unspent output should have `mined_mmr_position`", + )) + .for_protocol(self.operation_id)?, + ) && output.marked_deleted_at_height.is_some() + { + self.db + .mark_output_as_unspent(output.hash.clone()) + .await + .for_protocol(self.operation_id)?; info!( target: LOG_TARGET, - "Maximum attempts exceeded for TXO Validation Protocol (Id: {})", self.id + "Updating output comm:{}: hash {} as unspent at tip height {}", + output.commitment.to_hex(), + output.hash.to_hex(), + deleted_bitmap_response.height_of_longest_chain ); - // If this retry is not because of a !base_node_synced then we emit this error event, if the retries - // are due to a base node NOT being synced then we rely on the TxoValidationDelayed event - // because we were actually able to connect - if self.base_node_synced { - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationFailure( - self.id, - self.validation_type, - ))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - 
"Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - } - return Err(OutputManagerProtocolError::new( - self.id, - OutputManagerError::MaximumAttemptsExceeded, - )); } } - // Assume base node is synced until we achieve a connection and it tells us it is not synced - self.base_node_synced = true; - - let base_node_node_id = NodeId::from_key(&self.base_node_public_key.clone()); - let mut connection: Option = None; + } + Ok(()) + } - let delay = sleep(self.resources.config.peer_dial_retry_timeout); + async fn update_unconfirmed_outputs( + &self, + wallet_client: &mut BaseNodeWalletRpcClient, + ) -> Result<(), OutputManagerProtocolError> { + let unconfirmed_outputs = self + .db + .fetch_unconfirmed_outputs() + .await + .for_protocol(self.operation_id)?; - debug!( + for batch in unconfirmed_outputs.chunks(self.config.tx_validator_batch_size) { + info!( target: LOG_TARGET, - "Connecting to Base Node (Public Key: {})", self.base_node_public_key, + "Asking base node for location of {} unconfirmed outputs by hash", + batch.len() ); - tokio::select! 
{ - dial_result = self.resources.connectivity_manager.dial_peer(base_node_node_id.clone()) => { - match dial_result { - Ok(base_node_connection) => { - connection = Some(base_node_connection); - }, - Err(e) => { - info!(target: LOG_TARGET, "Problem connecting to base node: {} for Output TXO Validation Validation Protocol: {}", e, self.id); - }, - } - }, - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(_) => { - info!( - target: LOG_TARGET, - "TXO Validation protocol aborted due to Base Node Public key change" - ); - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationAborted(self.id, self.validation_type))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - return Ok(self.id); - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "TXO Validation protocol event 'base_node_update' triggered with error: {:?}", - - e, - ); - } - } - } - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "TXO Validation Protocol (Id: {}) shutting down because it received the shutdown signal", self.id); - return Err(OutputManagerProtocolError::new(self.id, OutputManagerError::Shutdown)); - }, - } - - let mut base_node_connection = match connection { - None => { - tokio::select! 
{ - _ = delay.fuse() => { - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationTimedOut(self.id, self.validation_type))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - retries += 1; - continue; - }, - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "TXO Validation Protocol (Id: {}) shutting down because it received the shutdown signal", self.id); - return Err(OutputManagerProtocolError::new(self.id, OutputManagerError::Shutdown)); - }, - } - }, - Some(c) => c, - }; - - let mut client = match base_node_connection - .connect_rpc_using_builder( - BaseNodeWalletRpcClient::builder() - .with_deadline(self.resources.config.base_node_query_timeout) - .with_handshake_timeout(self.resources.config.base_node_query_timeout), - ) + let (mined, unmined, tip_height) = self + .query_base_node_for_outputs(batch, wallet_client) .await - { - Ok(c) => c, - Err(e) => { - warn!(target: LOG_TARGET, "Problem establishing RPC connection: {}", e); - delay.await; - retries += 1; - continue; - }, - }; - let mut batch_num = 0; - debug!(target: LOG_TARGET, "RPC client connected"); - 'per_batch: loop { - let batch = if let Some(b) = output_batches_to_query.pop() { - batch_num += 1; - b - } else { - break 'main; - }; + .for_protocol(self.operation_id)?; + debug!( + target: LOG_TARGET, + "Base node returned {} outputs as mined and {} outputs as unmined", + mined.len(), + unmined.len() + ); + for (output, mined_height, mined_in_block, mmr_position) in &mined { info!( target: LOG_TARGET, - "Output Manager TXO Validation protocol (Id: {}) sending batch query {} of {}", - self.id, - batch_num, - batch_total + "Updating output comm:{}: hash {} as mined at height {} with current tip at {}", + output.commitment.to_hex(), + output.hash.to_hex(), + mined_height, + tip_height ); - let delay = sleep(self.retry_delay); - tokio::select! 
{ - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(_bn) => { - info!(target: LOG_TARGET, "TXO Validation protocol aborted due to Base Node Public key change" ); - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationAborted(self.id, self.validation_type))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - return Ok(self.id); - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "TXO Validation protocol event 'base_node_update' triggered with error: {:?}", - e, - ); - } - } - }, - result = self.send_query_batch(batch.clone(), &mut client) => { - match result { - Ok(synced) => { - self.base_node_synced = synced; - if !synced { - info!(target: LOG_TARGET, "Base Node reports not being synced, will retry."); - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationDelayed(self.id, self.validation_type))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - delay.await; - self.update_retry_delay(false); - output_batches_to_query = self.get_output_batches().await?; - retries += 1; - break 'per_batch; - } - self.update_retry_delay(true); - }, - Err(OutputManagerProtocolError{id: _, error: OutputManagerError::RpcError(e)}) => { - warn!(target: LOG_TARGET, "Error with RPC Client: {}. 
Retrying RPC client connection.", e); - delay.await; - self.update_retry_delay(false); - output_batches_to_query.push(batch); - retries += 1; - break 'per_batch; - } - Err(e) => { - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationFailure(self.id, self.validation_type))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - return Err(e); - }, - } - }, - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "TXO Validation Protocol (Id: {}) shutting down because it received the shutdown signal", self.id); - return Err(OutputManagerProtocolError::new(self.id, OutputManagerError::Shutdown)); - }, - } + self.update_output_as_mined(output, mined_in_block, *mined_height, *mmr_position, tip_height) + .await?; } } - let _ = self - .resources - .event_publisher - .send(Arc::new(OutputManagerEvent::TxoValidationSuccess( - self.id, - self.validation_type, - ))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - Ok(self.id) + Ok(()) } - async fn send_query_batch( + // returns the last header found still in the chain + async fn check_for_reorgs( &mut self, - batch: Vec>, client: &mut BaseNodeWalletRpcClient, - ) -> Result { - let request = FetchMatchingUtxos { - output_hashes: batch.clone(), - }; + ) -> Result, OutputManagerProtocolError> { + let mut last_mined_header_hash = None; + info!( + target: LOG_TARGET, + "Checking last mined TXO to see if the base node has re-orged" + ); - let batch_response = client - .fetch_matching_utxos(request) - .await - .map_err(|e| OutputManagerProtocolError::new(self.id, OutputManagerError::from(e)))?; + while let Some(last_spent_output) = self.db.get_last_spent_output().await.for_protocol(self.operation_id)? 
{ + let mined_height = last_spent_output + .marked_deleted_at_height + .ok_or(OutputManagerError::InconsistentDataError( + "Spent output should have `marked_deleted_at_height`", + )) + .for_protocol(self.operation_id)?; + let mined_in_block_hash = last_spent_output + .marked_deleted_in_block + .clone() + .ok_or(OutputManagerError::InconsistentDataError( + "Spent output should have `marked_deleted_in_block`", + )) + .for_protocol(self.operation_id)?; + let block_at_height = self + .get_base_node_block_at_height(mined_height, client) + .await + .for_protocol(self.operation_id)?; - if !batch_response.is_synced { - return Ok(false); + if block_at_height.is_none() || block_at_height.unwrap() != mined_in_block_hash { + // Chain has reorged since we last + warn!( + target: LOG_TARGET, + "The block that output ({}) was spent in has been reorged out, will try to find this output \ + again, but these funds have potentially been re-orged out of the chain", + last_spent_output.commitment.to_hex() + ); + self.db + .mark_output_as_unspent(last_spent_output.hash.clone()) + .await + .for_protocol(self.operation_id)?; + } else { + info!( + target: LOG_TARGET, + "Last mined transaction is still in the block chain according to base node." + ); + break; + } } - let mut returned_outputs = Vec::new(); - for output_proto in batch_response.outputs.iter() { - let output = TransactionOutput::try_from(output_proto.clone()).map_err(|_| { - OutputManagerProtocolError::new( - self.id, - OutputManagerError::ConversionError("Could not convert protobuf TransactionOutput".to_string()), - ) - })?; - returned_outputs.push(output); + while let Some(last_mined_output) = self.db.get_last_mined_output().await.for_protocol(self.operation_id)? 
{ + if last_mined_output.mined_height.is_none() || last_mined_output.mined_in_block.is_none() { + return Err(OutputManagerProtocolError::new( + self.operation_id, + OutputManagerError::InconsistentDataError( + "Output marked as mined, but mined_height or mined_in_block was empty", + ), + )); + } + let mined_height = last_mined_output.mined_height.unwrap(); + let mined_in_block_hash = last_mined_output.mined_in_block.clone().unwrap(); + let block_at_height = self + .get_base_node_block_at_height(mined_height, client) + .await + .for_protocol(self.operation_id)?; + if block_at_height.is_none() || block_at_height.unwrap() != mined_in_block_hash { + // Chain has reorged since we last + warn!( + target: LOG_TARGET, + "The block that output ({}) was in has been reorged out, will try to find this output again, but \ + these funds have potentially been re-orged out of the chain", + last_mined_output.commitment.to_hex() + ); + self.db + .set_output_to_unmined(last_mined_output.hash.clone()) + .await + .for_protocol(self.operation_id)?; + } else { + info!( + target: LOG_TARGET, + "Last mined transaction is still in the block chain according to base node." 
+ ); + last_mined_header_hash = Some(mined_in_block_hash); + break; + } } + Ok(last_mined_header_hash) + } - // complete validation - match self.validation_type { - TxoValidationType::Unspent => { - // Construct a HashMap of all the unspent outputs - let unspent_outputs: Vec = - self.resources.db.get_unspent_outputs().await.map_err(|e| { - OutputManagerProtocolError::new(self.id, OutputManagerError::OutputManagerStorageError(e)) - })?; - - // We only want to check outputs that we were expecting and are still valid - let mut output_hashes = HashMap::new(); - for uo in unspent_outputs.iter() { - let hash = uo.hash.clone(); - if batch.iter().any(|h| &hash == h) { - output_hashes.insert(hash, uo.clone()); - } - } - - // Go through all the returned UTXOs and if they are in the hashmap remove them - for output in returned_outputs.iter() { - let response_hash = output.hash(); - - let _ = output_hashes.remove(&response_hash); - } - - // If there are any remaining Unspent Outputs we will move them to the invalid collection - for (_k, v) in output_hashes { - // Get the transaction these belonged to so we can display the kernel signature of the transaction - // this output belonged to. + // TODO: remove this duplicated code from transaction validation protocol - warn!( - target: LOG_TARGET, - "Output with value {} not returned from Base Node query and is thus being invalidated", - v.unblinded_output.value, - ); - trace!( - target: LOG_TARGET, - "Output {} with features {} not returned from Base Node query and is thus being invalidated", - v.commitment.to_hex(), - v.unblinded_output.features, - ); - // If the output that is being invalidated has an associated TxId then get the kernel signature of - // the transaction and display for easier debugging - if let Some(tx_id) = self.resources.db.invalidate_output(v).await.map_err(|e| { - OutputManagerProtocolError::new(self.id, OutputManagerError::OutputManagerStorageError(e)) - })? 
{ - if let Ok(transaction) = self - .resources - .transaction_service - .get_completed_transaction(tx_id) - .await - { - info!( - target: LOG_TARGET, - "Invalidated Output is from Transaction (TxId: {}) with message: {} and Kernel \ - Signature: {}", - transaction.tx_id, - transaction.message, - transaction - .transaction - .first_kernel_excess_sig() - .unwrap_or(&Signature::default()) - .get_signature() - .to_hex() - ); - - // If transaction is imported we will invalidate it. Normal transactions will be handled by - // the transaction validators. - if transaction.status == TransactionStatus::Imported && transaction.valid { - if let Err(e) = self - .resources - .transaction_service - .set_transaction_validity(transaction.tx_id, false) - .await - { - warn!(target: LOG_TARGET, "Problem setting transaction validity: {}", e); - } - } - } - } else { - info!( - target: LOG_TARGET, - "Invalidated Output does not have an associated TxId, it is likely a Coinbase output lost \ - to a Re-Org" - ); - } - } - }, - TxoValidationType::Invalid => { - let invalid_outputs = self.resources.db.get_invalid_outputs().await.map_err(|e| { - OutputManagerProtocolError::new(self.id, OutputManagerError::OutputManagerStorageError(e)) - })?; - - for output in returned_outputs.iter() { - let response_hash = output.hash(); - - if let Some(output) = invalid_outputs.iter().find(|o| o.hash == response_hash) { - if self - .resources - .db - .revalidate_output(output.commitment.clone()) - .await - .is_ok() - { - info!( - target: LOG_TARGET, - "Output with value {} has been restored to a valid spendable output", - output.unblinded_output.value - ); + async fn get_base_node_block_at_height( + &mut self, + height: u64, + client: &mut BaseNodeWalletRpcClient, + ) -> Result, OutputManagerError> { + let result = match client.get_header_by_height(height).await { + Ok(r) => r, + Err(rpc_error) => { + info!(target: LOG_TARGET, "Error asking base node for header:{}", rpc_error); + match &rpc_error { + 
RequestFailed(status) => { + if status.status_code() == NotFound { + return Ok(None); + } else { + return Err(rpc_error.into()); } - } - } - }, - TxoValidationType::Spent => { - // Go through the response outputs and check if they are currently Spent, if they are then they can be - // marked as Unspent because they exist in the UTXO set. Hooray! - for output in returned_outputs.iter() { - match self - .resources - .db - .update_spent_output_to_unspent(output.clone().commitment) - .await - { - Ok(uo) => info!( - target: LOG_TARGET, - "Spent output with value {} restored to Unspent output", uo.unblinded_output.value - ), - Err(e) => debug!(target: LOG_TARGET, "Unable to restore Spent output to Unspent: {}", e), - } + }, + _ => { + return Err(rpc_error.into()); + }, } }, - } - debug!( - target: LOG_TARGET, - "Completed validation query for one batch of output hashes" - ); + }; - Ok(true) + let block_header: BlockHeader = result + .try_into() + .map_err(|s| OutputManagerError::InvalidMessageError(format!("Could not convert block header: {}", s)))?; + Ok(Some(block_header.hash())) } - async fn get_output_batches(&self) -> Result>>, OutputManagerProtocolError> { - let mut outputs: Vec> = match self.validation_type { - TxoValidationType::Unspent => self - .resources - .db - .get_unspent_outputs() - .await - .map_err(|e| { - OutputManagerProtocolError::new(self.id, OutputManagerError::OutputManagerStorageError(e)) - })? - .iter() - .map(|uo| uo.hash.clone()) - .collect(), - TxoValidationType::Spent => self - .resources - .db - .get_spent_outputs() - .await - .map_err(|e| { - OutputManagerProtocolError::new(self.id, OutputManagerError::OutputManagerStorageError(e)) - })? - .iter() - .map(|uo| uo.hash.clone()) - .collect(), - TxoValidationType::Invalid => self - .resources - .db - .get_invalid_outputs() - .await - .map_err(|e| { - OutputManagerProtocolError::new(self.id, OutputManagerError::OutputManagerStorageError(e)) - })? 
- .into_iter() - .map(|uo| uo.hash) - .collect(), - }; - - // Determine how many rounds of base node request we need to query all the transactions in batches of - // max_tx_query_batch_size - let num_batches = - ((outputs.len() as f32) / (self.resources.config.max_utxo_query_size as f32 + 0.1)) as usize + 1; + async fn query_base_node_for_outputs( + &self, + batch: &[DbUnblindedOutput], + base_node_client: &mut BaseNodeWalletRpcClient, + ) -> Result< + ( + Vec<(DbUnblindedOutput, u64, BlockHash, u64)>, + Vec, + u64, + ), + OutputManagerError, + > { + let batch_hashes = batch.iter().map(|o| o.hash.clone()).collect(); + + let batch_response = base_node_client + .utxo_query(UtxoQueryRequest { + output_hashes: batch_hashes, + }) + .await?; + + let mut mined = vec![]; + let mut unmined = vec![]; + + let mut returned_outputs = HashMap::new(); + for output_proto in batch_response.responses.iter() { + returned_outputs.insert(output_proto.output_hash.clone(), output_proto); + } - let mut batches: Vec>> = Vec::new(); - for _b in 0..num_batches { - let mut batch = Vec::new(); - for o in outputs.drain(..cmp::min(self.resources.config.max_utxo_query_size, outputs.len())) { - batch.push(o); - } - if !batch.is_empty() { - batches.push(batch); + for output in batch { + if let Some(returned_output) = returned_outputs.get(&output.hash) { + mined.push(( + output.clone(), + returned_output.mined_height, + returned_output.mined_in_block.clone(), + returned_output.mmr_position, + )) + } else { + unmined.push(output.clone()); } } - Ok(batches) + + Ok((mined, unmined, batch_response.height_of_longest_chain)) } - // exponential back-off with max and min delays - fn update_retry_delay(&mut self, synced: bool) { - let new_delay = if synced { - self.resources.config.base_node_query_timeout - } else { - let delay = self.retry_delay; - cmp::min(delay * 2, MAX_RETRY_DELAY) - }; + #[allow(clippy::ptr_arg)] + async fn update_output_as_mined( + &self, + tx: &DbUnblindedOutput, + mined_in_block: 
&BlockHash, + mined_height: u64, + mmr_position: u64, + tip_height: u64, + ) -> Result<(), OutputManagerProtocolError> { + let confirmed = (tip_height - mined_height) >= self.config.num_confirmations_required; + + self.db + .set_received_output_mined_height( + tx.hash.clone(), + mined_height, + mined_in_block.clone(), + mmr_position, + confirmed, + ) + .await + .for_protocol(self.operation_id)?; - self.retry_delay = new_delay; + Ok(()) } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum TxoValidationType { - Unspent, - Spent, - Invalid, -} -impl fmt::Display for TxoValidationType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TxoValidationType::Unspent => write!(f, "Unspent Outputs Validation"), - TxoValidationType::Spent => write!(f, "Spent Outputs Validation"), - TxoValidationType::Invalid => write!(f, "Invalid Outputs Validation"), + fn publish_event(&self, event: OutputManagerEvent) { + if let Err(e) = self.event_publisher.send(Arc::new(event)) { + debug!( + target: LOG_TARGET, + "Error sending event because there are no subscribers: {:?}", e + ); } } } diff --git a/base_layer/wallet/src/schema.rs b/base_layer/wallet/src/schema.rs index 6b6c61512a..8562c14884 100644 --- a/base_layer/wallet/src/schema.rs +++ b/base_layer/wallet/src/schema.rs @@ -24,6 +24,7 @@ table! { valid -> Integer, confirmations -> Nullable, mined_height -> Nullable, + mined_in_block -> Nullable, } } @@ -51,7 +52,7 @@ table! { table! { key_manager_states (id) { - id -> Nullable, + id -> Integer, master_key -> Binary, branch_seed -> Text, primary_key_index -> BigInt, @@ -93,7 +94,6 @@ table! { flags -> Integer, maturity -> BigInt, status -> Integer, - tx_id -> Nullable, hash -> Nullable, script -> Binary, input_data -> Binary, @@ -102,14 +102,13 @@ table! { metadata_signature_nonce -> Binary, metadata_signature_u_key -> Binary, metadata_signature_v_key -> Binary, - } -} - -table! 
{ - pending_transaction_outputs (tx_id) { - tx_id -> BigInt, - short_term -> Integer, - timestamp -> Timestamp, + mined_height -> Nullable, + mined_in_block -> Nullable, + mined_mmr_position -> Nullable, + marked_deleted_at_height -> Nullable, + marked_deleted_in_block -> Nullable, + received_in_tx_id -> Nullable, + spent_in_tx_id -> Nullable, coinbase_block_height -> Nullable, } } @@ -130,6 +129,5 @@ allow_tables_to_appear_in_same_query!( known_one_sided_payment_scripts, outbound_transactions, outputs, - pending_transaction_outputs, wallet_settings, ); diff --git a/base_layer/wallet/src/transaction_service/config.rs b/base_layer/wallet/src/transaction_service/config.rs index b109b62969..42e6f6478b 100644 --- a/base_layer/wallet/src/transaction_service/config.rs +++ b/base_layer/wallet/src/transaction_service/config.rs @@ -39,6 +39,7 @@ pub struct TransactionServiceConfig { pub max_tx_query_batch_size: usize, pub transaction_routing_mechanism: TransactionRoutingMechanism, pub transaction_event_channel_size: usize, + pub transaction_mempool_resubmission_window: Duration, } impl Default for TransactionServiceConfig { @@ -53,9 +54,10 @@ impl Default for TransactionServiceConfig { resend_response_cooldown: Duration::from_secs(300), pending_transaction_cancellation_timeout: Duration::from_secs(259200), // 3 Days num_confirmations_required: 3, - max_tx_query_batch_size: 5000, + max_tx_query_batch_size: 20, transaction_routing_mechanism: TransactionRoutingMechanism::default(), transaction_event_channel_size: 1000, + transaction_mempool_resubmission_window: Duration::from_secs(600), } } } diff --git a/base_layer/wallet/src/transaction_service/error.rs b/base_layer/wallet/src/transaction_service/error.rs index 6a2a8af144..d3984aa25c 100644 --- a/base_layer/wallet/src/transaction_service/error.rs +++ b/base_layer/wallet/src/transaction_service/error.rs @@ -28,7 +28,7 @@ use crate::{ use diesel::result::Error as DieselError; use futures::channel::oneshot::Canceled; use 
serde_json::Error as SerdeJsonError; -use tari_comms::{peer_manager::node_id::NodeIdError, protocol::rpc::RpcError}; +use tari_comms::{connectivity::ConnectivityError, peer_manager::node_id::NodeIdError, protocol::rpc::RpcError}; use tari_comms_dht::outbound::DhtOutboundError; use tari_core::transactions::{transaction::TransactionError, transaction_protocol::TransactionProtocolError}; use tari_p2p::services::liveness::error::LivenessError; @@ -79,6 +79,8 @@ pub enum TransactionServiceError { DiscoveryProcessFailed(TxId), #[error("Invalid Completed Transaction provided")] InvalidCompletedTransaction, + #[error("Attempted to broadcast a coinbase transaction. TxId `{0}`")] + AttemptedToBroadcastCoinbaseTransaction(TxId), #[error("No Base Node public keys are provided for Base chain broadcast and monitoring")] NoBaseNodeKeysProvided, #[error("Error sending data to Protocol via registered channels")] @@ -145,6 +147,13 @@ pub enum TransactionServiceError { ServiceError(String), #[error("Wallet Recovery in progress so Transaction Service Messaging Requests ignored")] WalletRecoveryInProgress, + #[error("Connectivity error: {source}")] + ConnectivityError { + #[from] + source: ConnectivityError, + }, + #[error("Base Node is not synced")] + BaseNodeNotSynced, } #[derive(Debug, Error)] @@ -204,3 +213,16 @@ impl From for TransactionServiceError { tspe.error } } + +pub trait TransactionServiceProtocolErrorExt { + fn for_protocol(self, id: u64) -> Result; +} + +impl> TransactionServiceProtocolErrorExt for Result { + fn for_protocol(self, id: u64) -> Result { + match self { + Ok(r) => Ok(r), + Err(e) => Err(TransactionServiceProtocolError::new(id, e.into())), + } + } +} diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index 7a6ab48649..852b4e89ba 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -35,8 +35,6 @@ use 
tari_service_framework::reply_channel::SenderService; use tokio::sync::broadcast; use tower::Service; -use crate::types::ValidationRetryStrategy; - /// API Request enum #[allow(clippy::large_enum_variant)] pub enum TransactionServiceRequest { @@ -48,7 +46,6 @@ pub enum TransactionServiceRequest { GetCancelledCompletedTransactions, GetCompletedTransaction(TxId), GetAnyTransaction(TxId), - SetBaseNodePublicKey(CommsPublicKey), SendTransaction(CommsPublicKey, MicroTari, MicroTari, String), SendOneSidedTransaction(CommsPublicKey, MicroTari, MicroTari, String), CancelTransaction(TxId), @@ -63,8 +60,7 @@ pub enum TransactionServiceRequest { RestartBroadcastProtocols, GetNumConfirmationsRequired, SetNumConfirmationsRequired(u64), - SetCompletedTransactionValidity(u64, bool), - ValidateTransactions(ValidationRetryStrategy), + ValidateTransactions, } impl fmt::Display for TransactionServiceRequest { @@ -77,7 +73,6 @@ impl fmt::Display for TransactionServiceRequest { Self::GetCancelledPendingOutboundTransactions => f.write_str("GetCancelledPendingOutboundTransactions"), Self::GetCancelledCompletedTransactions => f.write_str("GetCancelledCompletedTransactions"), Self::GetCompletedTransaction(t) => f.write_str(&format!("GetCompletedTransaction({})", t)), - Self::SetBaseNodePublicKey(k) => f.write_str(&format!("SetBaseNodePublicKey ({})", k)), Self::SendTransaction(k, v, _, msg) => f.write_str(&format!("SendTransaction (to {}, {}, {})", k, v, msg)), Self::SendOneSidedTransaction(k, v, _, msg) => { f.write_str(&format!("SendOneSidedTransaction (to {}, {}, {})", k, v, msg)) @@ -105,11 +100,7 @@ impl fmt::Display for TransactionServiceRequest { Self::GetNumConfirmationsRequired => f.write_str("GetNumConfirmationsRequired"), Self::SetNumConfirmationsRequired(_) => f.write_str("SetNumConfirmationsRequired"), Self::GetAnyTransaction(t) => f.write_str(&format!("GetAnyTransaction({})", t)), - TransactionServiceRequest::ValidateTransactions(t) => 
f.write_str(&format!("ValidateTransaction({:?})", t)), - TransactionServiceRequest::SetCompletedTransactionValidity(tx_id, s) => f.write_str(&format!( - "SetCompletedTransactionValidity(TxId: {}, Validity: {:?})", - tx_id, s - )), + TransactionServiceRequest::ValidateTransactions => f.write_str("ValidateTransactions"), } } } @@ -153,15 +144,22 @@ pub enum TransactionEvent { TransactionCancelled(TxId), TransactionBroadcast(TxId), TransactionImported(TxId), - TransactionMined(TxId), + TransactionMined { + tx_id: TxId, + is_valid: bool, + }, TransactionMinedRequestTimedOut(TxId), - TransactionMinedUnconfirmed(TxId, u64), + // TODO: Split into normal transaction mined and coinbase transaction mined + TransactionMinedUnconfirmed { + tx_id: TxId, + num_confirmations: u64, + is_valid: bool, + }, TransactionValidationTimedOut(u64), TransactionValidationSuccess(u64), TransactionValidationFailure(u64), TransactionValidationAborted(u64), TransactionValidationDelayed(u64), - TransactionBaseNodeConnectionProblem(u64), Error(String), } @@ -351,20 +349,6 @@ impl TransactionServiceHandle { } } - pub async fn set_base_node_public_key( - &mut self, - public_key: CommsPublicKey, - ) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::SetBaseNodePublicKey(public_key)) - .await?? - { - TransactionServiceResponse::BaseNodePublicKeySet => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } - pub async fn import_utxo( &mut self, amount: MicroTari, @@ -507,28 +491,14 @@ impl TransactionServiceHandle { } } - pub async fn validate_transactions( - &mut self, - retry_strategy: ValidationRetryStrategy, - ) -> Result { + pub async fn validate_transactions(&mut self) -> Result { match self .handle - .call(TransactionServiceRequest::ValidateTransactions(retry_strategy)) + .call(TransactionServiceRequest::ValidateTransactions) .await?? 
{ TransactionServiceResponse::ValidationStarted(id) => Ok(id), _ => Err(TransactionServiceError::UnexpectedApiResponse), } } - - pub async fn set_transaction_validity(&mut self, tx_id: TxId, valid: bool) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::SetCompletedTransactionValidity(tx_id, valid)) - .await?? - { - TransactionServiceResponse::CompletedTransactionValidityChanged => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } } diff --git a/base_layer/wallet/src/transaction_service/mod.rs b/base_layer/wallet/src/transaction_service/mod.rs index 48b3484dca..3d17a1b564 100644 --- a/base_layer/wallet/src/transaction_service/mod.rs +++ b/base_layer/wallet/src/transaction_service/mod.rs @@ -23,6 +23,7 @@ use std::sync::Arc; use crate::{ + base_node_service::handle::BaseNodeServiceHandle, output_manager_service::handle::OutputManagerHandle, storage::database::{WalletBackend, WalletDatabase}, transaction_service::{ @@ -36,7 +37,8 @@ use futures::{Stream, StreamExt}; use log::*; use tokio::sync::broadcast; -use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeIdentity}; +use crate::connectivity_service::WalletConnectivityHandle; +use tari_comms::peer_manager::NodeIdentity; use tari_comms_dht::Dht; use tari_core::{ proto::base_node as base_node_proto, @@ -208,7 +210,8 @@ where context.spawn_when_ready(move |handles| async move { let outbound_message_service = handles.expect_handle::().outbound_requester(); let output_manager_service = handles.expect_handle::(); - let connectivity_manager = handles.expect_handle::(); + let connectivity = handles.expect_handle::(); + let base_node_service_handle = handles.expect_handle::(); let result = TransactionService::new( config, @@ -222,11 +225,12 @@ where transaction_cancelled_stream, output_manager_service, outbound_message_service, - connectivity_manager, + connectivity, publisher, node_identity, factories, 
handles.get_shutdown_signal(), + base_node_service_handle, ) .start() .await; diff --git a/base_layer/wallet/src/transaction_service/protocols/mod.rs b/base_layer/wallet/src/transaction_service/protocols/mod.rs index 772c3402bd..15bdb1dd1c 100644 --- a/base_layer/wallet/src/transaction_service/protocols/mod.rs +++ b/base_layer/wallet/src/transaction_service/protocols/mod.rs @@ -21,7 +21,6 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pub mod transaction_broadcast_protocol; -pub mod transaction_coinbase_monitoring_protocol; pub mod transaction_receive_protocol; pub mod transaction_send_protocol; pub mod transaction_validation_protocol; diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs index 4a28383226..61b83891de 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{ + connectivity_service::WalletConnectivityInterface, output_manager_service::TxId, transaction_service::{ error::{TransactionServiceError, TransactionServiceProtocolError}, @@ -34,9 +35,12 @@ use crate::{ }; use futures::FutureExt; use log::*; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{ + convert::TryFrom, + sync::Arc, + time::{Duration, Instant}, +}; use tari_common_types::types::Signature; -use tari_comms::{peer_manager::NodeId, types::CommsPublicKey, PeerConnection}; use tari_core::{ base_node::{ proto::wallet_rpc::{TxLocation, TxQueryResponse, TxSubmissionRejectionReason, TxSubmissionResponse}, @@ -45,153 +49,51 @@ use tari_core::{ transactions::transaction::Transaction, }; use tari_crypto::tari_utilities::hex::Hex; -use tokio::{sync::broadcast, time::sleep}; +use tokio::{sync::watch, time::sleep}; const LOG_TARGET: &str = "wallet::transaction_service::protocols::broadcast_protocol"; -pub struct TransactionBroadcastProtocol -where TBackend: TransactionBackend + 'static -{ +pub struct TransactionBroadcastProtocol { tx_id: TxId, mode: TxBroadcastMode, - resources: TransactionServiceResources, - timeout: Duration, - base_node_public_key: CommsPublicKey, - timeout_update_receiver: Option>, - base_node_update_receiver: Option>, - first_rejection: bool, + resources: TransactionServiceResources, + timeout_update_receiver: watch::Receiver, + last_rejection: Option, } -impl TransactionBroadcastProtocol -where TBackend: TransactionBackend + 'static +impl TransactionBroadcastProtocol +where + TBackend: TransactionBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, { pub fn new( tx_id: TxId, - resources: TransactionServiceResources, - timeout: Duration, - base_node_public_key: CommsPublicKey, - timeout_update_receiver: broadcast::Receiver, - base_node_update_receiver: broadcast::Receiver, + resources: TransactionServiceResources, + timeout_update_receiver: watch::Receiver, ) -> Self { Self { tx_id, mode: 
TxBroadcastMode::TransactionSubmission, resources, - timeout, - base_node_public_key, - timeout_update_receiver: Some(timeout_update_receiver), - base_node_update_receiver: Some(base_node_update_receiver), - first_rejection: false, + timeout_update_receiver, + last_rejection: None, } } /// The task that defines the execution of the protocol. pub async fn execute(mut self) -> Result { - let mut timeout_update_receiver = self.timeout_update_receiver.take().ok_or_else(|| { - TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::InvalidStateError) - })?; - - let mut base_node_update_receiver = self.base_node_update_receiver.take().ok_or_else(|| { - TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::InvalidStateError) - })?; - let mut shutdown = self.resources.shutdown_signal.clone(); + let mut current_base_node_watcher = self.resources.connectivity.get_current_base_node_watcher(); + let mut timeout_update_receiver = self.timeout_update_receiver.clone(); + // Main protocol loop loop { - let base_node_node_id = NodeId::from_key(&self.base_node_public_key); - let mut connection: Option = None; - - let delay = sleep(self.timeout); - - debug!( - target: LOG_TARGET, - "Connecting to Base Node (Public Key: {})", self.base_node_public_key, - ); - tokio::select! 
{ - dial_result = self.resources.connectivity_manager.dial_peer(base_node_node_id.clone()) => { - match dial_result { - Ok(base_node_connection) => { - connection = Some(base_node_connection); - }, - Err(e) => { - info!(target: LOG_TARGET, "Problem connecting to base node: {} for Transaction Broadcast Protocol (TxId: {})", e, self.tx_id); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionBaseNodeConnectionProblem( - self.tx_id, - ))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - }, - } - }, - updated_timeout = timeout_update_receiver.recv() => { - match updated_timeout { - Ok(to) => { - self.timeout = to; - info!( - target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) timeout updated to {:?}", self.tx_id, self.timeout - ); - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) event 'updated_timeout' triggered with error: {:?}", - self.tx_id, - e, - ); - } - } - }, - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(bn) => { - self.base_node_public_key = bn; - info!( - target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) Base Node Public key updated to {:?}", self.tx_id, self.base_node_public_key - ); - self.first_rejection = false; - continue; - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) event 'base_node_update' triggered with error: {:?}", - self.tx_id, - e, - ); - } - } - } - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "Transaction Broadcast Protocol (TxId: {}) shutting down because it received the shutdown signal", self.tx_id); - return Err(TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::Shutdown)) - }, - } - - let mut base_node_connection = match connection { - None => { - tokio::select! 
{ - _ = delay.fuse() => { - continue; - }, - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "Transaction Broadcast Protocol (TxId: {}) shutting down because it received the shutdown signal", self.tx_id); - return Err(TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::Shutdown)) - }, - } - }, - Some(c) => c, - }; + let mut client = self + .resources + .connectivity + .obtain_base_node_wallet_rpc_client() + .await + .ok_or_else(|| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::Shutdown))?; let completed_tx = match self.resources.db.get_completed_transaction(self.tx_id).await { Ok(tx) => tx, @@ -220,45 +122,17 @@ where TBackend: TransactionBackend + 'static return Ok(self.tx_id); } - let mut client = match base_node_connection - .connect_rpc_using_builder( - BaseNodeWalletRpcClient::builder() - .with_deadline(self.resources.config.broadcast_monitoring_timeout) - .with_handshake_timeout(self.resources.config.broadcast_monitoring_timeout), - ) - .await - { - Ok(c) => c, - Err(e) => { - warn!(target: LOG_TARGET, "Problem establishing RPC connection: {}", e); - delay.await; - continue; - }, - }; - - let delay = sleep(self.timeout); loop { tokio::select! 
{ - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(bn) => { - self.base_node_public_key = bn; - info!( - target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) Base Node Public key updated to {:?}", self.tx_id, self.base_node_public_key - ); - self.first_rejection = false; - continue; - }, - Err(e) => { - trace!( + _ = current_base_node_watcher.changed() => { + if let Some(peer) = &*current_base_node_watcher.borrow() { + info!( target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) event 'base_node_update' triggered with error: {:?}", - self.tx_id, - e, + "Transaction Broadcast protocol (TxId: {}) Base Node Public key updated to {} (NodeID: {})", self.tx_id, peer.public_key, peer.node_id ); } - } + self.last_rejection = None; + continue; }, result = self.query_or_submit_transaction(completed_tx.clone(), &mut client).fuse() => { match self.mode { @@ -269,61 +143,23 @@ where TBackend: TransactionBackend + 'static }, TxBroadcastMode::TransactionQuery => { if result? { - // We are done! 
- self.resources - .output_manager_service - .confirm_transaction( - completed_tx.tx_id, - completed_tx.transaction.body.inputs().clone(), - completed_tx.transaction.body.outputs().clone(), - ) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - self.resources - .db - .confirm_broadcast_or_coinbase_transaction(completed_tx.tx_id) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMined(self.tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - + debug!(target: LOG_TARGET, "Transaction broadcast, transaction validation protocol will continue from here"); return Ok(self.tx_id) } }, } // Wait out the remainder of the delay before proceeding with next loop drop(client); - delay.await; + let delay = *timeout_update_receiver.borrow(); + sleep(delay).await; break; }, - updated_timeout = timeout_update_receiver.recv() => { - if let Ok(to) = updated_timeout { - self.timeout = to; - info!( - target: LOG_TARGET, - "Transaction Broadcast protocol (TxId: {}) timeout updated to {:?}", self.tx_id, self.timeout - ); - break; - } else { - trace!( - target: LOG_TARGET, - "Transaction Broadcast protocol event 'updated_timeout' triggered (TxId: {}) ({:?})", - self.tx_id, - updated_timeout, - ); - } + _ = timeout_update_receiver.changed() => { + info!( + target: LOG_TARGET, + "Transaction Broadcast protocol (TxId: {}) timeout updated to {:?}", self.tx_id, timeout_update_receiver.borrow() + ); + break; }, _ = shutdown.wait() => { info!(target: LOG_TARGET, "Transaction Broadcast Protocol (TxId: {}) shutting down because it received the shutdown signal", self.tx_id); @@ -403,25 +239,10 @@ where TBackend: TransactionBackend + 'static } else if response.rejection_reason == 
TxSubmissionRejectionReason::AlreadyMined { info!( target: LOG_TARGET, - "Transaction (TxId: {}) is Already Mined according to Base Node.", self.tx_id + "Transaction (TxId: {}) is Already Mined according to Base Node. Will be completed by transaction \ + validation protocol.", + self.tx_id ); - self.resources - .db - .mine_completed_transaction(self.tx_id) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMined(self.tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); } else { info!( target: LOG_TARGET, @@ -490,64 +311,23 @@ where TBackend: TransactionBackend + 'static // Mined? if response.location == TxLocation::Mined { - self.resources - .db - .set_transaction_confirmations(self.tx_id, response.confirmations) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - self.resources - .db - .set_transaction_mined_height( - self.tx_id, - response.height_of_longest_chain.saturating_sub(response.confirmations), - ) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - if response.confirmations >= self.resources.config.num_confirmations_required as u64 { - info!( - target: LOG_TARGET, - "Transaction (TxId: {}) detected as mined and CONFIRMED with {} confirmations", - self.tx_id, - response.confirmations - ); - return Ok(true); - } info!( target: LOG_TARGET, - "Transaction (TxId: {}) detected as mined but UNCONFIRMED with {} confirmations", - self.tx_id, - response.confirmations + "Broadcast transaction detected as mined, will be managed by transaction validation protocol" ); - self.resources - .db - .mine_completed_transaction(self.tx_id) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, 
TransactionServiceError::from(e)))?; - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMinedUnconfirmed( - self.tx_id, - response.confirmations, - ))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); + Ok(true) } else if response.location != TxLocation::InMempool { - if !self.first_rejection { + if self.last_rejection.is_none() || + self.last_rejection.unwrap().elapsed() > + self.resources.config.transaction_mempool_resubmission_window + { info!( target: LOG_TARGET, "Transaction (TxId: {}) not found in mempool, attempting to resubmit transaction", self.tx_id ); self.mode = TxBroadcastMode::TransactionSubmission; - self.first_rejection = true; + self.last_rejection = Some(Instant::now()); + Ok(false) } else { error!( target: LOG_TARGET, @@ -569,19 +349,18 @@ where TBackend: TransactionBackend + 'static ); e }); - return Err(TransactionServiceProtocolError::new( + Err(TransactionServiceProtocolError::new( self.tx_id, TransactionServiceError::MempoolRejection, - )); + )) } } else { info!( target: LOG_TARGET, "Transaction (TxId: {}) found in mempool.", self.tx_id ); + Ok(true) } - - Ok(false) } async fn query_or_submit_transaction( diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs deleted file mode 100644 index f368d33d89..0000000000 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs +++ /dev/null @@ -1,593 +0,0 @@ -// Copyright 2020. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. 
-// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -use crate::{ - output_manager_service::TxId, - transaction_service::{ - error::{TransactionServiceError, TransactionServiceProtocolError}, - handle::TransactionEvent, - service::TransactionServiceResources, - storage::{database::TransactionBackend, models::CompletedTransaction}, - }, -}; -use futures::FutureExt; -use log::*; -use std::{convert::TryFrom, sync::Arc, time::Duration}; -use tari_common_types::types::Signature; -use tari_comms::{peer_manager::NodeId, types::CommsPublicKey, PeerConnection}; -use tari_core::base_node::{ - proto::wallet_rpc::{TxLocation, TxQueryResponse}, - rpc::BaseNodeWalletRpcClient, -}; -use tari_crypto::tari_utilities::{hex::Hex, Hashable}; -use tokio::{sync::broadcast, time::sleep}; - -const LOG_TARGET: &str = "wallet::transaction_service::protocols::coinbase_monitoring"; - -/// This protocol defines the process of monitoring a mempool and base node to detect when a Broadcast transaction is -/// Mined or leaves the mempool in which case it should be cancelled - -pub struct TransactionCoinbaseMonitoringProtocol -where TBackend: TransactionBackend + 'static -{ - tx_id: TxId, - block_height: u64, - resources: TransactionServiceResources, - timeout: Duration, - base_node_public_key: CommsPublicKey, - base_node_update_receiver: Option>, - timeout_update_receiver: Option>, -} - -impl TransactionCoinbaseMonitoringProtocol -where TBackend: TransactionBackend + 'static -{ - #[allow(clippy::too_many_arguments)] - pub fn new( - tx_id: TxId, - block_height: u64, - resources: TransactionServiceResources, - timeout: Duration, - base_node_public_key: CommsPublicKey, - base_node_update_receiver: broadcast::Receiver, - timeout_update_receiver: broadcast::Receiver, - ) -> Self { - Self { - tx_id, - block_height, - resources, - timeout, - base_node_public_key, - base_node_update_receiver: Some(base_node_update_receiver), - timeout_update_receiver: Some(timeout_update_receiver), - } - } - - /// The task that defines the execution of the protocol. 
- pub async fn execute(mut self) -> Result { - let mut base_node_update_receiver = self.base_node_update_receiver.take().ok_or_else(|| { - TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::InvalidStateError) - })?; - - let mut timeout_update_receiver = self.timeout_update_receiver.take().ok_or_else(|| { - TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::InvalidStateError) - })?; - - trace!( - target: LOG_TARGET, - "Starting coinbase monitoring protocol for transaction (TxId: {})", - self.tx_id - ); - - // This is the main loop of the protocol and following the following steps - // 1) Check transaction being monitored is still in the Coinbase state and needs to be monitored - // 2) Make a transaction_query RPC call to the base node - // 3) Wait for both Base Node responses OR a Timeout - // a) If the chain tip moves beyond this block height and require confirmations AND the coinbase kernel is - // not in the blockchain cancel this transaction - // b) If the coinbase kernel is in the blockchain the protocol can end with success - // c) IF timeout is reached, start again - let mut shutdown = self.resources.shutdown_signal.clone(); - loop { - let completed_tx = match self.resources.db.get_completed_transaction(self.tx_id).await { - Ok(tx) => tx, - Err(e) => { - info!( - target: LOG_TARGET, - "Cannot find Coinbase Transaction (TxId: {}) likely due to being cancelled: {}", self.tx_id, e - ); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionCancelled(self.tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - return Ok(self.tx_id); - }, - }; - debug!( - target: LOG_TARGET, - "Coinbase transaction (TxId: {}) has status '{}' and is cancelled ({}) and is valid ({}).", - self.tx_id, - completed_tx.status, - completed_tx.cancelled, - completed_tx.valid, - ); - - let mut hashes = Vec::new(); 
- for o in completed_tx.transaction.body.outputs() { - hashes.push(o.hash()); - } - - info!( - target: LOG_TARGET, - "Sending Transaction Mined? request for Coinbase Tx with TxId: {} and Kernel Signature {} to Base Node", - self.tx_id, - completed_tx.transaction.body.kernels()[0] - .excess_sig - .get_signature() - .to_hex(), - ); - - // Get a base node RPC connection - let base_node_node_id = NodeId::from_key(&self.base_node_public_key); - let mut connection: Option = None; - debug!( - target: LOG_TARGET, - "Connecting to Base Node (Public Key: {}) for transaction (TxId: {})", - self.base_node_public_key, - self.tx_id, - ); - tokio::select! { - dial_result = self.resources.connectivity_manager.dial_peer(base_node_node_id.clone()).fuse() => { - match dial_result { - Ok(base_node_connection) => { - connection = Some(base_node_connection); - }, - Err(e) => { - warn!( - target: LOG_TARGET, - "Problem connecting to base node for Coinbase Monitoring Protocol (TxId: {}): {}", - self.tx_id, - e, - ); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionBaseNodeConnectionProblem( - self.tx_id, - ))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - }, - } - }, - updated_timeout = timeout_update_receiver.recv() => { - match updated_timeout { - Ok(to) => { - self.timeout = to; - info!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) timeout updated to {:?}", - self.tx_id, - self.timeout - ); - }, - Err(e) => { - error!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) event 'updated_timeout' triggered with \ - error: {:?}", - self.tx_id, - e, - ); - } - } - }, - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(bn) => { - self.base_node_public_key = bn; - info!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) Base Node Public key updated to {:?}", - self.tx_id, - 
self.base_node_public_key - ); - continue; - }, - Err(e) => { - error!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) event 'base_node_update' triggered with \ - error: {:?}", - self.tx_id, - e, - ); - } - } - } - _ = shutdown.wait() => { - info!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) shutting down because it received the shutdown \ - signal (at 1)", - self.tx_id - ); - return Err(TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::Shutdown)) - }, - } - - let delay = sleep(self.timeout); - let mut base_node_connection = match connection { - None => { - tokio::select! { - _ = delay.fuse() => { - continue; - }, - _ = shutdown.wait() => { - info!( - target: LOG_TARGET, - "Coinbase Monitoring Protocol (TxId: {}) shutting down because it received the \ - shutdown signal (at 2)", - self.tx_id - ); - return Err(TransactionServiceProtocolError::new( - self.tx_id, - TransactionServiceError::Shutdown - )) - }, - } - }, - Some(c) => c, - }; - let mut client = match base_node_connection - .connect_rpc_using_builder( - BaseNodeWalletRpcClient::builder() - .with_deadline(self.resources.config.chain_monitoring_timeout) - .with_handshake_timeout(self.resources.config.chain_monitoring_timeout), - ) - .await - { - Ok(c) => c, - Err(e) => { - warn!( - target: LOG_TARGET, - "Problem establishing RPC connection (TxId: {}): {}", self.tx_id, e - ); - delay.await; - continue; - }, - }; - - let signature: Signature; - if !completed_tx.transaction.body.kernels().is_empty() { - signature = completed_tx.transaction.body.kernels()[0].clone().excess_sig; - } else { - error!( - target: LOG_TARGET, - "Malformed transaction (TxId: {}); signature does not exist", self.tx_id, - ); - return Err(TransactionServiceProtocolError::new( - self.tx_id, - TransactionServiceError::InvalidCompletedTransaction, - )); - } - let delay = sleep(self.timeout).fuse(); - loop { - tokio::select! 
{ - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(bn) => { - self.base_node_public_key = bn; - info!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) Base Node Public key updated to {:?}", - self.tx_id, - self.base_node_public_key - ); - continue; - }, - Err(e) => { - error!( - target: LOG_TARGET, - "Coinbase Monitoring protocol (TxId: {}) event 'base_node_update' triggered with \ - error: {:?}", - self.tx_id, - e, - ); - } - } - } - result = self.query_coinbase_transaction(signature.clone(), completed_tx.clone(), &mut client).fuse() => { - let (coinbase_kernel_found, metadata) = match result { - Ok(r) => r, - _ => (false, None), - }; - if coinbase_kernel_found { - // We are done! - info!( - target: LOG_TARGET, - "Coinbase monitoring protocol for transaction (TxId: {}) completed successfully.", - self.tx_id, - ); - return Ok(self.tx_id); - } - if let Some(tip) = metadata { - // If the tip has moved beyond this Coinbase transaction's blockheight and required - // number of confirmations and it wasn't mined then it should be cancelled - if tip > self.block_height + self.resources.config.num_confirmations_required { - warn!( - target: LOG_TARGET, - "Chain tip has moved ahead of this Coinbase transaction's block height and \ - required number of confirmations without it being mined. 
Cancelling Coinbase \ - transaction (TxId: {}).", - self.tx_id - ); - self.cancel_transaction().await; - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionCancelled(self.tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - return Err(TransactionServiceProtocolError::new( - self.tx_id, - TransactionServiceError::ChainTipHigherThanCoinbaseHeight, - )); - }; - } - info!( - target: LOG_TARGET, - "Coinbase transaction (TxId: {}) not mined yet, still waiting.", self.tx_id, - ); - // Wait out the remainder of the delay before proceeding with next loop - delay.await; - break; - }, - updated_timeout = timeout_update_receiver.recv() => { - if let Ok(to) = updated_timeout { - self.timeout = to; - info!( - target: LOG_TARGET, - "Coinbase monitoring protocol (TxId: {}) timeout updated to {:?}", - self.tx_id, - self.timeout - ); - break; - } else { - trace!( - target: LOG_TARGET, - "Coinbase monitoring protocol event 'updated_timeout' triggered (TxId: {}) ({:?})", - self.tx_id, - updated_timeout, - ); - } - }, - _ = shutdown.wait() => { - info!( - target: LOG_TARGET, - "Coinbase Monitoring Protocol (TxId: {}) shutting down because it received the shutdown \ - signal (at 3)", - self.tx_id - ); - return Err(TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::Shutdown)) - }, - } - info!( - target: LOG_TARGET, - "Coinbase monitoring process timed out for transaction (TxId: {})", self.tx_id - ); - - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMinedRequestTimedOut(self.tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - } - } - } - - /// Attempt to query the location of the transaction from the base node via RPC. 
- /// # Returns: - /// `Ok((true, Some(u64)))` => Transaction was successfully mined and confirmed. - /// `Ok((false, Some(u64)))` => Either the transaction is mined but does not have the required number of - /// confirmations yet, or it is not mined and still in the mempool, or it is not mined - /// and not found in the mempool. - /// `Ok((false, None))` => There was a problem with the RPC call. - async fn query_coinbase_transaction( - &mut self, - signature: Signature, - completed_tx: CompletedTransaction, - client: &mut BaseNodeWalletRpcClient, - ) -> Result<(bool, Option), TransactionServiceProtocolError> { - trace!( - target: LOG_TARGET, - "Querying status for coinbase transaction (TxId: {})", - self.tx_id, - ); - let response = match client.transaction_query(signature.into()).await { - Ok(r) => match TxQueryResponse::try_from(r) { - Ok(r) => r, - Err(_) => { - trace!( - target: LOG_TARGET, - "Could not convert proto TxQueryResponse for coinbase transaction (TxId: {})", - self.tx_id, - ); - return Ok((false, None)); - }, - }, - Err(e) => { - error!( - target: LOG_TARGET, - "Coinbase transaction Query RPC Call to Base Node failed for coinbase transaction (TxId: {}): {}", - self.tx_id, - e - ); - return Ok((false, None)); - }, - }; - - if !(response.is_synced || - response.location == TxLocation::Mined && - response.confirmations >= self.resources.config.num_confirmations_required) - { - info!( - target: LOG_TARGET, - "Base Node reports not being synced, coinbase monitoring will be retried." - ); - return Ok((false, Some(response.height_of_longest_chain))); - } - - // Mined? 
- if response.location == TxLocation::Mined { - if response.confirmations >= self.resources.config.num_confirmations_required { - info!( - target: LOG_TARGET, - "Coinbase transaction (TxId: {}) detected as mined and CONFIRMED with {} confirmations", - self.tx_id, - response.confirmations - ); - self.resources - .output_manager_service - .confirm_transaction( - self.tx_id, - completed_tx.transaction.body.inputs().clone(), - completed_tx.transaction.body.outputs().clone(), - ) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - self.resources - .db - .confirm_broadcast_or_coinbase_transaction(self.tx_id) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMined(self.tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - return Ok((true, Some(response.height_of_longest_chain))); - } - info!( - target: LOG_TARGET, - "Coinbase transaction (TxId: {}) detected as mined but UNCONFIRMED with {} confirmations", - self.tx_id, - response.confirmations - ); - - self.resources - .db - .set_transaction_mined_height(self.tx_id, self.block_height) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - self.resources - .db - .mine_completed_transaction(self.tx_id) - .await - .map_err(|e| TransactionServiceProtocolError::new(self.tx_id, TransactionServiceError::from(e)))?; - - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMinedUnconfirmed( - self.tx_id, - response.confirmations, - ))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - } else if response.location == TxLocation::InMempool { - debug!( - 
target: LOG_TARGET, - "Coinbase transaction (TxId: {}) found in mempool, still waiting.", self.tx_id - ); - } else { - debug!( - target: LOG_TARGET, - "Coinbase transaction (TxId: {}) not found in mempool, still waiting.", self.tx_id - ); - } - - Ok((false, Some(response.height_of_longest_chain))) - } - - async fn cancel_transaction(&mut self) { - if let Err(e) = self - .resources - .output_manager_service - .cancel_transaction(self.tx_id) - .await - { - warn!( - target: LOG_TARGET, - "Failed to Cancel outputs for Coinbase transaction (TxId: {}) with error: {:?}", self.tx_id, e - ); - } - if let Err(e) = self.resources.db.cancel_completed_transaction(self.tx_id).await { - warn!( - target: LOG_TARGET, - "Failed to Cancel Coinbase transaction (TxId: {}) with error: {:?}", self.tx_id, e - ); - } - } -} diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs index 0a6bd89fb2..fe3ac75d50 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_receive_protocol.rs @@ -40,6 +40,7 @@ use std::sync::Arc; use tari_comms::types::CommsPublicKey; use tokio::sync::{mpsc, oneshot}; +use crate::connectivity_service::WalletConnectivityInterface; use tari_core::transactions::{ transaction::Transaction, transaction_protocol::{recipient::RecipientState, sender::TransactionSenderMessage}, @@ -56,27 +57,27 @@ pub enum TransactionReceiveProtocolStage { WaitForFinalize, } -pub struct TransactionReceiveProtocol -where TBackend: TransactionBackend + 'static -{ +pub struct TransactionReceiveProtocol { id: u64, source_pubkey: CommsPublicKey, sender_message: TransactionSenderMessage, stage: TransactionReceiveProtocolStage, - resources: TransactionServiceResources, + resources: TransactionServiceResources, transaction_finalize_receiver: Option>, 
cancellation_receiver: Option>, } -impl TransactionReceiveProtocol -where TBackend: TransactionBackend + 'static +impl TransactionReceiveProtocol +where + TBackend: TransactionBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, { pub fn new( id: u64, source_pubkey: CommsPublicKey, sender_message: TransactionSenderMessage, stage: TransactionReceiveProtocolStage, - resources: TransactionServiceResources, + resources: TransactionServiceResources, transaction_finalize_receiver: mpsc::Receiver<(CommsPublicKey, TxId, Transaction)>, cancellation_receiver: oneshot::Receiver<()>, ) -> Self { diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs index 7fc6f1a3b8..c4e3db99bc 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs @@ -20,19 +20,22 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::transaction_service::{ - config::TransactionRoutingMechanism, - error::{TransactionServiceError, TransactionServiceProtocolError}, - handle::{TransactionEvent, TransactionServiceResponse}, - service::TransactionServiceResources, - storage::{ - database::TransactionBackend, - models::{CompletedTransaction, OutboundTransaction, TransactionDirection, TransactionStatus}, - }, - tasks::{ - send_finalized_transaction::send_finalized_transaction_message, - send_transaction_cancelled::send_transaction_cancelled_message, - wait_on_dial::wait_on_dial, +use crate::{ + connectivity_service::WalletConnectivityInterface, + transaction_service::{ + config::TransactionRoutingMechanism, + error::{TransactionServiceError, TransactionServiceProtocolError}, + handle::{TransactionEvent, TransactionServiceResponse}, + service::TransactionServiceResources, + storage::{ + database::TransactionBackend, + models::{CompletedTransaction, OutboundTransaction, TransactionDirection, TransactionStatus}, + }, + tasks::{ + send_finalized_transaction::send_finalized_transaction_message, + send_transaction_cancelled::send_transaction_cancelled_message, + wait_on_dial::wait_on_dial, + }, }, }; use chrono::Utc; @@ -66,9 +69,7 @@ pub enum TransactionSendProtocolStage { WaitForReply, } -pub struct TransactionSendProtocol -where TBackend: TransactionBackend + 'static -{ +pub struct TransactionSendProtocol { id: u64, dest_pubkey: CommsPublicKey, amount: MicroTari, @@ -76,18 +77,20 @@ where TBackend: TransactionBackend + 'static message: String, service_request_reply_channel: Option>>, stage: TransactionSendProtocolStage, - resources: TransactionServiceResources, + resources: TransactionServiceResources, transaction_reply_receiver: Option>, cancellation_receiver: Option>, } #[allow(clippy::too_many_arguments)] -impl TransactionSendProtocol -where TBackend: TransactionBackend + 'static +impl TransactionSendProtocol +where + TBackend: TransactionBackend + 'static, + TWalletConnectivity: 
WalletConnectivityInterface, { pub fn new( id: u64, - resources: TransactionServiceResources, + resources: TransactionServiceResources, transaction_reply_receiver: Receiver<(CommsPublicKey, RecipientSignedMessage)>, cancellation_receiver: oneshot::Receiver<()>, dest_pubkey: CommsPublicKey, diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs index dcf072c272..3f09efa2a3 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs @@ -1,4 +1,4 @@ -// Copyright 2020. The Tari Project +// Copyright 2021. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: @@ -21,567 +21,419 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{ + connectivity_service::WalletConnectivityInterface, + output_manager_service::handle::OutputManagerHandle, transaction_service::{ - error::{TransactionServiceError, TransactionServiceProtocolError}, - handle::TransactionEvent, - service::TransactionServiceResources, + config::TransactionServiceConfig, + error::{TransactionServiceError, TransactionServiceProtocolError, TransactionServiceProtocolErrorExt}, + handle::{TransactionEvent, TransactionEventSender}, storage::{ - database::TransactionBackend, + database::{TransactionBackend, TransactionDatabase}, models::{CompletedTransaction, TransactionStatus}, }, }, - types::ValidationRetryStrategy, }; -use futures::FutureExt; use log::*; -use std::{cmp, convert::TryFrom, sync::Arc, time::Duration}; -use tari_comms::{peer_manager::NodeId, types::CommsPublicKey, PeerConnection}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + sync::Arc, +}; +use tari_common_types::types::BlockHash; +use tari_comms::protocol::rpc::{RpcError::RequestFailed, RpcStatusCode::NotFound}; use tari_core::{ base_node::{ proto::wallet_rpc::{TxLocation, TxQueryBatchResponse}, rpc::BaseNodeWalletRpcClient, }, + blocks::BlockHeader, proto::{base_node::Signatures as SignaturesProto, types::Signature as SignatureProto}, }; -use tokio::{sync::broadcast, time::sleep}; +use tari_crypto::tari_utilities::{hex::Hex, Hashable}; const LOG_TARGET: &str = "wallet::transaction_service::protocols::validation_protocol"; -pub struct TransactionValidationProtocol -where TBackend: TransactionBackend + 'static -{ - id: u64, - resources: TransactionServiceResources, - timeout: Duration, - base_node_public_key: CommsPublicKey, - base_node_update_receiver: Option>, - timeout_update_receiver: Option>, - retry_strategy: ValidationRetryStrategy, - base_node_synced: bool, +pub struct TransactionValidationProtocol { + operation_id: u64, + db: TransactionDatabase, + connectivity: TWalletConnectivity, + config: TransactionServiceConfig, + 
event_publisher: TransactionEventSender, + output_manager_handle: OutputManagerHandle, } -/// This protocol will check all of the mined transactions (both valid and invalid) in the db to see if they are present -/// on the current base node. # Behaviour -/// - If a valid transaction is not present the protocol will mark the transaction as invalid -/// - If an invalid transaction is present on th ebase node it will be marked as valid -/// - If a Confirmed mined transaction is present but no longer confirmed its status will change to MinedUnconfirmed -impl TransactionValidationProtocol -where TBackend: TransactionBackend + 'static +#[allow(unused_variables)] +impl TransactionValidationProtocol +where + TTransactionBackend: TransactionBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, { pub fn new( - id: u64, - resources: TransactionServiceResources, - base_node_public_key: CommsPublicKey, - timeout: Duration, - base_node_update_receiver: broadcast::Receiver, - timeout_update_receiver: broadcast::Receiver, - retry_strategy: ValidationRetryStrategy, + operation_id: u64, + db: TransactionDatabase, + connectivity: TWalletConnectivity, + config: TransactionServiceConfig, + event_publisher: TransactionEventSender, + output_manager_handle: OutputManagerHandle, ) -> Self { Self { - id, - resources, - timeout, - base_node_public_key, - base_node_update_receiver: Some(base_node_update_receiver), - timeout_update_receiver: Some(timeout_update_receiver), - retry_strategy, - base_node_synced: true, + operation_id, + db, + connectivity, + config, + event_publisher, + output_manager_handle, } } - /// The task that defines the execution of the protocol. 
pub async fn execute(mut self) -> Result { - let mut timeout_update_receiver = self - .timeout_update_receiver - .take() - .ok_or_else(|| TransactionServiceProtocolError::new(self.id, TransactionServiceError::InvalidStateError))?; - - let mut base_node_update_receiver = self - .base_node_update_receiver - .take() - .ok_or_else(|| TransactionServiceProtocolError::new(self.id, TransactionServiceError::InvalidStateError))?; - - let mut shutdown = self.resources.shutdown_signal.clone(); - - let total_retries_str = match self.retry_strategy { - ValidationRetryStrategy::Limited(n) => format!("{}", n), - ValidationRetryStrategy::UntilSuccess => "∞".to_string(), - }; + let mut base_node_wallet_client = self + .connectivity + .obtain_base_node_wallet_rpc_client() + .await + .ok_or(TransactionServiceError::Shutdown) + .for_protocol(self.operation_id)?; + self.check_for_reorgs(&mut *base_node_wallet_client).await?; info!( - "Starting Transaction Validation Protocol (Id: {}) with {} retries", - self.id, total_retries_str + target: LOG_TARGET, + "Checking if transactions have been mined since last we checked" ); - - let mut batches = self - .get_transaction_batches() + let unmined_transactions = self + .db + .fetch_unconfirmed_transactions() .await - .map_err(|e| TransactionServiceProtocolError::new(self.id, e))?; - let mut retries = 0; + .for_protocol(self.operation_id) + .unwrap(); - // Main protocol loop - 'main: loop { - if let ValidationRetryStrategy::Limited(max_retries) = self.retry_strategy { - if retries > max_retries { - info!( - target: LOG_TARGET, - "Maximum attempts exceeded for Transaction Validation Protocol (Id: {})", self.id - ); - // If this retry is not because of a !base_node_synced then we emit this error event, if the retries - // are due to a base node NOT being synced then we rely on the TransactionValidationDelayed event - // because we were actually able to connect - if self.base_node_synced { - let _ = self - .resources - .event_publisher - 
.send(Arc::new(TransactionEvent::TransactionValidationFailure(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - } - return Err(TransactionServiceProtocolError::new( - self.id, - TransactionServiceError::MaximumAttemptsExceeded, - )); - } - } - // Assume base node is synced until we achieve a connection and it tells us it is not synced - self.base_node_synced = true; - - let base_node_node_id = NodeId::from_key(&self.base_node_public_key); - let mut connection: Option = None; - - let delay = sleep(self.timeout); - - debug!( + for batch in unmined_transactions.chunks(self.config.max_tx_query_batch_size) { + let (mined, unmined, tip_info) = self + .query_base_node_for_transactions(batch, &mut *base_node_wallet_client) + .await + .for_protocol(self.operation_id)?; + info!( target: LOG_TARGET, - "Connecting to Base Node (Public Key: {})", self.base_node_public_key, + "Base node returned {} as mined and {} as unmined", + mined.len(), + unmined.len() ); - tokio::select! 
{ - dial_result = self.resources.connectivity_manager.dial_peer(base_node_node_id.clone()).fuse() => { - match dial_result { - Ok(base_node_connection) => { - connection = Some(base_node_connection); - }, - Err(e) => { - info!(target: LOG_TARGET, "Problem connecting to base node: {} for Transaction Validation Protocol", e); - }, - } - }, - new_base_node = base_node_update_receiver.recv() => { - - match new_base_node { - Ok(_) => { - info!(target: LOG_TARGET, "Aborting Transaction Validation Protocol as new Base node is set"); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationAborted(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - return Ok(self.id); - }, - Err(e) => { - trace!( + for (tx, mined_height, mined_in_block, num_confirmations) in &mined { + info!(target: LOG_TARGET, "Updating transaction {} as mined", tx.tx_id); + self.update_transaction_as_mined(tx, mined_in_block, *mined_height, *num_confirmations) + .await?; + } + if let Some((tip_height, tip_block)) = tip_info { + for tx in &unmined { + // Treat coinbases separately + if tx.is_coinbase() { + if tx.coinbase_block_height.unwrap_or_default() <= tip_height { + info!(target: LOG_TARGET, "Updated coinbase {} as abandoned", tx.tx_id); + self.update_coinbase_as_abandoned( + tx, + &tip_block, + tip_height, + tip_height.saturating_sub(tx.coinbase_block_height.unwrap_or_default()), + ) + .await?; + } else { + info!( target: LOG_TARGET, - "Transaction Validation protocol event 'base_node_update' triggered with error: {:?}", - - e, + "Coinbase not found, but it is for a block that is not yet in the chain. 
Coinbase \ + height: {}, tip height:{}", + tx.coinbase_block_height.unwrap_or_default(), + tip_height ); } + } else { + info!(target: LOG_TARGET, "Updated transaction {} as unmined", tx.tx_id); + self.update_transaction_as_unmined(tx).await?; } } - updated_timeout = timeout_update_receiver.recv() => { - match updated_timeout { - Ok(to) => { - self.timeout = to; - info!( - target: LOG_TARGET, - "Transaction Validation protocol timeout updated to {:?}", self.timeout - ); - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "Transaction Validation protocol event 'updated_timeout' triggered with error: {:?}", - - e, - ); - } - } - }, - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "Transaction Validation Protocol shutting down because it received the shutdown signal"); - return Err(TransactionServiceProtocolError::new(self.id, TransactionServiceError::Shutdown)) - }, } + } + self.publish_event(TransactionEvent::TransactionValidationSuccess(self.operation_id)); + Ok(self.operation_id) + } - let mut base_node_connection = match connection { - None => { - tokio::select! 
{ - _ = delay.fuse() => { - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationTimedOut(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - retries += 1; - continue; - }, - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "Transaction Validation Protocol shutting down because it received the shutdown signal"); - return Err(TransactionServiceProtocolError::new(self.id, TransactionServiceError::Shutdown)) - }, - } - }, - Some(c) => c, - }; + fn publish_event(&self, event: TransactionEvent) { + if let Err(e) = self.event_publisher.send(Arc::new(event)) { + debug!( + target: LOG_TARGET, + "Error sending event because there are no subscribers: {:?}", e + ); + } + } - let mut client = match base_node_connection - .connect_rpc_using_builder( - BaseNodeWalletRpcClient::builder() - .with_deadline(self.timeout) - .with_handshake_timeout(self.timeout), - ) + async fn check_for_reorgs( + &mut self, + client: &mut BaseNodeWalletRpcClient, + ) -> Result<(), TransactionServiceProtocolError> { + info!( + target: LOG_TARGET, + "Checking last mined transactions to see if the base node has re-orged" + ); + while let Some(last_mined_transaction) = self + .db + .fetch_last_mined_transaction() + .await + .for_protocol(self.operation_id)? 
+ { + let mined_height = last_mined_transaction + .mined_height + .ok_or_else(|| { + TransactionServiceError::ServiceError( + "fetch_last_mined_transaction() should return a transaction with a mined_height".to_string(), + ) + }) + .for_protocol(self.operation_id)?; + let mined_in_block_hash = last_mined_transaction + .mined_in_block + .clone() + .ok_or_else(|| { + TransactionServiceError::ServiceError( + "fetch_last_mined_transaction() should return a transaction with a mined_in_block hash" + .to_string(), + ) + }) + .for_protocol(self.operation_id)?; + + let block_at_height = self + .get_base_node_block_at_height(mined_height, client) .await - { - Ok(c) => c, - Err(e) => { - warn!(target: LOG_TARGET, "Problem establishing RPC connection: {}", e); - delay.await; - retries += 1; - continue; - }, - }; - - debug!(target: LOG_TARGET, "RPC client connected"); - - 'per_tx: loop { - let batch = if let Some(b) = batches.pop() { - b - } else { - break 'main; - }; - let delay = sleep(self.timeout); - tokio::select! 
{ - new_base_node = base_node_update_receiver.recv() => { - match new_base_node { - Ok(_) => { - info!(target: LOG_TARGET, "Aborting Transaction Validation Protocol as new Base node is set"); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationAborted(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - return Ok(self.id); - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "Transaction Validation protocol event 'base_node_update' triggered with error: {:?}", - - e, - ); - } - } - }, - result = self.transaction_query_batch(batch.clone(), &mut client).fuse() => { - match result { - Ok(synced) => { - self.base_node_synced = synced; - if !synced { - info!(target: LOG_TARGET, "Base Node reports not being synced, will retry."); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationDelayed(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - delay.await; - retries += 1; - batches = self.get_transaction_batches().await.map_err(|e| TransactionServiceProtocolError::new(self.id, e))?; - break 'per_tx; - } - }, - Err(TransactionServiceError::RpcError(e)) => { - warn!(target: LOG_TARGET, "Error with RPC Client: {}. 
Retrying RPC client connection.", e); - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationTimedOut(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event {:?}, because there are no subscribers.", - e.0 - ); - e - }); - delay.await; - batches.push(batch); - retries += 1; - break 'per_tx; - } - Err(e) => { - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationFailure(self.id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e - ); - e - }); - return Err(TransactionServiceProtocolError::new(self.id,e)); - }, - } - }, - updated_timeout = timeout_update_receiver.recv() => { - match updated_timeout { - Ok(to) => { - self.timeout = to; - info!( - target: LOG_TARGET, - "Transaction Validation protocol timeout updated to {:?}", self.timeout - ); - }, - Err(e) => { - trace!( - target: LOG_TARGET, - "Transaction Validation protocol event 'updated_timeout' triggered with error: {:?}", - - e, - ); - } - } - }, - _ = shutdown.wait() => { - info!(target: LOG_TARGET, "Transaction Validation Protocol shutting down because it received the shutdown signal"); - return Err(TransactionServiceProtocolError::new(self.id, TransactionServiceError::Shutdown)) - }, - } - } - } + .for_protocol(self.operation_id)?; - let _ = self - .resources - .event_publisher - .send(Arc::new(TransactionEvent::TransactionValidationSuccess(self.id))) - .map_err(|e| { - trace!( + if block_at_height.is_none() || block_at_height.unwrap() != mined_in_block_hash { + // Chain has reorged since we last + warn!( target: LOG_TARGET, - "Error sending event because there are no subscribers: {:?}", - e + "The block that transaction (excess:{}) was in has been reorged out, will try to find this \ + transaction again, but these funds have potentially been re-orged out of the chain", + last_mined_transaction + .transaction + .body + 
.kernels() + .first() + .map(|k| k.excess.to_hex()) + .unwrap() ); - e - }); - - Ok(self.id) + self.update_transaction_as_unmined(&last_mined_transaction).await?; + } else { + info!( + target: LOG_TARGET, + "Last mined transaction is still in the block chain according to base node." + ); + break; + } + } + Ok(()) } - /// Attempt to query the location of the transaction from the base node via RPC. - /// # Returns: - /// `Ok(true)` => Transaction was successfully mined and confirmed - /// `Ok(false)` => There was a problem with the RPC call or the transaction is not mined but still in the mempool - /// and this should be retried `Err(_)` => The transaction was rejected by the base node and the protocol should - /// end. - async fn transaction_query_batch( - &mut self, - batch: Vec, - client: &mut BaseNodeWalletRpcClient, - ) -> Result { - let mut batch_signatures = Vec::new(); + async fn query_base_node_for_transactions( + &self, + batch: &[CompletedTransaction], + base_node_client: &mut BaseNodeWalletRpcClient, + ) -> Result< + ( + Vec<(CompletedTransaction, u64, BlockHash, u64)>, + Vec, + Option<(u64, BlockHash)>, + ), + TransactionServiceError, + > { + let mut mined = vec![]; + let mut unmined = vec![]; + + let mut batch_signatures = HashMap::new(); for tx in batch.iter() { - let signature = tx - .transaction - .first_kernel_excess_sig() - .ok_or(TransactionServiceError::InvalidTransaction)?; - batch_signatures.push(SignatureProto::from(signature.clone())); + // Imported transactions do not have a signature + if let Some(sig) = tx.transaction.first_kernel_excess_sig() { + batch_signatures.insert(sig.clone(), tx); + } + } + + if batch_signatures.is_empty() { + info!(target: LOG_TARGET, "No transactions needed to query with the base node"); + return Ok((mined, unmined, None)); } - let batch_response = client - .transaction_batch_query(SignaturesProto { sigs: batch_signatures }) + info!( + target: LOG_TARGET, + "Asking base node for location of {} transactions by 
excess signature", + batch.len() + ); + + let batch_response = base_node_client + .transaction_batch_query(SignaturesProto { + sigs: batch_signatures + .keys() + .map(|s| SignatureProto::from(s.clone())) + .collect(), + }) .await?; if !batch_response.is_synced { - return Ok(false); + info!( + target: LOG_TARGET, + "Base Node reports not being synced, aborting transaction validation" + ); + return Err(TransactionServiceError::BaseNodeNotSynced); } for response_proto in batch_response.responses { let response = TxQueryBatchResponse::try_from(response_proto) .map_err(TransactionServiceError::ProtobufConversionError)?; - - if let Some(queried_tx) = batch.iter().find(|tx| { - if let Some(sig) = tx.transaction.first_kernel_excess_sig() { - sig == &response.signature + let sig = response.signature; + if let Some(completed_tx) = batch_signatures.get(&sig) { + if response.location == TxLocation::Mined { + mined.push(( + (*completed_tx).clone(), + response.block_height, + response.block_hash.unwrap(), + response.confirmations, + )); } else { - false + unmined.push((*completed_tx).clone()); } - }) { - // Mined? 
- if response.location == TxLocation::Mined { - if !queried_tx.valid { - info!( - target: LOG_TARGET, - "Transaction (TxId: {}) is VALID according to base node, status will be updated", - queried_tx.tx_id - ); - if let Err(e) = self - .resources - .db - .set_completed_transaction_validity(queried_tx.tx_id, true) - .await - { - warn!( - target: LOG_TARGET, - "Error setting transaction (TxId: {}) validity: {}", queried_tx.tx_id, e - ); - } - } - if response.confirmations >= self.resources.config.num_confirmations_required as u64 { - if queried_tx.status == TransactionStatus::MinedUnconfirmed { - info!( - target: LOG_TARGET, - "Transaction (TxId: {}) is MINED and CONFIRMED according to base node, status will be \ - updated", - queried_tx.tx_id - ); - if let Err(e) = self - .resources - .db - .confirm_broadcast_or_coinbase_transaction(queried_tx.tx_id) - .await - { - warn!( - target: LOG_TARGET, - "Error confirming mined transaction (TxId: {}): {}", queried_tx.tx_id, e - ); - } - if let Err(e) = self - .resources - .output_manager_service - .confirm_transaction( - queried_tx.tx_id, - queried_tx.transaction.body.inputs().clone(), - queried_tx.transaction.body.outputs().clone(), - ) - .await - { - debug!( - target: LOG_TARGET, - "Error confirming outputs transaction (TxId: {}) that was validated with new base \ - node: {}. 
Usually means this transaction was confirmed in the past", - queried_tx.tx_id, - e - ); - } - } - } else if queried_tx.status == TransactionStatus::MinedConfirmed { - info!( - target: LOG_TARGET, - "Transaction (TxId: {}) is MINED but UNCONFIRMED according to base node, status will be \ - updated", - queried_tx.tx_id - ); - if let Err(e) = self.resources.db.unconfirm_mined_transaction(queried_tx.tx_id).await { - warn!( - target: LOG_TARGET, - "Error unconfirming mined transaction (TxId: {}): {}", queried_tx.tx_id, e - ); + } + } + Ok(( + mined, + unmined, + Some(( + batch_response.height_of_longest_chain, + batch_response.tip_hash.ok_or_else(|| { + TransactionServiceError::ProtobufConversionError("Missing `tip_hash` field".to_string()) + })?, + )), + )) + } + + async fn get_base_node_block_at_height( + &mut self, + height: u64, + client: &mut BaseNodeWalletRpcClient, + ) -> Result, TransactionServiceError> { + let result = match client.get_header_by_height(height).await { + Ok(r) => r, + Err(rpc_error) => { + warn!(target: LOG_TARGET, "Error asking base node for header:{}", rpc_error); + match &rpc_error { + RequestFailed(status) => { + if status.status_code() == NotFound { + return Ok(None); + } else { + return Err(rpc_error.into()); } - } - } else if queried_tx.valid { - info!( - target: LOG_TARGET, - "Transaction (TxId: {}) is INVALID according to base node, status will be updated", - queried_tx.tx_id - ); - if let Err(e) = self - .resources - .db - .set_completed_transaction_validity(queried_tx.tx_id, false) - .await - { - warn!( - target: LOG_TARGET, - "Error setting transaction (TxId: {}) validity: {}", queried_tx.tx_id, e - ); - } + }, + _ => { + return Err(rpc_error.into()); + }, } - } else { - debug!( + }, + }; + + let block_header: BlockHeader = result.try_into().map_err(|s| { + TransactionServiceError::InvalidMessageError(format!("Could not convert block header: {}", s)) + })?; + Ok(Some(block_header.hash())) + } + + #[allow(clippy::ptr_arg)] + async fn 
update_transaction_as_mined( + &mut self, + tx: &CompletedTransaction, + mined_in_block: &BlockHash, + mined_height: u64, + num_confirmations: u64, + ) -> Result<(), TransactionServiceProtocolError> { + self.db + .set_transaction_mined_height( + tx.tx_id, + true, + mined_height, + mined_in_block.clone(), + num_confirmations, + num_confirmations >= self.config.num_confirmations_required, + ) + .await + .for_protocol(self.operation_id)?; + + if num_confirmations >= self.config.num_confirmations_required { + self.publish_event(TransactionEvent::TransactionMined { + tx_id: tx.tx_id, + is_valid: true, + }) + } else { + self.publish_event(TransactionEvent::TransactionMinedUnconfirmed { + tx_id: tx.tx_id, + num_confirmations, + is_valid: true, + }) + } + + if tx.status == TransactionStatus::Coinbase { + if let Err(e) = self.output_manager_handle.set_coinbase_abandoned(tx.tx_id, false).await { + warn!( target: LOG_TARGET, - "Could not find transaction corresponding to returned query response" + "Could not mark coinbase output for TxId: {} as not abandoned: {}", tx.tx_id, e ); - } + }; } - Ok(true) + + Ok(()) } - /// Get completed transactions from db and sort the mined transactions into batches - async fn get_transaction_batches(&self) -> Result>, TransactionServiceError> { - let mut completed_txs: Vec = self - .resources - .db - .get_completed_transactions() - .await? 
- .values() - .filter(|tx| { - tx.status == TransactionStatus::MinedUnconfirmed || tx.status == TransactionStatus::MinedConfirmed - }) - .cloned() - .collect(); - // Determine how many rounds of base node request we need to query all the transactions in batches of - // max_tx_query_batch_size - let num_batches = - ((completed_txs.len() as f32) / (self.resources.config.max_tx_query_batch_size as f32 + 0.1)) as usize + 1; + #[allow(clippy::ptr_arg)] + async fn update_coinbase_as_abandoned( + &mut self, + tx: &CompletedTransaction, + mined_in_block: &BlockHash, + mined_height: u64, + num_confirmations: u64, + ) -> Result<(), TransactionServiceProtocolError> { + self.db + .set_transaction_mined_height( + tx.tx_id, + false, + mined_height, + mined_in_block.clone(), + num_confirmations, + num_confirmations >= self.config.num_confirmations_required, + ) + .await + .for_protocol(self.operation_id)?; - let mut batches: Vec> = Vec::new(); - for _b in 0..num_batches { - let mut batch = Vec::new(); - for tx in - completed_txs.drain(..cmp::min(self.resources.config.max_tx_query_batch_size, completed_txs.len())) - { - batch.push(tx); - } - if !batch.is_empty() { - batches.push(batch); - } + if let Err(e) = self.output_manager_handle.set_coinbase_abandoned(tx.tx_id, true).await { + warn!( + target: LOG_TARGET, + "Could not mark coinbase output for TxId: {} as abandoned: {}", tx.tx_id, e + ); + }; + + self.publish_event(TransactionEvent::TransactionCancelled(tx.tx_id)); + + Ok(()) + } + + async fn update_transaction_as_unmined( + &mut self, + tx: &CompletedTransaction, + ) -> Result<(), TransactionServiceProtocolError> { + self.db + .set_transaction_as_unmined(tx.tx_id) + .await + .for_protocol(self.operation_id)?; + + if tx.status == TransactionStatus::Coinbase { + if let Err(e) = self.output_manager_handle.set_coinbase_abandoned(tx.tx_id, false).await { + warn!( + target: LOG_TARGET, + "Could not mark coinbase output for TxId: {} as not abandoned: {}", tx.tx_id, e + ); + }; } - 
Ok(batches) + + self.publish_event(TransactionEvent::TransactionBroadcast(tx.tx_id)); + Ok(()) } } diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index 85685e8a96..9ff5169477 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -21,6 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::{ + base_node_service::handle::{BaseNodeEvent, BaseNodeServiceHandle}, + connectivity_service::WalletConnectivityInterface, output_manager_service::{handle::OutputManagerHandle, TxId}, storage::database::{WalletBackend, WalletDatabase}, transaction_service::{ @@ -29,7 +31,6 @@ use crate::{ handle::{TransactionEvent, TransactionEventSender, TransactionServiceRequest, TransactionServiceResponse}, protocols::{ transaction_broadcast_protocol::TransactionBroadcastProtocol, - transaction_coinbase_monitoring_protocol::TransactionCoinbaseMonitoringProtocol, transaction_receive_protocol::{TransactionReceiveProtocol, TransactionReceiveProtocolStage}, transaction_send_protocol::{TransactionSendProtocol, TransactionSendProtocolStage}, transaction_validation_protocol::TransactionValidationProtocol, @@ -44,7 +45,8 @@ use crate::{ send_transaction_reply::send_transaction_reply, }, }, - types::{HashDigest, ValidationRetryStrategy}, + types::HashDigest, + util::watch::Watch, utxo_scanner_service::utxo_scanning::RECOVERY_KEY, }; use chrono::{NaiveDateTime, Utc}; @@ -59,7 +61,7 @@ use std::{ time::{Duration, Instant}, }; use tari_common_types::types::PrivateKey; -use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeIdentity, types::CommsPublicKey}; +use tari_comms::{peer_manager::NodeIdentity, types::CommsPublicKey}; use tari_comms_dht::outbound::OutboundMessageRequester; use tari_core::{ crypto::keys::SecretKey, @@ -82,7 +84,7 @@ use tari_p2p::domain_message::DomainMessage; use 
tari_service_framework::{reply_channel, reply_channel::Receiver}; use tari_shutdown::ShutdownSignal; use tokio::{ - sync::{broadcast, mpsc, mpsc::Sender, oneshot}, + sync::{mpsc, mpsc::Sender, oneshot}, task::JoinHandle, }; @@ -108,11 +110,9 @@ pub struct TransactionService< BNResponseStream, TBackend, TTxCancelledStream, - WBackend, -> where - TBackend: TransactionBackend + 'static, - WBackend: WalletBackend + 'static, -{ + TWalletBackend, + TWalletConnectivity, +> { config: TransactionServiceConfig, db: TransactionDatabase, output_manager_service: OutputManagerHandle, @@ -126,23 +126,30 @@ pub struct TransactionService< >, event_publisher: TransactionEventSender, node_identity: Arc, - base_node_public_key: Option, - resources: TransactionServiceResources, + resources: TransactionServiceResources, pending_transaction_reply_senders: HashMap>, base_node_response_senders: HashMap)>, send_transaction_cancellation_senders: HashMap>, finalized_transaction_senders: HashMap>, receiver_transaction_cancellation_senders: HashMap>, active_transaction_broadcast_protocols: HashSet, - active_coinbase_monitoring_protocols: HashSet, - timeout_update_publisher: broadcast::Sender, - base_node_update_publisher: broadcast::Sender, - power_mode: PowerMode, - wallet_db: WalletDatabase, + timeout_update_watch: Watch, + wallet_db: WalletDatabase, + base_node_service: BaseNodeServiceHandle, + last_seen_tip_height: Option, } #[allow(clippy::too_many_arguments)] -impl +impl< + TTxStream, + TTxReplyStream, + TTxFinalizedStream, + BNResponseStream, + TBackend, + TTxCancelledStream, + TWalletBackend, + TWalletConnectivity, + > TransactionService< TTxStream, TTxReplyStream, @@ -150,7 +157,8 @@ impl where TTxStream: Stream>, @@ -159,12 +167,13 @@ where BNResponseStream: Stream>, TTxCancelledStream: Stream>, TBackend: TransactionBackend + 'static, - WBackend: WalletBackend + 'static, + TWalletBackend: WalletBackend + 'static, + TWalletConnectivity: WalletConnectivityInterface, { pub fn new( 
config: TransactionServiceConfig, db: TransactionDatabase, - wallet_db: WalletDatabase, + wallet_db: WalletDatabase, request_stream: Receiver< TransactionServiceRequest, Result, @@ -176,11 +185,12 @@ where transaction_cancelled_stream: TTxCancelledStream, output_manager_service: OutputManagerHandle, outbound_message_service: OutboundMessageRequester, - connectivity_manager: ConnectivityRequester, + connectivity: TWalletConnectivity, event_publisher: TransactionEventSender, node_identity: Arc, factories: CryptoFactories, shutdown_signal: ShutdownSignal, + base_node_service: BaseNodeServiceHandle, ) -> Self { // Collect the resources that all protocols will need so that they can be neatly cloned as the protocols are // spawned. @@ -188,7 +198,7 @@ where db: db.clone(), output_manager_service: output_manager_service.clone(), outbound_message_service, - connectivity_manager, + connectivity, event_publisher: event_publisher.clone(), node_identity: node_identity.clone(), factories, @@ -196,10 +206,14 @@ where shutdown_signal, }; - let (timeout_update_publisher, _) = broadcast::channel(20); - let (base_node_update_publisher, _) = broadcast::channel(20); + let power_mode = PowerMode::default(); + let timeout = match power_mode { + PowerMode::Low => config.low_power_polling_timeout, + PowerMode::Normal => config.broadcast_monitoring_timeout, + }; + let timeout_update_watch = Watch::new(timeout); - TransactionService { + Self { config, db, output_manager_service, @@ -211,7 +225,6 @@ where request_stream: Some(request_stream), event_publisher, node_identity, - base_node_public_key: None, resources, pending_transaction_reply_senders: HashMap::new(), base_node_response_senders: HashMap::new(), @@ -219,11 +232,10 @@ where finalized_transaction_senders: HashMap::new(), receiver_transaction_cancellation_senders: HashMap::new(), active_transaction_broadcast_protocols: HashSet::new(), - active_coinbase_monitoring_protocols: HashSet::new(), - timeout_update_publisher, - 
base_node_update_publisher, - power_mode: PowerMode::Normal, + timeout_update_watch, + base_node_service, wallet_db, + last_seen_tip_height: None, } } @@ -280,17 +292,22 @@ where JoinHandle>, > = FuturesUnordered::new(); - let mut coinbase_transaction_monitoring_protocol_handles: FuturesUnordered< - JoinHandle>, - > = FuturesUnordered::new(); - let mut transaction_validation_protocol_handles: FuturesUnordered< JoinHandle>, > = FuturesUnordered::new(); + let mut base_node_service_event_stream = self.base_node_service.get_event_stream(); + info!(target: LOG_TARGET, "Transaction Service started"); loop { tokio::select! { + // Base Node Monitoring Service event + event = base_node_service_event_stream.recv() => { + match event { + Ok(msg) => self.handle_base_node_service_event(msg, &mut transaction_validation_protocol_handles).await, + Err(e) => debug!(target: LOG_TARGET, "Lagging read on base node event broadcast channel: {}", e), + }; + }, //Incoming request Some(request_context) = request_stream.next() => { // TODO: Remove time measurements; this is to aid in system testing only @@ -302,18 +319,16 @@ where &mut send_transaction_protocol_handles, &mut receive_transaction_protocol_handles, &mut transaction_broadcast_protocol_handles, - &mut coinbase_transaction_monitoring_protocol_handles, &mut transaction_validation_protocol_handles, reply_tx, ).await.map_err(|e| { warn!(target: LOG_TARGET, "Error handling request: {:?}", e); e }); - let finish = Instant::now(); trace!(target: LOG_TARGET, "{}, processed in {}ms", event, - finish.duration_since(start).as_millis() + start.elapsed().as_millis() ); }, // Incoming Transaction messages from the Comms layer @@ -339,11 +354,10 @@ where } _ => (), } - let finish = Instant::now(); trace!(target: LOG_TARGET, "Handling Transaction Message, Trace: {}, processed in {}ms", msg.dht_header.message_tag, - finish.duration_since(start).as_millis(), + start.elapsed().as_millis(), ); }, // Incoming Transaction Reply messages from the 
Comms layer @@ -370,11 +384,10 @@ where }, Ok(_) => (), } - let finish = Instant::now(); trace!(target: LOG_TARGET, "Handling Transaction Reply Message, Trace: {}, processed in {}ms", msg.dht_header.message_tag, - finish.duration_since(start).as_millis(), + start.elapsed().as_millis(), ); }, // Incoming Finalized Transaction messages from the Comms layer @@ -408,11 +421,10 @@ where }, Ok(_) => () } - let finish = Instant::now(); trace!(target: LOG_TARGET, "Handling Transaction Finalized Message, Trace: {}, processed in {}ms", msg.dht_header.message_tag.as_value(), - finish.duration_since(start).as_millis(), + start.elapsed().as_millis(), ); }, // Incoming messages from the Comms layer @@ -427,11 +439,10 @@ where msg.dht_header.message_tag.as_value()); e }); - let finish = Instant::now(); trace!(target: LOG_TARGET, "Handling Base Node Response, Trace: {}, processed in {}ms", msg.dht_header.message_tag, - finish.duration_since(start).as_millis(), + start.elapsed().as_millis(), ); } // Incoming messages from the Comms layer @@ -443,11 +454,10 @@ where if let Err(e) = self.handle_transaction_cancelled_message(origin_public_key, inner_msg, ).await { warn!(target: LOG_TARGET, "Error handing Transaction Cancelled Message: {:?}", e); } - let finish = Instant::now(); trace!(target: LOG_TARGET, "Handling Transaction Cancelled message, Trace: {}, processed in {}ms", msg.dht_header.message_tag, - finish.duration_since(start).as_millis(), + start.elapsed().as_millis(), ); } Some(join_result) = send_transaction_protocol_handles.next() => { @@ -477,18 +487,10 @@ where Err(e) => error!(target: LOG_TARGET, "Error resolving Broadcast Protocol: {:?}", e), }; } - Some(join_result) = coinbase_transaction_monitoring_protocol_handles.next() => { - trace!(target: LOG_TARGET, "Coinbase transaction monitoring protocol has ended with result {:?}", - join_result); - match join_result { - Ok(join_result_inner) => self.complete_coinbase_transaction_monitoring_protocol(join_result_inner), - 
Err(e) => error!(target: LOG_TARGET, "Error resolving Coinbase Monitoring protocol: {:?}", e), - }; - } Some(join_result) = transaction_validation_protocol_handles.next() => { trace!(target: LOG_TARGET, "Transaction Validation protocol has ended with result {:?}", join_result); match join_result { - Ok(join_result_inner) => self.complete_transaction_validation_protocol(join_result_inner).await, + Ok(join_result_inner) => self.complete_transaction_validation_protocol(join_result_inner, &mut transaction_broadcast_protocol_handles,).await, Err(e) => error!(target: LOG_TARGET, "Error resolving Transaction Validation protocol: {:?}", e), }; } @@ -513,9 +515,6 @@ where transaction_broadcast_join_handles: &mut FuturesUnordered< JoinHandle>, >, - coinbase_monitoring_join_handles: &mut FuturesUnordered< - JoinHandle>, - >, transaction_validation_join_handles: &mut FuturesUnordered< JoinHandle>, >, @@ -590,10 +589,6 @@ where TransactionServiceRequest::GetAnyTransaction(tx_id) => Ok(TransactionServiceResponse::AnyTransaction( Box::new(self.db.get_any_transaction(tx_id).await?), )), - TransactionServiceRequest::SetBaseNodePublicKey(public_key) => { - self.set_base_node_public_key(public_key).await; - Ok(TransactionServiceResponse::BaseNodePublicKeySet) - }, TransactionServiceRequest::ImportUtxo(value, source_public_key, message, maturity) => self .add_utxo_import_transaction(value, source_public_key, message, maturity) .await @@ -603,7 +598,7 @@ where .await .map(|_| TransactionServiceResponse::TransactionSubmitted), TransactionServiceRequest::GenerateCoinbaseTransaction(reward, fees, block_height) => self - .generate_coinbase_transaction(reward, fees, block_height, coinbase_monitoring_join_handles) + .generate_coinbase_transaction(reward, fees, block_height) .await .map(|tx| TransactionServiceResponse::CoinbaseTransactionGenerated(Box::new(tx))), TransactionServiceRequest::SetLowPowerMode => { @@ -634,7 +629,7 @@ where .await .map(|_| 
TransactionServiceResponse::ProtocolsRestarted), TransactionServiceRequest::RestartBroadcastProtocols => self - .restart_broadcast_protocols(transaction_broadcast_join_handles, coinbase_monitoring_join_handles) + .restart_broadcast_protocols(transaction_broadcast_join_handles) .await .map(|_| TransactionServiceResponse::ProtocolsRestarted), TransactionServiceRequest::GetNumConfirmationsRequired => Ok( @@ -644,14 +639,10 @@ where self.resources.config.num_confirmations_required = number; Ok(TransactionServiceResponse::NumConfirmationsSet) }, - TransactionServiceRequest::ValidateTransactions(retry_strategy) => self - .start_transaction_validation_protocol(retry_strategy, transaction_validation_join_handles) + TransactionServiceRequest::ValidateTransactions => self + .start_transaction_validation_protocol(transaction_validation_join_handles) .await .map(TransactionServiceResponse::ValidationStarted), - TransactionServiceRequest::SetCompletedTransactionValidity(tx_id, validity) => self - .set_completed_transaction_validity(tx_id, validity) - .await - .map(|_| TransactionServiceResponse::CompletedTransactionValidityChanged), }; // If the individual handlers did not already send the API response then do it here. 
@@ -664,6 +655,35 @@ where Ok(()) } + async fn handle_base_node_service_event( + &mut self, + event: Arc, + transaction_validation_join_handles: &mut FuturesUnordered< + JoinHandle>, + >, + ) { + match (*event).clone() { + BaseNodeEvent::BaseNodeStateChanged(state) => { + let trigger_validation = match (self.last_seen_tip_height, state.chain_metadata.clone()) { + (Some(last_seen_tip_height), Some(cm)) => last_seen_tip_height != cm.height_of_longest_chain(), + (None, _) => true, + _ => false, + }; + + if trigger_validation { + let _ = self + .start_transaction_validation_protocol(transaction_validation_join_handles) + .await + .map_err(|e| { + warn!(target: LOG_TARGET, "Error validating txos: {:?}", e); + e + }); + } + self.last_seen_tip_height = state.chain_metadata.map(|cm| cm.height_of_longest_chain()); + }, + } + } + /// Sends a new transaction to a recipient /// # Arguments /// 'dest_pubkey': The Comms pubkey of the recipient node @@ -1113,18 +1133,18 @@ where Ok(()) } - async fn set_completed_transaction_validity( - &mut self, - tx_id: TxId, - valid: bool, - ) -> Result<(), TransactionServiceError> { - self.resources - .db - .set_completed_transaction_validity(tx_id, valid) - .await?; - - Ok(()) - } + // async fn set_completed_transaction_validity( + // &mut self, + // tx_id: TxId, + // valid: bool, + // ) -> Result<(), TransactionServiceError> { + // self.resources + // .db + // .set_completed_transaction_validity(tx_id, valid) + // .await?; + // + // Ok(()) + // } /// Handle a Transaction Cancelled message received from the Comms layer pub async fn handle_transaction_cancelled_message( @@ -1364,7 +1384,7 @@ where ); self.db.uncancel_pending_transaction(tx_id).await?; self.output_manager_service - .reinstate_cancelled_inbound_transaction(tx_id) + .reinstate_cancelled_inbound_transaction_outputs(tx_id) .await?; self.restart_receive_transaction_protocol(tx_id, source_pubkey.clone(), join_handles); @@ -1496,25 +1516,6 @@ where } } - /// Add a base node public 
key to the list that will be used to broadcast transactions and monitor the base chain - /// for the presence of spendable outputs. If this is the first time the base node public key is set do the initial - /// mempool broadcast - async fn set_base_node_public_key(&mut self, base_node_public_key: CommsPublicKey) { - info!( - target: LOG_TARGET, - "Setting base node public key {} for service", base_node_public_key - ); - - self.base_node_public_key = Some(base_node_public_key.clone()); - if let Err(e) = self.base_node_update_publisher.send(base_node_public_key) { - trace!( - target: LOG_TARGET, - "No subscribers to receive base node public key update: {:?}", - e - ); - } - } - async fn restart_transaction_negotiation_protocols( &mut self, send_transaction_join_handles: &mut FuturesUnordered>>, @@ -1548,34 +1549,25 @@ where async fn start_transaction_validation_protocol( &mut self, - retry_strategy: ValidationRetryStrategy, join_handles: &mut FuturesUnordered>>, ) -> Result { - if self.base_node_public_key.is_none() { + if !self.connectivity().is_base_node_set() { return Err(TransactionServiceError::NoBaseNodeKeysProvided); } - trace!(target: LOG_TARGET, "Starting transaction validation protocols"); + trace!(target: LOG_TARGET, "Starting transaction validation protocol"); let id = OsRng.next_u64(); - let timeout = match self.power_mode { - PowerMode::Normal => self.config.broadcast_monitoring_timeout, - PowerMode::Low => self.config.low_power_polling_timeout, - }; - match self.base_node_public_key.clone() { - None => return Err(TransactionServiceError::NoBaseNodeKeysProvided), - Some(pk) => { - let protocol = TransactionValidationProtocol::new( - id, - self.resources.clone(), - pk, - timeout, - self.base_node_update_publisher.subscribe(), - self.timeout_update_publisher.subscribe(), - retry_strategy, - ); - let join_handle = tokio::spawn(protocol.execute()); - join_handles.push(join_handle); - }, - } + + let protocol = TransactionValidationProtocol::new( + id, + 
self.resources.db.clone(), + self.resources.connectivity.clone(), + self.resources.config.clone(), + self.event_publisher.clone(), + self.resources.output_manager_service.clone(), + ); + + let join_handle = tokio::spawn(protocol.execute()); + join_handles.push(join_handle); Ok(id) } @@ -1584,6 +1576,9 @@ where async fn complete_transaction_validation_protocol( &mut self, join_result: Result, + transaction_broadcast_join_handles: &mut FuturesUnordered< + JoinHandle>, + >, ) { match join_result { Ok(id) => { @@ -1591,6 +1586,11 @@ where target: LOG_TARGET, "Transaction Validation Protocol (Id: {}) completed successfully", id ); + // Restart broadcast protocols for any transactions that were found to be no longer mined. + let _ = self + .restart_broadcast_protocols(transaction_broadcast_join_handles) + .await + .map_err(|e| warn!(target: LOG_TARGET, "Error restarting broadcast protocols: {}", e)); }, Err(TransactionServiceProtocolError { id, error }) => { if let TransactionServiceError::Shutdown = error { @@ -1610,11 +1610,8 @@ where async fn restart_broadcast_protocols( &mut self, broadcast_join_handles: &mut FuturesUnordered>>, - coinbase_transaction_join_handles: &mut FuturesUnordered< - JoinHandle>, - >, ) -> Result<(), TransactionServiceError> { - if self.base_node_public_key.is_none() { + if !self.connectivity().is_base_node_set() { return Err(TransactionServiceError::NoBaseNodeKeysProvided); } @@ -1629,16 +1626,6 @@ where resp })?; - self.restart_chain_monitoring_for_all_coinbase_transactions(coinbase_transaction_join_handles) - .await - .map_err(|resp| { - error!( - target: LOG_TARGET, - "Error restarting protocols for all coinbase transactions: {:?}", resp - ); - resp - })?; - Ok(()) } @@ -1656,32 +1643,31 @@ where { return Err(TransactionServiceError::InvalidCompletedTransaction); } - let timeout = match self.power_mode { - PowerMode::Normal => self.config.broadcast_monitoring_timeout, - PowerMode::Low => self.config.low_power_polling_timeout, - }; - match 
self.base_node_public_key.clone() { - None => return Err(TransactionServiceError::NoBaseNodeKeysProvided), - Some(pk) => { - // Check if the protocol has already been started - if self.active_transaction_broadcast_protocols.insert(tx_id) { - let protocol = TransactionBroadcastProtocol::new( - tx_id, - self.resources.clone(), - timeout, - pk, - self.timeout_update_publisher.subscribe(), - self.base_node_update_publisher.subscribe(), - ); - let join_handle = tokio::spawn(protocol.execute()); - join_handles.push(join_handle); - } else { - debug!( - target: LOG_TARGET, - "Transaction Broadcast Protocol (TxId: {}) already started", tx_id - ); - } - }, + if completed_tx.is_coinbase() { + return Err(TransactionServiceError::AttemptedToBroadcastCoinbaseTransaction( + completed_tx.tx_id, + )); + } + + if !self.resources.connectivity.is_base_node_set() { + return Err(TransactionServiceError::NoBaseNodeKeysProvided); + } + + // Check if the protocol has already been started + if self.active_transaction_broadcast_protocols.insert(tx_id) { + let protocol = TransactionBroadcastProtocol::new( + tx_id, + self.resources.clone(), + self.timeout_update_watch.get_receiver(), + ); + let join_handle = tokio::spawn(protocol.execute()); + join_handles.push(join_handle); + } else { + trace!( + target: LOG_TARGET, + "Transaction Broadcast Protocol (TxId: {}) already started", + tx_id + ); } Ok(()) @@ -1698,8 +1684,8 @@ where for (_, completed_tx) in completed_txs { if completed_tx.valid && (completed_tx.status == TransactionStatus::Completed || - completed_tx.status == TransactionStatus::Broadcast || - completed_tx.status == TransactionStatus::MinedUnconfirmed) + completed_tx.status == TransactionStatus::Broadcast) && + !completed_tx.is_coinbase() { self.broadcast_completed_transaction(completed_tx, join_handles).await?; } @@ -1763,18 +1749,11 @@ where } async fn set_power_mode(&mut self, mode: PowerMode) -> Result<(), TransactionServiceError> { - self.power_mode = mode; let timeout = match 
mode { PowerMode::Low => self.config.low_power_polling_timeout, PowerMode::Normal => self.config.broadcast_monitoring_timeout, }; - if let Err(e) = self.timeout_update_publisher.send(timeout) { - trace!( - target: LOG_TARGET, - "Could not send Timeout update, no subscribers to receive. (Err {:?})", - e - ); - } + self.timeout_update_watch.send(timeout); Ok(()) } @@ -1873,9 +1852,6 @@ where reward: MicroTari, fees: MicroTari, block_height: u64, - coinbase_monitoring_protocol_join_handles: &mut FuturesUnordered< - JoinHandle>, - >, ) -> Result { let amount = reward + fees; @@ -1885,7 +1861,7 @@ where .find_coinbase_transaction_at_block_height(block_height, amount) .await?; - let (tx_id, completed_transaction) = match find_result { + let completed_transaction = match find_result { Some(completed_tx) => { debug!( target: LOG_TARGET, @@ -1895,7 +1871,7 @@ where amount ); - (completed_tx.tx_id, completed_tx.transaction) + completed_tx.transaction }, None => { // otherwise create a new coinbase tx @@ -1942,128 +1918,17 @@ where e }); - debug!( + info!( target: LOG_TARGET, "Coinbase transaction (TxId: {}) for Block Height: {} added", tx_id, block_height ); - (tx_id, tx) + tx }, }; - if let Err(e) = self - .start_coinbase_transaction_monitoring_protocol(tx_id, coinbase_monitoring_protocol_join_handles) - .await - { - warn!( - target: LOG_TARGET, - "Could not start chain monitoring for Coinbase transaction (TxId: {}): {:?}", tx_id, e - ); - } - Ok(completed_transaction) } - /// Send a request to the Base Node to see if the specified coinbase transaction has been mined yet. This function - /// will send the request and store a timeout future to check in on the status of the transaction in the future. 
- async fn start_coinbase_transaction_monitoring_protocol( - &mut self, - tx_id: TxId, - join_handles: &mut FuturesUnordered>>, - ) -> Result<(), TransactionServiceError> { - let completed_tx = self.db.get_completed_transaction(tx_id).await?; - - if completed_tx.status != TransactionStatus::Coinbase || completed_tx.coinbase_block_height.is_none() { - return Err(TransactionServiceError::InvalidCompletedTransaction); - } - - let block_height = if let Some(bh) = completed_tx.coinbase_block_height { - bh - } else { - 0 - }; - - let timeout = match self.power_mode { - PowerMode::Normal => self.config.broadcast_monitoring_timeout, - PowerMode::Low => self.config.low_power_polling_timeout, - }; - match self.base_node_public_key.clone() { - None => return Err(TransactionServiceError::NoBaseNodeKeysProvided), - Some(pk) => { - if self.active_coinbase_monitoring_protocols.insert(tx_id) { - let protocol = TransactionCoinbaseMonitoringProtocol::new( - completed_tx.tx_id, - block_height, - self.resources.clone(), - timeout, - pk, - self.base_node_update_publisher.subscribe(), - self.timeout_update_publisher.subscribe(), - ); - let join_handle = tokio::spawn(protocol.execute()); - join_handles.push(join_handle); - } else { - debug!( - target: LOG_TARGET, - "Coinbase Monitoring Protocol (TxId: {}) already started", tx_id - ); - } - }, - } - Ok(()) - } - - /// Handle the final clean up after a Coinbase Transaction Monitoring protocol completes - fn complete_coinbase_transaction_monitoring_protocol( - &mut self, - join_result: Result, - ) { - match join_result { - Ok(id) => { - // Cleanup any registered senders - let _ = self.active_coinbase_monitoring_protocols.remove(&id); - - debug!( - target: LOG_TARGET, - "Coinbase Transaction monitoring Protocol for TxId: {} completed successfully", id - ); - }, - Err(TransactionServiceProtocolError { id, error }) => { - let _ = self.active_coinbase_monitoring_protocols.remove(&id); - if let TransactionServiceError::Shutdown = error { - 
return; - } - warn!( - target: LOG_TARGET, - "Error completing Coinbase Transaction monitoring Protocol (Id: {}): {:?}", id, error - ); - let _ = self - .event_publisher - .send(Arc::new(TransactionEvent::Error(format!("{:?}", error)))); - }, - } - } - - /// Go through all completed transactions that have the Coinbase status and start querying the base_node to see if - /// they have been mined - async fn restart_chain_monitoring_for_all_coinbase_transactions( - &mut self, - join_handles: &mut FuturesUnordered>>, - ) -> Result<(), TransactionServiceError> { - trace!( - target: LOG_TARGET, - "Starting Coinbase monitoring for all Broadcast Transactions" - ); - let completed_txs = self.db.get_completed_transactions().await?; - for completed_tx in completed_txs.values() { - if completed_tx.status == TransactionStatus::Coinbase { - self.start_coinbase_transaction_monitoring_protocol(completed_tx.tx_id, join_handles) - .await?; - } - } - - Ok(()) - } - /// Check if a Recovery Status is currently stored in the databse, this indicates that a wallet recovery is in /// progress async fn check_recovery_status(&self) -> Result<(), TransactionServiceError> { @@ -2073,17 +1938,19 @@ where Some(_) => Err(TransactionServiceError::WalletRecoveryInProgress), } } + + fn connectivity(&self) -> &TWalletConnectivity { + &self.resources.connectivity + } } /// This struct is a collection of the common resources that a protocol in the service requires. 
#[derive(Clone)] -pub struct TransactionServiceResources -where TBackend: TransactionBackend + 'static -{ +pub struct TransactionServiceResources { pub db: TransactionDatabase, pub output_manager_service: OutputManagerHandle, pub outbound_message_service: OutboundMessageRequester, - pub connectivity_manager: ConnectivityRequester, + pub connectivity: TWalletConnectivity, pub event_publisher: TransactionEventSender, pub node_identity: Arc, pub factories: CryptoFactories, @@ -2097,6 +1964,12 @@ enum PowerMode { Normal, } +impl Default for PowerMode { + fn default() -> Self { + PowerMode::Normal + } +} + /// Contains the generated TxId and SpendingKey for a Pending Coinbase transaction #[derive(Debug)] pub struct PendingCoinbaseSpendingKey { diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index 7cbaa52c85..93574b9965 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -40,10 +40,11 @@ use log::*; use crate::transaction_service::storage::models::WalletTransaction; use std::{ collections::HashMap, + fmt, fmt::{Display, Error, Formatter}, sync::Arc, }; -use tari_common_types::types::BlindingFactor; +use tari_common_types::types::{BlindingFactor, BlockHash}; use tari_comms::types::CommsPublicKey; use tari_core::transactions::{tari_amount::MicroTari, transaction::Transaction}; @@ -56,6 +57,11 @@ const LOG_TARGET: &str = "wallet::transaction_service::database"; pub trait TransactionBackend: Send + Sync + Clone { /// Retrieve the record associated with the provided DbKey fn fetch(&self, key: &DbKey) -> Result, TransactionStorageError>; + + fn fetch_last_mined_transaction(&self) -> Result, TransactionStorageError>; + + fn fetch_unconfirmed_transactions(&self) -> Result, TransactionStorageError>; + /// Check if a record with the provided key exists in the backend. 
fn contains(&self, key: &DbKey) -> Result; /// Modify the state the of the backend with a write operation @@ -78,15 +84,6 @@ pub trait TransactionBackend: Send + Sync + Clone { ) -> Result<(), TransactionStorageError>; /// Indicated that a completed transaction has been broadcast to the mempools fn broadcast_completed_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; - /// Indicated that a completed transaction has been detected as mined on a base node - fn mine_completed_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; - /// Indicated that a broadcast transaction has been detected as confirm on a base node - fn confirm_broadcast_or_coinbase_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; - /// Indicated that a mined transaction has been detected as unconfirmed on a base node, due to reorg or base node - /// switch - fn unconfirm_mined_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; - /// Set transaction's validity - fn set_completed_transaction_validity(&self, tx_id: TxId, valid: bool) -> Result<(), TransactionStorageError>; /// Cancel Completed transaction, this will update the transaction status fn cancel_completed_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; /// Set cancellation on Pending transaction, this will update the transaction status @@ -116,13 +113,26 @@ pub trait TransactionBackend: Send + Sync + Clone { fn remove_encryption(&self) -> Result<(), TransactionStorageError>; /// Increment the send counter and timestamp of a transaction fn increment_send_count(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; - /// Update a transactions number of confirmations - fn update_confirmations(&self, tx_id: TxId, confirmations: u64) -> Result<(), TransactionStorageError>; - /// Update a transactions mined height - fn update_mined_height(&self, tx_id: TxId, mined_height: u64) -> Result<(), TransactionStorageError>; + /// Update a 
transactions mined height. A transaction can either be mined as valid or mined as invalid + /// A normal transaction can only be mined with valid = true, + /// A coinbase transaction can either be mined as valid = true, meaning that it is the coinbase in that block + /// or valid =false, meaning that the coinbase has been awarded to another tx, but this has been confirmed by blocks + /// The mined height and block are used to determine reorgs + fn update_mined_height( + &self, + tx_id: TxId, + is_valid: bool, + mined_height: u64, + mined_in_block: BlockHash, + num_confirmations: u64, + is_confirmed: bool, + ) -> Result<(), TransactionStorageError>; + + /// Clears the mined block and height of a transaction + fn set_transaction_as_unmined(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; } -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] pub enum DbKey { PendingOutboundTransaction(TxId), PendingInboundTransaction(TxId), @@ -138,6 +148,59 @@ pub enum DbKey { AnyTransaction(TxId), } +impl fmt::Debug for DbKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use DbKey::*; + // Add in i64 representatives for easy debugging in sqlite. 
This should probably be removed at some point + match self { + PendingOutboundTransaction(tx_id) => { + write!(f, "PendingOutboundTransaction ({}u64, {}i64)", tx_id, *tx_id as i64) + }, + PendingInboundTransaction(tx_id) => { + write!(f, "PendingInboundTransaction ({}u64, {}i64)", tx_id, *tx_id as i64) + }, + CompletedTransaction(tx_id) => { + write!(f, "CompletedTransaction ({}u64, {}i64)", tx_id, *tx_id as i64) + }, + PendingOutboundTransactions => { + write!(f, "PendingOutboundTransactions ") + }, + PendingInboundTransactions => { + write!(f, "PendingInboundTransactions") + }, + CompletedTransactions => { + write!(f, "CompletedTransactions ") + }, + CancelledPendingOutboundTransactions => { + write!(f, "CancelledPendingOutboundTransactions ") + }, + CancelledPendingInboundTransactions => { + write!(f, "CancelledPendingInboundTransactions") + }, + CancelledCompletedTransactions => { + write!(f, "CancelledCompletedTransactions") + }, + CancelledPendingOutboundTransaction(tx_id) => { + write!( + f, + "CancelledPendingOutboundTransaction ({}u64, {}i64)", + tx_id, *tx_id as i64 + ) + }, + CancelledPendingInboundTransaction(tx_id) => { + write!( + f, + "CancelledPendingInboundTransaction ({}u64, {}i64)", + tx_id, *tx_id as i64 + ) + }, + AnyTransaction(tx_id) => { + write!(f, "AnyTransaction ({}u64, {}i64)", tx_id, *tx_id as i64) + }, + } + } +} + #[derive(Debug)] pub enum DbValue { PendingOutboundTransaction(Box), @@ -163,9 +226,7 @@ pub enum WriteOperation { /// This structure holds an inner type that implements the `TransactionBackend` trait and contains the more complex /// data access logic required by the module built onto the functionality defined by the trait #[derive(Clone)] -pub struct TransactionDatabase -where T: TransactionBackend + 'static -{ +pub struct TransactionDatabase { db: Arc, } @@ -359,6 +420,14 @@ where T: TransactionBackend + 'static Ok(*t) } + pub async fn fetch_last_mined_transaction(&self) -> Result, TransactionStorageError> { + 
self.db.fetch_last_mined_transaction() + } + + pub async fn fetch_unconfirmed_transactions(&self) -> Result, TransactionStorageError> { + self.db.fetch_unconfirmed_transactions() + } + pub async fn get_completed_transaction_cancelled_or_not( &self, tx_id: TxId, @@ -591,16 +660,6 @@ where T: TransactionBackend + 'static .and_then(|inner_result| inner_result) } - /// Indicated that the specified completed transaction has been detected as mined on the base layer - pub async fn mine_completed_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { - let db_clone = self.db.clone(); - - tokio::task::spawn_blocking(move || db_clone.mine_completed_transaction(tx_id)) - .await - .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string())) - .and_then(|inner_result| inner_result) - } - pub async fn add_utxo_import_transaction( &self, tx_id: TxId, @@ -691,41 +750,9 @@ where T: TransactionBackend + 'static Ok(()) } - pub async fn confirm_broadcast_or_coinbase_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { - let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.confirm_broadcast_or_coinbase_transaction(tx_id)) - .await - .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(()) - } - - pub async fn unconfirm_mined_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { - let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.unconfirm_mined_transaction(tx_id)) - .await - .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(()) - } - - pub async fn set_completed_transaction_validity( - &self, - tx_id: TxId, - valid: bool, - ) -> Result<(), TransactionStorageError> { - let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.set_completed_transaction_validity(tx_id, valid)) - .await - .map_err(|err| 
TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; - Ok(()) - } - - pub async fn set_transaction_confirmations( - &self, - tx_id: TxId, - confirmations: u64, - ) -> Result<(), TransactionStorageError> { + pub async fn set_transaction_as_unmined(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.update_confirmations(tx_id, confirmations)) + tokio::task::spawn_blocking(move || db_clone.set_transaction_as_unmined(tx_id)) .await .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; Ok(()) @@ -734,12 +761,25 @@ where T: TransactionBackend + 'static pub async fn set_transaction_mined_height( &self, tx_id: TxId, + is_valid: bool, mined_height: u64, + mined_in_block: BlockHash, + num_confirmations: u64, + is_confirmed: bool, ) -> Result<(), TransactionStorageError> { let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.update_mined_height(tx_id, mined_height)) - .await - .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; + tokio::task::spawn_blocking(move || { + db_clone.update_mined_height( + tx_id, + is_valid, + mined_height, + mined_in_block, + num_confirmations, + is_confirmed, + ) + }) + .await + .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; Ok(()) } } diff --git a/base_layer/wallet/src/transaction_service/storage/models.rs b/base_layer/wallet/src/transaction_service/storage/models.rs index d0ffc5658a..c3fbd274b6 100644 --- a/base_layer/wallet/src/transaction_service/storage/models.rs +++ b/base_layer/wallet/src/transaction_service/storage/models.rs @@ -27,7 +27,7 @@ use std::{ convert::TryFrom, fmt::{Display, Error, Formatter}, }; -use tari_common_types::types::PrivateKey; +use tari_common_types::types::{BlockHash, PrivateKey}; use tari_comms::types::CommsPublicKey; use tari_core::transactions::{ tari_amount::MicroTari, @@ -201,6 +201,7 
@@ pub struct CompletedTransaction { pub valid: bool, pub confirmations: Option, pub mined_height: Option, + pub mined_in_block: Option, } impl CompletedTransaction { @@ -236,11 +237,12 @@ impl CompletedTransaction { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, } } pub fn is_coinbase(&self) -> bool { - self.status == TransactionStatus::Coinbase + self.coinbase_block_height.is_some() } } @@ -334,6 +336,7 @@ impl From for CompletedTransaction { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, } } } @@ -358,6 +361,7 @@ impl From for CompletedTransaction { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, } } } diff --git a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs index f74f289a8a..41fd022d7b 100644 --- a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs @@ -20,26 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{ - collections::HashMap, - convert::TryFrom, - str::from_utf8, - sync::{Arc, MutexGuard, RwLock}, -}; - -use aes_gcm::{self, aead::Error as AeadError, Aes256Gcm}; -use chrono::{NaiveDateTime, Utc}; -use diesel::{prelude::*, result::Error as DieselError, SqliteConnection}; -use log::*; -use tari_crypto::tari_utilities::{ - hex::{from_hex, Hex}, - ByteArray, -}; - -use tari_common_types::types::PublicKey; -use tari_comms::types::CommsPublicKey; -use tari_core::transactions::tari_amount::MicroTari; - use crate::{ output_manager_service::TxId, schema::{completed_transactions, inbound_transactions, outbound_transactions}, @@ -58,7 +38,27 @@ use crate::{ }, }, }, - util::encryption::{decrypt_bytes_integral_nonce, encrypt_bytes_integral_nonce, Encryptable}, + util::{ + diesel_ext::ExpectedRowsExtension, + encryption::{decrypt_bytes_integral_nonce, encrypt_bytes_integral_nonce, Encryptable}, + }, +}; +use aes_gcm::{self, aead::Error as AeadError, Aes256Gcm}; +use chrono::{NaiveDateTime, Utc}; +use diesel::{prelude::*, result::Error as DieselError, SqliteConnection}; +use log::*; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + str::from_utf8, + sync::{Arc, MutexGuard, RwLock}, +}; +use tari_common_types::types::{BlockHash, PublicKey}; +use tari_comms::types::CommsPublicKey; +use tari_core::transactions::tari_amount::MicroTari; +use tari_crypto::tari_utilities::{ + hex::{from_hex, Hex}, + ByteArray, }; const LOG_TARGET: &str = "wallet::transaction_service::database::sqlite_db"; @@ -509,17 +509,10 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(v) => { if TransactionStatus::try_from(v.status)? 
== TransactionStatus::Completed { v.update( - UpdateCompletedTransactionSql::from(UpdateCompletedTransaction { - status: Some(TransactionStatus::Broadcast), - timestamp: None, - cancelled: None, - direction: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, - }), + UpdateCompletedTransactionSql { + status: Some(TransactionStatus::Broadcast as i32), + ..Default::default() + }, &(*conn), )?; } @@ -534,36 +527,6 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - fn mine_completed_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { - let conn = self.database_connection.acquire_lock(); - - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { - Ok(v) => { - v.update( - UpdateCompletedTransactionSql::from(UpdateCompletedTransaction { - status: Some(TransactionStatus::MinedUnconfirmed), - timestamp: None, - cancelled: None, - direction: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, - }), - &(*conn), - )?; - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( - tx_id, - ))) - }, - Err(e) => return Err(e), - }; - Ok(()) - } - fn cancel_completed_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { @@ -783,16 +746,9 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { if let Ok(tx) = CompletedTransactionSql::find(tx_id, &conn) { let update = UpdateCompletedTransactionSql { - status: None, - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: None, send_count: Some(tx.send_count + 1), last_send_timestamp: Some(Some(Utc::now().naive_utc())), - valid: None, - confirmations: None, - mined_height: 
None, + ..Default::default() }; tx.update(update, &conn)?; } else if let Ok(tx) = OutboundTransactionSql::find(tx_id, &conn) { @@ -820,19 +776,26 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - fn confirm_broadcast_or_coinbase_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { + fn update_mined_height( + &self, + tx_id: u64, + is_valid: bool, + mined_height: u64, + mined_in_block: BlockHash, + num_confirmations: u64, + is_confirmed: bool, + ) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { + match CompletedTransactionSql::find(tx_id, &(*conn)) { Ok(v) => { - if v.status == TransactionStatus::MinedUnconfirmed as i32 || - v.status == TransactionStatus::MinedConfirmed as i32 || - v.status == TransactionStatus::Broadcast as i32 || - v.status == TransactionStatus::Coinbase as i32 - { - v.confirm(&(*conn))?; - } else { - return Err(TransactionStorageError::TransactionNotMined(tx_id)); - } + v.update_mined_height( + is_valid, + mined_height, + mined_in_block, + num_confirmations, + is_confirmed, + &(*conn), + )?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( @@ -844,65 +807,48 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - fn unconfirm_mined_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { + fn fetch_last_mined_transaction(&self) -> Result, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { - Ok(v) => { - if v.status == TransactionStatus::MinedUnconfirmed as i32 || - v.status == TransactionStatus::MinedConfirmed as i32 - { - v.unconfirm(&(*conn))?; - } else { - return Err(TransactionStorageError::TransactionNotMined(tx_id)); - } - }, - 
Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( - tx_id, - ))); + let tx = completed_transactions::table + .filter(completed_transactions::mined_height.is_not_null()) + .order_by(completed_transactions::mined_height.desc()) + .first::(&*conn) + .optional()?; + Ok(match tx { + Some(mut tx) => { + self.decrypt_if_necessary(&mut tx)?; + Some(tx.try_into()?) }, - Err(e) => return Err(e), - }; - Ok(()) + None => None, + }) } - fn set_completed_transaction_validity(&self, tx_id: u64, valid: bool) -> Result<(), TransactionStorageError> { + fn fetch_unconfirmed_transactions(&self) -> Result, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { - Ok(v) => { - v.set_validity(valid, &(*conn))?; - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( - tx_id, - ))); - }, - Err(e) => return Err(e), - }; - Ok(()) - } + let txs = completed_transactions::table + .filter( + completed_transactions::mined_height + .is_null() + .or(completed_transactions::status.eq(TransactionStatus::MinedUnconfirmed as i32)), + ) + .filter(completed_transactions::cancelled.eq(false as i32)) + .order_by(completed_transactions::tx_id) + .load::(&*conn)?; + + let mut result = vec![]; + for mut tx in txs { + self.decrypt_if_necessary(&mut tx)?; + result.push(tx.try_into()?); + } - fn update_confirmations(&self, tx_id: u64, confirmations: u64) -> Result<(), TransactionStorageError> { - let conn = self.database_connection.acquire_lock(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { - Ok(v) => { - v.update_confirmations(confirmations, &(*conn))?; - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return 
Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( - tx_id, - ))); - }, - Err(e) => return Err(e), - }; - Ok(()) + Ok(result) } - fn update_mined_height(&self, tx_id: u64, mined_height: u64) -> Result<(), TransactionStorageError> { + fn set_transaction_as_unmined(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { + match CompletedTransactionSql::find(tx_id, &(*conn)) { Ok(v) => { - v.update_mined_height(mined_height, &(*conn))?; + v.set_as_unmined(&(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( @@ -992,7 +938,7 @@ impl InboundTransactionSql { if num_updated == 0 { return Err(TransactionStorageError::UnexpectedResult( - "Database update error".to_string(), + "Updating inbound transactions failed. No rows were affected".to_string(), )); } @@ -1151,14 +1097,9 @@ impl OutboundTransactionSql { } pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - let num_deleted = - diesel::delete(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) - .execute(conn)?; - - if num_deleted == 0 { - return Err(TransactionStorageError::ValuesNotFound); - } - + diesel::delete(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) + .execute(conn) + .num_rows_affected_or_not_found(1)?; Ok(()) } @@ -1167,16 +1108,10 @@ impl OutboundTransactionSql { update: UpdateOutboundTransactionSql, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { - let num_updated = - diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) - .set(update) - .execute(conn)?; - - if num_updated == 0 { - return Err(TransactionStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } 
+ diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) + .set(update) + .execute(conn) + .num_rows_affected_or_not_found(1)?; Ok(()) } @@ -1300,6 +1235,7 @@ struct CompletedTransactionSql { valid: i32, confirmations: Option, mined_height: Option, + mined_in_block: Option>, } impl CompletedTransactionSql { @@ -1367,33 +1303,18 @@ impl CompletedTransactionSql { updated_tx: UpdateCompletedTransactionSql, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { - let num_updated = - diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(&self.tx_id))) - .set(updated_tx) - .execute(conn)?; - - if num_updated == 0 { - return Err(TransactionStorageError::UnexpectedResult( - "Database update error".to_string(), - )); - } - + diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(&self.tx_id))) + .set(updated_tx) + .execute(conn) + .num_rows_affected_or_not_found(1)?; Ok(()) } pub fn cancel(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { - status: None, - timestamp: None, cancelled: Some(1i32), - direction: None, - transaction_protocol: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, + ..Default::default() }, conn, )?; @@ -1401,62 +1322,30 @@ impl CompletedTransactionSql { Ok(()) } - pub fn confirm(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateCompletedTransactionSql { - status: Some(TransactionStatus::MinedConfirmed as i32), - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, - }, - conn, - )?; - - Ok(()) - } + pub fn set_as_unmined(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + let status = if 
self.coinbase_block_height.is_some() { + Some(TransactionStatus::Coinbase as i32) + } else if self.status == TransactionStatus::Broadcast as i32 { + Some(TransactionStatus::Broadcast as i32) + } else { + Some(TransactionStatus::Completed as i32) + }; - pub fn unconfirm(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { - status: Some(TransactionStatus::MinedUnconfirmed as i32), - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, + status, + mined_in_block: Some(None), + mined_height: Some(None), + confirmations: Some(None), + // Resets to valid + valid: Some(1), + ..Default::default() }, conn, )?; - Ok(()) - } - - pub fn set_validity(&self, valid: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateCompletedTransactionSql { - status: None, - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: None, - send_count: None, - last_send_timestamp: None, - valid: Some(valid as i32), - confirmations: None, - mined_height: None, - }, - conn, - )?; + // Ideally the outputs should be marked unmined here as well, but because of the separation of classes, + // that will be done in the outputs service. 
Ok(()) } @@ -1464,40 +1353,8 @@ impl CompletedTransactionSql { pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { - status: None, - timestamp: None, - cancelled: None, - direction: None, transaction_protocol: Some(self.transaction_protocol.clone()), - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, - }, - conn, - )?; - - Ok(()) - } - - pub fn update_confirmations( - &self, - confirmations: u64, - conn: &SqliteConnection, - ) -> Result<(), TransactionStorageError> { - self.update( - UpdateCompletedTransactionSql { - status: None, - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: Some(self.transaction_protocol.clone()), - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: Some(Some(confirmations as i64)), - mined_height: None, + ..Default::default() }, conn, )?; @@ -1507,21 +1364,31 @@ impl CompletedTransactionSql { pub fn update_mined_height( &self, + is_valid: bool, mined_height: u64, + mined_in_block: BlockHash, + num_confirmations: u64, + is_confirmed: bool, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { + let status = if self.coinbase_block_height.is_some() && !is_valid { + TransactionStatus::Coinbase as i32 + } else if is_confirmed { + TransactionStatus::MinedConfirmed as i32 + } else { + TransactionStatus::MinedUnconfirmed as i32 + }; + self.update( UpdateCompletedTransactionSql { - status: None, - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, + confirmations: Some(Some(num_confirmations as i64)), + status: Some(status), mined_height: Some(Some(mined_height as i64)), + mined_in_block: Some(Some(mined_in_block)), + valid: Some(is_valid as i32), + // If the tx is mined, then it can't be cancelled + cancelled: 
Some(0), + ..Default::default() }, conn, )?; @@ -1571,6 +1438,7 @@ impl TryFrom for CompletedTransactionSql { valid: c.valid as i32, confirmations: c.confirmations.map(|ic| ic as i64), mined_height: c.mined_height.map(|ic| ic as i64), + mined_in_block: c.mined_in_block, }) } } @@ -1599,24 +1467,12 @@ impl TryFrom for CompletedTransaction { valid: c.valid != 0, confirmations: c.confirmations.map(|ic| ic as u64), mined_height: c.mined_height.map(|ic| ic as u64), + mined_in_block: c.mined_in_block, }) } } -/// These are the fields that can be updated for a Completed Transaction -pub struct UpdateCompletedTransaction { - status: Option, - timestamp: Option, - cancelled: Option, - direction: Option, - send_count: Option, - last_send_timestamp: Option>, - valid: Option, - confirmations: Option>, - mined_height: Option>, -} - -#[derive(AsChangeset)] +#[derive(AsChangeset, Default)] #[table_name = "completed_transactions"] pub struct UpdateCompletedTransactionSql { status: Option, @@ -1629,24 +1485,7 @@ pub struct UpdateCompletedTransactionSql { valid: Option, confirmations: Option>, mined_height: Option>, -} - -/// Map a Rust friendly UpdateCompletedTransaction to the Sql data type form -impl From for UpdateCompletedTransactionSql { - fn from(u: UpdateCompletedTransaction) -> Self { - Self { - status: u.status.map(|s| s as i32), - timestamp: u.timestamp, - cancelled: u.cancelled.map(|c| c as i32), - direction: u.direction.map(|d| d as i32), - transaction_protocol: None, - send_count: u.send_count.map(|c| c as i32), - last_send_timestamp: u.last_send_timestamp, - valid: u.valid.map(|c| c as i32), - confirmations: u.confirmations.map(|c| c.map(|ic| ic as i64)), - mined_height: u.mined_height.map(|c| c.map(|ic| ic as i64)), - } - } + mined_in_block: Option>>, } #[cfg(test)] @@ -1878,6 +1717,7 @@ mod test { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let completed_tx2 = CompletedTransaction { tx_id: 3, @@ -1897,6 +1737,7 @@ mod test { 
valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; CompletedTransactionSql::try_from(completed_tx1.clone()) @@ -2025,6 +1866,7 @@ mod test { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let coinbase_tx2 = CompletedTransaction { @@ -2045,6 +1887,7 @@ mod test { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let coinbase_tx3 = CompletedTransaction { @@ -2065,6 +1908,7 @@ mod test { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; CompletedTransactionSql::try_from(coinbase_tx1) @@ -2175,6 +2019,7 @@ mod test { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_tx.clone()).unwrap(); @@ -2256,6 +2101,7 @@ mod test { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let completed_tx_sql = CompletedTransactionSql::try_from(completed_tx).unwrap(); completed_tx_sql.commit(&conn).unwrap(); diff --git a/base_layer/wallet/src/transaction_service/tasks/mod.rs b/base_layer/wallet/src/transaction_service/tasks/mod.rs index da6e3df211..65a6c83890 100644 --- a/base_layer/wallet/src/transaction_service/tasks/mod.rs +++ b/base_layer/wallet/src/transaction_service/tasks/mod.rs @@ -23,5 +23,4 @@ pub mod send_finalized_transaction; pub mod send_transaction_cancelled; pub mod send_transaction_reply; -pub mod start_transaction_validation_and_broadcast_protocols; pub mod wait_on_dial; diff --git a/base_layer/wallet/src/transaction_service/tasks/start_transaction_validation_and_broadcast_protocols.rs b/base_layer/wallet/src/transaction_service/tasks/start_transaction_validation_and_broadcast_protocols.rs deleted file mode 100644 index 522bacdbb9..0000000000 --- a/base_layer/wallet/src/transaction_service/tasks/start_transaction_validation_and_broadcast_protocols.rs +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2020. 
The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -use crate::{ - transaction_service::{ - error::TransactionServiceError, - handle::{TransactionEvent, TransactionServiceHandle}, - }, - types::ValidationRetryStrategy, -}; -use log::*; -use tokio::sync::broadcast; - -const LOG_TARGET: &str = "wallet::transaction_service::tasks::start_tx_validation_and_broadcast"; - -pub async fn start_transaction_validation_and_broadcast_protocols( - mut handle: TransactionServiceHandle, - retry_strategy: ValidationRetryStrategy, -) -> Result<(), TransactionServiceError> { - let mut event_stream = handle.get_event_stream(); - let our_id = handle.validate_transactions(retry_strategy).await?; - - // Now that its started we will spawn an task to monitor the event bus and when its successful we will start the - // Broadcast protocols - - tokio::spawn(async move { - loop { - match event_stream.recv().await { - Ok(event) => match &*event { - TransactionEvent::TransactionValidationSuccess(_id) => { - info!( - target: LOG_TARGET, - "Transaction Validation success, restarting broadcast protocols" - ); - if let Err(e) = handle.restart_broadcast_protocols().await { - error!( - target: LOG_TARGET, - "Error restarting transaction broadcast protocols: {:?}", e - ); - } - }, - TransactionEvent::TransactionValidationFailure(id) => { - if our_id == *id { - error!(target: LOG_TARGET, "Transaction Validation failed!"); - break; - } - }, - _ => (), - }, - Err(e @ broadcast::error::RecvError::Lagged(_)) => { - warn!( - target: LOG_TARGET, - "start_transaction_validation_and_broadcast_protocols: {}", e - ); - continue; - }, - Err(broadcast::error::RecvError::Closed) => { - debug!( - target: LOG_TARGET, - "start_transaction_validation_and_broadcast_protocols is exiting because the event stream \ - closed", - ); - break; - }, - } - } - }); - - Ok(()) -} diff --git a/base_layer/wallet/src/types.rs b/base_layer/wallet/src/types.rs index 25457cc666..c2b8641453 100644 --- a/base_layer/wallet/src/types.rs +++ b/base_layer/wallet/src/types.rs @@ -32,9 +32,3 
@@ pub type KeyDigest = Blake256; /// Specify the Hash function used when constructing challenges during transaction building pub type HashDigest = Blake256; - -#[derive(Debug)] -pub enum ValidationRetryStrategy { - Limited(u8), - UntilSuccess, -} diff --git a/base_layer/wallet/src/util/diesel_ext.rs b/base_layer/wallet/src/util/diesel_ext.rs new file mode 100644 index 0000000000..3af2cd67aa --- /dev/null +++ b/base_layer/wallet/src/util/diesel_ext.rs @@ -0,0 +1,42 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use diesel::{result::Error as DieselError, QueryResult}; + +pub trait ExpectedRowsExtension { + fn num_rows_affected_or_not_found(self, num_rows: usize) -> Result; +} + +impl ExpectedRowsExtension for QueryResult { + fn num_rows_affected_or_not_found(self, num_rows: usize) -> Result { + match self { + Ok(s) => { + if s == num_rows { + Ok(s) + } else { + Err(DieselError::NotFound) + } + }, + Err(e) => Err(e), + } + } +} diff --git a/base_layer/wallet/src/util/mod.rs b/base_layer/wallet/src/util/mod.rs index 7217ac5056..adc24249d0 100644 --- a/base_layer/wallet/src/util/mod.rs +++ b/base_layer/wallet/src/util/mod.rs @@ -20,4 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+pub mod diesel_ext; pub mod encryption; +pub mod watch; diff --git a/base_layer/wallet/src/connectivity_service/watch.rs b/base_layer/wallet/src/util/watch.rs similarity index 86% rename from base_layer/wallet/src/connectivity_service/watch.rs rename to base_layer/wallet/src/util/watch.rs index 4669b355f6..eefab53a6c 100644 --- a/base_layer/wallet/src/connectivity_service/watch.rs +++ b/base_layer/wallet/src/util/watch.rs @@ -36,8 +36,16 @@ impl Watch { self.receiver().borrow() } - pub fn broadcast(&self, item: T) { - // PANIC: broadcast becomes infallible because the receiver is owned in Watch and so has the same lifetime + pub async fn changed(&mut self) { + if self.1.changed().await.is_err() { + // Result::expect requires E: fmt::Debug and `watch::SendError` is not, this is equivalent + panic!("watch internal receiver is dropped"); + } + } + + pub fn send(&self, item: T) { + // PANIC: broadcast becomes infallible because the receiver is owned in Watch and so the failure case is + // unreachable if self.sender().send(item).is_err() { // Result::expect requires E: fmt::Debug and `watch::SendError` is not, this is equivalent panic!("watch internal receiver is dropped"); diff --git a/base_layer/wallet/src/utxo_scanner_service/handle.rs b/base_layer/wallet/src/utxo_scanner_service/handle.rs index c7a2e4fdeb..845f45147c 100644 --- a/base_layer/wallet/src/utxo_scanner_service/handle.rs +++ b/base_layer/wallet/src/utxo_scanner_service/handle.rs @@ -20,22 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::utxo_scanner_service::error::UtxoScannerError; use std::time::Duration; -use tari_comms::{peer_manager::NodeId, types::CommsPublicKey}; +use tari_comms::peer_manager::NodeId; use tari_core::transactions::tari_amount::MicroTari; -use tari_service_framework::{reply_channel::SenderService, Service}; use tokio::sync::broadcast; -#[derive(Debug)] -pub enum UtxoScannerRequest { - SetBaseNodePublicKey(CommsPublicKey), -} - -pub enum UtxoScannerResponse { - BaseNodePublicKeySet, -} - #[derive(Debug, Clone)] pub enum UtxoScannerEvent { ConnectingToBaseNode(NodeId), @@ -69,29 +58,15 @@ pub enum UtxoScannerEvent { #[derive(Clone)] pub struct UtxoScannerHandle { - handle: SenderService>, event_sender: broadcast::Sender, } impl UtxoScannerHandle { - pub fn new( - handle: SenderService>, - event_sender: broadcast::Sender, - ) -> Self { - UtxoScannerHandle { handle, event_sender } + pub fn new(event_sender: broadcast::Sender) -> Self { + UtxoScannerHandle { event_sender } } pub fn get_event_receiver(&mut self) -> broadcast::Receiver { self.event_sender.subscribe() } - - pub async fn set_base_node_public_key(&mut self, public_key: CommsPublicKey) -> Result<(), UtxoScannerError> { - match self - .handle - .call(UtxoScannerRequest::SetBaseNodePublicKey(public_key)) - .await?? - { - UtxoScannerResponse::BaseNodePublicKeySet => Ok(()), - } - } } diff --git a/base_layer/wallet/src/utxo_scanner_service/mod.rs b/base_layer/wallet/src/utxo_scanner_service/mod.rs index 956a32848b..e8b9db5aea 100644 --- a/base_layer/wallet/src/utxo_scanner_service/mod.rs +++ b/base_layer/wallet/src/utxo_scanner_service/mod.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{ + connectivity_service::{WalletConnectivityHandle, WalletConnectivityInterface}, output_manager_service::handle::OutputManagerHandle, storage::database::{WalletBackend, WalletDatabase}, transaction_service::handle::TransactionServiceHandle, @@ -34,13 +35,7 @@ use log::*; use std::{sync::Arc, time::Duration}; use tari_comms::{connectivity::ConnectivityRequester, NodeIdentity}; use tari_core::transactions::CryptoFactories; -use tari_service_framework::{ - async_trait, - reply_channel, - ServiceInitializationError, - ServiceInitializer, - ServiceInitializerContext, -}; +use tari_service_framework::{async_trait, ServiceInitializationError, ServiceInitializer, ServiceInitializerContext}; use tokio::sync::broadcast; pub mod error; @@ -49,9 +44,7 @@ pub mod utxo_scanning; const LOG_TARGET: &str = "wallet::utxo_scanner_service::initializer"; -pub struct UtxoScannerServiceInitializer -where T: WalletBackend + 'static -{ +pub struct UtxoScannerServiceInitializer { interval: Duration, backend: Option>, factories: CryptoFactories, @@ -83,11 +76,10 @@ where T: WalletBackend + 'static async fn initialize(&mut self, context: ServiceInitializerContext) -> Result<(), ServiceInitializationError> { trace!(target: LOG_TARGET, "Utxo scanner initialization"); - let (sender, receiver) = reply_channel::unbounded(); let (event_sender, _) = broadcast::channel(200); // Register handle before waiting for handles to be ready - let utxo_scanner_handle = UtxoScannerHandle::new(sender, event_sender.clone()); + let utxo_scanner_handle = UtxoScannerHandle::new(event_sender.clone()); context.register_handle(utxo_scanner_handle); let backend = self @@ -101,7 +93,8 @@ where T: WalletBackend + 'static context.spawn_when_ready(move |handles| async move { let transaction_service = handles.expect_handle::(); let output_manager_service = handles.expect_handle::(); - let connectivity_manager = handles.expect_handle::(); + let comms_connectivity = handles.expect_handle::(); + let 
wallet_connectivity = handles.expect_handle::(); let scanning_service = UtxoScannerService::::builder() .with_peers(vec![]) @@ -110,13 +103,13 @@ where T: WalletBackend + 'static .with_mode(UtxoScannerMode::Scanning) .build_with_resources( backend, - connectivity_manager, + comms_connectivity, + wallet_connectivity.get_current_base_node_watcher(), output_manager_service, transaction_service, node_identity, factories, handles.get_shutdown_signal(), - receiver, event_sender, ) .run(); diff --git a/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs b/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs index ca974acbcc..03f4629f03 100644 --- a/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs +++ b/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs @@ -30,14 +30,13 @@ use std::{ }; use chrono::Utc; -use futures::{pin_mut, StreamExt}; +use futures::StreamExt; use log::*; use serde::{Deserialize, Serialize}; use tokio::{sync::broadcast, task, time}; use tari_common_types::types::HashOutput; use tari_comms::{ - connectivity::ConnectivityRequester, peer_manager::NodeId, protocol::rpc::{RpcError, RpcStatus}, types::CommsPublicKey, @@ -57,10 +56,10 @@ use tari_core::{ CryptoFactories, }, }; -use tari_service_framework::{reply_channel, reply_channel::SenderService}; use tari_shutdown::ShutdownSignal; use crate::{ + connectivity_service::WalletConnectivityInterface, error::WalletError, output_manager_service::{handle::OutputManagerHandle, TxId}, storage::{ @@ -68,13 +67,11 @@ use crate::{ sqlite_db::WalletSqliteDatabase, }, transaction_service::handle::TransactionServiceHandle, - utxo_scanner_service::{ - error::UtxoScannerError, - handle::{UtxoScannerEvent, UtxoScannerRequest, UtxoScannerResponse}, - }, + utxo_scanner_service::{error::UtxoScannerError, handle::UtxoScannerEvent}, WalletSqlite, }; -use tokio::time::MissedTickBehavior; +use tari_comms::{connectivity::ConnectivityRequester, peer_manager::Peer}; +use tokio::{sync::watch, 
time::MissedTickBehavior}; pub const LOG_TARGET: &str = "wallet::utxo_scanning"; @@ -104,7 +101,8 @@ pub struct UtxoScannerServiceBuilder { #[derive(Clone)] struct UtxoScannerResources { pub db: WalletDatabase, - pub connectivity: ConnectivityRequester, + pub comms_connectivity: ConnectivityRequester, + pub current_base_node_watcher: watch::Receiver>, pub output_manager_service: OutputManagerHandle, pub transaction_service: TransactionServiceHandle, pub node_identity: Arc, @@ -141,16 +139,14 @@ impl UtxoScannerServiceBuilder { ) -> UtxoScannerService { let resources = UtxoScannerResources { db: wallet.db.clone(), - connectivity: wallet.comms.connectivity(), + comms_connectivity: wallet.comms.connectivity(), + current_base_node_watcher: wallet.wallet_connectivity.get_current_base_node_watcher(), output_manager_service: wallet.output_manager_service.clone(), transaction_service: wallet.transaction_service.clone(), node_identity: wallet.comms.node_identity(), factories: wallet.factories.clone(), }; - // When the Utxo Scanner is built using this method it is not going to run as a Service so we will pass in the - // sender to be held by the service so that the receiver will not error when it is polled - let (sender, receiver) = reply_channel::unbounded(); let (event_sender, _) = broadcast::channel(200); let interval = self @@ -163,9 +159,7 @@ impl UtxoScannerServiceBuilder { resources, interval, shutdown_signal, - receiver, event_sender, - Some(sender), ) } @@ -173,18 +167,19 @@ impl UtxoScannerServiceBuilder { pub fn build_with_resources( &mut self, db: WalletDatabase, - connectivity: ConnectivityRequester, + comms_connectivity: ConnectivityRequester, + base_node_watcher: watch::Receiver>, output_manager_service: OutputManagerHandle, transaction_service: TransactionServiceHandle, node_identity: Arc, factories: CryptoFactories, shutdown_signal: ShutdownSignal, - request_stream: reply_channel::Receiver>, event_sender: broadcast::Sender, ) -> UtxoScannerService { let 
resources = UtxoScannerResources { db, - connectivity, + comms_connectivity, + current_base_node_watcher: base_node_watcher, output_manager_service, transaction_service, node_identity, @@ -200,9 +195,7 @@ impl UtxoScannerServiceBuilder { resources, interval, shutdown_signal, - request_stream, event_sender, - None, ) } } @@ -253,7 +246,7 @@ where TBackend: WalletBackend + 'static target: LOG_TARGET, "Attempting UTXO sync with seed peer {} ({})", self.peer_index, peer, ); - match self.resources.connectivity.dial_peer(peer.clone()).await { + match self.resources.comms_connectivity.dial_peer(peer.clone()).await { Ok(conn) => Ok(conn), Err(e) => { self.publish_event(UtxoScannerEvent::ConnectionFailedToBaseNode { @@ -324,7 +317,10 @@ where TBackend: WalletBackend + 'static timer.elapsed(), num_scanned ); + + // let num_scanned = 0; total_scanned += num_scanned; + // return Ok((total_scanned, start_index, timer.elapsed())); } } @@ -647,10 +643,7 @@ where TBackend: WalletBackend + 'static is_running: Arc, scan_for_utxo_interval: Duration, shutdown_signal: ShutdownSignal, - request_stream: Option>>, event_sender: broadcast::Sender, - _request_stream_sender_holder: - Option>>, } impl UtxoScannerService @@ -664,11 +657,7 @@ where TBackend: WalletBackend + 'static resources: UtxoScannerResources, scan_for_utxo_interval: Duration, shutdown_signal: ShutdownSignal, - request_stream: reply_channel::Receiver>, event_sender: broadcast::Sender, - _request_stream_sender_holder: Option< - SenderService>, - >, ) -> Self { Self { resources, @@ -678,9 +667,7 @@ where TBackend: WalletBackend + 'static is_running: Arc::new(AtomicBool::new(false)), scan_for_utxo_interval, shutdown_signal, - request_stream: Some(request_stream), event_sender, - _request_stream_sender_holder, } } @@ -711,13 +698,6 @@ where TBackend: WalletBackend + 'static "UTXO scanning service starting (interval = {:.2?})", self.scan_for_utxo_interval ); - let request_stream = self - .request_stream - .take() - .expect("UTXO 
Scanner Service initialized without request_stream") - .fuse(); - pin_mut!(request_stream); - let mut shutdown = self.shutdown_signal.clone(); let start_at = Instant::now() + Duration::from_secs(1); let mut work_interval = time::interval_at(start_at.into(), self.scan_for_utxo_interval); @@ -736,19 +716,19 @@ where TBackend: WalletBackend + 'static //we make sure the flag is set to false here running_flag.store(false, Ordering::Relaxed); }); + if self.mode == UtxoScannerMode::Recovery { + return Ok(()); + } } }, - request_context = request_stream.select_next_some() => { - trace!(target: LOG_TARGET, "Handling Service API Request"); - let (request, reply_tx) = request_context.split(); - let response = self.handle_request(request).await.map_err(|e| { - warn!(target: LOG_TARGET, "Error handling request: {:?}", e); - e - }); - let _ = reply_tx.send(response).map_err(|e| { - warn!(target: LOG_TARGET, "Failed to send reply"); - e - }); + _ = self.resources.current_base_node_watcher.changed() => { + debug!(target: LOG_TARGET, "Base node change detected."); + let peer = self.resources.current_base_node_watcher.borrow().as_ref().cloned(); + if let Some(peer) = peer { + self.peer_seeds = vec![peer.public_key]; + } + + self.is_running.store(false, Ordering::Relaxed); }, _ = shutdown.wait() => { // this will stop the task if its running, and let that thread exit gracefully @@ -757,21 +737,6 @@ where TBackend: WalletBackend + 'static return Ok(()); } } - - if self.mode == UtxoScannerMode::Recovery { - return Ok(()); - } - } - } - - async fn handle_request(&mut self, request: UtxoScannerRequest) -> Result { - trace!(target: LOG_TARGET, "Handling Service Request: {:?}", request); - match request { - UtxoScannerRequest::SetBaseNodePublicKey(pk) => { - self.is_running.store(false, Ordering::Relaxed); - self.peer_seeds = vec![pk]; - Ok(UtxoScannerResponse::BaseNodePublicKeySet) - }, } } } diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index 
72733c459b..01c55ab3e6 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -68,7 +68,7 @@ use tari_shutdown::ShutdownSignal; use crate::{ base_node_service::{handle::BaseNodeServiceHandle, BaseNodeServiceInitializer}, config::{WalletConfig, KEY_MANAGER_COMMS_SECRET_KEY_BRANCH_KEY}, - connectivity_service::{WalletConnectivityHandle, WalletConnectivityInitializer}, + connectivity_service::{WalletConnectivityHandle, WalletConnectivityInitializer, WalletConnectivityInterface}, contacts_service::{handle::ContactsServiceHandle, storage::database::ContactsBackend, ContactsServiceInitializer}, error::WalletError, output_manager_service::{ @@ -93,13 +93,7 @@ const LOG_TARGET: &str = "wallet"; /// A structure containing the config and services that a Wallet application will require. This struct will start up all /// the services and provide the APIs that applications will use to interact with the services #[derive(Clone)] -pub struct Wallet -where - T: WalletBackend + 'static, - U: TransactionBackend + 'static, - V: OutputManagerBackend + 'static, - W: ContactsBackend + 'static, -{ +pub struct Wallet { pub comms: CommsNode, pub dht_service: Dht, pub store_and_forward_requester: StoreAndForwardRequester, @@ -132,7 +126,7 @@ where contacts_backend: W, shutdown_signal: ShutdownSignal, recovery_master_key: Option, - ) -> Result, WalletError> { + ) -> Result { let master_secret_key = read_or_create_master_secret_key(recovery_master_key, &mut wallet_database.clone()).await?; let comms_secret_key = derive_comms_secret_key(&master_secret_key)?; @@ -252,7 +246,7 @@ where .set_node_features(comms.node_identity().features()) .await?; - Ok(Wallet { + Ok(Self { comms, dht_service: dht, store_and_forward_requester, @@ -301,29 +295,13 @@ where ); self.comms.peer_manager().add_peer(peer.clone()).await?; - - self.transaction_service - .set_base_node_public_key(peer.public_key.clone()) - .await?; - - self.output_manager_service - 
.set_base_node_public_key(peer.public_key.clone()) - .await?; - - self.utxo_scanner_service - .set_base_node_public_key(peer.public_key.clone()) - .await?; - - self.base_node_service.set_base_node_peer(peer).await?; + self.wallet_connectivity.set_base_node(peer); Ok(()) } - pub async fn get_base_node_peer(&mut self) -> Result, WalletError> { - self.base_node_service - .get_base_node_peer() - .await - .map_err(WalletError::BaseNodeServiceError) + pub async fn get_base_node_peer(&mut self) -> Option { + self.wallet_connectivity.get_current_base_node_peer() } pub async fn check_for_update(&self) -> Option { diff --git a/base_layer/wallet/tests/output_manager_service/service.rs b/base_layer/wallet/tests/output_manager_service/service.rs index 198be6a43b..a22439e902 100644 --- a/base_layer/wallet/tests/output_manager_service/service.rs +++ b/base_layer/wallet/tests/output_manager_service/service.rs @@ -20,36 +20,31 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::support::{ + comms_rpc::{connect_rpc_client, BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState}, data::get_temp_sqlite_database_connection, - rpc::{BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState}, utils::{make_input, make_input_with_features, TestParams}, }; -use futures::FutureExt; use rand::{rngs::OsRng, RngCore}; -use std::{sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use tari_common_types::types::{PrivateKey, PublicKey}; use tari_comms::{ peer_manager::{NodeIdentity, PeerFeatures}, - protocol::rpc::{mock::MockRpcServer, NamedProtocolService, RpcClientConfig, RpcStatus}, - test_utils::{ - mocks::{create_connectivity_mock, ConnectivityManagerMockState}, - node_identity::build_node_identity, - }, + protocol::rpc::{mock::MockRpcServer, NamedProtocolService}, + test_utils::node_identity::build_node_identity, types::CommsSecretKey, }; use tari_core::{ base_node::rpc::BaseNodeWalletRpcServer, + blocks::BlockHeader, consensus::ConsensusConstantsBuilder, + crypto::tari_utilities::Hashable, + proto::base_node::{QueryDeletedResponse, UtxoQueryResponse, UtxoQueryResponses}, transactions::{ fee::Fee, helpers::{create_unblinded_output, TestParams as TestParamsHelpers}, tari_amount::{uT, MicroTari}, - transaction::{KernelFeatures, OutputFeatures, Transaction}, - transaction_protocol::{ - recipient::RecipientState, - sender::TransactionSenderMessage, - single_receiver::SingleReceiverTransactionProtocol, - }, + transaction::OutputFeatures, + transaction_protocol::sender::TransactionSenderMessage, CryptoFactories, SenderTransactionProtocol, }, @@ -65,27 +60,28 @@ use tari_p2p::Network; use tari_service_framework::reply_channel; use tari_shutdown::Shutdown; use tari_wallet::{ - base_node_service::{handle::BaseNodeServiceHandle, mock_base_node_service::MockBaseNodeService}, + base_node_service::{ + handle::{BaseNodeEvent, BaseNodeServiceHandle}, + mock_base_node_service::MockBaseNodeService, + 
service::BaseNodeState, + }, + connectivity_service::{create_wallet_connectivity_mock, WalletConnectivityMock}, output_manager_service::{ config::OutputManagerServiceConfig, error::{OutputManagerError, OutputManagerStorageError}, - handle::{OutputManagerEvent, OutputManagerHandle}, + handle::OutputManagerHandle, service::OutputManagerService, storage::{ - database::{DbKey, DbKeyValuePair, DbValue, OutputManagerBackend, OutputManagerDatabase, WriteOperation}, - models::{DbUnblindedOutput, OutputStatus}, + database::{OutputManagerBackend, OutputManagerDatabase}, sqlite_db::OutputManagerSqliteDatabase, }, TxId, - TxoValidationType, }, transaction_service::handle::TransactionServiceHandle, - types::ValidationRetryStrategy, }; use tokio::{ sync::{broadcast, broadcast::channel}, task, - time, }; #[allow(clippy::type_complexity)] @@ -94,12 +90,13 @@ async fn setup_output_manager_service( with_connection: bool, ) -> ( OutputManagerHandle, + WalletConnectivityMock, Shutdown, TransactionServiceHandle, MockRpcServer>, Arc, BaseNodeWalletRpcMockState, - ConnectivityManagerMockState, + broadcast::Sender>, ) { let shutdown = Shutdown::new(); let factories = CryptoFactories::default(); @@ -116,14 +113,15 @@ async fn setup_output_manager_service( let (sender, receiver_bns) = reply_channel::unbounded(); let (event_publisher_bns, _) = broadcast::channel(100); - let basenode_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns); + let basenode_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns.clone()); let mut mock_base_node_service = MockBaseNodeService::new(receiver_bns, shutdown.to_signal()); mock_base_node_service.set_default_base_node_state(); task::spawn(mock_base_node_service.run()); - let (connectivity_manager, connectivity_mock) = create_connectivity_mock(); - let connectivity_mock_state = connectivity_mock.get_shared_state(); - task::spawn(connectivity_mock.run()); + let wallet_connectivity_mock = create_wallet_connectivity_mock(); + 
// let (connectivity, connectivity_mock) = create_connectivity_mock(); + // let connectivity_mock_state = connectivity_mock.get_shared_state(); + // task::spawn(connectivity_mock.run()); let service = BaseNodeWalletRpcMockService::new(); let rpc_service_state = service.get_state(); @@ -133,14 +131,14 @@ async fn setup_output_manager_service( let server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let mut mock_server = MockRpcServer::new(server, server_node_identity.clone()); - mock_server.serve(); if with_connection { - let connection = mock_server + let mut connection = mock_server .create_connection(server_node_identity.to_peer(), protocol_name.into()) .await; - connectivity_mock_state.add_active_connection(connection).await; + + wallet_connectivity_mock.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); } let output_manager_service = OutputManagerService::new( OutputManagerServiceConfig { @@ -157,7 +155,7 @@ async fn setup_output_manager_service( constants, shutdown.to_signal(), basenode_service_handle, - connectivity_manager, + wallet_connectivity_mock.clone(), CommsSecretKey::default(), ) .await @@ -168,47 +166,16 @@ async fn setup_output_manager_service( ( output_manager_service_handle, + wallet_connectivity_mock, shutdown, ts_handle, mock_server, server_node_identity, rpc_service_state, - connectivity_mock_state, + event_publisher_bns, ) } -async fn complete_transaction(mut stp: SenderTransactionProtocol, mut oms: OutputManagerHandle) -> Transaction { - let factories = CryptoFactories::default(); - - let sender_tx_id = stp.get_tx_id().unwrap(); - // Is there change? 
Unlikely not to be but the random amounts MIGHT produce a no change output situation - if stp.get_amount_to_self().unwrap() > MicroTari::from(0) { - let pt = oms.get_pending_transactions().await.unwrap(); - assert_eq!(pt.len(), 1); - assert_eq!( - pt.get(&sender_tx_id).unwrap().outputs_to_be_received[0] - .unblinded_output - .value, - stp.get_amount_to_self().unwrap() - ); - } - let msg = stp.build_single_round_message().unwrap(); - let b = TestParams::new(&mut OsRng); - let recv_info = SingleReceiverTransactionProtocol::create( - &msg, - b.nonce, - b.spend_key, - OutputFeatures::default(), - &factories, - None, - ) - .unwrap(); - stp.add_single_recipient_info(recv_info, &factories.range_proof) - .unwrap(); - stp.finalize(KernelFeatures::empty(), &factories).unwrap(); - stp.get_transaction().unwrap().clone() -} - pub async fn setup_oms_with_bn_state( backend: T, height: Option, @@ -217,6 +184,7 @@ pub async fn setup_oms_with_bn_state( Shutdown, TransactionServiceHandle, BaseNodeServiceHandle, + broadcast::Sender>, ) { let shutdown = Shutdown::new(); let factories = CryptoFactories::default(); @@ -233,14 +201,11 @@ pub async fn setup_oms_with_bn_state( let (sender, receiver_bns) = reply_channel::unbounded(); let (event_publisher_bns, _) = broadcast::channel(100); - let base_node_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns); + let base_node_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns.clone()); let mut mock_base_node_service = MockBaseNodeService::new(receiver_bns, shutdown.to_signal()); mock_base_node_service.set_base_node_state(height); task::spawn(mock_base_node_service.run()); - - let (connectivity_manager, connectivity_mock) = create_connectivity_mock(); - let _connectivity_mock_state = connectivity_mock.get_shared_state(); - task::spawn(connectivity_mock.run()); + let connectivity = create_wallet_connectivity_mock(); let output_manager_service = OutputManagerService::new( OutputManagerServiceConfig { @@ 
-257,7 +222,7 @@ pub async fn setup_oms_with_bn_state( constants, shutdown.to_signal(), base_node_service_handle.clone(), - connectivity_manager, + connectivity, CommsSecretKey::default(), ) .await @@ -271,6 +236,7 @@ pub async fn setup_oms_with_bn_state( shutdown, ts_handle, base_node_service_handle, + event_publisher_bns, ) } @@ -317,7 +283,7 @@ async fn fee_estimate() { let backend = OutputManagerSqliteDatabase::new(connection, None); let factories = CryptoFactories::default(); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let (_, uo) = make_input(&mut OsRng.clone(), MicroTari::from(3000), &factories.commitment); oms.add_output(uo).await.unwrap(); @@ -354,7 +320,7 @@ async fn test_utxo_selection_no_chain_metadata() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); // no chain metadata - let (mut oms, _shutdown, _, _) = + let (mut oms, _shutdown, _, _, _) = setup_oms_with_bn_state(OutputManagerSqliteDatabase::new(connection, None), None).await; // no utxos - not enough funds @@ -448,7 +414,7 @@ async fn test_utxo_selection_with_chain_metadata() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); // setup with chain metadata at a height of 6 - let (mut oms, _shutdown, _, _) = + let (mut oms, _shutdown, _, _, _) = setup_oms_with_bn_state(OutputManagerSqliteDatabase::new(connection, None), Some(6)).await; // no utxos - not enough funds @@ -554,115 +520,6 @@ async fn test_utxo_selection_with_chain_metadata() { } } -#[tokio::test] -async fn sending_transaction_and_confirmation() { - let factories = CryptoFactories::default(); - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend.clone(), true).await; - - let (_ti, uo) = 
make_input( - &mut OsRng.clone(), - MicroTari::from(100 + OsRng.next_u64() % 1000), - &factories.commitment, - ); - oms.add_output(uo.clone()).await.unwrap(); - match oms.add_output(uo).await { - Err(OutputManagerError::OutputManagerStorageError(OutputManagerStorageError::DuplicateOutput)) => {}, - _ => panic!("Incorrect error message"), - }; - let num_outputs = 20; - for _i in 0..num_outputs { - let (_ti, uo) = make_input( - &mut OsRng.clone(), - MicroTari::from(100 + OsRng.next_u64() % 1000), - &factories.commitment, - ); - oms.add_output(uo).await.unwrap(); - } - - let stp = oms - .prepare_transaction_to_send( - OsRng.next_u64(), - MicroTari::from(1000), - MicroTari::from(20), - None, - "".to_string(), - script!(Nop), - ) - .await - .unwrap(); - - let sender_tx_id = stp.get_tx_id().unwrap(); - - let tx = complete_transaction(stp, oms.clone()).await; - - let rewind_public_keys = oms.get_rewind_public_keys().await.unwrap(); - - // 1 of the 2 outputs should be rewindable, there should be 2 outputs due to change but if we get unlucky enough - // that there is no change we will skip this aspect of the test - if tx.body.outputs().len() > 1 { - let mut num_rewound = 0; - - let output = tx.body.outputs()[0].clone(); - if output - .rewind_range_proof_value_only( - &factories.range_proof, - &rewind_public_keys.rewind_public_key, - &rewind_public_keys.rewind_blinding_public_key, - ) - .is_ok() - { - num_rewound += 1; - } - - let output = tx.body.outputs()[1].clone(); - if output - .rewind_range_proof_value_only( - &factories.range_proof, - &rewind_public_keys.rewind_public_key, - &rewind_public_keys.rewind_blinding_public_key, - ) - .is_ok() - { - num_rewound += 1; - } - assert_eq!(num_rewound, 1, "Should only be 1 rewindable output"); - } - - oms.confirm_transaction(sender_tx_id, tx.body.inputs().clone(), tx.body.outputs().clone()) - .await - .unwrap(); - - assert_eq!( - oms.get_pending_transactions().await.unwrap().len(), - 0, - "Should have no pending tx" - ); - 
assert_eq!( - oms.get_spent_outputs().await.unwrap().len(), - tx.body.inputs().len(), - "# Outputs should equal number of sent inputs" - ); - assert_eq!( - oms.get_unspent_outputs().await.unwrap().len(), - num_outputs + 1 - oms.get_spent_outputs().await.unwrap().len() + tx.body.outputs().len() - 1, - "Unspent outputs" - ); - - if let DbValue::KeyManagerState(km) = backend.fetch(&DbKey::KeyManagerState).unwrap().unwrap() { - // if we dont have change, we did not move the index forward - if tx.body.outputs().len() > 1 { - assert_eq!(km.primary_key_index, 1); - } else { - assert_eq!(km.primary_key_index, 0); - } - } else { - panic!("No Key Manager set"); - } -} - #[tokio::test] async fn send_not_enough_funds() { let factories = CryptoFactories::default(); @@ -670,7 +527,7 @@ async fn send_not_enough_funds() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let num_outputs = 20; for _i in 0..num_outputs { let (_ti, uo) = make_input( @@ -699,12 +556,10 @@ async fn send_not_enough_funds() { #[tokio::test] async fn send_no_change() { - let factories = CryptoFactories::default(); - let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let fee_per_gram = MicroTari::from(20); let fee_without_change = Fee::calculate(fee_per_gram, 1, 2, 1); @@ -727,7 +582,7 @@ async fn send_no_change() { .await .unwrap(); - let mut stp = oms + let stp = oms .prepare_transaction_to_send( OsRng.next_u64(), MicroTari::from(value1 + value2) - 
fee_without_change, @@ -739,46 +594,18 @@ async fn send_no_change() { .await .unwrap(); - let sender_tx_id = stp.get_tx_id().unwrap(); assert_eq!(stp.get_amount_to_self().unwrap(), MicroTari::from(0)); - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); - - let msg = stp.build_single_round_message().unwrap(); - - let b = TestParams::new(&mut OsRng); - - let recv_info = SingleReceiverTransactionProtocol::create( - &msg, - b.nonce, - b.spend_key, - OutputFeatures::default(), - &factories, - None, - ) - .unwrap(); - - stp.add_single_recipient_info(recv_info, &factories.range_proof) - .unwrap(); - - stp.finalize(KernelFeatures::empty(), &factories).unwrap(); - - let tx = stp.get_transaction().unwrap(); - - oms.confirm_transaction(sender_tx_id, tx.body.inputs().clone(), tx.body.outputs().clone()) - .await - .unwrap(); - - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 0); - assert_eq!(oms.get_spent_outputs().await.unwrap().len(), tx.body.inputs().len()); - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); + assert_eq!( + oms.get_balance().await.unwrap().pending_incoming_balance, + MicroTari::from(0) + ); } - #[tokio::test] async fn send_not_enough_for_change() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let fee_per_gram = MicroTari::from(20); let fee_without_change = Fee::calculate(fee_per_gram, 1, 2, 1); @@ -817,30 +644,6 @@ async fn send_not_enough_for_change() { } } -#[tokio::test] -async fn receiving_and_confirmation() { - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, 
true).await; - - let value = MicroTari::from(5000); - let (tx_id, sender_message) = generate_sender_transaction_message(value); - let rtp = oms.get_recipient_transaction(sender_message).await.unwrap(); - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); - - let output = match rtp.state { - RecipientState::Finalized(s) => s.output, - RecipientState::Failed(_) => panic!("Should not be in Failed state"), - }; - - oms.confirm_transaction(tx_id, vec![], vec![output]).await.unwrap(); - - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 0); - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 1); -} - #[tokio::test] async fn cancel_transaction() { let factories = CryptoFactories::default(); @@ -848,7 +651,7 @@ async fn cancel_transaction() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let num_outputs = 20; for _i in 0..num_outputs { @@ -886,91 +689,28 @@ async fn cancel_transaction_and_reinstate_inbound_tx() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend.clone(), true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend.clone(), true).await; let value = MicroTari::from(5000); let (tx_id, sender_message) = generate_sender_transaction_message(value); let _rtp = oms.get_recipient_transaction(sender_message).await.unwrap(); assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); - let pending_txs = oms.get_pending_transactions().await.unwrap(); - - assert_eq!(pending_txs.len(), 1); 
- - let output = pending_txs - .get(&tx_id) - .unwrap() - .outputs_to_be_received - .first() - .unwrap() - .clone(); - - oms.cancel_transaction(tx_id).await.unwrap(); - - let cancelled_output = backend - .fetch(&DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound)) - .unwrap() - .unwrap(); - - if let DbValue::AnyOutputs(o) = cancelled_output { - let o = o.first().expect("Should be one output in here"); - assert_eq!(o.commitment, output.commitment); - } else { - panic!("Should have found cancelled output"); - } - - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 0); - - oms.reinstate_cancelled_inbound_transaction(tx_id).await.unwrap(); - - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); - let balance = oms.get_balance().await.unwrap(); - assert_eq!(balance.pending_incoming_balance, value); -} - -#[tokio::test] -async fn timeout_transaction() { - let factories = CryptoFactories::default(); - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); + oms.cancel_transaction(tx_id).await.unwrap(); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let balance = oms.get_balance().await.unwrap(); + assert_eq!(balance.pending_incoming_balance, MicroTari::from(0)); - let num_outputs = 20; - for _i in 0..num_outputs { - let (_ti, uo) = make_input( - &mut OsRng.clone(), - MicroTari::from(100 + OsRng.next_u64() % 1000), - &factories.commitment, - ); - oms.add_output(uo).await.unwrap(); - } - let _stp = oms - .prepare_transaction_to_send( - OsRng.next_u64(), - MicroTari::from(1000), - MicroTari::from(20), - None, - "".to_string(), - script!(Nop), - ) + oms.reinstate_cancelled_inbound_transaction_outputs(tx_id) .await .unwrap(); - let remaining_outputs = oms.get_unspent_outputs().await.unwrap().len(); - - time::sleep(Duration::from_millis(2)).await; - - 
oms.timeout_transactions(Duration::from_millis(1000)).await.unwrap(); - - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), remaining_outputs); - - oms.timeout_transactions(Duration::from_millis(1)).await.unwrap(); + let balance = oms.get_balance().await.unwrap(); - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), num_outputs); + assert_eq!(balance.pending_incoming_balance, value); } #[tokio::test] @@ -980,7 +720,7 @@ async fn test_get_balance() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let balance = oms.get_balance().await.unwrap(); @@ -1022,40 +762,6 @@ async fn test_get_balance() { assert_eq!(output_val, balance.pending_outgoing_balance); } -#[tokio::test] -async fn test_confirming_received_output() { - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; - - let value = MicroTari::from(5000); - let (tx_id, sender_message) = generate_sender_transaction_message(value); - let rtp = oms.get_recipient_transaction(sender_message).await.unwrap(); - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); - - let output = match rtp.state { - RecipientState::Finalized(s) => s.output, - RecipientState::Failed(_) => panic!("Should not be in Failed state"), - }; - oms.confirm_transaction(tx_id, vec![], vec![output.clone()]) - .await - .unwrap(); - assert_eq!(oms.get_balance().await.unwrap().available_balance, value); - - let factories = CryptoFactories::default(); - let rewind_public_keys = 
oms.get_rewind_public_keys().await.unwrap(); - let rewind_result = output - .rewind_range_proof_value_only( - &factories.range_proof, - &rewind_public_keys.rewind_public_key, - &rewind_public_keys.rewind_blinding_public_key, - ) - .unwrap(); - assert_eq!(rewind_result.committed_value, value); -} - #[tokio::test] async fn sending_transaction_with_short_term_clear() { let factories = CryptoFactories::default(); @@ -1063,7 +769,7 @@ async fn sending_transaction_with_short_term_clear() { let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend.clone(), true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend.clone(), true).await; let available_balance = 10_000 * uT; let (_ti, uo) = make_input(&mut OsRng.clone(), available_balance, &factories.commitment); @@ -1083,32 +789,10 @@ async fn sending_transaction_with_short_term_clear() { .unwrap(); let balance = oms.get_balance().await.unwrap(); - let expected_change = balance.pending_incoming_balance; assert_eq!(balance.pending_outgoing_balance, available_balance); drop(oms); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend.clone(), true).await; - - let balance = oms.get_balance().await.unwrap(); - assert_eq!(balance.available_balance, available_balance); - - // Check that a unconfirm Pending Transaction can be cancelled - let stp = oms - .prepare_transaction_to_send( - OsRng.next_u64(), - MicroTari::from(1000), - MicroTari::from(20), - None, - "".to_string(), - script!(Nop), - ) - .await - .unwrap(); - let sender_tx_id = stp.get_tx_id().unwrap(); - - let balance = oms.get_balance().await.unwrap(); - assert_eq!(balance.pending_outgoing_balance, available_balance); - oms.cancel_transaction(sender_tx_id).await.unwrap(); + let (mut oms, _, _shutdown, _, _, _, _, _) = 
setup_output_manager_service(backend.clone(), true).await; let balance = oms.get_balance().await.unwrap(); assert_eq!(balance.available_balance, available_balance); @@ -1129,19 +813,10 @@ async fn sending_transaction_with_short_term_clear() { oms.confirm_pending_transaction(sender_tx_id).await.unwrap(); drop(oms); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let balance = oms.get_balance().await.unwrap(); assert_eq!(balance.pending_outgoing_balance, available_balance); - - let tx = complete_transaction(stp, oms.clone()).await; - - oms.confirm_transaction(sender_tx_id, tx.body.inputs().clone(), tx.body.outputs().clone()) - .await - .unwrap(); - - let balance = oms.get_balance().await.unwrap(); - assert_eq!(balance.available_balance, expected_change); } #[tokio::test] @@ -1149,7 +824,7 @@ async fn coin_split_with_change() { let factories = CryptoFactories::default(); let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let val1 = 6_000 * uT; let val2 = 7_000 * uT; @@ -1178,7 +853,7 @@ async fn coin_split_no_change() { let factories = CryptoFactories::default(); let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let fee_per_gram = MicroTari::from(25); let split_count = 15; @@ -1208,7 +883,7 @@ async fn handle_coinbase() { let factories = CryptoFactories::default(); let (connection, 
_tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); - let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; + let (mut oms, _, _shutdown, _, _, _, _, _) = setup_output_manager_service(backend, true).await; let reward1 = MicroTari::from(1000); let fees1 = MicroTari::from(500); @@ -1222,15 +897,12 @@ async fn handle_coinbase() { let _ = oms.get_coinbase_transaction(1, reward1, fees1, 1).await.unwrap(); assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); assert_eq!(oms.get_balance().await.unwrap().pending_incoming_balance, value1); let _tx2 = oms.get_coinbase_transaction(2, reward2, fees2, 1).await.unwrap(); assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); assert_eq!(oms.get_balance().await.unwrap().pending_incoming_balance, value2); let tx3 = oms.get_coinbase_transaction(3, reward3, fees3, 2).await.unwrap(); assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 0); - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 2); assert_eq!( oms.get_balance().await.unwrap().pending_incoming_balance, value2 + value3 @@ -1247,620 +919,454 @@ async fn handle_coinbase() { ) .unwrap(); assert_eq!(rewind_result.committed_value, value3); - - oms.confirm_transaction(3, vec![], vec![output]).await.unwrap(); - - assert_eq!(oms.get_pending_transactions().await.unwrap().len(), 1); - assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 1); - assert_eq!(oms.get_balance().await.unwrap().available_balance, value3); - assert_eq!(oms.get_balance().await.unwrap().pending_incoming_balance, value2); - assert_eq!( - oms.get_balance().await.unwrap().pending_outgoing_balance, - MicroTari::from(0) - ); } #[tokio::test] -async fn test_utxo_stxo_invalid_txo_validation() { +async fn test_txo_validation() { let 
factories = CryptoFactories::default(); let (connection, _tempdir) = get_temp_sqlite_database_connection(); let backend = OutputManagerSqliteDatabase::new(connection, None); + let oms_db = backend.clone(); - let invalid_value = 666; - let invalid_output = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(invalid_value), - ); - let invalid_tx_output = invalid_output.as_transaction_output(&factories).unwrap(); - - let invalid_db_output = DbUnblindedOutput::from_unblinded_output(invalid_output.clone(), &factories).unwrap(); - backend - .write(WriteOperation::Insert(DbKeyValuePair::UnspentOutput( - invalid_db_output.commitment.clone(), - Box::new(invalid_db_output), - ))) - .unwrap(); - backend - .invalidate_unspent_output( - &DbUnblindedOutput::from_unblinded_output(invalid_output.clone(), &factories).unwrap(), - ) - .unwrap(); - - let spent_value1 = 500; - let spent_output1 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(spent_value1), - ); - let spent_tx_output1 = spent_output1.as_transaction_output(&factories).unwrap(); - let spent_db_output1 = DbUnblindedOutput::from_unblinded_output(spent_output1.clone(), &factories).unwrap(); - - backend - .write(WriteOperation::Insert(DbKeyValuePair::SpentOutput( - spent_db_output1.commitment.clone(), - Box::new(spent_db_output1), - ))) - .unwrap(); - - let spent_value2 = 800; - let spent_output2 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(spent_value2), - ); - - let spent_db_output2 = DbUnblindedOutput::from_unblinded_output(spent_output2, &factories).unwrap(); - backend - .write(WriteOperation::Insert(DbKeyValuePair::SpentOutput( - spent_db_output2.commitment.clone(), - Box::new(spent_db_output2), - ))) - .unwrap(); + let ( + mut oms, + wallet_connectivity, + _shutdown, + _ts, + 
mock_rpc_server, + server_node_identity, + rpc_service_state, + base_node_service_event_publisher, + ) = setup_output_manager_service(backend, true).await; - let (mut oms, _shutdown, _ts, _mock_rpc_server, server_node_identity, rpc_service_state, _) = - setup_output_manager_service(backend, true).await; - let mut event_stream = oms.get_event_stream(); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let unspent_value1 = 500; - let unspent_output1 = create_unblinded_output( - TariScript::default(), + let output1_value = 1_000_000; + let output1 = create_unblinded_output( + script!(Nop), OutputFeatures::default(), TestParamsHelpers::new(), - MicroTari::from(unspent_value1), + MicroTari::from(output1_value), ); - let unspent_tx_output1 = unspent_output1.as_transaction_output(&factories).unwrap(); - - oms.add_output(unspent_output1.clone()).await.unwrap(); + let output1_tx_output = output1.as_transaction_output(&factories).unwrap(); + oms.add_output_with_tx_id(1, output1.clone()).await.unwrap(); - let unspent_value2 = 800; - let unspent_output2 = create_unblinded_output( - TariScript::default(), + let output2_value = 2_000_000; + let output2 = create_unblinded_output( + script!(Nop), OutputFeatures::default(), TestParamsHelpers::new(), - MicroTari::from(unspent_value2), + MicroTari::from(output2_value), ); + let output2_tx_output = output2.as_transaction_output(&factories).unwrap(); - oms.add_output(unspent_output2).await.unwrap(); + oms.add_output_with_tx_id(2, output2.clone()).await.unwrap(); - let unspent_value3 = 900; - let unspent_output3 = create_unblinded_output( - TariScript::default(), + let output3_value = 4_000_000; + let output3 = create_unblinded_output( + script!(Nop), 
OutputFeatures::default(), TestParamsHelpers::new(), - MicroTari::from(unspent_value3), + MicroTari::from(output3_value), ); - let unspent_tx_output3 = unspent_output3.as_transaction_output(&factories).unwrap(); - - oms.add_output(unspent_output3.clone()).await.unwrap(); - let unspent_value4 = 901; - let unspent_output4 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value4), - ); - let unspent_tx_output4 = unspent_output4.as_transaction_output(&factories).unwrap(); + oms.add_output_with_tx_id(3, output3.clone()).await.unwrap(); + + let mut block1_header = BlockHeader::new(1); + block1_header.height = 1; + let mut block4_header = BlockHeader::new(1); + block4_header.height = 4; + + let mut block_headers = HashMap::new(); + block_headers.insert(1, block1_header.clone()); + block_headers.insert(4, block4_header.clone()); + rpc_service_state.set_blocks(block_headers.clone()); + + // These responses will mark outputs 1 and 2 and mined confirmed + let responses = vec![ + UtxoQueryResponse { + output: Some(output1_tx_output.clone().into()), + mmr_position: 1, + mined_height: 1, + mined_in_block: block1_header.hash(), + output_hash: output1_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output2_tx_output.clone().into()), + mmr_position: 2, + mined_height: 1, + mined_in_block: block1_header.hash(), + output_hash: output2_tx_output.hash(), + }, + ]; - oms.add_output(unspent_output4.clone()).await.unwrap(); + let utxo_query_responses = UtxoQueryResponses { + best_block: block4_header.hash(), + height_of_longest_chain: 4, + responses, + }; - rpc_service_state.set_utxos(vec![invalid_output.as_transaction_output(&factories).unwrap()]); + rpc_service_state.set_utxo_query_response(utxo_query_responses.clone()); - oms.set_base_node_public_key(server_node_identity.public_key().clone()) - .await - .unwrap(); + // This response sets output1 as spent in the transaction that produced 
output4 + let query_deleted_response = QueryDeletedResponse { + best_block: block4_header.hash(), + height_of_longest_chain: 4, + deleted_positions: vec![], + not_deleted_positions: vec![1, 2], + heights_deleted_at: vec![], + blocks_deleted_in: vec![], + }; - oms.validate_txos(TxoValidationType::Invalid, ValidationRetryStrategy::Limited(5)) + rpc_service_state.set_query_deleted_response(query_deleted_response.clone()); + oms.validate_txos().await.unwrap(); + let _utxo_query_calls = rpc_service_state + .wait_pop_utxo_query_calls(1, Duration::from_secs(60)) .await .unwrap(); - - let _fetch_utxo_calls = rpc_service_state - .wait_pop_fetch_utxos_calls(1, Duration::from_secs(60)) + let _query_deleted_calls = rpc_service_state + .wait_pop_query_deleted(1, Duration::from_secs(60)) .await .unwrap(); - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut success = false; - loop { - tokio::select! { - Ok(event) = event_stream.recv() => { - if let OutputManagerEvent::TxoValidationSuccess(_,TxoValidationType::Invalid) = &*event { - success = true; - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(success, "Did not receive validation success event"); - - let outputs = oms.get_unspent_outputs().await.unwrap(); - - assert_eq!(outputs.len(), 5); + oms.prepare_transaction_to_send( + 4, + MicroTari::from(900_000), + MicroTari::from(10), + None, + "".to_string(), + script!(Nop), + ) + .await + .unwrap(); - rpc_service_state.set_utxos(vec![ - unspent_tx_output1, - invalid_tx_output, - unspent_tx_output4, - unspent_tx_output3, - ]); + let recv_value = MicroTari::from(8_000_000); + let (_recv_tx_id, sender_message) = generate_sender_transaction_message(recv_value); - oms.validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::UntilSuccess) - .await - .unwrap(); + let _ = oms.get_recipient_transaction(sender_message).await.unwrap(); - let _fetch_utxo_calls = rpc_service_state - .wait_pop_fetch_utxos_calls(3, 
Duration::from_secs(60)) + oms.get_coinbase_transaction(6, MicroTari::from(15_000_000), MicroTari::from(1_000_000), 2) .await .unwrap(); - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut success = false; - loop { - tokio::select! { - Ok(event) = event_stream.recv() => { - if let OutputManagerEvent::TxoValidationSuccess(_,TxoValidationType::Unspent) = &*event { - success = true; - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(success, "Did not receive validation success event"); - - let outputs = oms.get_unspent_outputs().await.unwrap(); - - assert_eq!(outputs.len(), 4); - assert!(outputs.iter().any(|o| o == &unspent_output1)); - assert!(outputs.iter().any(|o| o == &unspent_output3)); - assert!(outputs.iter().any(|o| o == &unspent_output4)); - assert!(outputs.iter().any(|o| o == &invalid_output)); + let mut outputs = oms_db.fetch_pending_incoming_outputs().unwrap(); + assert_eq!(outputs.len(), 3); - rpc_service_state.set_utxos(vec![spent_tx_output1]); - - oms.validate_txos(TxoValidationType::Spent, ValidationRetryStrategy::UntilSuccess) - .await + let o5_pos = outputs + .iter() + .position(|o| o.unblinded_output.value == MicroTari::from(8_000_000)) .unwrap(); - - let _fetch_utxo_calls = rpc_service_state - .wait_pop_fetch_utxos_calls(1, Duration::from_secs(60)) - .await + let output5 = outputs.remove(o5_pos); + let o6_pos = outputs + .iter() + .position(|o| o.unblinded_output.value == MicroTari::from(16_000_000)) .unwrap(); + let output6 = outputs.remove(o6_pos); + let output4 = outputs[0].clone(); - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut success = false; - loop { - tokio::select! 
{ - Ok(msg) = event_stream.recv() => { - if let OutputManagerEvent::TxoValidationSuccess(_, TxoValidationType::Spent) = (*msg).clone() { - success = true; - break; - }; - }, - () = &mut delay => { - break; - }, - } - } - assert!(success, "Did not receive validation success event"); - - let outputs = oms.get_unspent_outputs().await.unwrap(); - - assert_eq!(outputs.len(), 5); - assert!(outputs.iter().any(|o| o == &spent_output1)); -} + let output4_tx_output = output4.unblinded_output.as_transaction_output(&factories).unwrap(); + let output5_tx_output = output5.unblinded_output.as_transaction_output(&factories).unwrap(); + let output6_tx_output = output6.unblinded_output.as_transaction_output(&factories).unwrap(); -#[tokio::test] -async fn test_base_node_switch_during_validation() { - let factories = CryptoFactories::default(); - - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - let ( - mut oms, - _shutdown, - _ts, - _mock_rpc_server, - server_node_identity, - mut rpc_service_state, - _connectivity_mock_state, - ) = setup_output_manager_service(backend, true).await; - let mut event_stream = oms.get_event_stream(); - - let unspent_value1 = 500; - let unspent_output1 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value1), + let balance = oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + MicroTari::from(output2_value) + MicroTari::from(output3_value) ); - let unspent_tx_output1 = unspent_output1.as_transaction_output(&factories).unwrap(); - - oms.add_output(unspent_output1).await.unwrap(); - - let unspent_value2 = 800; - let unspent_output2 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value2), + assert_eq!(balance.pending_outgoing_balance, MicroTari::from(output1_value)); + 
assert_eq!( + balance.pending_incoming_balance, + MicroTari::from(output1_value) - + MicroTari::from(900_300) + //Output4 = output 1 -900_000 and 300 for fees + MicroTari::from(8_000_000) + + MicroTari::from(16_000_000) ); - oms.add_output(unspent_output2).await.unwrap(); + // Output 1: Spent in Block 5 - Unconfirmed + // Output 2: Mined block 1 Confirmed Block 4 + // Output 3: Imported so will have Unspent status. + // Output 4: Received in Block 5 - Unconfirmed - Change from spending Output 1 + // Output 5: Received in Block 5 - Unconfirmed + // Output 6: Coinbase from Block 5 - Unconfirmed + + let mut block5_header = BlockHeader::new(1); + block5_header.height = 5; + block_headers.insert(5, block5_header.clone()); + rpc_service_state.set_blocks(block_headers.clone()); + + let responses = vec![ + UtxoQueryResponse { + output: Some(output1_tx_output.clone().into()), + mmr_position: 1, + mined_height: 1, + mined_in_block: block1_header.hash(), + output_hash: output1_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output2_tx_output.clone().into()), + mmr_position: 2, + mined_height: 1, + mined_in_block: block1_header.hash(), + output_hash: output2_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output4_tx_output.clone().into()), + mmr_position: 4, + mined_height: 5, + mined_in_block: block5_header.hash(), + output_hash: output4_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output5_tx_output.clone().into()), + mmr_position: 5, + mined_height: 5, + mined_in_block: block5_header.hash(), + output_hash: output5_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output6_tx_output.clone().into()), + mmr_position: 6, + mined_height: 5, + mined_in_block: block5_header.hash(), + output_hash: output6_tx_output.hash(), + }, + ]; - let unspent_value3 = 900; - let unspent_output3 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value3), - ); - let 
unspent_tx_output3 = unspent_output3.as_transaction_output(&factories).unwrap(); + let mut utxo_query_responses = UtxoQueryResponses { + best_block: block5_header.hash(), + height_of_longest_chain: 5, + responses, + }; - oms.add_output(unspent_output3).await.unwrap(); + rpc_service_state.set_utxo_query_response(utxo_query_responses.clone()); - // First RPC server state - rpc_service_state.set_utxos(vec![unspent_tx_output1, unspent_tx_output3]); - rpc_service_state.set_response_delay(Some(Duration::from_secs(8))); + // This response sets output1 as spent in the transaction that produced output4 + let mut query_deleted_response = QueryDeletedResponse { + best_block: block5_header.hash(), + height_of_longest_chain: 5, + deleted_positions: vec![1], + not_deleted_positions: vec![2, 4, 5, 6], + heights_deleted_at: vec![5], + blocks_deleted_in: vec![block5_header.hash()], + }; - // New base node we will switch to - let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + rpc_service_state.set_query_deleted_response(query_deleted_response.clone()); - oms.set_base_node_public_key(server_node_identity.public_key().clone()) - .await - .unwrap(); + oms.validate_txos().await.unwrap(); - oms.validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::UntilSuccess) + let utxo_query_calls = rpc_service_state + .wait_pop_utxo_query_calls(1, Duration::from_secs(60)) .await .unwrap(); - let _fetch_utxo_calls = rpc_service_state - .wait_pop_fetch_utxos_calls(1, Duration::from_secs(60)) - .await - .unwrap(); + assert_eq!(utxo_query_calls[0].len(), 4); - oms.set_base_node_public_key(new_server_node_identity.public_key().clone()) + let query_deleted_calls = rpc_service_state + .wait_pop_query_deleted(1, Duration::from_secs(60)) .await .unwrap(); + assert_eq!(query_deleted_calls[0].mmr_positions.len(), 5); - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut abort = false; - loop { - tokio::select! 
{ - Ok(msg) = event_stream.recv() => { - if let OutputManagerEvent::TxoValidationAborted(_,_) = (*msg).clone() { - abort = true; - break; - } - } - () = &mut delay => { - break; - }, - } - } - assert!(abort, "Did not receive validation abort"); -} + let balance = oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + MicroTari::from(output2_value) + MicroTari::from(output3_value) + ); -#[tokio::test] -async fn test_txo_validation_connection_timeout_retries() { - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); + assert_eq!(oms.get_unspent_outputs().await.unwrap().len(), 2); - let (mut oms, _shutdown, _ts, _mock_rpc_server, server_node_identity, _rpc_service_state, _connectivity_mock_state) = - setup_output_manager_service(backend, false).await; - let mut event_stream = oms.get_event_stream(); + assert!(oms.get_spent_outputs().await.unwrap().is_empty()); - let unspent_value1 = 500; - let unspent_output1 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value1), - ); + // Now we will update the mined_height in the responses so that the outputs are confirmed + // Output 1: Spent in Block 5 - Confirmed + // Output 2: Mined block 1 Confirmed Block 4 + // Output 3: Imported so will have Unspent status + // Output 4: Received in Block 5 - Confirmed - Change from spending Output 1 + // Output 5: Received in Block 5 - Confirmed + // Output 6: Coinbase from Block 5 - Confirmed - oms.add_output(unspent_output1).await.unwrap(); + utxo_query_responses.height_of_longest_chain = 8; + utxo_query_responses.best_block = [8u8; 16].to_vec(); + rpc_service_state.set_utxo_query_response(utxo_query_responses); - let unspent_value2 = 800; - let unspent_output2 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - 
MicroTari::from(unspent_value2), - ); + query_deleted_response.height_of_longest_chain = 8; + query_deleted_response.best_block = [8u8; 16].to_vec(); + rpc_service_state.set_query_deleted_response(query_deleted_response); - oms.add_output(unspent_output2).await.unwrap(); + oms.validate_txos().await.unwrap(); - oms.set_base_node_public_key(server_node_identity.public_key().clone()) + let utxo_query_calls = rpc_service_state + .wait_pop_utxo_query_calls(1, Duration::from_secs(60)) .await .unwrap(); - oms.validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::Limited(1)) + // The spent transaction is not checked during this second validation + assert_eq!(utxo_query_calls[0].len(), 4); + + let query_deleted_calls = rpc_service_state + .wait_pop_query_deleted(1, Duration::from_secs(60)) .await .unwrap(); + assert_eq!(query_deleted_calls[0].mmr_positions.len(), 5); - let delay = time::sleep(Duration::from_secs(60)); - tokio::pin!(delay); - let mut timeout = 0; - let mut failed = 0; - loop { - tokio::select! 
{ - Ok(event) = event_stream.recv() => { - match &*event { - OutputManagerEvent::TxoValidationTimedOut(_,_) => { - timeout+=1; - }, - OutputManagerEvent::TxoValidationFailure(_,_) => { - failed+=1; - }, - _ => (), - } - - if timeout+failed >= 3 { - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert_eq!(failed, 1); - assert_eq!(timeout, 2); -} - -#[tokio::test] -async fn test_txo_validation_rpc_error_retries() { - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - let (mut oms, _shutdown, _ts, _mock_rpc_server, server_node_identity, rpc_service_state, _connectivity_mock_state) = - setup_output_manager_service(backend, true).await; - let mut event_stream = oms.get_event_stream(); - rpc_service_state.set_rpc_status_error(Some(RpcStatus::bad_request("blah".to_string()))); - - let unspent_value1 = 500; - let unspent_output1 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value1), - ); - - oms.add_output(unspent_output1).await.unwrap(); - - let unspent_value2 = 800; - let unspent_output2 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value2), + let balance = oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + MicroTari::from(output2_value) + MicroTari::from(output3_value) + MicroTari::from(output1_value) - + MicroTari::from(900_300) + //spent 900_000 and 300 for fees + MicroTari::from(8_000_000) + //output 5 + MicroTari::from(16_000_000) // output 6 ); + assert_eq!(balance.pending_outgoing_balance, MicroTari::from(0)); + assert_eq!(balance.pending_incoming_balance, MicroTari::from(0)); - oms.add_output(unspent_output2).await.unwrap(); + // Trigger another validation and only Output3 should be checked + oms.validate_txos().await.unwrap(); - 
oms.set_base_node_public_key(server_node_identity.public_key().clone()) + let utxo_query_calls = rpc_service_state + .wait_pop_utxo_query_calls(1, Duration::from_secs(60)) .await .unwrap(); + assert_eq!(utxo_query_calls.len(), 1); + assert_eq!(utxo_query_calls[0].len(), 1); + assert_eq!( + utxo_query_calls[0][0], + output3.as_transaction_output(&factories).unwrap().hash() + ); - oms.validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::Limited(1)) - .await - .unwrap(); - - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut failed = 0; - loop { - tokio::select! { - event = event_stream.recv() => { - if let Ok(msg) = event { - if let OutputManagerEvent::TxoValidationFailure(_,_) = (*msg).clone() { - failed+=1; - } - } - - if failed >= 1 { - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert_eq!(failed, 1); -} - -#[tokio::test] -async fn test_txo_validation_rpc_timeout() { - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); + // Now we will create responses that result in a reorg of block 5, keeping block4 the same. 
+ // Output 1: Spent in Block 5 - Unconfirmed + // Output 2: Mined block 1 Confirmed Block 4 + // Output 3: Imported so will have Unspent + // Output 4: Received in Block 5 - Unconfirmed - Change from spending Output 1 + // Output 5: Reorged out + // Output 6: Reorged out + let block5_header_reorg = BlockHeader::new(2); + block5_header.height = 5; + let mut block_headers = HashMap::new(); + block_headers.insert(1, block1_header.clone()); + block_headers.insert(4, block4_header.clone()); + block_headers.insert(5, block5_header_reorg.clone()); + rpc_service_state.set_blocks(block_headers.clone()); + + // Update UtxoResponses to not have the received output5 and coinbase output6 + let responses = vec![ + UtxoQueryResponse { + output: Some(output1_tx_output.clone().into()), + mmr_position: 1, + mined_height: 1, + mined_in_block: block1_header.hash(), + output_hash: output1_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output2_tx_output.clone().into()), + mmr_position: 2, + mined_height: 1, + mined_in_block: block1_header.hash(), + output_hash: output2_tx_output.hash(), + }, + UtxoQueryResponse { + output: Some(output4_tx_output.clone().into()), + mmr_position: 4, + mined_height: 5, + mined_in_block: block5_header_reorg.hash(), + output_hash: output4_tx_output.hash(), + }, + ]; - let ( - mut oms, - _shutdown, - _ts, - _mock_rpc_server, - server_node_identity, - mut rpc_service_state, - _connectivity_mock_state, - ) = setup_output_manager_service(backend, true).await; - let mut event_stream = oms.get_event_stream(); - rpc_service_state.set_response_delay(Some(Duration::from_secs(120))); + let mut utxo_query_responses = UtxoQueryResponses { + best_block: block5_header_reorg.hash(), + height_of_longest_chain: 5, + responses, + }; - let unspent_value1 = 500; - let unspent_output1 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value1), - ); + 
rpc_service_state.set_utxo_query_response(utxo_query_responses.clone()); - oms.add_output(unspent_output1).await.unwrap(); + // This response sets output1 as spent in the transaction that produced output4 + let mut query_deleted_response = QueryDeletedResponse { + best_block: block5_header_reorg.hash(), + height_of_longest_chain: 5, + deleted_positions: vec![1], + not_deleted_positions: vec![2, 4, 5, 6], + heights_deleted_at: vec![5], + blocks_deleted_in: vec![block5_header_reorg.hash()], + }; - let unspent_value2 = 800; - let unspent_output2 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value2), - ); + rpc_service_state.set_query_deleted_response(query_deleted_response.clone()); - oms.add_output(unspent_output2).await.unwrap(); + // Trigger validation through a base_node_service event + base_node_service_event_publisher + .send(Arc::new(BaseNodeEvent::BaseNodeStateChanged(BaseNodeState::default()))) + .unwrap(); - oms.set_base_node_public_key(server_node_identity.public_key().clone()) + let _ = rpc_service_state + .wait_pop_get_header_by_height_calls(2, Duration::from_secs(60)) .await .unwrap(); - oms.validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::Limited(1)) + let _utxo_query_calls = rpc_service_state + .wait_pop_utxo_query_calls(1, Duration::from_secs(60)) .await .unwrap(); - let delay = - time::sleep(RpcClientConfig::default().timeout_with_grace_period().unwrap() + Duration::from_secs(30)).fuse(); - tokio::pin!(delay); - let mut failed = 0; - loop { - tokio::select! 
{ - event = event_stream.recv() => { - if let Ok(msg) = event { - if let OutputManagerEvent::TxoValidationFailure(_,_) = &*msg { - failed+=1; - } - } - - if failed >= 1 { - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert_eq!(failed, 1); -} - -#[tokio::test] -async fn test_txo_validation_base_node_not_synced() { - let factories = CryptoFactories::default(); - - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - let (mut oms, _shutdown, _ts, _mock_rpc_server, server_node_identity, rpc_service_state, _connectivity_mock_state) = - setup_output_manager_service(backend, true).await; - let mut event_stream = oms.get_event_stream(); - rpc_service_state.set_is_synced(false); + let _query_deleted_calls = rpc_service_state + .wait_pop_query_deleted(1, Duration::from_secs(60)) + .await + .unwrap(); - let unspent_value1 = 500; - let unspent_output1 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value1), + let balance = oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + MicroTari::from(output2_value) + MicroTari::from(output3_value) + ); + assert_eq!(balance.pending_outgoing_balance, MicroTari::from(output1_value)); + assert_eq!( + balance.pending_incoming_balance, + MicroTari::from(output1_value) - MicroTari::from(900_300) ); - let unspent_tx_output1 = unspent_output1.as_transaction_output(&factories).unwrap(); - oms.add_output(unspent_output1.clone()).await.unwrap(); + // Now we will update the mined_height in the responses so that the outputs on the reorged chain are confirmed + // Output 1: Spent in Block 5 - Confirmed + // Output 2: Mined block 1 Confirmed Block 4 + // Output 3: Imported so will have Unspent + // Output 4: Received in Block 5 - Confirmed - Change from spending Output 1 + // Output 5: Reorged out + // Output 6: Reorged out - let 
unspent_value2 = 800; - let unspent_output2 = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParamsHelpers::new(), - MicroTari::from(unspent_value2), - ); + utxo_query_responses.height_of_longest_chain = 8; + utxo_query_responses.best_block = [8u8; 16].to_vec(); + rpc_service_state.set_utxo_query_response(utxo_query_responses); + + query_deleted_response.height_of_longest_chain = 8; + query_deleted_response.best_block = [8u8; 16].to_vec(); + rpc_service_state.set_query_deleted_response(query_deleted_response); - oms.add_output(unspent_output2).await.unwrap(); + oms.validate_txos().await.unwrap(); - oms.set_base_node_public_key(server_node_identity.public_key().clone()) + let _utxo_query_calls = rpc_service_state + .wait_pop_utxo_query_calls(1, Duration::from_secs(60)) .await .unwrap(); - oms.validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::Limited(5)) + let _query_deleted_calls = rpc_service_state + .wait_pop_query_deleted(1, Duration::from_secs(60)) .await .unwrap(); - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut delayed = 0; - loop { - tokio::select! { - Ok(event) = event_stream.recv() => { - if let OutputManagerEvent::TxoValidationDelayed(_,_) = &*event { - delayed += 1; - } - if delayed >= 2 { - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert_eq!(delayed, 2); - - rpc_service_state.set_is_synced(true); - rpc_service_state.set_utxos(vec![unspent_tx_output1]); - - let delay = time::sleep(Duration::from_secs(60)).fuse(); - tokio::pin!(delay); - let mut success = false; - loop { - tokio::select! 
{ - Ok(event) = event_stream.recv() => { - if let OutputManagerEvent::TxoValidationSuccess(_,_) = &*event { - success = true; - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(success, "Did not receive validation success event"); - - let outputs = oms.get_unspent_outputs().await.unwrap(); - - assert_eq!(outputs.len(), 1); - assert!(outputs.iter().any(|o| o == &unspent_output1)); + let balance = oms.get_balance().await.unwrap(); + assert_eq!( + balance.available_balance, + MicroTari::from(output2_value) + MicroTari::from(output3_value) + MicroTari::from(output1_value) - + MicroTari::from(900_300) + ); + assert_eq!(balance.pending_outgoing_balance, MicroTari::from(0)); + assert_eq!(balance.pending_incoming_balance, MicroTari::from(0)); } #[tokio::test] @@ -1882,7 +1388,7 @@ async fn test_oms_key_manager_discrepancy() { mock_base_node_service.set_default_base_node_state(); task::spawn(mock_base_node_service.run()); - let (connectivity_manager, _connectivity_mock) = create_connectivity_mock(); + let wallet_connectivity = create_wallet_connectivity_mock(); let (connection, _tempdir) = get_temp_sqlite_database_connection(); let db = OutputManagerDatabase::new(OutputManagerSqliteDatabase::new(connection, None)); @@ -1899,7 +1405,7 @@ async fn test_oms_key_manager_discrepancy() { constants.clone(), shutdown.to_signal(), basenode_service_handle.clone(), - connectivity_manager.clone(), + wallet_connectivity.clone(), master_key1.clone(), ) .await @@ -1918,7 +1424,7 @@ async fn test_oms_key_manager_discrepancy() { constants.clone(), shutdown.to_signal(), basenode_service_handle.clone(), - connectivity_manager.clone(), + wallet_connectivity.clone(), master_key1, ) .await @@ -1937,7 +1443,7 @@ async fn test_oms_key_manager_discrepancy() { constants, shutdown.to_signal(), basenode_service_handle, - connectivity_manager, + wallet_connectivity, master_key2, ) .await; @@ -1947,26 +1453,3 @@ async fn test_oms_key_manager_discrepancy() { 
Err(OutputManagerError::MasterSecretKeyMismatch) )); } - -#[tokio::test] -async fn get_coinbase_tx_for_same_height() { - let (connection, _tempdir) = get_temp_sqlite_database_connection(); - - let (mut oms, _shutdown, _, _, _, _, _) = - setup_output_manager_service(OutputManagerSqliteDatabase::new(connection, None), true).await; - - oms.get_coinbase_transaction(1, 100_000.into(), 100.into(), 1) - .await - .unwrap(); - - let pending_transactions = oms.get_pending_transactions().await.unwrap(); - assert!(pending_transactions.values().any(|p| p.tx_id == 1)); - - oms.get_coinbase_transaction(2, 100_000.into(), 100.into(), 1) - .await - .unwrap(); - - let pending_transactions = oms.get_pending_transactions().await.unwrap(); - assert!(!pending_transactions.values().any(|p| p.tx_id == 1)); - assert!(pending_transactions.values().any(|p| p.tx_id == 2)); -} diff --git a/base_layer/wallet/tests/output_manager_service/storage.rs b/base_layer/wallet/tests/output_manager_service/storage.rs index 84e75e805b..d22b17d46a 100644 --- a/base_layer/wallet/tests/output_manager_service/storage.rs +++ b/base_layer/wallet/tests/output_manager_service/storage.rs @@ -20,34 +20,24 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::time::Duration; - use aes_gcm::{ aead::{generic_array::GenericArray, NewAead}, Aes256Gcm, }; -use chrono::{Duration as ChronoDuration, Utc}; -use diesel::result::{DatabaseErrorKind, Error::DatabaseError}; use rand::{rngs::OsRng, RngCore}; -use tari_crypto::{commitment::HomomorphicCommitmentFactory, keys::SecretKey, script::TariScript}; -use tokio::runtime::Runtime; - use tari_common_types::types::PrivateKey; -use tari_core::transactions::{ - helpers::{create_unblinded_output, TestParams}, - tari_amount::MicroTari, - transaction::OutputFeatures, - CryptoFactories, -}; +use tari_core::transactions::{tari_amount::MicroTari, CryptoFactories}; +use tari_crypto::keys::SecretKey; use tari_wallet::output_manager_service::{ error::OutputManagerStorageError, service::Balance, storage::{ - database::{KeyManagerState, OutputManagerBackend, OutputManagerDatabase, PendingTransactionOutputs}, + database::{KeyManagerState, OutputManagerBackend, OutputManagerDatabase}, models::DbUnblindedOutput, sqlite_db::OutputManagerSqliteDatabase, }, }; +use tokio::runtime::Runtime; use crate::support::{data::get_temp_sqlite_database_connection, utils::make_input}; @@ -80,27 +70,35 @@ pub fn test_db_backend(backend: T) { let time_locked_balance = unspent_outputs[4].unblinded_output.value; unspent_outputs.sort(); - // Add some pending transactions + + let outputs = runtime.block_on(db.fetch_sorted_unspent_outputs()).unwrap(); + assert_eq!(unspent_outputs, outputs); + + // Add some sent transactions with outputs to be spent and received + struct PendingTransactionOutputs { + tx_id: u64, + outputs_to_be_spent: Vec, + outputs_to_be_received: Vec, + } + let mut pending_txs = Vec::new(); - for i in 0..3 { + for _ in 0..3 { let mut pending_tx = PendingTransactionOutputs { tx_id: OsRng.next_u64(), outputs_to_be_spent: vec![], outputs_to_be_received: vec![], - timestamp: Utc::now().naive_utc() - - ChronoDuration::from_std(Duration::from_millis(120_000_000 * i)).unwrap(), - 
coinbase_block_height: None, }; - for _ in 0..(OsRng.next_u64() % 5 + 1) { + for _ in 0..4 { let (_ti, uo) = make_input( &mut OsRng, MicroTari::from(100 + OsRng.next_u64() % 1000), &factories.commitment, ); let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); + runtime.block_on(db.add_unspent_output(uo.clone())).unwrap(); pending_tx.outputs_to_be_spent.push(uo); } - for _ in 0..(OsRng.next_u64() % 5 + 1) { + for _ in 0..2 { let (_ti, uo) = make_input( &mut OsRng, MicroTari::from(100 + OsRng.next_u64() % 1000), @@ -110,29 +108,17 @@ pub fn test_db_backend(backend: T) { pending_tx.outputs_to_be_received.push(uo); } runtime - .block_on(db.add_pending_transaction_outputs(pending_tx.clone())) + .block_on(db.encumber_outputs( + pending_tx.tx_id, + pending_tx.outputs_to_be_spent.clone(), + pending_tx.outputs_to_be_received.clone(), + )) .unwrap(); pending_txs.push(pending_tx); } - let outputs = runtime.block_on(db.fetch_sorted_unspent_outputs()).unwrap(); - assert_eq!(unspent_outputs, outputs); - - let p_tx = runtime.block_on(db.fetch_all_pending_transaction_outputs()).unwrap(); - - for (k, v) in p_tx.iter() { - assert_eq!(v, pending_txs.iter().find(|i| &i.tx_id == k).unwrap()); - } - - assert_eq!( - runtime - .block_on(db.fetch_pending_transaction_outputs(pending_txs[0].tx_id)) - .unwrap(), - pending_txs[0] - ); - // Test balance calc - let mut available_balance = unspent_outputs + let available_balance = unspent_outputs .iter() .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); let mut pending_incoming_balance = MicroTari(0); @@ -164,24 +150,9 @@ pub fn test_db_backend(backend: T) { pending_outgoing_balance }); - runtime - .block_on(db.confirm_pending_transaction_outputs(pending_txs[0].tx_id)) - .unwrap(); - - available_balance += pending_txs[0] - .outputs_to_be_received - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); - - pending_incoming_balance -= pending_txs[0] - .outputs_to_be_received - .iter() 
- .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); - - pending_outgoing_balance -= pending_txs[0] - .outputs_to_be_spent - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + for v in pending_txs.iter() { + runtime.block_on(db.confirm_encumbered_outputs(v.tx_id)).unwrap(); + } let balance = runtime.block_on(db.get_balance(None)).unwrap(); assert_eq!(balance, Balance { @@ -191,66 +162,23 @@ pub fn test_db_backend(backend: T) { pending_outgoing_balance }); - let spent_outputs = runtime.block_on(db.fetch_spent_outputs()).unwrap(); - - assert!(!spent_outputs.is_empty()); - assert_eq!( - spent_outputs - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value), - pending_txs[0] - .outputs_to_be_spent - .iter() - .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value) - ); - - let (_ti, uo_change) = make_input( - &mut OsRng.clone(), - MicroTari::from(100 + OsRng.next_u64() % 1000), - &factories.commitment, - ); - let uo_change = DbUnblindedOutput::from_unblinded_output(uo_change, &factories).unwrap(); - let outputs_to_encumber = vec![outputs[0].clone(), outputs[1].clone()]; - let total_encumbered = outputs[0].clone().unblinded_output.value + outputs[1].clone().unblinded_output.value; - runtime - .block_on(db.encumber_outputs(2, outputs_to_encumber, vec![uo_change.clone()])) - .unwrap(); - runtime.block_on(db.confirm_encumbered_outputs(2)).unwrap(); - - available_balance -= total_encumbered; - pending_incoming_balance += uo_change.unblinded_output.value; - pending_outgoing_balance += total_encumbered; + // Set first pending tx to mined but unconfirmed + let mut mmr_pos = 0; + for o in pending_txs[0].outputs_to_be_received.iter() { + runtime + .block_on(db.set_received_output_mined_height(o.hash.clone(), 2, vec![], mmr_pos, false)) + .unwrap(); + mmr_pos += 1; + } + for o in pending_txs[0].outputs_to_be_spent.iter() { + runtime + .block_on(db.mark_output_as_spent(o.hash.clone(), 3, vec![], false)) 
+ .unwrap(); + } + // Balance shouldn't change let balance = runtime.block_on(db.get_balance(None)).unwrap(); - assert_eq!(balance, Balance { - available_balance, - time_locked_balance: None, - pending_incoming_balance, - pending_outgoing_balance - }); - - let (_ti, uo_incoming) = make_input( - &mut OsRng.clone(), - MicroTari::from(100 + OsRng.next_u64() % 1000), - &factories.commitment, - ); - let output = create_unblinded_output( - TariScript::default(), - OutputFeatures::default(), - TestParams::new(), - uo_incoming.value, - ); - runtime - .block_on(db.accept_incoming_pending_transaction( - 5, - DbUnblindedOutput::from_unblinded_output(output, &factories).unwrap(), - None, - )) - .unwrap(); - - pending_incoming_balance += uo_incoming.value; - let balance = runtime.block_on(db.get_balance(None)).unwrap(); assert_eq!(balance, Balance { available_balance, time_locked_balance: None, @@ -258,122 +186,132 @@ pub fn test_db_backend(backend: T) { pending_outgoing_balance }); - runtime - .block_on(db.cancel_pending_transaction_outputs(pending_txs[1].tx_id)) - .unwrap(); + // Set second pending tx to mined and confirmed + for o in pending_txs[1].outputs_to_be_received.iter() { + runtime + .block_on(db.set_received_output_mined_height(o.hash.clone(), 4, vec![], mmr_pos, true)) + .unwrap(); + mmr_pos += 1; + } + for o in pending_txs[1].outputs_to_be_spent.iter() { + runtime + .block_on(db.mark_output_as_spent(o.hash.clone(), 5, vec![], true)) + .unwrap(); + } - let mut cancelled_incoming = MicroTari(0); - let mut cancelled_outgoing = MicroTari(0); + // Balance with confirmed second pending tx + let mut available_balance = unspent_outputs + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + let mut pending_incoming_balance = MicroTari(0); + let mut pending_outgoing_balance = MicroTari(0); - cancelled_outgoing += pending_txs[1] + pending_outgoing_balance += pending_txs[0] .outputs_to_be_spent .iter() .fold(MicroTari::from(0), |acc, x| acc + 
x.unblinded_output.value); - cancelled_incoming += pending_txs[1] + pending_outgoing_balance += pending_txs[2] + .outputs_to_be_spent + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + pending_incoming_balance += pending_txs[0] + .outputs_to_be_received + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); + pending_incoming_balance += pending_txs[2] .outputs_to_be_received .iter() .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); - available_balance += cancelled_outgoing; - pending_incoming_balance -= cancelled_incoming; - pending_outgoing_balance -= cancelled_outgoing; + available_balance += pending_txs[1] + .outputs_to_be_received + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value); let balance = runtime.block_on(db.get_balance(None)).unwrap(); - assert_eq!(balance, Balance { - available_balance, - time_locked_balance: None, - pending_incoming_balance, - pending_outgoing_balance - }); - - let remaining_p_tx = runtime.block_on(db.fetch_all_pending_transaction_outputs()).unwrap(); + assert_eq!( + balance, + Balance { + available_balance, + time_locked_balance: None, + pending_incoming_balance, + pending_outgoing_balance + }, + "Balance should change" + ); + // Add output to be received + let (_ti, uo) = make_input( + &mut OsRng, + MicroTari::from(100 + OsRng.next_u64() % 1000), + &factories.commitment, + ); + let output_to_be_received = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); runtime - .block_on(db.timeout_pending_transaction_outputs(Duration::from_millis(120_000_000_000))) + .block_on(db.add_output_to_be_received(11, output_to_be_received.clone(), None)) .unwrap(); + pending_incoming_balance += output_to_be_received.unblinded_output.value; + let balance = runtime.block_on(db.get_balance(None)).unwrap(); assert_eq!( - runtime - .block_on(db.fetch_all_pending_transaction_outputs()) - .unwrap() - .len(), - remaining_p_tx.len() + balance, + 
Balance { + available_balance, + time_locked_balance: None, + pending_incoming_balance, + pending_outgoing_balance + }, + "Balance should reflect new output to be received" ); + let spent_outputs = runtime.block_on(db.fetch_spent_outputs()).unwrap(); + assert_eq!(spent_outputs.len(), 4); + + let unconfirmed_outputs = runtime.block_on(db.fetch_unconfirmed_outputs()).unwrap(); + assert_eq!(unconfirmed_outputs.len(), 22); + + let mined_unspent_outputs = runtime.block_on(db.fetch_mined_unspent_outputs()).unwrap(); + assert_eq!(mined_unspent_outputs.len(), 4); + + // Spend a received and confirmed output runtime - .block_on(db.timeout_pending_transaction_outputs(Duration::from_millis(6_000_000))) + .block_on(db.mark_output_as_spent(pending_txs[1].outputs_to_be_received[0].hash.clone(), 6, vec![], true)) .unwrap(); - assert_eq!( - runtime - .block_on(db.fetch_all_pending_transaction_outputs()) - .unwrap() - .len(), - remaining_p_tx.len() - 1 - ); + let mined_unspent_outputs = runtime.block_on(db.fetch_mined_unspent_outputs()).unwrap(); + assert_eq!(mined_unspent_outputs.len(), 3); - assert!(!runtime - .block_on(db.fetch_all_pending_transaction_outputs()) - .unwrap() - .contains_key(&pending_txs[2].tx_id)); - - // Test invalidating an output - let invalid_outputs = runtime.block_on(db.get_invalid_outputs()).unwrap(); - assert_eq!(invalid_outputs.len(), 0); - let unspent_outputs = runtime.block_on(db.get_unspent_outputs()).unwrap(); - let _ = runtime - .block_on(db.invalidate_output(unspent_outputs[0].clone())) - .unwrap(); - let invalid_outputs = runtime.block_on(db.get_invalid_outputs()).unwrap(); + let unspent_outputs = runtime.block_on(db.fetch_sorted_unspent_outputs()).unwrap(); + assert_eq!(unspent_outputs.len(), 6); - assert_eq!(invalid_outputs.len(), 1); - assert_eq!(invalid_outputs[0], unspent_outputs[0]); + let last_mined_output = runtime.block_on(db.get_last_mined_output()).unwrap().unwrap(); + assert!(pending_txs[1] + .outputs_to_be_received + .iter() + 
.any(|o| o.commitment == last_mined_output.commitment)); - // test revalidating output - let unspent_outputs = runtime.block_on(db.get_unspent_outputs()).unwrap(); - assert!( - !unspent_outputs - .iter() - .any(|o| o.unblinded_output == invalid_outputs[0].unblinded_output), - "Should not find output" + let last_spent_output = runtime.block_on(db.get_last_spent_output()).unwrap().unwrap(); + assert_eq!( + last_spent_output.commitment, + pending_txs[1].outputs_to_be_received[0].commitment ); - assert!(runtime - .block_on(db.revalidate_output(factories.commitment.commit( - &pending_txs[2].outputs_to_be_spent[0].unblinded_output.spending_key, - &pending_txs[2].outputs_to_be_spent[0].unblinded_output.value.into() - ))) - .is_err()); runtime - .block_on(db.revalidate_output(factories.commitment.commit( - &invalid_outputs[0].unblinded_output.spending_key, - &invalid_outputs[0].unblinded_output.value.into(), - ))) + .block_on(db.remove_output_by_commitment(last_spent_output.commitment)) .unwrap(); - let new_invalid_outputs = runtime.block_on(db.get_invalid_outputs()).unwrap(); - assert_eq!(new_invalid_outputs.len(), 0); - let unspent_outputs = runtime.block_on(db.get_unspent_outputs()).unwrap(); - assert!( - unspent_outputs - .iter() - .any(|o| o.unblinded_output == invalid_outputs[0].unblinded_output), - "Should find revalidated output" + let last_spent_output = runtime.block_on(db.get_last_spent_output()).unwrap().unwrap(); + assert_ne!( + last_spent_output.commitment, + pending_txs[1].outputs_to_be_received[0].commitment ); - let result = runtime.block_on(db.update_spent_output_to_unspent(unspent_outputs[0].commitment.clone())); - assert!(result.is_err()); - let spent_outputs = runtime.block_on(db.get_spent_outputs()).unwrap(); - let updated_output = runtime - .block_on(db.update_spent_output_to_unspent(spent_outputs[0].commitment.clone())) + // Test cancelling a pending transaction + runtime + .block_on(db.cancel_pending_transaction_outputs(pending_txs[2].tx_id)) 
.unwrap(); - let unspent_outputs = runtime.block_on(db.get_unspent_outputs()).unwrap(); - assert!( - unspent_outputs - .iter() - .any(|o| o.unblinded_output == updated_output.unblinded_output), - "Should find updated spent output" - ); + let unspent_outputs = runtime.block_on(db.fetch_sorted_unspent_outputs()).unwrap(); + assert_eq!(unspent_outputs.len(), 10); } #[test] @@ -414,17 +352,6 @@ pub fn test_key_manager_crud() { let read_state1 = runtime.block_on(db.get_key_manager_state()).unwrap().unwrap(); assert_eq!(state1, read_state1); - let state2 = KeyManagerState { - master_key: PrivateKey::random(&mut OsRng), - branch_seed: "blah2".to_string(), - primary_key_index: 0, - }; - - runtime.block_on(db.set_key_manager_state(state2.clone())).unwrap(); - - let read_state2 = runtime.block_on(db.get_key_manager_state()).unwrap().unwrap(); - assert_eq!(state2, read_state2); - runtime.block_on(db.increment_key_index()).unwrap(); runtime.block_on(db.increment_key_index()).unwrap(); @@ -439,77 +366,54 @@ pub async fn test_short_term_encumberance() { let backend = OutputManagerSqliteDatabase::new(connection, None); let db = OutputManagerDatabase::new(backend); - // Add a pending tx - let mut available_balance = MicroTari(0); - let mut pending_tx = PendingTransactionOutputs { - tx_id: OsRng.next_u64(), - outputs_to_be_spent: vec![], - outputs_to_be_received: vec![], - timestamp: Utc::now().naive_utc() - ChronoDuration::from_std(Duration::from_millis(120_000_000)).unwrap(), - coinbase_block_height: None, - }; - for i in 1..4 { - let (_ti, uo) = make_input(&mut OsRng, MicroTari::from(1000 * i), &factories.commitment); - available_balance += uo.value; - let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); + let mut unspent_outputs = Vec::new(); + for i in 0..5 { + let (_ti, uo) = make_input( + &mut OsRng, + MicroTari::from(100 + OsRng.next_u64() % 1000), + &factories.commitment, + ); + let mut uo = DbUnblindedOutput::from_unblinded_output(uo, 
&factories).unwrap(); + uo.unblinded_output.features.maturity = i; db.add_unspent_output(uo.clone()).await.unwrap(); - pending_tx.outputs_to_be_spent.push(uo); + unspent_outputs.push(uo); } - let (_ti, uo) = make_input(&mut OsRng, MicroTari::from(50), &factories.commitment); - let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - pending_tx.outputs_to_be_received.push(uo); - - db.encumber_outputs(pending_tx.tx_id, pending_tx.outputs_to_be_spent.clone(), vec![ - pending_tx.outputs_to_be_received[0].clone(), - ]) - .await - .unwrap(); + db.encumber_outputs(1, unspent_outputs[0..=2].to_vec(), vec![]) + .await + .unwrap(); let balance = db.get_balance(None).await.unwrap(); - assert_eq!(balance.available_balance, MicroTari(0)); + assert_eq!( + balance.available_balance, + unspent_outputs[3..5] + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value) + ); db.clear_short_term_encumberances().await.unwrap(); let balance = db.get_balance(None).await.unwrap(); - assert_eq!(available_balance, balance.available_balance); - - pending_tx.outputs_to_be_received.clear(); - let (_ti, uo) = make_input(&mut OsRng, MicroTari::from(50), &factories.commitment); - let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - pending_tx.outputs_to_be_received.push(uo); + assert_eq!( + balance.available_balance, + unspent_outputs + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value) + ); - db.encumber_outputs(pending_tx.tx_id, pending_tx.outputs_to_be_spent.clone(), vec![ - pending_tx.outputs_to_be_received[0].clone(), - ]) - .await - .unwrap(); + db.encumber_outputs(2, unspent_outputs[0..=2].to_vec(), vec![]) + .await + .unwrap(); - db.confirm_encumbered_outputs(pending_tx.tx_id).await.unwrap(); + db.confirm_encumbered_outputs(2).await.unwrap(); db.clear_short_term_encumberances().await.unwrap(); - let balance = db.get_balance(None).await.unwrap(); - assert_eq!(balance.available_balance, MicroTari(0)); 
- - pending_tx.outputs_to_be_received.clear(); - let (_ti, uo) = make_input(&mut OsRng, MicroTari::from(50), &factories.commitment); - let uo = DbUnblindedOutput::from_unblinded_output(uo, &factories).unwrap(); - pending_tx.outputs_to_be_received.push(uo); - - db.cancel_pending_transaction_outputs(pending_tx.tx_id).await.unwrap(); - - db.encumber_outputs(pending_tx.tx_id, pending_tx.outputs_to_be_spent.clone(), vec![ - pending_tx.outputs_to_be_received[0].clone(), - ]) - .await - .unwrap(); - - db.confirm_pending_transaction_outputs(pending_tx.tx_id).await.unwrap(); - let balance = db.get_balance(None).await.unwrap(); assert_eq!( balance.available_balance, - pending_tx.outputs_to_be_received[0].unblinded_output.value + unspent_outputs[3..5] + .iter() + .fold(MicroTari::from(0), |acc, x| acc + x.unblinded_output.value) ); } @@ -527,44 +431,20 @@ pub async fn test_no_duplicate_outputs() { // add it to the database let result = db.add_unspent_output(uo.clone()).await; assert!(result.is_ok()); - let outputs = db.get_unspent_outputs().await.unwrap(); + let outputs = db.fetch_sorted_unspent_outputs().await.unwrap(); assert_eq!(outputs.len(), 1); // adding it again should be an error let err = db.add_unspent_output(uo.clone()).await.unwrap_err(); assert!(matches!(err, OutputManagerStorageError::DuplicateOutput)); - let outputs = db.get_unspent_outputs().await.unwrap(); + let outputs = db.fetch_sorted_unspent_outputs().await.unwrap(); assert_eq!(outputs.len(), 1); // add a pending transaction with the same duplicate output - let pending_tx = PendingTransactionOutputs { - tx_id: OsRng.next_u64(), - outputs_to_be_spent: vec![], - outputs_to_be_received: vec![uo], - timestamp: Utc::now().naive_utc() - ChronoDuration::from_std(Duration::from_millis(120_000_000)).unwrap(), - coinbase_block_height: None, - }; - match db.add_pending_transaction_outputs(pending_tx.clone()).await { - Ok(()) => { - // memory db storage allows the pending tx but trying to confirm the transaction 
should be an error - let err = db - .confirm_pending_transaction_outputs(pending_tx.tx_id) - .await - .unwrap_err(); - assert!(matches!(err, OutputManagerStorageError::DuplicateOutput)); - }, - Err(e) => { - // sqlite db storage should not even allow the pending tx, since it adds a duplicate in the outputs table - if let OutputManagerStorageError::DieselError(DatabaseError(db_err, _)) = e { - assert!(matches!(db_err, DatabaseErrorKind::UniqueViolation)); - } else { - panic!("Unexpected output manager storage error type: {}", e); - } - }, - } + assert!(db.encumber_outputs(2, vec![], vec![uo.clone()]).await.is_err()); // we should still only have 1 unspent output - let outputs = db.get_unspent_outputs().await.unwrap(); + let outputs = db.fetch_sorted_unspent_outputs().await.unwrap(); assert_eq!(outputs.len(), 1); } diff --git a/base_layer/wallet/tests/support/rpc.rs b/base_layer/wallet/tests/support/comms_rpc.rs similarity index 70% rename from base_layer/wallet/tests/support/rpc.rs rename to base_layer/wallet/tests/support/comms_rpc.rs index 29e99e3372..3a4f306fc4 100644 --- a/base_layer/wallet/tests/support/rpc.rs +++ b/base_layer/wallet/tests/support/comms_rpc.rs @@ -21,12 +21,16 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use std::{ + collections::HashMap, convert::TryFrom, sync::{Arc, Mutex}, time::{Duration, Instant}, }; use tari_common_types::types::Signature; -use tari_comms::protocol::rpc::{Request, Response, RpcStatus}; +use tari_comms::{ + protocol::rpc::{NamedProtocolService, Request, Response, RpcClient, RpcStatus}, + PeerConnection, +}; use tari_core::{ base_node::{ proto::wallet_rpc::{TxLocation, TxQueryResponse, TxSubmissionRejectionReason, TxSubmissionResponse}, @@ -36,15 +40,18 @@ use tari_core::{ proto, proto::{ base_node::{ - ChainMetadata, + ChainMetadata as ChainMetadataProto, FetchMatchingUtxos, FetchUtxosResponse, + QueryDeletedRequest, + QueryDeletedResponse, Signatures as SignaturesProto, TipInfoResponse, - TxQueryBatchResponse as TxQueryBatchResponseProto, TxQueryBatchResponses as TxQueryBatchResponsesProto, TxQueryResponse as TxQueryResponseProto, TxSubmissionResponse as TxSubmissionResponseProto, + UtxoQueryRequest, + UtxoQueryResponses, }, types::{ Signature as SignatureProto, @@ -57,22 +64,18 @@ use tari_core::{ }; use tokio::time::sleep; -/// This macro unlocks a Mutex or RwLock. If the lock is -/// poisoned (i.e. panic while unlocked) the last value -/// before the panic is used. -macro_rules! 
acquire_lock { - ($e:expr, $m:ident) => { - match $e.$m() { - Ok(lock) => lock, - Err(poisoned) => { - log::warn!(target: "wallet", "Lock has been POISONED and will be silently recovered"); - poisoned.into_inner() - }, - } - }; - ($e:expr) => { - acquire_lock!($e, lock) - }; +pub async fn connect_rpc_client(connection: &mut PeerConnection) -> T +where T: From + NamedProtocolService { + let framed = connection + .open_framed_substream(&T::PROTOCOL_NAME.into(), 1024 * 1024) + .await + .unwrap(); + + RpcClient::builder() + .with_protocol_id(T::PROTOCOL_NAME.into()) + .connect(framed) + .await + .unwrap() } #[derive(Clone, Debug)] @@ -80,15 +83,22 @@ pub struct BaseNodeWalletRpcMockState { submit_transaction_calls: Arc>>, transaction_query_calls: Arc>>, transaction_batch_query_calls: Arc>>>, + utxo_query_calls: Arc>>>>, + query_deleted_calls: Arc>>, + get_header_by_height_calls: Arc>>, submit_transaction_response: Arc>, transaction_query_response: Arc>, + transaction_query_batch_response: Arc>, tip_info_response: Arc>, + utxo_query_response: Arc>, + query_deleted_response: Arc>, fetch_utxos_calls: Arc>>>>, response_delay: Arc>>, rpc_status_error: Arc>>, get_header_response: Arc>>, synced: Arc>, utxos: Arc>>, + blocks: Arc>>, } #[allow(clippy::mutex_atomic)] @@ -98,6 +108,9 @@ impl BaseNodeWalletRpcMockState { submit_transaction_calls: Arc::new(Mutex::new(Vec::new())), transaction_query_calls: Arc::new(Mutex::new(Vec::new())), transaction_batch_query_calls: Arc::new(Mutex::new(Vec::new())), + utxo_query_calls: Arc::new(Mutex::new(vec![])), + query_deleted_calls: Arc::new(Mutex::new(vec![])), + get_header_by_height_calls: Arc::new(Mutex::new(vec![])), submit_transaction_response: Arc::new(Mutex::new(TxSubmissionResponse { accepted: true, rejection_reason: TxSubmissionRejectionReason::None, @@ -110,8 +123,14 @@ impl BaseNodeWalletRpcMockState { is_synced: true, height_of_longest_chain: 0, })), + transaction_query_batch_response: 
Arc::new(Mutex::new(TxQueryBatchResponsesProto { + responses: vec![], + tip_hash: Some(vec![]), + is_synced: true, + height_of_longest_chain: 0, + })), tip_info_response: Arc::new(Mutex::new(TipInfoResponse { - metadata: Some(ChainMetadata { + metadata: Some(ChainMetadataProto { height_of_longest_chain: Some(std::u64::MAX), best_block: Some(Vec::new()), accumulated_difficulty: Vec::new(), @@ -119,12 +138,26 @@ impl BaseNodeWalletRpcMockState { }), is_synced: true, })), + utxo_query_response: Arc::new(Mutex::new(UtxoQueryResponses { + responses: vec![], + best_block: vec![], + height_of_longest_chain: 1, + })), + query_deleted_response: Arc::new(Mutex::new(QueryDeletedResponse { + deleted_positions: vec![], + not_deleted_positions: vec![], + best_block: vec![], + height_of_longest_chain: 1, + heights_deleted_at: vec![], + blocks_deleted_in: vec![], + })), fetch_utxos_calls: Arc::new(Mutex::new(Vec::new())), response_delay: Arc::new(Mutex::new(None)), rpc_status_error: Arc::new(Mutex::new(None)), get_header_response: Arc::new(Mutex::new(None)), synced: Arc::new(Mutex::new(true)), utxos: Arc::new(Mutex::new(Vec::new())), + blocks: Arc::new(Mutex::new(Default::default())), } } @@ -143,7 +176,22 @@ impl BaseNodeWalletRpcMockState { *lock = response; } - pub fn set_response_delay(&mut self, delay: Option) { + pub fn set_transaction_query_batch_responses(&self, response: TxQueryBatchResponsesProto) { + let mut lock = acquire_lock!(self.transaction_query_batch_response); + *lock = response; + } + + pub fn set_utxo_query_response(&self, response: UtxoQueryResponses) { + let mut lock = acquire_lock!(self.utxo_query_response); + *lock = response; + } + + pub fn set_query_deleted_response(&self, response: QueryDeletedResponse) { + let mut lock = acquire_lock!(self.query_deleted_response); + *lock = response; + } + + pub fn set_response_delay(&self, delay: Option) { let mut lock = acquire_lock!(self.response_delay); *lock = delay; } @@ -164,6 +212,28 @@ impl 
BaseNodeWalletRpcMockState { *lock = utxos; } + /// This method sets the contents of the UTXO set against which the queries will be made + pub fn set_blocks(&self, blocks: HashMap) { + let mut lock = acquire_lock!(self.blocks); + *lock = blocks; + } + + pub fn take_utxo_query_calls(&self) -> Vec>> { + acquire_lock!(self.utxo_query_calls).drain(..).collect() + } + + pub fn pop_utxo_query_call(&self) -> Option>> { + acquire_lock!(self.utxo_query_calls).pop() + } + + pub fn take_query_deleted_calls(&self) -> Vec { + acquire_lock!(self.query_deleted_calls).drain(..).collect() + } + + pub fn pop_query_deleted_call(&self) -> Option { + acquire_lock!(self.query_deleted_calls).pop() + } + pub fn take_submit_transaction_calls(&self) -> Vec { acquire_lock!(self.submit_transaction_calls).drain(..).collect() } @@ -196,6 +266,58 @@ impl BaseNodeWalletRpcMockState { acquire_lock!(self.fetch_utxos_calls).pop() } + pub fn take_get_header_by_height_calls(&self) -> Vec { + acquire_lock!(self.get_header_by_height_calls).drain(..).collect() + } + + pub fn pop_get_header_by_height_calls(&self) -> Option { + acquire_lock!(self.get_header_by_height_calls).pop() + } + + pub async fn wait_pop_get_header_by_height_calls( + &self, + num_calls: usize, + timeout: Duration, + ) -> Result, String> { + let now = Instant::now(); + let mut count = 0usize; + while now.elapsed() < timeout { + let mut lock = acquire_lock!(self.get_header_by_height_calls); + count = (*lock).len(); + if (*lock).len() >= num_calls { + return Ok((*lock).drain(..num_calls).collect()); + } + drop(lock); + sleep(Duration::from_millis(100)).await; + } + Err(format!( + "Did not receive enough calls within the timeout period, received {}, expected {}.", + count, num_calls + )) + } + + pub async fn wait_pop_utxo_query_calls( + &self, + num_calls: usize, + timeout: Duration, + ) -> Result>>, String> { + let now = Instant::now(); + let mut count = 0usize; + while now.elapsed() < timeout { + let mut lock = 
acquire_lock!(self.utxo_query_calls); + count = (*lock).len(); + if (*lock).len() >= num_calls { + return Ok((*lock).drain(..num_calls).collect()); + } + drop(lock); + sleep(Duration::from_millis(100)).await; + } + Err(format!( + "Did not receive enough calls within the timeout period, received {}, expected {}.", + count, num_calls + )) + } + pub async fn wait_pop_transaction_query_calls( &self, num_calls: usize, @@ -278,6 +400,23 @@ impl BaseNodeWalletRpcMockState { } Err("Did not receive enough calls within the timeout period".to_string()) } + + pub async fn wait_pop_query_deleted( + &self, + num_calls: usize, + timeout: Duration, + ) -> Result, String> { + let now = Instant::now(); + while now.elapsed() < timeout { + let mut lock = acquire_lock!(self.query_deleted_calls); + if (*lock).len() >= num_calls { + return Ok((*lock).drain(..num_calls).collect()); + } + drop(lock); + sleep(Duration::from_millis(100)).await; + } + Err("Did not receive enough calls within the timeout period".to_string()) + } } impl Default for BaseNodeWalletRpcMockState { @@ -333,7 +472,6 @@ impl BaseNodeWalletService for BaseNodeWalletRpcMockService { } let submit_transaction_response_lock = acquire_lock!(self.state.submit_transaction_response); - Ok(Response::new(submit_transaction_response_lock.clone().into())) } @@ -341,6 +479,9 @@ impl BaseNodeWalletService for BaseNodeWalletRpcMockService { &self, request: Request, ) -> Result, RpcStatus> { + // TODO: delay_lock is blocking any other RPC method from being called (as well as blocking an async task) + // until this method returns. 
+ // Although this is sort of fine in tests it is probably unintentional let delay_lock = *acquire_lock!(self.state.response_delay); if let Some(delay) = delay_lock { sleep(delay).await; @@ -381,30 +522,20 @@ impl BaseNodeWalletService for BaseNodeWalletRpcMockService { log::info!("Transaction Batch Query call received: {:?}", signatures); let mut transaction_query_calls_lock = acquire_lock!(self.state.transaction_batch_query_calls); - (*transaction_query_calls_lock).push(signatures.clone()); + (*transaction_query_calls_lock).push(signatures); let status_lock = acquire_lock!(self.state.rpc_status_error); if let Some(status) = (*status_lock).clone() { return Err(status); } - let transaction_query_response_lock = acquire_lock!(self.state.transaction_query_response); - let transaction_query_response = TxQueryResponseProto::from(transaction_query_response_lock.clone()); - let mut responses = Vec::new(); - for sig in signatures.iter() { - let response = TxQueryBatchResponseProto { - signature: Some(sig.clone().into()), - location: transaction_query_response.location, - block_hash: transaction_query_response.block_hash.clone(), - confirmations: transaction_query_response.confirmations, - }; - responses.push(response); - } + let transaction_query_response_lock = acquire_lock!(self.state.transaction_query_batch_response); + + let mut response = transaction_query_response_lock.clone(); + let sync_lock = acquire_lock!(self.state.synced); - Ok(Response::new(TxQueryBatchResponsesProto { - responses, - is_synced: *sync_lock, - })) + response.is_synced = *sync_lock; + Ok(Response::new(response)) } async fn fetch_matching_utxos( @@ -469,11 +600,54 @@ impl BaseNodeWalletService for BaseNodeWalletRpcMockService { .ok_or_else(|| RpcStatus::not_found("get_header_response set to None"))?; Ok(Response::new(resp.into())) } + + async fn utxo_query(&self, request: Request) -> Result, RpcStatus> { + let message = request.into_message(); + + let mut utxo_query_lock = 
acquire_lock!(self.state.utxo_query_calls); + (*utxo_query_lock).push(message.output_hashes); + + let lock = acquire_lock!(self.state.utxo_query_response); + Ok(Response::new(lock.clone())) + } + + async fn query_deleted( + &self, + request: Request, + ) -> Result, RpcStatus> { + let message = request.into_message(); + + let mut query_deleted_lock = acquire_lock!(self.state.query_deleted_calls); + (*query_deleted_lock).push(message); + + let lock = acquire_lock!(self.state.query_deleted_response); + Ok(Response::new(lock.clone())) + } + + async fn get_header_by_height( + &self, + request: Request, + ) -> Result, RpcStatus> { + let height = request.into_message(); + + let mut header_by_height_lock = acquire_lock!(self.state.get_header_by_height_calls); + (*header_by_height_lock).push(height); + + let block_lock = acquire_lock!(self.state.blocks); + + let header = (*block_lock).get(&height).cloned(); + + if let Some(h) = header { + Ok(Response::new(h.into())) + } else { + Err(RpcStatus::not_found("Header not found")) + } + } } #[cfg(test)] mod test { - use crate::support::rpc::BaseNodeWalletRpcMockService; + use crate::support::comms_rpc::BaseNodeWalletRpcMockService; use tari_comms::{ peer_manager::PeerFeatures, protocol::rpc::{mock::MockRpcServer, NamedProtocolService}, diff --git a/base_layer/wallet/tests/support/data.rs b/base_layer/wallet/tests/support/data.rs index 87f177e277..2f5681f765 100644 --- a/base_layer/wallet/tests/support/data.rs +++ b/base_layer/wallet/tests/support/data.rs @@ -49,6 +49,7 @@ pub fn get_temp_sqlite_database_connection() -> (WalletDbConnection, TempDir) { let db_tempdir = tempdir().unwrap(); let db_folder = db_tempdir.path().to_str().unwrap().to_string(); let db_path = format!("{}/{}", db_folder, db_name); + // let db_path = "/tmp/test.sqlite3".to_string(); let connection = run_migration_and_create_sqlite_connection(&db_path).unwrap(); (connection, db_tempdir) diff --git a/base_layer/wallet/tests/support/mod.rs 
b/base_layer/wallet/tests/support/mod.rs index 71d3ff3262..ea01f6c3d9 100644 --- a/base_layer/wallet/tests/support/mod.rs +++ b/base_layer/wallet/tests/support/mod.rs @@ -19,8 +19,8 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +#[macro_use] +pub mod utils; pub mod comms_and_services; +pub mod comms_rpc; pub mod data; -pub mod rpc; -pub mod utils; diff --git a/base_layer/wallet/tests/support/utils.rs b/base_layer/wallet/tests/support/utils.rs index dbc64b7a37..d2fa99c33c 100644 --- a/base_layer/wallet/tests/support/utils.rs +++ b/base_layer/wallet/tests/support/utils.rs @@ -110,3 +110,21 @@ pub fn make_input_with_features( utxo, ) } + +/// This macro unlocks a Mutex or RwLock. If the lock is +/// poisoned (i.e. panic while unlocked) the last value +/// before the panic is used. +macro_rules! acquire_lock { + ($e:expr, $m:ident) => { + match $e.$m() { + Ok(lock) => lock, + Err(poisoned) => { + log::warn!(target: "wallet", "Lock has been POISONED and will be silently recovered"); + poisoned.into_inner() + }, + } + }; + ($e:expr) => { + acquire_lock!($e, lock) + }; +} diff --git a/base_layer/wallet/tests/transaction_service/service.rs b/base_layer/wallet/tests/transaction_service/service.rs index a835508dfa..b34638efe5 100644 --- a/base_layer/wallet/tests/transaction_service/service.rs +++ b/base_layer/wallet/tests/transaction_service/service.rs @@ -20,13 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{ - convert::{TryFrom, TryInto}, - path::Path, - sync::Arc, - time::Duration, +use crate::support::{ + comms_and_services::{create_dummy_message, get_next_memory_address, setup_comms_services}, + comms_rpc::{connect_rpc_client, BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState}, + utils::{make_input, TestParams}, }; - use chrono::{Duration as ChronoDuration, Utc}; use futures::{ channel::{mpsc, mpsc::Sender}, @@ -35,28 +33,12 @@ use futures::{ }; use prost::Message; use rand::{rngs::OsRng, RngCore}; -use tari_crypto::{ - commitment::HomomorphicCommitmentFactory, - common::Blake256, - inputs, - keys::{PublicKey as PK, SecretKey as SK}, - script, - script::{ExecutionStack, TariScript}, -}; -use tempfile::tempdir; -use tokio::{ - runtime, - runtime::{Builder, Runtime}, - sync::{broadcast, broadcast::channel}, -}; - -use crate::{ - support::{ - comms_and_services::{create_dummy_message, get_next_memory_address, setup_comms_services}, - rpc::{BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState}, - utils::{make_input, TestParams}, - }, - transaction_service::transaction_protocols::add_transaction_to_database, +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + path::Path, + sync::Arc, + time::Duration, }; use tari_common_types::{ chain_metadata::ChainMetadata, @@ -66,12 +48,10 @@ use tari_comms::{ message::EnvelopeBody, peer_manager::{NodeIdentity, PeerFeatures}, protocol::rpc::{mock::MockRpcServer, NamedProtocolService}, - test_utils::{ - mocks::{create_connectivity_mock, ConnectivityManagerMockState}, - node_identity::build_node_identity, - }, + test_utils::node_identity::build_node_identity, types::CommsSecretKey, CommsNode, + PeerConnection, }; use tari_comms_dht::outbound::mock::{ create_outbound_service_mock, @@ -84,8 +64,18 @@ use tari_core::{ proto::wallet_rpc::{TxLocation, TxQueryResponse, TxSubmissionRejectionReason, TxSubmissionResponse}, rpc::BaseNodeWalletRpcServer, }, + blocks::BlockHeader, 
consensus::ConsensusConstantsBuilder, - proto::base_node as base_node_proto, + crypto::tari_utilities::Hashable, + proto::{ + base_node as base_node_proto, + base_node::{ + TxLocation as TxLocationProto, + TxQueryBatchResponse as TxQueryBatchResponseProto, + TxQueryBatchResponses as TxQueryBatchResponsesProto, + }, + types::Signature as SignatureProto, + }, transactions::{ fee::Fee, helpers::{create_unblinded_output, TestParams as TestParamsHelpers}, @@ -97,6 +87,14 @@ use tari_core::{ SenderTransactionProtocol, }, }; +use tari_crypto::{ + commitment::HomomorphicCommitmentFactory, + common::Blake256, + inputs, + keys::{PublicKey as PK, SecretKey as SK}, + script, + script::{ExecutionStack, TariScript}, +}; use tari_p2p::{comms_connector::pubsub_connector, domain_message::DomainMessage, Network}; use tari_service_framework::{reply_channel, RegisterHandle, StackBuilder}; use tari_shutdown::{Shutdown, ShutdownSignal}; @@ -104,15 +102,21 @@ use tari_test_utils::random; use tari_wallet::{ base_node_service::{ config::BaseNodeServiceConfig, - handle::BaseNodeServiceHandle, + handle::{BaseNodeEvent, BaseNodeServiceHandle}, mock_base_node_service::MockBaseNodeService, BaseNodeServiceInitializer, }, - connectivity_service::WalletConnectivityInitializer, + connectivity_service::{ + create_wallet_connectivity_mock, + WalletConnectivityHandle, + WalletConnectivityInitializer, + WalletConnectivityInterface, + WalletConnectivityMock, + }, output_manager_service::{ config::OutputManagerServiceConfig, handle::OutputManagerHandle, - service::OutputManagerService, + service::{Balance, OutputManagerService}, storage::{ database::OutputManagerDatabase, models::KnownOneSidedPaymentScript, @@ -142,12 +146,17 @@ use tari_wallet::{ }, sqlite_db::TransactionServiceSqliteDatabase, }, - tasks::start_transaction_validation_and_broadcast_protocols::start_transaction_validation_and_broadcast_protocols, TransactionServiceInitializer, }, - types::{HashDigest, ValidationRetryStrategy}, + 
types::HashDigest, +}; +use tempfile::tempdir; +use tokio::{ + runtime, + runtime::{Builder, Runtime}, + sync::{broadcast, broadcast::channel}, + time::sleep, }; -use tokio::time::sleep; fn create_runtime() -> Runtime { Builder::new_multi_thread() @@ -167,7 +176,12 @@ pub fn setup_transaction_service>( database_path: P, discovery_request_timeout: Duration, shutdown_signal: ShutdownSignal, -) -> (TransactionServiceHandle, OutputManagerHandle, CommsNode) { +) -> ( + TransactionServiceHandle, + OutputManagerHandle, + CommsNode, + WalletConnectivityHandle, +) { let _enter = runtime.enter(); let (publisher, subscription_factory) = pubsub_connector(100, 20); let subscription_factory = Arc::new(subscription_factory); @@ -220,10 +234,15 @@ pub fn setup_transaction_service>( let output_manager_handle = handles.expect_handle::(); let transaction_service_handle = handles.expect_handle::(); + let connectivity_service_handle = handles.expect_handle::(); - (transaction_service_handle, output_manager_handle, comms) + ( + transaction_service_handle, + output_manager_handle, + comms, + connectivity_service_handle, + ) } - /// This utility function creates a Transaction service without using the Service Framework Stack and exposes all the /// streams for testing purposes. 
#[allow(clippy::type_complexity)] @@ -236,31 +255,6 @@ pub fn setup_transaction_service_no_comms( TransactionServiceHandle, OutputManagerHandle, OutboundServiceMockState, - ConnectivityManagerMockState, - Sender>, - Sender>, - Sender>, - Sender>, - Sender>, - Shutdown, - MockRpcServer>, - Arc, - BaseNodeWalletRpcMockState, -) { - setup_transaction_service_no_comms_and_oms_backend(runtime, factories, db_connection, config) -} - -#[allow(clippy::type_complexity)] -pub fn setup_transaction_service_no_comms_and_oms_backend( - runtime: &mut Runtime, - factories: CryptoFactories, - db_connection: WalletDbConnection, - config: Option, -) -> ( - TransactionServiceHandle, - OutputManagerHandle, - OutboundServiceMockState, - ConnectivityManagerMockState, Sender>, Sender>, Sender>, @@ -270,6 +264,9 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( MockRpcServer>, Arc, BaseNodeWalletRpcMockState, + broadcast::Sender>, + WalletConnectivityMock, + PeerConnection, ) { let (oms_request_sender, oms_request_receiver) = reply_channel::unbounded(); @@ -288,15 +285,12 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( let outbound_mock_state = mock_outbound_service.get_state(); runtime.spawn(mock_outbound_service.run()); - let (connectivity_manager, connectivity_mock) = create_connectivity_mock(); - let connectivity_mock_state = connectivity_mock.get_shared_state(); - runtime.spawn(connectivity_mock.run()); - let service = BaseNodeWalletRpcMockService::new(); let rpc_service_state = service.get_state(); let server = BaseNodeWalletRpcServer::new(service); let protocol_name = server.as_protocol_name(); + let server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let mut mock_server = { @@ -309,12 +303,17 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( mock_server.serve(); } - let connection = runtime.block_on(async { + let wallet_connectivity = create_wallet_connectivity_mock(); + + let mut rpc_server_connection = 
runtime.block_on(async { mock_server .create_connection(server_node_identity.to_peer(), protocol_name.into()) .await }); - runtime.block_on(connectivity_mock_state.add_active_connection(connection)); + + runtime.block_on(async { + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut rpc_server_connection).await) + }); let constants = ConsensusConstantsBuilder::new(Network::Weatherwax).build(); @@ -323,7 +322,7 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( let (sender, receiver_bns) = reply_channel::unbounded(); let (event_publisher_bns, _) = broadcast::channel(100); - let basenode_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns); + let basenode_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns.clone()); let mut mock_base_node_service = MockBaseNodeService::new(receiver_bns, shutdown.to_signal()); mock_base_node_service.set_default_base_node_state(); runtime.spawn(mock_base_node_service.run()); @@ -344,8 +343,8 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( factories.clone(), constants, shutdown.to_signal(), - basenode_service_handle, - connectivity_manager.clone(), + basenode_service_handle.clone(), + wallet_connectivity.clone(), CommsSecretKey::default(), )) .unwrap(); @@ -361,6 +360,7 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( transaction_resend_period: Duration::from_secs(200), resend_response_cooldown: Duration::from_secs(200), pending_transaction_cancellation_timeout: Duration::from_secs(300), + transaction_mempool_resubmission_window: Duration::from_secs(2), max_tx_query_batch_size: 2, ..Default::default() }); @@ -377,7 +377,7 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( tx_cancelled_receiver, output_manager_service_handle.clone(), outbound_message_requester, - connectivity_manager, + wallet_connectivity.clone(), event_publisher, Arc::new(NodeIdentity::random( &mut OsRng, @@ -386,6 +386,7 @@ pub fn 
setup_transaction_service_no_comms_and_oms_backend( )), factories, shutdown.to_signal(), + basenode_service_handle, ); runtime.spawn(async move { output_manager_service.start().await.unwrap() }); runtime.spawn(async move { ts_service.start().await.unwrap() }); @@ -393,7 +394,6 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( ts_handle, output_manager_service_handle, outbound_mock_state, - connectivity_mock_state, tx_sender, tx_ack_sender, tx_finalized_sender, @@ -403,6 +403,9 @@ pub fn setup_transaction_service_no_comms_and_oms_backend( mock_server, server_node_identity, rpc_service_state, + event_publisher_bns, + wallet_connectivity, + rpc_server_connection, ) } @@ -493,7 +496,7 @@ fn manage_single_transaction() { let (bob_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); let shutdown = Shutdown::new(); - let (mut alice_ts, mut alice_oms, _alice_comms) = setup_transaction_service( + let (mut alice_ts, mut alice_oms, _alice_comms, mut alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity.clone(), vec![], @@ -503,15 +506,14 @@ fn manage_single_transaction() { Duration::from_secs(0), shutdown.to_signal(), ); - runtime - .block_on(alice_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); + + alice_connectivity.set_base_node(base_node_identity.to_peer()); let mut alice_event_stream = alice_ts.get_event_stream(); runtime.block_on(async { sleep(Duration::from_secs(2)).await }); - let (mut bob_ts, mut bob_oms, bob_comms) = setup_transaction_service( + let (mut bob_ts, mut bob_oms, bob_comms, mut bob_connectivity) = setup_transaction_service( &mut runtime, bob_node_identity.clone(), vec![alice_node_identity.clone()], @@ -521,9 +523,7 @@ fn manage_single_transaction() { Duration::from_secs(0), shutdown.to_signal(), ); - runtime - .block_on(bob_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); + 
bob_connectivity.set_base_node(base_node_identity.to_peer()); let mut bob_event_stream = bob_ts.get_event_stream(); @@ -584,10 +584,10 @@ fn manage_single_transaction() { loop { tokio::select! { event = bob_event_stream.recv() => { - println!("bob: {:?}", &*event.as_ref().unwrap()); if let TransactionEvent::ReceivedFinalizedTransaction(id) = &*event.unwrap() { tx_id = *id; finalized+=1; + break; } }, () = &mut delay => { @@ -600,16 +600,15 @@ fn manage_single_transaction() { assert!(runtime.block_on(bob_ts.get_completed_transaction(999)).is_err()); - let bob_completed_tx = runtime + let _bob_completed_tx = runtime .block_on(bob_ts.get_completed_transaction(tx_id)) .expect("Could not find tx"); - runtime - .block_on(bob_oms.confirm_transaction(tx_id, vec![], bob_completed_tx.transaction.body.outputs().clone())) - .unwrap(); - assert_eq!( - runtime.block_on(bob_oms.get_balance()).unwrap().available_balance, + runtime + .block_on(bob_oms.get_balance()) + .unwrap() + .pending_incoming_balance, value ); } @@ -644,7 +643,7 @@ fn single_transaction_to_self() { let (db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); let shutdown = Shutdown::new(); - let (mut alice_ts, mut alice_oms, _alice_comms) = setup_transaction_service( + let (mut alice_ts, mut alice_oms, _alice_comms, mut alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity.clone(), vec![], @@ -655,14 +654,11 @@ fn single_transaction_to_self() { shutdown.to_signal(), ); - runtime.block_on(async move { - alice_ts - .set_base_node_public_key(base_node_identity.public_key().clone()) - .await - .unwrap(); + alice_connectivity.set_base_node(base_node_identity.to_peer()); + runtime.block_on(async move { let initial_wallet_value = 2500.into(); - let (utxo, uo1) = make_input(&mut OsRng, initial_wallet_value, &factories.commitment); + let (_utxo, uo1) = make_input(&mut OsRng, initial_wallet_value, &factories.commitment); 
alice_oms.add_output(uo1).await.unwrap(); let message = "TAKE MAH _OWN_ MONEYS!".to_string(); @@ -682,14 +678,10 @@ fn single_transaction_to_self() { .await .expect("Could not find tx"); - alice_oms - .confirm_transaction(tx_id, vec![utxo], completed_tx.transaction.body.outputs().clone()) - .await - .unwrap(); let fees = completed_tx.fee; assert_eq!( - alice_oms.get_balance().await.unwrap().available_balance, + alice_oms.get_balance().await.unwrap().pending_incoming_balance, initial_wallet_value - fees ); }); @@ -733,7 +725,7 @@ fn send_one_sided_transaction_to_other() { let (db_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); let shutdown = Shutdown::new(); - let (mut alice_ts, mut alice_oms, _alice_comms) = setup_transaction_service( + let (mut alice_ts, mut alice_oms, _alice_comms, mut alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity, vec![], @@ -746,12 +738,10 @@ fn send_one_sided_transaction_to_other() { let mut alice_event_stream = alice_ts.get_event_stream(); - runtime - .block_on(alice_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(base_node_identity.to_peer()); let initial_wallet_value = 2500.into(); - let (utxo, uo1) = make_input(&mut OsRng, initial_wallet_value, &factories.commitment); + let (_utxo, uo1) = make_input(&mut OsRng, initial_wallet_value, &factories.commitment); let mut alice_oms_clone = alice_oms.clone(); runtime.block_on(async move { alice_oms_clone.add_output(uo1).await.unwrap() }); @@ -776,14 +766,10 @@ fn send_one_sided_transaction_to_other() { .await .expect("Could not find completed one-sided tx"); - alice_oms - .confirm_transaction(tx_id, vec![utxo], completed_tx.transaction.body.outputs().clone()) - .await - .unwrap(); let fees = completed_tx.fee; assert_eq!( - alice_oms.get_balance().await.unwrap().available_balance, + alice_oms.get_balance().await.unwrap().pending_incoming_balance, 
initial_wallet_value - value - fees ); }); @@ -852,7 +838,7 @@ fn recover_one_sided_transaction() { let (bob_connection, _tempdir) = make_wallet_database_connection(Some(database_path2.clone())); let shutdown = Shutdown::new(); - let (mut alice_ts, alice_oms, _alice_comms) = setup_transaction_service( + let (mut alice_ts, alice_oms, _alice_comms, mut alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity, vec![], @@ -863,7 +849,7 @@ fn recover_one_sided_transaction() { shutdown.to_signal(), ); - let (_bob_ts, mut bob_oms, _bob_comms) = setup_transaction_service( + let (_bob_ts, mut bob_oms, _bob_comms, _bob_connectivity) = setup_transaction_service( &mut runtime, bob_node_identity.clone(), vec![], @@ -885,9 +871,7 @@ fn recover_one_sided_transaction() { cloned_bob_oms.add_known_script(known_script).await.unwrap(); }); - runtime - .block_on(alice_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(base_node_identity.to_peer()); let initial_wallet_value = 2500.into(); let (_utxo, uo1) = make_input(&mut OsRng, initial_wallet_value, &factories.commitment); @@ -960,7 +944,7 @@ fn send_one_sided_transaction_to_self() { let (alice_connection, _tempdir) = make_wallet_database_connection(Some(database_path.clone())); let shutdown = Shutdown::new(); - let (mut alice_ts, alice_oms, _alice_comms) = setup_transaction_service( + let (alice_ts, alice_oms, _alice_comms, mut alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity.clone(), vec![], @@ -971,9 +955,7 @@ fn send_one_sided_transaction_to_self() { shutdown.to_signal(), ); - runtime - .block_on(alice_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(base_node_identity.to_peer()); let initial_wallet_value = 2500.into(); let (_utxo, uo1) = make_input(&mut OsRng, initial_wallet_value, &factories.commitment); @@ -1045,7 +1027,7 @@ fn 
manage_multiple_transactions() { let mut shutdown = Shutdown::new(); - let (mut alice_ts, mut alice_oms, alice_comms) = setup_transaction_service( + let (mut alice_ts, mut alice_oms, alice_comms, _alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity.clone(), vec![bob_node_identity.clone(), carol_node_identity.clone()], @@ -1060,7 +1042,7 @@ fn manage_multiple_transactions() { runtime.block_on(async { sleep(Duration::from_secs(5)).await }); // Spin up Bob and Carol - let (mut bob_ts, mut bob_oms, bob_comms) = setup_transaction_service( + let (mut bob_ts, mut bob_oms, bob_comms, _bob_connectivity) = setup_transaction_service( &mut runtime, bob_node_identity.clone(), vec![alice_node_identity.clone()], @@ -1073,7 +1055,7 @@ fn manage_multiple_transactions() { let mut bob_event_stream = bob_ts.get_event_stream(); runtime.block_on(async { sleep(Duration::from_secs(5)).await }); - let (mut carol_ts, mut carol_oms, carol_comms) = setup_transaction_service( + let (mut carol_ts, mut carol_oms, carol_comms, _carol_connectivity) = setup_transaction_service( &mut runtime, carol_node_identity.clone(), vec![alice_node_identity.clone()], @@ -1275,7 +1257,6 @@ fn test_accepting_unknown_tx_id_and_malformed_reply() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, _alice_tx_sender, mut alice_tx_ack_sender, _, @@ -1285,6 +1266,9 @@ fn test_accepting_unknown_tx_id_and_malformed_reply() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection_alice, None); let mut alice_event_stream = alice_ts.get_event_stream(); @@ -1337,20 +1321,17 @@ fn test_accepting_unknown_tx_id_and_malformed_reply() { runtime.block_on(async { let delay = sleep(Duration::from_secs(30)); -tokio::pin!(delay); - tokio::pin!(delay); + let mut errors = 0; loop { tokio::select! 
{ event = alice_event_stream.recv() => { - log::error!("ERROR: {:?}", event); if let TransactionEvent::Error(s) = &*event.unwrap() { - if s == &"TransactionProtocolError(TransactionBuildError(InvalidSignatureError(\"Verifying kernel signature\")))".to_string() - { + if s == &"TransactionProtocolError(TransactionBuildError(InvalidSignatureError(\"Verifying kernel signature\")))".to_string() { errors+=1; } - if errors >= 2 { + if errors >= 1 { break; } } @@ -1383,7 +1364,6 @@ fn finalize_tx_with_incorrect_pubkey() { mut alice_ts, _alice_output_manager, alice_outbound_service, - _, mut alice_tx_sender, _alice_tx_ack_sender, mut alice_tx_finalized, @@ -1393,6 +1373,9 @@ fn finalize_tx_with_incorrect_pubkey() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection_alice, None); let mut alice_event_stream = alice_ts.get_event_stream(); @@ -1407,17 +1390,17 @@ fn finalize_tx_with_incorrect_pubkey() { _, _, _, - _, _shutdown, _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection_bob, None); let (_utxo, uo) = make_input(&mut OsRng, MicroTari(250000), &factories.commitment); - runtime.block_on(bob_output_manager.add_output(uo)).unwrap(); - let mut stp = runtime .block_on(bob_output_manager.prepare_transaction_to_send( OsRng.next_u64(), @@ -1509,7 +1492,6 @@ fn finalize_tx_with_missing_output() { mut alice_ts, _alice_output_manager, alice_outbound_service, - _, mut alice_tx_sender, _alice_tx_ack_sender, mut alice_tx_finalized, @@ -1519,6 +1501,9 @@ fn finalize_tx_with_missing_output() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection_alice, None); let mut alice_event_stream = alice_ts.get_event_stream(); @@ -1528,7 +1513,6 @@ fn finalize_tx_with_missing_output() { _bob_ts, mut bob_output_manager, _bob_outbound_service, - _, _bob_tx_sender, 
_bob_tx_ack_sender, _, @@ -1538,6 +1522,9 @@ fn finalize_tx_with_missing_output() { _, _, _, + _, + _, + _rpc_server_connection_bob, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection_bob, None); let (_utxo, uo) = make_input(&mut OsRng, MicroTari(250000), &factories.commitment); @@ -1667,7 +1654,7 @@ fn discovery_async_return_test() { let (carol_connection, _temp_dir1) = make_wallet_database_connection(None); - let (_carol_ts, _carol_oms, carol_comms) = setup_transaction_service( + let (_carol_ts, _carol_oms, carol_comms, _carol_connectivity) = setup_transaction_service( &mut runtime, carol_node_identity.clone(), vec![], @@ -1680,7 +1667,7 @@ fn discovery_async_return_test() { let (alice_connection, _temp_dir2) = make_wallet_database_connection(None); - let (mut alice_ts, mut alice_oms, alice_comms) = setup_transaction_service( + let (mut alice_ts, mut alice_oms, alice_comms, _alice_connectivity) = setup_transaction_service( &mut runtime, alice_node_identity, vec![carol_node_identity.clone()], @@ -1754,8 +1741,6 @@ fn discovery_async_return_test() { let delay = sleep(Duration::from_secs(60)); tokio::pin!(delay); - tokio::pin!(delay); - loop { tokio::select! 
{ event = alice_event_stream.recv() => { @@ -1839,6 +1824,7 @@ fn test_power_mode_updates() { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let completed_tx2 = CompletedTransaction { @@ -1859,6 +1845,7 @@ fn test_power_mode_updates() { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; tx_backend @@ -1883,16 +1870,26 @@ fn test_power_mode_updates() { _, _, _, - _, _shutdown, - _, + _mock_rpc_server, server_node_identity, rpc_service_state, + _, + mut alice_connectivity, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(server_node_identity.to_peer()); + + alice_connectivity.notify_base_node_set(server_node_identity.to_peer()); + + rpc_service_state.set_transaction_query_response(TxQueryResponse { + location: TxLocation::NotStored, + block_hash: None, + confirmations: 0, + is_synced: true, + height_of_longest_chain: 10, + }); let result = runtime.block_on(alice_ts.restart_broadcast_protocols()); @@ -1900,7 +1897,7 @@ fn test_power_mode_updates() { // Wait for first 4 messages let _ = runtime - .block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(60))) + .block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(20))) .unwrap(); runtime.block_on(alice_ts.set_low_power_mode()).unwrap(); @@ -1923,16 +1920,17 @@ fn test_set_num_confirmations() { let (connection, _temp_dir) = make_wallet_database_connection(None); - let (mut ts, _, _, _, _, _, _, _, _, _shutdown, _, _, _) = setup_transaction_service_no_comms( - &mut runtime, - factories, - connection, - Some(TransactionServiceConfig { - broadcast_monitoring_timeout: Duration::from_secs(20), - chain_monitoring_timeout: Duration::from_secs(20), - ..Default::default() - }), - ); + let (mut ts, _, _, _, 
_, _, _, _, _shutdown, _, _, _, _, _, _rpc_server_connection) = + setup_transaction_service_no_comms( + &mut runtime, + factories, + connection, + Some(TransactionServiceConfig { + broadcast_monitoring_timeout: Duration::from_secs(20), + chain_monitoring_timeout: Duration::from_secs(20), + ..Default::default() + }), + ); let num_confirmations_required = runtime.block_on(ts.get_num_confirmations_required()).unwrap(); assert_eq!( @@ -1963,7 +1961,6 @@ fn test_transaction_cancellation() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, mut alice_tx_sender, _, _, @@ -1973,6 +1970,9 @@ fn test_transaction_cancellation() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms( &mut runtime, factories.clone(), @@ -2274,7 +2274,6 @@ fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, mut _alice_tx_sender, mut alice_tx_ack_sender, _, @@ -2284,6 +2283,9 @@ fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection, None); let alice_total_available = 250000 * uT; @@ -2325,17 +2327,32 @@ fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { let (connection, _temp_dir) = make_wallet_database_connection(None); // Test sending the Reply to a receiver with Direct and then with SAF and never both - let (_bob_ts, _, bob_outbound_service, _, mut bob_tx_sender, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories.clone(), - connection, - Some(TransactionServiceConfig { - broadcast_monitoring_timeout: Duration::from_secs(20), - chain_monitoring_timeout: Duration::from_secs(20), - ..Default::default() - }), - ); + let ( + _bob_ts, + _, + bob_outbound_service, + mut bob_tx_sender, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + _, + _rpc_server_connection_bob, + ) = 
setup_transaction_service_no_comms( + &mut runtime, + factories.clone(), + connection, + Some(TransactionServiceConfig { + broadcast_monitoring_timeout: Duration::from_secs(20), + chain_monitoring_timeout: Duration::from_secs(20), + ..Default::default() + }), + ); bob_outbound_service.set_behaviour(MockBehaviour { direct: ResponseType::Queued, @@ -2366,17 +2383,32 @@ fn test_direct_vs_saf_send_of_tx_reply_and_finalize() { assert_eq!(bob_outbound_service.call_count(), 0, "Should be no more calls"); let (connection, _temp_dir) = make_wallet_database_connection(None); - let (_bob2_ts, _, bob2_outbound_service, _, mut bob2_tx_sender, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories.clone(), - connection, - Some(TransactionServiceConfig { - broadcast_monitoring_timeout: Duration::from_secs(20), - chain_monitoring_timeout: Duration::from_secs(20), - ..Default::default() - }), - ); + let ( + _bob2_ts, + _, + bob2_outbound_service, + mut bob2_tx_sender, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + _, + _rpc_server_connection_bob2, + ) = setup_transaction_service_no_comms( + &mut runtime, + factories.clone(), + connection, + Some(TransactionServiceConfig { + broadcast_monitoring_timeout: Duration::from_secs(20), + chain_monitoring_timeout: Duration::from_secs(20), + ..Default::default() + }), + ); bob2_outbound_service.set_behaviour(MockBehaviour { direct: ResponseType::Failed, broadcast: ResponseType::Queued, @@ -2512,7 +2544,6 @@ fn test_tx_direct_send_behaviour() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, mut _alice_tx_sender, mut _alice_tx_ack_sender, _, @@ -2522,6 +2553,9 @@ fn test_tx_direct_send_behaviour() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection, None); let mut alice_event_stream = alice_ts.get_event_stream(); @@ -2552,7 +2586,7 @@ fn test_tx_direct_send_behaviour() { runtime.block_on(async 
{ let delay = sleep(Duration::from_secs(60)); -tokio::pin!(delay); + tokio::pin!(delay); let mut direct_count = 0; let mut saf_count = 0; loop { @@ -2560,7 +2594,8 @@ tokio::pin!(delay); event = alice_event_stream.recv() => { match &*event.unwrap() { TransactionEvent::TransactionDirectSendResult(_, result) => if !result { direct_count+=1 }, - TransactionEvent::TransactionStoreForwardSendResult(_, result) => if !result { saf_count+=1}, _ => (), + TransactionEvent::TransactionStoreForwardSendResult(_, result) => if !result { saf_count+=1}, + _ => (), } if direct_count == 1 && saf_count == 1 { @@ -2596,7 +2631,7 @@ tokio::pin!(delay); runtime.block_on(async { let delay = sleep(Duration::from_secs(60)); -tokio::pin!(delay); + tokio::pin!(delay); let mut direct_count = 0; let mut saf_count = 0; loop { @@ -2604,8 +2639,8 @@ tokio::pin!(delay); event = alice_event_stream.recv() => { match &*event.unwrap() { TransactionEvent::TransactionDirectSendResult(_, result) => if !result { direct_count+=1 }, - TransactionEvent::TransactionStoreForwardSendResult(_, result) => if *result { saf_count+=1 -}, _ => (), + TransactionEvent::TransactionStoreForwardSendResult(_, result) => if *result { saf_count+=1 }, + _ => (), } if direct_count == 1 && saf_count == 1 { @@ -2641,7 +2676,7 @@ tokio::pin!(delay); runtime.block_on(async { let delay = sleep(Duration::from_secs(60)); -tokio::pin!(delay); + tokio::pin!(delay); let mut direct_count = 0; loop { tokio::select! { @@ -2690,9 +2725,8 @@ tokio::pin!(delay); tokio::select! 
{ event = alice_event_stream.recv() => { match &*event.unwrap() { - TransactionEvent::TransactionStoreForwardSendResult(_, result) => if *result { saf_count+=1 -}, TransactionEvent::TransactionDirectSendResult(_, result) => if *result { panic!( -"Should be no direct messages") }, _ => (), + TransactionEvent::TransactionStoreForwardSendResult(_, result) => if *result { saf_count+=1}, + TransactionEvent::TransactionDirectSendResult(_, result) => if *result { panic!("Should be no direct messages") }, _ => (), } if saf_count >= 1 { @@ -2832,19 +2866,32 @@ fn test_restarting_transaction_protocols() { .unwrap(); // Test that Bob's node restarts the send protocol - let (mut bob_ts, _bob_oms, _bob_outbound_service, _, _, mut bob_tx_reply, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms(&mut runtime, factories.clone(), bob_connection, None); - let mut bob_event_stream = bob_ts.get_event_stream(); - - runtime - .block_on(bob_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); - assert!(runtime.block_on(bob_ts.restart_transaction_protocols()).is_ok()); - - runtime - .block_on(bob_tx_reply.send(create_dummy_message(alice_reply.into(), alice_identity.public_key()))) - .unwrap(); - + let ( + mut bob_ts, + _bob_oms, + _bob_outbound_service, + _, + mut bob_tx_reply, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + mut bob_connectivity, + _rpc_server_connection_bob, + ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), bob_connection, None); + let mut bob_event_stream = bob_ts.get_event_stream(); + + bob_connectivity.set_base_node(base_node_identity.to_peer()); + assert!(runtime.block_on(bob_ts.restart_transaction_protocols()).is_ok()); + + runtime + .block_on(bob_tx_reply.send(create_dummy_message(alice_reply.into(), alice_identity.public_key()))) + .unwrap(); + runtime.block_on(async { let delay = sleep(Duration::from_secs(15)); tokio::pin!(delay); @@ -2867,13 +2914,27 @@ fn 
test_restarting_transaction_protocols() { }); // Test Alice's node restarts the receive protocol - let (mut alice_ts, _alice_oms, _alice_outbound_service, _, _, _, mut alice_tx_finalized, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms(&mut runtime, factories, alice_connection, None); + let ( + mut alice_ts, + _alice_oms, + _alice_outbound_service, + _, + _, + mut alice_tx_finalized, + _, + _, + _shutdown, + _, + _, + _, + _, + mut alice_connectivity, + _rpc_server_connection, + ) = setup_transaction_service_no_comms(&mut runtime, factories, alice_connection, None); let mut alice_event_stream = alice_ts.get_event_stream(); - runtime - .block_on(alice_ts.set_base_node_public_key(base_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(base_node_identity.to_peer()); + assert!(runtime.block_on(alice_ts.restart_transaction_protocols()).is_ok()); let finalized_transaction_message = proto::TransactionFinalizedMessage { @@ -2921,7 +2982,6 @@ fn test_coinbase_transactions_rejection_same_height() { mut alice_ts, mut alice_output_manager, _, - _connectivity_mock_state, _, _, _, @@ -2931,6 +2991,9 @@ fn test_coinbase_transactions_rejection_same_height() { _mock_rpc_server, _server_node_identity, _rpc_service_state, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); let block_height_a = 10; @@ -3009,17 +3072,17 @@ fn test_coinbase_transactions_rejection_same_height() { } #[test] -fn test_coinbase_monitoring_stuck_in_mempool() { +fn test_coinbase_generation_and_monitoring() { let factories = CryptoFactories::default(); let mut runtime = Runtime::new().unwrap(); let (connection, _temp_dir) = make_wallet_database_connection(None); - + let tx_backend = TransactionServiceSqliteDatabase::new(connection.clone(), None); + let db = TransactionDatabase::new(tx_backend); let ( mut alice_ts, mut alice_output_manager, _, - _connectivity_mock_state, _, _, _, @@ -3028,7 
+3091,10 @@ fn test_coinbase_monitoring_stuck_in_mempool() { _shutdown, _mock_rpc_server, server_node_identity, - mut rpc_service_state, + rpc_service_state, + _, + mut alice_connectivity, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); let mut alice_event_stream = alice_ts.get_event_stream(); rpc_service_state.set_response_delay(Some(Duration::from_secs(1))); @@ -3040,6 +3106,7 @@ fn test_coinbase_monitoring_stuck_in_mempool() { let reward1 = 1_000_000 * uT; let fees2 = 2000 * uT; + let fees2b = 5000 * uT; let reward2 = 2_000_000 * uT; // Create a coinbase Txn at the first block height @@ -3080,107 +3147,157 @@ fn test_coinbase_monitoring_stuck_in_mempool() { fees1 + reward1 + fees2 + reward2 ); + // Take out a second one at the second height which should overwrite the initial one + let _tx2b = runtime + .block_on(alice_ts.generate_coinbase_transaction(reward2, fees2b, block_height_b)) + .unwrap(); + let transactions = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); + assert_eq!(transactions.len(), 2); + let tx_id2b = transactions + .values() + .find(|tx| tx.amount == fees2b + reward2) + .unwrap() + .tx_id; + assert_eq!( + runtime + .block_on(alice_output_manager.get_balance()) + .unwrap() + .pending_incoming_balance, + fees1 + reward1 + fees2b + reward2 + ); + assert!(transactions.values().any(|tx| tx.amount == fees1 + reward1)); - assert!(transactions.values().any(|tx| tx.amount == fees2 + reward2)); + assert!(transactions.values().any(|tx| tx.amount == fees2b + reward2)); // Start the transaction protocols - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - let height_of_longest_chain = block_height_a; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::NotStored, - block_hash: None, - confirmations, - is_synced: true, 
- height_of_longest_chain, - }); - assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(30))) { - println!(" {}", e) - } + alice_connectivity.set_base_node(server_node_identity.to_peer()); - // Test when coinbase transactions are stuck in mempool - let height_of_longest_chain = block_height_a + TransactionServiceConfig::default().num_confirmations_required - 1; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::InMempool, - block_hash: None, - confirmations, - is_synced: true, - height_of_longest_chain, - }); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(30))) { - println!(" {}", e) - } runtime.block_on(async { let delay = sleep(Duration::from_secs(30)); tokio::pin!(delay); let mut count = 0usize; loop { tokio::select! 
{ - event = alice_event_stream.recv() => { - if let TransactionEvent::ReceivedFinalizedTransaction(tx_id) = &*event.unwrap() { - if tx_id == &tx_id1 || tx_id == &tx_id2 { - count += 1; - } - if count == 2 { - break; - } - } - }, - () = &mut delay => { - break; - }, + event = alice_event_stream.recv() => { + if let TransactionEvent::ReceivedFinalizedTransaction(tx_id) = &*event.unwrap() { + if tx_id == &tx_id1 || tx_id == &tx_id2 || tx_id == &tx_id2b { + count += 1; + } + if count == 3 { + break; } + } + }, + () = &mut delay => { + break; + }, + } } assert_eq!( - count, 2, + count, 3, "Expected exactly two 'ReceivedFinalizedTransaction(_)' events" ); }); - // Both coinbase transactions should be cancelled if the block height advances past the confirmation height - let height_of_longest_chain = block_height_b + TransactionServiceConfig::default().num_confirmations_required + 1; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::InMempool, - block_hash: None, - confirmations, - is_synced: true, - height_of_longest_chain, - }); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(2, Duration::from_secs(30))) { - println!(" {}", e) + // Now we will test validation where tx1 will not be found but tx2b will be unconfirmed, then confirmed. + let tx1 = runtime.block_on(db.get_completed_transaction(tx_id1)).unwrap(); + let tx2b = runtime.block_on(db.get_completed_transaction(tx_id2b)).unwrap(); + + let mut block_headers = HashMap::new(); + for i in 0..=4 { + let mut block_header = BlockHeader::new(1); + block_header.height = i; + block_headers.insert(i, block_header.clone()); } - runtime.block_on(async { - let delay = sleep(Duration::from_secs(30)); - tokio::pin!(delay); - let mut count = 0usize; - loop { - tokio::select! 
{ - event = alice_event_stream.recv() => { - if let TransactionEvent::TransactionCancelled(tx_id) = &*event.unwrap() { - if tx_id == &tx_id1 || tx_id == &tx_id2 { - count += 1; - } - if count == 2 { - break; - } - } - }, - () = &mut delay => { - break; - }, - } - } - assert_eq!(count, 2, "Expected exactly two 'TransactionCancelled(_)' events"); + rpc_service_state.set_blocks(block_headers.clone()); + let mut transaction_query_batch_responses = vec![ + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx1.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::NotStored) as i32, + block_hash: None, + confirmations: 0, + block_height: 0, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx2b.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&1).unwrap().hash()), + confirmations: 0, + block_height: 1, + }, + ]; + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses.clone(), + is_synced: true, + tip_hash: Some(block_headers.get(&1).unwrap().hash()), + height_of_longest_chain: 1, + }; + + rpc_service_state.set_transaction_query_batch_responses(batch_query_response); + + alice_connectivity.set_base_node(server_node_identity.to_peer()); + + runtime + .block_on(alice_ts.validate_transactions()) + .expect("Validation should start"); + + let _tx_batch_query_calls = runtime + .block_on(rpc_service_state.wait_pop_transaction_batch_query_calls(1, Duration::from_secs(30))) + .unwrap(); + + let completed_txs = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); + + assert_eq!(completed_txs.len(), 2); + + let tx = completed_txs.get(&tx_id1).unwrap(); + assert_eq!(tx.status, TransactionStatus::Coinbase); + assert!(tx.valid); + + let tx = completed_txs.get(&tx_id2b).unwrap(); + assert_eq!(tx.status, 
TransactionStatus::MinedUnconfirmed); + assert!(tx.valid); + + // Now we will have tx_id2b becoming confirmed + let _ = transaction_query_batch_responses.pop(); + transaction_query_batch_responses.push(TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx2b.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&4).unwrap().hash()), + confirmations: 3, + block_height: 4, }); + + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses, + is_synced: true, + tip_hash: Some(block_headers.get(&4).unwrap().hash()), + height_of_longest_chain: 4, + }; + rpc_service_state.set_transaction_query_batch_responses(batch_query_response); + + runtime + .block_on(alice_ts.validate_transactions()) + .expect("Validation should start"); + + let _tx_batch_query_calls = runtime + .block_on(rpc_service_state.wait_pop_transaction_batch_query_calls(1, Duration::from_secs(30))) + .unwrap(); + + let completed_txs = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); + + let tx = completed_txs.get(&tx_id2b).unwrap(); + assert_eq!(tx.status, TransactionStatus::MinedConfirmed); + assert!(tx.valid); } #[test] -fn test_coinbase_monitoring_with_base_node_change_and_mined() { +fn test_coinbase_abandoned() { let factories = CryptoFactories::default(); let mut runtime = Runtime::new().unwrap(); @@ -3190,7 +3307,6 @@ fn test_coinbase_monitoring_with_base_node_change_and_mined() { mut alice_ts, mut alice_output_manager, _, - connectivity_mock_state, _, _, _, @@ -3199,22 +3315,21 @@ fn test_coinbase_monitoring_with_base_node_change_and_mined() { _shutdown, _mock_rpc_server, server_node_identity, - mut rpc_service_state, + rpc_service_state, + _, + mut alice_connectivity, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); let mut alice_event_stream = 
alice_ts.get_event_stream(); rpc_service_state.set_response_delay(Some(Duration::from_secs(1))); let block_height_a = 10; - let block_height_b = block_height_a + 1; + // First we create un unmined coinbase and then abandon it let fees1 = 1000 * uT; let reward1 = 1_000_000 * uT; - let fees2 = 2000 * uT; - let reward2 = 2_000_000 * uT; - - // Create a coinbase Txn at the first block height - let _tx1 = runtime + let tx1 = runtime .block_on(alice_ts.generate_coinbase_transaction(reward1, fees1, block_height_a)) .unwrap(); let transactions = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); @@ -3232,8 +3347,78 @@ fn test_coinbase_monitoring_with_base_node_change_and_mined() { fees1 + reward1 ); - // Create another coinbase Txn at the next block height - let _tx2 = runtime + let transaction_query_batch_responses = vec![TxQueryBatchResponseProto { + signature: Some(SignatureProto::from(tx1.first_kernel_excess_sig().unwrap().clone())), + location: TxLocationProto::from(TxLocation::InMempool) as i32, + block_hash: None, + confirmations: 0, + block_height: 0, + }]; + + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses, + is_synced: true, + tip_hash: Some([5u8; 16].to_vec()), + height_of_longest_chain: block_height_a + TransactionServiceConfig::default().num_confirmations_required + 1, + }; + + rpc_service_state.set_transaction_query_batch_responses(batch_query_response); + + // Start the transaction protocols + alice_connectivity.set_base_node(server_node_identity.to_peer()); + + let balance = runtime.block_on(alice_output_manager.get_balance()).unwrap(); + assert_eq!(balance.pending_incoming_balance, fees1 + reward1); + + runtime + .block_on(alice_ts.validate_transactions()) + .expect("Validation should start"); + + runtime.block_on(async { + let delay = sleep(Duration::from_secs(30)); + tokio::pin!(delay); + let mut count = 0usize; + loop { + tokio::select! 
{ + event = alice_event_stream.recv() => { + if let TransactionEvent::TransactionCancelled(tx_id) = &*event.unwrap() { + if tx_id == &tx_id1 { + count += 1; + } + if count == 1 { + break; + } + } + }, + () = &mut delay => { + break; + }, + } + } + assert_eq!(count, 1, "Expected a TransactionCancelled event"); + }); + + let tx = runtime.block_on(alice_ts.get_completed_transaction(tx_id1)).unwrap(); + assert_eq!(tx.status, TransactionStatus::Coinbase); + assert!(!tx.valid); + + let balance = runtime.block_on(alice_output_manager.get_balance()).unwrap(); + assert_eq!(balance, Balance { + available_balance: MicroTari(0), + time_locked_balance: Some(MicroTari(0)), + pending_incoming_balance: MicroTari(0), + pending_outgoing_balance: MicroTari(0) + }); + + let invalid_txs = runtime.block_on(alice_output_manager.get_invalid_outputs()).unwrap(); + assert!(invalid_txs.is_empty()); + + // Now we will make a coinbase that will be mined, reorged out and then reorged back in + let fees2 = 2000 * uT; + let reward2 = 2_000_000 * uT; + let block_height_b = 11; + + let tx2 = runtime .block_on(alice_ts.generate_coinbase_transaction(reward2, fees2, block_height_b)) .unwrap(); let transactions = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); @@ -3248,310 +3433,219 @@ fn test_coinbase_monitoring_with_base_node_change_and_mined() { .block_on(alice_output_manager.get_balance()) .unwrap() .pending_incoming_balance, - fees1 + reward1 + fees2 + reward2 + fees2 + reward2 ); - assert!(transactions.values().any(|tx| tx.amount == fees1 + reward1)); - assert!(transactions.values().any(|tx| tx.amount == fees2 + reward2)); - - // Start the transaction protocols - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); + let transaction_query_batch_responses = vec![TxQueryBatchResponseProto { + signature: Some(SignatureProto::from(tx2.first_kernel_excess_sig().unwrap().clone())), + location: 
TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some([11u8; 16].to_vec()), + confirmations: 2, + block_height: block_height_b, + }]; - let height_of_longest_chain = block_height_a; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::NotStored, - block_hash: None, - confirmations, + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses, is_synced: true, - height_of_longest_chain, - }); - assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(30))) { - println!(" {}", e) - } + tip_hash: Some([13u8; 16].to_vec()), + height_of_longest_chain: block_height_b + 2, + }; - // Test when coinbase transactions are mined but unconfirmed - let height_of_longest_chain = block_height_a + TransactionServiceConfig::default().num_confirmations_required - 1; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations, - is_synced: true, - height_of_longest_chain, - }); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(2, Duration::from_secs(30))) { - println!(" {}", e) + rpc_service_state.set_transaction_query_batch_responses(batch_query_response); + + let mut block_headers = HashMap::new(); + for i in 0..=(block_height_b + 2) { + let mut block_header = BlockHeader::new(1); + block_header.height = i; + block_headers.insert(i, block_header.clone()); } + rpc_service_state.set_blocks(block_headers); + runtime + .block_on(alice_ts.validate_transactions()) + .expect("Validation should start"); + runtime.block_on(async { let delay = sleep(Duration::from_secs(30)); tokio::pin!(delay); let mut count = 0usize; loop { 
tokio::select! { - event = alice_event_stream.recv() => { - if let TransactionEvent::TransactionMinedUnconfirmed(tx_id, _) = &*event.unwrap() { - if tx_id == &tx_id1 || tx_id == &tx_id2 { - count += 1; - } - if count == 2 { - break; - } - } - - }, - () = &mut delay => { - break; - }, + event = alice_event_stream.recv() => { + if let TransactionEvent::TransactionMinedUnconfirmed{tx_id, num_confirmations:_, is_valid: _} = &*event.unwrap() { if tx_id == &tx_id2 { + count += 1; + } + if count == 1 { + break; } + } + }, + () = &mut delay => { + break; + }, + } } - assert_eq!(count, 2, "Expected exactly two 'TransactionMinedUnconfirmed(_)' events"); + assert_eq!(count, 1, "Expected a TransactionMinedUnconfirmed event"); }); - // Change the base node halfway through the protocol while still at the previous height - let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - let service = BaseNodeWalletRpcMockService::new(); - let mut rpc_service_state = service.get_state(); - rpc_service_state.set_response_delay(Some(Duration::from_secs(1))); - let new_server = BaseNodeWalletRpcServer::new(service); - let protocol_name = new_server.as_protocol_name(); - let mut new_mock_server = { - let _enter = runtime.handle().enter(); - MockRpcServer::new(new_server, new_server_node_identity.clone()) + let tx = runtime.block_on(alice_ts.get_completed_transaction(tx_id2)).unwrap(); + assert_eq!(tx.status, TransactionStatus::MinedUnconfirmed); + + // Now we create a reorg + let transaction_query_batch_responses = vec![ + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from(tx1.first_kernel_excess_sig().unwrap().clone())), + location: TxLocationProto::from(TxLocation::NotStored) as i32, + block_hash: None, + confirmations: 0, + block_height: 0, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from(tx2.first_kernel_excess_sig().unwrap().clone())), + location: TxLocationProto::from(TxLocation::NotStored) as i32, + block_hash: 
None, + confirmations: 0, + block_height: 0, + }, + ]; + + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses, + is_synced: true, + tip_hash: Some([12u8; 16].to_vec()), + height_of_longest_chain: block_height_b + TransactionServiceConfig::default().num_confirmations_required + 1, }; - { - let _enter = runtime.handle().enter(); - new_mock_server.serve(); + + rpc_service_state.set_transaction_query_batch_responses(batch_query_response); + + let mut block_headers = HashMap::new(); + for i in 0..=(block_height_b + TransactionServiceConfig::default().num_confirmations_required + 1) { + let mut block_header = BlockHeader::new(2); + block_header.height = i; + block_headers.insert(i, block_header.clone()); } - let connection = - runtime.block_on(new_mock_server.create_connection(new_server_node_identity.to_peer(), protocol_name.into())); - runtime.block_on(connectivity_mock_state.add_active_connection(connection)); - let height_of_longest_chain = block_height_a + TransactionServiceConfig::default().num_confirmations_required - 1; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations, - is_synced: true, - height_of_longest_chain, - }); + rpc_service_state.set_blocks(block_headers); runtime - .block_on(alice_ts.set_base_node_public_key(new_server_node_identity.public_key().clone())) - .unwrap(); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(30))) { - println!(" {}", e) - } + .block_on(alice_ts.validate_transactions()) + .expect("Validation should start"); - // Test when coinbase transactions are mined and confirmed - let height_of_longest_chain = block_height_b + TransactionServiceConfig::default().num_confirmations_required + 1; - let confirmations = height_of_longest_chain - block_height_a; - 
rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations, - is_synced: true, - height_of_longest_chain, - }); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(2, Duration::from_secs(30))) { - println!(" {}", e) - } runtime.block_on(async { let delay = sleep(Duration::from_secs(30)); tokio::pin!(delay); let mut count = 0usize; loop { tokio::select! { - event = alice_event_stream.recv() => { - if let TransactionEvent::TransactionMined(tx_id) = &*event.unwrap() { - if tx_id == &tx_id1 || tx_id == &tx_id2 { - count += 1; - } - if count == 2 { - break; - } - } - }, - () = &mut delay => { - break; - }, + event = alice_event_stream.recv() => { + match &*event.unwrap() { + TransactionEvent::TransactionBroadcast(tx_id) => { + if tx_id == &tx_id2 { + count += 1; + } + }, + TransactionEvent::TransactionCancelled(tx_id) => { + if tx_id == &tx_id2 { + count += 1; + } + }, + _ => (), + } + + if count == 2 { + break; } + }, + () = &mut delay => { + break; + }, + } } - assert_eq!(count, 2, "Expected exactly two 'TransactionMined(_)' events"); + assert_eq!( + count, 2, + "Expected a TransactionBroadcast and Transaction Cancelled event" + ); }); -} - -#[test] -fn test_coinbase_monitoring_mined_not_synced() { - let factories = CryptoFactories::default(); - let mut runtime = Runtime::new().unwrap(); - - let (connection, _temp_dir) = make_wallet_database_connection(None); - - let ( - mut alice_ts, - mut alice_output_manager, - _, - _connectivity_mock_state, - _, - _, - _, - _, - _, - _shutdown, - _mock_rpc_server, - server_node_identity, - mut rpc_service_state, - ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); - let mut alice_event_stream = alice_ts.get_event_stream(); - rpc_service_state.set_response_delay(Some(Duration::from_secs(1))); - - let block_height_a = 10; - let block_height_b = block_height_a + 1; - let fees1 = 1000 * uT; - let 
reward1 = 1_000_000 * uT; + let tx = runtime.block_on(alice_ts.get_completed_transaction(tx_id2)).unwrap(); + assert_eq!(tx.status, TransactionStatus::Coinbase); + assert!(!tx.valid); - let fees2 = 2000 * uT; - let reward2 = 2_000_000 * uT; + let balance = runtime.block_on(alice_output_manager.get_balance()).unwrap(); + assert_eq!(balance, Balance { + available_balance: MicroTari(0), + time_locked_balance: Some(MicroTari(0)), + pending_incoming_balance: MicroTari(0), + pending_outgoing_balance: MicroTari(0) + }); - // Create a coinbase Txn at the first block height - let _tx1 = runtime - .block_on(alice_ts.generate_coinbase_transaction(reward1, fees1, block_height_a)) - .unwrap(); - let transactions = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); - assert_eq!(transactions.len(), 1); - let tx_id1 = transactions - .values() - .find(|tx| tx.amount == fees1 + reward1) - .unwrap() - .tx_id; - assert_eq!( - runtime - .block_on(alice_output_manager.get_balance()) - .unwrap() - .pending_incoming_balance, - fees1 + reward1 - ); + // Now reorg again and have tx2 be mined + let mut block_headers = HashMap::new(); + for i in 0..=15 { + let mut block_header = BlockHeader::new(1); + block_header.height = i; + block_headers.insert(i, block_header.clone()); + } + rpc_service_state.set_blocks(block_headers.clone()); + + let transaction_query_batch_responses = vec![ + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from(tx1.first_kernel_excess_sig().unwrap().clone())), + location: TxLocationProto::from(TxLocation::NotStored) as i32, + block_hash: None, + confirmations: 0, + block_height: 0, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from(tx2.first_kernel_excess_sig().unwrap().clone())), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&10).unwrap().hash()), + confirmations: 5, + block_height: 10, + }, + ]; - // Create another coinbase Txn at the next block height - let _tx2 = 
runtime - .block_on(alice_ts.generate_coinbase_transaction(reward2, fees2, block_height_b)) - .unwrap(); - let transactions = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); - assert_eq!(transactions.len(), 2); - let tx_id2 = transactions - .values() - .find(|tx| tx.amount == fees2 + reward2) - .unwrap() - .tx_id; - assert_eq!( - runtime - .block_on(alice_output_manager.get_balance()) - .unwrap() - .pending_incoming_balance, - fees1 + reward1 + fees2 + reward2 - ); + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses, + is_synced: true, + tip_hash: Some([20u8; 16].to_vec()), + height_of_longest_chain: 20, + }; - assert!(transactions.values().any(|tx| tx.amount == fees1 + reward1)); - assert!(transactions.values().any(|tx| tx.amount == fees2 + reward2)); + rpc_service_state.set_transaction_query_batch_responses(batch_query_response); - // Start the transaction protocols runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - - let height_of_longest_chain = block_height_a; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::NotStored, - block_hash: None, - confirmations, - is_synced: false, - height_of_longest_chain, - }); - assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(1, Duration::from_secs(30))) { - println!(" {}", e) - } + .block_on(alice_ts.validate_transactions()) + .expect("Validation should start"); - // Test when coinbase transactions are mined but unconfirmed - let height_of_longest_chain = block_height_a + TransactionServiceConfig::default().num_confirmations_required - 1; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: 
TxLocation::Mined, - block_hash: None, - confirmations, - is_synced: false, - height_of_longest_chain, - }); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(1, Duration::from_secs(30))) { - println!(" {}", e) - } runtime.block_on(async { let delay = sleep(Duration::from_secs(30)); tokio::pin!(delay); let mut count = 0usize; loop { tokio::select! { - event = alice_event_stream.recv() => { - if let TransactionEvent::ReceivedFinalizedTransaction(tx_id) = &*event.unwrap() { - if tx_id == &tx_id1 || tx_id == &tx_id2 { - count += 1; - } - if count == 2 { - break; - } - } - }, - () = &mut delay => { - break; - }, - } - } - assert_eq!( - count, 2, - "Expected exactly two 'ReceivedFinalizedTransaction(_)' events" - ); - }); + event = alice_event_stream.recv() => { + match &*event.unwrap() { + TransactionEvent::TransactionMined { tx_id, is_valid: _ } => { + if tx_id == &tx_id2 { + count += 1; + } + }, + TransactionEvent::TransactionCancelled(tx_id) => { + if tx_id == &tx_id1 { + count += 1; + } + }, + _ => (), + } - // Test when coinbase transactions are mined and confirmed - let height_of_longest_chain = block_height_b + TransactionServiceConfig::default().num_confirmations_required + 1; - let confirmations = height_of_longest_chain - block_height_a; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations, - is_synced: false, - height_of_longest_chain, - }); - if let Err(e) = runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(4, Duration::from_secs(30))) { - println!(" {}", e) - } - runtime.block_on(async { - let delay = sleep(Duration::from_secs(30)); - tokio::pin!(delay); - let mut count = 0usize; - loop { - tokio::select! 
{ - event = alice_event_stream.recv() => { - if let TransactionEvent::TransactionMined(tx_id) = &*event.unwrap() { - if tx_id == &tx_id1 || tx_id == &tx_id2 { - count += 1; - } - if count == 2 { - break; - } - } - }, - () = &mut delay => { - break; - }, + if count == 2 { + break; } + }, + () = &mut delay => { + break; + }, + } } - assert_eq!(count, 2, "Expected exactly two 'TransactionMined(_)' events"); + assert_eq!(count, 2, "Expected a TransactionMined and TransactionCancelled event"); }); } @@ -3561,7 +3655,7 @@ fn test_coinbase_transaction_reused_for_same_height() { let mut runtime = Runtime::new().unwrap(); let (connection, _temp_dir) = make_wallet_database_connection(None); - let (mut tx_service, mut output_service, _, _, _, _, _, _, _, _shutdown, _, _, _) = + let (mut tx_service, mut output_service, _, _, _, _, _, _, _shutdown, _, _, _, _, _, _rpc_server_connection) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); let blockheight1 = 10; @@ -3651,7 +3745,6 @@ fn test_transaction_resending() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, _alice_tx_sender, mut alice_tx_reply_sender, _, @@ -3661,6 +3754,9 @@ fn test_transaction_resending() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms( &mut runtime, factories.clone(), @@ -3711,7 +3807,6 @@ fn test_transaction_resending() { _bob_ts, _bob_output_manager, bob_outbound_service, - _, mut bob_tx_sender, mut _bob_tx_reply_sender, _, @@ -3721,6 +3816,9 @@ fn test_transaction_resending() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms( &mut runtime, factories, @@ -3898,22 +3996,36 @@ fn test_resend_on_startup() { ))) .unwrap(); - let (mut alice_ts, _, alice_outbound_service, _, _, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories.clone(), - connection, - Some(TransactionServiceConfig { - transaction_resend_period: Duration::from_secs(10), 
- resend_response_cooldown: Duration::from_secs(5), - ..Default::default() - }), - ); + let ( + mut alice_ts, + _, + alice_outbound_service, + _, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + mut alice_connectivity, + _rpc_server_connection, + ) = setup_transaction_service_no_comms( + &mut runtime, + factories.clone(), + connection, + Some(TransactionServiceConfig { + transaction_resend_period: Duration::from_secs(10), + resend_response_cooldown: Duration::from_secs(5), + ..Default::default() + }), + ); // Need to set something for alices base node, doesn't matter what - runtime - .block_on(alice_ts.set_base_node_public_key(alice_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(alice_node_identity.to_peer()); + assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); assert!(runtime.block_on(alice_ts.restart_transaction_protocols()).is_ok()); @@ -3938,22 +4050,36 @@ fn test_resend_on_startup() { ))) .unwrap(); - let (mut alice_ts2, _, alice_outbound_service2, _, _, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories.clone(), - connection2, - Some(TransactionServiceConfig { - transaction_resend_period: Duration::from_secs(10), - resend_response_cooldown: Duration::from_secs(5), - ..Default::default() - }), - ); + let ( + mut alice_ts2, + _, + alice_outbound_service2, + _, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + mut alice_connectivity2, + _rpc_server_connection2, + ) = setup_transaction_service_no_comms( + &mut runtime, + factories.clone(), + connection2, + Some(TransactionServiceConfig { + transaction_resend_period: Duration::from_secs(10), + resend_response_cooldown: Duration::from_secs(5), + ..Default::default() + }), + ); // Need to set something for alices base node, doesn't matter what - runtime - .block_on(alice_ts2.set_base_node_public_key(alice_node_identity.public_key().clone())) - .unwrap(); + 
alice_connectivity2.set_base_node(alice_node_identity.to_peer()); + assert!(runtime.block_on(alice_ts2.restart_broadcast_protocols()).is_ok()); assert!(runtime.block_on(alice_ts2.restart_transaction_protocols()).is_ok()); @@ -4003,22 +4129,36 @@ fn test_resend_on_startup() { ))) .unwrap(); - let (mut bob_ts, _, bob_outbound_service, _, _, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories.clone(), - bob_connection, - Some(TransactionServiceConfig { - transaction_resend_period: Duration::from_secs(10), - resend_response_cooldown: Duration::from_secs(5), - ..Default::default() - }), - ); + let ( + mut bob_ts, + _, + bob_outbound_service, + _, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + mut bob_connectivity, + _rpc_server_connection_bob, + ) = setup_transaction_service_no_comms( + &mut runtime, + factories.clone(), + bob_connection, + Some(TransactionServiceConfig { + transaction_resend_period: Duration::from_secs(10), + resend_response_cooldown: Duration::from_secs(5), + ..Default::default() + }), + ); // Need to set something for bobs base node, doesn't matter what - runtime - .block_on(bob_ts.set_base_node_public_key(alice_node_identity.public_key().clone())) - .unwrap(); + bob_connectivity.set_base_node(alice_node_identity.to_peer()); + assert!(runtime.block_on(bob_ts.restart_broadcast_protocols()).is_ok()); assert!(runtime.block_on(bob_ts.restart_transaction_protocols()).is_ok()); @@ -4039,22 +4179,35 @@ fn test_resend_on_startup() { ))) .unwrap(); - let (mut bob_ts2, _, bob_outbound_service2, _, _, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories, - bob_connection2, - Some(TransactionServiceConfig { - transaction_resend_period: Duration::from_secs(10), - resend_response_cooldown: Duration::from_secs(5), - ..Default::default() - }), - ); + let ( + mut bob_ts2, + _, + bob_outbound_service2, + _, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + mut 
bob_connectivity2, + _rpc_server_connection_bob2, + ) = setup_transaction_service_no_comms( + &mut runtime, + factories, + bob_connection2, + Some(TransactionServiceConfig { + transaction_resend_period: Duration::from_secs(10), + resend_response_cooldown: Duration::from_secs(5), + ..Default::default() + }), + ); // Need to set something for bobs base node, doesn't matter what - runtime - .block_on(bob_ts2.set_base_node_public_key(alice_node_identity.public_key().clone())) - .unwrap(); + bob_connectivity2.set_base_node(alice_node_identity.to_peer()); assert!(runtime.block_on(bob_ts2.restart_broadcast_protocols()).is_ok()); assert!(runtime.block_on(bob_ts2.restart_transaction_protocols()).is_ok()); @@ -4086,7 +4239,6 @@ fn test_replying_to_cancelled_tx() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, _alice_tx_sender, mut alice_tx_reply_sender, _, @@ -4096,6 +4248,9 @@ fn test_replying_to_cancelled_tx() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms( &mut runtime, factories.clone(), @@ -4144,7 +4299,6 @@ fn test_replying_to_cancelled_tx() { _bob_ts, _bob_output_manager, bob_outbound_service, - _, mut bob_tx_sender, mut _bob_tx_reply_sender, _, @@ -4154,6 +4308,9 @@ fn test_replying_to_cancelled_tx() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms( &mut runtime, factories, @@ -4216,7 +4373,6 @@ fn test_transaction_timeout_cancellation() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, _alice_tx_sender, _alice_tx_reply_sender, _, @@ -4226,6 +4382,9 @@ fn test_transaction_timeout_cancellation() { _, _, _, + _, + _, + _rpc_server_connection, ) = setup_transaction_service_no_comms( &mut runtime, factories.clone(), @@ -4346,23 +4505,36 @@ fn test_transaction_timeout_cancellation() { ))) .unwrap(); - let (mut bob_ts, _, bob_outbound_service, _, _, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - 
factories.clone(), - bob_connection, - Some(TransactionServiceConfig { - transaction_resend_period: Duration::from_secs(10), - resend_response_cooldown: Duration::from_secs(5), - pending_transaction_cancellation_timeout: Duration::from_secs(15), - ..Default::default() - }), - ); + let ( + mut bob_ts, + _, + bob_outbound_service, + _, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + mut bob_connectivity, + _rpc_server_connection_bob, + ) = setup_transaction_service_no_comms( + &mut runtime, + factories.clone(), + bob_connection, + Some(TransactionServiceConfig { + transaction_resend_period: Duration::from_secs(10), + resend_response_cooldown: Duration::from_secs(5), + pending_transaction_cancellation_timeout: Duration::from_secs(15), + ..Default::default() + }), + ); // Need to set something for bobs base node, doesn't matter what - runtime - .block_on(bob_ts.set_base_node_public_key(bob_node_identity.public_key().clone())) - .unwrap(); + bob_connectivity.set_base_node(bob_node_identity.to_peer()); assert!(runtime.block_on(bob_ts.restart_broadcast_protocols()).is_ok()); assert!(runtime.block_on(bob_ts.restart_transaction_protocols()).is_ok()); @@ -4380,18 +4552,33 @@ fn test_transaction_timeout_cancellation() { let (carol_connection, _temp_dir) = make_wallet_database_connection(None); // Now to do this for the Receiver - let (carol_ts, _, carol_outbound_service, _, mut carol_tx_sender, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms( - &mut runtime, - factories, - carol_connection, - Some(TransactionServiceConfig { - transaction_resend_period: Duration::from_secs(10), - resend_response_cooldown: Duration::from_secs(5), - pending_transaction_cancellation_timeout: Duration::from_secs(15), - ..Default::default() - }), - ); + let ( + carol_ts, + _, + carol_outbound_service, + mut carol_tx_sender, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + _, + _rpc_server_connection_carol, + ) = setup_transaction_service_no_comms( + &mut runtime, 
+ factories, + carol_connection, + Some(TransactionServiceConfig { + transaction_resend_period: Duration::from_secs(10), + resend_response_cooldown: Duration::from_secs(5), + pending_transaction_cancellation_timeout: Duration::from_secs(15), + ..Default::default() + }), + ); let mut carol_event_stream = carol_ts.get_event_stream(); runtime @@ -4422,25 +4609,25 @@ fn test_transaction_timeout_cancellation() { let mut transaction_cancelled = false; loop { tokio::select! { - event = carol_event_stream.recv() => { - if let TransactionEvent::TransactionCancelled(t) = &*event.unwrap() { - if t == &tx_id { - transaction_cancelled = true; - break; - } - } - }, - () = &mut delay => { - break; - }, + event = carol_event_stream.recv() => { + if let TransactionEvent::TransactionCancelled(t) = &*event.unwrap() { + if t == &tx_id { + transaction_cancelled = true; + break; } + } + }, + () = &mut delay => { + break; + }, + } } assert!(transaction_cancelled, "Transaction must be cancelled"); }); } -/// This test will check that the Transaction Service starts the tx broadcast protocol correctly and reacts correctly to -/// a tx being mined and confirmed and to a tx being rejected. +/// This test will check that the Transaction Service starts the tx broadcast protocol correctly and reacts correctly +/// to a tx being broadcast and to a tx being rejected. 
#[test] fn transaction_service_tx_broadcast() { let factories = CryptoFactories::default(); @@ -4457,7 +4644,6 @@ fn transaction_service_tx_broadcast() { mut alice_ts, mut alice_output_manager, alice_outbound_service, - _, mut _alice_tx_sender, mut alice_tx_ack_sender, _, @@ -4467,16 +4653,32 @@ fn transaction_service_tx_broadcast() { _mock_rpc_server, server_node_identity, rpc_service_state, + _, + mut alice_connectivity, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection, None); let mut alice_event_stream = alice_ts.get_event_stream(); - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(server_node_identity.to_peer()); let (connection2, _temp_dir2) = make_wallet_database_connection(None); - let (_bob_ts, _bob_output_manager, bob_outbound_service, _, mut bob_tx_sender, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection2, None); + let ( + _bob_ts, + _bob_output_manager, + bob_outbound_service, + mut bob_tx_sender, + _, + _, + _, + _, + _shutdown, + _, + _, + _, + _, + _, + _rpc_server_connection_bob, + ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection2, None); let alice_output_value = MicroTari(250000); @@ -4645,13 +4847,14 @@ fn transaction_service_tx_broadcast() { runtime.block_on(async { let delay = sleep(Duration::from_secs(60)); tokio::pin!(delay); - let mut tx1_mined = false; + let mut tx1_broadcast = false; loop { tokio::select! 
{ event = alice_event_stream.recv() => { - if let TransactionEvent::TransactionMined(tx_id) = &*event.unwrap(){ + println!("Event: {:?}", event); + if let TransactionEvent::TransactionBroadcast(tx_id) = &*event.unwrap(){ if tx_id == &tx_id1 { - tx1_mined = true; + tx1_broadcast = true; break; } } @@ -4661,7 +4864,7 @@ fn transaction_service_tx_broadcast() { }, } } - assert!(tx1_mined); + assert!(tx1_broadcast); }); runtime @@ -4693,21 +4896,6 @@ fn transaction_service_tx_broadcast() { assert!(tx2_received); }); - let alice_completed_tx2 = runtime - .block_on(alice_ts.get_completed_transactions()) - .unwrap() - .remove(&tx_id2) - .expect("Transaction must be in collection"); - - assert_eq!(alice_completed_tx2.status, TransactionStatus::Completed); - - let _ = runtime - .block_on(rpc_service_state.wait_pop_submit_transaction_calls(1, Duration::from_secs(30))) - .expect("Should receive a tx submission"); - let _ = runtime - .block_on(rpc_service_state.wait_pop_transaction_query_calls(1, Duration::from_secs(30))) - .expect("Should receive a tx query"); - rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { accepted: false, rejection_reason: TxSubmissionRejectionReason::Orphan, @@ -4722,6 +4910,18 @@ fn transaction_service_tx_broadcast() { height_of_longest_chain: 0, }); + let alice_completed_tx2 = runtime + .block_on(alice_ts.get_completed_transactions()) + .unwrap() + .remove(&tx_id2) + .expect("Transaction must be in collection"); + + assert_eq!(alice_completed_tx2.status, TransactionStatus::Completed); + + let _ = runtime + .block_on(rpc_service_state.wait_pop_submit_transaction_calls(1, Duration::from_secs(30))) + .expect("Should receive a tx submission"); + runtime.block_on(async { let delay = sleep(Duration::from_secs(60)); tokio::pin!(delay); @@ -4748,9 +4948,10 @@ fn transaction_service_tx_broadcast() { let balance = runtime.block_on(alice_output_manager.get_balance()).unwrap(); assert_eq!( - balance.available_balance, - alice_output_value + 
alice_output_value - amount_sent1 - tx1_fee + balance.pending_incoming_balance, + alice_output_value - amount_sent1 - tx1_fee ); + assert_eq!(balance.available_balance, alice_output_value); } #[test] @@ -4791,6 +4992,7 @@ fn broadcast_all_completed_transactions_on_startup() { valid: true, confirmations: None, mined_height: None, + mined_in_block: None, }; let completed_tx2 = CompletedTransaction { @@ -4823,8 +5025,23 @@ fn broadcast_all_completed_transactions_on_startup() { ))) .unwrap(); - let (mut alice_ts, _, _, _, _, _, _, _, _, _shutdown, _mock_rpc_server, server_node_identity, rpc_service_state) = - setup_transaction_service_no_comms(&mut runtime, factories, connection, None); + let ( + mut alice_ts, + _, + _, + _, + _, + _, + _, + _, + _shutdown, + _mock_rpc_server, + server_node_identity, + rpc_service_state, + _, + mut alice_connectivity, + _rpc_server_connection, + ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, @@ -4836,9 +5053,7 @@ fn broadcast_all_completed_transactions_on_startup() { assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_err()); - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); + alice_connectivity.set_base_node(server_node_identity.to_peer()); assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); @@ -4879,286 +5094,6 @@ fn broadcast_all_completed_transactions_on_startup() { }); } -/// This test the case where a transaction broadcast protocol has started and the base node pubkey gets updated, does -/// the update make it through to the protocol -#[test] -fn transaction_service_tx_broadcast_with_base_node_change() { - let factories = CryptoFactories::default(); - let mut runtime = Runtime::new().unwrap(); - - let alice_node_identity = - NodeIdentity::random(&mut OsRng, get_next_memory_address(), 
PeerFeatures::COMMUNICATION_NODE); - - let bob_node_identity = - NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); - let (connection, _temp_dir) = make_wallet_database_connection(None); - - let ( - mut alice_ts, - mut alice_output_manager, - alice_outbound_service, - connectivity_mock_state, - mut _alice_tx_sender, - mut alice_tx_ack_sender, - _, - _alice_base_node_response_sender, - _, - _shutdown, - _mock_rpc_server, - server_node_identity, - rpc_service_state, - ) = setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection, None); - let mut alice_event_stream = alice_ts.get_event_stream(); - - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - let (connection2, _temp_dir2) = make_wallet_database_connection(None); - - let (_bob_ts, _bob_output_manager, bob_outbound_service, _, mut bob_tx_sender, _, _, _, _, _shutdown, _, _, _) = - setup_transaction_service_no_comms(&mut runtime, factories.clone(), connection2, None); - - let alice_output_value = MicroTari(250000); - - let (_utxo, uo) = make_input(&mut OsRng, alice_output_value, &factories.commitment); - runtime.block_on(alice_output_manager.add_output(uo)).unwrap(); - - let (_utxo, uo2) = make_input(&mut OsRng, alice_output_value, &factories.commitment); - runtime.block_on(alice_output_manager.add_output(uo2)).unwrap(); - - let amount_sent1 = 10000 * uT; - - // Send Tx1 - let tx_id1 = runtime - .block_on(alice_ts.send_transaction( - bob_node_identity.public_key().clone(), - amount_sent1, - 100 * uT, - "Testing Message".to_string(), - )) - .unwrap(); - alice_outbound_service - .wait_call_count(2, Duration::from_secs(60)) - .expect("Alice call wait 1"); - let (_, _body) = alice_outbound_service.pop_call().unwrap(); - let (_, body) = alice_outbound_service.pop_call().unwrap(); - - let envelope_body = EnvelopeBody::decode(body.to_vec().as_slice()).unwrap(); - let tx_sender_msg: 
TransactionSenderMessage = envelope_body - .decode_part::(1) - .unwrap() - .unwrap() - .try_into() - .unwrap(); - match tx_sender_msg { - TransactionSenderMessage::Single(_) => (), - _ => { - panic!("Transaction is the not a single rounder sender variant"); - }, - }; - - runtime - .block_on(bob_tx_sender.send(create_dummy_message( - tx_sender_msg.into(), - alice_node_identity.public_key(), - ))) - .unwrap(); - bob_outbound_service - .wait_call_count(2, Duration::from_secs(60)) - .expect("bob call wait 1"); - - let _ = bob_outbound_service.pop_call().unwrap(); - let call = bob_outbound_service.pop_call().unwrap(); - - let envelope_body = EnvelopeBody::decode(&mut call.1.to_vec().as_slice()).unwrap(); - let bob_tx_reply_msg1: RecipientSignedMessage = envelope_body - .decode_part::(1) - .unwrap() - .unwrap() - .try_into() - .unwrap(); - - // Give Alice the tx reply to start the broadcast process. - runtime - .block_on(alice_tx_ack_sender.send(create_dummy_message( - bob_tx_reply_msg1.into(), - bob_node_identity.public_key(), - ))) - .unwrap(); - - runtime.block_on(async { - let delay = sleep(Duration::from_secs(60)); - tokio::pin!(delay); - let mut tx1_received = false; - loop { - tokio::select! 
{ - event = alice_event_stream.recv() => { - if let TransactionEvent::ReceivedTransactionReply(tx_id) = &*event.unwrap(){ - if tx_id == &tx_id1 { - tx1_received = true; - break; - } - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(tx1_received); - }); - - let alice_completed_tx1 = runtime - .block_on(alice_ts.get_completed_transactions()) - .unwrap() - .remove(&tx_id1) - .expect("Transaction must be in collection"); - - assert_eq!(alice_completed_tx1.status, TransactionStatus::Completed); - - let _ = runtime - .block_on(rpc_service_state.wait_pop_submit_transaction_calls(1, Duration::from_secs(20))) - .expect("Should receive a tx submission"); - let _ = runtime - .block_on(rpc_service_state.wait_pop_transaction_query_calls(1, Duration::from_secs(20))) - .expect("Should receive a tx query"); - - // Setup new RPC Server - let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - let service = BaseNodeWalletRpcMockService::new(); - let new_rpc_service_state = service.get_state(); - - let new_server = BaseNodeWalletRpcServer::new(service); - let protocol_name = new_server.as_protocol_name(); - - let mut new_mock_server = { - let _enter = runtime.handle().enter(); - MockRpcServer::new(new_server, new_server_node_identity.clone()) - }; - - { - let _enter = runtime.handle().enter(); - new_mock_server.serve(); - } - - let connection = - runtime.block_on(new_mock_server.create_connection(new_server_node_identity.to_peer(), protocol_name.into())); - runtime.block_on(connectivity_mock_state.add_active_connection(connection)); - - // Set new Base Node response to be mined but unconfirmed - new_rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: true, - height_of_longest_chain: 0, - }); - - runtime - .block_on(alice_ts.set_base_node_public_key(new_server_node_identity.public_key().clone())) - .unwrap(); - - // Wait for 1 query - let _ = 
runtime - .block_on(new_rpc_service_state.wait_pop_transaction_query_calls(1, Duration::from_secs(60))) - .unwrap(); - - new_rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: TransactionServiceConfig::default().num_confirmations_required, - is_synced: true, - height_of_longest_chain: 0, - }); - - runtime.block_on(async { - let delay = sleep(Duration::from_secs(60)); - tokio::pin!(delay); - let mut tx_mined = false; - loop { - tokio::select! { - event = alice_event_stream.recv() => { - if let TransactionEvent::TransactionMined(_) = &*event.unwrap(){ - tx_mined = true; - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(tx_mined); - }); -} - -#[test] -fn only_start_one_tx_broadcast_protocol_at_a_time() { - let mut runtime = Runtime::new().unwrap(); - let factories = CryptoFactories::default(); - - let temp_dir = tempdir().unwrap(); - let db_name = format!("{}.sqlite3", random::string(8).as_str()); - let db_path = format!("{}/{}", temp_dir.path().to_str().unwrap(), db_name); - let connection = run_migration_and_create_sqlite_connection(&db_path).unwrap(); - let backend = TransactionServiceSqliteDatabase::new(connection.clone(), None); - - let kernel = KernelBuilder::new() - .with_excess(&factories.commitment.zero()) - .with_signature(&Signature::default()) - .build() - .unwrap(); - - let tx = Transaction::new( - vec![], - vec![], - vec![kernel], - PrivateKey::random(&mut OsRng), - PrivateKey::random(&mut OsRng), - ); - - let completed_tx1 = CompletedTransaction { - tx_id: 1, - source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), - destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), - amount: 5000 * uT, - fee: MicroTari::from(100), - transaction: tx, - status: TransactionStatus::Completed, - message: "Yo!".to_string(), - timestamp: Utc::now().naive_utc(), - cancelled: false, - direction: 
TransactionDirection::Outbound, - coinbase_block_height: None, - send_count: 0, - last_send_timestamp: None, - valid: true, - confirmations: None, - mined_height: None, - }; - - backend - .write(WriteOperation::Insert(DbKeyValuePair::CompletedTransaction( - completed_tx1.tx_id, - Box::new(completed_tx1), - ))) - .unwrap(); - - let (mut alice_ts, _, _, _, _, _, _, _, _, _shutdown, _mock_rpc_server, server_node_identity, rpc_service_state) = - setup_transaction_service_no_comms(&mut runtime, factories, connection, None); - - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - - assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - - let tx_submit_calls = - runtime.block_on(rpc_service_state.wait_pop_submit_transaction_calls(2, Duration::from_secs(2))); - assert!(tx_submit_calls.is_err(), "Should not be 2 calls made"); -} - #[test] fn dont_broadcast_invalid_transactions() { let mut runtime = Runtime::new().unwrap(); @@ -5202,6 +5137,7 @@ fn dont_broadcast_invalid_transactions() { valid: false, confirmations: None, mined_height: None, + mined_in_block: None, }; backend @@ -5211,84 +5147,10 @@ fn dont_broadcast_invalid_transactions() { ))) .unwrap(); - let (mut alice_ts, _, _, _, _, _, _, _, _, _shutdown, _mock_rpc_server, server_node_identity, rpc_service_state) = - setup_transaction_service_no_comms(&mut runtime, factories, connection, None); - - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - - assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - - let tx_submit_calls = - runtime.block_on(rpc_service_state.wait_pop_submit_transaction_calls(1, Duration::from_secs(5))); - assert!(tx_submit_calls.is_err(), "Should be no calls made"); -} - -#[test] -fn start_validation_protocol_then_broadcast_protocol_change_base_node() { - 
let mut runtime = Runtime::new().unwrap(); - let factories = CryptoFactories::default(); - - let temp_dir = tempdir().unwrap(); - let db_name = format!("{}.sqlite3", random::string(8).as_str()); - let db_path = format!("{}/{}", temp_dir.path().to_str().unwrap(), db_name); - let connection = run_migration_and_create_sqlite_connection(&db_path).unwrap(); - let tx_backend = TransactionServiceSqliteDatabase::new(connection.clone(), None); - - let db = TransactionDatabase::new(tx_backend); - - runtime.block_on(add_transaction_to_database( - 1, - 10 * T, - true, - Some(TransactionStatus::MinedConfirmed), - db.clone(), - )); - - runtime.block_on(add_transaction_to_database( - 2, - 2 * T, - false, - Some(TransactionStatus::MinedConfirmed), - db.clone(), - )); - runtime.block_on(add_transaction_to_database( - 3, - 3 * T, - true, - Some(TransactionStatus::Completed), - db.clone(), - )); - - runtime.block_on(add_transaction_to_database( - 4, - 4 * T, - true, - Some(TransactionStatus::MinedConfirmed), - db.clone(), - )); - - runtime.block_on(add_transaction_to_database( - 5, - 5 * T, - false, - Some(TransactionStatus::MinedConfirmed), - db.clone(), - )); - runtime.block_on(add_transaction_to_database( - 6, - 6 * T, - true, - Some(TransactionStatus::MinedConfirmed), - db, - )); - let ( mut alice_ts, _, _, - connectivity_mock_state, _, _, _, @@ -5297,115 +5159,17 @@ fn start_validation_protocol_then_broadcast_protocol_change_base_node() { _shutdown, _mock_rpc_server, server_node_identity, - mut rpc_service_state, + rpc_service_state, + _, + mut alice_connectivity, + _rpc_server_connection, ) = setup_transaction_service_no_comms(&mut runtime, factories, connection, None); - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: true, - height_of_longest_chain: 0, - }); - rpc_service_state.set_response_delay(Some(Duration::from_secs(2))); + 
alice_connectivity.set_base_node(server_node_identity.to_peer()); - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - - runtime - .block_on(start_transaction_validation_and_broadcast_protocols( - alice_ts.clone(), - ValidationRetryStrategy::UntilSuccess, - )) - .expect("Validation should start"); - - let _tx_batch_query_calls = - runtime.block_on(rpc_service_state.wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60))); - - let _tx_submit_calls = - runtime.block_on(rpc_service_state.wait_pop_submit_transaction_calls(1, Duration::from_secs(60))); - - let _tx_query_calls = - runtime.block_on(rpc_service_state.wait_pop_transaction_query_calls(7, Duration::from_secs(60))); - - let completed_txs = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); - - assert_eq!(completed_txs.len(), 6); - - for (_, tx) in completed_txs.iter() { - assert_eq!(tx.status, TransactionStatus::MinedUnconfirmed); - assert!(tx.valid); - } - - let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - let service = BaseNodeWalletRpcMockService::new(); - let mut new_rpc_service_state = service.get_state(); - - let new_server = BaseNodeWalletRpcServer::new(service); - let protocol_name = new_server.as_protocol_name(); - - let mut new_mock_server = { - let _enter = runtime.handle().enter(); - MockRpcServer::new(new_server, new_server_node_identity.clone()) - }; - - { - let _enter = runtime.handle().enter(); - new_mock_server.serve(); - } - - let connection = - runtime.block_on(new_mock_server.create_connection(new_server_node_identity.to_peer(), protocol_name.into())); - runtime.block_on(connectivity_mock_state.add_active_connection(connection)); - - // Set new Base Node response to be mined but unconfirmed - new_rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::NotStored, - block_hash: None, - confirmations: 1, - is_synced: true, - 
height_of_longest_chain: 0, - }); - - new_rpc_service_state.set_response_delay(Some(Duration::from_secs(2))); - - runtime - .block_on(alice_ts.set_base_node_public_key(new_server_node_identity.public_key().clone())) - .unwrap(); - - runtime - .block_on(alice_ts.validate_transactions(ValidationRetryStrategy::UntilSuccess)) - .unwrap(); - - let _tx_batch_query_calls = - runtime.block_on(new_rpc_service_state.wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60))); - - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: TransactionServiceConfig::default().num_confirmations_required, - is_synced: true, - height_of_longest_chain: 0, - }); - - runtime - .block_on(alice_ts.set_base_node_public_key(server_node_identity.public_key().clone())) - .unwrap(); - - runtime - .block_on(alice_ts.validate_transactions(ValidationRetryStrategy::UntilSuccess)) - .unwrap(); - - let _tx_batch_query_calls = - runtime.block_on(rpc_service_state.wait_pop_transaction_batch_query_calls(6, Duration::from_secs(30))); - - let completed_txs = runtime.block_on(alice_ts.get_completed_transactions()).unwrap(); - - assert_eq!(completed_txs.len(), 6); + assert!(runtime.block_on(alice_ts.restart_broadcast_protocols()).is_ok()); - for (_, tx) in completed_txs.iter() { - assert_eq!(tx.status, TransactionStatus::MinedConfirmed); - assert!(tx.valid); - } + let tx_submit_calls = + runtime.block_on(rpc_service_state.wait_pop_submit_transaction_calls(1, Duration::from_secs(5))); + assert!(tx_submit_calls.is_err(), "Should be no calls made"); } diff --git a/base_layer/wallet/tests/transaction_service/storage.rs b/base_layer/wallet/tests/transaction_service/storage.rs index 5573bd63e5..662c265eb7 100644 --- a/base_layer/wallet/tests/transaction_service/storage.rs +++ b/base_layer/wallet/tests/transaction_service/storage.rs @@ -270,6 +270,7 @@ pub fn test_db_backend(backend: T) { valid: true, confirmations: None, 
mined_height: None, + mined_in_block: None, }); runtime .block_on(db.complete_outbound_transaction(outbound_txs[i].tx_id, completed_txs[i].clone())) @@ -314,13 +315,25 @@ pub fn test_db_backend(backend: T) { assert!(retrieved_completed_tx.last_send_timestamp.is_some()); assert!(retrieved_completed_tx.confirmations.is_none()); + assert!(runtime.block_on(db.fetch_last_mined_transaction()).unwrap().is_none()); + runtime - .block_on(db.set_transaction_confirmations(retrieved_completed_tx.tx_id, 1)) + .block_on(db.set_transaction_mined_height(completed_txs[0].tx_id, true, 10, [0u8; 16].to_vec(), 5, true)) .unwrap(); + + assert_eq!( + runtime + .block_on(db.fetch_last_mined_transaction()) + .unwrap() + .unwrap() + .tx_id, + completed_txs[0].tx_id + ); + let retrieved_completed_tx = runtime .block_on(db.get_completed_transaction(completed_txs[0].tx_id)) .unwrap(); - assert_eq!(retrieved_completed_tx.confirmations, Some(1)); + assert_eq!(retrieved_completed_tx.confirmations, Some(5)); let any_completed_tx = runtime .block_on(db.get_any_transaction(completed_txs[0].tx_id)) @@ -332,8 +345,8 @@ pub fn test_db_backend(backend: T) { panic!("Should have found completed tx"); } - let completed_txs = runtime.block_on(db.get_completed_transactions()).unwrap(); - let num_completed_txs = completed_txs.len(); + let completed_txs_map = runtime.block_on(db.get_completed_transactions()).unwrap(); + let num_completed_txs = completed_txs_map.len(); assert_eq!( runtime .block_on(db.get_cancelled_completed_transactions()) @@ -342,15 +355,15 @@ pub fn test_db_backend(backend: T) { 0 ); - let cancelled_tx_id = completed_txs[&1].tx_id; + let cancelled_tx_id = completed_txs_map[&1].tx_id; assert!(runtime .block_on(db.get_cancelled_completed_transaction(cancelled_tx_id)) .is_err()); runtime .block_on(db.cancel_completed_transaction(cancelled_tx_id)) .unwrap(); - let completed_txs = runtime.block_on(db.get_completed_transactions()).unwrap(); - assert_eq!(completed_txs.len(), num_completed_txs - 
1); + let completed_txs_map = runtime.block_on(db.get_completed_transactions()).unwrap(); + assert_eq!(completed_txs_map.len(), num_completed_txs - 1); runtime .block_on(db.get_cancelled_completed_transaction(cancelled_tx_id)) @@ -521,6 +534,17 @@ pub fn test_db_backend(backend: T) { } else { panic!("Should have found cancelled outbound tx"); } + + let unmined_txs = runtime.block_on(db.fetch_unconfirmed_transactions()).unwrap(); + + assert_eq!(unmined_txs.len(), 4); + + runtime + .block_on(db.set_transaction_as_unmined(completed_txs[0].tx_id)) + .unwrap(); + + let unmined_txs = runtime.block_on(db.fetch_unconfirmed_transactions()).unwrap(); + assert_eq!(unmined_txs.len(), 5); } #[test] diff --git a/base_layer/wallet/tests/transaction_service/transaction_protocols.rs b/base_layer/wallet/tests/transaction_service/transaction_protocols.rs index 31d12b8fd7..f8582a3b85 100644 --- a/base_layer/wallet/tests/transaction_service/transaction_protocols.rs +++ b/base_layer/wallet/tests/transaction_service/transaction_protocols.rs @@ -20,16 +20,18 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use crate::support::{ + comms_rpc::{connect_rpc_client, BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState}, + utils::make_input, +}; use chrono::Utc; use futures::StreamExt; use rand::rngs::OsRng; +use std::{collections::HashMap, sync::Arc, time::Duration}; use tari_comms::{ peer_manager::PeerFeatures, - protocol::rpc::{mock::MockRpcServer, NamedProtocolService, RpcStatus}, - test_utils::{ - mocks::{create_connectivity_mock, ConnectivityManagerMockState}, - node_identity::build_node_identity, - }, + protocol::rpc::{mock::MockRpcServer, NamedProtocolService}, + test_utils::node_identity::build_node_identity, types::CommsPublicKey, NodeIdentity, }; @@ -39,6 +41,16 @@ use tari_core::{ proto::wallet_rpc::{TxLocation, TxQueryResponse, TxSubmissionRejectionReason, TxSubmissionResponse}, rpc::BaseNodeWalletRpcServer, }, + blocks::BlockHeader, + crypto::tari_utilities::Hashable, + proto::{ + base_node::{ + TxLocation as TxLocationProto, + TxQueryBatchResponse as TxQueryBatchResponseProto, + TxQueryBatchResponses as TxQueryBatchResponsesProto, + }, + types::Signature as SignatureProto, + }, transactions::{ helpers::schema_to_transaction, tari_amount::{uT, MicroTari, T}, @@ -50,6 +62,7 @@ use tari_service_framework::{reply_channel, reply_channel::Receiver}; use tari_shutdown::Shutdown; use tari_test_utils::random; use tari_wallet::{ + connectivity_service::{create_wallet_connectivity_mock, WalletConnectivityMock}, output_manager_service::{ error::OutputManagerError, handle::{OutputManagerHandle, OutputManagerRequest, OutputManagerResponse}, @@ -58,7 +71,7 @@ use tari_wallet::{ storage::sqlite_utilities::run_migration_and_create_sqlite_connection, transaction_service::{ config::TransactionServiceConfig, - error::TransactionServiceError, + error::{TransactionServiceError, TransactionServiceProtocolError}, handle::{TransactionEvent, TransactionEventReceiver, TransactionEventSender}, protocols::{ transaction_broadcast_protocol::TransactionBroadcastProtocol, @@ -71,17 
+84,11 @@ use tari_wallet::{ sqlite_db::TransactionServiceSqliteDatabase, }, }, - types::ValidationRetryStrategy, + util::watch::Watch, }; use tempfile::{tempdir, TempDir}; use tokio::{sync::broadcast, task, time::sleep}; -use crate::support::{ - rpc::{BaseNodeWalletRpcMockService, BaseNodeWalletRpcMockState}, - utils::make_input, -}; -use std::{sync::Arc, time::Duration}; - // Just in case other options become apparent in later testing #[derive(PartialEq)] pub enum TxProtocolTestConfig { @@ -92,47 +99,43 @@ pub enum TxProtocolTestConfig { pub async fn setup( config: TxProtocolTestConfig, ) -> ( - TransactionServiceResources, - ConnectivityManagerMockState, + TransactionServiceResources, OutboundServiceMockState, MockRpcServer>, Arc, BaseNodeWalletRpcMockState, - broadcast::Sender, Shutdown, TempDir, TransactionEventReceiver, + WalletConnectivityMock, ) { let client_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - let (connectivity_manager, connectivity_mock) = create_connectivity_mock(); - - let connectivity_mock_state = connectivity_mock.get_shared_state(); - - connectivity_mock.spawn(); - let service = BaseNodeWalletRpcMockService::new(); let rpc_service_state = service.get_state(); let server = BaseNodeWalletRpcServer::new(service); let protocol_name = server.as_protocol_name(); - let mut mock_server = MockRpcServer::new(server, server_node_identity.clone()); + let mut mock_rpc_server = MockRpcServer::new(server, server_node_identity.clone()); + mock_rpc_server.serve(); - mock_server.serve(); + let wallet_connectivity = create_wallet_connectivity_mock(); if config == TxProtocolTestConfig::WithConnection { - let connection = mock_server + let mut connection = mock_rpc_server .create_connection(server_node_identity.to_peer(), protocol_name.into()) .await; - connectivity_mock_state.add_active_connection(connection).await; + + 
wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); } let db_name = format!("{}.sqlite3", random::string(8).as_str()); let temp_dir = tempdir().unwrap(); let db_folder = temp_dir.path().to_str().unwrap().to_string(); let db_connection = run_migration_and_create_sqlite_connection(&format!("{}/{}", db_folder, db_name)).unwrap(); + let db = TransactionDatabase::new(TransactionServiceSqliteDatabase::new(db_connection, None)); let (oms_request_sender, oms_request_receiver) = reply_channel::unbounded(); @@ -154,7 +157,7 @@ pub async fn setup( db, output_manager_service: output_manager_service_handle, outbound_message_service: outbound_message_requester, - connectivity_manager, + connectivity: wallet_connectivity.clone(), event_publisher: ts_event_publisher, node_identity: client_node_identity, factories: CryptoFactories::default(), @@ -166,19 +169,16 @@ pub async fn setup( shutdown_signal: shutdown.to_signal(), }; - let (timeout_update_publisher, _) = broadcast::channel(20); - ( resources, - connectivity_mock_state, outbound_mock_state, - mock_server, + mock_rpc_server, server_node_identity, rpc_service_state, - timeout_update_publisher, shutdown, temp_dir, ts_event_receiver, + wallet_connectivity, ) } @@ -187,6 +187,7 @@ pub async fn add_transaction_to_database( amount: MicroTari, valid: bool, status: Option, + coinbase_block_height: Option, db: TransactionDatabase, ) { let factories = CryptoFactories::default(); @@ -204,7 +205,7 @@ pub async fn add_transaction_to_database( "Test".to_string(), Utc::now().naive_local(), TransactionDirection::Outbound, - None, + coinbase_block_height, ); completed_tx1.valid = valid; db.insert_completed_transaction(tx_id, completed_tx1).await.unwrap(); @@ -218,7 +219,6 @@ pub async fn oms_reply_channel_task( while let Some(request_context) = receiver.next().await { let (request, reply_tx) = request_context.split(); let response = match request { - OutputManagerRequest::ConfirmTransaction(_) => 
Ok(OutputManagerResponse::TransactionConfirmed), OutputManagerRequest::CancelTransaction(_) => Ok(OutputManagerResponse::TransactionCancelled), _ => Err(OutputManagerError::InvalidResponseError( "Unhandled request type".to_string(), @@ -229,53 +229,46 @@ pub async fn oms_reply_channel_task( } } -/// A happy path test by submitting a transaction into the mempool, have it mined but unconfirmed and then confirmed. +/// A happy path test by submitting a transaction into the mempool #[tokio::test] #[allow(clippy::identity_op)] -async fn tx_broadcast_protocol_submit_success_i() { +async fn tx_broadcast_protocol_submit_success() { let ( resources, - _connectivity_mock_state, _outbound_mock_state, - _mock_rpc_server, + mock_rpc_server, server_node_identity, rpc_service_state, - timeout_update_publisher, _shutdown, _temp_dir, - mut transaction_event_receiver, + _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; let mut event_stream = resources.event_publisher.subscribe(); - let (base_node_update_publisher, _) = broadcast::channel(20); - let protocol = TransactionBroadcastProtocol::new( - 2, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); + + let timeout_watch = Watch::new(Duration::from_secs(1)); + + let protocol = TransactionBroadcastProtocol::new(2, resources.clone(), timeout_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); // Fails because there is no transaction in the database to be broadcast assert!(join_handle.await.unwrap().is_err()); 
- add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; + add_transaction_to_database(1, 1 * T, true, None, None, resources.db.clone()).await; let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); assert!(db_completed_tx.confirmations.is_none()); - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); + let protocol = TransactionBroadcastProtocol::new(1, resources.clone(), timeout_watch.get_receiver()); - let join_handle = task::spawn(protocol.execute()); + task::spawn(protocol.execute()); // Set Base Node response to be not synced but in mempool rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { @@ -301,161 +294,52 @@ async fn tx_broadcast_protocol_submit_success_i() { .await .unwrap(); - // Set Base Node response to be mined but unconfirmed - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: false, - height_of_longest_chain: 10, - }); - // Wait for 1 query - let _ = rpc_service_state - .wait_pop_transaction_query_calls(2, Duration::from_secs(5)) - .await - .unwrap(); - - // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::Broadcast); - - // Set Base Node response to be mined but unconfirmed - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: true, - height_of_longest_chain: 10, - }); - // Wait for 1 query - let _ = rpc_service_state - .wait_pop_transaction_query_calls(2, Duration::from_secs(5)) - .await - .unwrap(); - - // Check transaction status is updated - let db_completed_tx = 
resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedUnconfirmed); - assert_eq!(db_completed_tx.confirmations, Some(1)); - - // Set base node response to mined and confirmed but not synced - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: false, - height_of_longest_chain: 10, - }); - - let _ = rpc_service_state - .wait_pop_transaction_query_calls(1, Duration::from_secs(5)) - .await - .unwrap(); - // lets wait for the transaction service event to notify us of a confirmed tx - // We need to do this to ensure that the wallet db has been updated to "Mined" - loop { - let v = transaction_event_receiver.recv().await; - let event = v.unwrap(); - match (*event).clone() { - TransactionEvent::TransactionMined(_) => { - break; - }, - _ => continue, - } - } - // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedConfirmed); - - // Set base node response to mined and confirmed and synced - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: true, - height_of_longest_chain: 10, - }); - - // Check that the protocol ends with success - let result = join_handle.await.unwrap(); - assert_eq!(result.unwrap(), 1); - - // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedConfirmed); - assert_eq!( - db_completed_tx.confirmations, - Some(resources.config.num_confirmations_required) - ); - assert_eq!( - db_completed_tx.mined_height, - Some(10 - resources.config.num_confirmations_required) - 
); - // Check that the appropriate events were emitted let delay = sleep(Duration::from_secs(5)); tokio::pin!(delay); let mut broadcast = false; - let mut unconfirmed = false; - let mut confirmed = false; loop { tokio::select! { event = event_stream.recv() => { - match &*event.unwrap() { - TransactionEvent::TransactionMinedUnconfirmed(_, confirmations) => if *confirmations == 1 { - unconfirmed = true; - } - TransactionEvent::TransactionMined(_) => { - confirmed = true; - }, - TransactionEvent::TransactionBroadcast(_) => { - broadcast = true; - }, - _ => (), - } + if let TransactionEvent::TransactionBroadcast(_) = &*event.unwrap() { + broadcast = true; + } }, () = &mut delay => { break; }, } } - assert!( - unconfirmed, - "Should have received at least 1 TransactionEvent::TransactionMinedUnconfirmed event" - ); - assert!(confirmed, "Should have received a confirmed event"); + assert!(broadcast, "Should have received a broadcast event"); } - /// Test submitting a transaction that is immediately rejected #[tokio::test] #[allow(clippy::identity_op)] async fn tx_broadcast_protocol_submit_rejection() { let ( resources, - _connectivity_mock_state, _outbound_mock_state, - _mock_rpc_server, + mock_rpc_server, server_node_identity, rpc_service_state, - timeout_update_publisher, _shutdown, _temp_dir, _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; let mut event_stream = resources.event_publisher.subscribe(); - let (base_node_update_publisher, _) = broadcast::channel(20); - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; + add_transaction_to_database(1, 1 * T, true, None, None, resources.db.clone()).await; + let timeout_update_watch = Watch::new(Duration::from_secs(1)); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + 
.await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); + let protocol = TransactionBroadcastProtocol::new(1, resources.clone(), timeout_update_watch.get_receiver()); rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { accepted: false, @@ -487,7 +371,7 @@ async fn tx_broadcast_protocol_submit_rejection() { tokio::select! { event = event_stream.recv() => { if let TransactionEvent::TransactionCancelled(_) = &*event.unwrap() { - cancelled = true; + cancelled = true; } }, () = &mut delay => { @@ -506,19 +390,17 @@ async fn tx_broadcast_protocol_submit_rejection() { async fn tx_broadcast_protocol_restart_protocol_as_query() { let ( resources, - _connectivity_mock_state, _outbound_mock_state, - _mock_rpc_server, + mock_rpc_server, server_node_identity, rpc_service_state, - timeout_update_publisher, _shutdown, _temp_dir, _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; + add_transaction_to_database(1, 1 * T, true, None, None, resources.db.clone()).await; // Set Base Node query response to be not stored, as if the base node does not have the tx in its pool rpc_service_state.set_transaction_query_response(TxQueryResponse { @@ -529,15 +411,16 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { height_of_longest_chain: 0, }); - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); + let 
timeout_update_watch = Watch::new(Duration::from_secs(1)); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); + + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); + let protocol = TransactionBroadcastProtocol::new(1, resources.clone(), timeout_update_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); // Check if in mempool (its not) @@ -556,7 +439,7 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { height_of_longest_chain: 0, }); - // Should receive a resummission call + // Should receive a resubmission call let _ = rpc_service_state .wait_pop_submit_transaction_calls(1, Duration::from_secs(5)) .await @@ -583,50 +466,47 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Check transaction status is updated let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedConfirmed); + assert_eq!(db_completed_tx.status, TransactionStatus::Broadcast); } -/// This test will submit a Tx which will be accepted and then dropped from the mempool, resulting in a resubmit which -/// will be rejected and result in a cancelled transaction +/// This test will submit a Tx which will be accepted on submission but rejected on query, intially it will be done +/// slower than the resubmission window but then the resubmission window will be reduced so the transaction will be +/// reject twice within the window resulting in a cancelled transaction #[tokio::test] #[allow(clippy::identity_op)] async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { let ( - resources, - _connectivity_mock_state, + mut resources, _outbound_mock_state, - _mock_rpc_server, + mock_rpc_server, server_node_identity, rpc_service_state, - 
timeout_update_publisher, _shutdown, _temp_dir, _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; let mut event_stream = resources.event_publisher.subscribe(); - let (base_node_update_publisher, _) = broadcast::channel(20); - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; + add_transaction_to_database(1, 1 * T, true, None, None, resources.db.clone()).await; - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); + resources.config.transaction_mempool_resubmission_window = Duration::from_secs(3); + resources.config.broadcast_monitoring_timeout = Duration::from_secs(60); - let join_handle = task::spawn(protocol.execute()); + let timeout_update_watch = Watch::new(Duration::from_secs(1)); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); - // Accepted in the mempool but not mined yet - // Wait for 1 query - let _ = rpc_service_state - .wait_pop_transaction_query_calls(1, Duration::from_secs(5)) - .await - .unwrap(); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); + + let protocol = TransactionBroadcastProtocol::new(1, resources.clone(), timeout_update_watch.get_receiver()); + + let join_handle = task::spawn(protocol.execute()); - // Set Base Node response to be rejected by mempool + // Accepted in the mempool on submit but not query rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, block_hash: None, @@ -635,28 +515,20 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { height_of_longest_chain: 0, }); - // 
Set Base Node to reject resubmission - rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { - accepted: false, - rejection_reason: TxSubmissionRejectionReason::TimeLocked, - is_synced: true, - }); - // Wait for 1 query let _ = rpc_service_state - .wait_pop_transaction_query_calls(1, Duration::from_secs(5)) + .wait_pop_transaction_query_calls(1, Duration::from_secs(30)) .await .unwrap(); let _ = rpc_service_state - .wait_pop_submit_transaction_calls(1, Duration::from_secs(5)) + .wait_pop_submit_transaction_calls(2, Duration::from_secs(30)) .await .unwrap(); // Check that the protocol ends with rejection error if let Err(e) = join_handle.await.unwrap() { - println!("{:?}", e); - if let TransactionServiceError::MempoolRejectionTimeLocked = e.error { + if let TransactionServiceError::MempoolRejection = e.error { } else { panic!("Tx broadcast Should have failed with mempool rejection for being time locked"); } @@ -688,1059 +560,613 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { assert!(cancelled, "Should have cancelled transaction"); } -/// This test will submit a tx which is accepted and mined but unconfirmed, then the next query it will not exist -/// resulting in a resubmission which we will let run to being mined with success +/// Submit a transaction that is Already Mined for the submission, should end up being completed as the validation will +/// deal with it #[tokio::test] #[allow(clippy::identity_op)] -async fn tx_broadcast_protocol_submit_mined_then_not_mined_resubmit_success() { +async fn tx_broadcast_protocol_submit_already_mined() { let ( resources, - _connectivity_mock_state, _outbound_mock_state, - _mock_rpc_server, + mock_rpc_server, server_node_identity, rpc_service_state, - timeout_update_publisher, _shutdown, _temp_dir, - mut transaction_event_receiver, + _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = 
broadcast::channel(20); + add_transaction_to_database(1, 1 * T, true, None, None, resources.db.clone()).await; + + // Set Base Node to respond with AlreadyMined + rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { + accepted: false, + rejection_reason: TxSubmissionRejectionReason::AlreadyMined, + is_synced: true, + }); - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; + let timeout_update_watch = Watch::new(Duration::from_secs(1)); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); + let protocol = TransactionBroadcastProtocol::new(1, resources.clone(), timeout_update_watch.get_receiver()); let join_handle = task::spawn(protocol.execute()); - // Wait for the correct amount of queries - if let Err(e) = rpc_service_state - .wait_pop_transaction_query_calls(4, Duration::from_secs(5)) + let _ = rpc_service_state + .wait_pop_submit_transaction_calls(1, Duration::from_secs(5)) .await - { - println!(" {}", e) - } + .expect("Should receive a submission call"); - // Accepted in the mempool but not mined yet - // Wait for the correct amount of queries - if let Err(e) = rpc_service_state - .wait_pop_transaction_query_calls(4, Duration::from_secs(5)) + let _ = rpc_service_state + .wait_pop_transaction_query_calls(1, Duration::from_secs(5)) .await - { - println!(" {}", e) - } + .unwrap(); - // Set Base Node response to be mined but unconfirmed + // Set base node response to mined and confirmed 
rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, block_hash: None, - confirmations: 1, + confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + height_of_longest_chain: 10, }); - // Wait for the correct amount of queries - if let Err(e) = rpc_service_state - .wait_pop_transaction_query_calls(4, Duration::from_secs(5)) - .await - { - println!(" {}", e) - } - // Wait for the "TransactionMinedUnconfirmed" tx event to ensure that the wallet db state is "MinedUnconfirmed" - let mut count = 0u16; - loop { - let v = transaction_event_receiver.recv().await; - let event = v.unwrap(); - match (*event).clone() { - TransactionEvent::TransactionMinedUnconfirmed(_, _) => { - break; - }, - _ => { - count += 1; - if count >= 10 { - break; - } - continue; - }, - } - } + // Check that the protocol ends with success + let result = join_handle.await.unwrap(); + assert_eq!(result.unwrap(), 1); // Check transaction status is updated let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedUnconfirmed); + assert_eq!(db_completed_tx.status, TransactionStatus::Completed); +} + +/// A test to see that the broadcast protocol can handle a change to the base node address while it runs. 
+#[tokio::test] +#[allow(clippy::identity_op)] +async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { + let ( + mut resources, + _outbound_mock_state, + mock_rpc_server, + server_node_identity, + rpc_service_state, + _shutdown, + _temp_dir, + _transaction_event_receiver, + wallet_connectivity, + ) = setup(TxProtocolTestConfig::WithConnection).await; + + add_transaction_to_database(1, 1 * T, true, None, None, resources.db.clone()).await; + + resources.config.broadcast_monitoring_timeout = Duration::from_secs(60); - // Set base node response to mined and confirmed rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, block_hash: None, - confirmations: 0, + confirmations: 1, is_synced: true, height_of_longest_chain: 0, }); - // Should receive a resubmission call + let timeout_update_watch = Watch::new(Duration::from_secs(1)); + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); + + let protocol = TransactionBroadcastProtocol::new(1, resources.clone(), timeout_update_watch.get_receiver()); + + let join_handle = task::spawn(protocol.execute()); + + // Wait for 1 queries let _ = rpc_service_state - .wait_pop_submit_transaction_calls(1, Duration::from_secs(5)) + .wait_pop_transaction_query_calls(1, Duration::from_secs(205)) .await - .expect("Should receive a resubmission call"); + .unwrap(); - // Set Base Node response to be mined and confirmed - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, + // Setup new RPC Server + let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let service = BaseNodeWalletRpcMockService::new(); + let new_rpc_service_state = 
service.get_state(); + + let new_server = BaseNodeWalletRpcServer::new(service); + let protocol_name = new_server.as_protocol_name(); + let mut new_mock_server = MockRpcServer::new(new_server, new_server_node_identity.clone()); + new_mock_server.serve(); + + let mut connection = new_mock_server + .create_connection(new_server_node_identity.to_peer(), protocol_name.into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); + + // Set new Base Node response to be accepted + new_rpc_service_state.set_transaction_query_response(TxQueryResponse { + location: TxLocation::InMempool, block_hash: None, - confirmations: resources.config.num_confirmations_required as u64 + 1u64, + confirmations: resources.config.num_confirmations_required, is_synced: true, height_of_longest_chain: 0, }); + // Change Base Node + wallet_connectivity.notify_base_node_set(new_server_node_identity.to_peer()); + + // Wait for 1 query + let _ = new_rpc_service_state + .wait_pop_transaction_query_calls(1, Duration::from_secs(20)) + .await + .unwrap(); + + // Update old base node to reject the tx to check that the protocol is using the new base node + rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { + accepted: false, + rejection_reason: TxSubmissionRejectionReason::Orphan, + is_synced: true, + }); + // Check that the protocol ends with success let result = join_handle.await.unwrap(); assert_eq!(result.unwrap(), 1); // Check transaction status is updated let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedConfirmed); + assert_eq!(db_completed_tx.status, TransactionStatus::Broadcast); } -/// Test being unable to connect and then connection becoming available. 
+/// Test that validation detects transactions becoming mined unconfirmed and then confirmed with some going back to +/// completed #[tokio::test] #[allow(clippy::identity_op)] -async fn tx_broadcast_protocol_connection_problem() { +async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { let ( resources, - connectivity_mock_state, _outbound_mock_state, mock_rpc_server, server_node_identity, rpc_service_state, - timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithoutConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - - let mut event_stream = resources.event_publisher.subscribe(); - - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; - - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); - - let join_handle = task::spawn(protocol.execute()); - - // Check that the connection problem event was emitted at least twice - let delay = sleep(Duration::from_secs(10)); - tokio::pin!(delay); - let mut connection_issues = 0; - loop { - tokio::select! 
{ - event = event_stream.recv() => { - if let TransactionEvent::TransactionBaseNodeConnectionProblem(_) = &*event.unwrap() { - connection_issues+=1; - } - if connection_issues >= 2 { - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(connection_issues >= 2, "Should have retried connection at least twice"); - - // Now we add the connection - let connection = mock_rpc_server - .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) - .await; - connectivity_mock_state.add_active_connection(connection).await; - - // Check that the protocol ends with success - // Set Base Node response to be mined and confirmed - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required as u64 + 1u64, - is_synced: true, - height_of_longest_chain: 0, - }); - let result = join_handle.await.unwrap(); - assert_eq!(result.unwrap(), 1); -} - -/// Submit a transaction that is Already Mined for the submission, the subsequent query should confirm the transaction -#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_broadcast_protocol_submit_already_mined() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - rpc_service_state, - timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; - - // Set Base Node to respond with AlreadyMined - rpc_service_state.set_submit_transaction_response(TxSubmissionResponse { - accepted: false, - rejection_reason: TxSubmissionRejectionReason::AlreadyMined, - is_synced: true, - }); - - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - 
server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); - - let join_handle = task::spawn(protocol.execute()); - - let _ = rpc_service_state - .wait_pop_submit_transaction_calls(1, Duration::from_secs(5)) - .await - .expect("Should receive a submission call"); - - let _ = rpc_service_state - .wait_pop_transaction_query_calls(1, Duration::from_secs(5)) - .await - .unwrap(); - - // Set base node response to mined and confirmed - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: true, - height_of_longest_chain: 10, - }); - - // Check that the protocol ends with success - let result = join_handle.await.unwrap(); - assert_eq!(result.unwrap(), 1); - - // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedConfirmed); - assert_eq!( - db_completed_tx.mined_height, - Some(10 - resources.config.num_confirmations_required) - ); -} - -/// A test to see that the broadcast protocol can handle a change to the base node address while it runs. 
-#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { - let ( - resources, - connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - rpc_service_state, - timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - - add_transaction_to_database(1, 1 * T, true, None, resources.db.clone()).await; - - let protocol = TransactionBroadcastProtocol::new( - 1, - resources.clone(), - Duration::from_secs(1), - server_node_identity.public_key().clone(), - timeout_update_publisher.subscribe(), - base_node_update_publisher.subscribe(), - ); - - let join_handle = task::spawn(protocol.execute()); - - // Accepted in the mempool but not mined yet - // Wait for 2 queries - let _ = rpc_service_state - .wait_pop_transaction_query_calls(2, Duration::from_secs(5)) - .await - .unwrap(); - - // Setup new RPC Server - let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - let service = BaseNodeWalletRpcMockService::new(); - let new_rpc_service_state = service.get_state(); - - let new_server = BaseNodeWalletRpcServer::new(service); - let protocol_name = new_server.as_protocol_name(); - let mut new_mock_server = MockRpcServer::new(new_server, new_server_node_identity.clone()); - new_mock_server.serve(); - - let connection = new_mock_server - .create_connection(new_server_node_identity.to_peer(), protocol_name.into()) - .await; - connectivity_mock_state.add_active_connection(connection).await; - - // Set new Base Node response to be mined but unconfirmed - new_rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: true, - height_of_longest_chain: 0, - }); - - // Change Base Node - base_node_update_publisher - 
.send(new_server_node_identity.public_key().clone()) - .unwrap(); - - // Update old base node to reject the tx to check that the protocol is using the new base node - // Set Base Node query response to be InMempool as if the base node does not have the tx in its pool - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::NotStored, - block_hash: None, - confirmations: 0, - is_synced: true, - height_of_longest_chain: 0, - }); - - // Wait for 1 query - let _ = new_rpc_service_state - .wait_pop_transaction_query_calls(1, Duration::from_secs(5)) - .await - .unwrap(); - - // Set base node response to mined and confirmed - new_rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: true, - height_of_longest_chain: 0, - }); - - // Check that the protocol ends with success - let result = join_handle.await.unwrap(); - assert_eq!(result.unwrap(), 1); - - // Check transaction status is updated - let db_completed_tx = resources.db.get_completed_transaction(1).await.unwrap(); - assert_eq!(db_completed_tx.status, TransactionStatus::MinedConfirmed); -} - -/// Validate completed transactions, will check that valid ones stay valid and incorrectly marked invalid tx become -/// valid. 
-#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_validation_protocol_tx_becomes_valid() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - rpc_service_state, - _timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - - add_transaction_to_database( - 1, - 1 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - add_transaction_to_database( - 2, - 2 * T, - false, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - add_transaction_to_database( - 3, - 3 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - add_transaction_to_database( - 4, - 4 * T, - false, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: true, - height_of_longest_chain: 0, - }); - - rpc_service_state.set_is_synced(false); - - let protocol = TransactionValidationProtocol::new( - 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(1), - base_node_update_publisher.subscribe(), - _timeout_update_publisher.subscribe(), - ValidationRetryStrategy::UntilSuccess, - ); - - let join_handle = task::spawn(protocol.execute()); - - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(6, Duration::from_secs(60)) - .await - .unwrap(); - - rpc_service_state.set_is_synced(true); - - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(2, Duration::from_secs(60)) - .await - .unwrap(); - - // Check that the protocol ends with 
success - let result = join_handle.await.unwrap(); - assert!(result.is_ok()); - - // Check transaction status is updated - let db_completed_txs = resources.db.get_completed_transactions().await.unwrap(); - - for tx in db_completed_txs.values() { - assert!(tx.valid, "TxId: {} should be valid", tx.tx_id); - } -} - -/// Validate completed transaction, the transaction should become invalid -#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_validation_protocol_tx_becomes_invalid() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - rpc_service_state, - _timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - - add_transaction_to_database( - 1, - 1 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::NotStored, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: true, - height_of_longest_chain: 0, - }); - - let protocol = TransactionValidationProtocol::new( - 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(1), - base_node_update_publisher.subscribe(), - _timeout_update_publisher.subscribe(), - ValidationRetryStrategy::UntilSuccess, - ); - - let join_handle = task::spawn(protocol.execute()); - - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60)) - .await - .unwrap(); - - // Check that the protocol ends with success - let result = join_handle.await.unwrap(); - assert!(result.is_ok()); - - // Check transaction status is updated - let db_completed_txs = resources.db.get_completed_transactions().await.unwrap(); - - for tx in 
db_completed_txs.values() { - assert!(!tx.valid, "TxId: {} should be invalid", tx.tx_id); - } -} - -/// Validate completed transactions, the transaction should become invalid -#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_validation_protocol_tx_becomes_unconfirmed() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - rpc_service_state, - _timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - - add_transaction_to_database( - 1, - 1 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - // Set Base Node to respond with AlreadyMined - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: true, - height_of_longest_chain: 0, - }); - - let protocol = TransactionValidationProtocol::new( - 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(1), - base_node_update_publisher.subscribe(), - _timeout_update_publisher.subscribe(), - ValidationRetryStrategy::UntilSuccess, - ); - - let join_handle = task::spawn(protocol.execute()); - - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60)) - .await - .unwrap(); - - // Check that the protocol ends with success - let result = join_handle.await.unwrap(); - assert!(result.is_ok()); - - // Check transaction status is updated - let db_completed_txs = resources.db.get_completed_transactions().await.unwrap(); - - for tx in db_completed_txs.values() { - assert_eq!( - tx.status, - TransactionStatus::MinedUnconfirmed, - "TxId: {} should be unconfirmed", - tx.tx_id - ); - } -} - -/// Test the validation protocol reacts correctly to a 
change in base node and redoes the full validation based on the -/// new base node -#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_validation_protocol_tx_ends_on_base_node_end() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - mut rpc_service_state, - _timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - let mut event_stream = resources.event_publisher.subscribe(); - - add_transaction_to_database( - 1, - 1 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 2, - 2 * T, - false, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 3, - 3 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 4, - 4 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 5, - 5 * T, - false, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 6, - 6 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - // Set Base Node to respond with AlreadyMined - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, - is_synced: true, - height_of_longest_chain: 0, - }); - - rpc_service_state.set_response_delay(Some(Duration::from_secs(5))); - - let protocol = TransactionValidationProtocol::new( - 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(10), - base_node_update_publisher.subscribe(), - 
_timeout_update_publisher.subscribe(), - ValidationRetryStrategy::UntilSuccess, - ); - - let join_handle = task::spawn(protocol.execute()); - - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60)) - .await - .unwrap(); - - // Setup new RPC Server - let new_server_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - - // Change Base Node - base_node_update_publisher - .send(new_server_node_identity.public_key().clone()) - .unwrap(); - - // Check that the protocol ends with success - let result = join_handle.await.unwrap(); - assert!(result.is_ok()); - - let delay = sleep(Duration::from_secs(1)); - tokio::pin!(delay); - let mut aborted = false; - loop { - tokio::select! { - event = event_stream.recv() => { - if let TransactionEvent::TransactionValidationAborted(_) = &*event.unwrap() { - aborted = true; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(aborted, "Validation protocol should have aborted"); -} - -/// Test the validation protocol reacts correctly when the RPC client returns an error between calls. 
-#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_validation_protocol_rpc_client_broken_between_calls() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - mut rpc_service_state, - _timeout_update_publisher, _shutdown, _temp_dir, _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); add_transaction_to_database( 1, 1 * T, true, - Some(TransactionStatus::MinedConfirmed), + Some(TransactionStatus::Broadcast), + None, resources.db.clone(), ) .await; - add_transaction_to_database( 2, 2 * T, - false, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 3, - 3 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - - add_transaction_to_database( - 4, - 4 * T, true, - Some(TransactionStatus::MinedConfirmed), + Some(TransactionStatus::Completed), + None, resources.db.clone(), ) .await; - add_transaction_to_database( - 5, - 5 * T, - false, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; + let tx2 = resources.db.get_completed_transaction(2).await.unwrap(); - add_transaction_to_database( - 6, - 6 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; + let transaction_query_batch_responses = vec![TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx2.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + 
block_hash: Some([1u8; 16].to_vec()), + confirmations: 0, + block_height: 1, + }]; - // Set Base Node to respond with AlreadyMined - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, + let mut batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses.clone(), is_synced: true, - height_of_longest_chain: 0, - }); + tip_hash: Some([1u8; 16].to_vec()), + height_of_longest_chain: 1, + }; + + rpc_service_state.set_transaction_query_batch_responses(batch_query_response.clone()); - rpc_service_state.set_response_delay(Some(Duration::from_secs(5))); + rpc_service_state.set_is_synced(false); + + wallet_connectivity.notify_base_node_set(server_node_identity.to_peer()); let protocol = TransactionValidationProtocol::new( 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(10), - base_node_update_publisher.subscribe(), - _timeout_update_publisher.subscribe(), - ValidationRetryStrategy::UntilSuccess, + resources.db.clone(), + wallet_connectivity.clone(), + resources.config.clone(), + resources.event_publisher.clone(), + resources.output_manager_service.clone(), ); let join_handle = task::spawn(protocol.execute()); - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60)) - .await - .unwrap(); - - rpc_service_state.set_rpc_status_error(Some(RpcStatus::bad_request("blah".to_string()))); + // Check that the protocol ends with error due to base node not being synced + let result = join_handle.await.unwrap(); + assert!(matches!( + result, + Err(TransactionServiceProtocolError { + id: 1, + error: TransactionServiceError::BaseNodeNotSynced, + }) + )); - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(1, Duration::from_secs(60)) - .await - .unwrap(); + rpc_service_state.set_is_synced(true); - rpc_service_state.set_rpc_status_error(None); - 
rpc_service_state.set_response_delay(None); + let protocol = TransactionValidationProtocol::new( + 2, + resources.db.clone(), + wallet_connectivity.clone(), + resources.config.clone(), + resources.event_publisher.clone(), + resources.output_manager_service.clone(), + ); - // Check that the protocol ends with success + let join_handle = task::spawn(protocol.execute()); let result = join_handle.await.unwrap(); assert!(result.is_ok()); - // Check transaction status is updated - let db_completed_txs = resources.db.get_completed_transactions().await.unwrap(); + let completed_txs = resources.db.get_completed_transactions().await.unwrap(); - for tx in db_completed_txs.values() { - assert!(tx.valid, "TxId: {} should be valid", tx.tx_id); - } -} + assert_eq!(completed_txs.get(&1).unwrap().status, TransactionStatus::Broadcast); + assert_eq!( + completed_txs.get(&2).unwrap().status, + TransactionStatus::MinedUnconfirmed + ); -/// Test the validation protocol reacts correctly when the RPC client returns an error between calls and only retry -/// finite amount of times -#[tokio::test] -#[allow(clippy::identity_op)] -async fn tx_validation_protocol_rpc_client_broken_finite_retries() { - let ( - resources, - _connectivity_mock_state, - _outbound_mock_state, - _mock_rpc_server, - server_node_identity, - mut rpc_service_state, - _timeout_update_publisher, - _shutdown, - _temp_dir, - _transaction_event_receiver, - ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - let mut event_stream = resources.event_publisher.subscribe(); - add_transaction_to_database( - 1, - 1 * T, - true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; + // set Tx2 back to unmined + batch_query_response.responses = vec![]; + rpc_service_state.set_transaction_query_batch_responses(batch_query_response.clone()); - add_transaction_to_database( - 2, - 
2 * T, - false, - Some(TransactionStatus::MinedConfirmed), + let protocol = TransactionValidationProtocol::new( + 3, resources.db.clone(), - ) - .await; + wallet_connectivity.clone(), + resources.config.clone(), + resources.event_publisher.clone(), + resources.output_manager_service.clone(), + ); - // Set Base Node to respond with AlreadyMined - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: 1, + let join_handle = task::spawn(protocol.execute()); + let result = join_handle.await.unwrap(); + assert!(result.is_ok()); + + let completed_txs = resources.db.get_completed_transactions().await.unwrap(); + + assert_eq!(completed_txs.get(&2).unwrap().status, TransactionStatus::Completed); + + // Now the tx will be fully mined + let transaction_query_batch_responses = vec![TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx2.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some([5u8; 16].to_vec()), + confirmations: 4, + block_height: 5, + }]; + + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses.clone(), is_synced: true, - height_of_longest_chain: 0, - }); - rpc_service_state.set_rpc_status_error(Some(RpcStatus::bad_request("blah".to_string()))); + tip_hash: Some([5u8; 16].to_vec()), + height_of_longest_chain: 5, + }; - rpc_service_state.set_response_delay(Some(Duration::from_secs(1))); + rpc_service_state.set_transaction_query_batch_responses(batch_query_response.clone()); let protocol = TransactionValidationProtocol::new( - 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(5), - base_node_update_publisher.subscribe(), - _timeout_update_publisher.subscribe(), - ValidationRetryStrategy::Limited(2), + 4, + resources.db.clone(), + wallet_connectivity.clone(), + resources.config.clone(), + 
resources.event_publisher.clone(), + resources.output_manager_service.clone(), ); let join_handle = task::spawn(protocol.execute()); - - let _ = rpc_service_state - .wait_pop_transaction_batch_query_calls(3, Duration::from_secs(60)) - .await - .unwrap(); - - // Check that the protocol ends with success let result = join_handle.await.unwrap(); - assert!(result.is_err()); + assert!(result.is_ok()); - // Check that the connection problem event was emitted at least twice - let delay = sleep(Duration::from_secs(10)); - tokio::pin!(delay); - let mut timeouts = 0i32; - let mut failures = 0i32; - loop { - tokio::select! { - event = event_stream.recv() => { - log::error!("EVENT: {:?}", event); - match &*event.unwrap() { - TransactionEvent::TransactionValidationTimedOut(_) => { - timeouts +=1 ; - } - TransactionEvent::TransactionValidationFailure(_) => { - failures +=1 ; - } - _ => (), - } - if failures + timeouts >= 4 { - break; - } - }, - () = &mut delay => { - break; - }, - } - } - assert!(timeouts >= 3, "Should have timed out twice"); - assert!(failures >= 1, "Should have failed"); + let completed_txs = resources.db.get_completed_transactions().await.unwrap(); + + assert_eq!(completed_txs.get(&2).unwrap().status, TransactionStatus::MinedConfirmed); + assert_eq!(completed_txs.get(&2).unwrap().confirmations.unwrap(), 4); } -/// Validate completed transactions, will check that valid ones stay valid and incorrectly marked invalid tx become -/// valid. 
+/// Test that validation detects transactions becoming mined unconfirmed and then confirmed with some going back to +/// completed #[tokio::test] #[allow(clippy::identity_op)] -async fn tx_validation_protocol_base_node_not_synced() { +async fn tx_validation_protocol_reorg() { let ( resources, - _connectivity_mock_state, _outbound_mock_state, - _mock_rpc_server, + mock_rpc_server, server_node_identity, rpc_service_state, - _timeout_update_publisher, _shutdown, _temp_dir, _transaction_event_receiver, + wallet_connectivity, ) = setup(TxProtocolTestConfig::WithConnection).await; - let (base_node_update_publisher, _) = broadcast::channel(20); - let (_timeout_update_publisher, _) = broadcast::channel(20); - let mut event_stream = resources.event_publisher.subscribe(); + // Now we add the connection + let mut connection = mock_rpc_server + .create_connection(server_node_identity.to_peer(), "t/bnwallet/1".into()) + .await; + wallet_connectivity.set_base_node_wallet_rpc_client(connect_rpc_client(&mut connection).await); + + for i in 1..=5 { + add_transaction_to_database( + i, + i * T, + true, + Some(TransactionStatus::Broadcast), + None, + resources.db.clone(), + ) + .await; + } add_transaction_to_database( - 1, - 1 * T, + 6, + 6 * T, true, - Some(TransactionStatus::MinedConfirmed), - resources.db.clone(), - ) - .await; - add_transaction_to_database( - 2, - 2 * T, - false, - Some(TransactionStatus::MinedConfirmed), + Some(TransactionStatus::Coinbase), + Some(8), resources.db.clone(), ) .await; add_transaction_to_database( - 3, - 3 * T, + 7, + 7 * T, true, - Some(TransactionStatus::MinedConfirmed), + Some(TransactionStatus::Coinbase), + Some(9), resources.db.clone(), ) .await; - rpc_service_state.set_transaction_query_response(TxQueryResponse { - location: TxLocation::Mined, - block_hash: None, - confirmations: resources.config.num_confirmations_required, - is_synced: false, - height_of_longest_chain: 0, - }); + let mut block_headers = HashMap::new(); + for i in 0..=10 { + 
let mut block_header = BlockHeader::new(1); + block_header.height = i; + block_headers.insert(i, block_header.clone()); + } + rpc_service_state.set_blocks(block_headers.clone()); + + let tx1 = resources.db.get_completed_transaction(1).await.unwrap(); + let tx2 = resources.db.get_completed_transaction(2).await.unwrap(); + let tx3 = resources.db.get_completed_transaction(3).await.unwrap(); + let tx4 = resources.db.get_completed_transaction(4).await.unwrap(); + let tx5 = resources.db.get_completed_transaction(5).await.unwrap(); + let coinbase_tx1 = resources.db.get_completed_transaction(6).await.unwrap(); + let coinbase_tx2 = resources.db.get_completed_transaction(7).await.unwrap(); + + let transaction_query_batch_responses = vec![ + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx1.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&5).unwrap().hash()), + confirmations: 5, + block_height: 5, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx2.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&6).unwrap().hash()), + confirmations: 4, + block_height: 6, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx3.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&7).unwrap().hash()), + confirmations: 3, + block_height: 7, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx4.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&8).unwrap().hash()), + confirmations: 2, + block_height: 8, + }, + TxQueryBatchResponseProto { + signature: 
Some(SignatureProto::from( + coinbase_tx1.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&8).unwrap().hash()), + confirmations: 2, + block_height: 8, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx5.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&9).unwrap().hash()), + confirmations: 1, + block_height: 9, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + coinbase_tx2.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&9).unwrap().hash()), + confirmations: 1, + block_height: 9, + }, + ]; - rpc_service_state.set_is_synced(false); + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses.clone(), + is_synced: true, + tip_hash: Some(block_headers.get(&10).unwrap().hash()), + height_of_longest_chain: 10, + }; + + rpc_service_state.set_transaction_query_batch_responses(batch_query_response.clone()); let protocol = TransactionValidationProtocol::new( 1, - resources.clone(), - server_node_identity.public_key().clone(), - Duration::from_secs(1), - base_node_update_publisher.subscribe(), - _timeout_update_publisher.subscribe(), - ValidationRetryStrategy::Limited(0), + resources.db.clone(), + wallet_connectivity.clone(), + resources.config.clone(), + resources.event_publisher.clone(), + resources.output_manager_service.clone(), ); let join_handle = task::spawn(protocol.execute()); - - // Check that the protocol ends with success let result = join_handle.await.unwrap(); - assert!(result.is_err()); - - let delay = sleep(Duration::from_secs(10)); - tokio::pin!(delay); - let mut delayed = 0i32; - let mut failures = 0i32; - loop { - tokio::select! 
{ - event = event_stream.recv() => { - match &*event.unwrap() { - TransactionEvent::TransactionValidationDelayed(_) => { - delayed +=1 ; - } - TransactionEvent::TransactionValidationFailure(_) => { - failures +=1 ; - } - _ => (), - } + assert!(result.is_ok()); - }, - () = &mut delay => { - break; - }, + let completed_txs = resources.db.get_completed_transactions().await.unwrap(); + let mut unconfirmed_count = 0; + let mut confirmed_count = 0; + for (_k, tx) in completed_txs.iter() { + if tx.status == TransactionStatus::MinedUnconfirmed { + unconfirmed_count += 1; + } + if tx.status == TransactionStatus::MinedConfirmed { + confirmed_count += 1; } } - assert!(delayed >= 1, "Should have been delayed"); - assert_eq!(failures, 0, "Should not have failed when BN is not synced"); + assert_eq!(confirmed_count, 3); + assert_eq!(unconfirmed_count, 4); + + // Now we will reorg to new blocks 8 and 9, tx 4 will disappear and tx5 will appear in block 9, coinbase_tx2 should + // become invalid and coinbase_tx1 should return to coinbase status + + let _ = block_headers.remove(&9); + let _ = block_headers.remove(&10); + let mut block_header = BlockHeader::new(2); + block_header.height = 8; + block_headers.insert(8, block_header.clone()); + + rpc_service_state.set_blocks(block_headers.clone()); + + let transaction_query_batch_responses = vec![ + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx1.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&5).unwrap().hash()), + confirmations: 4, + block_height: 5, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx2.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&6).unwrap().hash()), + confirmations: 3, + block_height: 6, + }, + TxQueryBatchResponseProto { + signature: 
Some(SignatureProto::from( + tx3.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&7).unwrap().hash()), + confirmations: 2, + block_height: 7, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + coinbase_tx1.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::NotStored) as i32, + block_hash: None, + confirmations: 0, + block_height: 0, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + tx5.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::Mined) as i32, + block_hash: Some(block_headers.get(&8).unwrap().hash()), + confirmations: 1, + block_height: 8, + }, + TxQueryBatchResponseProto { + signature: Some(SignatureProto::from( + coinbase_tx2.transaction.first_kernel_excess_sig().unwrap().clone(), + )), + location: TxLocationProto::from(TxLocation::NotStored) as i32, + block_hash: None, + confirmations: 0, + block_height: 0, + }, + ]; + + let batch_query_response = TxQueryBatchResponsesProto { + responses: transaction_query_batch_responses.clone(), + is_synced: true, + tip_hash: Some(block_headers.get(&8).unwrap().hash()), + height_of_longest_chain: 8, + }; + + rpc_service_state.set_transaction_query_batch_responses(batch_query_response.clone()); + let _ = rpc_service_state.take_get_header_by_height_calls(); + + let protocol = TransactionValidationProtocol::new( + 2, + resources.db.clone(), + wallet_connectivity.clone(), + resources.config.clone(), + resources.event_publisher.clone(), + resources.output_manager_service.clone(), + ); + + let join_handle = task::spawn(protocol.execute()); + let result = join_handle.await.unwrap(); + assert!(result.is_ok()); + + let _calls = rpc_service_state + .wait_pop_get_header_by_height_calls(5, Duration::from_secs(30)) + .await + .unwrap(); + + 
assert_eq!(rpc_service_state.take_get_header_by_height_calls().len(), 0); + + let completed_txs = resources.db.get_completed_transactions().await.unwrap(); + assert_eq!(completed_txs.get(&4).unwrap().status, TransactionStatus::Completed); + assert_eq!( + completed_txs.get(&5).unwrap().status, + TransactionStatus::MinedUnconfirmed + ); + assert_eq!(completed_txs.get(&5).cloned().unwrap().mined_height.unwrap(), 8); + assert_eq!(completed_txs.get(&5).cloned().unwrap().confirmations.unwrap(), 1); + + assert!(!completed_txs.get(&6).unwrap().valid); + assert_eq!(completed_txs.get(&7).unwrap().status, TransactionStatus::Coinbase); } diff --git a/base_layer/wallet_ffi/Cargo.toml b/base_layer/wallet_ffi/Cargo.toml index 735ddfbabd..b5e7a0490b 100644 --- a/base_layer/wallet_ffi/Cargo.toml +++ b/base_layer/wallet_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet C FFI bindings" license = "BSD-3-Clause" -version = "0.18.8" +version = "0.19.0" edition = "2018" [dependencies] diff --git a/base_layer/wallet_ffi/src/callback_handler.rs b/base_layer/wallet_ffi/src/callback_handler.rs index af90c0bfaf..42067d555a 100644 --- a/base_layer/wallet_ffi/src/callback_handler.rs +++ b/base_layer/wallet_ffi/src/callback_handler.rs @@ -56,7 +56,6 @@ use tari_wallet::{ output_manager_service::{ handle::{OutputManagerEvent, OutputManagerEventReceiver}, TxId, - TxoValidationType, }, transaction_service::{ handle::{TransactionEvent, TransactionEventReceiver}, @@ -89,9 +88,7 @@ where TBackend: TransactionBackend + 'static callback_direct_send_result: unsafe extern "C" fn(TxId, bool), callback_store_and_forward_send_result: unsafe extern "C" fn(TxId, bool), callback_transaction_cancellation: unsafe extern "C" fn(*mut CompletedTransaction), - callback_utxo_validation_complete: unsafe extern "C" fn(u64, u8), - callback_stxo_validation_complete: unsafe extern "C" fn(u64, u8), - 
callback_invalid_txo_validation_complete: unsafe extern "C" fn(u64, u8), + callback_txo_validation_complete: unsafe extern "C" fn(u64, u8), callback_transaction_validation_complete: unsafe extern "C" fn(u64, u8), callback_saf_messages_received: unsafe extern "C" fn(), db: TransactionDatabase, @@ -122,9 +119,7 @@ where TBackend: TransactionBackend + 'static callback_direct_send_result: unsafe extern "C" fn(TxId, bool), callback_store_and_forward_send_result: unsafe extern "C" fn(TxId, bool), callback_transaction_cancellation: unsafe extern "C" fn(*mut CompletedTransaction), - callback_utxo_validation_complete: unsafe extern "C" fn(TxId, u8), - callback_stxo_validation_complete: unsafe extern "C" fn(TxId, u8), - callback_invalid_txo_validation_complete: unsafe extern "C" fn(TxId, u8), + callback_txo_validation_complete: unsafe extern "C" fn(TxId, u8), callback_transaction_validation_complete: unsafe extern "C" fn(TxId, u8), callback_saf_messages_received: unsafe extern "C" fn(), ) -> Self { @@ -166,15 +161,7 @@ where TBackend: TransactionBackend + 'static ); info!( target: LOG_TARGET, - "UtxoValidationCompleteCallback -> Assigning Fn: {:?}", callback_utxo_validation_complete - ); - info!( - target: LOG_TARGET, - "StxoValidationCompleteCallback -> Assigning Fn: {:?}", callback_stxo_validation_complete - ); - info!( - target: LOG_TARGET, - "InvalidTxoValidationCompleteCallback -> Assigning Fn: {:?}", callback_invalid_txo_validation_complete + "TxoValidationCompleteCallback -> Assigning Fn: {:?}", callback_txo_validation_complete ); info!( target: LOG_TARGET, @@ -195,9 +182,7 @@ where TBackend: TransactionBackend + 'static callback_direct_send_result, callback_store_and_forward_send_result, callback_transaction_cancellation, - callback_utxo_validation_complete, - callback_stxo_validation_complete, - callback_invalid_txo_validation_complete, + callback_txo_validation_complete, callback_transaction_validation_complete, callback_saf_messages_received, db, @@ -245,11 
+230,11 @@ where TBackend: TransactionBackend + 'static TransactionEvent::TransactionBroadcast(tx_id) => { self.receive_transaction_broadcast_event(tx_id).await; }, - TransactionEvent::TransactionMined(tx_id) => { + TransactionEvent::TransactionMined{tx_id, is_valid: _} => { self.receive_transaction_mined_event(tx_id).await; }, - TransactionEvent::TransactionMinedUnconfirmed(tx_id, c) => { - self.receive_transaction_mined_unconfirmed_event(tx_id, c).await; + TransactionEvent::TransactionMinedUnconfirmed{tx_id, num_confirmations, is_valid: _} => { + self.receive_transaction_mined_unconfirmed_event(tx_id, num_confirmations).await; }, TransactionEvent::TransactionValidationSuccess(tx_id) => { self.transaction_validation_complete_event(tx_id, CallbackValidationResults::Success); @@ -275,17 +260,17 @@ where TBackend: TransactionBackend + 'static Ok(msg) => { trace!(target: LOG_TARGET, "Output Manager Service Callback Handler event {:?}", msg); match (*msg).clone() { - OutputManagerEvent::TxoValidationSuccess(request_key, validation_type) => { - self.output_validation_complete_event(request_key, validation_type, CallbackValidationResults::Success); + OutputManagerEvent::TxoValidationSuccess(request_key) => { + self.output_validation_complete_event(request_key, CallbackValidationResults::Success); }, - OutputManagerEvent::TxoValidationFailure(request_key, validation_type) => { - self.output_validation_complete_event(request_key, validation_type, CallbackValidationResults::Failure); + OutputManagerEvent::TxoValidationFailure(request_key) => { + self.output_validation_complete_event(request_key, CallbackValidationResults::Failure); }, - OutputManagerEvent::TxoValidationAborted(request_key, validation_type) => { - self.output_validation_complete_event(request_key, validation_type, CallbackValidationResults::Aborted); + OutputManagerEvent::TxoValidationAborted(request_key) => { + self.output_validation_complete_event(request_key, CallbackValidationResults::Aborted); }, - 
OutputManagerEvent::TxoValidationDelayed(request_key, validation_type) => { - self.output_validation_complete_event(request_key, validation_type, CallbackValidationResults::BaseNodeNotInSync); + OutputManagerEvent::TxoValidationDelayed(request_key) => { + self.output_validation_complete_event(request_key, CallbackValidationResults::BaseNodeNotInSync); }, // Only the above variants are mapped to callbacks _ => (), @@ -490,80 +475,29 @@ where TBackend: TransactionBackend + 'static } } - fn output_validation_complete_event( - &mut self, - request_key: u64, - validation_type: TxoValidationType, - result: CallbackValidationResults, - ) { + fn output_validation_complete_event(&mut self, request_key: u64, result: CallbackValidationResults) { debug!( target: LOG_TARGET, - "Calling Output Validation Complete callback function for Request Key: {} with with type {} result {:?}", + "Calling Output Validation Complete callback function for Request Key: {} with result {:?}", request_key, - validation_type, result as u8, ); - match validation_type { - TxoValidationType::Unspent => match result { - CallbackValidationResults::Success => unsafe { - (self.callback_utxo_validation_complete)(request_key, CallbackValidationResults::Success as u8); - }, - CallbackValidationResults::Aborted => unsafe { - (self.callback_utxo_validation_complete)(request_key, CallbackValidationResults::Aborted as u8); - }, - CallbackValidationResults::Failure => unsafe { - (self.callback_utxo_validation_complete)(request_key, CallbackValidationResults::Failure as u8); - }, - CallbackValidationResults::BaseNodeNotInSync => unsafe { - (self.callback_utxo_validation_complete)( - request_key, - CallbackValidationResults::BaseNodeNotInSync as u8, - ); - }, + match result { + CallbackValidationResults::Success => unsafe { + (self.callback_txo_validation_complete)(request_key, CallbackValidationResults::Success as u8); }, - TxoValidationType::Spent => match result { - CallbackValidationResults::Success => unsafe { 
- (self.callback_stxo_validation_complete)(request_key, CallbackValidationResults::Success as u8); - }, - CallbackValidationResults::Aborted => unsafe { - (self.callback_stxo_validation_complete)(request_key, CallbackValidationResults::Aborted as u8); - }, - CallbackValidationResults::Failure => unsafe { - (self.callback_stxo_validation_complete)(request_key, CallbackValidationResults::Failure as u8); - }, - CallbackValidationResults::BaseNodeNotInSync => unsafe { - (self.callback_stxo_validation_complete)( - request_key, - CallbackValidationResults::BaseNodeNotInSync as u8, - ); - }, + CallbackValidationResults::Aborted => unsafe { + (self.callback_txo_validation_complete)(request_key, CallbackValidationResults::Aborted as u8); }, - TxoValidationType::Invalid => match result { - CallbackValidationResults::Success => unsafe { - (self.callback_invalid_txo_validation_complete)( - request_key, - CallbackValidationResults::Success as u8, - ); - }, - CallbackValidationResults::Aborted => unsafe { - (self.callback_invalid_txo_validation_complete)( - request_key, - CallbackValidationResults::Aborted as u8, - ); - }, - CallbackValidationResults::Failure => unsafe { - (self.callback_invalid_txo_validation_complete)( - request_key, - CallbackValidationResults::Failure as u8, - ); - }, - CallbackValidationResults::BaseNodeNotInSync => unsafe { - (self.callback_invalid_txo_validation_complete)( - request_key, - CallbackValidationResults::BaseNodeNotInSync as u8, - ); - }, + CallbackValidationResults::Failure => unsafe { + (self.callback_txo_validation_complete)(request_key, CallbackValidationResults::Failure as u8); + }, + CallbackValidationResults::BaseNodeNotInSync => unsafe { + (self.callback_txo_validation_complete)( + request_key, + CallbackValidationResults::BaseNodeNotInSync as u8, + ); }, } } @@ -597,7 +531,7 @@ mod test { use tari_crypto::keys::{PublicKey as PublicKeyTrait, SecretKey}; use tari_shutdown::Shutdown; use tari_wallet::{ - 
output_manager_service::{handle::OutputManagerEvent, TxoValidationType}, + output_manager_service::handle::OutputManagerEvent, test_utils::make_wallet_database_connection, transaction_service::{ handle::TransactionEvent, @@ -628,9 +562,7 @@ mod test { pub tx_cancellation_callback_called_completed: bool, pub tx_cancellation_callback_called_inbound: bool, pub tx_cancellation_callback_called_outbound: bool, - pub callback_utxo_validation_complete: u32, - pub callback_stxo_validation_complete: u32, - pub callback_invalid_txo_validation_complete: u32, + pub callback_txo_validation_complete: u32, pub callback_transaction_validation_complete: u32, pub saf_messages_received: bool, } @@ -646,9 +578,7 @@ mod test { mined_tx_unconfirmed_callback_called: 0, direct_send_callback_called: false, store_and_forward_send_callback_called: false, - callback_utxo_validation_complete: 0, - callback_stxo_validation_complete: 0, - callback_invalid_txo_validation_complete: 0, + callback_txo_validation_complete: 0, callback_transaction_validation_complete: 0, tx_cancellation_callback_called_completed: false, tx_cancellation_callback_called_inbound: false, @@ -735,26 +665,10 @@ mod test { Box::from_raw(tx); } - unsafe extern "C" fn utxo_validation_complete_callback(_tx_id: u64, result: u8) { - let mut lock = CALLBACK_STATE.lock().unwrap(); - - lock.callback_utxo_validation_complete += result as u32; - - drop(lock); - } - - unsafe extern "C" fn stxo_validation_complete_callback(_tx_id: u64, result: u8) { - let mut lock = CALLBACK_STATE.lock().unwrap(); - - lock.callback_stxo_validation_complete += result as u32; - - drop(lock); - } - - unsafe extern "C" fn invalid_txo_validation_complete_callback(_tx_id: u64, result: u8) { + unsafe extern "C" fn txo_validation_complete_callback(_tx_id: u64, result: u8) { let mut lock = CALLBACK_STATE.lock().unwrap(); - lock.callback_invalid_txo_validation_complete += result as u32; + lock.callback_txo_validation_complete += result as u32; drop(lock); } @@ 
-863,9 +777,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, ); @@ -885,11 +797,18 @@ mod test { .send(Arc::new(TransactionEvent::TransactionBroadcast(2u64))) .unwrap(); tx_sender - .send(Arc::new(TransactionEvent::TransactionMined(2u64))) + .send(Arc::new(TransactionEvent::TransactionMined { + tx_id: 2u64, + is_valid: true, + })) .unwrap(); tx_sender - .send(Arc::new(TransactionEvent::TransactionMinedUnconfirmed(2u64, 22u64))) + .send(Arc::new(TransactionEvent::TransactionMinedUnconfirmed { + tx_id: 2u64, + num_confirmations: 22u64, + is_valid: true, + })) .unwrap(); tx_sender @@ -911,24 +830,15 @@ mod test { .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationSuccess( - 1u64, - TxoValidationType::Unspent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationSuccess(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationSuccess( - 1u64, - TxoValidationType::Spent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationSuccess(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationSuccess( - 1u64, - TxoValidationType::Invalid, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationSuccess(1u64))) .unwrap(); tx_sender @@ -936,24 +846,15 @@ mod test { .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationFailure( - 1u64, - TxoValidationType::Unspent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationFailure(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationFailure( - 1u64, - TxoValidationType::Spent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationFailure(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationFailure( - 1u64, - 
TxoValidationType::Invalid, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationFailure(1u64))) .unwrap(); tx_sender @@ -961,24 +862,15 @@ mod test { .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationAborted( - 1u64, - TxoValidationType::Unspent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationAborted(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationAborted( - 1u64, - TxoValidationType::Spent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationAborted(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationAborted( - 1u64, - TxoValidationType::Invalid, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationAborted(1u64))) .unwrap(); tx_sender @@ -986,24 +878,15 @@ mod test { .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationDelayed( - 1u64, - TxoValidationType::Unspent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationDelayed(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationDelayed( - 1u64, - TxoValidationType::Spent, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationDelayed(1u64))) .unwrap(); oms_sender - .send(Arc::new(OutputManagerEvent::TxoValidationDelayed( - 1u64, - TxoValidationType::Invalid, - ))) + .send(Arc::new(OutputManagerEvent::TxoValidationDelayed(1u64))) .unwrap(); tx_sender @@ -1029,10 +912,7 @@ mod test { assert!(lock.tx_cancellation_callback_called_completed); assert!(lock.tx_cancellation_callback_called_outbound); assert!(lock.saf_messages_received); - - assert_eq!(lock.callback_utxo_validation_complete, 6); - assert_eq!(lock.callback_stxo_validation_complete, 6); - assert_eq!(lock.callback_invalid_txo_validation_complete, 6); + assert_eq!(lock.callback_txo_validation_complete, 18); assert_eq!(lock.callback_transaction_validation_complete, 6); drop(lock); diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 178b9a98be..5584d1d672 100644 --- 
a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -166,7 +166,6 @@ use tari_shutdown::Shutdown; use tari_wallet::{ contacts_service::storage::database::Contact, error::{WalletError, WalletStorageError}, - output_manager_service::TxoValidationType, storage::{ database::WalletDatabase, sqlite_db::WalletSqliteDatabase, @@ -186,7 +185,6 @@ use tari_wallet::{ }, }, }, - types::ValidationRetryStrategy, utxo_scanner_service::utxo_scanning::{UtxoScannerService, RECOVERY_KEY}, Wallet, WalletConfig, @@ -2729,14 +2727,8 @@ unsafe fn init_logging(log_path: *const c_char, num_rolling_log_files: c_uint, s /// `callback_discovery_process_complete` - The callback function pointer matching the function signature. This will be /// called when a `send_transacion(..)` call is made to a peer whose address is not known and a discovery process must /// be conducted. The outcome of the discovery process is relayed via this callback -/// `callback_utxo_validation_complete` - The callback function pointer matching the function signature. This is called -/// when a UTXO validation process is completed. The request_key is used to identify which request this -/// callback references and the second parameter is a u8 that represent the ClassbackValidationResults enum. -/// `callback_stxo_validation_complete` - The callback function pointer matching the function signature. This is called -/// when a STXO validation process is completed. The request_key is used to identify which request this -/// callback references and the second parameter is a u8 that represent the ClassbackValidationResults enum. -/// `callback_invalid_txo_validation_complete` - The callback function pointer matching the function signature. This is -/// called when a invalid TXO validation process is completed. The request_key is used to identify which request this +/// `callback_txo_validation_complete` - The callback function pointer matching the function signature. 
This is called +/// when a TXO validation process is completed. The request_key is used to identify which request this /// callback references and the second parameter is a u8 that represent the ClassbackValidationResults enum. /// `callback_transaction_validation_complete` - The callback function pointer matching the function signature. This is /// called when a Transaction validation process is completed. The request_key is used to identify which request this @@ -2772,9 +2764,7 @@ pub unsafe extern "C" fn wallet_create( callback_direct_send_result: unsafe extern "C" fn(c_ulonglong, bool), callback_store_and_forward_send_result: unsafe extern "C" fn(c_ulonglong, bool), callback_transaction_cancellation: unsafe extern "C" fn(*mut TariCompletedTransaction), - callback_utxo_validation_complete: unsafe extern "C" fn(u64, u8), - callback_stxo_validation_complete: unsafe extern "C" fn(u64, u8), - callback_invalid_txo_validation_complete: unsafe extern "C" fn(u64, u8), + callback_txo_validation_complete: unsafe extern "C" fn(u64, u8), callback_transaction_validation_complete: unsafe extern "C" fn(u64, u8), callback_saf_messages_received: unsafe extern "C" fn(), recovery_in_progress: *mut bool, @@ -2921,9 +2911,7 @@ pub unsafe extern "C" fn wallet_create( callback_direct_send_result, callback_store_and_forward_send_result, callback_transaction_cancellation, - callback_utxo_validation_complete, - callback_stxo_validation_complete, - callback_invalid_txo_validation_complete, + callback_txo_validation_complete, callback_transaction_validation_complete, callback_saf_messages_received, ); @@ -4302,107 +4290,8 @@ pub unsafe extern "C" fn wallet_cancel_pending_transaction( } } -/// This function will tell the wallet to query the set base node to confirm the status of unspent transaction outputs -/// (UTXOs). -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. 
Functions -/// as an out parameter. -/// -/// ## Returns -/// `c_ulonglong` - Returns a unique Request Key that is used to identify which callbacks refer to this specific sync -/// request. Note the result will be 0 if there was an error -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_start_utxo_validation(wallet: *mut TariWallet, error_out: *mut c_int) -> c_ulonglong { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return 0; - } - - if let Err(e) = (*wallet).runtime.block_on( - (*wallet) - .wallet - .store_and_forward_requester - .request_saf_messages_from_neighbours(), - ) { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - return 0; - } - - match (*wallet).runtime.block_on( - (*wallet) - .wallet - .output_manager_service - .validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::Limited(0)), - ) { - Ok(request_key) => request_key, - Err(e) => { - error = LibWalletError::from(WalletError::OutputManagerError(e)).code; - ptr::swap(error_out, &mut error as *mut c_int); - 0 - }, - } -} - -/// This function will tell the wallet to query the set base node to confirm the status of spent transaction outputs -/// (UTXOs). -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. -/// -/// ## Returns -/// `c_ulonglong` - Returns a unique Request Key that is used to identify which callbacks refer to this specific sync -/// request. 
Note the result will be 0 if there was an error -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_start_stxo_validation(wallet: *mut TariWallet, error_out: *mut c_int) -> c_ulonglong { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return 0; - } - - if let Err(e) = (*wallet).runtime.block_on( - (*wallet) - .wallet - .store_and_forward_requester - .request_saf_messages_from_neighbours(), - ) { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - return 0; - } - - match (*wallet).runtime.block_on( - (*wallet) - .wallet - .output_manager_service - .validate_txos(TxoValidationType::Spent, ValidationRetryStrategy::Limited(0)), - ) { - Ok(request_key) => request_key, - Err(e) => { - error = LibWalletError::from(WalletError::OutputManagerError(e)).code; - ptr::swap(error_out, &mut error as *mut c_int); - 0 - }, - } -} - -/// This function will tell the wallet to query the set base node to confirm the status of invalid transaction outputs. +/// This function will tell the wallet to query the set base node to confirm the status of transaction outputs +/// (TXOs). 
/// /// ## Arguments /// `wallet` - The TariWallet pointer @@ -4416,10 +4305,7 @@ pub unsafe extern "C" fn wallet_start_stxo_validation(wallet: *mut TariWallet, e /// # Safety /// None #[no_mangle] -pub unsafe extern "C" fn wallet_start_invalid_txo_validation( - wallet: *mut TariWallet, - error_out: *mut c_int, -) -> c_ulonglong { +pub unsafe extern "C" fn wallet_start_txo_validation(wallet: *mut TariWallet, error_out: *mut c_int) -> c_ulonglong { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if wallet.is_null() { @@ -4439,12 +4325,10 @@ pub unsafe extern "C" fn wallet_start_invalid_txo_validation( return 0; } - match (*wallet).runtime.block_on( - (*wallet) - .wallet - .output_manager_service - .validate_txos(TxoValidationType::Invalid, ValidationRetryStrategy::Limited(0)), - ) { + match (*wallet) + .runtime + .block_on((*wallet).wallet.output_manager_service.validate_txos()) + { Ok(request_key) => request_key, Err(e) => { error = LibWalletError::from(WalletError::OutputManagerError(e)).code; @@ -4491,12 +4375,10 @@ pub unsafe extern "C" fn wallet_start_transaction_validation( return 0; } - match (*wallet).runtime.block_on( - (*wallet) - .wallet - .transaction_service - .validate_transactions(ValidationRetryStrategy::Limited(0)), - ) { + match (*wallet) + .runtime + .block_on((*wallet).wallet.transaction_service.validate_transactions()) + { Ok(request_key) => request_key, Err(e) => { error = LibWalletError::from(WalletError::TransactionServiceError(e)).code; @@ -5292,9 +5174,7 @@ mod test { pub direct_send_callback_called: bool, pub store_and_forward_send_callback_called: bool, pub tx_cancellation_callback_called: bool, - pub callback_utxo_validation_complete: bool, - pub callback_stxo_validation_complete: bool, - pub callback_invalid_txo_validation_complete: bool, + pub callback_txo_validation_complete: bool, pub callback_transaction_validation_complete: bool, } @@ -5310,9 +5190,7 @@ mod test { direct_send_callback_called: false, 
store_and_forward_send_callback_called: false, tx_cancellation_callback_called: false, - callback_utxo_validation_complete: false, - callback_stxo_validation_complete: false, - callback_invalid_txo_validation_complete: false, + callback_txo_validation_complete: false, callback_transaction_validation_complete: false, } } @@ -5432,15 +5310,7 @@ mod test { completed_transaction_destroy(tx); } - unsafe extern "C" fn utxo_validation_complete_callback(_tx_id: c_ulonglong, _result: u8) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn stxo_validation_complete_callback(_tx_id: c_ulonglong, _result: u8) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn invalid_txo_validation_complete_callback(_tx_id: c_ulonglong, _result: u8) { + unsafe extern "C" fn txo_validation_complete_callback(_tx_id: c_ulonglong, _result: u8) { // assert!(true); //optimized out by compiler } @@ -5778,9 +5648,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -5818,9 +5686,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -5924,9 +5790,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, 
saf_messages_received_callback, recovery_in_progress_ptr, @@ -5972,9 +5836,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -6003,9 +5865,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -6029,9 +5889,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -6076,9 +5934,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -6152,9 +6008,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -6303,9 +6157,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, 
tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, @@ -6358,9 +6210,7 @@ mod test { direct_send_callback, store_and_forward_send_callback, tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, + txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, recovery_in_progress_ptr, diff --git a/base_layer/wallet_ffi/wallet.h b/base_layer/wallet_ffi/wallet.h index 0dfd400482..dc67011396 100644 --- a/base_layer/wallet_ffi/wallet.h +++ b/base_layer/wallet_ffi/wallet.h @@ -421,14 +421,8 @@ void comms_config_destroy(struct TariCommsConfig *wc); /// `callback_discovery_process_complete` - The callback function pointer matching the function signature. This will be /// called when a `send_transacion(..)` call is made to a peer whose address is not known and a discovery process must /// be conducted. The outcome of the discovery process is relayed via this callback -/// `callback_utxo_validation_complete` - The callback function pointer matching the function signature. This is called -/// when a UTXO validation process is completed. The request_key is used to identify which request this -/// callback references and the second parameter is a u8 that represent the CallbackValidationResults enum. -/// `callback_stxo_validation_complete` - The callback function pointer matching the function signature. This is called -/// when a STXO validation process is completed. The request_key is used to identify which request this -/// callback references and the second parameter is a u8 that represent the CallbackValidationResults enum. 
-/// `callback_invalid_txo_validation_complete` - The callback function pointer matching the function signature. This is -/// called when a invalid TXO validation process is completed. The request_key is used to identify which request this +/// `callback_txo_validation_complete` - The callback function pointer matching the function signature. This is called +/// when a TXO validation process is completed. The request_key is used to identify which request this /// callback references and the second parameter is a u8 that represent the CallbackValidationResults enum. /// `callback_transaction_validation_complete` - The callback function pointer matching the function signature. This is /// called when a Transaction validation process is completed. The request_key is used to identify which request this @@ -469,9 +463,7 @@ struct TariWallet *wallet_create(struct TariCommsConfig *config, void (*callback_direct_send_result)(unsigned long long, bool), void (*callback_store_and_forward_send_result)(unsigned long long, bool), void (*callback_transaction_cancellation)(struct TariCompletedTransaction *), - void (*callback_utxo_validation_complete)(unsigned long long, unsigned char), - void (*callback_stxo_validation_complete)(unsigned long long, unsigned char), - void (*callback_invalid_txo_validation_complete)(unsigned long long, unsigned char), + void (*callback_txo_validation_complete)(unsigned long long, unsigned char), void (*callback_transaction_validation_complete)(unsigned long long, unsigned char), void (*callback_saf_message_received)(), bool *recovery_in_progress, @@ -548,13 +540,7 @@ struct TariCompletedTransaction *wallet_get_cancelled_transaction_by_id(struct T unsigned long long wallet_import_utxo(struct TariWallet *wallet, unsigned long long amount, struct TariPrivateKey *spending_key, struct TariPublicKey *source_public_key, const char *message, int *error_out); // This function will tell the wallet to query the set base node to confirm the status of unspent 
transaction outputs (UTXOs). -unsigned long long wallet_start_utxo_validation(struct TariWallet *wallet, int *error_out); - -// This function will tell the wallet to query the set base node to confirm the status of spent transaction outputs (STXOs). -unsigned long long wallet_start_stxo_validation(struct TariWallet *wallet, int *error_out); - -// This function will tell the wallet to query the set base node to confirm the status of invalid transaction outputs. -unsigned long long wallet_start_invalid_txo_validation(struct TariWallet *wallet, int *error_out); +unsigned long long wallet_start_txo_validation(struct TariWallet *wallet, int *error_out); //This function will tell the wallet to query the set base node to confirm the status of mined transactions. unsigned long long wallet_start_transaction_validation(struct TariWallet *wallet, int *error_out); diff --git a/common/src/configuration/global.rs b/common/src/configuration/global.rs index 4f97031d75..8920868d73 100644 --- a/common/src/configuration/global.rs +++ b/common/src/configuration/global.rs @@ -109,7 +109,6 @@ pub struct GlobalConfig { pub transaction_event_channel_size: usize, pub base_node_event_channel_size: usize, pub output_manager_event_channel_size: usize, - pub base_node_update_publisher_channel_size: usize, pub console_wallet_password: Option, pub wallet_command_send_wait_stage: String, pub wallet_command_send_wait_timeout: u64, @@ -487,9 +486,6 @@ fn convert_node_config( let key = "wallet.output_manager_event_channel_size"; let output_manager_event_channel_size = optional(cfg.get_int(key))?.unwrap_or(250) as usize; - let key = "wallet.base_node_update_publisher_channel_size"; - let base_node_update_publisher_channel_size = optional(cfg.get_int(key))?.unwrap_or(50) as usize; - let key = "wallet.prevent_fee_gt_amount"; let prevent_fee_gt_amount = cfg .get_bool(key) @@ -761,7 +757,6 @@ fn convert_node_config( transaction_event_channel_size, base_node_event_channel_size, 
output_manager_event_channel_size, - base_node_update_publisher_channel_size, console_wallet_password, wallet_command_send_wait_stage, wallet_command_send_wait_timeout, diff --git a/comms/rpc_macros/src/generator.rs b/comms/rpc_macros/src/generator.rs index a6f4ac1917..1a5ed3e023 100644 --- a/comms/rpc_macros/src/generator.rs +++ b/comms/rpc_macros/src/generator.rs @@ -194,7 +194,8 @@ impl RpcCodeGenerator { .collect::(); let client_struct_body = quote! { - pub async fn connect(framed: #dep_mod::CanonicalFraming<#dep_mod::Substream>) -> Result { + pub async fn connect(framed: #dep_mod::CanonicalFraming) -> Result + where TSubstream: #dep_mod::AsyncRead + #dep_mod::AsyncWrite + Unpin + Send + #dep_mod::StreamId + 'static { use #dep_mod::NamedProtocolService; let inner = #dep_mod::RpcClient::connect(Default::default(), framed, Self::PROTOCOL_NAME.into()).await?; Ok(Self { inner }) diff --git a/comms/src/peer_manager/node_id.rs b/comms/src/peer_manager/node_id.rs index 830956cdff..651f3d1b46 100644 --- a/comms/src/peer_manager/node_id.rs +++ b/comms/src/peer_manager/node_id.rs @@ -359,6 +359,12 @@ impl TryFrom<&[u8]> for NodeId { } } +impl From for NodeId { + fn from(pk: CommsPublicKey) -> Self { + NodeId::from_public_key(&pk) + } +} + impl Hash for NodeId { /// Require the implementation of the Hash trait for Hashmaps fn hash(&self, state: &mut H) { diff --git a/comms/src/protocol/rpc/client.rs b/comms/src/protocol/rpc/client.rs index 96883cd3af..2f19d4646b 100644 --- a/comms/src/protocol/rpc/client.rs +++ b/comms/src/protocol/rpc/client.rs @@ -79,6 +79,11 @@ pub struct RpcClient { } impl RpcClient { + pub fn builder() -> RpcClientBuilder + where T: NamedProtocolService { + RpcClientBuilder::new().with_protocol_id(T::PROTOCOL_NAME.into()) + } + /// Create a new RpcClient using the given framed substream and perform the RPC handshake. 
pub async fn connect( config: RpcClientConfig, @@ -192,9 +197,7 @@ impl Default for RpcClientBuilder { } } -impl RpcClientBuilder -where TClient: From + NamedProtocolService -{ +impl RpcClientBuilder { pub fn new() -> Self { Default::default() } @@ -233,14 +236,21 @@ where TClient: From + NamedProtocolService self.protocol_id = Some(protocol_id); self } +} +impl RpcClientBuilder +where TClient: From + NamedProtocolService +{ /// Negotiates and establishes a session to the peer's RPC service pub async fn connect(self, framed: CanonicalFraming) -> Result where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId + 'static { RpcClient::connect( self.config, framed, - self.protocol_id.as_ref().cloned().unwrap_or_default(), + self.protocol_id + .as_ref() + .cloned() + .unwrap_or_else(|| ProtocolId::from_static(TClient::PROTOCOL_NAME)), ) .await .map(Into::into) diff --git a/comms/src/protocol/rpc/handshake.rs b/comms/src/protocol/rpc/handshake.rs index 198755c198..2d7abbc435 100644 --- a/comms/src/protocol/rpc/handshake.rs +++ b/comms/src/protocol/rpc/handshake.rs @@ -87,7 +87,7 @@ where T: AsyncRead + AsyncWrite + Unpin .find(|v| msg.supported_versions.contains(v)); if let Some(version) = version { event!(Level::INFO, version = version, "Server accepted version"); - debug!(target: LOG_TARGET, "Server accepted version {}", version); + debug!(target: LOG_TARGET, "Server accepted version: {}", version); let reply = proto::rpc::RpcSessionReply { session_result: Some(proto::rpc::rpc_session_reply::SessionResult::AcceptedVersion(*version)), ..Default::default() diff --git a/comms/src/protocol/rpc/mod.rs b/comms/src/protocol/rpc/mod.rs index 6332d9e43a..fb3fee42f6 100644 --- a/comms/src/protocol/rpc/mod.rs +++ b/comms/src/protocol/rpc/mod.rs @@ -89,7 +89,6 @@ mod not_found; pub mod __macro_reexports { pub use crate::{ framing::CanonicalFraming, - multiplexing::Substream, protocol::{ rpc::{ client_pool::RpcPoolClient, @@ -105,6 +104,7 @@ pub mod __macro_reexports { }, 
ProtocolId, }, + stream_id::StreamId, Bytes, }; pub use futures::{future, future::BoxFuture}; diff --git a/comms/src/protocol/rpc/server/mock.rs b/comms/src/protocol/rpc/server/mock.rs index dae0f9ce93..61c1651d7e 100644 --- a/comms/src/protocol/rpc/server/mock.rs +++ b/comms/src/protocol/rpc/server/mock.rs @@ -234,7 +234,8 @@ where } } - /// Create a PeerConnection that can open a substream to this mock server. + /// Create a PeerConnection that can open a substream to this mock server, notifying the server of the given + /// protocol_id. pub async fn create_connection(&self, peer: Peer, protocol_id: ProtocolId) -> PeerConnection { let peer_node_id = peer.node_id.clone(); let (_, our_conn_mock, peer_conn, _) = create_peer_connection_mock_pair(peer, self.our_node.to_peer()).await; diff --git a/comms/src/protocol/rpc/server/mod.rs b/comms/src/protocol/rpc/server/mod.rs index 11b11fcbe1..a78cd146b0 100644 --- a/comms/src/protocol/rpc/server/mod.rs +++ b/comms/src/protocol/rpc/server/mod.rs @@ -637,7 +637,7 @@ where async fn log_timing>(context_str: Arc, request_id: u32, tag: &str, fut: F) -> R { let t = Instant::now(); - let span = span!(Level::TRACE, "rpc::internal::timing::{}::{}", request_id, tag); + let span = span!(Level::TRACE, "rpc::internal::timing", request_id, tag); let ret = fut.instrument(span).await; let elapsed = t.elapsed(); trace!( diff --git a/comms/tests/rpc_stress.rs b/comms/tests/rpc_stress.rs index 933e158398..d1cff350f2 100644 --- a/comms/tests/rpc_stress.rs +++ b/comms/tests/rpc_stress.rs @@ -33,7 +33,7 @@ use helpers::create_comms; use futures::{future, StreamExt}; use std::{future::Future, time::Duration}; use tari_comms::{ - protocol::rpc::{RpcClientBuilder, RpcServer}, + protocol::rpc::{RpcClient, RpcServer}, transports::TcpTransport, CommsNode, }; @@ -100,7 +100,7 @@ async fn run_stress_test(test_params: Params) { let client_pool = conn1_2.create_rpc_client_pool::( num_concurrent_sessions, - RpcClientBuilder::new().with_deadline(deadline), 
+ RpcClient::builder().with_deadline(deadline), ); let mut tasks = Vec::with_capacity(num_tasks); diff --git a/integration_tests/features/WalletBaseNodeSwitch.feature b/integration_tests/features/WalletBaseNodeSwitch.feature new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration_tests/features/WalletFFI.feature b/integration_tests/features/WalletFFI.feature index fee7273dae..9248974871 100644 --- a/integration_tests/features/WalletFFI.feature +++ b/integration_tests/features/WalletFFI.feature @@ -92,9 +92,10 @@ Feature: Wallet FFI And mining node MINER mines 10 blocks Then I wait for wallet RECEIVER to have at least 1000000 uT And I have 1 received and 1 send transaction in ffi wallet FFI_WALLET + And I start TXO validation on ffi wallet FFI_WALLET + And I start TX validation on ffi wallet FFI_WALLET + Then I wait for ffi wallet FFI_WALLET to receive 1 mined Then I want to view the transaction kernels for completed transactions in ffi wallet FFI_WALLET - And I start STXO validation on ffi wallet FFI_WALLET - And I start UTXO validation on ffi wallet FFI_WALLET And I stop ffi wallet FFI_WALLET Scenario: As a client I want to receive Tari via my Public Key sent while I am offline when I come back online diff --git a/integration_tests/features/WalletMonitoring.feature b/integration_tests/features/WalletMonitoring.feature index a10787d0fe..845690916f 100644 --- a/integration_tests/features/WalletMonitoring.feature +++ b/integration_tests/features/WalletMonitoring.feature @@ -1,4 +1,4 @@ -@coinbase_reorg +@coinbase_reorg @wallet Feature: Wallet Monitoring Scenario: Wallets monitoring coinbase after a reorg @@ -48,9 +48,7 @@ Feature: Wallet Monitoring When I wait 30 seconds And I list all COINBASE transactions for wallet WALLET_A1 And I list all COINBASE transactions for wallet WALLET_B1 - Then the number of coinbase transactions for wallet WALLET_A1 and wallet WALLET_B1 are 3 less - # TODO: Uncomment this step when wallets can handle reorg -# Then 
all COINBASE transactions for wallet WALLET_A1 and wallet WALLET_B1 have consistent but opposing validity + Then all COINBASE transactions for wallet WALLET_A1 and wallet WALLET_B1 have consistent but opposing validity # 18+ mins on circle ci @long-running diff --git a/integration_tests/features/WalletPasswordChange.feature b/integration_tests/features/WalletPasswordChange.feature new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration_tests/features/WalletQuery.feature b/integration_tests/features/WalletQuery.feature index 825854a312..7c022bdc52 100644 --- a/integration_tests/features/WalletQuery.feature +++ b/integration_tests/features/WalletQuery.feature @@ -1,3 +1,4 @@ +@wallet Feature: Wallet Querying Scenario: As a wallet I want to query the status of utxos in blocks diff --git a/integration_tests/features/WalletRecovery.feature b/integration_tests/features/WalletRecovery.feature index bf6147472d..6ca324ee16 100644 --- a/integration_tests/features/WalletRecovery.feature +++ b/integration_tests/features/WalletRecovery.feature @@ -1,4 +1,4 @@ -@wallet-recovery +@wallet-recovery @wallet Feature: Wallet Recovery @critical @@ -66,4 +66,4 @@ Feature: Wallet Recovery Then I wait for wallet WALLET_C to have less than 100000 uT When I merge mine 5 blocks via PROXY Then all nodes are at height 25 - Then I wait for wallet WALLET_C to have at least 1000000 uT \ No newline at end of file + Then I wait for wallet WALLET_C to have at least 1000000 uT diff --git a/integration_tests/features/WalletRoutingMechanism.feature b/integration_tests/features/WalletRoutingMechanism.feature index 7363c5d5ee..e1f4a9d87e 100644 --- a/integration_tests/features/WalletRoutingMechanism.feature +++ b/integration_tests/features/WalletRoutingMechanism.feature @@ -1,4 +1,4 @@ -@routing_mechanism +@routing_mechanism @wallet Feature: Wallet Routing Mechanism Scenario Outline: Wallets transacting via specified routing mechanism only diff --git 
a/integration_tests/features/WalletTransactions.feature b/integration_tests/features/WalletTransactions.feature index 4409438e0d..596b7c9d64 100644 --- a/integration_tests/features/WalletTransactions.feature +++ b/integration_tests/features/WalletTransactions.feature @@ -78,11 +78,15 @@ Feature: Wallet Transactions When I have wallet WALLET_C connected to all seed nodes Then I import WALLET_B spent outputs to WALLET_C Then I wait for wallet WALLET_C to have at least 1000000 uT + Then I wait for 5 seconds Then I restart wallet WALLET_C Then I wait for wallet WALLET_C to have less than 1 uT - Then I check if last imported transactions are invalid in wallet WALLET_C + # TODO Either remove the check for invalid Faux tx and change the test name or implement a new way to invalidate Faux Tx + # The concept of invalidating the Faux transaction doesn't exist in this branch anymore. There has been talk of removing the Faux transaction + # for imported UTXO's anyway so until that is decided we will just check that the imported output becomes Spent + #Then I check if last imported transactions are invalid in wallet WALLET_C - @critical @flaky + @critical @flakey Scenario: Wallet imports reorged outputs that become invalidated # Chain 1 Given I have a seed node SEED_B @@ -100,7 +104,10 @@ Feature: Wallet Transactions Then I stop wallet WALLET_RECEIVE_TX When I have wallet WALLET_IMPORTED connected to base node B Then I import WALLET_RECEIVE_TX unspent outputs to WALLET_IMPORTED - # Chain 2 + Then I wait for wallet WALLET_IMPORTED to have at least 1000000 uT + # This triggers a validation of the imported outputs + Then I restart wallet WALLET_IMPORTED + # Chain 2 Given I have a seed node SEED_C And I have a base node C connected to seed SEED_C And I have wallet WC connected to base node C @@ -115,7 +122,13 @@ Feature: Wallet Transactions And node C is at height 10 Then I restart wallet WALLET_IMPORTED Then I wait for wallet WALLET_IMPORTED to have less than 1 uT - Then I check 
if last imported transactions are invalid in wallet WALLET_IMPORTED + And mining node CM mines 1 blocks with min difficulty 1000 and max difficulty 9999999999 + And node B is at height 11 + And node C is at height 11 + # TODO Either remove the check for invalid Faux tx and change the test name or implement a new way to invalidate Faux Tx + # The concept of invalidating the Faux transaction doesn't exist in this branch anymore. There has been talk of removing the Faux transaction + # for imported UTXO's anyway so until that is decided we will just check that the imported output becomes invalid + # Then I check if last imported transactions are invalid in wallet WALLET_IMPORTED @critical Scenario: Wallet imports faucet UTXO diff --git a/integration_tests/features/support/ffi_steps.js b/integration_tests/features/support/ffi_steps.js index 2428614077..7a599c9fe7 100644 --- a/integration_tests/features/support/ffi_steps.js +++ b/integration_tests/features/support/ffi_steps.js @@ -538,22 +538,22 @@ When("I stop ffi wallet {word}", function (walletName) { }); Then( - "I start STXO validation on ffi wallet {word}", + "I start TXO validation on ffi wallet {word}", async function (wallet_name) { const wallet = this.getWallet(wallet_name); - await wallet.startStxoValidation(); - while (!wallet.getStxoValidationStatus().stxo_validation_complete) { + await wallet.startTxoValidation(); + while (!wallet.getTxoValidationStatus().txo_validation_complete) { await sleep(1000); } } ); Then( - "I start UTXO validation on ffi wallet {word}", + "I start TX validation on ffi wallet {word}", async function (wallet_name) { const wallet = this.getWallet(wallet_name); - await wallet.startUtxoValidation(); - while (!wallet.getUtxoValidationStatus().utxo_validation_complete) { + await wallet.startTxValidation(); + while (!wallet.getTxValidationStatus().tx_validation_complete) { await sleep(1000); } } diff --git a/integration_tests/features/support/steps.js 
b/integration_tests/features/support/steps.js index 2a7b2f6777..56a0efb4db 100644 --- a/integration_tests/features/support/steps.js +++ b/integration_tests/features/support/steps.js @@ -2971,36 +2971,6 @@ Then( } ); -Then( - /the number of coinbase transactions for wallet (.*) and wallet (.*) are (.*) less/, - { timeout: 20 * 1000 }, - async function (walletNameA, walletNameB, count) { - const walletClientA = await this.getWallet(walletNameA).connectClient(); - const transactionsA = await walletClientA.getAllCoinbaseTransactions(); - const walletClientB = await this.getWallet(walletNameB).connectClient(); - const transactionsB = await walletClientB.getAllCoinbaseTransactions(); - if (this.resultStack.length >= 2) { - const walletStats = [this.resultStack.pop(), this.resultStack.pop()]; - console.log( - "\nCoinbase comparison: Expect this (current + deficit)", - transactionsA.length, - transactionsB.length, - Number(count), - "to equal this (previous)", - walletStats[0][1], - walletStats[1][1] - ); - expect( - transactionsA.length + transactionsB.length + Number(count) - ).to.equal(walletStats[0][1] + walletStats[1][1]); - } else { - expect( - "\nCoinbase comparison: Not enough results saved on the stack!" 
- ).to.equal(""); - } - } -); - Then( /all (.*) transactions for wallet (.*) and wallet (.*) have consistent but opposing validity/, { timeout: 20 * 1000 }, diff --git a/integration_tests/helpers/ffi/ffiInterface.js b/integration_tests/helpers/ffi/ffiInterface.js index e2f19f69ee..4eb485baef 100644 --- a/integration_tests/helpers/ffi/ffiInterface.js +++ b/integration_tests/helpers/ffi/ffiInterface.js @@ -285,8 +285,6 @@ class InterfaceFFI { this.ptr, this.ptr, this.ptr, - this.ptr, - this.ptr, this.boolPtr, this.intPtr, ], @@ -383,12 +381,7 @@ class InterfaceFFI { this.intPtr, ], ], - wallet_start_utxo_validation: [this.ulonglong, [this.ptr, this.intPtr]], - wallet_start_stxo_validation: [this.ulonglong, [this.ptr, this.intPtr]], - wallet_start_invalid_txo_validation: [ - this.ulonglong, - [this.ptr, this.intPtr], - ], + wallet_start_txo_validation: [this.ulonglong, [this.ptr, this.intPtr]], wallet_start_transaction_validation: [ this.ulonglong, [this.ptr, this.intPtr], @@ -1123,13 +1116,7 @@ class InterfaceFFI { static createCallbackTransactionCancellation(fn) { return ffi.Callback(this.void, [this.ptr], fn); } - static createCallbackUtxoValidationComplete(fn) { - return ffi.Callback(this.void, [this.ulonglong, this.uchar], fn); - } - static createCallbackStxoValidationComplete(fn) { - return ffi.Callback(this.void, [this.ulonglong, this.uchar], fn); - } - static createCallbackInvalidTxoValidationComplete(fn) { + static createCallbackTxoValidationComplete(fn) { return ffi.Callback(this.void, [this.ulonglong, this.uchar], fn); } static createCallbackTransactionValidationComplete(fn) { @@ -1163,9 +1150,7 @@ class InterfaceFFI { callback_direct_send_result, callback_store_and_forward_send_result, callback_transaction_cancellation, - callback_utxo_validation_complete, - callback_stxo_validation_complete, - callback_invalid_txo_validation_complete, + callback_txo_validation_complete, callback_transaction_validation_complete, callback_saf_message_received ) { @@ -1188,9 
+1173,7 @@ class InterfaceFFI { callback_direct_send_result, callback_store_and_forward_send_result, callback_transaction_cancellation, - callback_utxo_validation_complete, - callback_stxo_validation_complete, - callback_invalid_txo_validation_complete, + callback_txo_validation_complete, callback_transaction_validation_complete, callback_saf_message_received, recovery_in_progress, @@ -1426,24 +1409,10 @@ class InterfaceFFI { return result; } - static walletStartUtxoValidation(ptr) { - let error = this.initError(); - let result = this.fn.wallet_start_utxo_validation(ptr, error); - this.checkErrorResult(error, `walletStartUtxoValidation`); - return result; - } - - static walletStartStxoValidation(ptr) { - let error = this.initError(); - let result = this.fn.wallet_start_stxo_validation(ptr, error); - this.checkErrorResult(error, `walletStartStxoValidation`); - return result; - } - - static walletStartInvalidTxoValidation(ptr) { + static walletStartTxoValidation(ptr) { let error = this.initError(); - let result = this.fn.wallet_start_invalid_txo_validation(ptr, error); - this.checkErrorResult(error, `walletStartInvalidUtxoValidation`); + let result = this.fn.wallet_start_txo_validation(ptr, error); + this.checkErrorResult(error, `walletStartTxoValidation`); return result; } diff --git a/integration_tests/helpers/ffi/wallet.js b/integration_tests/helpers/ffi/wallet.js index db974617cc..70856b18f4 100644 --- a/integration_tests/helpers/ffi/wallet.js +++ b/integration_tests/helpers/ffi/wallet.js @@ -18,12 +18,10 @@ class Wallet { transactionBroadcast = 0; transactionMined = 0; saf_messages = 0; - - utxo_validation_complete = false; - utxo_validation_result = 0; - stxo_validation_complete = false; - stxo_validation_result = 0; - + txo_validation_complete = false; + txo_validation_result = 0; + tx_validation_complete = false; + tx_validation_result = 0; callback_received_transaction; callback_received_transaction_reply; callback_received_finalized_transaction; @@ -33,24 
+31,21 @@ class Wallet { callback_direct_send_result; callback_store_and_forward_send_result; callback_transaction_cancellation; - callback_utxo_validation_complete; - callback_stxo_validation_complete; - callback_invalid_txo_validation_complete; callback_transaction_validation_complete; callback_saf_message_received; recoveryProgressCallback; - getUtxoValidationStatus() { + getTxoValidationStatus() { return { - utxo_validation_complete: this.utxo_validation_complete, - utxo_validation_result: this.utxo_validation_result, + txo_validation_complete: this.txo_validation_complete, + txo_validation_result: this.txo_validation_result, }; } - getStxoValidationStatus() { + getTxValidationStatus() { return { - stxo_validation_complete: this.stxo_validation_complete, - stxo_validation_result: this.stxo_validation_result, + tx_validation_complete: this.tx_validation_complete, + tx_validation_result: this.tx_validation_result, }; } @@ -120,17 +115,9 @@ class Wallet { InterfaceFFI.createCallbackTransactionCancellation( this.onTransactionCancellation ); - this.callback_utxo_validation_complete = - InterfaceFFI.createCallbackUtxoValidationComplete( - this.onUtxoValidationComplete - ); - this.callback_stxo_validation_complete = - InterfaceFFI.createCallbackStxoValidationComplete( - this.onStxoValidationComplete - ); - this.callback_invalid_txo_validation_complete = - InterfaceFFI.createCallbackInvalidTxoValidationComplete( - this.onInvalidTxoValidationComplete + this.callback_txo_validation_complete = + InterfaceFFI.createCallbackTxoValidationComplete( + this.onTxoValidationComplete ); this.callback_transaction_validation_complete = InterfaceFFI.createCallbackTransactionValidationComplete( @@ -177,9 +164,7 @@ class Wallet { this.callback_direct_send_result, this.callback_store_and_forward_send_result, this.callback_transaction_cancellation, - this.callback_utxo_validation_complete, - this.callback_stxo_validation_complete, - this.callback_invalid_txo_validation_complete, + 
this.callback_txo_validation_complete, this.callback_transaction_validation_complete, this.callback_saf_message_received ); @@ -268,36 +253,20 @@ class Wallet { ); }; - onUtxoValidationComplete = (request_key, validation_results) => { - console.log( - `${new Date().toISOString()} callbackUtxoValidationComplete(${request_key},${validation_results})` - ); - this.utxo_validation_complete = true; - this.utxo_validation_result = validation_results; - }; - - onStxoValidationComplete = (request_key, validation_results) => { - console.log( - `${new Date().toISOString()} callbackStxoValidationComplete(${request_key},${validation_results})` - ); - this.stxo_validation_complete = true; - this.stxo_validation_result = validation_results; - }; - - onInvalidTxoValidationComplete = (request_key, validation_results) => { + onTxoValidationComplete = (request_key, validation_results) => { console.log( - `${new Date().toISOString()} callbackInvalidTxoValidationComplete(${request_key},${validation_results})` + `${new Date().toISOString()} callbackTxoValidationComplete(${request_key},${validation_results})` ); - //this.invalidtxo_validation_complete = true; - //this.invalidtxo_validation_result = validation_results; + this.txo_validation_complete = true; + this.txo_validation_result = validation_results; }; onTransactionValidationComplete = (request_key, validation_results) => { console.log( `${new Date().toISOString()} callbackTransactionValidationComplete(${request_key},${validation_results})` ); - //this.transaction_validation_complete = true; - //this.transaction_validation_result = validation_results; + this.tx_validation_complete = true; + this.tx_validation_result = validation_results; }; onSafMessageReceived = () => { @@ -431,12 +400,12 @@ class Wallet { return InterfaceFFI.walletCancelPendingTransaction(this.ptr, tx_id); } - startUtxoValidation() { - return InterfaceFFI.walletStartUtxoValidation(this.ptr); + startTxoValidation() { + return 
InterfaceFFI.walletStartTxoValidation(this.ptr); } - startStxoValidation() { - return InterfaceFFI.walletStartStxoValidation(this.ptr); + startTxValidation() { + return InterfaceFFI.walletStartTransactionValidation(this.ptr); } destroy() { @@ -452,9 +421,7 @@ class Wallet { this.callback_direct_send_result = this.callback_store_and_forward_send_result = this.callback_transaction_cancellation = - this.callback_utxo_validation_complete = - this.callback_stxo_validation_complete = - this.callback_invalid_txo_validation_complete = + this.callback_txo_validation_complete = this.callback_transaction_validation_complete = this.callback_saf_message_received = this.recoveryProgressCallback = diff --git a/integration_tests/helpers/ffi/walletFFI.js b/integration_tests/helpers/ffi/walletFFI.js index 7816253500..e2c032a5fd 100644 --- a/integration_tests/helpers/ffi/walletFFI.js +++ b/integration_tests/helpers/ffi/walletFFI.js @@ -156,11 +156,7 @@ class WalletFFI { ffi.Callback("void", ["uint64", "bool"], callback); this.createCallbackTransactionCancellation = (callback) => ffi.Callback("void", [this.tari_completed_transaction_ptr], callback); - this.createCallbackUtxoValidationComplete = (callback) => - ffi.Callback("void", ["uint64", "uchar"], callback); - this.createCallbackStxoValidationComplete = (callback) => - ffi.Callback("void", ["uint64", "uchar"], callback); - this.createCallbackInvalidTxoValidationComplete = (callback) => + this.createCallbackTxoValidationComplete = (callback) => ffi.Callback("void", ["uint64", "uchar"], callback); this.createCallbackTransactionValidationComplete = (callback) => ffi.Callback("void", ["uint64", "uchar"], callback); @@ -543,12 +539,7 @@ class WalletFFI { "int*", ], ], - wallet_start_utxo_validation: ["uint64", [this.tari_wallet_ptr, "int*"]], - wallet_start_stxo_validation: ["uint64", [this.tari_wallet_ptr, "int*"]], - wallet_start_invalid_txo_validation: [ - "uint64", - [this.tari_wallet_ptr, "int*"], - ], + 
wallet_start_txo_validation: ["uint64", [this.tari_wallet_ptr, "int*"]], wallet_start_transaction_validation: [ "uint64", [this.tari_wallet_ptr, "int*"], @@ -1493,9 +1484,7 @@ class WalletFFI { callback_direct_send_result, callback_store_and_forward_send_result, callback_transaction_cancellation, - callback_utxo_validation_complete, - callback_stxo_validation_complete, - callback_invalid_txo_validation_complete, + callback_txo_validation_complete, callback_transaction_validation_complete, callback_saf_message_received ) { @@ -1516,9 +1505,7 @@ class WalletFFI { callback_direct_send_result, callback_store_and_forward_send_result, callback_transaction_cancellation, - callback_utxo_validation_complete, - callback_stxo_validation_complete, - callback_invalid_txo_validation_complete, + callback_txo_validation_complete, callback_transaction_validation_complete, callback_saf_message_received, this.recovery_in_progress, @@ -1817,32 +1804,12 @@ class WalletFFI { ); } - static walletStartUtxoValidation(wallet) { - return new Promise((resolve, reject) => - this.#fn.wallet_start_utxo_validation.async( - wallet, - this.error, - this.checkAsyncRes(resolve, reject, "walletStartUtxoValidation") - ) - ); - } - - static walletStartStxoValidation(wallet) { - return new Promise((resolve, reject) => - this.#fn.wallet_start_stxo_validation.async( - wallet, - this.error, - this.checkAsyncRes(resolve, reject, "walletStartStxoValidation") - ) - ); - } - - static walletStartInvalidTxoValidation(wallet) { + static walletStartTxoValidation(wallet) { return new Promise((resolve, reject) => - this.#fn.wallet_start_invalid_txo_validation.async( + this.#fn.wallet_start_txo_validation.async( wallet, this.error, - this.checkAsyncRes(resolve, reject, "walletStartInvalidTxoValidation") + this.checkAsyncRes(resolve, reject, "walletStartTxoValidation") ) ); } diff --git a/integration_tests/helpers/walletFFIClient.js b/integration_tests/helpers/walletFFIClient.js index a02fbc7b4b..08d509fb9c 100644 --- 
a/integration_tests/helpers/walletFFIClient.js +++ b/integration_tests/helpers/walletFFIClient.js @@ -53,13 +53,14 @@ class WalletFFIClient { this.start(seed_words_text, pass_phrase); } - getStxoValidationStatus() { - return this.wallet.getStxoValidationStatus(); + getTxoValidationStatus() { + return this.wallet.getTxoValidationStatus(); } - getUtxoValidationStatus() { - return this.wallet.getUtxoValidationStatus(); + getTxValidationStatus() { + return this.wallet.getTxValidationStatus(); } + identify() { return this.wallet.getPublicKey(); } @@ -112,12 +113,12 @@ class WalletFFIClient { this.wallet.applyEncryption(passphrase); } - startStxoValidation() { - this.wallet.startStxoValidation(); + startTxoValidation() { + this.wallet.startTxoValidation(); } - startUtxoValidation() { - this.wallet.startUtxoValidation(); + startTxValidation() { + this.wallet.startTxValidation(); } getCounters() {