From 8ee5a05da3bc1de49ac65a6674c60381f72af21f Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Mon, 29 Aug 2022 13:55:56 +0400 Subject: [PATCH 01/21] feat(core): add template registration sidechain features (#4470) Description --- - adds CodeTemplateRegistration output type - adds TemplateRegistration to SideChainFeatures - add grpc methods for registering a code template Motivation and Context --- Code templates need to be committed to on L1. The `binary_url` and `repo_url` fields are not validated by base node consensus because: 1. The rust implementation of Multiaddr does not currently support paths in http addresses e.g. `/dns4/github.com/tcp/443/http/tari-project/tari.git` is not well-formed however [the spec](https://multiformats.io/multiaddr/) says it should be. 2. Url parsing is complex and adding it to consensus code could introduce bugs (general/security/DoS) Depends on #4466 Depends on #4496 How Has This Been Tested? --- TODO: add cucumber tests for this output type --- .../tari_app_grpc/proto/sidechain_types.proto | 31 +++ applications/tari_app_grpc/proto/wallet.proto | 10 + .../src/conversions/output_features.rs | 3 +- .../src/conversions/sidechain_features.rs | 127 ++++++++++- .../src/grpc/wallet_grpc_server.rs | 58 ++++- .../tests/blockchain_database.rs | 1 - .../core/src/consensus/consensus_encoding.rs | 13 ++ .../src/consensus/consensus_encoding/bytes.rs | 58 ++++- .../consensus/consensus_encoding/string.rs | 215 ++++++++++++++++++ base_layer/core/src/consensus/mod.rs | 3 + base_layer/core/src/covenants/fields.rs | 14 +- base_layer/core/src/covenants/filters/and.rs | 4 - .../src/covenants/filters/fields_hashed_eq.rs | 6 +- .../src/covenants/filters/fields_preserved.rs | 6 +- base_layer/core/src/covenants/test.rs | 28 ++- base_layer/core/src/proto/mod.rs | 2 + .../core/src/proto/sidechain_features.proto | 39 ++++ .../core/src/proto/sidechain_features.rs | 158 +++++++++++++ base_layer/core/src/proto/transaction.proto | 22 +- 
base_layer/core/src/proto/transaction.rs | 18 +- .../transaction_components/output_features.rs | 54 ++++- .../transaction_components/output_type.rs | 5 +- .../transaction_components/side_chain/mod.rs | 5 +- .../side_chain/sidechain_features.rs | 83 +++++-- .../side_chain/template_registration.rs | 186 +++++++++++++++ 25 files changed, 1048 insertions(+), 101 deletions(-) create mode 100644 base_layer/core/src/consensus/consensus_encoding/string.rs create mode 100644 base_layer/core/src/proto/sidechain_features.proto create mode 100644 base_layer/core/src/proto/sidechain_features.rs create mode 100644 base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs diff --git a/applications/tari_app_grpc/proto/sidechain_types.proto b/applications/tari_app_grpc/proto/sidechain_types.proto index d03b7b187b..759732c2d1 100644 --- a/applications/tari_app_grpc/proto/sidechain_types.proto +++ b/applications/tari_app_grpc/proto/sidechain_types.proto @@ -23,5 +23,36 @@ syntax = "proto3"; package tari.rpc; +import "types.proto"; + message SideChainFeatures { + oneof side_chain_features { + TemplateRegistration template_registration = 1; + } +} + +message TemplateRegistration { + bytes author_public_key = 1; + Signature author_signature = 2; + string template_name = 3; + uint32 template_version = 4; + TemplateType template_type = 5; + BuildInfo build_info = 6; + bytes binary_sha = 7; + string binary_url = 8; +} + +message TemplateType { + oneof template_type { + WasmInfo wasm = 1; + } +} + +message WasmInfo { + uint32 abi_version = 1; +} + +message BuildInfo { + string repo_url = 1; + bytes commit_hash = 2; } diff --git a/applications/tari_app_grpc/proto/wallet.proto b/applications/tari_app_grpc/proto/wallet.proto index 2e08212ad5..a873130a71 100644 --- a/applications/tari_app_grpc/proto/wallet.proto +++ b/applications/tari_app_grpc/proto/wallet.proto @@ -71,6 +71,8 @@ service Wallet { rpc ClaimShaAtomicSwapTransaction(ClaimShaAtomicSwapRequest) 
returns (ClaimShaAtomicSwapResponse); // This will claim a HTLC refund transaction rpc ClaimHtlcRefundTransaction(ClaimHtlcRefundRequest) returns (ClaimHtlcRefundResponse); + // Creates a transaction with a template registration output + rpc CreateTemplateRegistration(CreateTemplateRegistrationRequest) returns (CreateTemplateRegistrationResponse); rpc SetBaseNode(SetBaseNodeRequest) returns (SetBaseNodeResponse); rpc StreamTransactionEvents(TransactionEventRequest) returns (stream TransactionEventResponse); @@ -99,6 +101,7 @@ message CreateBurnTransactionRequest{ string message = 3; } + message PaymentRecipient { string address = 1; uint64 amount = 2; @@ -259,6 +262,13 @@ message ImportUtxosResponse { repeated uint64 tx_ids = 1; } +message CreateTemplateRegistrationRequest { + TemplateRegistration template_registration = 1; + uint64 fee_per_gram = 2; +} + +message CreateTemplateRegistrationResponse { } + message CancelTransactionRequest { uint64 tx_id = 1; } diff --git a/applications/tari_app_grpc/src/conversions/output_features.rs b/applications/tari_app_grpc/src/conversions/output_features.rs index 872188e6b8..15e6e40325 100644 --- a/applications/tari_app_grpc/src/conversions/output_features.rs +++ b/applications/tari_app_grpc/src/conversions/output_features.rs @@ -37,6 +37,7 @@ impl TryFrom for OutputFeatures { fn try_from(features: grpc::OutputFeatures) -> Result { let sidechain_features = features .sidechain_features + .and_then(|f| f.side_chain_features) .map(SideChainFeatures::try_from) .transpose()?; @@ -64,7 +65,7 @@ impl From for grpc::OutputFeatures { output_type: u32::from(features.output_type.as_byte()), maturity: features.maturity, metadata: features.metadata, - sidechain_features: features.sidechain_features.map(|v| *v).map(Into::into), + sidechain_features: features.sidechain_features.map(Into::into), } } } diff --git a/applications/tari_app_grpc/src/conversions/sidechain_features.rs b/applications/tari_app_grpc/src/conversions/sidechain_features.rs 
index e97bda9428..6cb6a04956 100644 --- a/applications/tari_app_grpc/src/conversions/sidechain_features.rs +++ b/applications/tari_app_grpc/src/conversions/sidechain_features.rs @@ -20,22 +20,135 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::convert::TryFrom; +use std::convert::{TryFrom, TryInto}; -use tari_core::transactions::transaction_components::SideChainFeatures; +use tari_common_types::types::{PublicKey, Signature}; +use tari_core::{ + consensus::MaxSizeString, + transactions::transaction_components::{BuildInfo, CodeTemplateRegistration, SideChainFeatures, TemplateType}, +}; +use tari_utilities::ByteArray; use crate::tari_rpc as grpc; +//---------------------------------- SideChainFeatures --------------------------------------------// impl From for grpc::SideChainFeatures { - fn from(_value: SideChainFeatures) -> Self { - Self {} + fn from(value: SideChainFeatures) -> Self { + value.into() } } -impl TryFrom for SideChainFeatures { +impl From for grpc::side_chain_features::SideChainFeatures { + fn from(value: SideChainFeatures) -> Self { + match value { + SideChainFeatures::TemplateRegistration(template_reg) => { + grpc::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg.into()) + }, + } + } +} + +impl TryFrom for SideChainFeatures { + type Error = String; + + fn try_from(features: grpc::side_chain_features::SideChainFeatures) -> Result { + match features { + grpc::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg) => { + Ok(SideChainFeatures::TemplateRegistration(template_reg.try_into()?)) + }, + } + } +} + +// -------------------------------- TemplateRegistration -------------------------------- // +impl TryFrom for CodeTemplateRegistration { + type Error = String; + + fn try_from(value: grpc::TemplateRegistration) -> Result { + Ok(Self { + 
author_public_key: PublicKey::from_bytes(&value.author_public_key).map_err(|e| e.to_string())?, + author_signature: value + .author_signature + .map(Signature::try_from) + .ok_or("author_signature not provided")??, + template_name: MaxSizeString::try_from(value.template_name).map_err(|e| e.to_string())?, + template_version: value + .template_version + .try_into() + .map_err(|_| "Invalid template version")?, + template_type: value + .template_type + .map(TryFrom::try_from) + .ok_or("Template type not provided")??, + build_info: value + .build_info + .map(TryFrom::try_from) + .ok_or("Build info not provided")??, + binary_sha: value.binary_sha.try_into().map_err(|_| "Invalid commit sha")?, + binary_url: MaxSizeString::try_from(value.binary_url).map_err(|e| e.to_string())?, + }) + } +} + +impl From for grpc::TemplateRegistration { + fn from(value: CodeTemplateRegistration) -> Self { + Self { + author_public_key: value.author_public_key.to_vec(), + author_signature: Some(value.author_signature.into()), + template_name: value.template_name.to_string(), + template_version: u32::from(value.template_version), + template_type: Some(value.template_type.into()), + build_info: Some(value.build_info.into()), + binary_sha: value.binary_sha.to_vec(), + binary_url: value.binary_url.to_string(), + } + } +} + +// -------------------------------- TemplateType -------------------------------- // +impl TryFrom for TemplateType { + type Error = String; + + fn try_from(value: grpc::TemplateType) -> Result { + let template_type = value.template_type.ok_or("Template type not provided")?; + match template_type { + grpc::template_type::TemplateType::Wasm(wasm) => Ok(TemplateType::Wasm { + abi_version: wasm.abi_version.try_into().map_err(|_| "abi_version overflowed")?, + }), + } + } +} + +impl From for grpc::TemplateType { + fn from(value: TemplateType) -> Self { + match value { + TemplateType::Wasm { abi_version } => Self { + template_type: 
Some(grpc::template_type::TemplateType::Wasm(grpc::WasmInfo { + abi_version: abi_version.into(), + })), + }, + } + } +} + +// -------------------------------- BuildInfo -------------------------------- // + +impl TryFrom for BuildInfo { type Error = String; - fn try_from(_features: grpc::SideChainFeatures) -> Result { - Ok(Self {}) + fn try_from(value: grpc::BuildInfo) -> Result { + Ok(Self { + repo_url: value.repo_url.try_into().map_err(|_| "Invalid repo url")?, + commit_hash: value.commit_hash.try_into().map_err(|_| "Invalid commit hash")?, + }) + } +} + +impl From for grpc::BuildInfo { + fn from(value: BuildInfo) -> Self { + Self { + repo_url: value.repo_url.into_string(), + commit_hash: value.commit_hash.into_vec(), + } } } diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index 4fdb2fb3f2..d7acb6a44b 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -48,6 +48,8 @@ use tari_app_grpc::{ CoinSplitResponse, CreateBurnTransactionRequest, CreateBurnTransactionResponse, + CreateTemplateRegistrationRequest, + CreateTemplateRegistrationResponse, FileDeletedResponse, GetBalanceRequest, GetBalanceResponse, @@ -89,13 +91,19 @@ use tari_common_types::{ }; use tari_comms::{multiaddr::Multiaddr, types::CommsPublicKey, CommsNode}; use tari_core::transactions::{ - tari_amount::MicroTari, - transaction_components::{OutputFeatures, UnblindedOutput}, + tari_amount::{MicroTari, T}, + transaction_components::{ + CodeTemplateRegistration, + OutputFeatures, + OutputType, + SideChainFeatures, + UnblindedOutput, + }, }; use tari_utilities::{hex::Hex, ByteArray}; use tari_wallet::{ connectivity_service::{OnlineStatus, WalletConnectivityInterface}, - output_manager_service::handle::OutputManagerHandle, + output_manager_service::{handle::OutputManagerHandle, UtxoSelectionCriteria}, 
transaction_service::{ handle::TransactionServiceHandle, storage::models::{self, WalletTransaction}, @@ -915,6 +923,50 @@ impl wallet_server::Wallet for WalletGrpcServer { Ok(Response::new(FileDeletedResponse {})) } + + async fn create_template_registration( + &self, + request: Request, + ) -> Result, Status> { + let mut output_manager = self.wallet.output_manager_service.clone(); + let mut transaction_service = self.wallet.transaction_service.clone(); + let message = request.into_inner(); + + let template_registration = CodeTemplateRegistration::try_from( + message + .template_registration + .ok_or_else(|| Status::invalid_argument("template_registration is empty"))?, + ) + .map_err(|e| Status::invalid_argument(format!("template_registration is invalid: {}", e)))?; + let fee_per_gram = message.fee_per_gram; + + let message = format!("Template registration {}", template_registration.template_name); + let output = output_manager + .create_output_with_features(1 * T, OutputFeatures { + output_type: OutputType::CodeTemplateRegistration, + sidechain_features: Some(SideChainFeatures::TemplateRegistration(template_registration)), + ..Default::default() + }) + .await + .map_err(|e| Status::internal(e.to_string()))?; + + let (tx_id, transaction) = output_manager + .create_send_to_self_with_output(vec![output], fee_per_gram.into(), UtxoSelectionCriteria::default()) + .await + .map_err(|e| Status::internal(e.to_string()))?; + + debug!( + target: LOG_TARGET, + "Template registration transaction: {:?}", transaction + ); + + let _ = transaction_service + .submit_transaction(tx_id, transaction, 0.into(), message) + .await + .map_err(|e| Status::internal(e.to_string()))?; + + Ok(Response::new(CreateTemplateRegistrationResponse {})) + } } async fn handle_completed_tx( diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index b32a7cd756..69ec71043e 100644 --- 
a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -41,7 +41,6 @@ use crate::{ }, txn_schema, }; - fn setup() -> BlockchainDatabase { create_new_blockchain() } diff --git a/base_layer/core/src/consensus/consensus_encoding.rs b/base_layer/core/src/consensus/consensus_encoding.rs index 2a998daa03..1917766f0d 100644 --- a/base_layer/core/src/consensus/consensus_encoding.rs +++ b/base_layer/core/src/consensus/consensus_encoding.rs @@ -30,10 +30,13 @@ mod hashing; mod integers; mod micro_tari; mod script; +mod string; mod vec; + use std::io; pub use hashing::{ConsensusHasher, DomainSeparatedConsensusHasher}; +pub use string::MaxSizeString; pub use vec::MaxSizeVec; pub use self::bytes::MaxSizeBytes; @@ -93,6 +96,16 @@ impl FromConsensusBytes for T { } } +pub fn read_byte(reader: &mut R) -> Result { + let mut buf = [0u8; 1]; + reader.read_exact(&mut buf)?; + Ok(buf[0]) +} + +pub fn write_byte(writer: &mut W, byte: u8) -> Result<(), io::Error> { + writer.write_all(&[byte]) +} + #[cfg(test)] pub mod test { use super::*; diff --git a/base_layer/core/src/consensus/consensus_encoding/bytes.rs b/base_layer/core/src/consensus/consensus_encoding/bytes.rs index 6e29da6753..a3a4385fe5 100644 --- a/base_layer/core/src/consensus/consensus_encoding/bytes.rs +++ b/base_layer/core/src/consensus/consensus_encoding/bytes.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use std::{ + cmp, convert::TryFrom, io, io::{Error, Read, Write}, @@ -28,6 +29,7 @@ use std::{ }; use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; +use serde::{Deserialize, Serialize}; use crate::consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized}; @@ -47,10 +49,34 @@ impl ConsensusEncodingSized for Vec { } } +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default, Deserialize, Serialize)] pub struct MaxSizeBytes { inner: Vec, } +impl MaxSizeBytes { + pub fn into_vec(self) -> Vec { + self.inner + } + + pub fn from_bytes_checked>(bytes: T) -> Option { + let b = bytes.as_ref(); + if b.len() > MAX { + None + } else { + Some(Self { inner: b.to_vec() }) + } + } + + pub fn from_bytes_truncate>(bytes: T) -> Self { + let b = bytes.as_ref(); + let len = cmp::min(b.len(), MAX); + Self { + inner: b[..len].to_vec(), + } + } +} + impl From> for Vec { fn from(value: MaxSizeBytes) -> Self { value.inner @@ -68,6 +94,18 @@ impl TryFrom> for MaxSizeBytes { } } +impl ConsensusEncoding for MaxSizeBytes { + fn consensus_encode(&self, writer: &mut W) -> Result<(), io::Error> { + self.inner.consensus_encode(writer) + } +} + +impl ConsensusEncodingSized for MaxSizeBytes { + fn consensus_encode_exact_size(&self) -> usize { + self.inner.consensus_encode_exact_size() + } +} + impl ConsensusDecoding for MaxSizeBytes { fn consensus_decode(reader: &mut R) -> Result { let len = reader.read_varint()?; @@ -108,7 +146,8 @@ impl ConsensusEncoding for &[u8] { impl ConsensusEncodingSized for &[u8] { fn consensus_encode_exact_size(&self) -> usize { - self.len() + let len = self.len(); + len.required_space() + len } } @@ -139,7 +178,7 @@ mod test { use rand::{rngs::OsRng, RngCore}; use super::*; - use crate::consensus::{check_consensus_encoding_correctness, ToConsensusBytes}; + use crate::consensus::check_consensus_encoding_correctness; #[test] fn it_encodes_and_decodes_correctly() { @@ -147,9 +186,18 @@ mod test { OsRng.fill_bytes(&mut subject); 
check_consensus_encoding_correctness(subject).unwrap(); + // &[u8] consensus encoding + let mut buf = Vec::new(); + let slice = subject.as_slice(); + slice.consensus_encode(&mut buf).unwrap(); + assert_eq!(buf.len(), slice.consensus_encode_exact_size()); + let mut reader = buf.as_slice(); + let decoded: MaxSizeBytes<1024> = ConsensusDecoding::consensus_decode(&mut reader).unwrap(); + assert_eq!(&*decoded, slice); + assert!(reader.is_empty()); + // Get vec encoding with length byte - let encoded = subject.to_vec().to_consensus_bytes(); - let decoded = MaxSizeBytes::<1024>::consensus_decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(*decoded, *subject.as_slice()); + let subject = MaxSizeBytes::<1024>::from_bytes_checked(&subject).unwrap(); + check_consensus_encoding_correctness(subject).unwrap(); } } diff --git a/base_layer/core/src/consensus/consensus_encoding/string.rs b/base_layer/core/src/consensus/consensus_encoding/string.rs new file mode 100644 index 0000000000..41bb97cc1a --- /dev/null +++ b/base_layer/core/src/consensus/consensus_encoding/string.rs @@ -0,0 +1,215 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::{ + convert::TryFrom, + fmt::Display, + io, + io::{Read, Write}, +}; + +use serde::{Deserialize, Serialize}; + +use crate::consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, MaxSizeBytes}; + +/// A string that can only be a up to MAX length long +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub struct MaxSizeString { + string: String, +} + +impl MaxSizeString { + pub fn from_str_checked(s: &str) -> Option { + if s.len() > MAX { + return None; + } + Some(Self { string: s.to_string() }) + } + + pub fn from_utf8_bytes_checked>(bytes: T) -> Option { + let b = bytes.as_ref(); + if b.len() > MAX { + return None; + } + + let s = String::from_utf8(b.to_vec()).ok()?; + Some(Self { string: s }) + } + + pub fn len(&self) -> usize { + self.string.len() + } + + pub fn is_empty(&self) -> bool { + self.string.is_empty() + } + + pub fn as_str(&self) -> &str { + &self.string + } + + pub fn into_string(self) -> String { + self.string + } +} + +impl TryFrom for MaxSizeString { + type Error = MaxSizeStringLengthError; + + fn try_from(value: String) -> Result { + if value.len() > MAX { + return Err(MaxSizeStringLengthError { + actual: 
value.len(), + expected: MAX, + }); + } + Ok(Self { string: value }) + } +} + +impl TryFrom<&str> for MaxSizeString { + type Error = MaxSizeStringLengthError; + + fn try_from(value: &str) -> Result { + if value.len() > MAX { + return Err(MaxSizeStringLengthError { + actual: value.len(), + expected: MAX, + }); + } + Ok(Self { + string: value.to_string(), + }) + } +} + +impl AsRef<[u8]> for MaxSizeString { + fn as_ref(&self) -> &[u8] { + self.string.as_ref() + } +} + +impl Display for MaxSizeString { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.string) + } +} + +impl ConsensusEncoding for MaxSizeString { + fn consensus_encode(&self, writer: &mut W) -> Result<(), io::Error> { + self.string.as_bytes().consensus_encode(writer) + } +} + +impl ConsensusEncodingSized for MaxSizeString { + fn consensus_encode_exact_size(&self) -> usize { + self.string.as_bytes().consensus_encode_exact_size() + } +} + +impl ConsensusDecoding for MaxSizeString { + fn consensus_decode(reader: &mut R) -> Result { + let raw_bytes = MaxSizeBytes::::consensus_decode(reader)?; + let s = String::from_utf8(raw_bytes.into_vec()) + .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "Invalid UTF-8"))?; + Ok(Self { string: s }) + } +} + +#[derive(Debug, thiserror::Error)] +#[error("Invalid String length: expected {expected}, got {actual}")] +pub struct MaxSizeStringLengthError { + expected: usize, + actual: usize, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::consensus::check_consensus_encoding_correctness; + + mod from_str_checked { + use super::*; + + #[test] + fn it_returns_none_if_size_exceeded() { + let s = MaxSizeString::<10>::from_str_checked("12345678901234567890"); + assert_eq!(s, None); + } + + #[test] + fn it_returns_some_if_size_in_bounds() { + let s = MaxSizeString::<0>::from_str_checked("").unwrap(); + assert_eq!(s.as_str(), ""); + assert_eq!(s.len(), 0); + + let s = 
MaxSizeString::<10>::from_str_checked("1234567890").unwrap(); + assert_eq!(s.as_str(), "1234567890"); + assert_eq!(s.len(), 10); + + let s = MaxSizeString::<10>::from_str_checked("1234").unwrap(); + assert_eq!(s.as_str(), "1234"); + assert_eq!(s.len(), 4); + + let s = MaxSizeString::<8>::from_str_checked("🚀🚀").unwrap(); + assert_eq!(s.as_str(), "🚀🚀"); + // 8 here because an emoji char take 4 bytes each + assert_eq!(s.len(), 8); + } + } + + mod from_utf8_bytes_checked { + use super::*; + + #[test] + fn it_returns_none_if_size_exceeded() { + let s = MaxSizeString::<10>::from_utf8_bytes_checked(&[0u8; 11]); + assert_eq!(s, None); + } + + #[test] + fn it_returns_some_if_size_in_bounds() { + let s = MaxSizeString::<12>::from_utf8_bytes_checked("💡🧭🛖".as_bytes()).unwrap(); + assert_eq!(s.as_str(), "💡🧭🛖"); + assert_eq!(s.len(), 12); + } + + #[test] + fn it_returns_none_if_invalid_utf8() { + let s = MaxSizeString::<10>::from_utf8_bytes_checked(&[255u8; 10]); + assert_eq!(s, None); + } + } + + mod consensus_encoding { + use super::*; + + #[test] + fn it_encodes_and_decodes_correctly() { + let s = MaxSizeString::<16>::from_utf8_bytes_checked("💡🧭🛖".as_bytes()).unwrap(); + check_consensus_encoding_correctness(s).unwrap(); + + let s = MaxSizeString::<0>::from_str_checked("").unwrap(); + check_consensus_encoding_correctness(s).unwrap(); + } + } +} diff --git a/base_layer/core/src/consensus/mod.rs b/base_layer/core/src/consensus/mod.rs index cff83e7f5f..595bf05942 100644 --- a/base_layer/core/src/consensus/mod.rs +++ b/base_layer/core/src/consensus/mod.rs @@ -33,6 +33,8 @@ mod consensus_encoding; #[cfg(test)] pub(crate) use consensus_encoding::test::check_consensus_encoding_correctness; pub use consensus_encoding::{ + read_byte, + write_byte, ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, @@ -40,6 +42,7 @@ pub use consensus_encoding::{ DomainSeparatedConsensusHasher, FromConsensusBytes, MaxSizeBytes, + MaxSizeString, MaxSizeVec, ToConsensusBytes, }; diff --git 
a/base_layer/core/src/covenants/fields.rs b/base_layer/core/src/covenants/fields.rs index 087d948c66..a66ca3b47a 100644 --- a/base_layer/core/src/covenants/fields.rs +++ b/base_layer/core/src/covenants/fields.rs @@ -335,13 +335,14 @@ impl FromIterator for OutputFields { #[cfg(test)] mod test { + use tari_common_types::types::{Commitment, PublicKey}; use tari_script::script; use super::*; use crate::{ covenant, - covenants::test::{create_input, create_outputs}, + covenants::test::{create_input, create_outputs, make_sample_sidechain_features}, transactions::{ test_helpers::UtxoTestParams, transaction_components::{OutputFeatures, OutputType, SpentOutput}, @@ -352,14 +353,15 @@ mod test { use super::*; mod is_eq { + use super::*; - use crate::transactions::transaction_components::SideChainFeatures; #[test] fn it_returns_true_if_eq() { + let side_chain_features = make_sample_sidechain_features(); let output = create_outputs(1, UtxoTestParams { features: OutputFeatures { - sidechain_features: Some(Box::new(SideChainFeatures {})), + sidechain_features: Some(side_chain_features), ..Default::default() }, script: script![Drop Nop], @@ -377,9 +379,6 @@ mod test { assert!(OutputField::FeaturesOutputType .is_eq(&output, &output.features.output_type) .unwrap()); - assert!(OutputField::FeaturesSideChainFeatures - .is_eq(&output, &SideChainFeatures {}) - .unwrap()); assert!(OutputField::FeaturesSideChainFeatures .is_eq(&output, output.features.sidechain_features.as_ref().unwrap()) .unwrap()); @@ -393,9 +392,10 @@ mod test { #[test] fn it_returns_false_if_not_eq() { + let side_chain_features = make_sample_sidechain_features(); let output = create_outputs(1, UtxoTestParams { features: OutputFeatures { - sidechain_features: Some(Box::new(SideChainFeatures {})), + sidechain_features: Some(side_chain_features), ..Default::default() }, script: script![Drop Nop], diff --git a/base_layer/core/src/covenants/filters/and.rs b/base_layer/core/src/covenants/filters/and.rs index 
726b7e16e1..22aacff685 100644 --- a/base_layer/core/src/covenants/filters/and.rs +++ b/base_layer/core/src/covenants/filters/and.rs @@ -45,7 +45,6 @@ mod test { use crate::{ covenant, covenants::{filters::test::setup_filter_test, test::create_input}, - transactions::transaction_components::SideChainFeatures, }; #[test] @@ -56,12 +55,9 @@ mod test { let input = create_input(); let (mut context, outputs) = setup_filter_test(&covenant, &input, 0, |outputs| { outputs[5].features.maturity = 42; - outputs[5].features.sidechain_features = Some(Box::new(SideChainFeatures {})); outputs[7].features.maturity = 42; - outputs[7].features.sidechain_features = Some(Box::new(SideChainFeatures {})); // Does not have maturity = 42 outputs[8].features.maturity = 123; - outputs[8].features.sidechain_features = Some(Box::new(SideChainFeatures {})); }); let mut output_set = OutputSet::new(&outputs); diff --git a/base_layer/core/src/covenants/filters/fields_hashed_eq.rs b/base_layer/core/src/covenants/filters/fields_hashed_eq.rs index 1eaf6e9dc5..2fda7c00a7 100644 --- a/base_layer/core/src/covenants/filters/fields_hashed_eq.rs +++ b/base_layer/core/src/covenants/filters/fields_hashed_eq.rs @@ -49,18 +49,18 @@ mod test { covenant, covenants::{ filters::test::setup_filter_test, - test::create_input, + test::{create_input, make_sample_sidechain_features}, BaseLayerCovenantsDomain, COVENANTS_FIELD_HASHER_LABEL, }, - transactions::transaction_components::{OutputFeatures, SideChainFeatures}, + transactions::transaction_components::OutputFeatures, }; #[test] fn it_filters_outputs_with_fields_that_hash_to_given_hash() { let features = OutputFeatures { maturity: 42, - sidechain_features: Some(Box::new(SideChainFeatures {})), + sidechain_features: Some(make_sample_sidechain_features()), ..Default::default() }; let mut hasher = Challenge::new(); diff --git a/base_layer/core/src/covenants/filters/fields_preserved.rs b/base_layer/core/src/covenants/filters/fields_preserved.rs index 
9b64b8a1a3..0f37390d0d 100644 --- a/base_layer/core/src/covenants/filters/fields_preserved.rs +++ b/base_layer/core/src/covenants/filters/fields_preserved.rs @@ -41,7 +41,7 @@ mod test { use crate::{ covenant, covenants::{filters::test::setup_filter_test, test::create_input}, - transactions::transaction_components::{OutputType, SideChainFeatures}, + transactions::transaction_components::OutputType, }; #[test] @@ -49,17 +49,13 @@ mod test { let covenant = covenant!(fields_preserved(@fields(@field::features_maturity, @field::features_output_type))); let mut input = create_input(); input.set_maturity(42).unwrap(); - input.features_mut().unwrap().sidechain_features = Some(Box::new(SideChainFeatures {})); input.features_mut().unwrap().output_type = OutputType::Standard; let (mut context, outputs) = setup_filter_test(&covenant, &input, 0, |outputs| { outputs[5].features.maturity = 42; - outputs[5].features.sidechain_features = Some(Box::new(SideChainFeatures {})); outputs[5].features.output_type = OutputType::Standard; outputs[7].features.maturity = 42; outputs[7].features.output_type = OutputType::Standard; - outputs[7].features.sidechain_features = Some(Box::new(SideChainFeatures {})); outputs[8].features.maturity = 42; - outputs[8].features.sidechain_features = Some(Box::new(SideChainFeatures {})); outputs[8].features.output_type = OutputType::Coinbase; }); let mut output_set = OutputSet::new(&outputs); diff --git a/base_layer/core/src/covenants/test.rs b/base_layer/core/src/covenants/test.rs index 96a700ca99..a549a1d6ab 100644 --- a/base_layer/core/src/covenants/test.rs +++ b/base_layer/core/src/covenants/test.rs @@ -20,13 +20,20 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::iter; +use std::{convert::TryInto, iter}; use crate::{ covenants::{context::CovenantContext, Covenant}, transactions::{ test_helpers::{TestParams, UtxoTestParams}, - transaction_components::{TransactionInput, TransactionOutput}, + transaction_components::{ + BuildInfo, + CodeTemplateRegistration, + SideChainFeatures, + TemplateType, + TransactionInput, + TransactionOutput, + }, }, }; @@ -50,3 +57,20 @@ pub fn create_context<'a>(covenant: &Covenant, input: &'a TransactionInput, bloc let tokens = covenant.tokens().to_vec(); CovenantContext::new(tokens.into(), input, block_height) } + +pub fn make_sample_sidechain_features() -> SideChainFeatures { + let template_reg = CodeTemplateRegistration { + author_public_key: Default::default(), + author_signature: Default::default(), + template_name: "test".to_string().try_into().unwrap(), + template_version: 0, + template_type: TemplateType::Wasm { abi_version: 0 }, + build_info: BuildInfo { + repo_url: "https://github.com/tari-project/tari.git".try_into().unwrap(), + commit_hash: Default::default(), + }, + binary_sha: Default::default(), + binary_url: "https://github.com/tari-project/tari.git".try_into().unwrap(), + }; + SideChainFeatures::TemplateRegistration(template_reg) +} diff --git a/base_layer/core/src/proto/mod.rs b/base_layer/core/src/proto/mod.rs index 41209a82b2..9294f73535 100644 --- a/base_layer/core/src/proto/mod.rs +++ b/base_layer/core/src/proto/mod.rs @@ -52,4 +52,6 @@ mod block; #[cfg(any(feature = "base_node", feature = "base_node_proto"))] mod block_header; #[cfg(any(feature = "base_node", feature = "base_node_proto"))] +mod sidechain_features; +#[cfg(any(feature = "base_node", feature = "base_node_proto"))] mod utils; diff --git a/base_layer/core/src/proto/sidechain_features.proto b/base_layer/core/src/proto/sidechain_features.proto new file mode 100644 index 0000000000..96df8f61ad --- /dev/null +++ b/base_layer/core/src/proto/sidechain_features.proto @@ -0,0 +1,39 @@ +// Copyright 2022 The Tari 
Project +// SPDX-License-Identifier: BSD-3-Clause + +syntax = "proto3"; + +import "types.proto"; + +package tari.types; + +message SideChainFeatures { + oneof side_chain_features { + TemplateRegistration template_registration = 1; + } +} + +message TemplateRegistration { + bytes author_public_key = 1; + Signature author_signature = 2; + string template_name = 3; + uint32 template_version = 4; + TemplateType template_type = 5; + BuildInfo build_info = 6; + bytes binary_sha = 7; + string binary_url = 8; +} + +message TemplateType { + oneof template_type { + WasmInfo wasm = 1; + } +} + message WasmInfo { + uint32 abi_version = 1; + } + + message BuildInfo { + string repo_url = 1; + bytes commit_hash = 2; +} diff --git a/base_layer/core/src/proto/sidechain_features.rs b/base_layer/core/src/proto/sidechain_features.rs new file mode 100644 index 0000000000..8002346a6c --- /dev/null +++ b/base_layer/core/src/proto/sidechain_features.rs @@ -0,0 +1,158 @@ +// Copyright 2019, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//! Impls for sidechain_features proto + +use std::convert::{TryFrom, TryInto}; + +use tari_common_types::types::{PublicKey, Signature}; +use tari_utilities::ByteArray; + +use crate::{ + consensus::MaxSizeString, + proto, + transactions::transaction_components::{BuildInfo, CodeTemplateRegistration, SideChainFeatures, TemplateType}, +}; + +//---------------------------------- SideChainFeatures --------------------------------------------// +impl From for proto::types::SideChainFeatures { + fn from(value: SideChainFeatures) -> Self { + value.into() + } +} + +impl From for proto::types::side_chain_features::SideChainFeatures { + fn from(value: SideChainFeatures) -> Self { + match value { + SideChainFeatures::TemplateRegistration(template_reg) => { + proto::types::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg.into()) + }, + } + } +} + +impl TryFrom for SideChainFeatures { + type Error = String; + + fn try_from(features: proto::types::side_chain_features::SideChainFeatures) -> Result { + match features { + proto::types::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg) => { + Ok(SideChainFeatures::TemplateRegistration(template_reg.try_into()?)) + }, + } + } +} + +// -------------------------------- TemplateRegistration -------------------------------- // +impl TryFrom for CodeTemplateRegistration { + type Error = String; + + fn try_from(value: proto::types::TemplateRegistration) -> Result { + 
Ok(Self { + author_public_key: PublicKey::from_bytes(&value.author_public_key).map_err(|e| e.to_string())?, + author_signature: value + .author_signature + .map(Signature::try_from) + .ok_or("author_signature not provided")??, + template_name: MaxSizeString::try_from(value.template_name).map_err(|e| e.to_string())?, + template_version: value + .template_version + .try_into() + .map_err(|_| "Invalid template version")?, + template_type: value + .template_type + .map(TryFrom::try_from) + .ok_or("Template type not provided")??, + build_info: value + .build_info + .map(TryFrom::try_from) + .ok_or("Build info not provided")??, + binary_sha: value.binary_sha.try_into().map_err(|_| "Invalid commit sha")?, + binary_url: MaxSizeString::try_from(value.binary_url).map_err(|e| e.to_string())?, + }) + } +} + +impl From for proto::types::TemplateRegistration { + fn from(value: CodeTemplateRegistration) -> Self { + Self { + author_public_key: value.author_public_key.to_vec(), + author_signature: Some(value.author_signature.into()), + template_name: value.template_name.to_string(), + template_version: u32::from(value.template_version), + template_type: Some(value.template_type.into()), + build_info: Some(value.build_info.into()), + binary_sha: value.binary_sha.to_vec(), + binary_url: value.binary_url.to_string(), + } + } +} + +// -------------------------------- TemplateType -------------------------------- // +impl TryFrom for TemplateType { + type Error = String; + + fn try_from(value: proto::types::TemplateType) -> Result { + let template_type = value.template_type.ok_or("Template type not provided")?; + match template_type { + proto::types::template_type::TemplateType::Wasm(wasm) => Ok(TemplateType::Wasm { + abi_version: wasm.abi_version.try_into().map_err(|_| "abi_version overflowed")?, + }), + } + } +} + +impl From for proto::types::TemplateType { + fn from(value: TemplateType) -> Self { + match value { + TemplateType::Wasm { abi_version } => Self { + template_type: 
Some(proto::types::template_type::TemplateType::Wasm( + proto::types::WasmInfo { + abi_version: abi_version.into(), + }, + )), + }, + } + } +} + +// -------------------------------- BuildInfo -------------------------------- // + +impl TryFrom for BuildInfo { + type Error = String; + + fn try_from(value: proto::types::BuildInfo) -> Result { + Ok(Self { + repo_url: value.repo_url.try_into().map_err(|_| "Invalid repo url")?, + commit_hash: value.commit_hash.try_into().map_err(|_| "Invalid commit hash")?, + }) + } +} + +impl From for proto::types::BuildInfo { + fn from(value: BuildInfo) -> Self { + Self { + repo_url: value.repo_url.into_string(), + commit_hash: value.commit_hash.into_vec(), + } + } +} diff --git a/base_layer/core/src/proto/transaction.proto b/base_layer/core/src/proto/transaction.proto index 231580af57..34d3f2b77e 100644 --- a/base_layer/core/src/proto/transaction.proto +++ b/base_layer/core/src/proto/transaction.proto @@ -4,6 +4,7 @@ syntax = "proto3"; import "types.proto"; +import "sidechain_features.proto"; package tari.types; @@ -97,26 +98,7 @@ message OutputFeatures { // require a min maturity of the Coinbase_lock_height, this should be checked on receiving new blocks. uint64 maturity = 3; bytes metadata = 4; - SideChainFeatures sidechain_features = 6; -} - -message SideChainFeatures {} - - -message TemplateParameter { - uint32 template_id = 1; - uint32 template_data_version = 2; - bytes template_data = 3; -} - -message PublicFunction { - bytes name = 1; - FunctionRef function = 2; -} - -message FunctionRef { - bytes template_id = 1; - uint32 function_id = 2; + SideChainFeatures sidechain_features = 5; } // The components of the block or transaction. 
The same struct can be used for either, since in Mimblewimble, diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index e5d0b593c4..d199623bd9 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -296,6 +296,7 @@ impl TryFrom for OutputFeatures { fn try_from(features: proto::types::OutputFeatures) -> Result { let sidechain_features = features .sidechain_features + .and_then(|features| features.side_chain_features) .map(SideChainFeatures::try_from) .transpose()?; @@ -323,26 +324,11 @@ impl From for proto::types::OutputFeatures { maturity: features.maturity, metadata: features.metadata, version: features.version as u32, - sidechain_features: features.sidechain_features.map(|v| *v).map(Into::into), + sidechain_features: features.sidechain_features.map(Into::into), } } } -//---------------------------------- SideChainFeatures --------------------------------------------// -impl From for proto::types::SideChainFeatures { - fn from(_value: SideChainFeatures) -> Self { - Self {} - } -} - -impl TryFrom for SideChainFeatures { - type Error = String; - - fn try_from(_features: proto::types::SideChainFeatures) -> Result { - Ok(Self {}) - } -} - //---------------------------------- AggregateBody --------------------------------------------// impl TryFrom for AggregateBody { diff --git a/base_layer/core/src/transactions/transaction_components/output_features.rs b/base_layer/core/src/transactions/transaction_components/output_features.rs index f18dfc2ac9..aaa8a48b2a 100644 --- a/base_layer/core/src/transactions/transaction_components/output_features.rs +++ b/base_layer/core/src/transactions/transaction_components/output_features.rs @@ -33,7 +33,7 @@ use serde::{Deserialize, Serialize}; use super::OutputFeaturesVersion; use crate::{ consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, MaxSizeBytes}, - 
transactions::transaction_components::{side_chain::SideChainFeatures, OutputType}, + transactions::transaction_components::{side_chain::SideChainFeatures, CodeTemplateRegistration, OutputType}, }; /// Options for UTXO's @@ -46,24 +46,23 @@ pub struct OutputFeatures { /// require a min maturity of the Coinbase_lock_height, this should be checked on receiving new blocks. pub maturity: u64, pub metadata: Vec, - pub sidechain_features: Option>, + pub sidechain_features: Option, } impl OutputFeatures { pub fn new( version: OutputFeaturesVersion, - flags: OutputType, + output_type: OutputType, maturity: u64, metadata: Vec, sidechain_features: Option, ) -> OutputFeatures { - let boxed_sidechain_features = sidechain_features.map(Box::new); OutputFeatures { version, - output_type: flags, + output_type, maturity, metadata, - sidechain_features: boxed_sidechain_features, + sidechain_features, } } @@ -98,6 +97,15 @@ impl OutputFeatures { } } + /// Creates template registration output features + pub fn for_template_registration(template_registration: CodeTemplateRegistration) -> OutputFeatures { + OutputFeatures { + output_type: OutputType::CodeTemplateRegistration, + sidechain_features: Some(SideChainFeatures::TemplateRegistration(template_registration)), + ..Default::default() + } + } + pub fn is_coinbase(&self) -> bool { matches!(self.output_type, OutputType::Coinbase) } @@ -124,7 +132,7 @@ impl ConsensusDecoding for OutputFeatures { let version = OutputFeaturesVersion::consensus_decode(reader)?; let maturity = u64::consensus_decode(reader)?; let flags = OutputType::consensus_decode(reader)?; - let sidechain_features = > as ConsensusDecoding>::consensus_decode(reader)?; + let sidechain_features = ConsensusDecoding::consensus_decode(reader)?; const MAX_METADATA_SIZE: usize = 1024; let metadata = as ConsensusDecoding>::consensus_decode(reader)?; Ok(Self { @@ -167,17 +175,43 @@ impl Display for OutputFeatures { #[cfg(test)] mod test { + use std::convert::TryInto; + + use 
tari_utilities::hex::from_hex; + use super::*; - use crate::consensus::check_consensus_encoding_correctness; + use crate::{ + consensus::{check_consensus_encoding_correctness, MaxSizeString}, + transactions::transaction_components::{BuildInfo, TemplateType}, + }; - #[allow(clippy::too_many_lines)] fn make_fully_populated_output_features(version: OutputFeaturesVersion) -> OutputFeatures { OutputFeatures { version, output_type: OutputType::Standard, maturity: u64::MAX, metadata: vec![1; 1024], - sidechain_features: Some(Box::new(SideChainFeatures {})), + sidechain_features: Some(SideChainFeatures::TemplateRegistration(CodeTemplateRegistration { + author_public_key: Default::default(), + author_signature: Default::default(), + template_name: MaxSizeString::from_str_checked("🚀🚀🚀🚀🚀🚀🚀🚀").unwrap(), + template_version: 1, + template_type: TemplateType::Wasm { abi_version: 123 }, + build_info: BuildInfo { + repo_url: "/dns/github.com/https/tari_project/wasm_examples".try_into().unwrap(), + commit_hash: from_hex("ea29c9f92973fb7eda913902ff6173c62cb1e5df") + .unwrap() + .try_into() + .unwrap(), + }, + binary_sha: from_hex("c93747637517e3de90839637f0ce1ab7c8a3800b") + .unwrap() + .try_into() + .unwrap(), + binary_url: "/dns4/github.com/https/tari_project/wasm_examples/releases/download/v0.0.6/coin.zip" + .try_into() + .unwrap(), + })), } } diff --git a/base_layer/core/src/transactions/transaction_components/output_type.rs b/base_layer/core/src/transactions/transaction_components/output_type.rs index cba40dcbf3..30043649d1 100644 --- a/base_layer/core/src/transactions/transaction_components/output_type.rs +++ b/base_layer/core/src/transactions/transaction_components/output_type.rs @@ -44,6 +44,8 @@ pub enum OutputType { Coinbase = 1, /// Output is a burned output and can not be spent ever. Burn = 2, + /// Output defines a new re-usable code template. 
+ CodeTemplateRegistration = 3, } impl OutputType { @@ -113,7 +115,8 @@ mod tests { assert_eq!(OutputType::from_byte(0), Some(OutputType::Standard)); assert_eq!(OutputType::from_byte(1), Some(OutputType::Coinbase)); assert_eq!(OutputType::from_byte(2), Some(OutputType::Burn)); - assert_eq!(OutputType::from_byte(255), None); + assert_eq!(OutputType::from_byte(3), Some(OutputType::CodeTemplateRegistration)); + assert_eq!(OutputType::from_byte(108), None); } #[test] diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs b/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs index 22ff243a37..ec3d641714 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs @@ -22,11 +22,10 @@ mod sidechain_features; pub use sidechain_features::SideChainFeatures; -// Length of FixedString -pub const FIXED_STR_LEN: usize = 32; -pub type FixedString = [u8; FIXED_STR_LEN]; +mod template_registration; use tari_crypto::{hash::blake2::Blake256, hash_domain, hashing::DomainSeparatedHasher}; +pub use template_registration::{BuildInfo, CodeTemplateRegistration, TemplateType}; hash_domain!( ContractAcceptanceHashDomain, diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs b/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs index b002614013..49eef7681e 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs @@ -20,19 +20,39 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::io::{Error, Read, Write}; +use std::io::{Error, ErrorKind, Read, Write}; use serde::{Deserialize, Serialize}; -use crate::consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized}; +use crate::{ + consensus::{read_byte, write_byte, ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized}, + transactions::transaction_components::CodeTemplateRegistration, +}; #[derive(Debug, Clone, Hash, PartialEq, Deserialize, Serialize, Eq)] -pub struct SideChainFeatures {} - -impl SideChainFeatures {} +pub enum SideChainFeatures { + TemplateRegistration(CodeTemplateRegistration), +} +impl SideChainFeatures { + pub fn as_byte(&self) -> u8 { + #[allow(clippy::enum_glob_use)] + use SideChainFeatures::*; + match self { + TemplateRegistration(_) => 0x01, + } + } +} impl ConsensusEncoding for SideChainFeatures { - fn consensus_encode(&self, _writer: &mut W) -> Result<(), Error> { + fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { + #[allow(clippy::enum_glob_use)] + use SideChainFeatures::*; + write_byte(writer, self.as_byte())?; + match self { + TemplateRegistration(template_registration) => { + template_registration.consensus_encode(writer)?; + }, + } Ok(()) } } @@ -40,19 +60,56 @@ impl ConsensusEncoding for SideChainFeatures { impl ConsensusEncodingSized for SideChainFeatures {} impl ConsensusDecoding for SideChainFeatures { - fn consensus_decode(_reader: &mut R) -> Result { - Ok(Self {}) + fn consensus_decode(reader: &mut R) -> Result { + #[allow(clippy::enum_glob_use)] + use SideChainFeatures::*; + let byte = read_byte(reader)?; + match byte { + 0x01 => Ok(TemplateRegistration(ConsensusDecoding::consensus_decode(reader)?)), + _ => Err(Error::new( + ErrorKind::InvalidData, + format!("Invalid SideChainFeatures byte '{}'", byte), + )), + } } } #[cfg(test)] -mod test { +mod tests { + use std::convert::TryInto; + + use tari_utilities::hex::from_hex; + use super::*; - use crate::consensus::check_consensus_encoding_correctness; + use crate::{ 
+ consensus::{check_consensus_encoding_correctness, MaxSizeString}, + transactions::transaction_components::{BuildInfo, TemplateType}, + }; #[test] - fn consensus_encoding() { - let features = SideChainFeatures {}; - check_consensus_encoding_correctness(features).unwrap(); + fn it_encodes_and_decodes_correctly() { + let subject = SideChainFeatures::TemplateRegistration(CodeTemplateRegistration { + author_public_key: Default::default(), + author_signature: Default::default(), + template_name: MaxSizeString::from_str_checked("🚀🚀🚀🚀🚀🚀🚀🚀").unwrap(), + template_version: 1, + template_type: TemplateType::Wasm { abi_version: 123 }, + build_info: BuildInfo { + repo_url: "/dns/github.com/https/tari_project/wasm_examples".try_into().unwrap(), + commit_hash: from_hex("ea29c9f92973fb7eda913902ff6173c62cb1e5df") + .unwrap() + .try_into() + .unwrap(), + }, + binary_sha: from_hex("c93747637517e3de90839637f0ce1ab7c8a3800b") + .unwrap() + .try_into() + .unwrap(), + binary_url: "/dns4/github.com/https/tari_project/wasm_examples/releases/download/v0.0.6/coin.zip" + .try_into() + .unwrap(), + }); + + check_consensus_encoding_correctness(subject).unwrap(); } } diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs b/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs new file mode 100644 index 0000000000..83f67156f6 --- /dev/null +++ b/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs @@ -0,0 +1,186 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use std::io::{Error, ErrorKind, Read, Write}; + +use serde::{Deserialize, Serialize}; +use tari_common_types::types::{PublicKey, Signature}; + +use crate::consensus::{ + read_byte, + ConsensusDecoding, + ConsensusEncoding, + ConsensusEncodingSized, + MaxSizeBytes, + MaxSizeString, +}; + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] +pub struct CodeTemplateRegistration { + pub author_public_key: PublicKey, + pub author_signature: Signature, + pub template_name: MaxSizeString<32>, + pub template_version: u16, + pub template_type: TemplateType, + pub build_info: BuildInfo, + pub binary_sha: MaxSizeBytes<32>, + pub binary_url: MaxSizeString<255>, +} + +impl ConsensusEncoding for CodeTemplateRegistration { + fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { + self.author_public_key.consensus_encode(writer)?; + self.author_signature.consensus_encode(writer)?; + self.template_name.consensus_encode(writer)?; + self.template_version.consensus_encode(writer)?; + self.template_type.consensus_encode(writer)?; + self.build_info.consensus_encode(writer)?; + self.binary_sha.consensus_encode(writer)?; + self.binary_url.consensus_encode(writer)?; + Ok(()) + } +} + +impl ConsensusEncodingSized for CodeTemplateRegistration {} + +impl ConsensusDecoding for CodeTemplateRegistration { + fn consensus_decode(reader: &mut R) -> Result { + let author_public_key = PublicKey::consensus_decode(reader)?; + let author_signature = Signature::consensus_decode(reader)?; + let template_name = MaxSizeString::consensus_decode(reader)?; + let template_version = u16::consensus_decode(reader)?; + let template_type = TemplateType::consensus_decode(reader)?; + let build_info = BuildInfo::consensus_decode(reader)?; + let binary_sha = MaxSizeBytes::consensus_decode(reader)?; + let binary_url = MaxSizeString::consensus_decode(reader)?; + + Ok(CodeTemplateRegistration { + author_public_key, + author_signature, + template_name, + template_version, + template_type, + 
build_info, + binary_sha, + binary_url, + }) + } +} + +// -------------------------------- TemplateType -------------------------------- // + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] +pub enum TemplateType { + /// Indicates that the template is a WASM module + Wasm { abi_version: u16 }, +} + +impl TemplateType { + fn as_type_byte(&self) -> u8 { + match self { + TemplateType::Wasm { .. } => 0, + } + } +} + +impl ConsensusEncoding for TemplateType { + fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { + writer.write_all(&[self.as_type_byte()])?; + match self { + TemplateType::Wasm { abi_version } => { + abi_version.consensus_encode(writer)?; + }, + } + + Ok(()) + } +} + +impl ConsensusEncodingSized for TemplateType {} + +impl ConsensusDecoding for TemplateType { + fn consensus_decode(reader: &mut R) -> Result { + let type_byte = read_byte(reader)?; + match type_byte { + 0 => { + let abi_version = u16::consensus_decode(reader)?; + Ok(TemplateType::Wasm { abi_version }) + }, + _ => Err(Error::new(ErrorKind::InvalidData, "Invalid template type")), + } + } +} + +// -------------------------------- BuildInfo -------------------------------- // + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] +pub struct BuildInfo { + pub repo_url: MaxSizeString<255>, + pub commit_hash: MaxSizeBytes<32>, +} + +impl ConsensusEncoding for BuildInfo { + fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { + self.repo_url.consensus_encode(writer)?; + self.commit_hash.consensus_encode(writer)?; + Ok(()) + } +} + +impl ConsensusEncodingSized for BuildInfo {} + +impl ConsensusDecoding for BuildInfo { + fn consensus_decode(reader: &mut R) -> Result { + let repo_url = MaxSizeString::consensus_decode(reader)?; + let commit_hash = MaxSizeBytes::consensus_decode(reader)?; + Ok(Self { repo_url, commit_hash }) + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use super::*; + use 
crate::consensus::check_consensus_encoding_correctness; + + #[test] + fn it_encodes_and_decodes_correctly() { + let subject = CodeTemplateRegistration { + author_public_key: Default::default(), + author_signature: Default::default(), + template_name: "🐢 all the way down".try_into().unwrap(), + template_version: 0xff, + template_type: TemplateType::Wasm { abi_version: 0xffff }, + build_info: BuildInfo { + repo_url: "https://github.com/tari-project/wasm_template.git".try_into().unwrap(), + commit_hash: Default::default(), + }, + binary_sha: Default::default(), + binary_url: "/dns4/github.com/tcp/443/http/tari-project/wasm_examples/releases/download/v0.0.6/coin.zip" + .try_into() + .unwrap(), + }; + + check_consensus_encoding_correctness(subject).unwrap(); + } +} From 96a30c1662a88e10059da17d114148fe06bf9c43 Mon Sep 17 00:00:00 2001 From: Martin Stefcek <35243812+Cifko@users.noreply.github.com> Date: Tue, 6 Sep 2022 14:58:41 +0200 Subject: [PATCH 02/21] feat: add validator node registration (#4507) --- .../tari_app_grpc/proto/base_node.proto | 22 ++ applications/tari_app_grpc/proto/block.proto | 4 + .../tari_app_grpc/proto/transaction.proto | 2 + applications/tari_app_grpc/proto/wallet.proto | 15 ++ .../src/conversions/active_validator_node.rs | 59 ++++++ .../src/conversions/block_header.rs | 2 + .../tari_app_grpc/src/conversions/mod.rs | 1 + .../src/conversions/new_block_template.rs | 2 + .../src/conversions/output_features.rs | 15 ++ applications/tari_base_node/src/builder.rs | 2 + .../src/grpc/base_node_grpc_server.rs | 96 +++++++++ applications/tari_base_node/src/recovery.rs | 13 +- .../src/automation/commands.rs | 36 +++- applications/tari_console_wallet/src/cli.rs | 10 + .../src/grpc/wallet_grpc_server.rs | 42 ++++ .../tari_console_wallet/src/wallet_modes.rs | 1 + .../comms_interface/comms_request.rs | 8 + .../comms_interface/comms_response.rs | 6 +- .../comms_interface/inbound_handlers.rs | 14 +- .../comms_interface/local_interface.rs | 30 +++ 
.../src/base_node/sync/header_sync/error.rs | 8 + .../base_node/sync/header_sync/validator.rs | 8 +- base_layer/core/src/blocks/block_header.rs | 11 +- base_layer/core/src/blocks/genesis_block.rs | 7 + .../src/blocks/new_blockheader_template.rs | 3 + .../chain_storage/active_validator_node.rs | 32 +++ base_layer/core/src/chain_storage/async_db.rs | 7 + .../src/chain_storage/blockchain_backend.rs | 4 + .../src/chain_storage/blockchain_database.rs | 20 ++ .../core/src/chain_storage/db_transaction.rs | 13 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 192 +++++++++++++++++- base_layer/core/src/chain_storage/mod.rs | 3 + .../tests/blockchain_database.rs | 11 +- .../core/src/consensus/consensus_constants.rs | 13 ++ base_layer/core/src/lib.rs | 8 + .../src/proof_of_work/monero_rx/helpers.rs | 15 ++ base_layer/core/src/proto/block.proto | 4 + base_layer/core/src/proto/block.rs | 2 + base_layer/core/src/proto/block_header.rs | 2 + base_layer/core/src/proto/transaction.proto | 2 + base_layer/core/src/proto/transaction.rs | 12 +- .../core/src/test_helpers/blockchain.rs | 18 +- base_layer/core/src/test_helpers/mod.rs | 3 +- .../transaction_components/error.rs | 2 + .../transaction_components/kernel_features.rs | 6 + .../transaction_components/output_features.rs | 30 ++- .../transaction_output.rs | 16 ++ .../block_validators/async_validator.rs | 24 +++ .../src/validation/block_validators/test.rs | 1 + base_layer/core/src/validation/error.rs | 2 + base_layer/core/src/validation/test.rs | 9 +- .../chain_storage_tests/chain_backend.rs | 11 +- .../chain_storage_tests/chain_storage.rs | 21 +- .../core/tests/helpers/block_builders.rs | 13 +- base_layer/core/tests/mempool.rs | 2 + .../wallet/src/transaction_service/handle.rs | 36 +++- .../wallet/src/transaction_service/service.rs | 53 ++++- base_layer/wallet_ffi/src/lib.rs | 10 +- 58 files changed, 959 insertions(+), 55 deletions(-) create mode 100644 applications/tari_app_grpc/src/conversions/active_validator_node.rs create mode 
100644 base_layer/core/src/chain_storage/active_validator_node.rs diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 0df29e5b98..d5cb26a707 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -88,6 +88,9 @@ service BaseNode { rpc ListConnectedPeers(Empty) returns (ListConnectedPeersResponse); // Get mempool stats rpc GetMempoolStats(Empty) returns (MempoolStatsResponse); + // Get VNs + rpc GetActiveValidatorNodes(GetActiveValidatorNodesRequest) returns (stream ActiveValidatorNode); + rpc GetCommittee(GetCommitteeRequest) returns (GetCommitteeResponse); } @@ -434,3 +437,22 @@ message MempoolStatsResponse { uint64 unconfirmed_weight = 4; } +message GetActiveValidatorNodesRequest { + uint64 height = 1; +} + +message ActiveValidatorNode { + bytes shard_key = 1; + uint64 from_height = 2; + uint64 to_height = 3; + bytes public_key = 4; +} + +message GetCommitteeRequest { + uint64 height = 1; + bytes shard_key = 2; +} + +message GetCommitteeResponse { + repeated bytes public_key = 1; +} \ No newline at end of file diff --git a/applications/tari_app_grpc/proto/block.proto b/applications/tari_app_grpc/proto/block.proto index 821487e224..04904a91a8 100644 --- a/applications/tari_app_grpc/proto/block.proto +++ b/applications/tari_app_grpc/proto/block.proto @@ -61,6 +61,8 @@ message BlockHeader { uint64 output_mmr_size = 14; // Sum of script offsets for all kernels in this block. bytes total_script_offset = 15; + // Merkle root of validator nodes + bytes validator_node_merkle_root = 16; } // Metadata required for validating the Proof of Work calculation @@ -117,6 +119,8 @@ message NewBlockHeaderTemplate { // uint64 target_difficulty = 6; // Sum of script offsets for all kernels in this block. 
bytes total_script_offset = 7; + // Merkle root of validator nodes + bytes validator_node_merkle_root = 8; } // The new block template is used constructing a new partial block, allowing a miner to added the coinbase utxo and as a final step the Base node to add the MMR roots to the header. diff --git a/applications/tari_app_grpc/proto/transaction.proto b/applications/tari_app_grpc/proto/transaction.proto index a713f7f7f8..4e78b71378 100644 --- a/applications/tari_app_grpc/proto/transaction.proto +++ b/applications/tari_app_grpc/proto/transaction.proto @@ -124,6 +124,8 @@ message OutputFeatures { uint64 maturity = 3; bytes metadata = 4; SideChainFeatures sidechain_features = 5; + bytes validator_node_public_key = 6; + Signature validator_node_signature = 7; } diff --git a/applications/tari_app_grpc/proto/wallet.proto b/applications/tari_app_grpc/proto/wallet.proto index a873130a71..7e46740fb0 100644 --- a/applications/tari_app_grpc/proto/wallet.proto +++ b/applications/tari_app_grpc/proto/wallet.proto @@ -78,6 +78,8 @@ service Wallet { rpc StreamTransactionEvents(TransactionEventRequest) returns (stream TransactionEventResponse); rpc SeedWords(Empty) returns (SeedWordsResponse); rpc DeleteSeedWordsFile(Empty) returns (FileDeletedResponse); + + rpc RegisterValidatorNode(RegisterValidatorNodeRequest) returns (RegisterValidatorNodeResponse); } message GetVersionRequest { } @@ -326,4 +328,17 @@ message SeedWordsResponse { message FileDeletedResponse { +} + +message RegisterValidatorNodeRequest { + string validator_node_public_key = 1; + Signature validator_node_signature = 2; + uint64 fee_per_gram = 3; + string message = 4; +} + +message RegisterValidatorNodeResponse { + uint64 transaction_id = 1; + bool is_success = 2; + string failure_message = 3; } \ No newline at end of file diff --git a/applications/tari_app_grpc/src/conversions/active_validator_node.rs b/applications/tari_app_grpc/src/conversions/active_validator_node.rs new file mode 100644 index 
0000000000..4c22cd8116 --- /dev/null +++ b/applications/tari_app_grpc/src/conversions/active_validator_node.rs @@ -0,0 +1,59 @@ +// Copyright 2020. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use std::convert::{TryFrom, TryInto}; + +use tari_common_types::types::PublicKey; +use tari_core::chain_storage::ActiveValidatorNode; +use tari_utilities::ByteArray; + +use crate::tari_rpc as grpc; + +impl TryFrom<ActiveValidatorNode> for grpc::ActiveValidatorNode { + type Error = String; + + fn try_from(active_validator_node: ActiveValidatorNode) -> Result<Self, Self::Error> { + Ok(Self { + shard_key: active_validator_node.shard_key.to_vec(), + from_height: active_validator_node.from_height, + to_height: active_validator_node.to_height, + public_key: active_validator_node.public_key.to_vec(), + }) + } +} + +impl TryFrom<grpc::ActiveValidatorNode> for ActiveValidatorNode { + type Error = String; + + fn try_from(active_validator_node: grpc::ActiveValidatorNode) -> Result<Self, Self::Error> { + let shard_key = active_validator_node.shard_key.try_into().unwrap(); + let public_key = + PublicKey::from_vec(&active_validator_node.public_key).map_err(|_| "Could not convert public key".to_string())?; + + Ok(Self { + shard_key, + from_height: active_validator_node.from_height, + to_height: active_validator_node.to_height, + public_key, + }) + } +} diff --git a/applications/tari_app_grpc/src/conversions/block_header.rs b/applications/tari_app_grpc/src/conversions/block_header.rs index f1a72173b1..18705ff909 100644 --- a/applications/tari_app_grpc/src/conversions/block_header.rs +++ b/applications/tari_app_grpc/src/conversions/block_header.rs @@ -53,6 +53,7 @@ impl From<BlockHeader> for grpc::BlockHeader { pow_algo: pow_algo.as_u64(), pow_data: h.pow.pow_data, }), + validator_node_merkle_root: h.validator_node_merkle_root, } } } @@ -91,6 +92,7 @@ impl TryFrom<grpc::BlockHeader> for BlockHeader { total_script_offset, nonce: header.nonce, pow, + validator_node_merkle_root: header.validator_node_merkle_root, }) } } diff --git a/applications/tari_app_grpc/src/conversions/mod.rs b/applications/tari_app_grpc/src/conversions/mod.rs index c08c3d0cdb..69380b8d29 100644 --- a/applications/tari_app_grpc/src/conversions/mod.rs +++ b/applications/tari_app_grpc/src/conversions/mod.rs @@ -20,6 +20,7 @@ //
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +mod active_validator_node; mod aggregate_body; mod base_node_state; mod block; diff --git a/applications/tari_app_grpc/src/conversions/new_block_template.rs b/applications/tari_app_grpc/src/conversions/new_block_template.rs index 54e5a58c03..86a176c4de 100644 --- a/applications/tari_app_grpc/src/conversions/new_block_template.rs +++ b/applications/tari_app_grpc/src/conversions/new_block_template.rs @@ -45,6 +45,7 @@ impl TryFrom for grpc::NewBlockTemplate { pow_algo: block.header.pow.pow_algo.as_u64(), pow_data: block.header.pow.pow_data, }), + validator_node_merkle_root: block.header.validator_node_merkle_root, }; Ok(Self { body: Some(grpc::AggregateBody { @@ -91,6 +92,7 @@ impl TryFrom for NewBlockTemplate { total_kernel_offset, total_script_offset, pow, + validator_node_merkle_root: header.validator_node_merkle_root, }; let body = block .body diff --git a/applications/tari_app_grpc/src/conversions/output_features.rs b/applications/tari_app_grpc/src/conversions/output_features.rs index 15e6e40325..50b4603309 100644 --- a/applications/tari_app_grpc/src/conversions/output_features.rs +++ b/applications/tari_app_grpc/src/conversions/output_features.rs @@ -22,12 +22,14 @@ use std::convert::{TryFrom, TryInto}; +use tari_common_types::types::PublicKey; use tari_core::transactions::transaction_components::{ OutputFeatures, OutputFeaturesVersion, OutputType, SideChainFeatures, }; +use tari_utilities::ByteArray; use crate::tari_rpc as grpc; @@ -46,6 +48,9 @@ impl TryFrom for OutputFeatures { .try_into() .map_err(|_| "Invalid output type: overflow")?; + let validator_node_public_key = PublicKey::from_vec(&features.validator_node_public_key).ok(); + let validator_node_signature = features.validator_node_signature.map(|s| s.try_into()).transpose()?; + Ok(OutputFeatures::new( 
OutputFeaturesVersion::try_from( u8::try_from(features.version).map_err(|_| "Invalid version: overflowed u8")?, @@ -54,6 +59,8 @@ impl TryFrom for OutputFeatures { features.maturity, features.metadata, sidechain_features, + validator_node_public_key, + validator_node_signature, )) } } @@ -66,6 +73,14 @@ impl From for grpc::OutputFeatures { maturity: features.maturity, metadata: features.metadata, sidechain_features: features.sidechain_features.map(Into::into), + validator_node_public_key: features + .validator_node_public_key + .map(|pk| pk.as_bytes().to_vec()) + .unwrap_or_default(), + validator_node_signature: features.validator_node_signature.map(|s| grpc::Signature { + public_nonce: Vec::from(s.get_public_nonce().as_bytes()), + signature: Vec::from(s.get_signature().as_bytes()), + }), } } } diff --git a/applications/tari_base_node/src/builder.rs b/applications/tari_base_node/src/builder.rs index 8eff0b5f0b..9ea1b0a661 100644 --- a/applications/tari_base_node/src/builder.rs +++ b/applications/tari_base_node/src/builder.rs @@ -173,9 +173,11 @@ pub async fn configure_and_initialize_node( ) -> Result { let result = match &app_config.base_node.db_type { DatabaseType::Lmdb => { + let rules = ConsensusManager::builder(app_config.base_node.network).build(); let backend = create_lmdb_database( app_config.base_node.lmdb_path.as_path(), app_config.base_node.lmdb.clone(), + rules, ) .map_err(|e| ExitError::new(ExitCode::DatabaseError, e))?; build_node_context(backend, app_config, node_identity, interrupt_signal).await? 
diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 3fbe992ea6..a7b70eb5bc 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -134,6 +134,7 @@ impl BaseNodeGrpcServer {} #[tonic::async_trait] impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { type FetchMatchingUtxosStream = mpsc::Receiver>; + type GetActiveValidatorNodesStream = mpsc::Receiver>; type GetBlocksStream = mpsc::Receiver>; type GetMempoolTransactionsStream = mpsc::Receiver>; type GetNetworkDifficultyStream = mpsc::Receiver>; @@ -1576,6 +1577,101 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Ok(Response::new(response)) } + + async fn get_committee( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let report_error_flag = self.report_error_flag(); + debug!(target: LOG_TARGET, "Incoming GRPC request for GetCommittee"); + let mut handler = self.node_service.clone(); + let response = handler + .get_committee(request.height, request.shard_key.try_into().unwrap()) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error {}", e); + report_error(report_error_flag, Status::internal(e.to_string())) + })? 
+ .iter() + .map(|a| a.shard_key.to_vec()) + .collect(); + Ok(Response::new(tari_rpc::GetCommitteeResponse { public_key: response })) + } + + async fn get_active_validator_nodes( + &self, + request: Request<tari_rpc::GetActiveValidatorNodesRequest>, + ) -> Result<Response<Self::GetActiveValidatorNodesStream>, Status> { + let request = request.into_inner(); + let report_error_flag = self.report_error_flag(); + debug!(target: LOG_TARGET, "Incoming GRPC request for GetActiveValidatorNodes"); + + let mut handler = self.node_service.clone(); + let (mut tx, rx) = mpsc::channel(1000); + + task::spawn(async move { + let active_validator_nodes = match handler.get_active_validator_nodes(request.height).await { + Err(err) => { + warn!(target: LOG_TARGET, "Error communicating with base node: {}", err,); + return; + }, + Ok(data) => data, + }; + for active_validator_node in active_validator_nodes { + let active_validator_node = match tari_rpc::ActiveValidatorNode::try_from(active_validator_node) { + Ok(t) => t, + Err(e) => { + warn!( + target: LOG_TARGET, + "Error converting active validator node for GRPC: {}", e + ); + match tx + .send(Err(report_error( + report_error_flag, + Status::internal("Error converting active validator node"), + ))) + .await + { + Ok(_) => (), + Err(send_err) => { + warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) + }, + } + return; + }, + }; + + match tx.send(Ok(active_validator_node)).await { + Ok(_) => (), + Err(err) => { + warn!( + target: LOG_TARGET, + "Error sending active validator node via GRPC: {}", err + ); + match tx + .send(Err(report_error( + report_error_flag, + Status::unknown("Error sending data"), + ))) + .await + { + Ok(_) => (), + Err(send_err) => { + warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) + }, + } + return; + }, + } + } + }); + debug!( + target: LOG_TARGET, + "Sending GetActiveValidatorNodes response stream to client" + ); + Ok(Response::new(rx)) + } } enum BlockGroupType { diff --git a/applications/tari_base_node/src/recovery.rs
b/applications/tari_base_node/src/recovery.rs index eb9eea3b4b..fd94dbf6b4 100644 --- a/applications/tari_base_node/src/recovery.rs +++ b/applications/tari_base_node/src/recovery.rs @@ -74,22 +74,23 @@ pub fn initiate_recover_db(config: &BaseNodeConfig) -> Result<(), ExitError> { pub async fn run_recovery(node_config: &BaseNodeConfig) -> Result<(), anyhow::Error> { println!("Starting recovery mode"); + let rules = ConsensusManager::builder(node_config.network).build(); let (temp_db, main_db, temp_path) = match &node_config.db_type { DatabaseType::Lmdb => { - let backend = create_lmdb_database(&node_config.lmdb_path, node_config.lmdb.clone()).map_err(|e| { - error!(target: LOG_TARGET, "Error opening db: {}", e); - anyhow!("Could not open DB: {}", e) - })?; + let backend = create_lmdb_database(&node_config.lmdb_path, node_config.lmdb.clone(), rules.clone()) + .map_err(|e| { + error!(target: LOG_TARGET, "Error opening db: {}", e); + anyhow!("Could not open DB: {}", e) + })?; let temp_path = temp_dir().join("temp_recovery"); - let temp = create_lmdb_database(&temp_path, node_config.lmdb.clone()).map_err(|e| { + let temp = create_lmdb_database(&temp_path, node_config.lmdb.clone(), rules.clone()).map_err(|e| { error!(target: LOG_TARGET, "Error opening recovery db: {}", e); anyhow!("Could not open recovery DB: {}", e) })?; (temp, backend, temp_path) }, }; - let rules = ConsensusManager::builder(node_config.network).build(); let factories = CryptoFactories::default(); let randomx_factory = RandomXFactory::new(node_config.max_randomx_vms); let validators = Validators::new( diff --git a/applications/tari_console_wallet/src/automation/commands.rs b/applications/tari_console_wallet/src/automation/commands.rs index 4e686bb468..484de6dcec 100644 --- a/applications/tari_console_wallet/src/automation/commands.rs +++ b/applications/tari_console_wallet/src/automation/commands.rs @@ -41,7 +41,7 @@ use tari_app_grpc::authentication::salted_password::create_salted_hashed_passwor use 
tari_common_types::{ emoji::EmojiId, transaction::TxId, - types::{CommitmentFactory, FixedHash, PublicKey}, + types::{CommitmentFactory, FixedHash, PublicKey, Signature}, }; use tari_comms::{ connectivity::{ConnectivityEvent, ConnectivityRequester}, @@ -53,6 +53,7 @@ use tari_core::transactions::{ tari_amount::{uT, MicroTari, Tari}, transaction_components::{OutputFeatures, TransactionOutput, UnblindedOutput}, }; +use tari_crypto::ristretto::RistrettoSecretKey; use tari_utilities::{hex::Hex, ByteArray}; use tari_wallet::{ connectivity_service::WalletConnectivityInterface, @@ -176,6 +177,24 @@ pub async fn claim_htlc_refund( Ok(tx_id) } +pub async fn register_validator_node( + mut wallet_transaction_service: TransactionServiceHandle, + validator_node_public_key: PublicKey, + validator_node_signature: Signature, + fee_per_gram: MicroTari, + message: String, +) -> Result { + wallet_transaction_service + .register_validator_node( + validator_node_public_key, + validator_node_signature, + fee_per_gram, + message, + ) + .await + .map_err(CommandError::TransactionServiceError) +} + /// Send a one-sided transaction to a recipient pub async fn send_one_sided( mut wallet_transaction_service: TransactionServiceHandle, @@ -795,6 +814,21 @@ pub async fn command_runner( ); } }, + RegisterValidatorNode(args) => { + let tx_id = register_validator_node( + transaction_service.clone(), + args.validator_node_public_key.into(), + Signature::new( + args.validator_node_public_nonce.into(), + RistrettoSecretKey::from_vec(&args.validator_node_signature).unwrap(), + ), + config.fee_per_gram * uT, + args.message, + ) + .await?; + debug!(target: LOG_TARGET, "Registering VN tx_id {}", tx_id); + tx_ids.push(tx_id); + }, } } diff --git a/applications/tari_console_wallet/src/cli.rs b/applications/tari_console_wallet/src/cli.rs index 3bb56faa94..e76574267f 100644 --- a/applications/tari_console_wallet/src/cli.rs +++ b/applications/tari_console_wallet/src/cli.rs @@ -132,6 +132,7 @@ pub enum 
CliCommands { ClaimShaAtomicSwapRefund(ClaimShaAtomicSwapRefundArgs), RevalidateWalletDb, HashGrpcPassword(HashPasswordArgs), + RegisterValidatorNode(RegisterValidatorNodeArgs), } #[derive(Debug, Args, Clone)] @@ -260,3 +261,12 @@ pub struct HashPasswordArgs { /// If true, only output the hashed password and the salted password. Otherwise a usage explanation is output. pub short: bool, } + +#[derive(Debug, Args, Clone)] +pub struct RegisterValidatorNodeArgs { + pub validator_node_public_key: UniPublicKey, + pub validator_node_public_nonce: UniPublicKey, + pub validator_node_signature: Vec, + #[clap(short, long, default_value = "Registering VN")] + pub message: String, +} diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index d7acb6a44b..aeece46823 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -67,6 +67,8 @@ use tari_app_grpc::{ GetVersionResponse, ImportUtxosRequest, ImportUtxosResponse, + RegisterValidatorNodeRequest, + RegisterValidatorNodeResponse, RevalidateRequest, RevalidateResponse, SeedWordsResponse, @@ -967,6 +969,46 @@ impl wallet_server::Wallet for WalletGrpcServer { Ok(Response::new(CreateTemplateRegistrationResponse {})) } + + async fn register_validator_node( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let mut transaction_service = self.get_transaction_service(); + let validator_node_public_key = CommsPublicKey::from_hex(&request.validator_node_public_key) + .map_err(|_| Status::internal("Destination address is malformed".to_string()))?; + let validator_node_signature = request + .validator_node_signature + .ok_or_else(|| Status::invalid_argument("Validator node signature is missing!"))? 
+ .try_into() + .unwrap(); + + let response = match transaction_service + .register_validator_node( + validator_node_public_key, + validator_node_signature, + request.fee_per_gram.into(), + request.message, + ) + .await + { + Ok(tx) => RegisterValidatorNodeResponse { + transaction_id: tx.as_u64(), + is_success: true, + failure_message: Default::default(), + }, + Err(e) => { + error!(target: LOG_TARGET, "Transaction service error: {}", e); + RegisterValidatorNodeResponse { + transaction_id: Default::default(), + is_success: false, + failure_message: e.to_string(), + } + }, + }; + Ok(Response::new(response)) + } } async fn handle_completed_tx( diff --git a/applications/tari_console_wallet/src/wallet_modes.rs b/applications/tari_console_wallet/src/wallet_modes.rs index 6d287138b8..2e307cb117 100644 --- a/applications/tari_console_wallet/src/wallet_modes.rs +++ b/applications/tari_console_wallet/src/wallet_modes.rs @@ -463,6 +463,7 @@ mod test { CliCommands::ClaimShaAtomicSwapRefund(_) => {}, CliCommands::RevalidateWalletDb => {}, CliCommands::HashGrpcPassword(_) => {}, + CliCommands::RegisterValidatorNode(_) => {}, } } assert!(get_balance && send_tari && make_it_rain && coin_split && discover_peer && whois); diff --git a/base_layer/core/src/base_node/comms_interface/comms_request.rs b/base_layer/core/src/base_node/comms_interface/comms_request.rs index 60d35ff753..9c77b2bf1f 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_request.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_request.rs @@ -56,6 +56,8 @@ pub enum NodeCommsRequest { GetNewBlock(NewBlockTemplate), FetchKernelByExcessSig(Signature), FetchMempoolTransactionsByExcessSigs { excess_sigs: Vec }, + FetchValidatorNodesKeys { height: u64 }, + FetchCommittee { height: u64, shard: [u8; 32] }, } #[derive(Debug, Serialize, Deserialize)] @@ -94,6 +96,12 @@ impl Display for NodeCommsRequest { FetchMempoolTransactionsByExcessSigs { .. 
} => { write!(f, "FetchMempoolTransactionsByExcessSigs") }, + FetchValidatorNodesKeys { height } => { + write!(f, "FetchValidatorNodesKeys ({})", height) + }, + FetchCommittee { height, shard } => { + write!(f, "FetchCommittee height ({}), shard({:?})", height, shard) + }, } } } diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 81c3173160..9dde44f55c 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -32,7 +32,7 @@ use tari_common_types::{ use crate::{ blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, - chain_storage::UtxoMinedInfo, + chain_storage::{ActiveValidatorNode, UtxoMinedInfo}, proof_of_work::Difficulty, transactions::transaction_components::{Transaction, TransactionKernel, TransactionOutput}, }; @@ -71,6 +71,8 @@ pub enum NodeCommsResponse { FetchOutputsByContractIdResponse { outputs: Vec, }, + FetchValidatorNodesKeysResponse(Vec), + FetchCommitteeResponse(Vec), } impl Display for NodeCommsResponse { @@ -109,6 +111,8 @@ impl Display for NodeCommsResponse { ), FetchOutputsForBlockResponse { .. } => write!(f, "FetchConstitutionsResponse"), FetchOutputsByContractIdResponse { .. 
} => write!(f, "FetchOutputsByContractIdResponse"), + FetchValidatorNodesKeysResponse(_) => write!(f, "FetchValidatorNodesKeysResponse"), + FetchCommitteeResponse(_) => write!(f, "FetchCommitteeResponse"), } } } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 1067179be8..c5419cc62e 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -274,8 +274,8 @@ where B: BlockchainBackend + 'static }, NodeCommsRequest::GetNewBlockTemplate(request) => { let best_block_header = self.blockchain_db.fetch_tip_header().await?; - - let mut header = BlockHeader::from_previous(best_block_header.header()); + let vns = self.blockchain_db.get_validator_nodes_mr().await?; + let mut header = BlockHeader::from_previous(best_block_header.header(), vns); let constants = self.consensus_manager.consensus_constants(header.height); header.version = constants.blockchain_version(); header.pow.pow_algo = request.algo; @@ -363,6 +363,16 @@ where B: BlockchainBackend + 'static }, )) }, + NodeCommsRequest::FetchValidatorNodesKeys { height } => { + let active_validator_nodes = self.blockchain_db.fetch_active_validator_nodes(height).await?; + Ok(NodeCommsResponse::FetchValidatorNodesKeysResponse( + active_validator_nodes, + )) + }, + NodeCommsRequest::FetchCommittee { height, shard } => { + let validator_nodes = self.blockchain_db.fetch_committee(height, shard).await?; + Ok(NodeCommsResponse::FetchCommitteeResponse(validator_nodes)) + }, } } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index ee3789a663..b51373f218 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -38,6 +38,7 @@ use crate::{ 
NodeCommsResponse, }, blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, + chain_storage::ActiveValidatorNode, proof_of_work::PowAlgorithm, transactions::transaction_components::{TransactionKernel, TransactionOutput}, }; @@ -271,4 +272,33 @@ impl LocalNodeCommsInterface { _ => Err(CommsInterfaceError::UnexpectedApiResponse), } } + + pub async fn get_active_validator_nodes( + &mut self, + height: u64, + ) -> Result, CommsInterfaceError> { + match self + .request_sender + .call(NodeCommsRequest::FetchValidatorNodesKeys { height }) + .await?? + { + NodeCommsResponse::FetchValidatorNodesKeysResponse(validator_node) => Ok(validator_node), + _ => Err(CommsInterfaceError::UnexpectedApiResponse), + } + } + + pub async fn get_committee( + &mut self, + height: u64, + shard: [u8; 32], + ) -> Result, CommsInterfaceError> { + match self + .request_sender + .call(NodeCommsRequest::FetchCommittee { height, shard }) + .await?? + { + NodeCommsResponse::FetchCommitteeResponse(validator_node) => Ok(validator_node), + _ => Err(CommsInterfaceError::UnexpectedApiResponse), + } + } } diff --git a/base_layer/core/src/base_node/sync/header_sync/error.rs b/base_layer/core/src/base_node/sync/header_sync/error.rs index c744c5e4c5..accd093510 100644 --- a/base_layer/core/src/base_node/sync/header_sync/error.rs +++ b/base_layer/core/src/base_node/sync/header_sync/error.rs @@ -92,4 +92,12 @@ pub enum BlockHeaderSyncError { }, #[error("All sync peers exceeded max allowed latency")] AllSyncPeersExceedLatency, + #[error( + "Validator node MMR at height {height} is not correct. 
Expected {actual} to equal the computed {computed}" + )] + ValidatorNodeMmmr { + height: u64, + actual: String, + computed: String, + }, } diff --git a/base_layer/core/src/base_node/sync/header_sync/validator.rs b/base_layer/core/src/base_node/sync/header_sync/validator.rs index ce2d5e0c66..3bff458033 100644 --- a/base_layer/core/src/base_node/sync/header_sync/validator.rs +++ b/base_layer/core/src/base_node/sync/header_sync/validator.rs @@ -261,7 +261,7 @@ mod test { let (validator, db) = setup(); let mut tip = db.fetch_tip_header().await.unwrap(); for _ in 0..n { - let mut header = BlockHeader::from_previous(tip.header()); + let mut header = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); // Needed to have unique keys for the blockchain db mmr count indexes (MDB_KEY_EXIST error) header.kernel_mmr_size += 1; header.output_mmr_size += 1; @@ -316,11 +316,11 @@ mod test { let (mut validator, _, tip) = setup_with_headers(1).await; validator.initialize_state(tip.hash()).await.unwrap(); assert!(validator.valid_headers().is_empty()); - let next = BlockHeader::from_previous(tip.header()); + let next = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); validator.validate(next).unwrap(); assert_eq!(validator.valid_headers().len(), 1); let tip = validator.valid_headers().last().cloned().unwrap(); - let next = BlockHeader::from_previous(tip.header()); + let next = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); validator.validate(next).unwrap(); assert_eq!(validator.valid_headers().len(), 2); } @@ -329,7 +329,7 @@ mod test { async fn it_fails_if_height_is_not_serial() { let (mut validator, _, tip) = setup_with_headers(2).await; validator.initialize_state(tip.hash()).await.unwrap(); - let mut next = BlockHeader::from_previous(tip.header()); + let mut next = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); 
next.height = 10; let err = validator.validate(next).unwrap_err(); unpack_enum!(BlockHeaderSyncError::InvalidBlockHeight { expected, actual } = err); diff --git a/base_layer/core/src/blocks/block_header.rs b/base_layer/core/src/blocks/block_header.rs index 74fcf2393a..186ac3cb96 100644 --- a/base_layer/core/src/blocks/block_header.rs +++ b/base_layer/core/src/blocks/block_header.rs @@ -57,6 +57,7 @@ use crate::{ blocks::BlocksHashDomain, consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, DomainSeparatedConsensusHasher}, proof_of_work::{PowAlgorithm, PowError, ProofOfWork}, + ValidatorNodeMmr, }; #[derive(Debug, Error)] @@ -110,11 +111,14 @@ pub struct BlockHeader { pub nonce: u64, /// Proof of work summary pub pow: ProofOfWork, + // Merkle root of all active validator node. + pub validator_node_merkle_root: Vec, } impl BlockHeader { /// Create a new, default header with the given version. pub fn new(blockchain_version: u16) -> BlockHeader { + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); BlockHeader { version: blockchain_version, height: 0, @@ -130,6 +134,7 @@ impl BlockHeader { total_script_offset: BlindingFactor::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), } } @@ -145,7 +150,7 @@ impl BlockHeader { /// Create a new block header using relevant data from the previous block. The height is incremented by one, the /// previous block hash is set, the timestamp is set to the current time, and the kernel/output mmr sizes are set to /// the previous block. All other fields, including proof of work are set to defaults. 
- pub fn from_previous(prev: &BlockHeader) -> BlockHeader { + pub fn from_previous(prev: &BlockHeader, validator_node_merkle_root: Vec) -> BlockHeader { let prev_hash = prev.hash(); BlockHeader { version: prev.version, @@ -162,6 +167,7 @@ impl BlockHeader { total_script_offset: BlindingFactor::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root, } } @@ -263,6 +269,7 @@ impl From for BlockHeader { total_script_offset: header_template.total_script_offset, nonce: 0, pow: header_template.pow, + validator_node_merkle_root: header_template.validator_node_merkle_root, } } } @@ -362,7 +369,7 @@ mod test { h1.nonce = 7600; assert_eq!(h1.height, 0, "Default block height"); let hash1 = h1.hash(); - let h2 = BlockHeader::from_previous(&h1); + let h2 = BlockHeader::from_previous(&h1, h1.validator_node_merkle_root.clone()); assert_eq!(h2.height, h1.height + 1, "Incrementing block height"); assert!(h2.timestamp > h1.timestamp, "Timestamp"); assert_eq!(h2.prev_hash, hash1, "Previous hash"); diff --git a/base_layer/core/src/blocks/genesis_block.rs b/base_layer/core/src/blocks/genesis_block.rs index 22eb542971..dad7848d8b 100644 --- a/base_layer/core/src/blocks/genesis_block.rs +++ b/base_layer/core/src/blocks/genesis_block.rs @@ -47,6 +47,7 @@ use crate::{ TransactionOutputVersion, }, }, + ValidatorNodeMmr, }; /// Returns the genesis block for the selected network. 
@@ -161,6 +162,7 @@ fn get_igor_genesis_block_raw() -> Block { let genesis = DateTime::parse_from_rfc2822("08 Aug 2022 10:00:00 +0200").unwrap(); #[allow(clippy::cast_sign_loss)] let timestamp = genesis.timestamp() as u64; + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); Block { header: BlockHeader { version: 0, @@ -187,6 +189,7 @@ fn get_igor_genesis_block_raw() -> Block { pow_algo: PowAlgorithm::Sha3, pow_data: vec![], }, + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }, body, } @@ -281,6 +284,8 @@ fn get_esmeralda_genesis_block_raw() -> Block { maturity: 6, metadata: Vec::new(), sidechain_features: None, + validator_node_public_key: None, + validator_node_signature: None, }, Commitment::from_hex("2afed894ae877b5e9c7450cc0e29de46aeb6b118cd3d6b0a77da8c8156a1e234").unwrap(), BulletRangeProof::from_hex("0136b44930772f85b17139dd8e83789f84ccc2134cf6b2416d908fb8403efa4d3bc0247ec4afbbb1f7f7498d129226f26199eec988bd3e5ccce2572fd7aee16f2c4a2d710fac0e3bc1d612d700af2265e230ae1c45e3b0e4d3aab43cb87534217b56dcdb6598ed859d0cd6d70fae5acaaa38db5bbae6df8339e5e3dd594388bd53cef6f2acda4ac002d8ac6e01d430bdcf8565b8b8823ff3fb7dc8b359e687dd6feab0edf86c7444c713f34d2513145049b9664aae2e3dbc8a3365baae9d26842852ec9f401112a9742560ec220e61b05f65448d75b714839a6bafc723e9a04f25c69c036775fc55b7ec2bb28ef1de25a32cac51c288ed6d43f3819b1c3356d7699ea5f10217d553e90e6c93641649bd289dedb9e5725579539df07301f15093496c8fca3ec66a43332d1be3a3f94b530e1b8ca7feaa24c4ca73e60397a786ab742ac8933ba6bd504ef3c1a53fa1ff4397aba7c42a526507f930fdf9ff00a2a07b521841574d4e2b5beece946a15fa2545c8e556e704eed0ed10c0e3cbb9f5d6147e6e2d260666c79fa04d89c8901eeb3d3793239a68218a2c105f1bcb4211631eea037102bd5c840de751d84f473bb5cf6c41b3b97ec1c978700ec3c132e09a28d0a92c7e141e9968d0d2852c339a85c052356049f6752cb57c3d2b8c03db24525aa1f7db4a4f4d7d48639e27faa8c8bc695ad6c4f7688d43feedabef4d05c20b349ebc1697b3b899038b22fa308546efff290902cdacbe9992450cc31b61fc00652cffe4335c080d8398b061add986626068e17d5982ee9f6f28b4f4579d0406").unwrap(), 
@@ -311,6 +316,7 @@ fn get_esmeralda_genesis_block_raw() -> Block { let genesis = DateTime::parse_from_rfc2822("24 Aug 2022 22:00:00 +0200").unwrap(); #[allow(clippy::cast_sign_loss)] let timestamp = genesis.timestamp() as u64; + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); Block { header: BlockHeader { version: 0, @@ -337,6 +343,7 @@ fn get_esmeralda_genesis_block_raw() -> Block { pow_algo: PowAlgorithm::Sha3, pow_data: vec![], }, + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }, body, } diff --git a/base_layer/core/src/blocks/new_blockheader_template.rs b/base_layer/core/src/blocks/new_blockheader_template.rs index 5864adbadb..4b2eeebec7 100644 --- a/base_layer/core/src/blocks/new_blockheader_template.rs +++ b/base_layer/core/src/blocks/new_blockheader_template.rs @@ -45,6 +45,8 @@ pub struct NewBlockHeaderTemplate { pub total_script_offset: BlindingFactor, /// Proof of work summary pub pow: ProofOfWork, + // Merkle root of all active validator node. + pub validator_node_merkle_root: Vec, } impl NewBlockHeaderTemplate { @@ -56,6 +58,7 @@ impl NewBlockHeaderTemplate { total_kernel_offset: header.total_kernel_offset, total_script_offset: header.total_script_offset, pow: header.pow, + validator_node_merkle_root: header.validator_node_merkle_root, } } } diff --git a/base_layer/core/src/chain_storage/active_validator_node.rs b/base_layer/core/src/chain_storage/active_validator_node.rs new file mode 100644 index 0000000000..fa6a4a7dfe --- /dev/null +++ b/base_layer/core/src/chain_storage/active_validator_node.rs @@ -0,0 +1,32 @@ +// Copyright 2022, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use serde::{Deserialize, Serialize}; +use tari_common_types::types::PublicKey; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ActiveValidatorNode { + pub shard_key: [u8; 32], + pub from_height: u64, + pub to_height: u64, + pub public_key: PublicKey, +} diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 031e6405b1..42940705f5 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -30,6 +30,7 @@ use tari_common_types::{ }; use tari_utilities::epoch_time::EpochTime; +use super::ActiveValidatorNode; use crate::{ blocks::{ Block, @@ -264,6 +265,12 @@ impl AsyncBlockchainDb { make_async_fn!(get_stats() -> DbBasicStats, "get_stats"); make_async_fn!(fetch_total_size_stats() -> DbTotalSizeStats, "fetch_total_size_stats"); + + make_async_fn!(fetch_active_validator_nodes(height: u64) -> Vec, "fetch_active_validator_nodes"); + + make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); + + make_async_fn!(get_validator_nodes_mr() -> Vec, "get_validator_nodes_mr"); } impl From> for AsyncBlockchainDb { diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 25291c6765..08478a13a3 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -7,6 +7,7 @@ use tari_common_types::{ types::{Commitment, HashOutput, Signature}, }; +use super::ActiveValidatorNode; use crate::{ blocks::{ Block, @@ -191,4 +192,7 @@ pub trait BlockchainBackend: Send + Sync { /// Fetches all tracked reorgs fn fetch_all_reorgs(&self) -> Result, ChainStorageError>; + + fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; + fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError>; } diff --git 
a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index caeb10a176..b32a21976b 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -41,6 +41,7 @@ use tari_common_types::{ use tari_mmr::pruned_hashset::PrunedHashSet; use tari_utilities::{epoch_time::EpochTime, hex::Hex, ByteArray}; +use super::ActiveValidatorNode; use crate::{ blocks::{ Block, @@ -91,6 +92,7 @@ use crate::{ PrunedInputMmr, PrunedKernelMmr, PrunedWitnessMmr, + ValidatorNodeMmr, }; const LOG_TARGET: &str = "c::cs::database"; @@ -838,6 +840,14 @@ where B: BlockchainBackend db.fetch_mmr_size(tree) } + pub fn get_validator_nodes_mr(&self) -> Result, ChainStorageError> { + let tip = self.get_height()?; + let validator_nodes = self.fetch_active_validator_nodes(tip + 1)?; + // Note: MMR is not balanced + let mmr = ValidatorNodeMmr::new(validator_nodes.iter().map(|vn| vn.shard_key.to_vec()).collect()); + Ok(mmr.get_merkle_root().unwrap()) + } + /// Tries to add a block to the longest chain. 
/// /// The block is added to the longest chain if and only if @@ -1154,6 +1164,16 @@ where B: BlockchainBackend txn.clear_all_reorgs(); db.write(txn) } + + pub fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_active_validator_nodes(height) + } + + pub fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_committee(height, shard) + } } fn unexpected_result(request: DbKey, response: DbValue) -> Result { diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index e1fcbbcb6a..105f217f29 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -27,9 +27,10 @@ use std::{ }; use croaring::Bitmap; -use tari_common_types::types::{BlockHash, Commitment, HashOutput}; +use tari_common_types::types::{BlockHash, Commitment, HashOutput, PublicKey}; use tari_utilities::hex::Hex; +use super::ActiveValidatorNode; use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, @@ -358,6 +359,12 @@ pub enum WriteOperation { reorg: Reorg, }, ClearAllReorgs, + InsertValidatorNode { + validator_node: ActiveValidatorNode, + }, + DeleteValidatorNode { + public_key: PublicKey, + }, } impl fmt::Display for WriteOperation { @@ -454,6 +461,10 @@ impl fmt::Display for WriteOperation { SetHorizonData { .. } => write!(f, "Set horizon data"), InsertReorg { .. 
} => write!(f, "Insert reorg"), ClearAllReorgs => write!(f, "Clear all reorgs"), + InsertValidatorNode { validator_node } => { + write!(f, "Inserting VN {:?}", validator_node) + }, + DeleteValidatorNode { public_key } => write!(f, "Delete VN key {}", public_key), } } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 8e042220bf..6cf12d69bd 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -34,7 +34,7 @@ use log::*; use serde::{Deserialize, Serialize}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashOutput, Signature}, + types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, }; use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; use tari_utilities::{ @@ -42,6 +42,7 @@ use tari_utilities::{ ByteArray, }; +use super::{key_prefix_cursor::KeyPrefixCursor, lmdb::lmdb_get_prefix_cursor}; use crate::{ blocks::{ Block, @@ -83,6 +84,7 @@ use crate::{ }, stats::DbTotalSizeStats, utxo_mined_info::UtxoMinedInfo, + ActiveValidatorNode, BlockchainBackend, DbBasicStats, DbSize, @@ -91,9 +93,17 @@ use crate::{ PrunedOutput, Reorg, }, + consensus::{ConsensusManager, DomainSeparatedConsensusHasher}, transactions::{ aggregated_body::AggregateBody, - transaction_components::{TransactionError, TransactionInput, TransactionKernel, TransactionOutput}, + transaction_components::{ + SpentOutput, + TransactionError, + TransactionInput, + TransactionKernel, + TransactionOutput, + }, + TransactionHashDomain, }, MutablePrunedOutputMmr, PrunedKernelMmr, @@ -128,8 +138,14 @@ const LMDB_DB_ORPHAN_CHAIN_TIPS: &str = "orphan_chain_tips"; const LMDB_DB_ORPHAN_PARENT_MAP_INDEX: &str = "orphan_parent_map_index"; const LMDB_DB_BAD_BLOCK_LIST: &str = "bad_blocks"; const LMDB_DB_REORGS: &str = "reorgs"; - -pub fn create_lmdb_database>(path: P, config: LMDBConfig) -> Result { 
+const LMDB_DB_VALIDATOR_NODES: &str = "validator_nodes"; +const LMDB_DB_VALIDATOR_NODES_MAPPING: &str = "validator_nodes_mapping"; + +pub fn create_lmdb_database>( + path: P, + config: LMDBConfig, + consensus_manager: ConsensusManager, +) -> Result { let flags = db::CREATE; debug!(target: LOG_TARGET, "Creating LMDB database at {:?}", path.as_ref()); std::fs::create_dir_all(&path)?; @@ -166,10 +182,12 @@ pub fn create_lmdb_database>(path: P, config: LMDBConfig) -> Resu .add_database(LMDB_DB_ORPHAN_PARENT_MAP_INDEX, flags | db::DUPSORT) .add_database(LMDB_DB_BAD_BLOCK_LIST, flags) .add_database(LMDB_DB_REORGS, flags | db::INTEGERKEY) + .add_database(LMDB_DB_VALIDATOR_NODES, flags) + .add_database(LMDB_DB_VALIDATOR_NODES_MAPPING, flags | db::DUPSORT) .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; debug!(target: LOG_TARGET, "LMDB database creation successful"); - LMDBDatabase::new(&lmdb_store, file_lock) + LMDBDatabase::new(&lmdb_store, file_lock, consensus_manager) } /// This is a lmdb-based blockchain database for persistent storage of the chain state. 
@@ -224,11 +242,20 @@ pub struct LMDBDatabase { bad_blocks: DatabaseRef, /// Stores reorgs by epochtime and Reorg reorgs: DatabaseRef, + /// Maps VN Public Key -> ActiveValidatorNode + validator_nodes: DatabaseRef, + /// Maps VN Shard Key -> VN Public Key + validator_nodes_mapping: DatabaseRef, _file_lock: Arc, + consensus_manager: ConsensusManager, } impl LMDBDatabase { - pub fn new(store: &LMDBStore, file_lock: File) -> Result { + pub fn new( + store: &LMDBStore, + file_lock: File, + consensus_manager: ConsensusManager, + ) -> Result { let env = store.env(); let db = Self { @@ -259,9 +286,12 @@ impl LMDBDatabase { orphan_parent_map_index: get_database(store, LMDB_DB_ORPHAN_PARENT_MAP_INDEX)?, bad_blocks: get_database(store, LMDB_DB_BAD_BLOCK_LIST)?, reorgs: get_database(store, LMDB_DB_REORGS)?, + validator_nodes: get_database(store, LMDB_DB_VALIDATOR_NODES)?, + validator_nodes_mapping: get_database(store, LMDB_DB_VALIDATOR_NODES_MAPPING)?, env, env_config: store.env_config(), _file_lock: Arc::new(file_lock), + consensus_manager, }; Ok(db) @@ -460,6 +490,14 @@ impl LMDBDatabase { ClearAllReorgs => { lmdb_clear(&write_txn, &self.reorgs)?; }, + InsertValidatorNode { validator_node } => { + self.insert_validator_node(&write_txn, validator_node)?; + }, + DeleteValidatorNode { public_key } => { + let txn = self.read_transaction()?; + let shard_key = self.get_vn_mapping(&txn, public_key)?; + self.delete_validator_node(&write_txn, public_key, &shard_key)?; + }, } } write_txn.commit()?; @@ -467,7 +505,7 @@ impl LMDBDatabase { Ok(()) } - fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 24] { + fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 26] { [ ("metadata_db", &self.metadata_db), ("headers_db", &self.headers_db), @@ -499,6 +537,8 @@ impl LMDBDatabase { ("orphan_parent_map_index", &self.orphan_parent_map_index), ("bad_blocks", &self.bad_blocks), ("reorgs", &self.reorgs), + ("validator_nodes", &self.validator_nodes), + ("validator_nodes_mapping", 
&self.validator_nodes_mapping), ] } @@ -1228,6 +1268,16 @@ impl LMDBDatabase { None => return Err(ChainStorageError::UnspendableInput), }, }; + if let SpentOutput::OutputData { + version: _, features, .. + } = &input.spent_output + { + if let Some(validator_node_public_key) = &features.validator_node_public_key { + let read_txn = self.read_transaction()?; + let shard_key = self.get_vn_mapping(&read_txn, validator_node_public_key)?; + self.delete_validator_node(txn, validator_node_public_key, &shard_key)?; + } + } if !output_mmr.delete(index) { return Err(ChainStorageError::InvalidOperation(format!( "Could not delete index {} from the output MMR", @@ -1246,6 +1296,24 @@ impl LMDBDatabase { mmr_count )) })?; + if let Some(validator_node_public_key) = &output.features.validator_node_public_key { + let shard_key = DomainSeparatedConsensusHasher::::new("validator_node_root") + .chain(&validator_node_public_key.as_bytes()) + .chain(&block_hash) + .finalize(); + + let validator_node = ActiveValidatorNode { + shard_key, + from_height: header.height + 1, // The node is active one block after it's mined + to_height: header.height + + 1 + + self.consensus_manager + .consensus_constants(header.height) + .get_validator_node_timeout(), + public_key: validator_node_public_key.clone(), + }; + self.insert_validator_node(txn, &validator_node)?; + } self.insert_output( txn, &block_hash, @@ -1487,6 +1555,42 @@ impl LMDBDatabase { Ok(()) } + fn insert_validator_node( + &self, + txn: &WriteTransaction<'_>, + validator_node: &ActiveValidatorNode, + ) -> Result<(), ChainStorageError> { + lmdb_insert( + txn, + &self.validator_nodes, + &validator_node.public_key.to_vec(), + validator_node, + "validator_nodes", + )?; + lmdb_insert( + txn, + &self.validator_nodes_mapping, + &validator_node.shard_key, + &validator_node.public_key.to_vec(), + "validator_nodes_mapping", + ) + } + + fn get_vn_mapping(&self, txn: &ReadTransaction<'_>, public_key: &PublicKey) -> Result<[u8; 32], ChainStorageError> 
{ + let x: ActiveValidatorNode = lmdb_get(txn, &self.validator_nodes, &public_key.to_vec())?.unwrap(); + Ok(x.shard_key) + } + + fn delete_validator_node( + &self, + txn: &WriteTransaction<'_>, + public_key: &PublicKey, + shard_key: &[u8; 32], + ) -> Result<(), ChainStorageError> { + lmdb_delete(txn, &self.validator_nodes, &public_key.to_vec(), "validator_nodes")?; + lmdb_delete(txn, &self.validator_nodes, shard_key, "validator_nodes_mapping") + } + fn fetch_output_in_txn( &self, txn: &ConstTransaction<'_>, @@ -2293,6 +2397,80 @@ impl BlockchainBackend for LMDBDatabase { let txn = self.read_transaction()?; lmdb_filter_map_values(&txn, &self.reorgs, Some) } + + fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { + let txn = self.read_transaction()?; + lmdb_filter_map_values(&txn, &self.validator_nodes, |vn: ActiveValidatorNode| { + if vn.from_height <= height && vn.to_height >= height { + Some(vn) + } else { + None + } + }) + } + + fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { + // TODO: I'm not sure how effective this is compared to getting all and selecting by yourself. Also if there is + // less validator nodes than committee size this gets weird. + let txn = self.read_transaction()?; + let mut cursor: KeyPrefixCursor = + lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &shard)?; + let mut result = vec![]; + let committee_half_size = 5u64; + let mut size = 0u64; + // Right side of the committee + while let Some((_, val)) = cursor.next()? { + if val.from_height <= height && height <= val.to_height { + result.push(val); + size += 1; + if size == committee_half_size { + break; + } + } + } + // Check if it wraps around + if size < committee_half_size { + let mut cursor: KeyPrefixCursor = + lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &[0; 32])?; + while let Some((_, val)) = cursor.next()? 
{ + if val.from_height <= height && height <= val.to_height { + result.push(val); + size += 1; + if size == committee_half_size { + break; + } + } + } + } + let mut cursor: KeyPrefixCursor = + lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &shard)?; + let mut size = 0u64; + // Left side of the committee + while let Some((_, val)) = cursor.prev()? { + if val.from_height <= height && height <= val.to_height { + result.push(val); + size += 1; + if size == committee_half_size { + break; + } + } + } + // Check if it wraps around + if size < committee_half_size { + let mut cursor: KeyPrefixCursor = + lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &[255; 32])?; + while let Some((_, val)) = cursor.prev()? { + if val.from_height <= height && height <= val.to_height { + result.push(val); + size += 1; + if size == committee_half_size { + break; + } + } + } + } + Ok(result) + } } // Fetch the chain metadata diff --git a/base_layer/core/src/chain_storage/mod.rs b/base_layer/core/src/chain_storage/mod.rs index 8dfa32d3bc..d374dccf1b 100644 --- a/base_layer/core/src/chain_storage/mod.rs +++ b/base_layer/core/src/chain_storage/mod.rs @@ -79,3 +79,6 @@ mod target_difficulties; mod utxo_mined_info; pub use target_difficulties::TargetDifficulties; pub use utxo_mined_info::*; + +mod active_validator_node; +pub use active_validator_node::ActiveValidatorNode; diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 69ec71043e..0846620edf 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -496,7 +496,8 @@ mod prepare_new_block { fn it_errors_for_non_tip_template() { let db = setup(); let genesis = db.fetch_block(0).unwrap(); - let next_block = BlockHeader::from_previous(genesis.header()); + let next_block = + BlockHeader::from_previous(genesis.header(), 
genesis.header().validator_node_merkle_root.clone()); let mut template = NewBlockTemplate::from_block(next_block.into_builder().build(), Difficulty::min(), 5000 * T); // This would cause a panic if the sanity checks were not there template.header.height = 100; @@ -511,7 +512,8 @@ mod prepare_new_block { fn it_prepares_the_first_block() { let db = setup(); let genesis = db.fetch_block(0).unwrap(); - let next_block = BlockHeader::from_previous(genesis.header()); + let next_block = + BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); let template = NewBlockTemplate::from_block(next_block.into_builder().build(), Difficulty::min(), 5000 * T); let block = db.prepare_new_block(template).unwrap(); assert_eq!(block.header.height, 1); @@ -631,7 +633,10 @@ mod clear_all_pending_headers { let mut prev_header = prev_block.try_into_chain_block().unwrap().to_chain_header(); let headers = (0..5) .map(|_| { - let mut header = BlockHeader::from_previous(prev_header.header()); + let mut header = BlockHeader::from_previous( + prev_header.header(), + prev_header.header().validator_node_merkle_root.clone(), + ); header.kernel_mmr_size += 1; header.output_mmr_size += 1; let accum = BlockHeaderAccumulatedData::builder(&prev_accum) diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index 4692e0a403..eed45fd849 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -94,6 +94,8 @@ pub struct ConsensusConstants { kernel_version_range: RangeInclusive, /// An allowlist of output types permitted_output_types: &'static [OutputType], + /// How long does it take to timeout validator node registration + validator_node_timeout: u64, } // todo: remove this once OutputFeaturesVersion is removed in favor of just TransactionOutputVersion @@ -286,6 +288,10 @@ impl ConsensusConstants { self.permitted_output_types } + 
pub fn get_validator_node_timeout(&self) -> u64 { + self.validator_node_timeout + } + pub fn localnet() -> Vec { let difficulty_block_window = 90; let mut algos = HashMap::new(); @@ -323,6 +329,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: OutputType::all(), + validator_node_timeout: 0, }] } @@ -363,6 +370,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 0, }] } @@ -406,6 +414,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 0, }] } @@ -456,6 +465,7 @@ impl ConsensusConstants { output_version_range: output_version_range.clone(), kernel_version_range: kernel_version_range.clone(), permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 0, }, ConsensusConstants { effective_from_height: 23000, @@ -479,6 +489,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 0, }, ] } @@ -527,6 +538,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 50, }] } @@ -568,6 +580,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 0, }] } diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index da8a129755..0deca2dcef 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -108,6 +108,14 @@ mod domain_hashing { ); pub type InputMmrHasherBlake256 = DomainSeparatedHasher; pub type PrunedInputMmr = MerkleMountainRange; + + hash_domain!( + ValidatorNodeMmrHashDomain, + 
"com.tari.tari_project.base_layer.core.validator_node_mmr", + 1 + ); + pub type ValidatorNodeMmrHasherBlake256 = DomainSeparatedHasher; + pub type ValidatorNodeMmr = MerkleMountainRange>; } #[cfg(feature = "base_node")] pub use domain_hashing::*; diff --git a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs index da95343643..78a7c650ec 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs @@ -203,6 +203,7 @@ mod test { use crate::{ consensus::ConsensusEncoding, proof_of_work::{monero_rx::fixed_array::FixedByteArray, PowAlgorithm, ProofOfWork}, + ValidatorNodeMmr, }; // This tests checks the hash of monero-rs @@ -292,6 +293,7 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -307,6 +309,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -348,6 +351,7 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -363,6 +367,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -400,6 +405,7 @@ 
mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let block = deserialize::(&bytes[..]).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -415,6 +421,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let count = 1 + (u16::try_from(block.tx_hashes.len()).unwrap()); let mut hashes = Vec::with_capacity(count as usize); @@ -451,6 +458,7 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -466,6 +474,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let hash = Hash::null(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -506,6 +515,7 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -521,6 +531,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -557,6 +568,7 @@ mod test { #[test] fn test_verify_header_no_data() { + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, 
height: 0, @@ -572,6 +584,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let monero_data = MoneroPowData { header: Default::default(), @@ -599,6 +612,7 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -614,6 +628,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), + validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); diff --git a/base_layer/core/src/proto/block.proto b/base_layer/core/src/proto/block.proto index d42555b2f6..badfd84e6c 100644 --- a/base_layer/core/src/proto/block.proto +++ b/base_layer/core/src/proto/block.proto @@ -51,6 +51,8 @@ message BlockHeader { uint64 output_mmr_size = 14; // Sum of script offsets for all kernels in this block. bytes total_script_offset = 15; + // Merkle root of validator nodes + bytes validator_node_merkle_root = 16; } // A Tari block. Blocks are linked together into a blockchain. @@ -108,6 +110,8 @@ message NewBlockHeaderTemplate { ProofOfWork pow = 5; // Sum of script offsets for all kernels in this block. bytes total_script_offset = 6; + // Merkle root of validator nodes + bytes validator_node_merkle_root = 7; } // The new block template is used constructing a new partial block, allowing a miner to added the coinbase utxo and as a final step the Base node to add the MMR roots to the header. 
diff --git a/base_layer/core/src/proto/block.rs b/base_layer/core/src/proto/block.rs index 62c79d928e..e651673cc0 100644 --- a/base_layer/core/src/proto/block.rs +++ b/base_layer/core/src/proto/block.rs @@ -221,6 +221,7 @@ impl TryFrom for NewBlockHeaderTemplate { total_kernel_offset, total_script_offset, pow, + validator_node_merkle_root: header.validator_node_merkle_root, }) } } @@ -234,6 +235,7 @@ impl From for proto::NewBlockHeaderTemplate { total_kernel_offset: header.total_kernel_offset.to_vec(), total_script_offset: header.total_script_offset.to_vec(), pow: Some(proto::ProofOfWork::from(header.pow)), + validator_node_merkle_root: header.validator_node_merkle_root, } } } diff --git a/base_layer/core/src/proto/block_header.rs b/base_layer/core/src/proto/block_header.rs index 47fa2e20c2..e221608aad 100644 --- a/base_layer/core/src/proto/block_header.rs +++ b/base_layer/core/src/proto/block_header.rs @@ -68,6 +68,7 @@ impl TryFrom for BlockHeader { total_script_offset, nonce: header.nonce, pow, + validator_node_merkle_root: header.validator_node_merkle_root, }) } } @@ -90,6 +91,7 @@ impl From for proto::BlockHeader { pow: Some(proto::ProofOfWork::from(header.pow)), kernel_mmr_size: header.kernel_mmr_size, output_mmr_size: header.output_mmr_size, + validator_node_merkle_root: header.validator_node_merkle_root, } } } diff --git a/base_layer/core/src/proto/transaction.proto b/base_layer/core/src/proto/transaction.proto index 34d3f2b77e..77aefa0d8a 100644 --- a/base_layer/core/src/proto/transaction.proto +++ b/base_layer/core/src/proto/transaction.proto @@ -99,6 +99,8 @@ message OutputFeatures { uint64 maturity = 3; bytes metadata = 4; SideChainFeatures sidechain_features = 5; + bytes validator_node_public_key = 6; + Signature validator_node_signature = 7; } // The components of the block or transaction. 
The same struct can be used for either, since in Mimblewimble, diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index d199623bd9..123e15c49b 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -27,7 +27,7 @@ use std::{ sync::Arc, }; -use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey}; +use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey, Signature}; use tari_crypto::tari_utilities::{ByteArray, ByteArrayError}; use tari_script::{ExecutionStack, TariScript}; use tari_utilities::convert::try_convert_all; @@ -300,6 +300,9 @@ impl TryFrom for OutputFeatures { .map(SideChainFeatures::try_from) .transpose()?; + let validator_node_public_key = PublicKey::from_bytes(features.validator_node_public_key.as_bytes()).ok(); + let validator_node_signature = features.validator_node_signature.map(Signature::try_from).transpose()?; + let flags = features .flags .try_into() @@ -313,6 +316,8 @@ impl TryFrom for OutputFeatures { features.maturity, features.metadata, sidechain_features, + validator_node_public_key, + validator_node_signature, )) } } @@ -325,6 +330,11 @@ impl From for proto::types::OutputFeatures { metadata: features.metadata, version: features.version as u32, sidechain_features: features.sidechain_features.map(Into::into), + validator_node_public_key: features + .validator_node_public_key + .map(|pk| pk.as_bytes().to_vec()) + .unwrap_or_default(), + validator_node_signature: features.validator_node_signature.map(Into::into), } } } diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 9572ac9ac2..92579237d4 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -51,6 +51,7 @@ use crate::{ }, chain_storage::{ create_lmdb_database, + ActiveValidatorNode, BlockAddResult, 
BlockchainBackend, BlockchainDatabase, @@ -159,17 +160,19 @@ pub struct TempDatabase { impl TempDatabase { pub fn new() -> Self { let temp_path = create_temporary_data_path(); + let rules = create_consensus_rules(); Self { - db: Some(create_lmdb_database(&temp_path, LMDBConfig::default()).unwrap()), + db: Some(create_lmdb_database(&temp_path, LMDBConfig::default(), rules).unwrap()), path: temp_path, delete_on_drop: true, } } pub fn from_path>(temp_path: P) -> Self { + let rules = create_consensus_rules(); Self { - db: Some(create_lmdb_database(&temp_path, LMDBConfig::default()).unwrap()), + db: Some(create_lmdb_database(&temp_path, LMDBConfig::default(), rules).unwrap()), path: temp_path.as_ref().to_path_buf(), delete_on_drop: true, } @@ -410,6 +413,17 @@ impl BlockchainBackend for TempDatabase { fn fetch_all_reorgs(&self) -> Result, ChainStorageError> { self.db.as_ref().unwrap().fetch_all_reorgs() } + + fn fetch_active_validator_nodes( + &self, + height: u64, + ) -> Result, ChainStorageError> { + self.db.as_ref().unwrap().fetch_active_validator_nodes(height) + } + + fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { + self.db.as_ref().unwrap().fetch_committee(height, shard) + } } pub fn create_chained_blocks>( diff --git a/base_layer/core/src/test_helpers/mod.rs b/base_layer/core/src/test_helpers/mod.rs index aac67e4318..f77890a6db 100644 --- a/base_layer/core/src/test_helpers/mod.rs +++ b/base_layer/core/src/test_helpers/mod.rs @@ -63,7 +63,8 @@ pub fn create_orphan_block(block_height: u64, transactions: Vec, co } pub fn create_block(rules: &ConsensusManager, prev_block: &Block, spec: BlockSpec) -> (Block, UnblindedOutput) { - let mut header = BlockHeader::from_previous(&prev_block.header); + let mut header = + BlockHeader::from_previous(&prev_block.header, prev_block.header.validator_node_merkle_root.clone()); let block_height = spec.height_override.unwrap_or(prev_block.header.height + 1); header.height = block_height; // 
header.prev_hash = prev_block.hash(); diff --git a/base_layer/core/src/transactions/transaction_components/error.rs b/base_layer/core/src/transactions/transaction_components/error.rs index 1ef013fb59..43e0913ce9 100644 --- a/base_layer/core/src/transactions/transaction_components/error.rs +++ b/base_layer/core/src/transactions/transaction_components/error.rs @@ -73,6 +73,8 @@ pub enum TransactionError { ConsensusEncodingError(String), #[error("Committee contains too many members: contains {len} members but maximum is {max}")] InvalidCommitteeLength { len: usize, max: usize }, + #[error("Missing validator node signature")] + MissingValidatorNodeSignature, } impl From for TransactionError { diff --git a/base_layer/core/src/transactions/transaction_components/kernel_features.rs b/base_layer/core/src/transactions/transaction_components/kernel_features.rs index f85efad411..68856f8b60 100644 --- a/base_layer/core/src/transactions/transaction_components/kernel_features.rs +++ b/base_layer/core/src/transactions/transaction_components/kernel_features.rs @@ -38,6 +38,8 @@ bitflags! 
{ const COINBASE_KERNEL = 1u8; /// Burned output transaction const BURN_KERNEL = 2u8; + /// Validator node registration transaction + const VALIDATOR_NODE_REGISTRATION = 3u8; } } @@ -56,6 +58,10 @@ impl KernelFeatures { pub fn is_burned(&self) -> bool { self.contains(KernelFeatures::BURN_KERNEL) } + + pub fn create_validator_node_registration() -> KernelFeatures { + KernelFeatures::VALIDATOR_NODE_REGISTRATION + } } impl Default for KernelFeatures { diff --git a/base_layer/core/src/transactions/transaction_components/output_features.rs b/base_layer/core/src/transactions/transaction_components/output_features.rs index aaa8a48b2a..fd968263a9 100644 --- a/base_layer/core/src/transactions/transaction_components/output_features.rs +++ b/base_layer/core/src/transactions/transaction_components/output_features.rs @@ -29,6 +29,7 @@ use std::{ }; use serde::{Deserialize, Serialize}; +use tari_common_types::types::{PublicKey, Signature}; use super::OutputFeaturesVersion; use crate::{ @@ -47,6 +48,8 @@ pub struct OutputFeatures { pub maturity: u64, pub metadata: Vec, pub sidechain_features: Option, + pub validator_node_public_key: Option, + pub validator_node_signature: Option, } impl OutputFeatures { @@ -56,6 +59,8 @@ impl OutputFeatures { maturity: u64, metadata: Vec, sidechain_features: Option, + validator_node_public_key: Option, + validator_node_signature: Option, ) -> OutputFeatures { OutputFeatures { version, @@ -63,6 +68,8 @@ impl OutputFeatures { maturity, metadata, sidechain_features, + validator_node_public_key, + validator_node_signature, } } @@ -71,6 +78,8 @@ impl OutputFeatures { maturity: u64, metadata: Vec, sidechain_features: Option, + validator_node_public_key: Option, + validator_node_signature: Option, ) -> OutputFeatures { OutputFeatures::new( OutputFeaturesVersion::get_current_version(), @@ -78,6 +87,8 @@ impl OutputFeatures { maturity, metadata, sidechain_features, + validator_node_public_key, + validator_node_signature, ) } @@ -106,6 +117,17 @@ impl 
OutputFeatures { } } + pub fn create_validator_node_registration( + validator_node_public_key: PublicKey, + validator_node_signature: Signature, + ) -> OutputFeatures { + OutputFeatures { + validator_node_public_key: Some(validator_node_public_key), + validator_node_signature: Some(validator_node_signature), + ..Default::default() + } + } + pub fn is_coinbase(&self) -> bool { matches!(self.output_type, OutputType::Coinbase) } @@ -135,19 +157,23 @@ impl ConsensusDecoding for OutputFeatures { let sidechain_features = ConsensusDecoding::consensus_decode(reader)?; const MAX_METADATA_SIZE: usize = 1024; let metadata = as ConsensusDecoding>::consensus_decode(reader)?; + let validator_node_public_key = None; + let validator_node_signature = None; Ok(Self { version, output_type: flags, maturity, sidechain_features, metadata: metadata.into(), + validator_node_public_key, + validator_node_signature, }) } } impl Default for OutputFeatures { fn default() -> Self { - OutputFeatures::new_current_version(OutputType::default(), 0, vec![], None) + OutputFeatures::new_current_version(OutputType::default(), 0, vec![], None, None, None) } } @@ -212,6 +238,8 @@ mod test { .try_into() .unwrap(), })), + validator_node_public_key: None, + validator_node_signature: None, } } diff --git a/base_layer/core/src/transactions/transaction_components/transaction_output.rs b/base_layer/core/src/transactions/transaction_components/transaction_output.rs index d1f0004f63..bff71791b5 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_output.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_output.rs @@ -215,6 +215,22 @@ impl TransactionOutput { Ok(()) } + pub fn verify_validator_node_signature(&self) -> Result<(), TransactionError> { + if let Some(public_key) = &self.features.validator_node_public_key { + let signature = self + .features + .validator_node_signature + .clone() + .ok_or(TransactionError::MissingValidatorNodeSignature)?; + if 
!signature.verify_challenge(public_key, &[0]) { + return Err(TransactionError::InvalidSignatureError( + "Validator node signature is not valid!".to_string(), + )); + } + } + Ok(()) + } + /// Attempt to rewind the range proof to reveal the mask (blinding factor) pub fn recover_mask( &self, diff --git a/base_layer/core/src/validation/block_validators/async_validator.rs b/base_layer/core/src/validation/block_validators/async_validator.rs index ab9988f9a0..696b90ca8f 100644 --- a/base_layer/core/src/validation/block_validators/async_validator.rs +++ b/base_layer/core/src/validation/block_validators/async_validator.rs @@ -54,6 +54,7 @@ use crate::{ BlockSyncBodyValidation, ValidationError, }, + ValidatorNodeMmr, }; /// This validator checks whether a block satisfies consensus rules. @@ -100,6 +101,8 @@ impl BlockValidator { let inputs_task = self.start_input_validation(&valid_header, outputs.iter().map(|o| o.hash()).collect(), inputs); + let validator_node_mmr_task = self.start_validator_node_mmr_validation(&valid_header); + // Output order cannot be checked concurrently so it is checked here first if !helpers::is_all_unique_and_sorted(&outputs) { return Err(ValidationError::UnsortedOrDuplicateOutput); @@ -110,6 +113,7 @@ impl BlockValidator { let outputs_result = outputs_task.await??; let inputs_result = inputs_task.await??; let kernels_result = kernels_task.await??; + validator_node_mmr_task.await??; // Perform final checks using validation outputs helpers::check_coinbase_maturity(&self.rules, valid_header.height, outputs_result.coinbase())?; @@ -405,6 +409,7 @@ impl BlockValidator { helpers::check_permitted_output_types(&constants, output)?; helpers::check_tari_script_byte_size(&output.script, max_script_size)?; output.verify_metadata_signature()?; + output.verify_validator_node_signature()?; helpers::check_not_duplicate_txo(&*db, output)?; commitment_sum = &commitment_sum + &output.commitment; } @@ -465,6 +470,25 @@ impl BlockValidator { }) .into() } + + fn 
start_validator_node_mmr_validation( + &self, + header: &BlockHeader, + ) -> AbortOnDropJoinHandle> { + let vn_root = header.validator_node_merkle_root.clone(); + let height = header.height; + let db = self.db.inner().clone(); + task::spawn(async move { + let vns = db.fetch_active_validator_nodes(height)?; + let mmr = ValidatorNodeMmr::new(vns.iter().map(|vn| vn.shard_key.to_vec()).collect()); + if mmr.get_merkle_root().unwrap() == vn_root { + Ok(()) + } else { + Err(ValidationError::ValidatorNodeMmmrError) + } + }) + .into() + } } #[async_trait] diff --git a/base_layer/core/src/validation/block_validators/test.rs b/base_layer/core/src/validation/block_validators/test.rs index 0be1c4527b..2a5df07937 100644 --- a/base_layer/core/src/validation/block_validators/test.rs +++ b/base_layer/core/src/validation/block_validators/test.rs @@ -88,6 +88,7 @@ async fn it_checks_the_coinbase_reward() { let (block, _) = blockchain.create_chained_block(block_spec!("A", parent: "GB", reward: 10 * T, )); let err = validator.validate_block_body(block.block().clone()).await.unwrap_err(); + println!("err {:?}", err); assert!(matches!( err, ValidationError::TransactionError(TransactionError::InvalidCoinbase) diff --git a/base_layer/core/src/validation/error.rs b/base_layer/core/src/validation/error.rs index ae34950a65..7fa41e1a9d 100644 --- a/base_layer/core/src/validation/error.rs +++ b/base_layer/core/src/validation/error.rs @@ -132,6 +132,8 @@ pub enum ValidationError { OutputTypeNotPermitted { output_type: OutputType }, #[error("FixedHash size error: {0}")] FixedHashSizeError(#[from] FixedHashSizeError), + #[error("Validator node MMR is not correct")] + ValidatorNodeMmmrError, } // ChainStorageError has a ValidationError variant, so to prevent a cyclic dependency we use a string representation in diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index 71f41083ff..90723ccc68 100644 --- a/base_layer/core/src/validation/test.rs +++ 
b/base_layer/core/src/validation/test.rs @@ -113,7 +113,8 @@ mod header_validators { let genesis = db.fetch_chain_header(0).unwrap(); - let mut header = BlockHeader::from_previous(genesis.header()); + let mut header = + BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); header.version = u16::MAX; let validator = HeaderValidator::new(consensus_manager.clone()); @@ -201,7 +202,7 @@ fn chain_balance_validation() { .build() .unwrap(); - let mut header1 = BlockHeader::from_previous(genesis.header()); + let mut header1 = BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); header1.kernel_mmr_size += 1; header1.output_mmr_size += 1; let achieved_difficulty = AchievedTargetDifficulty::try_construct( @@ -253,7 +254,7 @@ fn chain_balance_validation() { .build() .unwrap(); - let mut header2 = BlockHeader::from_previous(header1.header()); + let mut header2 = BlockHeader::from_previous(header1.header(), header1.header().validator_node_merkle_root.clone()); header2.kernel_mmr_size += 1; header2.output_mmr_size += 1; let achieved_difficulty = AchievedTargetDifficulty::try_construct( @@ -375,7 +376,7 @@ fn chain_balance_validation_burned() { .build() .unwrap(); burned_sum = &burned_sum + kernel2.get_burn_commitment().unwrap(); - let mut header1 = BlockHeader::from_previous(genesis.header()); + let mut header1 = BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); header1.kernel_mmr_size += 2; header1.output_mmr_size += 2; let achieved_difficulty = AchievedTargetDifficulty::try_construct( diff --git a/base_layer/core/tests/chain_storage_tests/chain_backend.rs b/base_layer/core/tests/chain_storage_tests/chain_backend.rs index 6ace36eb98..fcdc74b6d7 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_backend.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_backend.rs @@ -23,7 +23,7 @@ use tari_common::configuration::Network; use 
tari_core::{ chain_storage::{create_lmdb_database, BlockchainBackend, ChainStorageError, DbKey, DbTransaction, DbValue}, - consensus::ConsensusManagerBuilder, + consensus::{ConsensusManager, ConsensusManagerBuilder}, test_helpers::blockchain::create_test_db, tx, }; @@ -69,17 +69,18 @@ fn lmdb_file_lock() { // Perform test { - let db = create_lmdb_database(&temp_path, LMDBConfig::default()).unwrap(); + let consensus_manager = ConsensusManager::builder(Network::LocalNet).build(); + let db = create_lmdb_database(&temp_path, LMDBConfig::default(), consensus_manager.clone()).unwrap(); - match create_lmdb_database(&temp_path, LMDBConfig::default()) { + match create_lmdb_database(&temp_path, LMDBConfig::default(), consensus_manager.clone()) { Err(ChainStorageError::CannotAcquireFileLock) => {}, _ => panic!("Should not be able to make this db"), } drop(db); - let _db2 = - create_lmdb_database(&temp_path, LMDBConfig::default()).expect("Should be able to make a new lmdb now"); + let _db2 = create_lmdb_database(&temp_path, LMDBConfig::default(), consensus_manager) + .expect("Should be able to make a new lmdb now"); } // Cleanup test data - in Windows the LMBD `set_mapsize` sets file size equals to map size; Linux use sparse files diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 91653f5590..313a2fa43b 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -36,7 +36,7 @@ use tari_core::{ MmrTree, Validators, }, - consensus::{emission::Emission, ConsensusConstantsBuilder, ConsensusManagerBuilder}, + consensus::{emission::Emission, ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder}, proof_of_work::Difficulty, test_helpers::blockchain::{ create_store_with_consensus, @@ -89,7 +89,10 @@ fn insert_and_fetch_header() { let _consensus_manager = ConsensusManagerBuilder::new(network).build(); let 
store = create_test_blockchain_db(); let genesis_block = store.fetch_tip_header().unwrap(); - let mut header1 = BlockHeader::from_previous(genesis_block.header()); + let mut header1 = BlockHeader::from_previous( + genesis_block.header(), + genesis_block.header().validator_node_merkle_root.clone(), + ); header1.kernel_mmr_size += 1; header1.output_mmr_size += 1; @@ -97,7 +100,7 @@ fn insert_and_fetch_header() { let chain1 = create_chain_header(header1.clone(), genesis_block.accumulated_data()); store.insert_valid_headers(vec![chain1.clone()]).unwrap(); - let mut header2 = BlockHeader::from_previous(&header1); + let mut header2 = BlockHeader::from_previous(&header1, header1.validator_node_merkle_root.clone()); header2.kernel_mmr_size += 2; header2.output_mmr_size += 2; let chain2 = create_chain_header(header2.clone(), chain1.accumulated_data()); @@ -1529,7 +1532,6 @@ fn orphan_cleanup_on_reorg() { fn orphan_cleanup_delete_all_orphans() { let path = create_temporary_data_path(); let network = Network::LocalNet; - let consensus_manager = ConsensusManagerBuilder::new(network).build(); let validators = Validators::new( MockValidator::new(true), MockValidator::new(true), @@ -1543,7 +1545,8 @@ fn orphan_cleanup_delete_all_orphans() { }; // Test cleanup during runtime { - let db = create_lmdb_database(&path, LMDBConfig::default()).unwrap(); + let consensus_manager = ConsensusManager::builder(network).build(); + let db = create_lmdb_database(&path, LMDBConfig::default(), consensus_manager.clone()).unwrap(); let store = BlockchainDatabase::new( db, consensus_manager.clone(), @@ -1596,13 +1599,14 @@ fn orphan_cleanup_delete_all_orphans() { // Test orphans are present on open { - let db = create_lmdb_database(&path, LMDBConfig::default()).unwrap(); + let consensus_manager = ConsensusManager::builder(Network::LocalNet).build(); + let db = create_lmdb_database(&path, LMDBConfig::default(), consensus_manager.clone()).unwrap(); let store = BlockchainDatabase::new( db, 
consensus_manager.clone(), validators.clone(), config, - DifficultyCalculator::new(consensus_manager.clone(), Default::default()), + DifficultyCalculator::new(consensus_manager, Default::default()), ) .unwrap(); assert_eq!(store.db_read_access().unwrap().orphan_count().unwrap(), 5); @@ -1610,7 +1614,8 @@ fn orphan_cleanup_delete_all_orphans() { // Test orphans cleanup on open { - let db = create_lmdb_database(&path, LMDBConfig::default()).unwrap(); + let consensus_manager = ConsensusManager::builder(Network::LocalNet).build(); + let db = create_lmdb_database(&path, LMDBConfig::default(), consensus_manager.clone()).unwrap(); config.cleanup_orphans_at_startup = true; let store = BlockchainDatabase::new( db, diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index 3dcffbb439..36fa4c5e8d 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -338,7 +338,8 @@ pub fn chain_block( transactions: Vec, consensus: &ConsensusManager, ) -> NewBlockTemplate { - let mut header = BlockHeader::from_previous(&prev_block.header); + let mut header = + BlockHeader::from_previous(&prev_block.header, prev_block.header.validator_node_merkle_root.clone()); header.version = consensus.consensus_constants(header.height).blockchain_version(); let height = header.height; let reward = consensus.get_block_reward_at(height); @@ -366,7 +367,10 @@ pub fn chain_block_with_coinbase( coinbase_kernel: TransactionKernel, consensus: &ConsensusManager, ) -> NewBlockTemplate { - let mut header = BlockHeader::from_previous(prev_block.header()); + let mut header = BlockHeader::from_previous( + prev_block.header(), + prev_block.header().validator_node_merkle_root.clone(), + ); header.version = consensus.consensus_constants(header.height).blockchain_version(); let height = header.height; NewBlockTemplate::from_block( @@ -397,7 +401,10 @@ pub fn chain_block_with_new_coinbase( coinbase_value, 
height + consensus_manager.consensus_constants(height).coinbase_lock_height(), ); - let mut header = BlockHeader::from_previous(prev_block.header()); + let mut header = BlockHeader::from_previous( + prev_block.header(), + prev_block.header().validator_node_merkle_root.clone(), + ); header.height = height; header.version = consensus_manager .consensus_constants(header.height) diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index 87911a2e7b..3caa202711 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -1075,6 +1075,8 @@ async fn consensus_validation_versions() { 0, Default::default(), None, + None, + None, ); let test_params = TestParams::new(); diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index 9a68c97690..86ff23093c 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -31,7 +31,7 @@ use chacha20poly1305::XChaCha20Poly1305; use chrono::NaiveDateTime; use tari_common_types::{ transaction::{ImportStatus, TxId}, - types::PublicKey, + types::{PublicKey, Signature}, }; use tari_comms::types::CommsPublicKey; use tari_core::{ @@ -85,6 +85,12 @@ pub enum TransactionServiceRequest { fee_per_gram: MicroTari, message: String, }, + RegisterValidatorNode { + validator_node_public_key: CommsPublicKey, + validator_node_signature: Signature, + fee_per_gram: MicroTari, + message: String, + }, SendOneSidedTransaction { dest_pubkey: CommsPublicKey, amount: MicroTari, @@ -151,6 +157,12 @@ impl fmt::Display for TransactionServiceRequest { message )), Self::BurnTari { amount, message, .. 
} => f.write_str(&format!("Burning Tari ({}, {})", amount, message)), + Self::RegisterValidatorNode { + validator_node_public_key, + validator_node_signature: _, + fee_per_gram: _, + message, + } => f.write_str(&format!("Registering VN ({}, {})", validator_node_public_key, message)), Self::SendOneSidedTransaction { dest_pubkey, amount, @@ -448,6 +460,28 @@ impl TransactionServiceHandle { } } + pub async fn register_validator_node( + &mut self, + validator_node_public_key: PublicKey, + validator_node_signature: Signature, + fee_per_gram: MicroTari, + message: String, + ) -> Result { + match self + .handle + .call(TransactionServiceRequest::RegisterValidatorNode { + validator_node_public_key, + validator_node_signature, + fee_per_gram, + message, + }) + .await?? + { + TransactionServiceResponse::TransactionSent(tx_id) => Ok(tx_id), + _ => Err(TransactionServiceError::UnexpectedApiResponse), + } + } + pub async fn send_one_sided_transaction( &mut self, dest_pubkey: CommsPublicKey, diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index dd621eea8c..8d9ea4e299 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -35,7 +35,7 @@ use rand::rngs::OsRng; use sha2::Sha256; use tari_common_types::{ transaction::{ImportStatus, TransactionDirection, TransactionStatus, TxId}, - types::{PrivateKey, PublicKey}, + types::{PrivateKey, PublicKey, Signature}, }; use tari_comms::{peer_manager::NodeIdentity, types::CommsPublicKey}; use tari_comms_dht::outbound::OutboundMessageRequester; @@ -634,6 +634,25 @@ where .burn_tari(amount, fee_per_gram, message, transaction_broadcast_join_handles) .await .map(TransactionServiceResponse::TransactionSent), + TransactionServiceRequest::RegisterValidatorNode { + validator_node_public_key, + validator_node_signature, + fee_per_gram, + message, + } => { + let rp = reply_channel.take().expect("Cannot be 
missing"); + self.register_validator_node( + validator_node_public_key, + validator_node_signature, + fee_per_gram, + message, + send_transaction_join_handles, + transaction_broadcast_join_handles, + rp, + ) + .await?; + return Ok(()); + }, TransactionServiceRequest::SendShaAtomicSwapTransaction(dest_pubkey, amount, fee_per_gram, message) => { Ok(TransactionServiceResponse::ShaAtomicSwapTransactionSent( self.send_sha_atomic_swap_transaction( @@ -1444,6 +1463,38 @@ where Ok(tx_id) } + pub async fn register_validator_node( + &mut self, + validator_node_public_key: CommsPublicKey, + validator_node_signature: Signature, + fee_per_gram: MicroTari, + message: String, + join_handles: &mut FuturesUnordered< + JoinHandle>>, + >, + transaction_broadcast_join_handles: &mut FuturesUnordered< + JoinHandle>>, + >, + reply_channel: oneshot::Sender>, + ) -> Result<(), TransactionServiceError> { + let output_features = + OutputFeatures::create_validator_node_registration(validator_node_public_key, validator_node_signature); + let tx_meta = + TransactionMetadata::new_with_features(0.into(), 3, KernelFeatures::create_validator_node_registration()); + self.send_transaction( + self.node_identity.public_key().clone(), + MicroTari::from(1), + output_features, + fee_per_gram, + message, + tx_meta, + join_handles, + transaction_broadcast_join_handles, + reply_channel, + ) + .await + } + /// Sends a one side payment transaction to a recipient /// # Arguments /// 'dest_pubkey': The Comms pubkey of the recipient node diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 579187881d..161a79e7c7 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -1532,7 +1532,15 @@ pub unsafe extern "C" fn output_features_create_from_bytes( let decoded_metadata = (*metadata).0.clone(); - let output_features = TariOutputFeatures::new(decoded_version, output_type, maturity, decoded_metadata, None); + let output_features = 
TariOutputFeatures::new( + decoded_version, + output_type, + maturity, + decoded_metadata, + None, + None, + None, + ); Box::into_raw(Box::new(output_features)) } From 0fd32569c9bb321fc866681301bbb759888d83ae Mon Sep 17 00:00:00 2001 From: Martin Stefcek <35243812+Cifko@users.noreply.github.com> Date: Mon, 12 Sep 2022 10:16:14 +0200 Subject: [PATCH 03/21] feat: add grpc to get shard key for public key (#4654) Description --- Add GRPC call for getting shard key for a public key. --- .../tari_app_grpc/proto/base_node.proto | 11 +++++++++- .../src/grpc/base_node_grpc_server.rs | 20 ++++++++++++++++++- .../comms_interface/comms_request.rs | 6 +++++- .../comms_interface/comms_response.rs | 2 ++ .../comms_interface/inbound_handlers.rs | 4 ++++ .../comms_interface/local_interface.rs | 13 +++++++++++- base_layer/core/src/chain_storage/async_db.rs | 4 +++- .../src/chain_storage/blockchain_backend.rs | 3 ++- .../src/chain_storage/blockchain_database.rs | 7 ++++++- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 17 +++++++++++++++- base_layer/core/src/lib.rs | 12 +++++++++-- .../core/src/test_helpers/blockchain.rs | 6 +++++- 12 files changed, 94 insertions(+), 11 deletions(-) diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index d5cb26a707..a9bbcaf999 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -91,7 +91,7 @@ service BaseNode { // Get VNs rpc GetActiveValidatorNodes(GetActiveValidatorNodesRequest) returns (stream ActiveValidatorNode); rpc GetCommittee(GetCommitteeRequest) returns (GetCommitteeResponse); - + rpc GetShardKey(GetShardKeyRequest) returns (GetShardKeyResponse); } message GetAssetMetadataRequest { @@ -455,4 +455,13 @@ message GetCommitteeRequest { message GetCommitteeResponse { repeated bytes public_key = 1; +} + +message GetShardKeyRequest { + uint64 height = 1; + bytes public_key = 2; +} + +message GetShardKeyResponse { + 
bytes shard_key = 1; } \ No newline at end of file diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index a7b70eb5bc..d10aea2bd2 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -33,7 +33,7 @@ use tari_app_grpc::{ tari_rpc::{CalcType, Sorting}, }; use tari_app_utilities::consts; -use tari_common_types::types::{Commitment, Signature}; +use tari_common_types::types::{Commitment, PublicKey, Signature}; use tari_comms::{Bytes, CommsNode}; use tari_core::{ base_node::{ @@ -1599,6 +1599,24 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Ok(Response::new(tari_rpc::GetCommitteeResponse { public_key: response })) } + async fn get_shard_key( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let report_error_flag = self.report_error_flag(); + let mut handler = self.node_service.clone(); + let public_key = PublicKey::from_bytes(&request.public_key) + .map_err(|e| report_error(report_error_flag, Status::invalid_argument(e.to_string())))?; + let shard_key = handler.get_shard_key(request.height, public_key).await.map_err(|e| { + error!(target: LOG_TARGET, "Error {}", e); + report_error(report_error_flag, Status::internal(e.to_string())) + })?; + Ok(Response::new(tari_rpc::GetShardKeyResponse { + shard_key: shard_key.to_vec(), + })) + } + async fn get_active_validator_nodes( &self, request: Request, diff --git a/base_layer/core/src/base_node/comms_interface/comms_request.rs b/base_layer/core/src/base_node/comms_interface/comms_request.rs index 9c77b2bf1f..817438bc05 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_request.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_request.rs @@ -26,7 +26,7 @@ use std::{ }; use serde::{Deserialize, Serialize}; -use tari_common_types::types::{Commitment, HashOutput, 
PrivateKey, Signature}; +use tari_common_types::types::{Commitment, HashOutput, PrivateKey, PublicKey, Signature}; use tari_utilities::hex::Hex; use crate::{blocks::NewBlockTemplate, chain_storage::MmrTree, proof_of_work::PowAlgorithm}; @@ -58,6 +58,7 @@ pub enum NodeCommsRequest { FetchMempoolTransactionsByExcessSigs { excess_sigs: Vec }, FetchValidatorNodesKeys { height: u64 }, FetchCommittee { height: u64, shard: [u8; 32] }, + GetShardKey { height: u64, public_key: PublicKey }, } #[derive(Debug, Serialize, Deserialize)] @@ -102,6 +103,9 @@ impl Display for NodeCommsRequest { FetchCommittee { height, shard } => { write!(f, "FetchCommittee height ({}), shard({:?})", height, shard) }, + GetShardKey { height, public_key } => { + write!(f, "GetShardKey height ({}), public key ({:?})", height, public_key) + }, } } } diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 9dde44f55c..6298ba8cc7 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -73,6 +73,7 @@ pub enum NodeCommsResponse { }, FetchValidatorNodesKeysResponse(Vec), FetchCommitteeResponse(Vec), + GetShardKeyResponse([u8; 32]), } impl Display for NodeCommsResponse { @@ -113,6 +114,7 @@ impl Display for NodeCommsResponse { FetchOutputsByContractIdResponse { .. 
} => write!(f, "FetchOutputsByContractIdResponse"), FetchValidatorNodesKeysResponse(_) => write!(f, "FetchValidatorNodesKeysResponse"), FetchCommitteeResponse(_) => write!(f, "FetchCommitteeResponse"), + GetShardKeyResponse(_) => write!(f, "GetShardKeyResponse"), } } } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index c5419cc62e..ce6e4f221c 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -373,6 +373,10 @@ where B: BlockchainBackend + 'static let validator_nodes = self.blockchain_db.fetch_committee(height, shard).await?; Ok(NodeCommsResponse::FetchCommitteeResponse(validator_nodes)) }, + NodeCommsRequest::GetShardKey { height, public_key } => { + let shard_key = self.blockchain_db.get_shard_key(height, public_key).await?; + Ok(NodeCommsResponse::GetShardKeyResponse(shard_key)) + }, } } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index b51373f218..be699e725d 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -24,7 +24,7 @@ use std::{ops::RangeInclusive, sync::Arc}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashOutput, Signature}, + types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, }; use tari_service_framework::{reply_channel::SenderService, Service}; use tokio::sync::broadcast; @@ -301,4 +301,15 @@ impl LocalNodeCommsInterface { _ => Err(CommsInterfaceError::UnexpectedApiResponse), } } + + pub async fn get_shard_key(&mut self, height: u64, public_key: PublicKey) -> Result<[u8; 32], CommsInterfaceError> { + match self + .request_sender + .call(NodeCommsRequest::GetShardKey { height, public_key }) + 
.await?? + { + NodeCommsResponse::GetShardKeyResponse(shard_key) => Ok(shard_key), + _ => Err(CommsInterfaceError::UnexpectedApiResponse), + } + } } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 42940705f5..7e03494073 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -26,7 +26,7 @@ use log::*; use rand::{rngs::OsRng, RngCore}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashOutput, Signature}, + types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, }; use tari_utilities::epoch_time::EpochTime; @@ -271,6 +271,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); make_async_fn!(get_validator_nodes_mr() -> Vec, "get_validator_nodes_mr"); + + make_async_fn!(get_shard_key(height:u64, public_key:PublicKey) -> [u8;32], "get_shard_key"); } impl From> for AsyncBlockchainDb { diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 08478a13a3..c7f0d72626 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -4,7 +4,7 @@ use croaring::Bitmap; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, HashOutput, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use super::ActiveValidatorNode; @@ -195,4 +195,5 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError>; + fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError>; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs 
b/base_layer/core/src/chain_storage/blockchain_database.rs index b32a21976b..4dc5195e36 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -36,7 +36,7 @@ use log::*; use serde::{Deserialize, Serialize}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, FixedHash, HashOutput, Signature}, + types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; use tari_mmr::pruned_hashset::PrunedHashSet; use tari_utilities::{epoch_time::EpochTime, hex::Hex, ByteArray}; @@ -848,6 +848,11 @@ where B: BlockchainBackend Ok(mmr.get_merkle_root().unwrap()) } + pub fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { + let db = self.db_read_access()?; + db.get_shard_key(height, public_key) + } + /// Tries to add a block to the longest chain. /// /// The block is added to the longest chain if and only if diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 6cf12d69bd..d3a448a6f7 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -182,7 +182,7 @@ pub fn create_lmdb_database>( .add_database(LMDB_DB_ORPHAN_PARENT_MAP_INDEX, flags | db::DUPSORT) .add_database(LMDB_DB_BAD_BLOCK_LIST, flags) .add_database(LMDB_DB_REORGS, flags | db::INTEGERKEY) - .add_database(LMDB_DB_VALIDATOR_NODES, flags) + .add_database(LMDB_DB_VALIDATOR_NODES, flags | db::DUPSORT) .add_database(LMDB_DB_VALIDATOR_NODES_MAPPING, flags | db::DUPSORT) .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; @@ -2471,6 +2471,21 @@ impl BlockchainBackend for LMDBDatabase { } Ok(result) } + + fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { + let txn = self.read_transaction()?; + let 
validator_nodes: Vec = + lmdb_get_multiple(&txn, &self.validator_nodes, public_key.as_bytes())?; + validator_nodes + .iter() + .find(|a| a.from_height <= height && height <= a.to_height) + .map(|a| a.shard_key) + .ok_or(ChainStorageError::ValueNotFound { + entity: "ShardKey", + field: "public_key", + value: public_key.to_hex(), + }) + } } // Fetch the chain metadata diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index 0deca2dcef..a00af389af 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -108,6 +108,14 @@ mod domain_hashing { ); pub type InputMmrHasherBlake256 = DomainSeparatedHasher; pub type PrunedInputMmr = MerkleMountainRange; +} + +#[cfg(feature = "base_node")] +pub use domain_hashing::*; + +mod validator_domain_hashing { + use tari_crypto::{hash::blake2::Blake256, hash_domain, hashing::DomainSeparatedHasher}; + use tari_mmr::{Hash, MerkleMountainRange}; hash_domain!( ValidatorNodeMmrHashDomain, @@ -117,5 +125,5 @@ mod domain_hashing { pub type ValidatorNodeMmrHasherBlake256 = DomainSeparatedHasher; pub type ValidatorNodeMmr = MerkleMountainRange>; } -#[cfg(feature = "base_node")] -pub use domain_hashing::*; + +pub use validator_domain_hashing::*; diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 92579237d4..c37c03cd05 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -32,7 +32,7 @@ use croaring::Bitmap; use tari_common::configuration::Network; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, HashOutput, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use tari_storage::lmdb_store::LMDBConfig; use tari_test_utils::paths::create_temporary_data_path; @@ -424,6 +424,10 @@ impl BlockchainBackend for TempDatabase { fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { 
self.db.as_ref().unwrap().fetch_committee(height, shard) } + + fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { + self.db.as_ref().unwrap().get_shard_key(height, public_key) + } } pub fn create_chained_blocks>( From b11786157c6e04ddb428e952755e1ec4f5c23814 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Fri, 16 Sep 2022 12:14:29 +0400 Subject: [PATCH 04/21] fix merge issues --- applications/tari_console_wallet/src/automation/commands.rs | 3 +++ .../tari_console_wallet/src/grpc/wallet_grpc_server.rs | 1 + base_layer/wallet/src/transaction_service/handle.rs | 6 ++++-- base_layer/wallet/src/transaction_service/service.rs | 4 ++++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/applications/tari_console_wallet/src/automation/commands.rs b/applications/tari_console_wallet/src/automation/commands.rs index e4701d853e..cab0fe61f2 100644 --- a/applications/tari_console_wallet/src/automation/commands.rs +++ b/applications/tari_console_wallet/src/automation/commands.rs @@ -195,6 +195,7 @@ pub async fn register_validator_node( mut wallet_transaction_service: TransactionServiceHandle, validator_node_public_key: PublicKey, validator_node_signature: Signature, + selection_criteria: UtxoSelectionCriteria, fee_per_gram: MicroTari, message: String, ) -> Result { @@ -202,6 +203,7 @@ pub async fn register_validator_node( .register_validator_node( validator_node_public_key, validator_node_signature, + selection_criteria, fee_per_gram, message, ) @@ -974,6 +976,7 @@ pub async fn command_runner( args.validator_node_public_nonce.into(), RistrettoSecretKey::from_vec(&args.validator_node_signature).unwrap(), ), + UtxoSelectionCriteria::default(), config.fee_per_gram * uT, args.message, ) diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index e5ed5a23a0..8fd7b4445b 100644 --- 
a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -954,6 +954,7 @@ impl wallet_server::Wallet for WalletGrpcServer { .register_validator_node( validator_node_public_key, validator_node_signature, + UtxoSelectionCriteria::default(), request.fee_per_gram.into(), request.message, ) diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index 64f16b2500..7fea7f19e6 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -91,6 +91,7 @@ pub enum TransactionServiceRequest { RegisterValidatorNode { validator_node_public_key: CommsPublicKey, validator_node_signature: Signature, + selection_criteria: UtxoSelectionCriteria, fee_per_gram: MicroTari, message: String, }, @@ -164,9 +165,8 @@ impl fmt::Display for TransactionServiceRequest { Self::BurnTari { amount, message, .. } => f.write_str(&format!("Burning Tari ({}, {})", amount, message)), Self::RegisterValidatorNode { validator_node_public_key, - validator_node_signature: _, - fee_per_gram: _, message, + .. 
} => f.write_str(&format!("Registering VN ({}, {})", validator_node_public_key, message)), Self::SendOneSidedTransaction { dest_pubkey, @@ -471,6 +471,7 @@ impl TransactionServiceHandle { &mut self, validator_node_public_key: PublicKey, validator_node_signature: Signature, + selection_criteria: UtxoSelectionCriteria, fee_per_gram: MicroTari, message: String, ) -> Result { @@ -479,6 +480,7 @@ impl TransactionServiceHandle { .call(TransactionServiceRequest::RegisterValidatorNode { validator_node_public_key, validator_node_signature, + selection_criteria, fee_per_gram, message, }) diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index f4a999e65e..1c80938651 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -650,6 +650,7 @@ where TransactionServiceRequest::RegisterValidatorNode { validator_node_public_key, validator_node_signature, + selection_criteria, fee_per_gram, message, } => { @@ -657,6 +658,7 @@ where self.register_validator_node( validator_node_public_key, validator_node_signature, + selection_criteria, fee_per_gram, message, send_transaction_join_handles, @@ -1472,6 +1474,7 @@ where &mut self, validator_node_public_key: CommsPublicKey, validator_node_signature: Signature, + selection_criteria: UtxoSelectionCriteria, fee_per_gram: MicroTari, message: String, join_handles: &mut FuturesUnordered< @@ -1489,6 +1492,7 @@ where self.send_transaction( self.node_identity.public_key().clone(), MicroTari::from(1), + selection_criteria, output_features, fee_per_gram, message, From 0fef17463faf67ea3a427d4f4a43b1e690acfab7 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Fri, 16 Sep 2022 17:20:52 +0400 Subject: [PATCH 05/21] feat(core): add validator registration sidechain feature (#4690) Description --- - add validator registration sidechain feature - add basic VN signature - add output feature for VN reg - allow vn reg output 
feature in igor Motivation and Context --- DAN vn reg How Has This Been Tested? --- Existing tests --- .../tari_app_grpc/proto/sidechain_types.proto | 12 +- .../tari_app_grpc/proto/transaction.proto | 4 +- .../tari_app_grpc/src/conversions/mod.rs | 2 +- .../src/conversions/output_features.rs | 29 +--- ...chain_features.rs => sidechain_feature.rs} | 60 ++++++-- .../src/grpc/wallet_grpc_server.rs | 10 +- base_layer/core/src/blocks/genesis_block.rs | 11 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 56 ++++---- .../core/src/consensus/consensus_constants.rs | 3 +- base_layer/core/src/covenants/fields.rs | 22 +-- .../src/covenants/filters/fields_hashed_eq.rs | 4 +- base_layer/core/src/covenants/test.rs | 6 +- base_layer/core/src/proto/mod.rs | 2 +- ...features.proto => sidechain_feature.proto} | 12 +- ...chain_features.rs => sidechain_feature.rs} | 62 +++++++-- base_layer/core/src/proto/transaction.proto | 6 +- base_layer/core/src/proto/transaction.rs | 26 ++-- .../transaction_components/output_features.rs | 56 ++++---- .../transaction_components/output_type.rs | 15 +- .../transaction_components/side_chain/mod.rs | 6 +- ...chain_features.rs => sidechain_feature.rs} | 45 ++++-- .../side_chain/validator_node_registration.rs | 129 ++++++++++++++++++ .../transaction_output.rs | 15 +- base_layer/core/tests/mempool.rs | 2 - .../wallet/src/transaction_service/handle.rs | 68 ++++----- .../wallet/src/transaction_service/service.rs | 2 +- base_layer/wallet_ffi/src/lib.rs | 10 +- .../helpers/transactionBuilder.js | 2 +- .../helpers/transactionOutputHashing.js | 2 +- 29 files changed, 439 insertions(+), 240 deletions(-) rename applications/tari_app_grpc/src/conversions/{sidechain_features.rs => sidechain_feature.rs} (70%) rename base_layer/core/src/proto/{sidechain_features.proto => sidechain_feature.proto} (70%) rename base_layer/core/src/proto/{sidechain_features.rs => sidechain_feature.rs} (69%) rename 
base_layer/core/src/transactions/transaction_components/side_chain/{sidechain_features.rs => sidechain_feature.rs} (75%) create mode 100644 base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs diff --git a/applications/tari_app_grpc/proto/sidechain_types.proto b/applications/tari_app_grpc/proto/sidechain_types.proto index 759732c2d1..421a1bd8f0 100644 --- a/applications/tari_app_grpc/proto/sidechain_types.proto +++ b/applications/tari_app_grpc/proto/sidechain_types.proto @@ -25,12 +25,18 @@ package tari.rpc; import "types.proto"; -message SideChainFeatures { - oneof side_chain_features { - TemplateRegistration template_registration = 1; +message SideChainFeature { + oneof side_chain_feature { + ValidatorNodeRegistration validator_node_registration = 1; + TemplateRegistration template_registration = 2; } } +message ValidatorNodeRegistration { + bytes public_key = 1; + Signature signature = 2; +} + message TemplateRegistration { bytes author_public_key = 1; Signature author_signature = 2; diff --git a/applications/tari_app_grpc/proto/transaction.proto b/applications/tari_app_grpc/proto/transaction.proto index 4e78b71378..8d21963fb9 100644 --- a/applications/tari_app_grpc/proto/transaction.proto +++ b/applications/tari_app_grpc/proto/transaction.proto @@ -123,9 +123,7 @@ message OutputFeatures { // require a min maturity of the Coinbase_lock_height, this should be checked on receiving new blocks. 
uint64 maturity = 3; bytes metadata = 4; - SideChainFeatures sidechain_features = 5; - bytes validator_node_public_key = 6; - Signature validator_node_signature = 7; + SideChainFeature sidechain_feature = 5; } diff --git a/applications/tari_app_grpc/src/conversions/mod.rs b/applications/tari_app_grpc/src/conversions/mod.rs index 69380b8d29..eb58f7d42c 100644 --- a/applications/tari_app_grpc/src/conversions/mod.rs +++ b/applications/tari_app_grpc/src/conversions/mod.rs @@ -33,7 +33,7 @@ mod new_block_template; mod output_features; mod peer; mod proof_of_work; -mod sidechain_features; +mod sidechain_feature; mod signature; mod transaction; mod transaction_input; diff --git a/applications/tari_app_grpc/src/conversions/output_features.rs b/applications/tari_app_grpc/src/conversions/output_features.rs index 50b4603309..a1dc0633de 100644 --- a/applications/tari_app_grpc/src/conversions/output_features.rs +++ b/applications/tari_app_grpc/src/conversions/output_features.rs @@ -22,14 +22,12 @@ use std::convert::{TryFrom, TryInto}; -use tari_common_types::types::PublicKey; use tari_core::transactions::transaction_components::{ OutputFeatures, OutputFeaturesVersion, OutputType, - SideChainFeatures, + SideChainFeature, }; -use tari_utilities::ByteArray; use crate::tari_rpc as grpc; @@ -37,10 +35,10 @@ impl TryFrom for OutputFeatures { type Error = String; fn try_from(features: grpc::OutputFeatures) -> Result { - let sidechain_features = features - .sidechain_features - .and_then(|f| f.side_chain_features) - .map(SideChainFeatures::try_from) + let sidechain_feature = features + .sidechain_feature + .and_then(|f| f.side_chain_feature) + .map(SideChainFeature::try_from) .transpose()?; let output_type = features @@ -48,9 +46,6 @@ impl TryFrom for OutputFeatures { .try_into() .map_err(|_| "Invalid output type: overflow")?; - let validator_node_public_key = PublicKey::from_vec(&features.validator_node_public_key).ok(); - let validator_node_signature = 
features.validator_node_signature.map(|s| s.try_into()).transpose()?; - Ok(OutputFeatures::new( OutputFeaturesVersion::try_from( u8::try_from(features.version).map_err(|_| "Invalid version: overflowed u8")?, @@ -58,9 +53,7 @@ impl TryFrom for OutputFeatures { OutputType::from_byte(output_type).ok_or_else(|| "Invalid or unrecognised output type".to_string())?, features.maturity, features.metadata, - sidechain_features, - validator_node_public_key, - validator_node_signature, + sidechain_feature, )) } } @@ -72,15 +65,7 @@ impl From for grpc::OutputFeatures { output_type: u32::from(features.output_type.as_byte()), maturity: features.maturity, metadata: features.metadata, - sidechain_features: features.sidechain_features.map(Into::into), - validator_node_public_key: features - .validator_node_public_key - .map(|pk| pk.as_bytes().to_vec()) - .unwrap_or_default(), - validator_node_signature: features.validator_node_signature.map(|s| grpc::Signature { - public_nonce: Vec::from(s.get_public_nonce().as_bytes()), - signature: Vec::from(s.get_signature().as_bytes()), - }), + sidechain_feature: features.sidechain_feature.map(Into::into), } } } diff --git a/applications/tari_app_grpc/src/conversions/sidechain_features.rs b/applications/tari_app_grpc/src/conversions/sidechain_feature.rs similarity index 70% rename from applications/tari_app_grpc/src/conversions/sidechain_features.rs rename to applications/tari_app_grpc/src/conversions/sidechain_feature.rs index 6cb6a04956..4cd7541452 100644 --- a/applications/tari_app_grpc/src/conversions/sidechain_features.rs +++ b/applications/tari_app_grpc/src/conversions/sidechain_feature.rs @@ -25,41 +25,77 @@ use std::convert::{TryFrom, TryInto}; use tari_common_types::types::{PublicKey, Signature}; use tari_core::{ consensus::MaxSizeString, - transactions::transaction_components::{BuildInfo, CodeTemplateRegistration, SideChainFeatures, TemplateType}, + transactions::transaction_components::{ + BuildInfo, + CodeTemplateRegistration, + 
SideChainFeature, + TemplateType, + ValidatorNodeRegistration, + }, }; use tari_utilities::ByteArray; use crate::tari_rpc as grpc; -//---------------------------------- SideChainFeatures --------------------------------------------// -impl From for grpc::SideChainFeatures { - fn from(value: SideChainFeatures) -> Self { +//---------------------------------- SideChainFeature --------------------------------------------// +impl From for grpc::SideChainFeature { + fn from(value: SideChainFeature) -> Self { value.into() } } -impl From for grpc::side_chain_features::SideChainFeatures { - fn from(value: SideChainFeatures) -> Self { +impl From for grpc::side_chain_feature::SideChainFeature { + fn from(value: SideChainFeature) -> Self { match value { - SideChainFeatures::TemplateRegistration(template_reg) => { - grpc::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg.into()) + SideChainFeature::ValidatorNodeRegistration(template_reg) => { + grpc::side_chain_feature::SideChainFeature::ValidatorNodeRegistration(template_reg.into()) + }, + SideChainFeature::TemplateRegistration(template_reg) => { + grpc::side_chain_feature::SideChainFeature::TemplateRegistration(template_reg.into()) }, } } } -impl TryFrom for SideChainFeatures { +impl TryFrom for SideChainFeature { type Error = String; - fn try_from(features: grpc::side_chain_features::SideChainFeatures) -> Result { + fn try_from(features: grpc::side_chain_feature::SideChainFeature) -> Result { match features { - grpc::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg) => { - Ok(SideChainFeatures::TemplateRegistration(template_reg.try_into()?)) + grpc::side_chain_feature::SideChainFeature::ValidatorNodeRegistration(vn_reg) => { + Ok(SideChainFeature::ValidatorNodeRegistration(vn_reg.try_into()?)) + }, + grpc::side_chain_feature::SideChainFeature::TemplateRegistration(template_reg) => { + Ok(SideChainFeature::TemplateRegistration(template_reg.try_into()?)) }, } } } +// 
-------------------------------- ValidatorNodeRegistration -------------------------------- // +impl TryFrom for ValidatorNodeRegistration { + type Error = String; + + fn try_from(value: grpc::ValidatorNodeRegistration) -> Result { + Ok(Self { + public_key: PublicKey::from_bytes(&value.public_key).map_err(|e| e.to_string())?, + signature: value + .signature + .map(Signature::try_from) + .ok_or("signature not provided")??, + }) + } +} + +impl From for grpc::ValidatorNodeRegistration { + fn from(value: ValidatorNodeRegistration) -> Self { + Self { + public_key: value.public_key.to_vec(), + signature: Some(value.signature.into()), + } + } +} + // -------------------------------- TemplateRegistration -------------------------------- // impl TryFrom for CodeTemplateRegistration { type Error = String; diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index 8fd7b4445b..04cdaa3331 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -87,13 +87,7 @@ use tari_common_types::{ use tari_comms::{multiaddr::Multiaddr, types::CommsPublicKey, CommsNode}; use tari_core::transactions::{ tari_amount::{MicroTari, T}, - transaction_components::{ - CodeTemplateRegistration, - OutputFeatures, - OutputType, - SideChainFeatures, - UnblindedOutput, - }, + transaction_components::{CodeTemplateRegistration, OutputFeatures, OutputType, SideChainFeature, UnblindedOutput}, }; use tari_utilities::{hex::Hex, ByteArray}; use tari_wallet::{ @@ -912,7 +906,7 @@ impl wallet_server::Wallet for WalletGrpcServer { let output = output_manager .create_output_with_features(1 * T, OutputFeatures { output_type: OutputType::CodeTemplateRegistration, - sidechain_features: Some(SideChainFeatures::TemplateRegistration(template_registration)), + sidechain_feature: 
Some(SideChainFeature::TemplateRegistration(template_registration)), ..Default::default() }) .await diff --git a/base_layer/core/src/blocks/genesis_block.rs b/base_layer/core/src/blocks/genesis_block.rs index 9374da0dcf..d82534b9d8 100644 --- a/base_layer/core/src/blocks/genesis_block.rs +++ b/base_layer/core/src/blocks/genesis_block.rs @@ -39,7 +39,6 @@ use crate::{ EncryptedValue, KernelFeatures, OutputFeatures, - OutputFeaturesVersion, OutputType, TransactionKernel, TransactionKernelVersion, @@ -278,15 +277,7 @@ fn get_esmeralda_genesis_block_raw() -> Block { ); let coinbase = TransactionOutput::new( TransactionOutputVersion::get_current_version(), - OutputFeatures { - version: OutputFeaturesVersion::get_current_version(), - output_type: OutputType::Coinbase, - maturity: 6, - metadata: Vec::new(), - sidechain_features: None, - validator_node_public_key: None, - validator_node_signature: None, - }, + OutputFeatures::create_coinbase(6), Commitment::from_hex("46eec110cf173557e149d453734f6707fea9ed27c9a0dd0276bb43eb1f6e3322").unwrap(), 
BulletRangeProof::from_hex("01b05c72ea976764b8f9a56bb302990829dacae5f9b2d26e028e97c66a7ac3a14c7809ea5da55fb1e88a16195619d67381f28181b1ad7e0c9661c726e1c56ad7770eb75e314b51a89d716a2dd7737b26a40d8e956911ff45d4c47a1164edae5505aaca58ec6f95762daaa02545dc2ce502e9892d98422849352b6dbcc3322b6b1adae4d33461dd8b5b75b4a9bf52b3e3b00ef7579b16e59f17f43c45ea5e82db063c23ce2d214f93a211cd8f7a3cb220071c68ba3a348b082c3eebb8b6d6339d18decd0372b82e762a9f16e5e7ed23b21c1025ba093b676c55cfa603d888bcc315bc95e8e4bebad9ec51124aab0fe4a8abfc9053db1fb1560c5214b9485826e0127448a2aa84c25f17c5833b15bf434903db7a676bfb11ace2ece255b018428457122da112d481c8a742f916cca069b874e6762248fbb00fa6895f7d4b8a9a8829164baf6ad1d3ad5775c679766ead9da782977fdeb5af7e4b2eb6828e87551179f888ed1c598dd1b81c46b335fb4a827fadf7669e007ff4ed6f260d0bde3eb42282983f58bb0f11a44e064a80503154f4cdb76537192411b2755c2b453b90b3754e9253e64837f15c933b7a479fbb9b1ea8d45364fff67b4aa71ecf67f16c497b5846ff50aaae882e71ac5e6f3ba29189d03da3ed91511074747db413a3e8f90fd9b8fa0751e8ecde29324f4fe8d9023405e33e0d07741056941f9593e8931d0c22553af6447d5c38c762e45afaa89cc11c6843e77430cea44b41fcef0ad11d08d3be1f279ee791fd3b4a8b39d2889a51a4cb2a81885ef6cab119e8de29908a0e").unwrap(), // A default script can never be spent, intentionally diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index d3a448a6f7..ea43b223fc 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -96,13 +96,7 @@ use crate::{ consensus::{ConsensusManager, DomainSeparatedConsensusHasher}, transactions::{ aggregated_body::AggregateBody, - transaction_components::{ - SpentOutput, - TransactionError, - TransactionInput, - TransactionKernel, - TransactionOutput, - }, + transaction_components::{TransactionError, TransactionInput, TransactionKernel, TransactionOutput}, TransactionHashDomain, }, MutablePrunedOutputMmr, @@ -1268,16 +1262,18 @@ impl LMDBDatabase { None 
=> return Err(ChainStorageError::UnspendableInput), }, }; - if let SpentOutput::OutputData { - version: _, features, .. - } = &input.spent_output + + let features = input.features()?; + if let Some(vn_reg) = features + .sidechain_feature + .as_ref() + .and_then(|f| f.validator_node_registration()) { - if let Some(validator_node_public_key) = &features.validator_node_public_key { - let read_txn = self.read_transaction()?; - let shard_key = self.get_vn_mapping(&read_txn, validator_node_public_key)?; - self.delete_validator_node(txn, validator_node_public_key, &shard_key)?; - } + let read_txn = self.read_transaction()?; + let shard_key = self.get_vn_mapping(&read_txn, &vn_reg.public_key)?; + self.delete_validator_node(txn, &vn_reg.public_key, &shard_key)?; } + if !output_mmr.delete(index) { return Err(ChainStorageError::InvalidOperation(format!( "Could not delete index {} from the output MMR", @@ -1296,9 +1292,16 @@ impl LMDBDatabase { mmr_count )) })?; - if let Some(validator_node_public_key) = &output.features.validator_node_public_key { + + if let Some(vn_reg) = output + .features + .sidechain_feature + .as_ref() + .and_then(|f| f.validator_node_registration()) + { let shard_key = DomainSeparatedConsensusHasher::::new("validator_node_root") - .chain(&validator_node_public_key.as_bytes()) + // + .chain(vn_reg) .chain(&block_hash) .finalize(); @@ -1310,7 +1313,7 @@ impl LMDBDatabase { self.consensus_manager .consensus_constants(header.height) .get_validator_node_timeout(), - public_key: validator_node_public_key.clone(), + public_key: vn_reg.public_key.clone(), }; self.insert_validator_node(txn, &validator_node)?; } @@ -1563,7 +1566,7 @@ impl LMDBDatabase { lmdb_insert( txn, &self.validator_nodes, - &validator_node.public_key.to_vec(), + validator_node.public_key.as_bytes(), validator_node, "validator_nodes", )?; @@ -1571,13 +1574,19 @@ impl LMDBDatabase { txn, &self.validator_nodes_mapping, &validator_node.shard_key, - &validator_node.public_key.to_vec(), + 
&validator_node.public_key.as_bytes(), "validator_nodes_mapping", ) } fn get_vn_mapping(&self, txn: &ReadTransaction<'_>, public_key: &PublicKey) -> Result<[u8; 32], ChainStorageError> { - let x: ActiveValidatorNode = lmdb_get(txn, &self.validator_nodes, &public_key.to_vec())?.unwrap(); + let x: ActiveValidatorNode = lmdb_get(txn, &self.validator_nodes, public_key.as_bytes())?.ok_or_else(|| { + ChainStorageError::ValueNotFound { + entity: "ActiveValidatorNode", + field: "public_key", + value: public_key.to_hex(), + } + })?; Ok(x.shard_key) } @@ -1587,8 +1596,9 @@ impl LMDBDatabase { public_key: &PublicKey, shard_key: &[u8; 32], ) -> Result<(), ChainStorageError> { - lmdb_delete(txn, &self.validator_nodes, &public_key.to_vec(), "validator_nodes")?; - lmdb_delete(txn, &self.validator_nodes, shard_key, "validator_nodes_mapping") + lmdb_delete(txn, &self.validator_nodes, public_key.as_bytes(), "validator_nodes")?; + lmdb_delete(txn, &self.validator_nodes, shard_key, "validator_nodes_mapping")?; + Ok(()) } fn fetch_output_in_txn( diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index eed45fd849..0b658722cc 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -413,7 +413,8 @@ impl ConsensusConstants { input_version_range, output_version_range, kernel_version_range, - permitted_output_types: Self::current_permitted_output_types(), + // igor is the first network to support the new output types + permitted_output_types: OutputType::all(), validator_node_timeout: 0, }] } diff --git a/base_layer/core/src/covenants/fields.rs b/base_layer/core/src/covenants/fields.rs index a66ca3b47a..570ec2de91 100644 --- a/base_layer/core/src/covenants/fields.rs +++ b/base_layer/core/src/covenants/fields.rs @@ -92,7 +92,7 @@ impl OutputField { Features => &output.features as &dyn Any, FeaturesOutputType => &output.features.output_type as &dyn Any, 
FeaturesMaturity => &output.features.maturity as &dyn Any, - FeaturesSideChainFeatures => &output.features.sidechain_features as &dyn Any, + FeaturesSideChainFeatures => &output.features.sidechain_feature as &dyn Any, FeaturesMetadata => &output.features.metadata as &dyn Any, }; val.downcast_ref::() @@ -109,7 +109,7 @@ impl OutputField { Features => output.features.to_consensus_bytes(), FeaturesOutputType => output.features.output_type.to_consensus_bytes(), FeaturesMaturity => output.features.maturity.to_consensus_bytes(), - FeaturesSideChainFeatures => output.features.sidechain_features.to_consensus_bytes(), + FeaturesSideChainFeatures => output.features.sidechain_feature.to_consensus_bytes(), FeaturesMetadata => output.features.metadata.to_consensus_bytes(), } } @@ -145,7 +145,7 @@ impl OutputField { .unwrap_or(false), FeaturesSideChainFeatures => input .features() - .map(|features| features.sidechain_features == output.features.sidechain_features) + .map(|features| features.sidechain_feature == output.features.sidechain_feature) .unwrap_or(false), FeaturesMetadata => input .features() @@ -228,7 +228,7 @@ impl OutputField { #[allow(dead_code)] #[allow(dead_code)] - pub fn features_sidechain_features() -> Self { + pub fn features_sidechain_feature() -> Self { OutputField::FeaturesSideChainFeatures } @@ -249,7 +249,7 @@ impl Display for OutputField { Covenant => write!(f, "field::covenant"), Features => write!(f, "field::features"), FeaturesOutputType => write!(f, "field::features_flags"), - FeaturesSideChainFeatures => write!(f, "field::features_sidechain_features"), + FeaturesSideChainFeatures => write!(f, "field::features_sidechain_feature"), FeaturesMetadata => write!(f, "field::features_metadata"), FeaturesMaturity => write!(f, "field::features_maturity"), } @@ -342,7 +342,7 @@ mod test { use super::*; use crate::{ covenant, - covenants::test::{create_input, create_outputs, make_sample_sidechain_features}, + covenants::test::{create_input, create_outputs, 
make_sample_sidechain_feature}, transactions::{ test_helpers::UtxoTestParams, transaction_components::{OutputFeatures, OutputType, SpentOutput}, @@ -358,10 +358,10 @@ mod test { #[test] fn it_returns_true_if_eq() { - let side_chain_features = make_sample_sidechain_features(); + let side_chain_features = make_sample_sidechain_feature(); let output = create_outputs(1, UtxoTestParams { features: OutputFeatures { - sidechain_features: Some(side_chain_features), + sidechain_feature: Some(side_chain_features), ..Default::default() }, script: script![Drop Nop], @@ -380,7 +380,7 @@ mod test { .is_eq(&output, &output.features.output_type) .unwrap()); assert!(OutputField::FeaturesSideChainFeatures - .is_eq(&output, output.features.sidechain_features.as_ref().unwrap()) + .is_eq(&output, output.features.sidechain_feature.as_ref().unwrap()) .unwrap()); assert!(OutputField::FeaturesMetadata .is_eq(&output, &output.features.metadata) @@ -392,10 +392,10 @@ mod test { #[test] fn it_returns_false_if_not_eq() { - let side_chain_features = make_sample_sidechain_features(); + let side_chain_features = make_sample_sidechain_feature(); let output = create_outputs(1, UtxoTestParams { features: OutputFeatures { - sidechain_features: Some(side_chain_features), + sidechain_feature: Some(side_chain_features), ..Default::default() }, script: script![Drop Nop], diff --git a/base_layer/core/src/covenants/filters/fields_hashed_eq.rs b/base_layer/core/src/covenants/filters/fields_hashed_eq.rs index 2fda7c00a7..8a72b58c71 100644 --- a/base_layer/core/src/covenants/filters/fields_hashed_eq.rs +++ b/base_layer/core/src/covenants/filters/fields_hashed_eq.rs @@ -49,7 +49,7 @@ mod test { covenant, covenants::{ filters::test::setup_filter_test, - test::{create_input, make_sample_sidechain_features}, + test::{create_input, make_sample_sidechain_feature}, BaseLayerCovenantsDomain, COVENANTS_FIELD_HASHER_LABEL, }, @@ -60,7 +60,7 @@ mod test { fn it_filters_outputs_with_fields_that_hash_to_given_hash() { let 
features = OutputFeatures { maturity: 42, - sidechain_features: Some(make_sample_sidechain_features()), + sidechain_feature: Some(make_sample_sidechain_feature()), ..Default::default() }; let mut hasher = Challenge::new(); diff --git a/base_layer/core/src/covenants/test.rs b/base_layer/core/src/covenants/test.rs index a549a1d6ab..677444a73d 100644 --- a/base_layer/core/src/covenants/test.rs +++ b/base_layer/core/src/covenants/test.rs @@ -29,7 +29,7 @@ use crate::{ transaction_components::{ BuildInfo, CodeTemplateRegistration, - SideChainFeatures, + SideChainFeature, TemplateType, TransactionInput, TransactionOutput, @@ -58,7 +58,7 @@ pub fn create_context<'a>(covenant: &Covenant, input: &'a TransactionInput, bloc CovenantContext::new(tokens.into(), input, block_height) } -pub fn make_sample_sidechain_features() -> SideChainFeatures { +pub fn make_sample_sidechain_feature() -> SideChainFeature { let template_reg = CodeTemplateRegistration { author_public_key: Default::default(), author_signature: Default::default(), @@ -72,5 +72,5 @@ pub fn make_sample_sidechain_features() -> SideChainFeatures { binary_sha: Default::default(), binary_url: "https://github.com/tari-project/tari.git".try_into().unwrap(), }; - SideChainFeatures::TemplateRegistration(template_reg) + SideChainFeature::TemplateRegistration(template_reg) } diff --git a/base_layer/core/src/proto/mod.rs b/base_layer/core/src/proto/mod.rs index 9294f73535..d78b990bb6 100644 --- a/base_layer/core/src/proto/mod.rs +++ b/base_layer/core/src/proto/mod.rs @@ -52,6 +52,6 @@ mod block; #[cfg(any(feature = "base_node", feature = "base_node_proto"))] mod block_header; #[cfg(any(feature = "base_node", feature = "base_node_proto"))] -mod sidechain_features; +mod sidechain_feature; #[cfg(any(feature = "base_node", feature = "base_node_proto"))] mod utils; diff --git a/base_layer/core/src/proto/sidechain_features.proto b/base_layer/core/src/proto/sidechain_feature.proto similarity index 70% rename from 
base_layer/core/src/proto/sidechain_features.proto rename to base_layer/core/src/proto/sidechain_feature.proto index 96df8f61ad..4e3d1d9487 100644 --- a/base_layer/core/src/proto/sidechain_features.proto +++ b/base_layer/core/src/proto/sidechain_feature.proto @@ -7,12 +7,18 @@ import "types.proto"; package tari.types; -message SideChainFeatures { - oneof side_chain_features { - TemplateRegistration template_registration = 1; +message SideChainFeature { + oneof side_chain_feature { + ValidatorNodeRegistration validator_node_registration = 1; + TemplateRegistration template_registration = 2; } } +message ValidatorNodeRegistration { + bytes public_key = 1; + Signature signature = 2; +} + message TemplateRegistration { bytes author_public_key = 1; Signature author_signature = 2; diff --git a/base_layer/core/src/proto/sidechain_features.rs b/base_layer/core/src/proto/sidechain_feature.rs similarity index 69% rename from base_layer/core/src/proto/sidechain_features.rs rename to base_layer/core/src/proto/sidechain_feature.rs index 8002346a6c..3540f6b903 100644 --- a/base_layer/core/src/proto/sidechain_features.rs +++ b/base_layer/core/src/proto/sidechain_feature.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -//! Impls for sidechain_features proto +//! 
Impls for sidechain_feature proto use std::convert::{TryFrom, TryInto}; @@ -30,38 +30,74 @@ use tari_utilities::ByteArray; use crate::{ consensus::MaxSizeString, proto, - transactions::transaction_components::{BuildInfo, CodeTemplateRegistration, SideChainFeatures, TemplateType}, + transactions::transaction_components::{ + BuildInfo, + CodeTemplateRegistration, + SideChainFeature, + TemplateType, + ValidatorNodeRegistration, + }, }; -//---------------------------------- SideChainFeatures --------------------------------------------// -impl From for proto::types::SideChainFeatures { - fn from(value: SideChainFeatures) -> Self { +//---------------------------------- SideChainFeature --------------------------------------------// +impl From for proto::types::SideChainFeature { + fn from(value: SideChainFeature) -> Self { value.into() } } -impl From for proto::types::side_chain_features::SideChainFeatures { - fn from(value: SideChainFeatures) -> Self { +impl From for proto::types::side_chain_feature::SideChainFeature { + fn from(value: SideChainFeature) -> Self { match value { - SideChainFeatures::TemplateRegistration(template_reg) => { - proto::types::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg.into()) + SideChainFeature::ValidatorNodeRegistration(template_reg) => { + proto::types::side_chain_feature::SideChainFeature::ValidatorNodeRegistration(template_reg.into()) + }, + SideChainFeature::TemplateRegistration(template_reg) => { + proto::types::side_chain_feature::SideChainFeature::TemplateRegistration(template_reg.into()) }, } } } -impl TryFrom for SideChainFeatures { +impl TryFrom for SideChainFeature { type Error = String; - fn try_from(features: proto::types::side_chain_features::SideChainFeatures) -> Result { + fn try_from(features: proto::types::side_chain_feature::SideChainFeature) -> Result { match features { - proto::types::side_chain_features::SideChainFeatures::TemplateRegistration(template_reg) => { - 
Ok(SideChainFeatures::TemplateRegistration(template_reg.try_into()?)) + proto::types::side_chain_feature::SideChainFeature::ValidatorNodeRegistration(vn_reg) => { + Ok(SideChainFeature::ValidatorNodeRegistration(vn_reg.try_into()?)) + }, + proto::types::side_chain_feature::SideChainFeature::TemplateRegistration(template_reg) => { + Ok(SideChainFeature::TemplateRegistration(template_reg.try_into()?)) }, } } } +// -------------------------------- ValidatorNodeRegistration -------------------------------- // +impl TryFrom for ValidatorNodeRegistration { + type Error = String; + + fn try_from(value: proto::types::ValidatorNodeRegistration) -> Result { + Ok(Self { + public_key: PublicKey::from_bytes(&value.public_key).map_err(|e| e.to_string())?, + signature: value + .signature + .map(Signature::try_from) + .ok_or("signature not provided")??, + }) + } +} + +impl From for proto::types::ValidatorNodeRegistration { + fn from(value: ValidatorNodeRegistration) -> Self { + Self { + public_key: value.public_key.to_vec(), + signature: Some(value.signature.into()), + } + } +} + // -------------------------------- TemplateRegistration -------------------------------- // impl TryFrom for CodeTemplateRegistration { type Error = String; diff --git a/base_layer/core/src/proto/transaction.proto b/base_layer/core/src/proto/transaction.proto index 77aefa0d8a..a34aba7226 100644 --- a/base_layer/core/src/proto/transaction.proto +++ b/base_layer/core/src/proto/transaction.proto @@ -4,7 +4,7 @@ syntax = "proto3"; import "types.proto"; -import "sidechain_features.proto"; +import "sidechain_feature.proto"; package tari.types; @@ -98,9 +98,7 @@ message OutputFeatures { // require a min maturity of the Coinbase_lock_height, this should be checked on receiving new blocks. 
uint64 maturity = 3; bytes metadata = 4; - SideChainFeatures sidechain_features = 5; - bytes validator_node_public_key = 6; - Signature validator_node_signature = 7; + SideChainFeature sidechain_feature = 5; } // The components of the block or transaction. The same struct can be used for either, since in Mimblewimble, diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index 123e15c49b..700e7cb3d5 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -27,7 +27,7 @@ use std::{ sync::Arc, }; -use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey, Signature}; +use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey}; use tari_crypto::tari_utilities::{ByteArray, ByteArrayError}; use tari_script::{ExecutionStack, TariScript}; use tari_utilities::convert::try_convert_all; @@ -44,7 +44,7 @@ use crate::{ OutputFeatures, OutputFeaturesVersion, OutputType, - SideChainFeatures, + SideChainFeature, Transaction, TransactionInput, TransactionInputVersion, @@ -294,15 +294,12 @@ impl TryFrom for OutputFeatures { type Error = String; fn try_from(features: proto::types::OutputFeatures) -> Result { - let sidechain_features = features - .sidechain_features - .and_then(|features| features.side_chain_features) - .map(SideChainFeatures::try_from) + let sidechain_feature = features + .sidechain_feature + .and_then(|features| features.side_chain_feature) + .map(SideChainFeature::try_from) .transpose()?; - let validator_node_public_key = PublicKey::from_bytes(features.validator_node_public_key.as_bytes()).ok(); - let validator_node_signature = features.validator_node_signature.map(Signature::try_from).transpose()?; - let flags = features .flags .try_into() @@ -315,9 +312,7 @@ impl TryFrom for OutputFeatures { OutputType::from_byte(flags).ok_or_else(|| "Invalid or unrecognised output type".to_string())?, features.maturity, 
features.metadata, - sidechain_features, - validator_node_public_key, - validator_node_signature, + sidechain_feature, )) } } @@ -329,12 +324,7 @@ impl From for proto::types::OutputFeatures { maturity: features.maturity, metadata: features.metadata, version: features.version as u32, - sidechain_features: features.sidechain_features.map(Into::into), - validator_node_public_key: features - .validator_node_public_key - .map(|pk| pk.as_bytes().to_vec()) - .unwrap_or_default(), - validator_node_signature: features.validator_node_signature.map(Into::into), + sidechain_feature: features.sidechain_feature.map(Into::into), } } } diff --git a/base_layer/core/src/transactions/transaction_components/output_features.rs b/base_layer/core/src/transactions/transaction_components/output_features.rs index fd968263a9..e7c75d94de 100644 --- a/base_layer/core/src/transactions/transaction_components/output_features.rs +++ b/base_layer/core/src/transactions/transaction_components/output_features.rs @@ -34,7 +34,12 @@ use tari_common_types::types::{PublicKey, Signature}; use super::OutputFeaturesVersion; use crate::{ consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, MaxSizeBytes}, - transactions::transaction_components::{side_chain::SideChainFeatures, CodeTemplateRegistration, OutputType}, + transactions::transaction_components::{ + side_chain::SideChainFeature, + CodeTemplateRegistration, + OutputType, + ValidatorNodeRegistration, + }, }; /// Options for UTXO's @@ -47,9 +52,7 @@ pub struct OutputFeatures { /// require a min maturity of the Coinbase_lock_height, this should be checked on receiving new blocks. 
pub maturity: u64, pub metadata: Vec, - pub sidechain_features: Option, - pub validator_node_public_key: Option, - pub validator_node_signature: Option, + pub sidechain_feature: Option, } impl OutputFeatures { @@ -58,18 +61,14 @@ impl OutputFeatures { output_type: OutputType, maturity: u64, metadata: Vec, - sidechain_features: Option, - validator_node_public_key: Option, - validator_node_signature: Option, + sidechain_feature: Option, ) -> OutputFeatures { OutputFeatures { version, output_type, maturity, metadata, - sidechain_features, - validator_node_public_key, - validator_node_signature, + sidechain_feature, } } @@ -77,18 +76,14 @@ impl OutputFeatures { flags: OutputType, maturity: u64, metadata: Vec, - sidechain_features: Option, - validator_node_public_key: Option, - validator_node_signature: Option, + sidechain_feature: Option, ) -> OutputFeatures { OutputFeatures::new( OutputFeaturesVersion::get_current_version(), flags, maturity, metadata, - sidechain_features, - validator_node_public_key, - validator_node_signature, + sidechain_feature, ) } @@ -112,18 +107,21 @@ impl OutputFeatures { pub fn for_template_registration(template_registration: CodeTemplateRegistration) -> OutputFeatures { OutputFeatures { output_type: OutputType::CodeTemplateRegistration, - sidechain_features: Some(SideChainFeatures::TemplateRegistration(template_registration)), + sidechain_feature: Some(SideChainFeature::TemplateRegistration(template_registration)), ..Default::default() } } - pub fn create_validator_node_registration( + pub fn for_validator_node_registration( validator_node_public_key: PublicKey, validator_node_signature: Signature, ) -> OutputFeatures { OutputFeatures { - validator_node_public_key: Some(validator_node_public_key), - validator_node_signature: Some(validator_node_signature), + output_type: OutputType::ValidatorNodeRegistration, + sidechain_feature: Some(SideChainFeature::ValidatorNodeRegistration(ValidatorNodeRegistration { + public_key: 
validator_node_public_key, + signature: validator_node_signature, + })), ..Default::default() } } @@ -138,7 +136,7 @@ impl ConsensusEncoding for OutputFeatures { self.version.consensus_encode(writer)?; self.maturity.consensus_encode(writer)?; self.output_type.consensus_encode(writer)?; - self.sidechain_features.consensus_encode(writer)?; + self.sidechain_feature.consensus_encode(writer)?; self.metadata.consensus_encode(writer)?; Ok(()) @@ -154,26 +152,22 @@ impl ConsensusDecoding for OutputFeatures { let version = OutputFeaturesVersion::consensus_decode(reader)?; let maturity = u64::consensus_decode(reader)?; let flags = OutputType::consensus_decode(reader)?; - let sidechain_features = ConsensusDecoding::consensus_decode(reader)?; + let sidechain_feature = ConsensusDecoding::consensus_decode(reader)?; const MAX_METADATA_SIZE: usize = 1024; let metadata = as ConsensusDecoding>::consensus_decode(reader)?; - let validator_node_public_key = None; - let validator_node_signature = None; Ok(Self { version, output_type: flags, maturity, - sidechain_features, + sidechain_feature, metadata: metadata.into(), - validator_node_public_key, - validator_node_signature, }) } } impl Default for OutputFeatures { fn default() -> Self { - OutputFeatures::new_current_version(OutputType::default(), 0, vec![], None, None, None) + OutputFeatures::new_current_version(OutputType::default(), 0, vec![], None) } } @@ -217,7 +211,7 @@ mod test { output_type: OutputType::Standard, maturity: u64::MAX, metadata: vec![1; 1024], - sidechain_features: Some(SideChainFeatures::TemplateRegistration(CodeTemplateRegistration { + sidechain_feature: Some(SideChainFeature::TemplateRegistration(CodeTemplateRegistration { author_public_key: Default::default(), author_signature: Default::default(), template_name: MaxSizeString::from_str_checked("🚀🚀🚀🚀🚀🚀🚀🚀").unwrap(), @@ -238,8 +232,6 @@ mod test { .try_into() .unwrap(), })), - validator_node_public_key: None, - validator_node_signature: None, } } @@ -255,7 +247,7 
@@ mod test { #[test] fn it_encodes_and_decodes_correctly_in_none_case() { let mut subject = make_fully_populated_output_features(OutputFeaturesVersion::V1); - subject.sidechain_features = None; + subject.sidechain_feature = None; check_consensus_encoding_correctness(subject).unwrap(); } } diff --git a/base_layer/core/src/transactions/transaction_components/output_type.rs b/base_layer/core/src/transactions/transaction_components/output_type.rs index 30043649d1..9db69b1ef2 100644 --- a/base_layer/core/src/transactions/transaction_components/output_type.rs +++ b/base_layer/core/src/transactions/transaction_components/output_type.rs @@ -44,8 +44,10 @@ pub enum OutputType { Coinbase = 1, /// Output is a burned output and can not be spent ever. Burn = 2, + /// Output defines a validator node registration + ValidatorNodeRegistration = 3, /// Output defines a new re-usable code template. - CodeTemplateRegistration = 3, + CodeTemplateRegistration = 4, } impl OutputType { @@ -61,7 +63,13 @@ impl OutputType { } pub const fn all() -> &'static [Self] { - &[OutputType::Standard, OutputType::Coinbase, OutputType::Burn] + &[ + OutputType::Standard, + OutputType::Coinbase, + OutputType::Burn, + OutputType::ValidatorNodeRegistration, + OutputType::CodeTemplateRegistration, + ] } } @@ -115,7 +123,8 @@ mod tests { assert_eq!(OutputType::from_byte(0), Some(OutputType::Standard)); assert_eq!(OutputType::from_byte(1), Some(OutputType::Coinbase)); assert_eq!(OutputType::from_byte(2), Some(OutputType::Burn)); - assert_eq!(OutputType::from_byte(3), Some(OutputType::CodeTemplateRegistration)); + assert_eq!(OutputType::from_byte(3), Some(OutputType::ValidatorNodeRegistration)); + assert_eq!(OutputType::from_byte(4), Some(OutputType::CodeTemplateRegistration)); assert_eq!(OutputType::from_byte(108), None); } diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs b/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs index 
ec3d641714..4922052163 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/mod.rs @@ -20,12 +20,14 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -mod sidechain_features; -pub use sidechain_features::SideChainFeatures; +mod sidechain_feature; +pub use sidechain_feature::SideChainFeature; mod template_registration; +mod validator_node_registration; use tari_crypto::{hash::blake2::Blake256, hash_domain, hashing::DomainSeparatedHasher}; pub use template_registration::{BuildInfo, CodeTemplateRegistration, TemplateType}; +pub use validator_node_registration::ValidatorNodeRegistration; hash_domain!( ContractAcceptanceHashDomain, diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs b/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_feature.rs similarity index 75% rename from base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs rename to base_layer/core/src/transactions/transaction_components/side_chain/sidechain_feature.rs index 49eef7681e..1fb81f2801 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_features.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/sidechain_feature.rs @@ -26,29 +26,49 @@ use serde::{Deserialize, Serialize}; use crate::{ consensus::{read_byte, write_byte, ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized}, - transactions::transaction_components::CodeTemplateRegistration, + transactions::transaction_components::{CodeTemplateRegistration, ValidatorNodeRegistration}, }; #[derive(Debug, Clone, Hash, PartialEq, Deserialize, Serialize, Eq)] -pub enum SideChainFeatures { +pub enum SideChainFeature { + 
ValidatorNodeRegistration(ValidatorNodeRegistration), TemplateRegistration(CodeTemplateRegistration), } -impl SideChainFeatures { + +impl SideChainFeature { + pub fn template_registration(&self) -> Option<&CodeTemplateRegistration> { + match self { + Self::TemplateRegistration(v) => Some(v), + _ => None, + } + } + + pub fn validator_node_registration(&self) -> Option<&ValidatorNodeRegistration> { + match self { + Self::ValidatorNodeRegistration(v) => Some(v), + _ => None, + } + } + pub fn as_byte(&self) -> u8 { #[allow(clippy::enum_glob_use)] - use SideChainFeatures::*; + use SideChainFeature::*; match self { - TemplateRegistration(_) => 0x01, + ValidatorNodeRegistration(_) => 0x01, + TemplateRegistration(_) => 0x02, } } } -impl ConsensusEncoding for SideChainFeatures { +impl ConsensusEncoding for SideChainFeature { fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { #[allow(clippy::enum_glob_use)] - use SideChainFeatures::*; + use SideChainFeature::*; write_byte(writer, self.as_byte())?; match self { + ValidatorNodeRegistration(validator_node_registration) => { + validator_node_registration.consensus_encode(writer)?; + }, TemplateRegistration(template_registration) => { template_registration.consensus_encode(writer)?; }, @@ -57,15 +77,16 @@ impl ConsensusEncoding for SideChainFeatures { } } -impl ConsensusEncodingSized for SideChainFeatures {} +impl ConsensusEncodingSized for SideChainFeature {} -impl ConsensusDecoding for SideChainFeatures { +impl ConsensusDecoding for SideChainFeature { fn consensus_decode(reader: &mut R) -> Result { #[allow(clippy::enum_glob_use)] - use SideChainFeatures::*; + use SideChainFeature::*; let byte = read_byte(reader)?; match byte { - 0x01 => Ok(TemplateRegistration(ConsensusDecoding::consensus_decode(reader)?)), + 0x01 => Ok(ValidatorNodeRegistration(ConsensusDecoding::consensus_decode(reader)?)), + 0x02 => Ok(TemplateRegistration(ConsensusDecoding::consensus_decode(reader)?)), _ => Err(Error::new( 
ErrorKind::InvalidData, format!("Invalid SideChainFeatures byte '{}'", byte), @@ -88,7 +109,7 @@ mod tests { #[test] fn it_encodes_and_decodes_correctly() { - let subject = SideChainFeatures::TemplateRegistration(CodeTemplateRegistration { + let subject = SideChainFeature::TemplateRegistration(CodeTemplateRegistration { author_public_key: Default::default(), author_signature: Default::default(), template_name: MaxSizeString::from_str_checked("🚀🚀🚀🚀🚀🚀🚀🚀").unwrap(), diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs b/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs new file mode 100644 index 0000000000..813d340ed1 --- /dev/null +++ b/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs @@ -0,0 +1,129 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::io::{Error, Read, Write}; + +use rand::rngs::OsRng; +use serde::{Deserialize, Serialize}; +use tari_common_types::types::{FixedHash, PrivateKey, PublicKey, Signature}; +use tari_crypto::keys::PublicKey as PublicKeyT; + +use crate::{ + consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, DomainSeparatedConsensusHasher}, + transactions::TransactionHashDomain, +}; + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] +pub struct ValidatorNodeRegistration { + pub public_key: PublicKey, + pub signature: Signature, +} + +impl ValidatorNodeRegistration { + pub fn is_valid_signature_for(&self, msg: &[u8]) -> bool { + let challenge = Self::construct_challenge(&self.public_key, self.signature.get_public_nonce(), msg); + self.signature.verify_challenge(&self.public_key, &*challenge) + } + + pub fn new_signed(private_key: &PrivateKey, msg: &[u8]) -> Self { + let (secret_nonce, public_nonce) = PublicKey::random_keypair(&mut OsRng); + let public_key = PublicKey::from_secret_key(private_key); + let challenge = Self::construct_challenge(&public_key, &public_nonce, msg); + let signature = Signature::sign(private_key.clone(), secret_nonce, &*challenge) + .expect("Sign cannot fail with 32-byte challenge and a RistrettoPublicKey"); + Self { public_key, signature } + } + + pub fn construct_challenge(public_key: &PublicKey, public_nonce: &PublicKey, msg: &[u8]) -> FixedHash { + 
DomainSeparatedConsensusHasher::::new("validator_node_registration") + .chain(public_key) + .chain(public_nonce) + .chain(&msg) + .finalize() + .into() + } +} + +impl ConsensusEncoding for ValidatorNodeRegistration { + fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { + self.public_key.consensus_encode(writer)?; + self.signature.consensus_encode(writer)?; + Ok(()) + } +} + +impl ConsensusEncodingSized for ValidatorNodeRegistration { + fn consensus_encode_exact_size(&self) -> usize { + self.public_key.consensus_encode_exact_size() + self.signature.consensus_encode_exact_size() + } +} + +impl ConsensusDecoding for ValidatorNodeRegistration { + fn consensus_decode(reader: &mut R) -> Result { + Ok(Self { + public_key: ConsensusDecoding::consensus_decode(reader)?, + signature: ConsensusDecoding::consensus_decode(reader)?, + }) + } +} + +#[cfg(test)] +mod test { + use rand::rngs::OsRng; + use tari_crypto::keys::SecretKey; + + use super::*; + use crate::consensus::check_consensus_encoding_correctness; + + fn create_instance() -> ValidatorNodeRegistration { + let sk = PrivateKey::random(&mut OsRng); + ValidatorNodeRegistration::new_signed(&sk, b"valid") + } + + #[test] + fn it_encodes_and_decodes_correctly() { + check_consensus_encoding_correctness(create_instance()).unwrap(); + } + + mod is_valid_signature_for { + use super::*; + + #[test] + fn it_returns_true_for_valid_signature() { + let reg = create_instance(); + assert!(reg.is_valid_signature_for(b"valid")); + } + + #[test] + fn it_returns_false_for_invalid_challenge() { + let reg = create_instance(); + assert!(!reg.is_valid_signature_for(b"there's wally")); + } + + #[test] + fn it_returns_false_for_invalid_signature() { + let mut reg = create_instance(); + reg.public_key = create_instance().public_key; + assert!(!reg.is_valid_signature_for(b"valid")); + } + } +} diff --git a/base_layer/core/src/transactions/transaction_components/transaction_output.rs 
b/base_layer/core/src/transactions/transaction_components/transaction_output.rs index 3f9f0ca721..ff4b9175aa 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_output.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_output.rs @@ -217,13 +217,14 @@ impl TransactionOutput { } pub fn verify_validator_node_signature(&self) -> Result<(), TransactionError> { - if let Some(public_key) = &self.features.validator_node_public_key { - let signature = self - .features - .validator_node_signature - .clone() - .ok_or(TransactionError::MissingValidatorNodeSignature)?; - if !signature.verify_challenge(public_key, &[0]) { + if let Some(validator_node_reg) = self + .features + .sidechain_feature + .as_ref() + .and_then(|f| f.validator_node_registration()) + { + // TODO: figure out what the validator node should sign + if !validator_node_reg.is_valid_signature_for(b"") { return Err(TransactionError::InvalidSignatureError( "Validator node signature is not valid!".to_string(), )); diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index ffeb0cc4a1..30a7fa818f 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -1077,8 +1077,6 @@ async fn consensus_validation_versions() { 0, Default::default(), None, - None, - None, ); let test_params = TestParams::new(); diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index 7fea7f19e6..c0c4574b35 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -144,56 +144,59 @@ pub enum TransactionServiceRequest { impl fmt::Display for TransactionServiceRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Self::GetPendingInboundTransactions => f.write_str("GetPendingInboundTransactions"), - Self::GetPendingOutboundTransactions => 
f.write_str("GetPendingOutboundTransactions"), - Self::GetCompletedTransactions => f.write_str("GetCompletedTransactions"), - Self::GetCancelledPendingInboundTransactions => f.write_str("GetCancelledPendingInboundTransactions"), - Self::GetCancelledPendingOutboundTransactions => f.write_str("GetCancelledPendingOutboundTransactions"), - Self::GetCancelledCompletedTransactions => f.write_str("GetCancelledCompletedTransactions"), - Self::GetCompletedTransaction(t) => f.write_str(&format!("GetCompletedTransaction({})", t)), + Self::GetPendingInboundTransactions => write!(f, "GetPendingInboundTransactions"), + Self::GetPendingOutboundTransactions => write!(f, "GetPendingOutboundTransactions"), + Self::GetCompletedTransactions => write!(f, "GetCompletedTransactions"), + Self::GetCancelledPendingInboundTransactions => write!(f, "GetCancelledPendingInboundTransactions"), + Self::GetCancelledPendingOutboundTransactions => write!(f, "GetCancelledPendingOutboundTransactions"), + Self::GetCancelledCompletedTransactions => write!(f, "GetCancelledCompletedTransactions"), + Self::GetCompletedTransaction(t) => write!(f, "GetCompletedTransaction({})", t), Self::SendTransaction { dest_pubkey, amount, message, .. - } => f.write_str(&format!( + } => write!( + f, "SendTransaction (to {}, {}, {})", dest_pubkey.to_hex(), amount, message - )), - Self::BurnTari { amount, message, .. } => f.write_str(&format!("Burning Tari ({}, {})", amount, message)), + ), + Self::BurnTari { amount, message, .. } => write!(f, "Burning Tari ({}, {})", amount, message), Self::RegisterValidatorNode { validator_node_public_key, message, .. - } => f.write_str(&format!("Registering VN ({}, {})", validator_node_public_key, message)), + } => write!(f, "Registering VN ({}, {})", validator_node_public_key, message), Self::SendOneSidedTransaction { dest_pubkey, amount, message, .. 
- } => f.write_str(&format!( + } => write!( + f, "SendOneSidedTransaction (to {}, {}, {})", dest_pubkey.to_hex(), amount, message - )), + ), Self::SendOneSidedToStealthAddressTransaction { dest_pubkey, amount, message, .. - } => f.write_str(&format!( + } => write!( + f, "SendOneSidedToStealthAddressTransaction (to {}, {}, {})", dest_pubkey.to_hex(), amount, message - )), + ), Self::SendShaAtomicSwapTransaction(k, _, v, _, msg) => { - f.write_str(&format!("SendShaAtomicSwapTransaction (to {}, {}, {})", k, v, msg)) + write!(f, "SendShaAtomicSwapTransaction (to {}, {}, {})", k, v, msg) }, - Self::CancelTransaction(t) => f.write_str(&format!("CancelTransaction ({})", t)), + Self::CancelTransaction(t) => write!(f, "CancelTransaction ({})", t), Self::ImportUtxoWithStatus { amount, source_public_key, @@ -203,7 +206,8 @@ impl fmt::Display for TransactionServiceRequest { tx_id, current_height, mined_timestamp, - } => f.write_str(&format!( + } => write!( + f, "ImportUtxo (from {}, {}, {} with maturity {} and {:?} and {:?} and {:?} and {:?})", source_public_key, amount, @@ -213,22 +217,22 @@ impl fmt::Display for TransactionServiceRequest { tx_id, current_height, mined_timestamp - )), - Self::SubmitTransactionToSelf(tx_id, _, _, _, _) => f.write_str(&format!("SubmitTransaction ({})", tx_id)), - Self::SetLowPowerMode => f.write_str("SetLowPowerMode "), - Self::SetNormalPowerMode => f.write_str("SetNormalPowerMode"), - Self::ApplyEncryption(_) => f.write_str("ApplyEncryption"), - Self::RemoveEncryption => f.write_str("RemoveEncryption"), + ), + Self::SubmitTransactionToSelf(tx_id, _, _, _, _) => write!(f, "SubmitTransaction ({})", tx_id), + Self::SetLowPowerMode => write!(f, "SetLowPowerMode "), + Self::SetNormalPowerMode => write!(f, "SetNormalPowerMode"), + Self::ApplyEncryption(_) => write!(f, "ApplyEncryption"), + Self::RemoveEncryption => write!(f, "RemoveEncryption"), Self::GenerateCoinbaseTransaction(_, _, bh) => { - f.write_str(&format!("GenerateCoinbaseTransaction 
(Blockheight {})", bh)) + write!(f, "GenerateCoinbaseTransaction (Blockheight {})", bh) }, - Self::RestartTransactionProtocols => f.write_str("RestartTransactionProtocols"), - Self::RestartBroadcastProtocols => f.write_str("RestartBroadcastProtocols"), - Self::GetNumConfirmationsRequired => f.write_str("GetNumConfirmationsRequired"), - Self::SetNumConfirmationsRequired(_) => f.write_str("SetNumConfirmationsRequired"), - Self::GetAnyTransaction(t) => f.write_str(&format!("GetAnyTransaction({})", t)), - Self::ValidateTransactions => f.write_str("ValidateTransactions"), - Self::ReValidateTransactions => f.write_str("ReValidateTransactions"), + Self::RestartTransactionProtocols => write!(f, "RestartTransactionProtocols"), + Self::RestartBroadcastProtocols => write!(f, "RestartBroadcastProtocols"), + Self::GetNumConfirmationsRequired => write!(f, "GetNumConfirmationsRequired"), + Self::SetNumConfirmationsRequired(_) => write!(f, "SetNumConfirmationsRequired"), + Self::GetAnyTransaction(t) => write!(f, "GetAnyTransaction({})", t), + Self::ValidateTransactions => write!(f, "ValidateTransactions"), + Self::ReValidateTransactions => write!(f, "ReValidateTransactions"), Self::GetFeePerGramStatsPerBlock { count } => { write!(f, "GetFeePerGramEstimatesPerBlock(count: {})", count,) }, diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index 1c80938651..274066bd9e 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -1486,7 +1486,7 @@ where reply_channel: oneshot::Sender>, ) -> Result<(), TransactionServiceError> { let output_features = - OutputFeatures::create_validator_node_registration(validator_node_public_key, validator_node_signature); + OutputFeatures::for_validator_node_registration(validator_node_public_key, validator_node_signature); let tx_meta = TransactionMetadata::new_with_features(0.into(), 3, 
KernelFeatures::create_validator_node_registration()); self.send_transaction( diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 9985c4c332..73c2901e9c 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -1533,15 +1533,7 @@ pub unsafe extern "C" fn output_features_create_from_bytes( let decoded_metadata = (*metadata).0.clone(); - let output_features = TariOutputFeatures::new( - decoded_version, - output_type, - maturity, - decoded_metadata, - None, - None, - None, - ); + let output_features = TariOutputFeatures::new(decoded_version, output_type, maturity, decoded_metadata, None); Box::into_raw(Box::new(output_features)) } diff --git a/integration_tests/helpers/transactionBuilder.js b/integration_tests/helpers/transactionBuilder.js index a165b22d23..d0acd2aa0a 100644 --- a/integration_tests/helpers/transactionBuilder.js +++ b/integration_tests/helpers/transactionBuilder.js @@ -224,7 +224,7 @@ class TransactionBuilder { unique_id: features.unique_id ? 
Buffer.from(features.unique_id, "utf8") : null, - sidechain_features: null, + sidechain_feature: null, parent_public_key: null, asset: null, mint_non_fungible: null, diff --git a/integration_tests/helpers/transactionOutputHashing.js b/integration_tests/helpers/transactionOutputHashing.js index d51178d4c9..21df98e064 100644 --- a/integration_tests/helpers/transactionOutputHashing.js +++ b/integration_tests/helpers/transactionOutputHashing.js @@ -22,7 +22,7 @@ const featuresToConsensusBytes = function (features) { Buffer.from([parseInt(features.maturity || 0)]), // output_type Buffer.from([features.output_type]), - // sidechain_features + // sidechain_feature // TODO: SideChainFeatures encodeOption(null), // metadata From 613b65571540814afee49cdbfee834e5995dc85b Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Tue, 20 Sep 2022 17:09:05 +0400 Subject: [PATCH 06/21] fix(core): bring validator node MR inline with other merkle root code (#4692) Description --- - calculates validator node MR in `calculate_merkle_roots` function - allows tari_core and wallet to compile without base node feature - removes validator_node_mr param from `BlockHeader::from_previous` - removes validator_node_mr from NewBlockTemplate - removes validator node mr validation task from async validator - adds validator node mr validator to `check_merkle_roots` - removes unused get_validator_mr function from blockchain db - checks validator node mr in genesis block sanity check - removes panic from grpc conversion code Motivation and Context --- Validator node MR is created and checked in a different way from other merkle roots, this PR brings that code in line with the other merkle root code, plus a number of minor improvements. How Has This Been Tested?
--- Existing tests - TODO: write validator node registration tests for blockchain db and block validators --- applications/tari_app_grpc/proto/block.proto | 4 +- applications/tari_app_grpc/proto/wallet.proto | 2 +- .../src/conversions/block_header.rs | 6 +- .../src/conversions/new_block_template.rs | 2 - .../src/conversions/sidechain_feature.rs | 4 +- .../src/grpc/wallet_grpc_server.rs | 2 +- .../common_types/src/types/fixed_hash.rs | 5 + base_layer/core/Cargo.toml | 4 +- .../comms_interface/inbound_handlers.rs | 3 +- .../base_node/sync/header_sync/validator.rs | 8 +- base_layer/core/src/blocks/block_header.rs | 16 +-- base_layer/core/src/blocks/genesis_block.rs | 134 ++++++------------ .../src/blocks/new_blockheader_template.rs | 3 - base_layer/core/src/chain_storage/async_db.rs | 6 +- .../src/chain_storage/blockchain_database.rs | 29 ++-- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 9 +- .../tests/blockchain_database.rs | 12 +- base_layer/core/src/lib.rs | 11 +- .../src/proof_of_work/monero_rx/helpers.rs | 22 +-- base_layer/core/src/proto/block.proto | 2 - base_layer/core/src/proto/block.rs | 2 - base_layer/core/src/proto/block_header.rs | 4 +- .../core/src/proto/sidechain_feature.rs | 4 +- base_layer/core/src/test_helpers/mod.rs | 3 +- .../side_chain/validator_node_registration.rs | 8 ++ .../block_validators/async_validator.rs | 23 --- base_layer/core/src/validation/helpers.rs | 13 ++ base_layer/core/src/validation/test.rs | 9 +- .../chain_storage_tests/chain_storage.rs | 7 +- .../core/tests/helpers/block_builders.rs | 21 ++- 30 files changed, 145 insertions(+), 233 deletions(-) diff --git a/applications/tari_app_grpc/proto/block.proto b/applications/tari_app_grpc/proto/block.proto index 04904a91a8..223ef6d815 100644 --- a/applications/tari_app_grpc/proto/block.proto +++ b/applications/tari_app_grpc/proto/block.proto @@ -62,7 +62,7 @@ message BlockHeader { // Sum of script offsets for all kernels in this block. 
bytes total_script_offset = 15; // Merkle root of validator nodes - bytes validator_node_merkle_root = 16; + bytes validator_node_mr = 16; } // Metadata required for validating the Proof of Work calculation @@ -119,8 +119,6 @@ message NewBlockHeaderTemplate { // uint64 target_difficulty = 6; // Sum of script offsets for all kernels in this block. bytes total_script_offset = 7; - // Merkle root of validator nodes - bytes validator_node_merkle_root = 8; } // The new block template is used constructing a new partial block, allowing a miner to added the coinbase utxo and as a final step the Base node to add the MMR roots to the header. diff --git a/applications/tari_app_grpc/proto/wallet.proto b/applications/tari_app_grpc/proto/wallet.proto index 164fda5ef1..5a8357c637 100644 --- a/applications/tari_app_grpc/proto/wallet.proto +++ b/applications/tari_app_grpc/proto/wallet.proto @@ -321,7 +321,7 @@ message TransactionEventResponse { } message RegisterValidatorNodeRequest { - string validator_node_public_key = 1; + bytes validator_node_public_key = 1; Signature validator_node_signature = 2; uint64 fee_per_gram = 3; string message = 4; diff --git a/applications/tari_app_grpc/src/conversions/block_header.rs b/applications/tari_app_grpc/src/conversions/block_header.rs index 18705ff909..0577701bd2 100644 --- a/applications/tari_app_grpc/src/conversions/block_header.rs +++ b/applications/tari_app_grpc/src/conversions/block_header.rs @@ -53,7 +53,7 @@ impl From for grpc::BlockHeader { pow_algo: pow_algo.as_u64(), pow_data: h.pow.pow_data, }), - validator_node_merkle_root: h.validator_node_merkle_root, + validator_node_mr: h.validator_node_mr.to_vec(), } } } @@ -86,13 +86,13 @@ impl TryFrom for BlockHeader { output_mr: FixedHash::try_from(header.output_mr).map_err(|err| err.to_string())?, witness_mr: FixedHash::try_from(header.witness_mr).map_err(|err| err.to_string())?, output_mmr_size: header.output_mmr_size, - kernel_mr: FixedHash::try_from(header.kernel_mr).expect("Array 
size 32 cannot fail"), + kernel_mr: FixedHash::try_from(header.kernel_mr).map_err(|err| err.to_string())?, kernel_mmr_size: header.kernel_mmr_size, total_kernel_offset, total_script_offset, nonce: header.nonce, pow, - validator_node_merkle_root: header.validator_node_merkle_root, + validator_node_mr: FixedHash::try_from(header.validator_node_mr).map_err(|err| err.to_string())?, }) } } diff --git a/applications/tari_app_grpc/src/conversions/new_block_template.rs b/applications/tari_app_grpc/src/conversions/new_block_template.rs index 86a176c4de..54e5a58c03 100644 --- a/applications/tari_app_grpc/src/conversions/new_block_template.rs +++ b/applications/tari_app_grpc/src/conversions/new_block_template.rs @@ -45,7 +45,6 @@ impl TryFrom for grpc::NewBlockTemplate { pow_algo: block.header.pow.pow_algo.as_u64(), pow_data: block.header.pow.pow_data, }), - validator_node_merkle_root: block.header.validator_node_merkle_root, }; Ok(Self { body: Some(grpc::AggregateBody { @@ -92,7 +91,6 @@ impl TryFrom for NewBlockTemplate { total_kernel_offset, total_script_offset, pow, - validator_node_merkle_root: header.validator_node_merkle_root, }; let body = block .body diff --git a/applications/tari_app_grpc/src/conversions/sidechain_feature.rs b/applications/tari_app_grpc/src/conversions/sidechain_feature.rs index 4cd7541452..815f0c5818 100644 --- a/applications/tari_app_grpc/src/conversions/sidechain_feature.rs +++ b/applications/tari_app_grpc/src/conversions/sidechain_feature.rs @@ -40,7 +40,9 @@ use crate::tari_rpc as grpc; //---------------------------------- SideChainFeature --------------------------------------------// impl From for grpc::SideChainFeature { fn from(value: SideChainFeature) -> Self { - value.into() + Self { + side_chain_feature: Some(value.into()), + } } } diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index 04cdaa3331..980f1052f4 100644 --- 
a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -936,7 +936,7 @@ impl wallet_server::Wallet for WalletGrpcServer { ) -> Result, Status> { let request = request.into_inner(); let mut transaction_service = self.get_transaction_service(); - let validator_node_public_key = CommsPublicKey::from_hex(&request.validator_node_public_key) + let validator_node_public_key = CommsPublicKey::from_bytes(&request.validator_node_public_key) .map_err(|_| Status::internal("Destination address is malformed".to_string()))?; let validator_node_signature = request .validator_node_signature diff --git a/base_layer/common_types/src/types/fixed_hash.rs b/base_layer/common_types/src/types/fixed_hash.rs index efe9d3e078..43fc39da4a 100644 --- a/base_layer/common_types/src/types/fixed_hash.rs +++ b/base_layer/common_types/src/types/fixed_hash.rs @@ -104,6 +104,11 @@ impl PartialEq> for FixedHash { self == other.as_slice() } } +impl PartialEq for Vec { + fn eq(&self, other: &FixedHash) -> bool { + self == other.as_slice() + } +} impl AsRef<[u8]> for FixedHash { fn as_ref(&self) -> &[u8] { diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index bb7b0de177..0a3f9383f6 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -10,10 +10,10 @@ version = "0.38.3" edition = "2018" [features] -default = ["croaring", "tari_mmr", "transactions", "base_node", "mempool_proto", "base_node_proto", "monero", "randomx-rs"] +default = ["base_node"] transactions = [] mempool_proto = [] -base_node = ["croaring", "tari_mmr", "transactions", "base_node_proto", "monero", "randomx-rs"] +base_node = ["croaring", "tari_mmr", "transactions", "mempool_proto", "base_node_proto", "monero", "randomx-rs"] base_node_proto = [] avx2 = ["tari_crypto/simd_backend"] benches = ["base_node", "criterion"] diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs 
b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index ce6e4f221c..378b77967d 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -274,8 +274,7 @@ where B: BlockchainBackend + 'static }, NodeCommsRequest::GetNewBlockTemplate(request) => { let best_block_header = self.blockchain_db.fetch_tip_header().await?; - let vns = self.blockchain_db.get_validator_nodes_mr().await?; - let mut header = BlockHeader::from_previous(best_block_header.header(), vns); + let mut header = BlockHeader::from_previous(best_block_header.header()); let constants = self.consensus_manager.consensus_constants(header.height); header.version = constants.blockchain_version(); header.pow.pow_algo = request.algo; diff --git a/base_layer/core/src/base_node/sync/header_sync/validator.rs b/base_layer/core/src/base_node/sync/header_sync/validator.rs index 3bff458033..ce2d5e0c66 100644 --- a/base_layer/core/src/base_node/sync/header_sync/validator.rs +++ b/base_layer/core/src/base_node/sync/header_sync/validator.rs @@ -261,7 +261,7 @@ mod test { let (validator, db) = setup(); let mut tip = db.fetch_tip_header().await.unwrap(); for _ in 0..n { - let mut header = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); + let mut header = BlockHeader::from_previous(tip.header()); // Needed to have unique keys for the blockchain db mmr count indexes (MDB_KEY_EXIST error) header.kernel_mmr_size += 1; header.output_mmr_size += 1; @@ -316,11 +316,11 @@ mod test { let (mut validator, _, tip) = setup_with_headers(1).await; validator.initialize_state(tip.hash()).await.unwrap(); assert!(validator.valid_headers().is_empty()); - let next = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); + let next = BlockHeader::from_previous(tip.header()); validator.validate(next).unwrap(); assert_eq!(validator.valid_headers().len(), 1); 
let tip = validator.valid_headers().last().cloned().unwrap(); - let next = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); + let next = BlockHeader::from_previous(tip.header()); validator.validate(next).unwrap(); assert_eq!(validator.valid_headers().len(), 2); } @@ -329,7 +329,7 @@ mod test { async fn it_fails_if_height_is_not_serial() { let (mut validator, _, tip) = setup_with_headers(2).await; validator.initialize_state(tip.hash()).await.unwrap(); - let mut next = BlockHeader::from_previous(tip.header(), tip.header().validator_node_merkle_root.clone()); + let mut next = BlockHeader::from_previous(tip.header()); next.height = 10; let err = validator.validate(next).unwrap_err(); unpack_enum!(BlockHeaderSyncError::InvalidBlockHeight { expected, actual } = err); diff --git a/base_layer/core/src/blocks/block_header.rs b/base_layer/core/src/blocks/block_header.rs index 186ac3cb96..ad54b1107f 100644 --- a/base_layer/core/src/blocks/block_header.rs +++ b/base_layer/core/src/blocks/block_header.rs @@ -57,7 +57,6 @@ use crate::{ blocks::BlocksHashDomain, consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSized, DomainSeparatedConsensusHasher}, proof_of_work::{PowAlgorithm, PowError, ProofOfWork}, - ValidatorNodeMmr, }; #[derive(Debug, Error)] @@ -111,14 +110,13 @@ pub struct BlockHeader { pub nonce: u64, /// Proof of work summary pub pow: ProofOfWork, - // Merkle root of all active validator node. - pub validator_node_merkle_root: Vec, + /// Merkle root of all active validator node. + pub validator_node_mr: FixedHash, } impl BlockHeader { /// Create a new, default header with the given version. 
pub fn new(blockchain_version: u16) -> BlockHeader { - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); BlockHeader { version: blockchain_version, height: 0, @@ -134,7 +132,7 @@ impl BlockHeader { total_script_offset: BlindingFactor::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), } } @@ -150,7 +148,7 @@ impl BlockHeader { /// Create a new block header using relevant data from the previous block. The height is incremented by one, the /// previous block hash is set, the timestamp is set to the current time, and the kernel/output mmr sizes are set to /// the previous block. All other fields, including proof of work are set to defaults. - pub fn from_previous(prev: &BlockHeader, validator_node_merkle_root: Vec) -> BlockHeader { + pub fn from_previous(prev: &BlockHeader) -> BlockHeader { let prev_hash = prev.hash(); BlockHeader { version: prev.version, @@ -167,7 +165,7 @@ impl BlockHeader { total_script_offset: BlindingFactor::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root, + validator_node_mr: FixedHash::zero(), } } @@ -269,7 +267,7 @@ impl From for BlockHeader { total_script_offset: header_template.total_script_offset, nonce: 0, pow: header_template.pow, - validator_node_merkle_root: header_template.validator_node_merkle_root, + validator_node_mr: FixedHash::zero(), } } } @@ -369,7 +367,7 @@ mod test { h1.nonce = 7600; assert_eq!(h1.height, 0, "Default block height"); let hash1 = h1.hash(); - let h2 = BlockHeader::from_previous(&h1, h1.validator_node_merkle_root.clone()); + let h2 = BlockHeader::from_previous(&h1); assert_eq!(h2.height, h1.height + 1, "Incrementing block height"); assert!(h2.timestamp > h1.timestamp, "Timestamp"); assert_eq!(h2.prev_hash, hash1, "Previous hash"); diff --git a/base_layer/core/src/blocks/genesis_block.rs b/base_layer/core/src/blocks/genesis_block.rs index d82534b9d8..02c9e5d2bf 100644 --- 
a/base_layer/core/src/blocks/genesis_block.rs +++ b/base_layer/core/src/blocks/genesis_block.rs @@ -46,7 +46,6 @@ use crate::{ TransactionOutputVersion, }, }, - ValidatorNodeMmr, }; /// Returns the genesis block for the selected network. @@ -161,7 +160,6 @@ fn get_igor_genesis_block_raw() -> Block { let genesis = DateTime::parse_from_rfc2822("30 Aug 2022 11:48:00 +0100").unwrap(); #[allow(clippy::cast_sign_loss)] let timestamp = genesis.timestamp() as u64; - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); Block { header: BlockHeader { version: 0, @@ -188,7 +186,8 @@ fn get_igor_genesis_block_raw() -> Block { pow_algo: PowAlgorithm::Sha3, pow_data: vec![], }, - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::from_hex("e1d55f91ecc7e435080ac2641280516a355a5ecbe231158987da217b5af30047") + .unwrap(), }, body, } @@ -234,14 +233,27 @@ pub fn get_esmeralda_genesis_block() -> ChainBlock { // for o in block.body.outputs() { // witness_mmr.push(o.witness_hash().to_vec()).unwrap(); // output_mmr.push(o.hash().to_vec()).unwrap(); + // if matches!(o.features.output_type, OutputType::ValidatorNodeRegistration) { + // let reg = o + // .features + // .sidechain_feature + // .as_ref() + // .and_then(|f| f.validator_node_registration()) + // .unwrap(); + // vn_mmr.push(reg.derive_shard_key(block.hash()).to_vec()).unwrap(); + // } // } + // let vn_mmr = ValidatorNodeMmr::new(Vec::new()); + // block.header.kernel_mr = FixedHash::try_from(kernel_mmr.get_merkle_root().unwrap()).unwrap(); // block.header.witness_mr = FixedHash::try_from(witness_mmr.get_merkle_root().unwrap()).unwrap(); // block.header.output_mr = FixedHash::try_from(output_mmr.get_merkle_root().unwrap()).unwrap(); + // block.header.validator_node_mr = FixedHash::try_from(vn_mmr.get_merkle_root().unwrap()).unwrap(); // println!("kernel mr: {}", block.header.kernel_mr.to_hex()); // println!("witness mr: {}", block.header.witness_mr.to_hex()); // println!("output mr: {}", 
block.header.output_mr.to_hex()); + // println!("vn mr: {}", block.header.validator_node_mr.to_hex()); // Hardcode the Merkle roots once they've been computed above block.header.kernel_mr = @@ -250,6 +262,8 @@ pub fn get_esmeralda_genesis_block() -> ChainBlock { FixedHash::from_hex("8e6bb075239bf307e311f497d35c12c77c4563f218c156895e6630a7d9633de3").unwrap(); block.header.output_mr = FixedHash::from_hex("163304b3fe0f9072170db341945854bf88c8e23e23ecaac3ed86b9231b20e16f").unwrap(); + block.header.validator_node_mr = + FixedHash::from_hex("e1d55f91ecc7e435080ac2641280516a355a5ecbe231158987da217b5af30047").unwrap(); let accumulated_data = BlockHeaderAccumulatedData { hash: block.hash(), @@ -307,7 +321,6 @@ fn get_esmeralda_genesis_block_raw() -> Block { let genesis = DateTime::parse_from_rfc2822("30 Aug 2022 11:45:00 +0100").unwrap(); #[allow(clippy::cast_sign_loss)] let timestamp = genesis.timestamp() as u64; - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); Block { header: BlockHeader { version: 0, @@ -334,7 +347,8 @@ fn get_esmeralda_genesis_block_raw() -> Block { pow_algo: PowAlgorithm::Sha3, pow_data: vec![], }, - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::from_hex("e1d55f91ecc7e435080ac2641280516a355a5ecbe231158987da217b5af30047") + .unwrap(), }, body, } @@ -354,6 +368,7 @@ mod test { validation::{ChainBalanceValidator, FinalHorizonStateValidation}, KernelMmr, MutableOutputMmr, + ValidatorNodeMmr, WitnessMmr, }; @@ -362,82 +377,19 @@ mod test { // Note: Generate new data for `pub fn get_esmeralda_genesis_block()` and `fn get_esmeralda_genesis_block_raw()` // if consensus values change, e.g. 
new faucet or other let block = get_esmeralda_genesis_block(); - assert_eq!(block.block().body.outputs().len(), 4001); - - let factories = CryptoFactories::default(); - assert!(block.block().body.outputs().iter().any(|o| o.is_coinbase())); - let outputs = block.block().body.outputs().iter().collect::>(); - batch_verify_range_proofs(&factories.range_proof, &outputs).unwrap(); - // Coinbase and faucet kernel - assert_eq!( - block.block().body.kernels().len() as u64, - block.header().kernel_mmr_size - ); - assert_eq!( - block.block().body.outputs().len() as u64, - block.header().output_mmr_size - ); - - for kernel in block.block().body.kernels() { - kernel.verify_signature().unwrap(); - } - assert!(block - .block() - .body - .kernels() - .iter() - .any(|k| k.features.contains(KernelFeatures::COINBASE_KERNEL))); - - // Check MMR - let mut kernel_mmr = KernelMmr::new(Vec::new()); - for k in block.block().body.kernels() { - kernel_mmr.push(k.hash().to_vec()).unwrap(); - } - - let mut witness_mmr = WitnessMmr::new(Vec::new()); - let mut output_mmr = MutableOutputMmr::new(Vec::new(), Bitmap::create()).unwrap(); - - for o in block.block().body.outputs() { - o.verify_metadata_signature().unwrap(); - - witness_mmr.push(o.witness_hash().to_vec()).unwrap(); - output_mmr.push(o.hash().to_vec()).unwrap(); - } - - assert_eq!( - kernel_mmr.get_merkle_root().unwrap().as_slice(), - block.header().kernel_mr.as_slice() - ); - assert_eq!( - witness_mmr.get_merkle_root().unwrap().as_slice(), - block.header().witness_mr.as_slice() - ); - assert_eq!( - output_mmr.get_merkle_root().unwrap().as_slice(), - block.header().output_mr.as_slice() - ); - - // Check that the faucet UTXOs balance (the faucet_value consensus constant is set correctly and faucet kernel - // is correct) - - let utxo_sum = block.block().body.outputs().iter().map(|o| &o.commitment).sum(); - let kernel_sum = block.block().body.kernels().iter().map(|k| &k.excess).sum(); - - let db = 
create_new_blockchain_with_network(Network::Esmeralda); - - let lock = db.db_read_access().unwrap(); - ChainBalanceValidator::new( - ConsensusManager::builder(Network::Esmeralda).build(), - Default::default(), - ) - .validate(&*lock, 0, &utxo_sum, &kernel_sum, &Commitment::default()) - .unwrap(); + check_block(Network::Esmeralda, &block, 4001, 2); } #[test] fn igor_genesis_sanity_check() { let block = get_igor_genesis_block(); - assert_eq!(block.block().body.outputs().len(), 1); + check_block(Network::Igor, &block, 1, 1); + } + + fn check_block(network: Network, block: &ChainBlock, expected_outputs: usize, expected_kernels: usize) { + assert!(block.block().body.inputs().is_empty()); + assert_eq!(block.block().body.kernels().len(), expected_kernels); + assert_eq!(block.block().body.outputs().len(), expected_outputs); let factories = CryptoFactories::default(); assert!(block.block().body.outputs().iter().any(|o| o.is_coinbase())); @@ -471,25 +423,25 @@ mod test { let mut witness_mmr = WitnessMmr::new(Vec::new()); let mut output_mmr = MutableOutputMmr::new(Vec::new(), Bitmap::create()).unwrap(); - assert_eq!(block.block().body.kernels().len(), 1); - assert_eq!(block.block().body.outputs().len(), 1); + let mut vn_mmr = ValidatorNodeMmr::new(Vec::new()); for o in block.block().body.outputs() { witness_mmr.push(o.witness_hash().to_vec()).unwrap(); output_mmr.push(o.hash().to_vec()).unwrap(); + if matches!(o.features.output_type, OutputType::ValidatorNodeRegistration) { + let reg = o + .features + .sidechain_feature + .as_ref() + .and_then(|f| f.validator_node_registration()) + .unwrap(); + vn_mmr.push(reg.derive_shard_key(block.hash()).to_vec()).unwrap(); + } } - assert_eq!( - kernel_mmr.get_merkle_root().unwrap().as_slice(), - block.header().kernel_mr.as_slice() - ); - assert_eq!( - witness_mmr.get_merkle_root().unwrap().as_slice(), - block.header().witness_mr.as_slice() - ); - assert_eq!( - output_mmr.get_merkle_root().unwrap().as_slice(), - 
block.header().output_mr.as_slice() - ); + assert_eq!(kernel_mmr.get_merkle_root().unwrap(), block.header().kernel_mr,); + assert_eq!(witness_mmr.get_merkle_root().unwrap(), block.header().witness_mr,); + assert_eq!(output_mmr.get_merkle_root().unwrap(), block.header().output_mr,); + assert_eq!(vn_mmr.get_merkle_root().unwrap(), block.header().validator_node_mr); // Check that the faucet UTXOs balance (the faucet_value consensus constant is set correctly and faucet kernel // is correct) @@ -500,7 +452,7 @@ mod test { let db = create_new_blockchain_with_network(Network::Igor); let lock = db.db_read_access().unwrap(); - ChainBalanceValidator::new(ConsensusManager::builder(Network::Igor).build(), Default::default()) + ChainBalanceValidator::new(ConsensusManager::builder(network).build(), Default::default()) .validate(&*lock, 0, &utxo_sum, &kernel_sum, &Commitment::default()) .unwrap(); } diff --git a/base_layer/core/src/blocks/new_blockheader_template.rs b/base_layer/core/src/blocks/new_blockheader_template.rs index 4b2eeebec7..5864adbadb 100644 --- a/base_layer/core/src/blocks/new_blockheader_template.rs +++ b/base_layer/core/src/blocks/new_blockheader_template.rs @@ -45,8 +45,6 @@ pub struct NewBlockHeaderTemplate { pub total_script_offset: BlindingFactor, /// Proof of work summary pub pow: ProofOfWork, - // Merkle root of all active validator node. - pub validator_node_merkle_root: Vec, } impl NewBlockHeaderTemplate { @@ -58,7 +56,6 @@ impl NewBlockHeaderTemplate { total_kernel_offset: header.total_kernel_offset, total_script_offset: header.total_script_offset, pow: header.pow, - validator_node_merkle_root: header.validator_node_merkle_root, } } } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 7e03494073..95bccf91b1 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -110,7 +110,7 @@ macro_rules! 
make_async_fn { $(#[$outer:meta])* $fn:ident$(< $( $lt:tt $( : $clt:path )? ),+ >)?($($param:ident:$ptype:ty),+) -> $rtype:ty, $name:expr) => { $(#[$outer])* - pub async fn $fn$(< $( $lt $( : $clt )? ),+ +Sync+Send + 'static >)?(&self, $($param: $ptype),+) -> Result<$rtype, ChainStorageError> { + pub async fn $fn$(< $( $lt $( : $clt )? ),+ + Sync + Send + 'static >)?(&self, $($param: $ptype),+) -> Result<$rtype, ChainStorageError> { let db = self.db.clone(); let mut mdc = vec![]; log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); @@ -270,9 +270,7 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); - make_async_fn!(get_validator_nodes_mr() -> Vec, "get_validator_nodes_mr"); - - make_async_fn!(get_shard_key(height:u64, public_key:PublicKey) -> [u8;32], "get_shard_key"); + make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> [u8;32], "get_shard_key"); } impl From> for AsyncBlockchainDb { diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index fd3b4cf587..0b3b882428 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -817,6 +817,7 @@ where B: BlockchainBackend block.header.output_mr = roots.output_mr; block.header.witness_mr = roots.witness_mr; block.header.output_mmr_size = roots.output_mmr_size; + block.header.validator_node_mr = roots.validator_node_mr; Ok(block) } @@ -840,14 +841,6 @@ where B: BlockchainBackend db.fetch_mmr_size(tree) } - pub fn get_validator_nodes_mr(&self) -> Result, ChainStorageError> { - let tip = self.get_height()?; - let validator_nodes = self.fetch_active_validator_nodes(tip + 1)?; - // Note: MMR is not balanced - let mmr = ValidatorNodeMmr::new(validator_nodes.iter().map(|vn| vn.shard_key.to_vec()).collect()); - Ok(mmr.get_merkle_root().unwrap()) - } - pub fn get_shard_key(&self, height: u64, 
public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { let db = self.db_read_access()?; db.get_shard_key(height, public_key) @@ -1208,20 +1201,24 @@ pub struct MmrRoots { pub output_mr: FixedHash, pub witness_mr: FixedHash, pub output_mmr_size: u64, + pub validator_node_mr: FixedHash, } impl std::fmt::Display for MmrRoots { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "MMR Roots")?; - writeln!(f, "Input MR : {}", &self.input_mr.to_hex())?; - writeln!(f, "Witness MR : {}", &self.witness_mr.to_hex())?; - writeln!(f, "Kernel MR : {}", &self.kernel_mr.to_hex())?; - writeln!(f, "Kernel MMR Size : {}", &self.kernel_mmr_size)?; - writeln!(f, "Output MR : {}", &self.output_mr.to_hex())?; - writeln!(f, "Output MMR Size : {}", &self.output_mmr_size) + writeln!(f, "Input MR : {}", self.input_mr)?; + writeln!(f, "Witness MR : {}", self.witness_mr)?; + writeln!(f, "Kernel MR : {}", self.kernel_mr)?; + writeln!(f, "Kernel MMR Size : {}", self.kernel_mmr_size)?; + writeln!(f, "Output MR : {}", self.output_mr)?; + writeln!(f, "Output MMR Size : {}", self.output_mmr_size)?; + writeln!(f, "Validator MR : {}", self.validator_node_mr)?; + Ok(()) } } +#[allow(clippy::too_many_lines)] #[allow(clippy::similar_names)] pub fn calculate_mmr_roots(db: &T, block: &Block) -> Result { let header = &block.header; @@ -1324,6 +1321,9 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul output_mmr.compress(); + let validator_nodes = db.fetch_active_validator_nodes(metadata.height_of_longest_chain() + 1)?; + let vn_mmr = ValidatorNodeMmr::new(validator_nodes.iter().map(|vn| vn.shard_key.to_vec()).collect()); + let mmr_roots = MmrRoots { kernel_mr: FixedHash::try_from(kernel_mmr.get_merkle_root()?)?, kernel_mmr_size: kernel_mmr.get_leaf_count()? 
as u64, @@ -1331,6 +1331,7 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul output_mr: FixedHash::try_from(output_mmr.get_merkle_root()?)?, output_mmr_size: output_mmr.get_leaf_count() as u64, witness_mr: FixedHash::try_from(witness_mmr.get_merkle_root()?)?, + validator_node_mr: FixedHash::try_from(vn_mmr.get_merkle_root()?)?, }; Ok(mmr_roots) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index ea43b223fc..813809f85d 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -93,11 +93,10 @@ use crate::{ PrunedOutput, Reorg, }, - consensus::{ConsensusManager, DomainSeparatedConsensusHasher}, + consensus::ConsensusManager, transactions::{ aggregated_body::AggregateBody, transaction_components::{TransactionError, TransactionInput, TransactionKernel, TransactionOutput}, - TransactionHashDomain, }, MutablePrunedOutputMmr, PrunedKernelMmr, @@ -1299,11 +1298,7 @@ impl LMDBDatabase { .as_ref() .and_then(|f| f.validator_node_registration()) { - let shard_key = DomainSeparatedConsensusHasher::::new("validator_node_root") - // - .chain(vn_reg) - .chain(&block_hash) - .finalize(); + let shard_key = vn_reg.derive_shard_key(&block_hash); let validator_node = ActiveValidatorNode { shard_key, diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 0846620edf..bab4e94574 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -70,6 +70,7 @@ fn apply_mmr_to_block(db: &BlockchainDatabase, block: Block) -> Bl block.header.output_mmr_size = mmr_roots.output_mmr_size; block.header.kernel_mr = mmr_roots.kernel_mr; block.header.kernel_mmr_size = mmr_roots.kernel_mmr_size; + block.header.validator_node_mr = mmr_roots.validator_node_mr; block } @@ -496,8 
+497,7 @@ mod prepare_new_block { fn it_errors_for_non_tip_template() { let db = setup(); let genesis = db.fetch_block(0).unwrap(); - let next_block = - BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); + let next_block = BlockHeader::from_previous(genesis.header()); let mut template = NewBlockTemplate::from_block(next_block.into_builder().build(), Difficulty::min(), 5000 * T); // This would cause a panic if the sanity checks were not there template.header.height = 100; @@ -512,8 +512,7 @@ mod prepare_new_block { fn it_prepares_the_first_block() { let db = setup(); let genesis = db.fetch_block(0).unwrap(); - let next_block = - BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); + let next_block = BlockHeader::from_previous(genesis.header()); let template = NewBlockTemplate::from_block(next_block.into_builder().build(), Difficulty::min(), 5000 * T); let block = db.prepare_new_block(template).unwrap(); assert_eq!(block.header.height, 1); @@ -633,10 +632,7 @@ mod clear_all_pending_headers { let mut prev_header = prev_block.try_into_chain_block().unwrap().to_chain_header(); let headers = (0..5) .map(|_| { - let mut header = BlockHeader::from_previous( - prev_header.header(), - prev_header.header().validator_node_merkle_root.clone(), - ); + let mut header = BlockHeader::from_previous(prev_header.header()); header.kernel_mmr_size += 1; header.output_mmr_size += 1; let accum = BlockHeaderAccumulatedData::builder(&prev_accum) diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index a00af389af..92b0a7f124 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -108,14 +108,6 @@ mod domain_hashing { ); pub type InputMmrHasherBlake256 = DomainSeparatedHasher; pub type PrunedInputMmr = MerkleMountainRange; -} - -#[cfg(feature = "base_node")] -pub use domain_hashing::*; - -mod validator_domain_hashing { - use tari_crypto::{hash::blake2::Blake256, 
hash_domain, hashing::DomainSeparatedHasher}; - use tari_mmr::{Hash, MerkleMountainRange}; hash_domain!( ValidatorNodeMmrHashDomain, @@ -126,4 +118,5 @@ mod validator_domain_hashing { pub type ValidatorNodeMmr = MerkleMountainRange>; } -pub use validator_domain_hashing::*; +#[cfg(feature = "base_node")] +pub use domain_hashing::*; diff --git a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs index 78a7c650ec..d78c6b8022 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs @@ -203,7 +203,6 @@ mod test { use crate::{ consensus::ConsensusEncoding, proof_of_work::{monero_rx::fixed_array::FixedByteArray, PowAlgorithm, ProofOfWork}, - ValidatorNodeMmr, }; // This tests checks the hash of monero-rs @@ -293,7 +292,6 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -309,7 +307,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -351,7 +349,6 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -367,7 +364,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: 
vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -405,7 +402,6 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let block = deserialize::(&bytes[..]).unwrap(); - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -421,7 +417,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let count = 1 + (u16::try_from(block.tx_hashes.len()).unwrap()); let mut hashes = Vec::with_capacity(count as usize); @@ -458,7 +454,6 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -474,7 +469,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let hash = Hash::null(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -515,7 +510,6 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -531,7 +525,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: 
vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); @@ -568,7 +562,6 @@ mod test { #[test] fn test_verify_header_no_data() { - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -584,7 +577,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let monero_data = MoneroPowData { header: Default::default(), @@ -612,7 +605,6 @@ mod test { let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string(); let bytes = hex::decode(blocktemplate_blob).unwrap(); let mut block = deserialize::(&bytes[..]).unwrap(); - let vn_mmr = ValidatorNodeMmr::new(Vec::new()); let mut block_header = BlockHeader { version: 0, height: 0, @@ -628,7 +620,7 @@ mod test { total_script_offset: Default::default(), nonce: 0, pow: ProofOfWork::default(), - validator_node_merkle_root: vn_mmr.get_merkle_root().unwrap(), + validator_node_mr: FixedHash::zero(), }; let hash = block_header.mining_hash(); append_merge_mining_tag(&mut block, hash).unwrap(); diff --git a/base_layer/core/src/proto/block.proto b/base_layer/core/src/proto/block.proto index badfd84e6c..452ef7a2cb 100644 --- a/base_layer/core/src/proto/block.proto +++ b/base_layer/core/src/proto/block.proto @@ -110,8 +110,6 @@ message NewBlockHeaderTemplate { ProofOfWork pow = 5; // Sum of script offsets for all kernels in this block. bytes total_script_offset = 6; - // Merkle root of validator nodes - bytes validator_node_merkle_root = 7; } // The new block template is used constructing a new partial block, allowing a miner to added the coinbase utxo and as a final step the Base node to add the MMR roots to the header. 
diff --git a/base_layer/core/src/proto/block.rs b/base_layer/core/src/proto/block.rs index e651673cc0..62c79d928e 100644 --- a/base_layer/core/src/proto/block.rs +++ b/base_layer/core/src/proto/block.rs @@ -221,7 +221,6 @@ impl TryFrom for NewBlockHeaderTemplate { total_kernel_offset, total_script_offset, pow, - validator_node_merkle_root: header.validator_node_merkle_root, }) } } @@ -235,7 +234,6 @@ impl From for proto::NewBlockHeaderTemplate { total_kernel_offset: header.total_kernel_offset.to_vec(), total_script_offset: header.total_script_offset.to_vec(), pow: Some(proto::ProofOfWork::from(header.pow)), - validator_node_merkle_root: header.validator_node_merkle_root, } } } diff --git a/base_layer/core/src/proto/block_header.rs b/base_layer/core/src/proto/block_header.rs index e221608aad..a49668ec85 100644 --- a/base_layer/core/src/proto/block_header.rs +++ b/base_layer/core/src/proto/block_header.rs @@ -68,7 +68,7 @@ impl TryFrom for BlockHeader { total_script_offset, nonce: header.nonce, pow, - validator_node_merkle_root: header.validator_node_merkle_root, + validator_node_mr: FixedHash::try_from(header.validator_node_merkle_root).map_err(|err| err.to_string())?, }) } } @@ -91,7 +91,7 @@ impl From for proto::BlockHeader { pow: Some(proto::ProofOfWork::from(header.pow)), kernel_mmr_size: header.kernel_mmr_size, output_mmr_size: header.output_mmr_size, - validator_node_merkle_root: header.validator_node_merkle_root, + validator_node_merkle_root: header.validator_node_mr.to_vec(), } } } diff --git a/base_layer/core/src/proto/sidechain_feature.rs b/base_layer/core/src/proto/sidechain_feature.rs index 3540f6b903..7f3998c67c 100644 --- a/base_layer/core/src/proto/sidechain_feature.rs +++ b/base_layer/core/src/proto/sidechain_feature.rs @@ -42,7 +42,9 @@ use crate::{ //---------------------------------- SideChainFeature --------------------------------------------// impl From for proto::types::SideChainFeature { fn from(value: SideChainFeature) -> Self { - 
value.into() + Self { + side_chain_feature: Some(value.into()), + } } } diff --git a/base_layer/core/src/test_helpers/mod.rs b/base_layer/core/src/test_helpers/mod.rs index f77890a6db..aac67e4318 100644 --- a/base_layer/core/src/test_helpers/mod.rs +++ b/base_layer/core/src/test_helpers/mod.rs @@ -63,8 +63,7 @@ pub fn create_orphan_block(block_height: u64, transactions: Vec, co } pub fn create_block(rules: &ConsensusManager, prev_block: &Block, spec: BlockSpec) -> (Block, UnblindedOutput) { - let mut header = - BlockHeader::from_previous(&prev_block.header, prev_block.header.validator_node_merkle_root.clone()); + let mut header = BlockHeader::from_previous(&prev_block.header); let block_height = spec.height_override.unwrap_or(prev_block.header.height + 1); header.height = block_height; // header.prev_hash = prev_block.hash(); diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs b/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs index 813d340ed1..c37fb83689 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/validator_node_registration.rs @@ -61,6 +61,14 @@ impl ValidatorNodeRegistration { .finalize() .into() } + + pub fn derive_shard_key(&self, block_hash: &FixedHash) -> [u8; 32] { + DomainSeparatedConsensusHasher::::new("validator_node_root") + // + .chain(self) + .chain(block_hash) + .finalize() + } } impl ConsensusEncoding for ValidatorNodeRegistration { diff --git a/base_layer/core/src/validation/block_validators/async_validator.rs b/base_layer/core/src/validation/block_validators/async_validator.rs index 696b90ca8f..a2f17463cd 100644 --- a/base_layer/core/src/validation/block_validators/async_validator.rs +++ b/base_layer/core/src/validation/block_validators/async_validator.rs @@ -54,7 +54,6 @@ use crate::{ 
BlockSyncBodyValidation, ValidationError, }, - ValidatorNodeMmr, }; /// This validator checks whether a block satisfies consensus rules. @@ -101,8 +100,6 @@ impl BlockValidator { let inputs_task = self.start_input_validation(&valid_header, outputs.iter().map(|o| o.hash()).collect(), inputs); - let validator_node_mmr_task = self.start_validator_node_mmr_validation(&valid_header); - // Output order cannot be checked concurrently so it is checked here first if !helpers::is_all_unique_and_sorted(&outputs) { return Err(ValidationError::UnsortedOrDuplicateOutput); @@ -113,7 +110,6 @@ impl BlockValidator { let outputs_result = outputs_task.await??; let inputs_result = inputs_task.await??; let kernels_result = kernels_task.await??; - validator_node_mmr_task.await??; // Perform final checks using validation outputs helpers::check_coinbase_maturity(&self.rules, valid_header.height, outputs_result.coinbase())?; @@ -470,25 +466,6 @@ impl BlockValidator { }) .into() } - - fn start_validator_node_mmr_validation( - &self, - header: &BlockHeader, - ) -> AbortOnDropJoinHandle> { - let vn_root = header.validator_node_merkle_root.clone(); - let height = header.height; - let db = self.db.inner().clone(); - task::spawn(async move { - let vns = db.fetch_active_validator_nodes(height)?; - let mmr = ValidatorNodeMmr::new(vns.iter().map(|vn| vn.shard_key.to_vec()).collect()); - if mmr.get_merkle_root().unwrap() == vn_root { - Ok(()) - } else { - Err(ValidationError::ValidatorNodeMmmrError) - } - }) - .into() - } } #[async_trait] diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs index f771647284..506994024c 100644 --- a/base_layer/core/src/validation/helpers.rs +++ b/base_layer/core/src/validation/helpers.rs @@ -552,6 +552,19 @@ pub fn check_mmr_roots(header: &BlockHeader, mmr_roots: &MmrRoots) -> Result<(), kind: "Input", })); } + if header.validator_node_mr != mmr_roots.validator_node_mr { + warn!( + target: LOG_TARGET, + "Block header 
validator node merkle root in {} do not match calculated root. Header.validator_node_mr: \ + {}, Calculated: {}", + header.hash().to_hex(), + header.validator_node_mr.to_hex(), + mmr_roots.validator_node_mr.to_hex() + ); + return Err(ValidationError::BlockError(BlockValidationError::MismatchedMmrRoots { + kind: "Validator Node", + })); + } Ok(()) } diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index 90723ccc68..71f41083ff 100644 --- a/base_layer/core/src/validation/test.rs +++ b/base_layer/core/src/validation/test.rs @@ -113,8 +113,7 @@ mod header_validators { let genesis = db.fetch_chain_header(0).unwrap(); - let mut header = - BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); + let mut header = BlockHeader::from_previous(genesis.header()); header.version = u16::MAX; let validator = HeaderValidator::new(consensus_manager.clone()); @@ -202,7 +201,7 @@ fn chain_balance_validation() { .build() .unwrap(); - let mut header1 = BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); + let mut header1 = BlockHeader::from_previous(genesis.header()); header1.kernel_mmr_size += 1; header1.output_mmr_size += 1; let achieved_difficulty = AchievedTargetDifficulty::try_construct( @@ -254,7 +253,7 @@ fn chain_balance_validation() { .build() .unwrap(); - let mut header2 = BlockHeader::from_previous(header1.header(), header1.header().validator_node_merkle_root.clone()); + let mut header2 = BlockHeader::from_previous(header1.header()); header2.kernel_mmr_size += 1; header2.output_mmr_size += 1; let achieved_difficulty = AchievedTargetDifficulty::try_construct( @@ -376,7 +375,7 @@ fn chain_balance_validation_burned() { .build() .unwrap(); burned_sum = &burned_sum + kernel2.get_burn_commitment().unwrap(); - let mut header1 = BlockHeader::from_previous(genesis.header(), genesis.header().validator_node_merkle_root.clone()); + let mut header1 = 
BlockHeader::from_previous(genesis.header()); header1.kernel_mmr_size += 2; header1.output_mmr_size += 2; let achieved_difficulty = AchievedTargetDifficulty::try_construct( diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 313a2fa43b..a473b8ce6c 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -89,10 +89,7 @@ fn insert_and_fetch_header() { let _consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); let genesis_block = store.fetch_tip_header().unwrap(); - let mut header1 = BlockHeader::from_previous( - genesis_block.header(), - genesis_block.header().validator_node_merkle_root.clone(), - ); + let mut header1 = BlockHeader::from_previous(genesis_block.header()); header1.kernel_mmr_size += 1; header1.output_mmr_size += 1; @@ -100,7 +97,7 @@ fn insert_and_fetch_header() { let chain1 = create_chain_header(header1.clone(), genesis_block.accumulated_data()); store.insert_valid_headers(vec![chain1.clone()]).unwrap(); - let mut header2 = BlockHeader::from_previous(&header1, header1.validator_node_merkle_root.clone()); + let mut header2 = BlockHeader::from_previous(&header1); header2.kernel_mmr_size += 2; header2.output_mmr_size += 2; let chain2 = create_chain_header(header2.clone(), chain1.accumulated_data()); diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index 36fa4c5e8d..242cd44e10 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -58,6 +58,7 @@ use tari_core::{ KernelMmr, KernelMmrHasherBlake256, MutableOutputMmr, + ValidatorNodeMmr, WitnessMmr, WitnessMmrHasherBlake256, }; @@ -107,24 +108,24 @@ fn genesis_template( (block, output) } -#[test] // #[ignore = "used to generate a new esmeralda genesis block"] /// 
This is a helper function to generate and print out a block that can be used as the genesis block. /// 1. Run `cargo test --package tari_core --test mempool -- helpers::block_builders::print_new_genesis_block_esmeralda /// --exact --nocapture` /// 1. The block and range proof will be printed /// 1. Profit! +#[test] fn print_new_genesis_block_esmeralda() { print_new_genesis_block(Network::Esmeralda); } -#[test] // #[ignore = "used to generate a new igor genesis block"] /// This is a helper function to generate and print out a block that can be used as the genesis block. /// 1. Run `cargo test --package tari_core --test mempool -- helpers::block_builders::print_new_genesis_block_igor /// --exact --nocapture` /// 1. The block and range proof will be printed /// 1. Profit! +#[test] fn print_new_genesis_block_igor() { print_new_genesis_block(Network::Igor); } @@ -159,12 +160,14 @@ fn print_new_genesis_block(network: Network) { witness_mmr.push(utxo.witness_hash().to_vec()).unwrap(); let mut output_mmr = MutableOutputMmr::new(Vec::new(), Bitmap::create()).unwrap(); output_mmr.push(utxo.hash().to_vec()).unwrap(); + let vn_mmr = ValidatorNodeMmr::new(Vec::new()); header.kernel_mr = FixedHash::try_from(kernel_mmr.get_merkle_root().unwrap()).unwrap(); header.kernel_mmr_size += 1; header.output_mr = FixedHash::try_from(output_mmr.get_merkle_root().unwrap()).unwrap(); header.witness_mr = FixedHash::try_from(witness_mmr.get_merkle_root().unwrap()).unwrap(); header.output_mmr_size += 1; + header.validator_node_mr = FixedHash::try_from(vn_mmr.get_merkle_root().unwrap()).unwrap(); // header.kernel_mr = kernel.hash(); // header.kernel_mmr_size += 1; @@ -213,6 +216,7 @@ fn print_new_genesis_block(network: Network) { println!("header output_mr: {}", block.header.output_mr.to_hex()); println!("header witness_mr: {}", block.header.witness_mr.to_hex()); println!("header kernel_mr: {}", block.header.kernel_mr.to_hex()); + println!("header validator_node_mr: {}", 
block.header.validator_node_mr.to_hex()); println!( "header total_kernel_offset: {}", block.header.total_kernel_offset.to_hex() @@ -338,8 +342,7 @@ pub fn chain_block( transactions: Vec, consensus: &ConsensusManager, ) -> NewBlockTemplate { - let mut header = - BlockHeader::from_previous(&prev_block.header, prev_block.header.validator_node_merkle_root.clone()); + let mut header = BlockHeader::from_previous(&prev_block.header); header.version = consensus.consensus_constants(header.height).blockchain_version(); let height = header.height; let reward = consensus.get_block_reward_at(height); @@ -367,10 +370,7 @@ pub fn chain_block_with_coinbase( coinbase_kernel: TransactionKernel, consensus: &ConsensusManager, ) -> NewBlockTemplate { - let mut header = BlockHeader::from_previous( - prev_block.header(), - prev_block.header().validator_node_merkle_root.clone(), - ); + let mut header = BlockHeader::from_previous(prev_block.header()); header.version = consensus.consensus_constants(header.height).blockchain_version(); let height = header.height; NewBlockTemplate::from_block( @@ -401,10 +401,7 @@ pub fn chain_block_with_new_coinbase( coinbase_value, height + consensus_manager.consensus_constants(height).coinbase_lock_height(), ); - let mut header = BlockHeader::from_previous( - prev_block.header(), - prev_block.header().validator_node_merkle_root.clone(), - ); + let mut header = BlockHeader::from_previous(prev_block.header()); header.height = height; header.version = consensus_manager .consensus_constants(header.height) From e7f2d34739a704eaa209a5f92110edc43d493eef Mon Sep 17 00:00:00 2001 From: stringhandler Date: Wed, 21 Sep 2022 08:57:24 +0200 Subject: [PATCH 07/21] fix after merge --- .../tari_base_node/src/grpc/base_node_grpc_server.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 8585d9d154..258dbdc3d7 100644 
--- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1442,7 +1442,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { .await .map_err(|e| { error!(target: LOG_TARGET, "Error {}", e); - report_error(report_error_flag, Status::internal(e.to_string())) + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) })? .iter() .map(|a| a.shard_key.to_vec()) @@ -1458,10 +1458,10 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let report_error_flag = self.report_error_flag(); let mut handler = self.node_service.clone(); let public_key = PublicKey::from_bytes(&request.public_key) - .map_err(|e| report_error(report_error_flag, Status::invalid_argument(e.to_string())))?; + .map_err(|e| obscure_error_if_true(report_error_flag, Status::invalid_argument(e.to_string())))?; let shard_key = handler.get_shard_key(request.height, public_key).await.map_err(|e| { error!(target: LOG_TARGET, "Error {}", e); - report_error(report_error_flag, Status::internal(e.to_string())) + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) })?; Ok(Response::new(tari_rpc::GetShardKeyResponse { shard_key: shard_key.to_vec(), @@ -1496,7 +1496,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { "Error sending converting active validator node for GRPC: {}", e ); match tx - .send(Err(report_error( + .send(Err(obscure_error_if_true( report_error_flag, Status::internal("Error converting active validator node"), ))) @@ -1519,7 +1519,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { "Error sending mempool transaction via GRPC: {}", err ); match tx - .send(Err(report_error( + .send(Err(obscure_error_if_true( report_error_flag, Status::unknown("Error sending data"), ))) From cfa05beca87d3ac4687e1794c7d6b6aded5b0671 Mon Sep 17 00:00:00 2001 From: Miguel Naveira <47919901+mrnaveira@users.noreply.github.com> 
Date: Fri, 23 Sep 2022 03:59:51 -0600 Subject: [PATCH 08/21] feat(base_node_grpc_client): add getActiveValidatorNodes method (#4719) Description --- Adding the `getActiveValidatorNodes` method to `base_node_grpc_client` JS client. Motivation and Context --- For the `tari-explorer` web app (and other future usages, like cucumber integration tests), we want to retrieve a list of the active validator nodes from the base node JS client. The method is already implemented in the base node itself, this PR is only to allow calling it from the client. How Has This Been Tested? --- Manually from a modified version of the `tari-explorer` web app --- clients/base_node_grpc_client/src/index.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients/base_node_grpc_client/src/index.js b/clients/base_node_grpc_client/src/index.js index 8a83925d9f..4413265dd7 100644 --- a/clients/base_node_grpc_client/src/index.js +++ b/clients/base_node_grpc_client/src/index.js @@ -42,7 +42,8 @@ function Client(address = "127.0.0.1:18142") { "getTipInfo", "searchUtxos", "getTokens", - "getNetworkDifficulty" + "getNetworkDifficulty", + "getActiveValidatorNodes", ]; methods.forEach((method) => { this[method] = (arg) => this.inner[method]().sendMessage(arg); From 72018f4834b8ee8fe1228c25a6be33189bdd2a3c Mon Sep 17 00:00:00 2001 From: stringhandler Date: Tue, 27 Sep 2022 11:31:46 +0200 Subject: [PATCH 09/21] fix: fix validator node registration logic (#4718) --- .../tari_app_grpc/proto/base_node.proto | 11 +- .../src/conversions/active_validator_node.rs | 59 --------- .../tari_app_grpc/src/conversions/mod.rs | 1 - .../src/grpc/base_node_grpc_server.rs | 29 +---- .../comms_interface/comms_response.rs | 4 +- .../comms_interface/local_interface.rs | 2 +- .../chain_storage/active_validator_node.rs | 3 +- base_layer/core/src/chain_storage/async_db.rs | 3 +- .../src/chain_storage/blockchain_backend.rs | 2 +- .../src/chain_storage/blockchain_database.rs | 4 +- 
.../core/src/chain_storage/db_transaction.rs | 6 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 115 ++++++++++++------ .../core/src/consensus/consensus_constants.rs | 4 +- .../core/src/test_helpers/blockchain.rs | 5 +- 14 files changed, 103 insertions(+), 145 deletions(-) delete mode 100644 applications/tari_app_grpc/src/conversions/active_validator_node.rs diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 063ff4481c..957176e14b 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -89,7 +89,7 @@ service BaseNode { // Get mempool stats rpc GetMempoolStats(Empty) returns (MempoolStatsResponse); // Get VNs - rpc GetActiveValidatorNodes(GetActiveValidatorNodesRequest) returns (stream ActiveValidatorNode); + rpc GetActiveValidatorNodes(GetActiveValidatorNodesRequest) returns (stream GetActiveValidatorNodesResponse); rpc GetCommittee(GetCommitteeRequest) returns (GetCommitteeResponse); rpc GetShardKey(GetShardKeyRequest) returns (GetShardKeyResponse); } @@ -442,13 +442,12 @@ message GetActiveValidatorNodesRequest { uint64 height = 1; } -message ActiveValidatorNode { +message GetActiveValidatorNodesResponse { bytes shard_key = 1; - uint64 from_height = 2; - uint64 to_height = 3; - bytes public_key = 4; + bytes public_key = 2; } + message GetCommitteeRequest { uint64 height = 1; bytes shard_key = 2; @@ -465,4 +464,4 @@ message GetShardKeyRequest { message GetShardKeyResponse { bytes shard_key = 1; -} \ No newline at end of file +} diff --git a/applications/tari_app_grpc/src/conversions/active_validator_node.rs b/applications/tari_app_grpc/src/conversions/active_validator_node.rs deleted file mode 100644 index 4c22cd8116..0000000000 --- a/applications/tari_app_grpc/src/conversions/active_validator_node.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2020. 
The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -use std::convert::{TryFrom, TryInto}; - -use tari_common_types::types::PublicKey; -use tari_core::chain_storage::ActiveValidatorNode; -use tari_utilities::ByteArray; - -use crate::tari_rpc as grpc; - -impl TryFrom for grpc::ActiveValidatorNode { - type Error = String; - - fn try_from(active_validator_node: ActiveValidatorNode) -> Result { - Ok(Self { - shard_key: active_validator_node.shard_key.to_vec(), - from_height: active_validator_node.from_height, - to_height: active_validator_node.to_height, - public_key: active_validator_node.public_key.to_vec(), - }) - } -} - -impl TryFrom for ActiveValidatorNode { - type Error = String; - - fn try_from(active_validator_node: grpc::ActiveValidatorNode) -> Result { - let shard_key = active_validator_node.shard_key.try_into().unwrap(); - let public_key = - PublicKey::from_vec(&active_validator_node.public_key).map_err(|_| "Could not public key".to_string())?; - - Ok(Self { - shard_key, - from_height: active_validator_node.from_height, - to_height: active_validator_node.to_height, - public_key, - }) - } -} diff --git a/applications/tari_app_grpc/src/conversions/mod.rs b/applications/tari_app_grpc/src/conversions/mod.rs index eb58f7d42c..f9b6d30455 100644 --- a/applications/tari_app_grpc/src/conversions/mod.rs +++ b/applications/tari_app_grpc/src/conversions/mod.rs @@ -20,7 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-mod active_validator_node; mod aggregate_body; mod base_node_state; mod block; diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 98f78a490a..61666eac5d 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -135,7 +135,7 @@ impl BaseNodeGrpcServer {} #[tonic::async_trait] impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { type FetchMatchingUtxosStream = mpsc::Receiver>; - type GetActiveValidatorNodesStream = mpsc::Receiver>; + type GetActiveValidatorNodesStream = mpsc::Receiver>; type GetBlocksStream = mpsc::Receiver>; type GetMempoolTransactionsStream = mpsc::Receiver>; type GetNetworkDifficultyStream = mpsc::Receiver>; @@ -1488,28 +1488,11 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { }, Ok(data) => data, }; - for active_validator_node in active_validator_nodes { - let active_validator_node = match tari_rpc::ActiveValidatorNode::try_from(active_validator_node) { - Ok(t) => t, - Err(e) => { - warn!( - target: LOG_TARGET, - "Error sending converting active validator node for GRPC: {}", e - ); - match tx - .send(Err(obscure_error_if_true( - report_error_flag, - Status::internal("Error converting active validator node"), - ))) - .await - { - Ok(_) => (), - Err(send_err) => { - warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) - }, - } - return; - }, + dbg!(&active_validator_nodes); + for (public_key, shard_key) in active_validator_nodes { + let active_validator_node = tari_rpc::GetActiveValidatorNodesResponse { + public_key: public_key.to_vec(), + shard_key: shard_key.to_vec(), }; match tx.send(Ok(active_validator_node)).await { diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 6298ba8cc7..214b00216e 100644 --- 
a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -27,7 +27,7 @@ use std::{ use tari_common_types::{ chain_metadata::ChainMetadata, - types::{HashOutput, PrivateKey}, + types::{HashOutput, PrivateKey, PublicKey}, }; use crate::{ @@ -71,7 +71,7 @@ pub enum NodeCommsResponse { FetchOutputsByContractIdResponse { outputs: Vec, }, - FetchValidatorNodesKeysResponse(Vec), + FetchValidatorNodesKeysResponse(Vec<(PublicKey, [u8; 32])>), FetchCommitteeResponse(Vec), GetShardKeyResponse([u8; 32]), } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index be699e725d..ada66a3644 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -276,7 +276,7 @@ impl LocalNodeCommsInterface { pub async fn get_active_validator_nodes( &mut self, height: u64, - ) -> Result, CommsInterfaceError> { + ) -> Result, CommsInterfaceError> { match self .request_sender .call(NodeCommsRequest::FetchValidatorNodesKeys { height }) diff --git a/base_layer/core/src/chain_storage/active_validator_node.rs b/base_layer/core/src/chain_storage/active_validator_node.rs index fa6a4a7dfe..374317bc20 100644 --- a/base_layer/core/src/chain_storage/active_validator_node.rs +++ b/base_layer/core/src/chain_storage/active_validator_node.rs @@ -21,7 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use serde::{Deserialize, Serialize}; -use tari_common_types::types::PublicKey; +use tari_common_types::types::{HashOutput, PublicKey}; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct ActiveValidatorNode { @@ -29,4 +29,5 @@ pub struct ActiveValidatorNode { pub from_height: u64, pub to_height: u64, pub public_key: PublicKey, + pub output_hash: HashOutput, } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 95bccf91b1..14ad5b9c35 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -64,7 +64,6 @@ use crate::{ proof_of_work::{PowAlgorithm, TargetDifficultyWindow}, transactions::transaction_components::{TransactionKernel, TransactionOutput}, }; - const LOG_TARGET: &str = "c::bn::async_db"; fn trace_log(name: &str, f: F) -> R @@ -266,7 +265,7 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_total_size_stats() -> DbTotalSizeStats, "fetch_total_size_stats"); - make_async_fn!(fetch_active_validator_nodes(height: u64) -> Vec, "fetch_active_validator_nodes"); + make_async_fn!(fetch_active_validator_nodes(height: u64) -> Vec<(PublicKey, [u8;32])>, "fetch_active_validator_nodes"); make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index c7f0d72626..fbc8dc1b4d 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -193,7 +193,7 @@ pub trait BlockchainBackend: Send + Sync { /// Fetches all tracked reorgs fn fetch_all_reorgs(&self) -> Result, ChainStorageError>; - fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; + fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, 
ChainStorageError>; fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError>; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 0b3b882428..59d54b4c23 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1172,7 +1172,7 @@ where B: BlockchainBackend db.write(txn) } - pub fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { + pub fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { let db = self.db_read_access()?; db.fetch_active_validator_nodes(height) } @@ -1322,7 +1322,7 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul output_mmr.compress(); let validator_nodes = db.fetch_active_validator_nodes(metadata.height_of_longest_chain() + 1)?; - let vn_mmr = ValidatorNodeMmr::new(validator_nodes.iter().map(|vn| vn.shard_key.to_vec()).collect()); + let vn_mmr = ValidatorNodeMmr::new(validator_nodes.iter().map(|vn| vn.1.to_vec()).collect()); let mmr_roots = MmrRoots { kernel_mr: FixedHash::try_from(kernel_mmr.get_merkle_root()?)?, diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 105f217f29..6eb2ed3f4f 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -27,7 +27,7 @@ use std::{ }; use croaring::Bitmap; -use tari_common_types::types::{BlockHash, Commitment, HashOutput, PublicKey}; +use tari_common_types::types::{BlockHash, Commitment, HashOutput}; use tari_utilities::hex::Hex; use super::ActiveValidatorNode; @@ -362,9 +362,6 @@ pub enum WriteOperation { InsertValidatorNode { validator_node: ActiveValidatorNode, }, - DeleteValidatorNode { - public_key: PublicKey, - }, } impl fmt::Display for WriteOperation { @@ -464,7 +461,6 @@ impl fmt::Display 
for WriteOperation { InsertValidatorNode { validator_node } => { write!(f, "Inserting VN {:?}", validator_node) }, - DeleteValidatorNode { public_key } => write!(f, "Delete VN key {}", public_key), } } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 8586c28516..977b381319 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -25,7 +25,18 @@ #![allow(clippy::ptr_arg)] -use std::{convert::TryFrom, fmt, fs, fs::File, mem, ops::Deref, path::Path, sync::Arc, time::Instant}; +use std::{ + collections::HashMap, + convert::TryFrom, + fmt, + fs, + fs::File, + mem, + ops::Deref, + path::Path, + sync::Arc, + time::Instant, +}; use croaring::Bitmap; use fs2::FileExt; @@ -133,6 +144,7 @@ const LMDB_DB_BAD_BLOCK_LIST: &str = "bad_blocks"; const LMDB_DB_REORGS: &str = "reorgs"; const LMDB_DB_VALIDATOR_NODES: &str = "validator_nodes"; const LMDB_DB_VALIDATOR_NODES_MAPPING: &str = "validator_nodes_mapping"; +const LMDB_DB_VALIDATOR_NODE_ENDING: &str = "validator_node_ending"; pub fn create_lmdb_database>( path: P, @@ -177,6 +189,7 @@ pub fn create_lmdb_database>( .add_database(LMDB_DB_REORGS, flags | db::INTEGERKEY) .add_database(LMDB_DB_VALIDATOR_NODES, flags | db::DUPSORT) .add_database(LMDB_DB_VALIDATOR_NODES_MAPPING, flags | db::DUPSORT) + .add_database(LMDB_DB_VALIDATOR_NODE_ENDING, flags | db::INTEGERKEY | db::DUPSORT) .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; debug!(target: LOG_TARGET, "LMDB database creation successful"); @@ -239,6 +252,8 @@ pub struct LMDBDatabase { validator_nodes: DatabaseRef, /// Maps VN Shard Key -> VN Public Key validator_nodes_mapping: DatabaseRef, + /// Maps the end block height of nodes + validator_nodes_ending: DatabaseRef, _file_lock: Arc, consensus_manager: ConsensusManager, } @@ -281,6 +296,7 @@ impl LMDBDatabase { reorgs: 
get_database(store, LMDB_DB_REORGS)?, validator_nodes: get_database(store, LMDB_DB_VALIDATOR_NODES)?, validator_nodes_mapping: get_database(store, LMDB_DB_VALIDATOR_NODES_MAPPING)?, + validator_nodes_ending: get_database(store, LMDB_DB_VALIDATOR_NODE_ENDING)?, env, env_config: store.env_config(), _file_lock: Arc::new(file_lock), @@ -486,11 +502,6 @@ impl LMDBDatabase { InsertValidatorNode { validator_node } => { self.insert_validator_node(&write_txn, validator_node)?; }, - DeleteValidatorNode { public_key } => { - let txn = self.read_transaction()?; - let shard_key = self.get_vn_mapping(&txn, public_key)?; - self.delete_validator_node(&write_txn, public_key, &shard_key)?; - }, } } write_txn.commit()?; @@ -1272,9 +1283,7 @@ impl LMDBDatabase { .as_ref() .and_then(|f| f.validator_node_registration()) { - let read_txn = self.read_transaction()?; - let shard_key = self.get_vn_mapping(&read_txn, &vn_reg.public_key)?; - self.delete_validator_node(txn, &vn_reg.public_key, &shard_key)?; + self.delete_validator_node(txn, &vn_reg.public_key, &input.output_hash())?; } if !output_mmr.delete(index) { @@ -1311,8 +1320,9 @@ impl LMDBDatabase { 1 + self.consensus_manager .consensus_constants(header.height) - .get_validator_node_timeout(), + .validator_node_timeout(), public_key: vn_reg.public_key.clone(), + output_hash: output.hash(), }; self.insert_validator_node(txn, &validator_node)?; } @@ -1562,12 +1572,14 @@ impl LMDBDatabase { txn: &WriteTransaction<'_>, validator_node: &ActiveValidatorNode, ) -> Result<(), ChainStorageError> { - lmdb_insert( + let mut key = validator_node.public_key.to_vec(); + key.extend(validator_node.output_hash.as_slice()); + lmdb_insert(txn, &self.validator_nodes, &key, validator_node, "validator_nodes")?; + lmdb_insert_dup( txn, - &self.validator_nodes, - validator_node.public_key.as_bytes(), + &self.validator_nodes_ending, + &validator_node.to_height.to_le_bytes(), validator_node, - "validator_nodes", )?; lmdb_insert( txn, @@ -1578,25 +1590,29 @@ 
impl LMDBDatabase { ) } - fn get_vn_mapping(&self, txn: &ReadTransaction<'_>, public_key: &PublicKey) -> Result<[u8; 32], ChainStorageError> { - let x: ActiveValidatorNode = lmdb_get(txn, &self.validator_nodes, public_key.as_bytes())?.ok_or_else(|| { - ChainStorageError::ValueNotFound { - entity: "ActiveValidatorNode", - field: "public_key", - value: public_key.to_hex(), - } - })?; - Ok(x.shard_key) - } - fn delete_validator_node( &self, txn: &WriteTransaction<'_>, public_key: &PublicKey, - shard_key: &[u8; 32], + output_hash: &HashOutput, ) -> Result<(), ChainStorageError> { - lmdb_delete(txn, &self.validator_nodes, public_key.as_bytes(), "validator_nodes")?; - lmdb_delete(txn, &self.validator_nodes, shard_key, "validator_nodes_mapping")?; + let mut key = public_key.to_vec(); + key.extend(output_hash.as_slice()); + let x: ActiveValidatorNode = + lmdb_get(txn, &self.validator_nodes, &key)?.ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "ActiveValidatorNode", + field: "public_key and outputhash", + value: key.to_hex(), + })?; + + lmdb_delete_key_value(txn, &self.validator_nodes_ending, &x.to_height.to_le_bytes(), &x)?; + lmdb_delete(txn, &self.validator_nodes, &key, "validator_nodes")?; + lmdb_delete( + txn, + &self.validator_nodes_mapping, + &x.shard_key, + "validator_nodes_mapping", + )?; Ok(()) } @@ -2407,15 +2423,42 @@ impl BlockchainBackend for LMDBDatabase { lmdb_filter_map_values(&txn, &self.reorgs, Some) } - fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { + // The clippy warning is because PublicKey has a public inner type that could change. In this case + // it should be fine to ignore the warning. You could also change the logic to use something + // other than a hashmap. 
+ #[allow(clippy::mutable_key_type)] + fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - lmdb_filter_map_values(&txn, &self.validator_nodes, |vn: ActiveValidatorNode| { - if vn.from_height <= height && vn.to_height >= height { - Some(vn) - } else { - None - } - }) + let validator_node_timeout = self + .consensus_manager + .consensus_constants(height) + .validator_node_timeout(); + let mut pub_keys = HashMap::new(); + + let end = height + validator_node_timeout; + for h in height..end { + lmdb_get_multiple(&txn, &self.validator_nodes_ending, &h.to_le_bytes())? + .into_iter() + .for_each(|v: ActiveValidatorNode| { + if v.from_height <= height { + if let Some((shard_key, start)) = + pub_keys.insert(v.public_key.clone(), (v.shard_key, v.from_height)) + { + // If the node is already in the map, check if the start height is higher. If it is, replace + // the old value with the new one. + if start > v.from_height { + pub_keys.insert(v.public_key, (shard_key, start)); + } + } + } + }); + } + + // now remove the heights + Ok(pub_keys + .into_iter() + .map(|(pk, (shard_key, _))| (pk, shard_key)) + .collect()) } fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index 0b658722cc..e4cbcf6708 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -288,7 +288,7 @@ impl ConsensusConstants { self.permitted_output_types } - pub fn get_validator_node_timeout(&self) -> u64 { + pub fn validator_node_timeout(&self) -> u64 { self.validator_node_timeout } @@ -415,7 +415,7 @@ impl ConsensusConstants { kernel_version_range, // igor is the first network to support the new output types permitted_output_types: OutputType::all(), - validator_node_timeout: 0, + validator_node_timeout: 100, }] } 
diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index c37c03cd05..88b7e9204c 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -414,10 +414,7 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_all_reorgs() } - fn fetch_active_validator_nodes( - &self, - height: u64, - ) -> Result, ChainStorageError> { + fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError> { self.db.as_ref().unwrap().fetch_active_validator_nodes(height) } From 27f77b27e67f748631664f7cc94e34065fe48b7c Mon Sep 17 00:00:00 2001 From: Miguel Naveira <47919901+mrnaveira@users.noreply.github.com> Date: Tue, 27 Sep 2022 10:46:00 -0600 Subject: [PATCH 10/21] feat(core): store and fetch templates from lmdb (#4726) Description --- * Created a new `lmdb` database for template registrations, with methods for inserting and fetching them. * Base layer stores in the database all templates that appear in new blocks. * New gRPC method `GetTemplateRegistrations` to retrieve all new templates since a specific block height Motivation and Context --- In [previous work](https://github.com/tari-project/tari/pull/4470) we added template registration to UTXO sidechain features. The next step is for the base layer to store and index all template registrations that appear in blocks, as well as provide a gRPC method to retrieve them. As the main query for templates will come from the base layer scanner in the Validator Node, the gRPC query method should allow to filter all new templates since a specific block height. How Has This Been Tested? --- Manually ran the base layer and perform a gRPC query via Postman. 
--- .../tari_app_grpc/proto/base_node.proto | 7 ++ .../src/grpc/base_node_grpc_server.rs | 78 ++++++++++++++++++- .../comms_interface/comms_request.rs | 4 + .../comms_interface/comms_response.rs | 9 ++- .../comms_interface/inbound_handlers.rs | 12 +++ .../comms_interface/local_interface.rs | 16 +++- base_layer/core/src/chain_storage/async_db.rs | 4 +- .../src/chain_storage/blockchain_backend.rs | 3 +- .../src/chain_storage/blockchain_database.rs | 10 ++- .../core/src/chain_storage/db_transaction.rs | 8 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 52 ++++++++++++- base_layer/core/src/chain_storage/mod.rs | 3 + .../src/chain_storage/template_registation.rs | 31 ++++++++ .../core/src/test_helpers/blockchain.rs | 5 ++ .../side_chain/template_registration.rs | 38 ++++++--- 15 files changed, 263 insertions(+), 17 deletions(-) create mode 100644 base_layer/core/src/chain_storage/template_registation.rs diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 957176e14b..4b61816a22 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -25,6 +25,7 @@ import "types.proto"; import "transaction.proto"; import "block.proto"; import "network.proto"; +import "sidechain_types.proto"; package tari.rpc; @@ -92,6 +93,8 @@ service BaseNode { rpc GetActiveValidatorNodes(GetActiveValidatorNodesRequest) returns (stream GetActiveValidatorNodesResponse); rpc GetCommittee(GetCommitteeRequest) returns (GetCommitteeResponse); rpc GetShardKey(GetShardKeyRequest) returns (GetShardKeyResponse); + // Get templates + rpc GetTemplateRegistrations(GetTemplateRegistrationsRequest) returns (stream TemplateRegistration); } message GetAssetMetadataRequest { @@ -465,3 +468,7 @@ message GetShardKeyRequest { message GetShardKeyResponse { bytes shard_key = 1; } + +message GetTemplateRegistrationsRequest { + uint64 from_height = 1; +} diff --git 
a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 61666eac5d..09bd6783b7 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -140,6 +140,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { type GetMempoolTransactionsStream = mpsc::Receiver>; type GetNetworkDifficultyStream = mpsc::Receiver>; type GetPeersStream = mpsc::Receiver>; + type GetTemplateRegistrationsStream = mpsc::Receiver>; type GetTokensInCirculationStream = mpsc::Receiver>; type ListHeadersStream = mpsc::Receiver>; type SearchKernelsStream = mpsc::Receiver>; @@ -1488,7 +1489,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { }, Ok(data) => data, }; - dbg!(&active_validator_nodes); + // dbg!(&active_validator_nodes); for (public_key, shard_key) in active_validator_nodes { let active_validator_node = tari_rpc::GetActiveValidatorNodesResponse { public_key: public_key.to_vec(), @@ -1525,6 +1526,81 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { ); Ok(Response::new(rx)) } + + async fn get_template_registrations( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let report_error_flag = self.report_error_flag(); + debug!(target: LOG_TARGET, "Incoming GRPC request for GetTemplateRegistrations"); + + let mut handler = self.node_service.clone(); + let (mut tx, rx) = mpsc::channel(1000); + + task::spawn(async move { + let template_registrations = match handler.get_template_registrations(request.from_height).await { + Err(err) => { + warn!(target: LOG_TARGET, "Error communicating with base node: {}", err,); + return; + }, + Ok(data) => data, + }; + + for template_registration in template_registrations { + let template_registration = match tari_rpc::TemplateRegistration::try_from(template_registration) { + Ok(t) => t, + Err(e) => { + 
warn!( + target: LOG_TARGET, + "Error sending converting template registration for GRPC: {}", e + ); + match tx + .send(Err(obscure_error_if_true( + report_error_flag, + Status::internal("Error converting template_registration"), + ))) + .await + { + Ok(_) => (), + Err(send_err) => { + warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) + }, + } + return; + }, + }; + + match tx.send(Ok(template_registration)).await { + Ok(_) => (), + Err(err) => { + warn!( + target: LOG_TARGET, + "Error sending template registration via GRPC: {}", err + ); + match tx + .send(Err(obscure_error_if_true( + report_error_flag, + Status::unknown("Error sending data"), + ))) + .await + { + Ok(_) => (), + Err(send_err) => { + warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) + }, + } + return; + }, + } + } + }); + debug!( + target: LOG_TARGET, + "Sending GetTemplateRegistrations response stream to client" + ); + Ok(Response::new(rx)) + } } enum BlockGroupType { diff --git a/base_layer/core/src/base_node/comms_interface/comms_request.rs b/base_layer/core/src/base_node/comms_interface/comms_request.rs index 817438bc05..a46e08df52 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_request.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_request.rs @@ -59,6 +59,7 @@ pub enum NodeCommsRequest { FetchValidatorNodesKeys { height: u64 }, FetchCommittee { height: u64, shard: [u8; 32] }, GetShardKey { height: u64, public_key: PublicKey }, + FetchTemplateRegistrations { from_height: u64 }, } #[derive(Debug, Serialize, Deserialize)] @@ -106,6 +107,9 @@ impl Display for NodeCommsRequest { GetShardKey { height, public_key } => { write!(f, "GetShardKey height ({}), public key ({:?})", height, public_key) }, + FetchTemplateRegistrations { from_height } => { + write!(f, "FetchTemplateRegistrations ({})", from_height) + }, } } } diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs 
b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 214b00216e..9dd0d6b360 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -34,7 +34,12 @@ use crate::{ blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, chain_storage::{ActiveValidatorNode, UtxoMinedInfo}, proof_of_work::Difficulty, - transactions::transaction_components::{Transaction, TransactionKernel, TransactionOutput}, + transactions::transaction_components::{ + CodeTemplateRegistration, + Transaction, + TransactionKernel, + TransactionOutput, + }, }; /// API Response enum @@ -74,6 +79,7 @@ pub enum NodeCommsResponse { FetchValidatorNodesKeysResponse(Vec<(PublicKey, [u8; 32])>), FetchCommitteeResponse(Vec), GetShardKeyResponse([u8; 32]), + FetchTemplateRegistrationsResponse(Vec), } impl Display for NodeCommsResponse { @@ -115,6 +121,7 @@ impl Display for NodeCommsResponse { FetchValidatorNodesKeysResponse(_) => write!(f, "FetchValidatorNodesKeysResponse"), FetchCommitteeResponse(_) => write!(f, "FetchCommitteeResponse"), GetShardKeyResponse(_) => write!(f, "GetShardKeyResponse"), + FetchTemplateRegistrationsResponse(_) => write!(f, "FetchTemplateRegistrationsResponse"), } } } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index b56d1ee94d..b1a72dc3c1 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -376,6 +376,18 @@ where B: BlockchainBackend + 'static let shard_key = self.blockchain_db.get_shard_key(height, public_key).await?; Ok(NodeCommsResponse::GetShardKeyResponse(shard_key)) }, + NodeCommsRequest::FetchTemplateRegistrations { from_height } => { + let template_registrations = self + .blockchain_db + .fetch_template_registrations(from_height) + .await? 
+ .into_iter() + .map(|tr| tr.registration_data) + .collect(); + Ok(NodeCommsResponse::FetchTemplateRegistrationsResponse( + template_registrations, + )) + }, } } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index ada66a3644..6f283ba0bd 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -40,7 +40,7 @@ use crate::{ blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, chain_storage::ActiveValidatorNode, proof_of_work::PowAlgorithm, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{CodeTemplateRegistration, TransactionKernel, TransactionOutput}, }; pub type BlockEventSender = broadcast::Sender>; @@ -312,4 +312,18 @@ impl LocalNodeCommsInterface { _ => Err(CommsInterfaceError::UnexpectedApiResponse), } } + + pub async fn get_template_registrations( + &mut self, + from_height: u64, + ) -> Result, CommsInterfaceError> { + match self + .request_sender + .call(NodeCommsRequest::FetchTemplateRegistrations { from_height }) + .await?? 
+ { + NodeCommsResponse::FetchTemplateRegistrationsResponse(template_registrations) => Ok(template_registrations), + _ => Err(CommsInterfaceError::UnexpectedApiResponse), + } + } } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 14ad5b9c35..b25228a46a 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -30,7 +30,7 @@ use tari_common_types::{ }; use tari_utilities::epoch_time::EpochTime; -use super::ActiveValidatorNode; +use super::{ActiveValidatorNode, TemplateRegistration}; use crate::{ blocks::{ Block, @@ -270,6 +270,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> [u8;32], "get_shard_key"); + + make_async_fn!(fetch_template_registrations(from_height: u64) -> Vec, "fetch_template_registrations"); } impl From> for AsyncBlockchainDb { diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index fbc8dc1b4d..b30e280daa 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -7,7 +7,7 @@ use tari_common_types::{ types::{Commitment, HashOutput, PublicKey, Signature}, }; -use super::ActiveValidatorNode; +use super::{ActiveValidatorNode, TemplateRegistration}; use crate::{ blocks::{ Block, @@ -196,4 +196,5 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError>; fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError>; + fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError>; } diff --git 
a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 59d54b4c23..d52954c388 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -41,7 +41,7 @@ use tari_common_types::{ use tari_mmr::pruned_hashset::PrunedHashSet; use tari_utilities::{epoch_time::EpochTime, hex::Hex, ByteArray}; -use super::ActiveValidatorNode; +use super::{ActiveValidatorNode, TemplateRegistration}; use crate::{ blocks::{ Block, @@ -1181,6 +1181,14 @@ where B: BlockchainBackend let db = self.db_read_access()?; db.fetch_committee(height, shard) } + + pub fn fetch_template_registrations( + &self, + from_height: u64, + ) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_template_registrations(from_height) + } } fn unexpected_result(request: DbKey, response: DbValue) -> Result { diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 6eb2ed3f4f..e0cf01008e 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -30,7 +30,7 @@ use croaring::Bitmap; use tari_common_types::types::{BlockHash, Commitment, HashOutput}; use tari_utilities::hex::Hex; -use super::ActiveValidatorNode; +use super::{ActiveValidatorNode, TemplateRegistration}; use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, @@ -362,6 +362,9 @@ pub enum WriteOperation { InsertValidatorNode { validator_node: ActiveValidatorNode, }, + InsertTemplateRegistration { + template_registration: TemplateRegistration, + }, } impl fmt::Display for WriteOperation { @@ -461,6 +464,9 @@ impl fmt::Display for WriteOperation { InsertValidatorNode { validator_node } => { write!(f, "Inserting VN {:?}", 
validator_node) }, + InsertTemplateRegistration { template_registration } => { + write!(f, "Inserting Template {:?}", template_registration) + }, } } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 977b381319..50238f03ce 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -103,6 +103,7 @@ use crate::{ MmrTree, PrunedOutput, Reorg, + TemplateRegistration, }, consensus::ConsensusManager, transactions::{ @@ -145,6 +146,7 @@ const LMDB_DB_REORGS: &str = "reorgs"; const LMDB_DB_VALIDATOR_NODES: &str = "validator_nodes"; const LMDB_DB_VALIDATOR_NODES_MAPPING: &str = "validator_nodes_mapping"; const LMDB_DB_VALIDATOR_NODE_ENDING: &str = "validator_node_ending"; +const LMDB_DB_TEMPLATE_REGISTRATIONS: &str = "template_registrations"; pub fn create_lmdb_database>( path: P, @@ -190,6 +192,7 @@ pub fn create_lmdb_database>( .add_database(LMDB_DB_VALIDATOR_NODES, flags | db::DUPSORT) .add_database(LMDB_DB_VALIDATOR_NODES_MAPPING, flags | db::DUPSORT) .add_database(LMDB_DB_VALIDATOR_NODE_ENDING, flags | db::INTEGERKEY | db::DUPSORT) + .add_database(LMDB_DB_TEMPLATE_REGISTRATIONS, flags | db::DUPSORT) .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; debug!(target: LOG_TARGET, "LMDB database creation successful"); @@ -254,6 +257,8 @@ pub struct LMDBDatabase { validator_nodes_mapping: DatabaseRef, /// Maps the end block height of nodes validator_nodes_ending: DatabaseRef, + /// Maps CodeTemplateRegistration hash-> TemplateRegistration + template_registrations: DatabaseRef, _file_lock: Arc, consensus_manager: ConsensusManager, } @@ -297,6 +302,7 @@ impl LMDBDatabase { validator_nodes: get_database(store, LMDB_DB_VALIDATOR_NODES)?, validator_nodes_mapping: get_database(store, LMDB_DB_VALIDATOR_NODES_MAPPING)?, validator_nodes_ending: get_database(store, 
LMDB_DB_VALIDATOR_NODE_ENDING)?, + template_registrations: get_database(store, LMDB_DB_TEMPLATE_REGISTRATIONS)?, env, env_config: store.env_config(), _file_lock: Arc::new(file_lock), @@ -502,6 +508,9 @@ impl LMDBDatabase { InsertValidatorNode { validator_node } => { self.insert_validator_node(&write_txn, validator_node)?; }, + InsertTemplateRegistration { template_registration } => { + self.insert_template_registration(&write_txn, template_registration)?; + }, } } write_txn.commit()?; @@ -509,7 +518,7 @@ impl LMDBDatabase { Ok(()) } - fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 26] { + fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 27] { [ ("metadata_db", &self.metadata_db), ("headers_db", &self.headers_db), @@ -543,6 +552,7 @@ impl LMDBDatabase { ("reorgs", &self.reorgs), ("validator_nodes", &self.validator_nodes), ("validator_nodes_mapping", &self.validator_nodes_mapping), + ("template_registrations", &self.template_registrations), ] } @@ -1326,6 +1336,19 @@ impl LMDBDatabase { }; self.insert_validator_node(txn, &validator_node)?; } + if let Some(template_reg) = output + .features + .sidechain_feature + .as_ref() + .and_then(|f| f.template_registration()) + { + let record = TemplateRegistration { + registration_data: template_reg.clone(), + height: header.height, + }; + + self.insert_template_registration(txn, &record)?; + } self.insert_output( txn, &block_hash, @@ -1616,6 +1639,21 @@ impl LMDBDatabase { Ok(()) } + fn insert_template_registration( + &self, + txn: &WriteTransaction<'_>, + template_registration: &TemplateRegistration, + ) -> Result<(), ChainStorageError> { + let key = template_registration.registration_data.hash(); + lmdb_insert( + txn, + &self.template_registrations, + key.as_bytes(), + template_registration, + "template_registrations", + ) + } + fn fetch_output_in_txn( &self, txn: &ConstTransaction<'_>, @@ -2538,6 +2576,18 @@ impl BlockchainBackend for LMDBDatabase { value: public_key.to_hex(), }) } + + fn 
fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError> { + // TODO: we can optimise this query by making using a compound key + let txn = self.read_transaction()?; + lmdb_filter_map_values(&txn, &self.template_registrations, |tr: TemplateRegistration| { + if tr.height >= from_height { + Some(tr) + } else { + None + } + }) + } } // Fetch the chain metadata diff --git a/base_layer/core/src/chain_storage/mod.rs b/base_layer/core/src/chain_storage/mod.rs index d374dccf1b..aa65f98a95 100644 --- a/base_layer/core/src/chain_storage/mod.rs +++ b/base_layer/core/src/chain_storage/mod.rs @@ -82,3 +82,6 @@ pub use utxo_mined_info::*; mod active_validator_node; pub use active_validator_node::ActiveValidatorNode; + +mod template_registation; +pub use template_registation::TemplateRegistration; diff --git a/base_layer/core/src/chain_storage/template_registation.rs b/base_layer/core/src/chain_storage/template_registation.rs new file mode 100644 index 0000000000..452fc02ef6 --- /dev/null +++ b/base_layer/core/src/chain_storage/template_registation.rs @@ -0,0 +1,31 @@ +// Copyright 2022, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use serde::{Deserialize, Serialize}; + +use crate::transactions::transaction_components::CodeTemplateRegistration; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct TemplateRegistration { + pub registration_data: CodeTemplateRegistration, + pub height: u64, +} diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 88b7e9204c..655e5a1e54 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -67,6 +67,7 @@ use crate::{ MmrTree, PrunedOutput, Reorg, + TemplateRegistration, UtxoMinedInfo, Validators, }, @@ -425,6 +426,10 @@ impl BlockchainBackend for TempDatabase { fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { self.db.as_ref().unwrap().get_shard_key(height, public_key) } + + fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError> { + self.db.as_ref().unwrap().fetch_template_registrations(from_height) + } } pub fn create_chained_blocks>( diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs 
b/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs index 83f67156f6..8f3290cd1e 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs @@ -23,15 +23,19 @@ use std::io::{Error, ErrorKind, Read, Write}; use serde::{Deserialize, Serialize}; -use tari_common_types::types::{PublicKey, Signature}; - -use crate::consensus::{ - read_byte, - ConsensusDecoding, - ConsensusEncoding, - ConsensusEncodingSized, - MaxSizeBytes, - MaxSizeString, +use tari_common_types::types::{FixedHash, PublicKey, Signature}; + +use crate::{ + consensus::{ + read_byte, + ConsensusDecoding, + ConsensusEncoding, + ConsensusEncodingSized, + DomainSeparatedConsensusHasher, + MaxSizeBytes, + MaxSizeString, + }, + transactions::TransactionHashDomain, }; #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] @@ -46,6 +50,22 @@ pub struct CodeTemplateRegistration { pub binary_url: MaxSizeString<255>, } +impl CodeTemplateRegistration { + pub fn hash(&self) -> FixedHash { + DomainSeparatedConsensusHasher::::new("template_registration") + .chain(&self.author_public_key) + .chain(&self.author_signature) + .chain(&self.template_name) + .chain(&self.template_version) + .chain(&self.template_type) + .chain(&self.build_info) + .chain(&self.binary_sha) + .chain(&self.binary_url) + .finalize() + .into() + } +} + impl ConsensusEncoding for CodeTemplateRegistration { fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { self.author_public_key.consensus_encode(writer)?; From 3a4dd5096559dc7eea2d5d5c90bc64083b766c1a Mon Sep 17 00:00:00 2001 From: stringhandler Date: Fri, 30 Sep 2022 15:50:39 +0200 Subject: [PATCH 11/21] fix: fix get shard key (#4744) get_shard_key was not using the correct database key when searching --- .../tari_app_grpc/proto/base_node.proto | 1 + .../src/grpc/base_node_grpc_server.rs | 
15 +++++++++--- .../comms_interface/comms_response.rs | 2 +- .../comms_interface/local_interface.rs | 6 ++++- base_layer/core/src/chain_storage/async_db.rs | 2 +- .../src/chain_storage/blockchain_backend.rs | 2 +- .../src/chain_storage/blockchain_database.rs | 2 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 23 +++++++++---------- .../core/src/test_helpers/blockchain.rs | 2 +- .../nodejs/base_node_grpc_client/src/index.js | 2 +- clients/nodejs/wallet_grpc_client/index.js | 2 +- 11 files changed, 36 insertions(+), 23 deletions(-) diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 4b61816a22..7902ee8232 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -467,6 +467,7 @@ message GetShardKeyRequest { message GetShardKeyResponse { bytes shard_key = 1; + bool found = 2; } message GetTemplateRegistrationsRequest { diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 09bd6783b7..4698648d1a 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1461,13 +1461,22 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let mut handler = self.node_service.clone(); let public_key = PublicKey::from_bytes(&request.public_key) .map_err(|e| obscure_error_if_true(report_error_flag, Status::invalid_argument(e.to_string())))?; + let shard_key = handler.get_shard_key(request.height, public_key).await.map_err(|e| { error!(target: LOG_TARGET, "Error {}", e); obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) })?; - Ok(Response::new(tari_rpc::GetShardKeyResponse { - shard_key: shard_key.to_vec(), - })) + if let Some(shard_key) = shard_key { + Ok(Response::new(tari_rpc::GetShardKeyResponse { + shard_key: shard_key.to_vec(), + found: 
true, + })) + } else { + Ok(Response::new(tari_rpc::GetShardKeyResponse { + shard_key: vec![], + found: false, + })) + } } async fn get_active_validator_nodes( diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 9dd0d6b360..30509fe6f3 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -78,7 +78,7 @@ pub enum NodeCommsResponse { }, FetchValidatorNodesKeysResponse(Vec<(PublicKey, [u8; 32])>), FetchCommitteeResponse(Vec), - GetShardKeyResponse([u8; 32]), + GetShardKeyResponse(Option<[u8; 32]>), FetchTemplateRegistrationsResponse(Vec), } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index 6f283ba0bd..a6f084cea3 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -302,7 +302,11 @@ impl LocalNodeCommsInterface { } } - pub async fn get_shard_key(&mut self, height: u64, public_key: PublicKey) -> Result<[u8; 32], CommsInterfaceError> { + pub async fn get_shard_key( + &mut self, + height: u64, + public_key: PublicKey, + ) -> Result, CommsInterfaceError> { match self .request_sender .call(NodeCommsRequest::GetShardKey { height, public_key }) diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index b25228a46a..27ff288bbb 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -269,7 +269,7 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); - make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> [u8;32], "get_shard_key"); + make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> 
Option<[u8;32]>, "get_shard_key"); make_async_fn!(fetch_template_registrations(from_height: u64) -> Vec, "fetch_template_registrations"); } diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index b30e280daa..b653477bd8 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -195,6 +195,6 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError>; - fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError>; + fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError>; fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError>; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index d52954c388..b9da0d96bd 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -841,7 +841,7 @@ where B: BlockchainBackend db.fetch_mmr_size(tree) } - pub fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { + pub fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError> { let db = self.db_read_access()?; db.get_shard_key(height, public_key) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 50238f03ce..c4056aa92e 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -2562,19 +2562,18 @@ impl BlockchainBackend for LMDBDatabase { Ok(result) } - fn get_shard_key(&self, height: u64, public_key: 
PublicKey) -> Result<[u8; 32], ChainStorageError> { + fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - let validator_nodes: Vec = - lmdb_get_multiple(&txn, &self.validator_nodes, public_key.as_bytes())?; - validator_nodes - .iter() - .find(|a| a.from_height <= height && height <= a.to_height) - .map(|a| a.shard_key) - .ok_or(ChainStorageError::ValueNotFound { - entity: "ShardKey", - field: "public_key", - value: public_key.to_hex(), - }) + let mut validator_nodes: Vec = + lmdb_fetch_matching_after(&txn, &self.validator_nodes, public_key.as_bytes())?; + validator_nodes = validator_nodes + .into_iter() + .filter(|a| a.from_height <= height && height <= a.to_height) + .collect(); + // get the last one + validator_nodes.sort_by(|a, b| a.from_height.cmp(&b.from_height)); + + Ok(validator_nodes.into_iter().map(|a| a.shard_key).last()) } fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError> { diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 655e5a1e54..2219f0089c 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -423,7 +423,7 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_committee(height, shard) } - fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result<[u8; 32], ChainStorageError> { + fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError> { self.db.as_ref().unwrap().get_shard_key(height, public_key) } diff --git a/clients/nodejs/base_node_grpc_client/src/index.js b/clients/nodejs/base_node_grpc_client/src/index.js index 4413265dd7..7a9a219d6d 100644 --- a/clients/nodejs/base_node_grpc_client/src/index.js +++ b/clients/nodejs/base_node_grpc_client/src/index.js @@ -9,7 +9,7 @@ const path = require("path"); const packageDefinition = 
protoLoader.loadSync( path.resolve( __dirname, - "../../../applications/tari_app_grpc/proto/base_node.proto" + "../../../../applications/tari_app_grpc/proto/base_node.proto" ), { keepCase: true, diff --git a/clients/nodejs/wallet_grpc_client/index.js b/clients/nodejs/wallet_grpc_client/index.js index f05c4cb24f..bd3c942615 100644 --- a/clients/nodejs/wallet_grpc_client/index.js +++ b/clients/nodejs/wallet_grpc_client/index.js @@ -6,7 +6,7 @@ const protoLoader = require("@grpc/proto-loader"); const { promisifyAll } = require("grpc-promise"); const packageDefinition = protoLoader.loadSync( - `${__dirname}/../../applications/tari_app_grpc/proto/wallet.proto`, + `${__dirname}/../../../applications/tari_app_grpc/proto/wallet.proto`, { keepCase: true, longs: String, From 4060935ded9c4192c58f5a8ee0b7443ff285f1b1 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Tue, 11 Oct 2022 09:10:42 +0200 Subject: [PATCH 12/21] fix(wallet/grpc): add transaction id and template_address to template_reg response (#4788) Description --- Adds tx_id and template_address to create_template_registration method Motivation and Context --- Mainly helpful for displaying the template address in the vn cli. How Has This Been Tested? 
--- --- applications/tari_app_grpc/proto/wallet.proto | 5 ++++- .../src/grpc/wallet_grpc_server.rs | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/applications/tari_app_grpc/proto/wallet.proto b/applications/tari_app_grpc/proto/wallet.proto index 5a8357c637..417b9dc439 100644 --- a/applications/tari_app_grpc/proto/wallet.proto +++ b/applications/tari_app_grpc/proto/wallet.proto @@ -267,7 +267,10 @@ message CreateTemplateRegistrationRequest { uint64 fee_per_gram = 2; } -message CreateTemplateRegistrationResponse { } +message CreateTemplateRegistrationResponse { + uint64 tx_id = 1; + bytes template_address = 2; +} message CancelTransactionRequest { uint64 tx_id = 1; diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index 980f1052f4..80c3b91ddb 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -922,12 +922,23 @@ impl wallet_server::Wallet for WalletGrpcServer { "Template registration transaction: {:?}", transaction ); - let _ = transaction_service + let reg_output = transaction + .body + .outputs() + .iter() + .find(|o| o.features.output_type == OutputType::CodeTemplateRegistration) + .unwrap(); + let template_address = reg_output.hash(); + + transaction_service .submit_transaction(tx_id, transaction, 0.into(), message) .await .map_err(|e| Status::internal(e.to_string()))?; - Ok(Response::new(CreateTemplateRegistrationResponse {})) + Ok(Response::new(CreateTemplateRegistrationResponse { + tx_id: tx_id.as_u64(), + template_address: template_address.to_vec(), + })) } async fn register_validator_node( From 64002e9c442f7a3b69343d580254e4e93ad69dd4 Mon Sep 17 00:00:00 2001 From: Martin Stefcek <35243812+Cifko@users.noreply.github.com> Date: Tue, 11 Oct 2022 14:38:20 +0200 Subject: [PATCH 13/21] fix: computation of vn mmr (#4772) Description --- Fix 
the VNs mmr. The output vector was in random order that resulted in the mmr being kind of random as well. So the miner and base node had different order of VNs and the blocks were invalidated. More VNs = less probability of passing. How Has This Been Tested? --- I manually registered 27 VNs and mined 100 blocks. --- base_layer/core/src/chain_storage/blockchain_database.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 69cd6ac23b..225fca8035 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1335,7 +1335,8 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul output_mmr.compress(); - let validator_nodes = db.fetch_active_validator_nodes(metadata.height_of_longest_chain() + 1)?; + let mut validator_nodes = db.fetch_active_validator_nodes(metadata.height_of_longest_chain() + 1)?; + validator_nodes.sort(); let vn_mmr = ValidatorNodeMmr::new(validator_nodes.iter().map(|vn| vn.1.to_vec()).collect()); let mmr_roots = MmrRoots { From 9e81c7b6257773ddca970982adb89a1e0d548e2b Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Tue, 11 Oct 2022 18:00:41 +0200 Subject: [PATCH 14/21] fix(core)!: adds utxo and block info to get_template_registrations request (#4789) Description --- - adds UTXO and block info to get_template_registrations request - adds end_height to get_template_registrations request - Changes validator node registration key to - uses CompositeKey for all composite keys - Removed some unused (previous contract code) response types Motivation and Context --- DAN template scanner requires block hash info to determine last scanned height. Indexing template validations by block height enable more efficient retrievals NOTE: blockchain db will need to be resynced How Has This Been Tested? 
--- Manually - DAN block scanner --- .../tari_app_grpc/proto/base_node.proto | 28 ++- .../tari_app_grpc/proto/validator_node.proto | 2 +- .../src/grpc/base_node_grpc_server.rs | 221 +++++++++++++----- .../comms_interface/comms_request.rs | 18 +- .../comms_interface/comms_response.rs | 31 +-- .../comms_interface/inbound_handlers.rs | 21 +- .../comms_interface/local_interface.rs | 29 ++- base_layer/core/src/chain_storage/async_db.rs | 6 +- .../src/chain_storage/blockchain_backend.rs | 8 +- .../src/chain_storage/blockchain_database.rs | 21 +- .../core/src/chain_storage/db_transaction.rs | 4 +- base_layer/core/src/chain_storage/error.rs | 2 + .../chain_storage/lmdb_db/composite_key.rs | 41 +++- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 136 ++++++----- .../core/src/chain_storage/lmdb_db/mod.rs | 1 + base_layer/core/src/chain_storage/mod.rs | 2 +- .../src/chain_storage/template_registation.rs | 7 +- .../core/src/test_helpers/blockchain.rs | 13 +- .../transaction_components/output_type.rs | 7 + .../side_chain/template_registration.rs | 38 +-- 20 files changed, 411 insertions(+), 225 deletions(-) diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 7902ee8232..f18c39aefc 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -94,7 +94,8 @@ service BaseNode { rpc GetCommittee(GetCommitteeRequest) returns (GetCommitteeResponse); rpc GetShardKey(GetShardKeyRequest) returns (GetShardKeyResponse); // Get templates - rpc GetTemplateRegistrations(GetTemplateRegistrationsRequest) returns (stream TemplateRegistration); + rpc GetTemplateRegistrations(GetTemplateRegistrationsRequest) returns (stream GetTemplateRegistrationResponse); + rpc GetSideChainUtxos(GetSideChainUtxosRequest) returns (stream GetSideChainUtxosResponse); } message GetAssetMetadataRequest { @@ -471,5 +472,28 @@ message GetShardKeyResponse { } message 
GetTemplateRegistrationsRequest { - uint64 from_height = 1; + bytes start_hash = 1; + uint64 count = 2; +} + +message GetTemplateRegistrationResponse { + bytes utxo_hash = 1; + TemplateRegistration registration = 2; +} + +message BlockInfo { + uint64 height = 1; + bytes hash = 2; + bytes next_block_hash = 3; } + +message GetSideChainUtxosRequest { + bytes start_hash = 1; + uint64 count = 2; +} + +message GetSideChainUtxosResponse { + BlockInfo block_info = 1; + repeated TransactionOutput outputs = 2; +} + diff --git a/applications/tari_app_grpc/proto/validator_node.proto b/applications/tari_app_grpc/proto/validator_node.proto index a549134607..9b8d73ebb1 100644 --- a/applications/tari_app_grpc/proto/validator_node.proto +++ b/applications/tari_app_grpc/proto/validator_node.proto @@ -118,7 +118,7 @@ message Authority { bytes proxied_by = 3; } -message InvokeMethodRequest{ +message InvokeMethodRequest { bytes contract_id = 1; uint32 template_id = 2; string method = 3; diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 1479520768..e82ca58d9c 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -33,7 +33,7 @@ use tari_app_grpc::{ tari_rpc::{CalcType, Sorting}, }; use tari_app_utilities::consts; -use tari_common_types::types::{Commitment, PublicKey, Signature}; +use tari_common_types::types::{Commitment, FixedHash, PublicKey, Signature}; use tari_comms::{Bytes, CommsNode}; use tari_core::{ base_node::{ @@ -140,7 +140,8 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { type GetMempoolTransactionsStream = mpsc::Receiver>; type GetNetworkDifficultyStream = mpsc::Receiver>; type GetPeersStream = mpsc::Receiver>; - type GetTemplateRegistrationsStream = mpsc::Receiver>; + type GetSideChainUtxosStream = mpsc::Receiver>; + type GetTemplateRegistrationsStream = 
mpsc::Receiver>; type GetTokensInCirculationStream = mpsc::Receiver>; type ListHeadersStream = mpsc::Receiver>; type SearchKernelsStream = mpsc::Receiver>; @@ -1484,7 +1485,6 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { request: Request, ) -> Result, Status> { let request = request.into_inner(); - let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetActiveValidatorNodes"); let mut handler = self.node_service.clone(); @@ -1493,39 +1493,24 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { task::spawn(async move { let active_validator_nodes = match handler.get_active_validator_nodes(request.height).await { Err(err) => { - warn!(target: LOG_TARGET, "Error communicating with base node: {}", err,); + warn!(target: LOG_TARGET, "Base node service error: {}", err,); return; }, Ok(data) => data, }; - // dbg!(&active_validator_nodes); + for (public_key, shard_key) in active_validator_nodes { let active_validator_node = tari_rpc::GetActiveValidatorNodesResponse { public_key: public_key.to_vec(), shard_key: shard_key.to_vec(), }; - match tx.send(Ok(active_validator_node)).await { - Ok(_) => (), - Err(err) => { - warn!( - target: LOG_TARGET, - "Error sending mempool transaction via GRPC: {}", err - ); - match tx - .send(Err(obscure_error_if_true( - report_error_flag, - Status::unknown("Error sending data"), - ))) - .await - { - Ok(_) => (), - Err(send_err) => { - warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) - }, - } - return; - }, + if tx.send(Ok(active_validator_node)).await.is_err() { + debug!( + target: LOG_TARGET, + "[get_active_validator_nodes] Client has disconnected before stream completed" + ); + return; } } }); @@ -1544,63 +1529,193 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetTemplateRegistrations"); - let mut 
handler = self.node_service.clone(); - let (mut tx, rx) = mpsc::channel(1000); + let (mut tx, rx) = mpsc::channel(10); + + let start_hash = Some(request.start_hash) + .filter(|x| !x.is_empty()) + .map(FixedHash::try_from) + .transpose() + .map_err(|_| Status::invalid_argument("Invalid start_hash"))?; + + let mut node_service = self.node_service.clone(); + + let start_height = match start_hash { + Some(hash) => { + let header = node_service + .get_header_by_hash(hash) + .await + .map_err(|err| obscure_error_if_true(self.report_grpc_error, Status::internal(err.to_string())))?; + header + .map(|h| h.height()) + .ok_or_else(|| Status::not_found("Start hash not found"))? + }, + None => 0, + }; + + if request.count == 0 { + return Ok(Response::new(rx)); + } + + let end_height = start_height + .checked_add(request.count) + .ok_or_else(|| Status::invalid_argument("Request start height + count overflows u64"))?; task::spawn(async move { - let template_registrations = match handler.get_template_registrations(request.from_height).await { + let template_registrations = match node_service.get_template_registrations(start_height, end_height).await { Err(err) => { - warn!(target: LOG_TARGET, "Error communicating with base node: {}", err,); + warn!(target: LOG_TARGET, "Base node service error: {}", err); return; }, Ok(data) => data, }; for template_registration in template_registrations { - let template_registration = match tari_rpc::TemplateRegistration::try_from(template_registration) { + let registration = match template_registration.registration_data.try_into() { Ok(t) => t, Err(e) => { warn!( target: LOG_TARGET, "Error sending converting template registration for GRPC: {}", e ); - match tx + let _ignore = tx .send(Err(obscure_error_if_true( report_error_flag, - Status::internal("Error converting template_registration"), + Status::internal(format!("Error converting template_registration: {}", e)), ))) - .await - { - Ok(_) => (), - Err(send_err) => { - warn!(target: LOG_TARGET, 
"Error sending error to GRPC client: {}", send_err) - }, - } + .await; return; }, }; - match tx.send(Ok(template_registration)).await { - Ok(_) => (), - Err(err) => { + let resp = tari_rpc::GetTemplateRegistrationResponse { + utxo_hash: template_registration.output_hash.to_vec(), + registration: Some(registration), + }; + + if tx.send(Ok(resp)).await.is_err() { + debug!( + target: LOG_TARGET, + "[get_template_registrations] Client has disconnected before stream completed" + ); + return; + } + } + }); + debug!( + target: LOG_TARGET, + "Sending GetTemplateRegistrations response stream to client" + ); + Ok(Response::new(rx)) + } + + async fn get_side_chain_utxos( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let report_error_flag = self.report_error_flag(); + debug!(target: LOG_TARGET, "Incoming GRPC request for GetTemplateRegistrations"); + + let (mut tx, rx) = mpsc::channel(10); + + let start_hash = Some(request.start_hash) + .filter(|x| !x.is_empty()) + .map(FixedHash::try_from) + .transpose() + .map_err(|_| Status::invalid_argument("Invalid start_hash"))?; + + let mut node_service = self.node_service.clone(); + + let start_header = match start_hash { + Some(hash) => node_service + .get_header_by_hash(hash) + .await + .map_err(|err| obscure_error_if_true(self.report_grpc_error, Status::internal(err.to_string())))? + .ok_or_else(|| Status::not_found("Start hash not found"))?, + None => node_service + .get_header(0) + .await + .map_err(|err| obscure_error_if_true(self.report_grpc_error, Status::internal(err.to_string())))? 
+ .ok_or_else(|| Status::unavailable("Genesis block not available"))?, + }; + + if request.count == 0 { + return Ok(Response::new(rx)); + } + + let start_height = start_header.height(); + let end_height = start_height + .checked_add(request.count - 1) + .ok_or_else(|| Status::invalid_argument("Request start height + count overflows u64"))?; + + task::spawn(async move { + let mut current_header = start_header; + + for height in start_height..=end_height { + let header_hash = *current_header.hash(); + let utxos = match node_service.fetch_unspent_utxos_in_block(header_hash).await { + Ok(utxos) => utxos, + Err(e) => { + warn!(target: LOG_TARGET, "Base node service error: {}", e); + return; + }, + }; + + let next_header = match node_service.get_header(height + 1).await { + Ok(h) => h, + Err(e) => { + let _ignore = tx.send(Err(obscure_error_if_true( + report_error_flag, + Status::internal(e.to_string()), + ))); + return; + }, + }; + + let sidechain_outputs = utxos + .into_iter() + .filter(|u| u.features.output_type.is_sidechain_type()) + .collect::>(); + + match sidechain_outputs.into_iter().map(TryInto::try_into).collect() { + Ok(outputs) => { + let resp = tari_rpc::GetSideChainUtxosResponse { + block_info: Some(tari_rpc::BlockInfo { + height: current_header.height(), + hash: header_hash.to_vec(), + next_block_hash: next_header.as_ref().map(|h| h.hash().to_vec()).unwrap_or_default(), + }), + outputs, + }; + + if tx.send(Ok(resp)).await.is_err() { + debug!( + target: LOG_TARGET, + "[get_template_registrations] Client has disconnected before stream completed" + ); + return; + } + }, + Err(e) => { warn!( target: LOG_TARGET, - "Error sending template registration via GRPC: {}", err + "Error sending converting sidechain output for GRPC: {}", e ); - match tx + let _ignore = tx .send(Err(obscure_error_if_true( report_error_flag, - Status::unknown("Error sending data"), + Status::internal(format!("Error converting sidechain output: {}", e)), ))) - .await - { - Ok(_) => (), - 
Err(send_err) => { - warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) - }, - } + .await; return; }, + }; + + match next_header { + Some(header) => { + current_header = header; + }, + None => break, } } }); diff --git a/base_layer/core/src/base_node/comms_interface/comms_request.rs b/base_layer/core/src/base_node/comms_interface/comms_request.rs index 6e95c929bd..f47b4a0859 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_request.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_request.rs @@ -26,7 +26,7 @@ use std::{ }; use serde::{Deserialize, Serialize}; -use tari_common_types::types::{Commitment, HashOutput, PrivateKey, PublicKey, Signature}; +use tari_common_types::types::{BlockHash, Commitment, HashOutput, PrivateKey, PublicKey, Signature}; use tari_utilities::hex::Hex; use crate::{blocks::NewBlockTemplate, chain_storage::MmrTree, proof_of_work::PowAlgorithm}; @@ -76,7 +76,11 @@ pub enum NodeCommsRequest { public_key: PublicKey, }, FetchTemplateRegistrations { - from_height: u64, + start_height: u64, + end_height: u64, + }, + FetchUnspentUtxosInBlock { + block_hash: BlockHash, }, } @@ -127,8 +131,14 @@ impl Display for NodeCommsRequest { GetShardKey { height, public_key } => { write!(f, "GetShardKey height ({}), public key ({:?})", height, public_key) }, - FetchTemplateRegistrations { from_height } => { - write!(f, "FetchTemplateRegistrations ({})", from_height) + FetchTemplateRegistrations { + start_height: start, + end_height: end, + } => { + write!(f, "FetchTemplateRegistrations ({}..={})", start, end) + }, + FetchUnspentUtxosInBlock { block_hash } => { + write!(f, "FetchUnspentUtxosInBlock ({})", block_hash) }, } } diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 30509fe6f3..5ad0bbe052 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ 
b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -32,14 +32,9 @@ use tari_common_types::{ use crate::{ blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, - chain_storage::{ActiveValidatorNode, UtxoMinedInfo}, + chain_storage::{ActiveValidatorNode, TemplateRegistrationEntry}, proof_of_work::Difficulty, - transactions::transaction_components::{ - CodeTemplateRegistration, - Transaction, - TransactionKernel, - TransactionOutput, - }, + transactions::transaction_components::{Transaction, TransactionKernel, TransactionOutput}, }; /// API Response enum @@ -60,26 +55,11 @@ pub enum NodeCommsResponse { }, TargetDifficulty(Difficulty), MmrNodes(Vec, Vec), - FetchTokensResponse { - outputs: Vec<(TransactionOutput, u64)>, - }, - FetchAssetRegistrationsResponse { - outputs: Vec, - }, - FetchAssetMetadataResponse { - output: Box>, - }, FetchMempoolTransactionsByExcessSigsResponse(FetchMempoolTransactionsResponse), - FetchOutputsForBlockResponse { - outputs: Vec, - }, - FetchOutputsByContractIdResponse { - outputs: Vec, - }, FetchValidatorNodesKeysResponse(Vec<(PublicKey, [u8; 32])>), FetchCommitteeResponse(Vec), GetShardKeyResponse(Option<[u8; 32]>), - FetchTemplateRegistrationsResponse(Vec), + FetchTemplateRegistrationsResponse(Vec), } impl Display for NodeCommsResponse { @@ -107,17 +87,12 @@ impl Display for NodeCommsResponse { ), TargetDifficulty(_) => write!(f, "TargetDifficulty"), MmrNodes(_, _) => write!(f, "MmrNodes"), - FetchTokensResponse { .. } => write!(f, "FetchTokensResponse"), - FetchAssetRegistrationsResponse { .. } => write!(f, "FetchAssetRegistrationsResponse"), - FetchAssetMetadataResponse { .. } => write!(f, "FetchAssetMetadataResponse"), FetchMempoolTransactionsByExcessSigsResponse(resp) => write!( f, "FetchMempoolTransactionsByExcessSigsResponse({} transaction(s), {} not found)", resp.transactions.len(), resp.not_found.len() ), - FetchOutputsForBlockResponse { .. 
} => write!(f, "FetchConstitutionsResponse"), - FetchOutputsByContractIdResponse { .. } => write!(f, "FetchOutputsByContractIdResponse"), FetchValidatorNodesKeysResponse(_) => write!(f, "FetchValidatorNodesKeysResponse"), FetchCommitteeResponse(_) => write!(f, "FetchCommitteeResponse"), GetShardKeyResponse(_) => write!(f, "GetShardKeyResponse"), diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 9916e00230..f1c1774015 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -377,18 +377,27 @@ where B: BlockchainBackend + 'static let shard_key = self.blockchain_db.get_shard_key(height, public_key).await?; Ok(NodeCommsResponse::GetShardKeyResponse(shard_key)) }, - NodeCommsRequest::FetchTemplateRegistrations { from_height } => { + NodeCommsRequest::FetchTemplateRegistrations { + start_height, + end_height, + } => { let template_registrations = self .blockchain_db - .fetch_template_registrations(from_height) - .await? 
- .into_iter() - .map(|tr| tr.registration_data) - .collect(); + .fetch_template_registrations(start_height..=end_height) + .await?; Ok(NodeCommsResponse::FetchTemplateRegistrationsResponse( template_registrations, )) }, + NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash } => { + let utxos = self.blockchain_db.fetch_outputs_in_block(block_hash).await?; + Ok(NodeCommsResponse::TransactionOutputs( + utxos + .into_iter() + .filter_map(|utxo| utxo.into_unpruned_output()) + .collect(), + )) + }, } } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index b2a7a115f9..48c093e757 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -38,9 +38,9 @@ use crate::{ NodeCommsResponse, }, blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, - chain_storage::ActiveValidatorNode, + chain_storage::{ActiveValidatorNode, TemplateRegistrationEntry}, proof_of_work::PowAlgorithm, - transactions::transaction_components::{CodeTemplateRegistration, TransactionKernel, TransactionOutput}, + transactions::transaction_components::{TransactionKernel, TransactionOutput}, }; pub type BlockEventSender = broadcast::Sender>; @@ -327,15 +327,34 @@ impl LocalNodeCommsInterface { pub async fn get_template_registrations( &mut self, - from_height: u64, - ) -> Result, CommsInterfaceError> { + start_height: u64, + end_height: u64, + ) -> Result, CommsInterfaceError> { match self .request_sender - .call(NodeCommsRequest::FetchTemplateRegistrations { from_height }) + .call(NodeCommsRequest::FetchTemplateRegistrations { + start_height, + end_height, + }) .await?? 
{ NodeCommsResponse::FetchTemplateRegistrationsResponse(template_registrations) => Ok(template_registrations), _ => Err(CommsInterfaceError::UnexpectedApiResponse), } } + + /// Fetches UTXOs that are not spent for the given block hash up to the current chain tip. + pub async fn fetch_unspent_utxos_in_block( + &mut self, + block_hash: BlockHash, + ) -> Result, CommsInterfaceError> { + match self + .request_sender + .call(NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash }) + .await?? + { + NodeCommsResponse::TransactionOutputs(outputs) => Ok(outputs), + _ => Err(CommsInterfaceError::UnexpectedApiResponse), + } + } } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 0540182a89..12ee7a45d2 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -30,7 +30,7 @@ use tari_common_types::{ }; use tari_utilities::epoch_time::EpochTime; -use super::{ActiveValidatorNode, TemplateRegistration}; +use super::{ActiveValidatorNode, TemplateRegistrationEntry}; use crate::{ blocks::{ Block, @@ -163,6 +163,8 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_utxos_in_block(hash: HashOutput, deleted: Option>) -> (Vec, Bitmap), "fetch_utxos_in_block"); + make_async_fn!(fetch_outputs_in_block(hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + make_async_fn!(utxo_count() -> usize, "utxo_count"); //---------------------------------- Kernel --------------------------------------------// @@ -271,7 +273,7 @@ impl AsyncBlockchainDb { make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> Option<[u8;32]>, "get_shard_key"); - make_async_fn!(fetch_template_registrations(from_height: u64) -> Vec, "fetch_template_registrations"); + make_async_fn!(fetch_template_registrations>(range: T) -> Vec, "fetch_template_registrations"); } impl From> for AsyncBlockchainDb { diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs 
b/base_layer/core/src/chain_storage/blockchain_backend.rs index b653477bd8..05d8ca33d6 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -7,7 +7,7 @@ use tari_common_types::{ types::{Commitment, HashOutput, PublicKey, Signature}, }; -use super::{ActiveValidatorNode, TemplateRegistration}; +use super::{ActiveValidatorNode, TemplateRegistrationEntry}; use crate::{ blocks::{ Block, @@ -196,5 +196,9 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError>; fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError>; - fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError>; + fn fetch_template_registrations( + &self, + start_height: u64, + end_height: u64, + ) -> Result, ChainStorageError>; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 225fca8035..ca893c34ca 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -41,7 +41,7 @@ use tari_common_types::{ use tari_mmr::pruned_hashset::PrunedHashSet; use tari_utilities::{epoch_time::EpochTime, hex::Hex, ByteArray}; -use super::{ActiveValidatorNode, TemplateRegistration}; +use super::{ActiveValidatorNode, TemplateRegistrationEntry}; use crate::{ blocks::{ Block, @@ -439,6 +439,11 @@ where B: BlockchainBackend db.fetch_utxos_in_block(&hash, deleted.as_deref()) } + pub fn fetch_outputs_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_outputs_in_block(&hash) + } + /// Returns the number of UTXOs in the current unspent set pub fn utxo_count(&self) -> Result { let db = self.db_read_access()?; @@ 
-1188,12 +1193,18 @@ where B: BlockchainBackend db.fetch_committee(height, shard) } - pub fn fetch_template_registrations( + pub fn fetch_template_registrations>( &self, - from_height: u64, - ) -> Result, ChainStorageError> { + range: T, + ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_template_registrations(from_height) + let (start, mut end) = convert_to_option_bounds(range); + if end.is_none() { + // `(n..)` means fetch block headers until this node's tip + end = Some(db.fetch_last_header()?.height); + } + let (start, end) = (start.unwrap_or(0), end.unwrap()); + db.fetch_template_registrations(start, end) } } diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index e0cf01008e..d2c2f706db 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -30,7 +30,7 @@ use croaring::Bitmap; use tari_common_types::types::{BlockHash, Commitment, HashOutput}; use tari_utilities::hex::Hex; -use super::{ActiveValidatorNode, TemplateRegistration}; +use super::{ActiveValidatorNode, TemplateRegistrationEntry}; use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, @@ -363,7 +363,7 @@ pub enum WriteOperation { validator_node: ActiveValidatorNode, }, InsertTemplateRegistration { - template_registration: TemplateRegistration, + template_registration: TemplateRegistrationEntry, }, } diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index 93456647ce..62d33a6a91 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -134,6 +134,8 @@ pub enum ChainStorageError { UnspendableDueToDependentUtxos { details: String }, #[error("FixedHashSize Error: {0}")] FixedHashSizeError(#[from] 
FixedHashSizeError), + #[error("Composite key length was exceeded (THIS SHOULD NEVER HAPPEN)")] + CompositeKeyLengthExceeded, } impl ChainStorageError { diff --git a/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs b/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs index 565feb8104..0d6a7f5c09 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs @@ -25,15 +25,18 @@ use std::{ ops::{Deref, DerefMut}, }; +use lmdb_zero::traits::AsLmdbBytes; use tari_utilities::hex::to_hex; -#[derive(Debug, Clone, Copy)] -pub(super) struct CompositeKey { - bytes: [u8; KEY_LEN], +use crate::chain_storage::ChainStorageError; + +#[derive(Debug, Clone)] +pub(super) struct CompositeKey { + bytes: Box<[u8; L]>, len: usize, } -impl CompositeKey { +impl CompositeKey { pub fn new() -> Self { Self { bytes: Self::new_buf(), @@ -41,10 +44,20 @@ impl CompositeKey { } } + pub fn try_from_parts>(parts: &[T]) -> Result { + let mut key = Self::new(); + for part in parts { + if !key.push(part) { + return Err(ChainStorageError::CompositeKeyLengthExceeded); + } + } + Ok(key) + } + pub fn push>(&mut self, bytes: T) -> bool { let b = bytes.as_ref(); let new_len = self.len + b.len(); - if new_len > KEY_LEN { + if new_len > L { return false; } self.bytes[self.len..new_len].copy_from_slice(b); @@ -61,18 +74,18 @@ impl CompositeKey { } /// Returns a fixed 0-filled byte array. 
- const fn new_buf() -> [u8; KEY_LEN] { - [0x0u8; KEY_LEN] + fn new_buf() -> Box<[u8; L]> { + Box::new([0x0u8; L]) } } -impl Display for CompositeKey { +impl Display for CompositeKey { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}", to_hex(self.as_bytes())) } } -impl Deref for CompositeKey { +impl Deref for CompositeKey { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -80,14 +93,20 @@ impl Deref for CompositeKey { } } -impl DerefMut for CompositeKey { +impl DerefMut for CompositeKey { fn deref_mut(&mut self) -> &mut Self::Target { self.as_bytes_mut() } } -impl AsRef<[u8]> for CompositeKey { +impl AsRef<[u8]> for CompositeKey { fn as_ref(&self) -> &[u8] { self.as_bytes() } } + +impl AsLmdbBytes for CompositeKey { + fn as_lmdb_bytes(&self) -> &[u8] { + self.as_bytes() + } +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 627125350f..7cbb3f1ac3 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -20,18 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// Because we use dynamically sized u8 vectors for hash types through the type alias HashOutput, -// let's ignore this clippy error in this module - -#![allow(clippy::ptr_arg)] - use std::{ collections::HashMap, convert::TryFrom, fmt, fs, fs::File, - mem, ops::Deref, path::Path, sync::Arc, @@ -69,6 +63,7 @@ use crate::{ db_transaction::{DbKey, DbTransaction, DbValue, WriteOperation}, error::{ChainStorageError, OrNotFound}, lmdb_db::{ + composite_key::CompositeKey, lmdb::{ fetch_db_entry_sizes, lmdb_clear, @@ -103,7 +98,7 @@ use crate::{ MmrTree, PrunedOutput, Reorg, - TemplateRegistration, + TemplateRegistrationEntry, }, consensus::ConsensusManager, transactions::{ @@ -148,6 +143,15 @@ const LMDB_DB_VALIDATOR_NODES_MAPPING: &str = "validator_nodes_mapping"; const LMDB_DB_VALIDATOR_NODE_ENDING: &str = "validator_node_ending"; const LMDB_DB_TEMPLATE_REGISTRATIONS: &str = "template_registrations"; +/// HeaderHash(32), mmr_pos(4), hash(32) +type InputKey = CompositeKey<68>; +/// HeaderHash(32), mmr_pos(4), hash(32) +type KernelKey = CompositeKey<68>; +/// HeaderHash(32), mmr_pos(4), hash(32) +type OutputKey = CompositeKey<68>; +/// Height(8), Hash(32) +type ValidatorNodeRegistrationKey = CompositeKey<40>; + pub fn create_lmdb_database>( path: P, config: LMDBConfig, @@ -257,7 +261,7 @@ pub struct LMDBDatabase { validator_nodes_mapping: DatabaseRef, /// Maps the end block height of nodes validator_nodes_ending: DatabaseRef, - /// Maps CodeTemplateRegistration hash-> TemplateRegistration + /// Maps CodeTemplateRegistration -> TemplateRegistration template_registrations: DatabaseRef, _file_lock: Arc, consensus_manager: ConsensusManager, @@ -562,19 +566,16 @@ impl LMDBDatabase { key: &OutputKey, ) -> Result { let mut output: TransactionOutputRowData = - lmdb_get(txn, &self.utxos_db, key.as_bytes()).or_not_found("TransactionOutput", "key", key.to_hex())?; + lmdb_get(txn, &self.utxos_db, key).or_not_found("TransactionOutput", "key", key.to_string())?; let pruned_output = 
output .output .take() .ok_or_else(|| ChainStorageError::DataInconsistencyDetected { function: "prune_output", - details: format!( - "Attempt to prune output that has already been pruned for key {}", - key.to_hex() - ), + details: format!("Attempt to prune output that has already been pruned for key {}", key), })?; // output.output is None - lmdb_replace(txn, &self.utxos_db, key.as_bytes(), &output)?; + lmdb_replace(txn, &self.utxos_db, key, &output)?; Ok(pruned_output) } @@ -590,7 +591,7 @@ impl LMDBDatabase { let output_hash = output.hash(); let witness_hash = output.witness_hash(); - let output_key = OutputKey::new(header_hash.as_slice(), mmr_position, &[]); + let output_key = OutputKey::try_from_parts(&[header_hash.as_slice(), mmr_position.to_le_bytes().as_slice()])?; lmdb_insert( txn, @@ -604,13 +605,13 @@ impl LMDBDatabase { txn, &*self.txos_hash_to_index_db, output_hash.as_slice(), - &(mmr_position, output_key.as_bytes()), + &(mmr_position, output_key.to_vec()), "txos_hash_to_index_db", )?; lmdb_insert( txn, &*self.utxos_db, - output_key.as_bytes(), + &output_key, &TransactionOutputRowData { output: Some(output.clone()), header_hash: *header_hash, @@ -642,18 +643,18 @@ impl LMDBDatabase { header_hash.to_hex(), ))); } - let key = OutputKey::new(header_hash.as_slice(), mmr_position, &[]); + let key = OutputKey::try_from_parts(&[header_hash.as_slice(), mmr_position.to_le_bytes().as_slice()])?; lmdb_insert( txn, &*self.txos_hash_to_index_db, output_hash.as_slice(), - &(mmr_position, key.as_bytes()), + &(mmr_position, key.to_vec()), "txos_hash_to_index_db", )?; lmdb_insert( txn, &*self.utxos_db, - key.as_bytes(), + &key, &TransactionOutputRowData { output: None, header_hash: *header_hash, @@ -675,7 +676,11 @@ impl LMDBDatabase { mmr_position: u32, ) -> Result<(), ChainStorageError> { let hash = kernel.hash(); - let key = KernelKey::new(header_hash.as_slice(), mmr_position, hash.as_slice()); + let key = KernelKey::try_from_parts(&[ + header_hash.as_slice(), + 
mmr_position.to_le_bytes().as_slice(), + hash.as_slice(), + ])?; lmdb_insert( txn, @@ -699,7 +704,7 @@ impl LMDBDatabase { lmdb_insert( txn, &*self.kernels_db, - key.as_bytes(), + &key, &TransactionKernelRowData { kernel: kernel.clone(), header_hash: *header_hash, @@ -738,11 +743,15 @@ impl LMDBDatabase { )?; let hash = input.canonical_hash(); - let key = InputKey::new(header_hash.as_slice(), mmr_position, hash.as_slice()); + let key = InputKey::try_from_parts(&[ + header_hash.as_slice(), + mmr_position.to_le_bytes().as_slice(), + hash.as_slice(), + ])?; lmdb_insert( txn, &*self.inputs_db, - key.as_bytes(), + &key, &TransactionInputRowDataRef { input: &input.to_compact(), header_hash, @@ -1315,6 +1324,7 @@ impl LMDBDatabase { )) })?; + let output_hash = output.hash(); if let Some(vn_reg) = output .features .sidechain_feature @@ -1332,7 +1342,7 @@ impl LMDBDatabase { .consensus_constants(header.height) .validator_node_timeout(), public_key: vn_reg.public_key.clone(), - output_hash: output.hash(), + output_hash, }; self.insert_validator_node(txn, &validator_node)?; } @@ -1342,9 +1352,11 @@ impl LMDBDatabase { .as_ref() .and_then(|f| f.template_registration()) { - let record = TemplateRegistration { + let record = TemplateRegistrationEntry { registration_data: template_reg.clone(), - height: header.height, + output_hash, + block_height: header.height, + block_hash, }; self.insert_template_registration(txn, &record)?; @@ -1500,8 +1512,8 @@ impl LMDBDatabase { &u64::from(pos + 1).to_be_bytes(), ) .or_not_found("BlockHeader", "mmr_position", pos.to_string())?; - let key = OutputKey::new(&hash, *pos, &[]); - debug!(target: LOG_TARGET, "Pruning output: {}", key.to_hex()); + let key = OutputKey::try_from_parts(&[hash.as_slice(), pos.to_le_bytes().as_slice()])?; + debug!(target: LOG_TARGET, "Pruning output: {}", key); self.prune_output(write_txn, &key)?; } @@ -1642,13 +1654,16 @@ impl LMDBDatabase { fn insert_template_registration( &self, txn: &WriteTransaction<'_>, - 
template_registration: &TemplateRegistration, + template_registration: &TemplateRegistrationEntry, ) -> Result<(), ChainStorageError> { - let key = template_registration.registration_data.hash(); + let key = ValidatorNodeRegistrationKey::try_from_parts(&[ + template_registration.block_height.to_le_bytes().as_slice(), + template_registration.output_hash.as_slice(), + ])?; lmdb_insert( txn, &self.template_registrations, - key.as_bytes(), + &key, template_registration, "template_registrations", ) @@ -2050,8 +2065,12 @@ impl BlockchainBackend for LMDBDatabase { if let Some((header_hash, mmr_position, hash)) = lmdb_get::<_, (HashOutput, u32, HashOutput)>(&txn, &self.kernel_excess_sig_index, key.as_slice())? { - let key = KernelKey::new(header_hash.deref(), mmr_position, hash.deref()); - Ok(lmdb_get(&txn, &self.kernels_db, key.as_bytes())? + let key = KernelKey::try_from_parts(&[ + header_hash.as_slice(), + mmr_position.to_le_bytes().as_slice(), + hash.as_slice(), + ])?; + Ok(lmdb_get(&txn, &self.kernels_db, &key)? .map(|kernel: TransactionKernelRowData| (kernel.kernel, header_hash))) } else { Ok(None) @@ -2576,16 +2595,22 @@ impl BlockchainBackend for LMDBDatabase { Ok(validator_nodes.into_iter().map(|a| a.shard_key).last()) } - fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError> { - // TODO: we can optimise this query by making using a compound key + fn fetch_template_registrations( + &self, + start_height: u64, + end_height: u64, + ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - lmdb_filter_map_values(&txn, &self.template_registrations, |tr: TemplateRegistration| { - if tr.height >= from_height { - Some(tr) - } else { - None + let mut result = vec![]; + for _ in start_height..=end_height { + let height = start_height.to_le_bytes(); + let mut cursor: KeyPrefixCursor = + lmdb_get_prefix_cursor(&txn, &self.template_registrations, &height)?; + while let Some((_, val)) = cursor.next()? 
{ + result.push(val); } - }) + } + Ok(result) } } @@ -2842,30 +2867,3 @@ impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { Ok(()) } } - -struct CompositeKey { - key: Vec, -} - -impl CompositeKey { - pub fn new(header_hash: &[u8], mmr_position: u32, hash: &[u8]) -> CompositeKey { - let mut key = Vec::with_capacity(header_hash.len() + mem::size_of::() + hash.len()); - key.extend_from_slice(header_hash); - key.extend_from_slice(&mmr_position.to_be_bytes()); - key.extend_from_slice(hash); - - CompositeKey { key } - } - - pub fn as_bytes(&self) -> &[u8] { - &self.key - } - - pub fn to_hex(&self) -> String { - self.key.to_hex() - } -} - -type InputKey = CompositeKey; -type KernelKey = CompositeKey; -type OutputKey = CompositeKey; diff --git a/base_layer/core/src/chain_storage/lmdb_db/mod.rs b/base_layer/core/src/chain_storage/lmdb_db/mod.rs index e462684b17..dd25794e99 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/mod.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/mod.rs @@ -28,6 +28,7 @@ use tari_crypto::hash_domain; use crate::transactions::transaction_components::{TransactionInput, TransactionKernel, TransactionOutput}; // mod composite_key; +mod composite_key; pub(crate) mod helpers; pub(crate) mod key_prefix_cursor; mod lmdb; diff --git a/base_layer/core/src/chain_storage/mod.rs b/base_layer/core/src/chain_storage/mod.rs index aa65f98a95..6777a0fd05 100644 --- a/base_layer/core/src/chain_storage/mod.rs +++ b/base_layer/core/src/chain_storage/mod.rs @@ -84,4 +84,4 @@ mod active_validator_node; pub use active_validator_node::ActiveValidatorNode; mod template_registation; -pub use template_registation::TemplateRegistration; +pub use template_registation::TemplateRegistrationEntry; diff --git a/base_layer/core/src/chain_storage/template_registation.rs b/base_layer/core/src/chain_storage/template_registation.rs index 452fc02ef6..b13c8370b5 100644 --- a/base_layer/core/src/chain_storage/template_registation.rs +++ 
b/base_layer/core/src/chain_storage/template_registation.rs @@ -21,11 +21,14 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use serde::{Deserialize, Serialize}; +use tari_common_types::types::FixedHash; use crate::transactions::transaction_components::CodeTemplateRegistration; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct TemplateRegistration { +pub struct TemplateRegistrationEntry { pub registration_data: CodeTemplateRegistration, - pub height: u64, + pub output_hash: FixedHash, + pub block_height: u64, + pub block_hash: FixedHash, } diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 917f1dd3c4..e12ee86d36 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -67,7 +67,7 @@ use crate::{ MmrTree, PrunedOutput, Reorg, - TemplateRegistration, + TemplateRegistrationEntry, UtxoMinedInfo, Validators, }, @@ -427,8 +427,15 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().get_shard_key(height, public_key) } - fn fetch_template_registrations(&self, from_height: u64) -> Result, ChainStorageError> { - self.db.as_ref().unwrap().fetch_template_registrations(from_height) + fn fetch_template_registrations( + &self, + start_height: u64, + end_height: u64, + ) -> Result, ChainStorageError> { + self.db + .as_ref() + .unwrap() + .fetch_template_registrations(start_height, end_height) } } diff --git a/base_layer/core/src/transactions/transaction_components/output_type.rs b/base_layer/core/src/transactions/transaction_components/output_type.rs index 9db69b1ef2..1927d8c3e8 100644 --- a/base_layer/core/src/transactions/transaction_components/output_type.rs +++ b/base_layer/core/src/transactions/transaction_components/output_type.rs @@ -71,6 +71,13 @@ impl OutputType { OutputType::CodeTemplateRegistration, ] } + + pub fn is_sidechain_type(&self) -> bool { + matches!( + self, + 
OutputType::ValidatorNodeRegistration | OutputType::CodeTemplateRegistration + ) + } } impl Default for OutputType { diff --git a/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs b/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs index 8f3290cd1e..83f67156f6 100644 --- a/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs +++ b/base_layer/core/src/transactions/transaction_components/side_chain/template_registration.rs @@ -23,19 +23,15 @@ use std::io::{Error, ErrorKind, Read, Write}; use serde::{Deserialize, Serialize}; -use tari_common_types::types::{FixedHash, PublicKey, Signature}; - -use crate::{ - consensus::{ - read_byte, - ConsensusDecoding, - ConsensusEncoding, - ConsensusEncodingSized, - DomainSeparatedConsensusHasher, - MaxSizeBytes, - MaxSizeString, - }, - transactions::TransactionHashDomain, +use tari_common_types::types::{PublicKey, Signature}; + +use crate::consensus::{ + read_byte, + ConsensusDecoding, + ConsensusEncoding, + ConsensusEncodingSized, + MaxSizeBytes, + MaxSizeString, }; #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] @@ -50,22 +46,6 @@ pub struct CodeTemplateRegistration { pub binary_url: MaxSizeString<255>, } -impl CodeTemplateRegistration { - pub fn hash(&self) -> FixedHash { - DomainSeparatedConsensusHasher::::new("template_registration") - .chain(&self.author_public_key) - .chain(&self.author_signature) - .chain(&self.template_name) - .chain(&self.template_version) - .chain(&self.template_type) - .chain(&self.build_info) - .chain(&self.binary_sha) - .chain(&self.binary_url) - .finalize() - .into() - } -} - impl ConsensusEncoding for CodeTemplateRegistration { fn consensus_encode(&self, writer: &mut W) -> Result<(), Error> { self.author_public_key.consensus_encode(writer)?; From 2dbceaa1d22e22f8ac18a07695b5468bcca0cdf4 Mon Sep 17 00:00:00 2001 From: Miguel Naveira 
<47919901+mrnaveira@users.noreply.github.com> Date: Wed, 19 Oct 2022 02:30:09 -0600 Subject: [PATCH 15/21] refactor: split tari_base_node and tari_console_wallet into a lib component (#4818) **Still a work in progress** Description --- * Split `tari_base_node` and `tari_console_wallet` into `lib.rs` and `main.rs` * TBD: split `tari_miner` Motivation and Context --- To enable the new cucumber-rs integration tests on [tari-dan#103](https://github.com/tari-project/tari-dan/pull/103), specifically to spawn base nodes and wallets, it's desirable to be able to treat them as native Rust functions exported in a `lib.rs` module. How Has This Been Tested? --- --- .../tari_app_utilities/src/common_cli_args.rs | 4 +- applications/tari_base_node/src/builder.rs | 2 +- applications/tari_base_node/src/cli.rs | 2 +- applications/tari_base_node/src/lib.rs | 207 ++++++++++++++ applications/tari_base_node/src/main.rs | 180 +----------- applications/tari_base_node/src/recovery.rs | 2 +- applications/tari_console_wallet/src/cli.rs | 2 +- .../tari_console_wallet/src/config.rs | 3 + .../tari_console_wallet/src/init/mod.rs | 2 + applications/tari_console_wallet/src/lib.rs | 263 ++++++++++++++++++ applications/tari_console_wallet/src/main.rs | 190 +------------ .../tari_console_wallet/src/recovery.rs | 2 + .../src/utils/crossterm_events.rs | 6 + .../tari_console_wallet/src/wallet_modes.rs | 2 + 14 files changed, 508 insertions(+), 359 deletions(-) create mode 100644 applications/tari_base_node/src/lib.rs create mode 100644 applications/tari_console_wallet/src/lib.rs diff --git a/applications/tari_app_utilities/src/common_cli_args.rs b/applications/tari_app_utilities/src/common_cli_args.rs index 6da5808d48..cc9e2d1508 100644 --- a/applications/tari_app_utilities/src/common_cli_args.rs +++ b/applications/tari_app_utilities/src/common_cli_args.rs @@ -35,10 +35,10 @@ pub struct CommonCliArgs { default_value_t= defaults::base_path(), env = "TARI_BASE_DIR" )] - base_path: String, + pub
base_path: String, /// A path to the configuration file to use (config.toml) #[clap(short, long, default_value_t= defaults::config())] - config: String, + pub config: String, /// The path to the log configuration file #[clap(short, long, alias = "log_config")] pub log_config: Option, diff --git a/applications/tari_base_node/src/builder.rs b/applications/tari_base_node/src/builder.rs index a9326055e2..fe2f46bcbb 100644 --- a/applications/tari_base_node/src/builder.rs +++ b/applications/tari_base_node/src/builder.rs @@ -53,7 +53,7 @@ use tari_service_framework::ServiceHandles; use tari_shutdown::ShutdownSignal; use tokio::sync::watch; -use crate::{bootstrap::BaseNodeBootstrapper, config::DatabaseType, ApplicationConfig}; +use crate::{bootstrap::BaseNodeBootstrapper, ApplicationConfig, DatabaseType}; const LOG_TARGET: &str = "c::bn::initialization"; diff --git a/applications/tari_base_node/src/cli.rs b/applications/tari_base_node/src/cli.rs index 337bcac116..615fd96462 100644 --- a/applications/tari_base_node/src/cli.rs +++ b/applications/tari_base_node/src/cli.rs @@ -28,7 +28,7 @@ use tari_common::configuration::{ConfigOverrideProvider, Network}; #[clap(author, version, about, long_about = None)] #[clap(propagate_version = true)] #[allow(clippy::struct_excessive_bools)] -pub(crate) struct Cli { +pub struct Cli { #[clap(flatten)] pub common: CommonCliArgs, /// Create a default configuration file if it doesn't exist diff --git a/applications/tari_base_node/src/lib.rs b/applications/tari_base_node/src/lib.rs new file mode 100644 index 0000000000..5875b90651 --- /dev/null +++ b/applications/tari_base_node/src/lib.rs @@ -0,0 +1,207 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#[macro_use] +mod table; + +mod bootstrap; +mod builder; +pub mod cli; +mod commands; +pub mod config; +mod grpc; +#[cfg(feature = "metrics")] +mod metrics; +mod recovery; +mod utils; + +use std::{env, process, sync::Arc}; + +use commands::{cli_loop::CliLoop, command::CommandContext}; +use futures::FutureExt; +use log::*; +use opentelemetry::{self, global, KeyValue}; +use tari_app_utilities::{common_cli_args::CommonCliArgs, consts}; +use tari_common::{ + configuration::bootstrap::{grpc_default_port, ApplicationType}, + exit_codes::{ExitCode, ExitError}, +}; +use tari_comms::{multiaddr::Multiaddr, utils::multiaddr::multiaddr_to_socketaddr, NodeIdentity}; +use tari_shutdown::{Shutdown, ShutdownSignal}; +use tokio::task; +use tonic::transport::Server; +use tracing_subscriber::{layer::SubscriberExt, Registry}; + +use crate::cli::Cli; +pub use crate::{ + config::{ApplicationConfig, BaseNodeConfig, DatabaseType}, + metrics::MetricsConfig, +}; + +const LOG_TARGET: &str = "tari::base_node::app"; + +pub async fn run_base_node(node_identity: Arc, config: Arc) -> Result<(), ExitError> { + let shutdown = Shutdown::new(); + + let data_dir = config.base_node.data_dir.clone(); + let data_dir_str = data_dir.clone().into_os_string().into_string().unwrap(); + + let mut config_path = data_dir.clone(); + config_path.push("config.toml"); + + let cli = Cli { + common: CommonCliArgs { + base_path: data_dir_str, + config: config_path.into_os_string().into_string().unwrap(), + log_config: None, + log_level: None, + config_property_overrides: vec![], + }, + init: true, + tracing_enabled: false, + rebuild_db: false, + non_interactive_mode: true, + watch: None, + network: None, + }; + + run_base_node_with_cli(node_identity, config, cli, shutdown).await +} + +/// Sets up the base node and runs the cli_loop +pub async fn run_base_node_with_cli( + node_identity: Arc, + config: Arc, + cli: Cli, + shutdown: Shutdown, +) -> Result<(), ExitError> { + if cli.tracing_enabled { + enable_tracing(); + 
} + + #[cfg(feature = "metrics")] + { + metrics::install( + ApplicationType::BaseNode, + &node_identity, + &config.metrics, + shutdown.to_signal(), + ); + } + + log_mdc::insert("node-public-key", node_identity.public_key().to_string()); + log_mdc::insert("node-id", node_identity.node_id().to_string()); + + if cli.rebuild_db { + info!(target: LOG_TARGET, "Node is in recovery mode, entering recovery"); + recovery::initiate_recover_db(&config.base_node)?; + recovery::run_recovery(&config.base_node) + .await + .map_err(|e| ExitError::new(ExitCode::RecoveryError, e))?; + return Ok(()); + }; + + // Build, node, build! + let ctx = builder::configure_and_initialize_node(config.clone(), node_identity, shutdown.to_signal()).await?; + + if config.base_node.grpc_enabled { + let grpc_address = config.base_node.grpc_address.clone().unwrap_or_else(|| { + let port = grpc_default_port(ApplicationType::BaseNode, config.base_node.network); + format!("/ip4/127.0.0.1/tcp/{}", port).parse().unwrap() + }); + // Go, GRPC, go go + let grpc = grpc::base_node_grpc_server::BaseNodeGrpcServer::from_base_node_context(&ctx); + task::spawn(run_grpc(grpc, grpc_address, shutdown.to_signal())); + } + + // Run, node, run! + let context = CommandContext::new(&ctx, shutdown); + let main_loop = CliLoop::new(context, cli.watch, cli.non_interactive_mode); + if cli.non_interactive_mode { + println!("Node started in non-interactive mode (pid = {})", process::id()); + } else { + info!( + target: LOG_TARGET, + "Node has been successfully configured and initialized. Starting CLI loop." + ); + } + if !config.base_node.force_sync_peers.is_empty() { + warn!( + target: LOG_TARGET, + "Force Sync Peers have been set! This node will only sync to the nodes in this set." 
+ ); + } + + info!(target: LOG_TARGET, "Tari base node has STARTED"); + main_loop.cli_loop().await; + + ctx.wait_for_shutdown().await; + + println!("Goodbye!"); + Ok(()) +} + +fn enable_tracing() { + // To run: + // docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 jaegertracing/all-in-one:latest + // To view the UI after starting the container (default): + // http://localhost:16686 + global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + let tracer = opentelemetry_jaeger::new_pipeline() + .with_service_name("tari::base_node") + .with_tags(vec![ + KeyValue::new("pid", process::id().to_string()), + KeyValue::new( + "current_exe", + env::current_exe().unwrap().to_str().unwrap_or_default().to_owned(), + ), + KeyValue::new("version", consts::APP_VERSION), + ]) + .install_batch(opentelemetry::runtime::Tokio) + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + let subscriber = Registry::default().with(telemetry); + tracing::subscriber::set_global_default(subscriber) + .expect("Tracing could not be set. 
Try running without `--tracing-enabled`"); +} + +/// Runs the gRPC server +async fn run_grpc( + grpc: grpc::base_node_grpc_server::BaseNodeGrpcServer, + grpc_address: Multiaddr, + interrupt_signal: ShutdownSignal, +) -> Result<(), anyhow::Error> { + info!(target: LOG_TARGET, "Starting GRPC on {}", grpc_address); + + let grpc_address = multiaddr_to_socketaddr(&grpc_address)?; + Server::builder() + .add_service(tari_app_grpc::tari_rpc::base_node_server::BaseNodeServer::new(grpc)) + .serve_with_shutdown(grpc_address, interrupt_signal.map(|_| ())) + .await + .map_err(|err| { + error!(target: LOG_TARGET, "GRPC encountered an error: {:?}", err); + err + })?; + + info!(target: LOG_TARGET, "Stopping GRPC"); + Ok(()) +} diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index a9b0e399e3..84043c6ed3 100644 --- a/applications/tari_base_node/src/main.rs +++ b/applications/tari_base_node/src/main.rs @@ -53,15 +53,15 @@ /// /// `help` - Displays a list of commands /// `get-balance` - Displays the balance of the wallet (available, pending incoming, pending outgoing) -/// `send-tari` - Sends Tari, the amount needs to be specified, followed by the destination (public key or emoji id) and -/// an optional message `get-chain-metadata` - Lists information about the blockchain of this Base Node +/// `send-tari` - Sends Tari, the amount needs to be specified, followed by the destination (public key or emoji +/// id) and an optional message `get-chain-metadata` - Lists information about the blockchain of this Base Node /// `list-peers` - Lists information about peers known by this base node /// `ban-peer` - Bans a peer /// `unban-peer` - Removes a ban for a peer /// `list-connections` - Lists active connections to this Base Node -/// `list-headers` - Lists header information. 
Either the first header height and the last header height needs to be -/// specified, or the amount of headers from the top `check-db` - Checks the blockchain database for missing blocks and -/// headers `calc-timing` - Calculates the time average time taken to mine a given range of blocks +/// `list-headers` - Lists header information. Either the first header height and the last header height needs to +/// be specified, or the amount of headers from the top `check-db` - Checks the blockchain database for missing +/// blocks and headers `calc-timing` - Calculates the time average time taken to mine a given range of blocks /// `discover-peer` - Attempts to discover a peer on the network, a public key or emoji id needs to be specified /// `get-block` - Retrieves a block, the height of the block needs to be specified /// `get-mempool-stats` - Displays information about the mempool @@ -69,53 +69,18 @@ /// `whoami` - Displays identity information about this Base Node and it's wallet /// `quit` - Exits the Base Node /// `exit` - Same as quit - -/// Used to display tabulated data -#[macro_use] -mod table; - -mod bootstrap; -mod builder; -mod cli; -mod commands; -mod config; -mod grpc; -#[cfg(feature = "metrics")] -mod metrics; -mod recovery; -mod utils; - -use std::{env, process, str::FromStr, sync::Arc}; +use std::{process, str::FromStr, sync::Arc}; use clap::Parser; -use commands::{cli_loop::CliLoop, command::CommandContext}; -use futures::FutureExt; use log::*; -use opentelemetry::{self, global, KeyValue}; -use tari_app_utilities::{consts, identity_management::setup_node_identity, utilities::setup_runtime}; -use tari_common::{ - configuration::{ - bootstrap::{grpc_default_port, ApplicationType}, - Network, - }, - exit_codes::{ExitCode, ExitError}, - initialize_logging, - load_configuration, -}; -use tari_comms::{ - multiaddr::Multiaddr, - peer_manager::PeerFeatures, - utils::multiaddr::multiaddr_to_socketaddr, - NodeIdentity, -}; +use opentelemetry::{self, global}; 
+use tari_app_utilities::{identity_management::setup_node_identity, utilities::setup_runtime}; +use tari_base_node::{cli::Cli, run_base_node_with_cli, ApplicationConfig}; +use tari_common::{configuration::Network, exit_codes::ExitError, initialize_logging, load_configuration}; +use tari_comms::peer_manager::PeerFeatures; #[cfg(all(unix, feature = "libtor"))] use tari_libtor::tor::Tor; -use tari_shutdown::{Shutdown, ShutdownSignal}; -use tokio::task; -use tonic::transport::Server; -use tracing_subscriber::{layer::SubscriberExt, Registry}; - -use crate::{cli::Cli, config::ApplicationConfig}; +use tari_shutdown::Shutdown; const LOG_TARGET: &str = "tari::base_node::app"; @@ -187,129 +152,10 @@ fn main_inner() -> Result<(), ExitError> { } // Run the base node - runtime.block_on(run_node(node_identity, Arc::new(config), cli, shutdown))?; + runtime.block_on(run_base_node_with_cli(node_identity, Arc::new(config), cli, shutdown))?; // Shutdown and send any traces global::shutdown_tracer_provider(); Ok(()) } - -/// Sets up the base node and runs the cli_loop -async fn run_node( - node_identity: Arc, - config: Arc, - cli: Cli, - shutdown: Shutdown, -) -> Result<(), ExitError> { - if cli.tracing_enabled { - enable_tracing(); - } - - #[cfg(feature = "metrics")] - { - metrics::install( - ApplicationType::BaseNode, - &node_identity, - &config.metrics, - shutdown.to_signal(), - ); - } - - log_mdc::insert("node-public-key", node_identity.public_key().to_string()); - log_mdc::insert("node-id", node_identity.node_id().to_string()); - - if cli.rebuild_db { - info!(target: LOG_TARGET, "Node is in recovery mode, entering recovery"); - recovery::initiate_recover_db(&config.base_node)?; - recovery::run_recovery(&config.base_node) - .await - .map_err(|e| ExitError::new(ExitCode::RecoveryError, e))?; - return Ok(()); - }; - - // Build, node, build! 
- let ctx = builder::configure_and_initialize_node(config.clone(), node_identity, shutdown.to_signal()).await?; - - if config.base_node.grpc_enabled { - let grpc_address = config.base_node.grpc_address.clone().unwrap_or_else(|| { - let port = grpc_default_port(ApplicationType::BaseNode, config.base_node.network); - format!("/ip4/127.0.0.1/tcp/{}", port).parse().unwrap() - }); - // Go, GRPC, go go - let grpc = grpc::base_node_grpc_server::BaseNodeGrpcServer::from_base_node_context(&ctx); - task::spawn(run_grpc(grpc, grpc_address, shutdown.to_signal())); - } - - // Run, node, run! - let context = CommandContext::new(&ctx, shutdown); - let main_loop = CliLoop::new(context, cli.watch, cli.non_interactive_mode); - if cli.non_interactive_mode { - println!("Node started in non-interactive mode (pid = {})", process::id()); - } else { - info!( - target: LOG_TARGET, - "Node has been successfully configured and initialized. Starting CLI loop." - ); - } - if !config.base_node.force_sync_peers.is_empty() { - warn!( - target: LOG_TARGET, - "Force Sync Peers have been set! This node will only sync to the nodes in this set." 
- ); - } - - info!(target: LOG_TARGET, "Tari base node has STARTED"); - main_loop.cli_loop().await; - - ctx.wait_for_shutdown().await; - - println!("Goodbye!"); - Ok(()) -} - -fn enable_tracing() { - // To run: - // docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 jaegertracing/all-in-one:latest - // To view the UI after starting the container (default): - // http://localhost:16686 - global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_pipeline() - .with_service_name("tari::base_node") - .with_tags(vec![ - KeyValue::new("pid", process::id().to_string()), - KeyValue::new( - "current_exe", - env::current_exe().unwrap().to_str().unwrap_or_default().to_owned(), - ), - KeyValue::new("version", consts::APP_VERSION), - ]) - .install_batch(opentelemetry::runtime::Tokio) - .unwrap(); - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let subscriber = Registry::default().with(telemetry); - tracing::subscriber::set_global_default(subscriber) - .expect("Tracing could not be set. 
Try running without `--tracing-enabled`"); -} - -/// Runs the gRPC server -async fn run_grpc( - grpc: grpc::base_node_grpc_server::BaseNodeGrpcServer, - grpc_address: Multiaddr, - interrupt_signal: ShutdownSignal, -) -> Result<(), anyhow::Error> { - info!(target: LOG_TARGET, "Starting GRPC on {}", grpc_address); - - let grpc_address = multiaddr_to_socketaddr(&grpc_address)?; - Server::builder() - .add_service(tari_app_grpc::tari_rpc::base_node_server::BaseNodeServer::new(grpc)) - .serve_with_shutdown(grpc_address, interrupt_signal.map(|_| ())) - .await - .map_err(|err| { - error!(target: LOG_TARGET, "GRPC encountered an error: {:?}", err); - err - })?; - - info!(target: LOG_TARGET, "Stopping GRPC"); - Ok(()) -} diff --git a/applications/tari_base_node/src/recovery.rs b/applications/tari_base_node/src/recovery.rs index fad7ebf05c..7563635760 100644 --- a/applications/tari_base_node/src/recovery.rs +++ b/applications/tari_base_node/src/recovery.rs @@ -55,7 +55,7 @@ use tari_core::{ }, }; -use crate::config::{BaseNodeConfig, DatabaseType}; +use crate::{BaseNodeConfig, DatabaseType}; pub const LOG_TARGET: &str = "base_node::app"; diff --git a/applications/tari_console_wallet/src/cli.rs b/applications/tari_console_wallet/src/cli.rs index 4575673cfa..fc95ff52c1 100644 --- a/applications/tari_console_wallet/src/cli.rs +++ b/applications/tari_console_wallet/src/cli.rs @@ -41,7 +41,7 @@ use tari_utilities::{ #[clap(author, version, about, long_about = None)] #[clap(propagate_version = true)] #[allow(clippy::struct_excessive_bools)] -pub(crate) struct Cli { +pub struct Cli { #[clap(flatten)] pub common: CommonCliArgs, /// Enable tracing diff --git a/applications/tari_console_wallet/src/config.rs b/applications/tari_console_wallet/src/config.rs index 9deff44fec..f1ac8d2a9d 100644 --- a/applications/tari_console_wallet/src/config.rs +++ b/applications/tari_console_wallet/src/config.rs @@ -20,12 +20,15 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#![allow(dead_code, unused)] + use config::Config; use tari_app_utilities::consts; use tari_common::{configuration::CommonConfig, ConfigurationError, DefaultConfigLoader}; use tari_p2p::{auto_update::AutoUpdateConfig, PeerSeedsConfig}; use tari_wallet::WalletConfig; +#[derive(Clone)] pub struct ApplicationConfig { pub common: CommonConfig, pub auto_update: AutoUpdateConfig, diff --git a/applications/tari_console_wallet/src/init/mod.rs b/applications/tari_console_wallet/src/init/mod.rs index f00b788e31..d282bd7134 100644 --- a/applications/tari_console_wallet/src/init/mod.rs +++ b/applications/tari_console_wallet/src/init/mod.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#![allow(dead_code, unused)] + use std::{fs, path::PathBuf, str::FromStr, sync::Arc}; use log::*; diff --git a/applications/tari_console_wallet/src/lib.rs b/applications/tari_console_wallet/src/lib.rs new file mode 100644 index 0000000000..2b7c0ef6e0 --- /dev/null +++ b/applications/tari_console_wallet/src/lib.rs @@ -0,0 +1,263 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +mod automation; +mod cli; +mod config; +mod grpc; +mod init; +mod notifier; +mod recovery; +mod ui; +mod utils; +mod wallet_modes; + +use std::{env, process}; + +pub use cli::Cli; +use init::{ + boot, + change_password, + get_base_node_peer_config, + init_wallet, + start_wallet, + tari_splash_screen, + WalletBoot, +}; +use log::*; +use opentelemetry::{self, global, KeyValue}; +use recovery::{get_seed_from_seed_words, prompt_private_key_from_seed_words}; +use tari_app_utilities::{common_cli_args::CommonCliArgs, consts}; +use tari_common::{ + configuration::bootstrap::ApplicationType, + exit_codes::{ExitCode, ExitError}, +}; +use tari_key_manager::cipher_seed::CipherSeed; +#[cfg(all(unix, feature = "libtor"))] +use tari_libtor::tor::Tor; +use tari_shutdown::Shutdown; +use tari_utilities::SafePassword; +use tokio::runtime::Runtime; +use tracing_subscriber::{layer::SubscriberExt, Registry}; +use wallet_modes::{command_mode, grpc_mode, recovery_mode, script_mode, tui_mode, WalletMode}; + +pub use 
crate::config::ApplicationConfig; +use crate::init::wallet_mode; + +pub const LOG_TARGET: &str = "wallet::console_wallet::main"; + +pub fn run_wallet(runtime: Runtime, config: &mut ApplicationConfig) -> Result<(), ExitError> { + let data_dir = config.wallet.data_dir.clone(); + let data_dir_str = data_dir.clone().into_os_string().into_string().unwrap(); + + let mut config_path = data_dir; + config_path.push("config.toml"); + + let cli = Cli { + common: CommonCliArgs { + base_path: data_dir_str, + config: config_path.into_os_string().into_string().unwrap(), + log_config: None, + log_level: None, + config_property_overrides: vec![], + }, + tracing_enabled: false, + password: None, + change_password: false, + recovery: false, + seed_words: None, + seed_words_file_name: None, + non_interactive_mode: true, + input_file: None, + command: None, + wallet_notify: None, + command_mode_auto_exit: false, + network: None, + grpc_enabled: true, + grpc_address: None, + command2: None, + }; + + run_wallet_with_cli(runtime, config, cli) +} + +pub fn run_wallet_with_cli(runtime: Runtime, config: &mut ApplicationConfig, cli: Cli) -> Result<(), ExitError> { + if cli.tracing_enabled { + enable_tracing(); + } + + info!( + target: LOG_TARGET, + "== {} ({}) ==", + ApplicationType::ConsoleWallet, + consts::APP_VERSION + ); + + let password = get_password(config, &cli); + + if password.is_none() { + tari_splash_screen("Console Wallet"); + } + + // check for recovery based on existence of wallet file + let mut boot_mode = boot(&cli, &config.wallet)?; + + let recovery_seed = get_recovery_seed(boot_mode, &cli)?; + + // get command line password if provided + let seed_words_file_name = cli.seed_words_file_name.clone(); + + let mut shutdown = Shutdown::new(); + let shutdown_signal = shutdown.to_signal(); + + if cli.change_password { + info!(target: LOG_TARGET, "Change password requested."); + return runtime.block_on(change_password( + config, + password, + shutdown_signal, + 
cli.non_interactive_mode, + )); + } + + // Run our own Tor instance, if configured + // This is currently only possible on linux/macos + #[cfg(all(unix, feature = "libtor"))] + if config.wallet.use_libtor && config.wallet.p2p.transport.is_tor() { + let tor = Tor::initialize()?; + tor.update_comms_transport(&mut config.wallet.p2p.transport)?; + runtime.spawn(tor.run(shutdown.to_signal())); + debug!( + target: LOG_TARGET, + "Updated Tor comms transport: {:?}", config.wallet.p2p.transport + ); + } + + // initialize wallet + let mut wallet = runtime.block_on(init_wallet( + config, + password, + seed_words_file_name, + recovery_seed, + shutdown_signal, + cli.non_interactive_mode, + ))?; + + // Check if there is an in progress recovery in the wallet's database + if wallet.is_recovery_in_progress()? { + println!("A Wallet Recovery was found to be in progress, continuing."); + boot_mode = WalletBoot::Recovery; + } + + // get base node/s + let base_node_config = + runtime.block_on(get_base_node_peer_config(config, &mut wallet, cli.non_interactive_mode))?; + let base_node_selected = base_node_config.get_base_node_peer()?; + + let wallet_mode = wallet_mode(&cli, boot_mode); + + // start wallet + runtime.block_on(start_wallet(&mut wallet, &base_node_selected, &wallet_mode))?; + + debug!(target: LOG_TARGET, "Starting app"); + + let handle = runtime.handle().clone(); + + let result = match wallet_mode { + WalletMode::Tui => tui_mode(handle, &config.wallet, &base_node_config, wallet.clone()), + WalletMode::Grpc => grpc_mode(handle, &config.wallet, wallet.clone()), + WalletMode::Script(path) => script_mode(handle, &cli, &config.wallet, &base_node_config, wallet.clone(), path), + WalletMode::Command(command) => command_mode( + handle, + &cli, + &config.wallet, + &base_node_config, + wallet.clone(), + *command, + ), + + WalletMode::RecoveryDaemon | WalletMode::RecoveryTui => { + recovery_mode(handle, &base_node_config, &config.wallet, wallet_mode, wallet.clone()) + }, + 
WalletMode::Invalid => Err(ExitError::new( + ExitCode::InputError, + "Invalid wallet mode - are you trying too many command options at once?", + )), + }; + + print!("\nShutting down wallet... "); + shutdown.trigger(); + runtime.block_on(wallet.wait_until_shutdown()); + println!("Done."); + + result +} + +fn get_password(config: &ApplicationConfig, cli: &Cli) -> Option { + cli.password + .as_ref() + .or(config.wallet.password.as_ref()) + .map(|s| s.to_owned()) +} + +fn get_recovery_seed(boot_mode: WalletBoot, cli: &Cli) -> Result, ExitError> { + if matches!(boot_mode, WalletBoot::Recovery) { + let seed = if cli.seed_words.is_some() { + let seed_words: Vec = cli + .seed_words + .clone() + .unwrap() + .split_whitespace() + .map(|v| v.to_string()) + .collect(); + get_seed_from_seed_words(seed_words)? + } else { + prompt_private_key_from_seed_words()? + }; + Ok(Some(seed)) + } else { + Ok(None) + } +} + +fn enable_tracing() { + // To run: + // docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 jaegertracing/all-in-one:latest + // To view the UI after starting the container (default): + // http://localhost:16686 + global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + let tracer = opentelemetry_jaeger::new_pipeline() + .with_service_name("tari::console_wallet") + .with_tags(vec![ + KeyValue::new("pid", process::id().to_string()), + KeyValue::new( + "current_exe", + env::current_exe().unwrap().to_str().unwrap_or_default().to_owned(), + ), + ]) + .install_batch(opentelemetry::runtime::Tokio) + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + let subscriber = Registry::default().with(telemetry); + tracing::subscriber::set_global_default(subscriber) + .expect("Tracing could not be set. 
Try running without `--tracing-enabled`"); +} diff --git a/applications/tari_console_wallet/src/main.rs b/applications/tari_console_wallet/src/main.rs index cbeebbcf4c..d00e8d11d2 100644 --- a/applications/tari_console_wallet/src/main.rs +++ b/applications/tari_console_wallet/src/main.rs @@ -20,38 +20,17 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{env, process}; +use std::process; use clap::Parser; -use cli::Cli; -use init::{ - boot, - change_password, - get_base_node_peer_config, - init_wallet, - start_wallet, - tari_splash_screen, - WalletBoot, -}; use log::*; -use opentelemetry::{self, global, KeyValue}; -use recovery::prompt_private_key_from_seed_words; -use tari_app_utilities::consts; use tari_common::{ configuration::bootstrap::{grpc_default_port, ApplicationType}, - exit_codes::{ExitCode, ExitError}, + exit_codes::ExitError, initialize_logging, load_configuration, }; -use tari_key_manager::cipher_seed::CipherSeed; -#[cfg(all(unix, feature = "libtor"))] -use tari_libtor::tor::Tor; -use tari_shutdown::Shutdown; -use tari_utilities::SafePassword; -use tracing_subscriber::{layer::SubscriberExt, Registry}; -use wallet_modes::{command_mode, grpc_mode, recovery_mode, script_mode, tui_mode, WalletMode}; - -use crate::{config::ApplicationConfig, init::wallet_mode, recovery::get_seed_from_seed_words}; +use tari_console_wallet::{run_wallet_with_cli, ApplicationConfig, Cli}; pub const LOG_TARGET: &str = "wallet::console_wallet::main"; @@ -108,168 +87,7 @@ fn main_inner() -> Result<(), ExitError> { .build() .expect("Failed to build a runtime!"); - if cli.tracing_enabled { - enable_tracing(); - } - - info!( - target: LOG_TARGET, - "== {} ({}) ==", - ApplicationType::ConsoleWallet, - consts::APP_VERSION - ); - - let password = get_password(&config, &cli); - - if password.is_none() { - tari_splash_screen("Console 
Wallet"); - } - - // check for recovery based on existence of wallet file - let mut boot_mode = boot(&cli, &config.wallet)?; - - let recovery_seed = get_recovery_seed(boot_mode, &cli)?; - - // get command line password if provided - let seed_words_file_name = cli.seed_words_file_name.clone(); - - let mut shutdown = Shutdown::new(); - let shutdown_signal = shutdown.to_signal(); - - if cli.change_password { - info!(target: LOG_TARGET, "Change password requested."); - return runtime.block_on(change_password( - &config, - password, - shutdown_signal, - cli.non_interactive_mode, - )); - } - - // Run our own Tor instance, if configured - // This is currently only possible on linux/macos - #[cfg(all(unix, feature = "libtor"))] - if config.wallet.use_libtor && config.wallet.p2p.transport.is_tor() { - let tor = Tor::initialize()?; - tor.update_comms_transport(&mut config.wallet.p2p.transport)?; - runtime.spawn(tor.run(shutdown.to_signal())); - debug!( - target: LOG_TARGET, - "Updated Tor comms transport: {:?}", config.wallet.p2p.transport - ); - } - - // initialize wallet - let mut wallet = runtime.block_on(init_wallet( - &config, - password, - seed_words_file_name, - recovery_seed, - shutdown_signal, - cli.non_interactive_mode, - ))?; - - // Check if there is an in progress recovery in the wallet's database - if wallet.is_recovery_in_progress()? 
{ - println!("A Wallet Recovery was found to be in progress, continuing."); - boot_mode = WalletBoot::Recovery; - } - - // get base node/s - let base_node_config = runtime.block_on(get_base_node_peer_config( - &config, - &mut wallet, - cli.non_interactive_mode, - ))?; - let base_node_selected = base_node_config.get_base_node_peer()?; - - let wallet_mode = wallet_mode(&cli, boot_mode); - - // start wallet - runtime.block_on(start_wallet(&mut wallet, &base_node_selected, &wallet_mode))?; - - debug!(target: LOG_TARGET, "Starting app"); - - let handle = runtime.handle().clone(); - - let result = match wallet_mode { - WalletMode::Tui => tui_mode(handle, &config.wallet, &base_node_config, wallet.clone()), - WalletMode::Grpc => grpc_mode(handle, &config.wallet, wallet.clone()), - WalletMode::Script(path) => script_mode(handle, &cli, &config.wallet, &base_node_config, wallet.clone(), path), - WalletMode::Command(command) => command_mode( - handle, - &cli, - &config.wallet, - &base_node_config, - wallet.clone(), - *command, - ), - - WalletMode::RecoveryDaemon | WalletMode::RecoveryTui => { - recovery_mode(handle, &base_node_config, &config.wallet, wallet_mode, wallet.clone()) - }, - WalletMode::Invalid => Err(ExitError::new( - ExitCode::InputError, - "Invalid wallet mode - are you trying too many command options at once?", - )), - }; - - print!("\nShutting down wallet... 
"); - shutdown.trigger(); - runtime.block_on(wallet.wait_until_shutdown()); - println!("Done."); - - result -} - -fn get_password(config: &ApplicationConfig, cli: &Cli) -> Option { - cli.password - .as_ref() - .or(config.wallet.password.as_ref()) - .map(|s| s.to_owned()) -} - -fn get_recovery_seed(boot_mode: WalletBoot, cli: &Cli) -> Result, ExitError> { - if matches!(boot_mode, WalletBoot::Recovery) { - let seed = if cli.seed_words.is_some() { - let seed_words: Vec = cli - .seed_words - .clone() - .unwrap() - .split_whitespace() - .map(|v| v.to_string()) - .collect(); - get_seed_from_seed_words(seed_words)? - } else { - prompt_private_key_from_seed_words()? - }; - Ok(Some(seed)) - } else { - Ok(None) - } -} - -fn enable_tracing() { - // To run: - // docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 jaegertracing/all-in-one:latest - // To view the UI after starting the container (default): - // http://localhost:16686 - global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_pipeline() - .with_service_name("tari::console_wallet") - .with_tags(vec![ - KeyValue::new("pid", process::id().to_string()), - KeyValue::new( - "current_exe", - env::current_exe().unwrap().to_str().unwrap_or_default().to_owned(), - ), - ]) - .install_batch(opentelemetry::runtime::Tokio) - .unwrap(); - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let subscriber = Registry::default().with(telemetry); - tracing::subscriber::set_global_default(subscriber) - .expect("Tracing could not be set. 
Try running without `--tracing-enabled`"); + run_wallet_with_cli(runtime, &mut config, cli) } fn setup_grpc_config(config: &mut ApplicationConfig) { diff --git a/applications/tari_console_wallet/src/recovery.rs b/applications/tari_console_wallet/src/recovery.rs index 6b9a9f3a66..35e2df9ca4 100644 --- a/applications/tari_console_wallet/src/recovery.rs +++ b/applications/tari_console_wallet/src/recovery.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#![allow(dead_code, unused)] + use chrono::offset::Local; use futures::FutureExt; use log::*; diff --git a/applications/tari_console_wallet/src/utils/crossterm_events.rs b/applications/tari_console_wallet/src/utils/crossterm_events.rs index 98308e8fb3..954a9319f8 100644 --- a/applications/tari_console_wallet/src/utils/crossterm_events.rs +++ b/applications/tari_console_wallet/src/utils/crossterm_events.rs @@ -102,6 +102,12 @@ impl CrosstermEvents { } } +impl Default for CrosstermEvents { + fn default() -> Self { + Self::new() + } +} + impl EventStream for CrosstermEvents { fn next(&self) -> Result, mpsc::RecvError> { self.rx.recv() diff --git a/applications/tari_console_wallet/src/wallet_modes.rs b/applications/tari_console_wallet/src/wallet_modes.rs index 9f2d5fe82a..a95f5c4888 100644 --- a/applications/tari_console_wallet/src/wallet_modes.rs +++ b/applications/tari_console_wallet/src/wallet_modes.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#![allow(dead_code, unused)] + use std::{fs, io::Stdout, path::PathBuf}; use clap::Parser; From ce35b656b84d69d2b56f07e3b1e84d0c3279466f Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Wed, 19 Oct 2022 14:05:55 +0200 Subject: [PATCH 16/21] chore: merge development into feature-dan (#4815) * fix: batch rewind operations (#4752) Description --- Split rewind DbTx into smaller pieces. How Has This Been Tested? --- I did rewind on 20000+ (empty) blocks. * fix: fix config.toml bug (#4780) Description --- The base node errored when reading the `block_sync_trigger = 5` setting ``` ExitError { exit_code: ConfigError, details: Some("Invalid value for `base_node`: unknown field `block_sync_trigger`, expected one of `override_from`, `unconfirmed_pool`, `reorg_pool`, `service`") } ``` Motivation and Context --- Reading default config settings should not cause an error How Has This Been Tested? --- System level testing * fix(p2p/liveness): remove fallible unwrap (#4784) Description --- Removed stray unwrap in liveness service Motivation and Context --- Caused a base node to panic in stress test conditions. ``` thread 'tokio-runtime-worker' panicked at 'called `Result::unwrap()` on an `Err` value: DhtOutboundError(RequesterReplyChannelClosed)', base_layer\p2p\src\services\liveness\service.rs:164:71 ``` How Has This Been Tested? --- Tests pass * fix(tari-script): use tari script encoding for execution stack serde de/serialization (#4791) Description --- - Uses tari script encoding (equivalent to consensus encoding) for `ExecutionStack` serde impl - Rename as_bytes to to_bytes as per rust convention. - adds migration to fix execution stack encoding in db Motivation and Context --- Resolves #4790 How Has This Been Tested? --- Added test to alert if breaking changes occur with serde serialization for execution stack. 
Manual testing in progress * feat: optimize transaction service queries (#4775) Description --- Transaction service sql db queries must handle `DieselError(DatabaseError(__Unknown, "database is locked"))`. This PR attempts to remove situations where that error may occur under highly busy async circumstances, specifically: - Combine find and update/write type queries into one. - Add sql transactions around complex tasks. _**Note:** Partial resolution for #4731._ Motivation and Context --- See above. How Has This Been Tested? --- - Passed unit tests. - Passed cucumber tests. - ~~**TODO:**~~ System level tests under stress conditions. * feat: move nonce to first in sha hash (#4778) Description --- This moves the nonce to the front of the hashing order when hashing for the sha3 difficulty. This is done so that mining cannot cache most of the header and only load the nonce in. This forces the miner to hash the complete header each time the nonce changes. Motivation and Context --- Fixes: #4767 How Has This Been Tested? --- Unit tests all pass. * fix(dht): remove some invalid saf failure cases (#4787) Description --- - Ignores nanos for `stored_at` field in StoredMessages - Uses direct u32 <-> i32 conversion - Improve error message if attempting to store an expired message - Discard expired messages immediately - Debug log when remote client closes the connection in RPC server Motivation and Context --- - Nano conversion will fail when >= 2_000_000_000, nanos are not important to preserve so we ignore them (set to zero) - u32 to/from i32 conversion does not lose any data as both are 32-bit, only used as i32 in the database - 'The message was not valid for store and forward' occurs if the message has expired, this PR uses a more descriptive error message for this specific case.
- Expired messages should be discarded immediately - Early close "errors" on the rpc server simply indicate that the client went away, which is expected and not something that the server controls, and so is logged at debug level How Has This Been Tested? --- Manually, * v0.38.6 * fix(core): only resize db if migration is required (#4792) Description --- Adds conditional to only increase database size if migration is required Motivation and Context --- A new database (cucumber, functional tests) has no inputs and so migration is not required. Ref #4791 How Has This Been Tested? --- * fix(miner): clippy error (#4793) Description --- Removes unused function in miner Motivation and Context --- Clippy How Has This Been Tested? --- No clippy error * test: remove cucumber tests, simplify others (#4794) Description --- * remove auto update tests from cucumber * rename some tests to be prefixed with `test_` * simplified two cucumber tests by removing steps Motivation and Context --- The auto update tests have an external dependency, which makes it hard to test reliably. They were marked as broken, so I rather removed them. There were two steps in the `list_height` and `list_headers` tests that created base nodes. Upon inspection of the logs, these base nodes never synced to the height of 5 and were not checked in the test, so were pretty useless and just slowed the test down How Has This Been Tested? --- npm test * v0.38.7 * feat: add deepsource config * fix(core): periodically commit large transaction in prune_to_height (#4805) * fix(comms/rpc): measures client-side latency to first message received (#4817) * fix(core): increase sync timeouts (#4800) Co-authored-by: Cayle Sharrock * feat: add multisig script that returns aggregate of signed public keys (#4742) Description --- Added an `m-of-n` multisig TariScript that returns the aggregate public key of the signatories if successful and fails otherwise. 
This is useful if the aggregate public key of the signatories is also the script public key, where signatories would work together to create an aggregate script signature using their individual script private keys. Motivation and Context --- To enhance the practicality of the `m-of-n` multisig TariScript. How Has This Been Tested? --- Unit tests Co-Authored-By: SW van Heerden swvheerden@gmail.com * feat(comms): adds periodic socket-level liveness checks (#4819) Description --- - adds socket-level liveness checks - adds configuration to enable liveness checks (currently enabled by default in base node, disabled in wallet) - update status line to display liveness status Motivation and Context --- Allows us to gain visibility on the base latency of the transport without including overhead of the noise socket and yamux How Has This Been Tested? --- Manually * fix(core): dont request full non-tip block if block is empty (#4802) Description --- - checks for edge-case which prevents an unnecessary full candidate block request when block is empty. Motivation and Context --- A full block request for empty block is not necessary as we already have all the information required to construct the candidate block. This check was missing from the branch where the candidate block is not the next tip block. How Has This Been Tested? 
--- Co-authored-by: Martin Stefcek <35243812+Cifko@users.noreply.github.com> Co-authored-by: Hansie Odendaal <39146854+hansieodendaal@users.noreply.github.com> Co-authored-by: SW van Heerden Co-authored-by: stringhandler Co-authored-by: CjS77 --- .circleci/config.yml | 3 + .deepsource.toml | 10 + Cargo.lock | 46 +- applications/tari_app_grpc/Cargo.toml | 2 +- .../src/conversions/transaction_input.rs | 4 +- .../src/conversions/transaction_output.rs | 2 +- .../src/conversions/unblinded_output.rs | 4 +- applications/tari_app_utilities/Cargo.toml | 2 +- applications/tari_base_node/Cargo.toml | 2 +- applications/tari_base_node/src/bootstrap.rs | 8 +- .../src/commands/command/add_peer.rs | 5 +- .../src/commands/command/ban_peer.rs | 5 +- .../src/commands/command/dial_peer.rs | 2 +- .../src/commands/command/get_peer.rs | 6 +- .../src/commands/command/list_connections.rs | 6 +- .../src/commands/command/list_peers.rs | 2 +- .../src/commands/command/mod.rs | 12 +- .../commands/command/reset_offline_peers.rs | 3 +- .../src/commands/command/status.rs | 17 +- .../src/commands/command/unban_all_peers.rs | 5 +- .../src/commands/status_line.rs | 6 +- .../src/grpc/base_node_grpc_server.rs | 4 +- applications/tari_console_wallet/Cargo.toml | 2 +- .../tari_merge_mining_proxy/Cargo.toml | 2 +- applications/tari_miner/Cargo.toml | 2 +- applications/tari_miner/src/difficulty.rs | 34 +- base_layer/common_types/Cargo.toml | 2 +- base_layer/core/Cargo.toml | 2 +- .../comms_interface/inbound_handlers.rs | 7 + base_layer/core/src/base_node/sync/config.rs | 4 +- .../src/chain_storage/blockchain_database.rs | 73 +- .../core/src/chain_storage/lmdb_db/lmdb.rs | 48 + .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 131 ++- .../core/src/consensus/consensus_constants.rs | 77 +- .../consensus/consensus_encoding/script.rs | 4 +- base_layer/core/src/proof_of_work/sha3_pow.rs | 19 +- base_layer/core/src/proto/transaction.rs | 8 +- .../transaction_input.rs | 8 +- .../proto/transaction_sender.rs | 2 +- 
base_layer/core/tests/block_validation.rs | 1 + .../chain_storage_tests/chain_backend.rs | 4 +- .../chain_storage_tests/chain_storage.rs | 80 +- base_layer/key_manager/Cargo.toml | 2 +- base_layer/mmr/Cargo.toml | 2 +- base_layer/p2p/Cargo.toml | 2 +- base_layer/p2p/src/config.rs | 10 +- base_layer/p2p/src/initialization.rs | 3 +- .../p2p/src/services/liveness/service.rs | 2 +- base_layer/service_framework/Cargo.toml | 2 +- base_layer/tari_mining_helper_ffi/Cargo.toml | 2 +- base_layer/wallet/Cargo.toml | 2 +- base_layer/wallet/src/config.rs | 1 + .../storage/sqlite_db/mod.rs | 217 +++-- .../storage/sqlite_db/new_output_sql.rs | 4 +- .../transaction_service/storage/database.rs | 4 +- .../transaction_service/storage/sqlite_db.rs | 891 +++++++++++------- base_layer/wallet/tests/contacts_service.rs | 1 + base_layer/wallet/tests/wallet.rs | 2 + base_layer/wallet_ffi/Cargo.toml | 2 +- base_layer/wallet_ffi/src/lib.rs | 1 + changelog.md | 48 + common/Cargo.toml | 2 +- common/config/presets/c_base_node.toml | 4 +- common/config/presets/d_console_wallet.toml | 2 + common_sqlite/Cargo.toml | 2 +- comms/core/Cargo.toml | 2 +- comms/core/src/builder/comms_node.rs | 13 +- comms/core/src/builder/mod.rs | 6 + comms/core/src/connection_manager/dialer.rs | 49 +- comms/core/src/connection_manager/listener.rs | 41 +- comms/core/src/connection_manager/liveness.rs | 133 ++- comms/core/src/connection_manager/manager.rs | 46 +- comms/core/src/connection_manager/mod.rs | 2 + .../tests/listener_dialer.rs | 6 +- .../core/src/connection_manager/wire_mode.rs | 12 +- comms/core/src/protocol/identity.rs | 8 +- comms/core/src/protocol/rpc/client/mod.rs | 25 +- comms/core/src/protocol/rpc/server/error.rs | 13 +- comms/core/src/protocol/rpc/server/mod.rs | 16 +- comms/core/src/test_utils/transport.rs | 4 +- comms/core/src/tor/control_client/client.rs | 4 +- comms/core/src/transports/dns/mod.rs | 1 + comms/core/src/transports/dns/tor.rs | 4 +- comms/core/src/transports/memory.rs | 16 +- 
comms/core/src/transports/mod.rs | 4 +- comms/core/src/transports/socks.rs | 18 +- comms/core/src/transports/tcp.rs | 8 +- comms/core/src/transports/tcp_with_tor.rs | 6 +- comms/dht/Cargo.toml | 2 +- comms/dht/src/dht.rs | 16 + comms/dht/src/envelope.rs | 2 +- .../store_forward/database/stored_message.rs | 10 +- comms/dht/src/store_forward/error.rs | 10 +- comms/dht/src/store_forward/message.rs | 7 +- .../dht/src/store_forward/saf_handler/task.rs | 17 +- comms/dht/src/store_forward/store.rs | 8 +- comms/rpc_macros/Cargo.toml | 2 +- infrastructure/derive/Cargo.toml | 2 +- infrastructure/shutdown/Cargo.toml | 2 +- infrastructure/storage/Cargo.toml | 2 +- infrastructure/storage/tests/lmdb.rs | 16 +- infrastructure/tari_script/src/lib.rs | 2 +- infrastructure/tari_script/src/op_codes.rs | 36 +- infrastructure/tari_script/src/script.rs | 80 +- infrastructure/tari_script/src/serde.rs | 107 ++- infrastructure/tari_script/src/stack.rs | 60 +- infrastructure/test_utils/Cargo.toml | 2 +- integration_tests/config/config.toml | 380 -------- integration_tests/cucumber.js | 5 +- .../features/BaseNodeAutoUpdate.feature | 15 - .../features/BaseNodeConnectivity.feature | 6 +- .../features/WalletAutoUpdate.feature | 15 - integration_tests/helpers/config.js | 7 +- integration_tests/package-lock.json | 74 ++ package-lock.json | 2 +- 115 files changed, 1881 insertions(+), 1317 deletions(-) create mode 100644 .deepsource.toml delete mode 100644 integration_tests/config/config.toml delete mode 100644 integration_tests/features/BaseNodeAutoUpdate.feature delete mode 100644 integration_tests/features/WalletAutoUpdate.feature diff --git a/.circleci/config.yml b/.circleci/config.yml index 41bbab02f5..3161fe743a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,6 +31,9 @@ commands: - run: name: Build miner command: cargo build --release --bin tari_miner + - run: + name: Build wallet FFI + command: cargo build --release --package tari_wallet_ffi - run: name: Run cucumber 
scenarios no_output_timeout: 20m diff --git a/.deepsource.toml b/.deepsource.toml new file mode 100644 index 0000000000..7219beb3ba --- /dev/null +++ b/.deepsource.toml @@ -0,0 +1,10 @@ +version = 1 + + +[[analyzers]] +name = "rust" +enabled = true + + [analyzers.meta] + msrv = "stable" + diff --git a/Cargo.lock b/Cargo.lock index 85179a2a51..af22725f8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4594,7 +4594,7 @@ dependencies = [ [[package]] name = "tari_app_grpc" -version = "0.38.5" +version = "0.38.7" dependencies = [ "argon2 0.4.1", "base64 0.13.0", @@ -4619,7 +4619,7 @@ dependencies = [ [[package]] name = "tari_app_utilities" -version = "0.38.5" +version = "0.38.7" dependencies = [ "clap 3.2.22", "config", @@ -4641,7 +4641,7 @@ dependencies = [ [[package]] name = "tari_base_node" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "async-trait", @@ -4742,7 +4742,7 @@ dependencies = [ [[package]] name = "tari_common" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "blake2 0.9.2", @@ -4770,7 +4770,7 @@ dependencies = [ [[package]] name = "tari_common_sqlite" -version = "0.38.5" +version = "0.38.7" dependencies = [ "diesel", "log", @@ -4779,7 +4779,7 @@ dependencies = [ [[package]] name = "tari_common_types" -version = "0.38.5" +version = "0.38.7" dependencies = [ "base64 0.13.0", "digest 0.9.0", @@ -4795,7 +4795,7 @@ dependencies = [ [[package]] name = "tari_comms" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "async-trait", @@ -4845,7 +4845,7 @@ dependencies = [ [[package]] name = "tari_comms_dht" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -4891,7 +4891,7 @@ dependencies = [ [[package]] name = "tari_comms_rpc_macros" -version = "0.38.5" +version = "0.38.7" dependencies = [ "futures 0.3.24", "proc-macro2", @@ -4906,7 +4906,7 @@ dependencies = [ [[package]] name = "tari_console_wallet" -version = "0.38.5" +version = "0.38.7" dependencies = [ "base64 0.13.0", 
"bitflags 1.3.2", @@ -4956,7 +4956,7 @@ dependencies = [ [[package]] name = "tari_core" -version = "0.38.5" +version = "0.38.7" dependencies = [ "async-trait", "bincode", @@ -5044,7 +5044,7 @@ dependencies = [ [[package]] name = "tari_key_manager" -version = "0.38.5" +version = "0.38.7" dependencies = [ "argon2 0.2.4", "arrayvec 0.7.2", @@ -5091,7 +5091,7 @@ dependencies = [ [[package]] name = "tari_merge_mining_proxy" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "bincode", @@ -5143,7 +5143,7 @@ dependencies = [ [[package]] name = "tari_miner" -version = "0.38.5" +version = "0.38.7" dependencies = [ "base64 0.13.0", "bufstream", @@ -5179,7 +5179,7 @@ dependencies = [ [[package]] name = "tari_mining_helper_ffi" -version = "0.38.5" +version = "0.38.7" dependencies = [ "hex", "libc", @@ -5196,7 +5196,7 @@ dependencies = [ [[package]] name = "tari_mmr" -version = "0.38.5" +version = "0.38.7" dependencies = [ "bincode", "blake2 0.9.2", @@ -5215,7 +5215,7 @@ dependencies = [ [[package]] name = "tari_p2p" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "bytes 0.5.6", @@ -5272,7 +5272,7 @@ dependencies = [ [[package]] name = "tari_service_framework" -version = "0.38.5" +version = "0.38.7" dependencies = [ "anyhow", "async-trait", @@ -5289,7 +5289,7 @@ dependencies = [ [[package]] name = "tari_shutdown" -version = "0.38.5" +version = "0.38.7" dependencies = [ "futures 0.3.24", "tokio", @@ -5297,7 +5297,7 @@ dependencies = [ [[package]] name = "tari_storage" -version = "0.38.5" +version = "0.38.7" dependencies = [ "bincode", "lmdb-zero", @@ -5311,7 +5311,7 @@ dependencies = [ [[package]] name = "tari_test_utils" -version = "0.38.5" +version = "0.38.7" dependencies = [ "futures 0.3.24", "futures-test", @@ -5338,7 +5338,7 @@ dependencies = [ [[package]] name = "tari_wallet" -version = "0.38.5" +version = "0.38.7" dependencies = [ "argon2 0.2.4", "async-trait", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "tari_wallet_ffi" 
-version = "0.38.5" +version = "0.38.7" dependencies = [ "cbindgen 0.24.3", "chrono", diff --git a/applications/tari_app_grpc/Cargo.toml b/applications/tari_app_grpc/Cargo.toml index 002eefa59c..25fbb05cbd 100644 --- a/applications/tari_app_grpc/Cargo.toml +++ b/applications/tari_app_grpc/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "This crate is to provide a single source for all cross application grpc files and conversions to and from tari::core" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/applications/tari_app_grpc/src/conversions/transaction_input.rs b/applications/tari_app_grpc/src/conversions/transaction_input.rs index 0a42d1d61d..a4e346728f 100644 --- a/applications/tari_app_grpc/src/conversions/transaction_input.rs +++ b/applications/tari_app_grpc/src/conversions/transaction_input.rs @@ -119,8 +119,8 @@ impl TryFrom for grpc::TransactionInput { script: input .script() .map_err(|_| "Non-compact Transaction input should contain script".to_string())? 
- .as_bytes(), - input_data: input.input_data.as_bytes(), + .to_bytes(), + input_data: input.input_data.to_bytes(), script_signature, sender_offset_public_key: input .sender_offset_public_key() diff --git a/applications/tari_app_grpc/src/conversions/transaction_output.rs b/applications/tari_app_grpc/src/conversions/transaction_output.rs index af9afd989c..8a037d8ef6 100644 --- a/applications/tari_app_grpc/src/conversions/transaction_output.rs +++ b/applications/tari_app_grpc/src/conversions/transaction_output.rs @@ -85,7 +85,7 @@ impl From for grpc::TransactionOutput { features: Some(output.features.into()), commitment: Vec::from(output.commitment.as_bytes()), range_proof: Vec::from(output.proof.as_bytes()), - script: output.script.as_bytes(), + script: output.script.to_bytes(), sender_offset_public_key: output.sender_offset_public_key.as_bytes().to_vec(), metadata_signature: Some(grpc::ComSignature { public_nonce_commitment: Vec::from(output.metadata_signature.public_nonce().as_bytes()), diff --git a/applications/tari_app_grpc/src/conversions/unblinded_output.rs b/applications/tari_app_grpc/src/conversions/unblinded_output.rs index d49153c35b..18c8dab78c 100644 --- a/applications/tari_app_grpc/src/conversions/unblinded_output.rs +++ b/applications/tari_app_grpc/src/conversions/unblinded_output.rs @@ -41,8 +41,8 @@ impl From for grpc::UnblindedOutput { value: u64::from(output.value), spending_key: output.spending_key.as_bytes().to_vec(), features: Some(output.features.into()), - script: output.script.as_bytes(), - input_data: output.input_data.as_bytes(), + script: output.script.to_bytes(), + input_data: output.input_data.to_bytes(), script_private_key: output.script_private_key.as_bytes().to_vec(), sender_offset_public_key: output.sender_offset_public_key.as_bytes().to_vec(), metadata_signature: Some(grpc::ComSignature { diff --git a/applications/tari_app_utilities/Cargo.toml b/applications/tari_app_utilities/Cargo.toml index 3ada64c646..4eca3252b4 100644 --- 
a/applications/tari_app_utilities/Cargo.toml +++ b/applications/tari_app_utilities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_app_utilities" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" diff --git a/applications/tari_base_node/Cargo.toml b/applications/tari_base_node/Cargo.toml index 2ff991ef58..cd717a4cc0 100644 --- a/applications/tari_base_node/Cargo.toml +++ b/applications/tari_base_node/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari full base node implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/applications/tari_base_node/src/bootstrap.rs b/applications/tari_base_node/src/bootstrap.rs index 97d1c24643..d2975aaa22 100644 --- a/applications/tari_base_node/src/bootstrap.rs +++ b/applications/tari_base_node/src/bootstrap.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{cmp, str::FromStr, sync::Arc}; +use std::{cmp, str::FromStr, sync::Arc, time::Duration}; use log::*; use tari_app_utilities::{consts, identity_management, identity_management::load_from_json}; @@ -106,6 +106,12 @@ where B: BlockchainBackend + 'static .map_err(|e| ExitError::new(ExitCode::ConfigError, e))?; p2p_config.transport.tor.identity = tor_identity; + // TODO: This should probably be disabled in future and have it optionally set/unset in the config - this check + // does allow MITM/ISP/tor router to connect this node's IP to a destination IP/onion address. + // Specifically, "pingpong" text is periodically sent on an unencrypted socket allowing anyone observing + // the traffic to recognise the sending IP address as almost certainly a tari node. 
+ p2p_config.listener_liveness_check_interval = Some(Duration::from_secs(15)); + let mut handles = StackBuilder::new(self.interrupt_signal) .add_initializer(P2pInitializer::new( p2p_config.clone(), diff --git a/applications/tari_base_node/src/commands/command/add_peer.rs b/applications/tari_base_node/src/commands/command/add_peer.rs index 50cf190716..f5c2a74bd7 100644 --- a/applications/tari_base_node/src/commands/command/add_peer.rs +++ b/applications/tari_base_node/src/commands/command/add_peer.rs @@ -44,7 +44,8 @@ pub struct ArgsAddPeer { impl HandleCommand for CommandContext { async fn handle_command(&mut self, args: ArgsAddPeer) -> Result<(), Error> { let public_key = args.public_key.into(); - if self.peer_manager.exists(&public_key).await { + let peer_manager = self.comms.peer_manager(); + if peer_manager.exists(&public_key).await { return Err(anyhow!("Peer with public key '{}' already exists", public_key)); } let node_id = NodeId::from_public_key(&public_key); @@ -57,7 +58,7 @@ impl HandleCommand for CommandContext { vec![], String::new(), ); - self.peer_manager.add_peer(peer).await?; + peer_manager.add_peer(peer).await?; println!("Peer with node id '{}'was added to the base node.", node_id); Ok(()) } diff --git a/applications/tari_base_node/src/commands/command/ban_peer.rs b/applications/tari_base_node/src/commands/command/ban_peer.rs index 7de10c7e33..0b13742740 100644 --- a/applications/tari_base_node/src/commands/command/ban_peer.rs +++ b/applications/tari_base_node/src/commands/command/ban_peer.rs @@ -80,13 +80,14 @@ impl CommandContext { if self.base_node_identity.node_id() == &node_id { Err(ArgsError::BanSelf.into()) } else if must_ban { - self.connectivity + self.comms + .connectivity() .ban_peer_until(node_id.clone(), duration, "UI manual ban".to_string()) .await?; println!("Peer was banned in base node."); Ok(()) } else { - self.peer_manager.unban_peer(&node_id).await?; + self.comms.peer_manager().unban_peer(&node_id).await?; println!("Peer ban was 
removed from base node."); Ok(()) } diff --git a/applications/tari_base_node/src/commands/command/dial_peer.rs b/applications/tari_base_node/src/commands/command/dial_peer.rs index d7dcbd8815..b808c936e3 100644 --- a/applications/tari_base_node/src/commands/command/dial_peer.rs +++ b/applications/tari_base_node/src/commands/command/dial_peer.rs @@ -48,7 +48,7 @@ impl HandleCommand for CommandContext { impl CommandContext { /// Function to process the dial-peer command pub async fn dial_peer(&self, dest_node_id: NodeId) -> Result<(), Error> { - let connectivity = self.connectivity.clone(); + let connectivity = self.comms.connectivity(); task::spawn(async move { let start = Instant::now(); println!("☎️ Dialing peer..."); diff --git a/applications/tari_base_node/src/commands/command/get_peer.rs b/applications/tari_base_node/src/commands/command/get_peer.rs index 91c78d114f..545bfc2748 100644 --- a/applications/tari_base_node/src/commands/command/get_peer.rs +++ b/applications/tari_base_node/src/commands/command/get_peer.rs @@ -63,7 +63,8 @@ enum ArgsError { impl CommandContext { pub async fn get_peer(&self, partial: Vec, original_str: String) -> Result<(), Error> { - let peers = self.peer_manager.find_all_starts_with(&partial).await?; + let peer_manager = self.comms.peer_manager(); + let peers = peer_manager.find_all_starts_with(&partial).await?; let peer = { if let Some(peer) = peers.into_iter().next() { peer @@ -71,8 +72,7 @@ impl CommandContext { let pk = parse_emoji_id_or_public_key(&original_str).ok_or_else(|| ArgsError::NoPeerMatching { original_str: original_str.clone(), })?; - let peer = self - .peer_manager + let peer = peer_manager .find_by_public_key(&pk) .await? 
.ok_or(ArgsError::NoPeerMatching { original_str })?; diff --git a/applications/tari_base_node/src/commands/command/list_connections.rs b/applications/tari_base_node/src/commands/command/list_connections.rs index dcef31f483..8771123457 100644 --- a/applications/tari_base_node/src/commands/command/list_connections.rs +++ b/applications/tari_base_node/src/commands/command/list_connections.rs @@ -53,9 +53,9 @@ impl CommandContext { "User Agent", "Info", ]); + let peer_manager = self.comms.peer_manager(); for conn in conns { - let peer = self - .peer_manager + let peer = peer_manager .find_by_node_id(conn.peer_node_id()) .await .expect("Unexpected peer database error") @@ -105,7 +105,7 @@ impl CommandContext { impl CommandContext { /// Function to process the list-connections command pub async fn list_connections(&mut self) -> Result<(), Error> { - let conns = self.connectivity.get_active_connections().await?; + let conns = self.comms.connectivity().get_active_connections().await?; let (mut nodes, mut clients) = conns .into_iter() .partition::, _>(|a| a.peer_features().is_node()); diff --git a/applications/tari_base_node/src/commands/command/list_peers.rs b/applications/tari_base_node/src/commands/command/list_peers.rs index 7587b28e3e..bb7ea82cf3 100644 --- a/applications/tari_base_node/src/commands/command/list_peers.rs +++ b/applications/tari_base_node/src/commands/command/list_peers.rs @@ -54,7 +54,7 @@ impl CommandContext { _ => false, }) } - let peers = self.peer_manager.perform_query(query).await?; + let peers = self.comms.peer_manager().perform_query(query).await?; let num_peers = peers.len(); println!(); let mut table = Table::new(); diff --git a/applications/tari_base_node/src/commands/command/mod.rs b/applications/tari_base_node/src/commands/command/mod.rs index b928ff7b0f..e2ca78b1b6 100644 --- a/applications/tari_base_node/src/commands/command/mod.rs +++ b/applications/tari_base_node/src/commands/command/mod.rs @@ -65,9 +65,9 @@ use 
async_trait::async_trait; use clap::{CommandFactory, FromArgMatches, Parser, Subcommand}; use strum::{EnumVariantNames, VariantNames}; use tari_comms::{ - connectivity::ConnectivityRequester, - peer_manager::{Peer, PeerManager, PeerManagerError, PeerQuery}, + peer_manager::{Peer, PeerManagerError, PeerQuery}, protocol::rpc::RpcServerHandle, + CommsNode, NodeIdentity, }; use tari_comms_dht::{DhtDiscoveryRequester, MetricsCollectorHandle}; @@ -155,8 +155,7 @@ pub struct CommandContext { dht_metrics_collector: MetricsCollectorHandle, rpc_server: RpcServerHandle, base_node_identity: Arc, - peer_manager: Arc, - connectivity: ConnectivityRequester, + comms: CommsNode, liveness: LivenessHandle, node_service: LocalNodeCommsInterface, mempool_service: LocalMempoolService, @@ -176,8 +175,7 @@ impl CommandContext { dht_metrics_collector: ctx.base_node_dht().metrics_collector(), rpc_server: ctx.rpc_server(), base_node_identity: ctx.base_node_identity(), - peer_manager: ctx.base_node_comms().peer_manager(), - connectivity: ctx.base_node_comms().connectivity(), + comms: ctx.base_node_comms().clone(), liveness: ctx.liveness(), node_service: ctx.local_node(), mempool_service: ctx.local_mempool(), @@ -297,7 +295,7 @@ impl HandleCommand for CommandContext { impl CommandContext { async fn fetch_banned_peers(&self) -> Result, PeerManagerError> { - let pm = &self.peer_manager; + let pm = self.comms.peer_manager(); let query = PeerQuery::new().select_where(|p| p.is_banned()); pm.perform_query(query).await } diff --git a/applications/tari_base_node/src/commands/command/reset_offline_peers.rs b/applications/tari_base_node/src/commands/command/reset_offline_peers.rs index 2f780e97a1..b949aba1a4 100644 --- a/applications/tari_base_node/src/commands/command/reset_offline_peers.rs +++ b/applications/tari_base_node/src/commands/command/reset_offline_peers.rs @@ -40,7 +40,8 @@ impl HandleCommand for CommandContext { impl CommandContext { pub async fn reset_offline_peers(&self) -> Result<(), 
Error> { let num_updated = self - .peer_manager + .comms + .peer_manager() .update_each(|mut peer| { if peer.is_offline() { peer.set_offline(false); diff --git a/applications/tari_base_node/src/commands/command/status.rs b/applications/tari_base_node/src/commands/command/status.rs index f499b55059..f46a507f1e 100644 --- a/applications/tari_base_node/src/commands/command/status.rs +++ b/applications/tari_base_node/src/commands/command/status.rs @@ -27,6 +27,7 @@ use async_trait::async_trait; use chrono::{DateTime, NaiveDateTime, Utc}; use clap::Parser; use tari_app_utilities::consts; +use tari_comms::connection_manager::LivenessStatus; use tokio::time; use super::{CommandContext, HandleCommand}; @@ -47,6 +48,7 @@ impl HandleCommand for CommandContext { } impl CommandContext { + #[allow(clippy::too_many_lines)] pub async fn status(&mut self, output: StatusLineOutput) -> Result<(), Error> { let mut full_log = false; if self.last_time_full.elapsed() > Duration::from_secs(120) { @@ -102,7 +104,7 @@ impl CommandContext { status_line.add_field("Mempool", "query timed out"); }; - let conns = self.connectivity.get_active_connections().await?; + let conns = self.comms.connectivity().get_active_connections().await?; let (num_nodes, num_clients) = conns.iter().fold((0usize, 0usize), |(nodes, clients), conn| { if conn.peer_features().is_node() { (nodes + 1, clients) @@ -139,6 +141,19 @@ impl CommandContext { ); } + match self.comms.listening_info().liveness_status() { + LivenessStatus::Disabled => {}, + LivenessStatus::Checking => { + status_line.add("⏳️️"); + }, + LivenessStatus::Unreachable => { + status_line.add("‼️"); + }, + LivenessStatus::Live(latency) => { + status_line.add(format!("⚡️ {:.2?}", latency)); + }, + } + let target = "base_node::app::status"; match output { StatusLineOutput::StdOutAndLog => { diff --git a/applications/tari_base_node/src/commands/command/unban_all_peers.rs b/applications/tari_base_node/src/commands/command/unban_all_peers.rs index 
fde91d9e91..c3722dd85f 100644 --- a/applications/tari_base_node/src/commands/command/unban_all_peers.rs +++ b/applications/tari_base_node/src/commands/command/unban_all_peers.rs @@ -41,10 +41,11 @@ impl HandleCommand for CommandContext { impl CommandContext { pub async fn unban_all_peers(&self) -> Result<(), Error> { let query = PeerQuery::new().select_where(|p| p.is_banned()); - let peers = self.peer_manager.perform_query(query).await?; + let peer_manager = self.comms.peer_manager(); + let peers = peer_manager.perform_query(query).await?; let num_peers = peers.len(); for peer in peers { - if let Err(err) = self.peer_manager.unban_peer(&peer.node_id).await { + if let Err(err) = peer_manager.unban_peer(&peer.node_id).await { println!("Failed to unban peer: {}", err); } } diff --git a/applications/tari_base_node/src/commands/status_line.rs b/applications/tari_base_node/src/commands/status_line.rs index e2fa549430..188ecc6037 100644 --- a/applications/tari_base_node/src/commands/status_line.rs +++ b/applications/tari_base_node/src/commands/status_line.rs @@ -43,6 +43,10 @@ impl StatusLine { Default::default() } + pub fn add(&mut self, value: T) -> &mut Self { + self.add_field("", value) + } + pub fn add_field(&mut self, name: &'static str, value: T) -> &mut Self { self.fields.push((name, value.to_string())); self @@ -54,7 +58,7 @@ impl Display for StatusLine { write!(f, "{} ", Local::now().format("%H:%M"))?; let s = self.fields.iter().map(|(k, v)| format(k, v)).collect::>(); - write!(f, "{}", s.join(", ")) + write!(f, "{}", s.join(" ")) } } diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index e82ca58d9c..0eae5abb8f 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1675,9 +1675,9 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let sidechain_outputs = utxos 
.into_iter() .filter(|u| u.features.output_type.is_sidechain_type()) - .collect::>(); + .map(TryInto::try_into); - match sidechain_outputs.into_iter().map(TryInto::try_into).collect() { + match sidechain_outputs.collect() { Ok(outputs) => { let resp = tari_rpc::GetSideChainUtxosResponse { block_info: Some(tari_rpc::BlockInfo { diff --git a/applications/tari_console_wallet/Cargo.toml b/applications/tari_console_wallet/Cargo.toml index 8f78a9c42b..6b30325977 100644 --- a/applications/tari_console_wallet/Cargo.toml +++ b/applications/tari_console_wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_console_wallet" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" diff --git a/applications/tari_merge_mining_proxy/Cargo.toml b/applications/tari_merge_mining_proxy/Cargo.toml index d0bac48767..6fab0f4765 100644 --- a/applications/tari_merge_mining_proxy/Cargo.toml +++ b/applications/tari_merge_mining_proxy/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The Tari merge mining proxy for xmrig" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/applications/tari_miner/Cargo.toml b/applications/tari_miner/Cargo.toml index f63f4f7291..3dffbc295d 100644 --- a/applications/tari_miner/Cargo.toml +++ b/applications/tari_miner/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari miner implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/applications/tari_miner/src/difficulty.rs b/applications/tari_miner/src/difficulty.rs index d4ef569167..8e283d8348 100644 --- a/applications/tari_miner/src/difficulty.rs +++ b/applications/tari_miner/src/difficulty.rs @@ -22,9 +22,8 @@ use std::convert::TryInto; 
-use sha3::{Digest, Sha3_256}; use tari_app_grpc::tari_rpc::BlockHeader as grpc_header; -use tari_core::{blocks::BlockHeader, large_ints::U256}; +use tari_core::{blocks::BlockHeader, proof_of_work::sha3_difficulty}; use tari_utilities::epoch_time::EpochTime; use crate::errors::MinerError; @@ -34,7 +33,6 @@ pub type Difficulty = u64; #[derive(Clone)] pub struct BlockHeaderSha3 { pub header: BlockHeader, - hash_merge_mining: Sha3_256, pub hashes: u64, } @@ -43,19 +41,7 @@ impl BlockHeaderSha3 { #[allow(clippy::cast_sign_loss)] pub fn new(header: grpc_header) -> Result { let header: BlockHeader = header.try_into().map_err(MinerError::BlockHeader)?; - - let hash_merge_mining = Sha3_256::new().chain(header.mining_hash()); - - Ok(Self { - hash_merge_mining, - header, - hashes: 0, - }) - } - - #[inline] - fn get_hash_before_nonce(&self) -> Sha3_256 { - self.hash_merge_mining.clone() + Ok(Self { header, hashes: 0 }) } /// This function will update the timestamp of the header, but only if the new timestamp is greater than the current @@ -65,7 +51,6 @@ impl BlockHeaderSha3 { // should only change the timestamp if we move it forward. 
if timestamp > self.header.timestamp.as_u64() { self.header.timestamp = EpochTime::from(timestamp); - self.hash_merge_mining = Sha3_256::new().chain(self.header.mining_hash()); } } @@ -82,13 +67,7 @@ impl BlockHeaderSha3 { #[inline] pub fn difficulty(&mut self) -> Difficulty { self.hashes = self.hashes.saturating_add(1); - let hash = self - .get_hash_before_nonce() - .chain(self.header.nonce.to_le_bytes()) - .chain(self.header.pow.to_bytes()) - .finalize(); - let hash = Sha3_256::digest(&hash); - big_endian_difficulty(&hash) + sha3_difficulty(&self.header).into() } #[allow(clippy::cast_possible_wrap)] @@ -102,13 +81,6 @@ impl BlockHeaderSha3 { } } -/// This will provide the difficulty of the hash assuming the hash is big_endian -fn big_endian_difficulty(hash: &[u8]) -> Difficulty { - let scalar = U256::from_big_endian(hash); // Big endian so the hash has leading zeroes - let result = U256::MAX / scalar; - result.low_u64() -} - #[cfg(test)] pub mod test { use chrono::{DateTime, NaiveDate, Utc}; diff --git a/base_layer/common_types/Cargo.toml b/base_layer/common_types/Cargo.toml index e22cf81ef7..b19366d2b5 100644 --- a/base_layer/common_types/Cargo.toml +++ b/base_layer/common_types/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_common_types" authors = ["The Tari Development Community"] description = "Tari cryptocurrency common types" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index d97ccee92c..bd4ddd669b 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 
f1c1774015..e8f455ab44 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -489,6 +489,13 @@ where B: BlockchainBackend + 'static current_meta.best_block().to_hex(), source_peer, ); + if excess_sigs.is_empty() { + let block = BlockBuilder::new(header.version) + .with_coinbase_utxo(coinbase_output, coinbase_kernel) + .with_header(header.clone()) + .build(); + return Ok(block); + } metrics::compact_block_tx_misses(header.height).set(excess_sigs.len() as i64); let block = self.request_full_block_from_peer(source_peer, block_hash).await?; return Ok(block); diff --git a/base_layer/core/src/base_node/sync/config.rs b/base_layer/core/src/base_node/sync/config.rs index 5d3a331aae..5e11deb94f 100644 --- a/base_layer/core/src/base_node/sync/config.rs +++ b/base_layer/core/src/base_node/sync/config.rs @@ -56,13 +56,13 @@ pub struct BlockchainSyncConfig { impl Default for BlockchainSyncConfig { fn default() -> Self { Self { - initial_max_sync_latency: Duration::from_secs(20), + initial_max_sync_latency: Duration::from_secs(30), max_latency_increase: Duration::from_secs(2), ban_period: Duration::from_secs(30 * 60), short_ban_period: Duration::from_secs(60), forced_sync_peers: Default::default(), validation_concurrency: 6, - rpc_deadline: Duration::from_secs(10), + rpc_deadline: Duration::from_secs(30), } } } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index ca893c34ca..ba442780d2 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1693,18 +1693,12 @@ fn check_for_valid_height(db: &T, height: u64) -> Result<( /// Removes blocks from the db from current tip to specified height. /// Returns the blocks removed, ordered from tip to height. 
-fn rewind_to_height( - db: &mut T, - mut height: u64, -) -> Result>, ChainStorageError> { +fn rewind_to_height(db: &mut T, height: u64) -> Result>, ChainStorageError> { let last_header = db.fetch_last_header()?; - let mut txn = DbTransaction::new(); - // Delete headers let last_header_height = last_header.height; let metadata = db.fetch_chain_metadata()?; - let expected_block_hash = *metadata.best_block(); let last_block_height = metadata.height_of_longest_chain(); // We use the cmp::max value here because we'll only delete headers here and leave remaining headers to be deleted // with the whole block @@ -1727,20 +1721,20 @@ fn rewind_to_height( ); } // We might have more headers than blocks, so we first see if we need to delete the extra headers. - (0..steps_back).for_each(|h| { + for h in 0..steps_back { + let mut txn = DbTransaction::new(); info!( target: LOG_TARGET, "Rewinding headers at height {}", last_header_height - h ); txn.delete_header(last_header_height - h); - }); - + db.write(txn)?; + } // Delete blocks let mut steps_back = last_block_height.saturating_sub(height); // No blocks to remove, no need to update the best block if steps_back == 0 { - db.write(txn)?; return Ok(vec![]); } @@ -1761,22 +1755,45 @@ fn rewind_to_height( effective_pruning_horizon ); steps_back = effective_pruning_horizon; - height = 0; } - for h in 0..steps_back { + let mut txn = DbTransaction::new(); info!(target: LOG_TARGET, "Deleting block {}", last_block_height - h,); let block = fetch_block(db, last_block_height - h, false)?; let block = Arc::new(block.try_into_chain_block()?); txn.delete_block(*block.hash()); txn.delete_header(last_block_height - h); if !prune_past_horizon && !db.contains(&DbKey::OrphanBlock(*block.hash()))? { - // Because we know we will remove blocks we can't recover, this will be a destructive rewind, so we can't - // recover from this apart from resync from another peer. 
Failure here should not be common as - // this chain has a valid proof of work that has been tested at this point in time. + // Because we know we will remove blocks we can't recover, this will be a destructive rewind, so we + // can't recover from this apart from resync from another peer. Failure here + // should not be common as this chain has a valid proof of work that has been + // tested at this point in time. txn.insert_chained_orphan(block.clone()); } removed_blocks.push(block); + // Set best block to one before, to keep DB consistent. Or if we reached pruned horizon, set best block to 0. + let chain_header = db.fetch_chain_header_by_height(if prune_past_horizon && h + 1 == steps_back { + 0 + } else { + last_block_height - h - 1 + })?; + let metadata = db.fetch_chain_metadata()?; + let expected_block_hash = *metadata.best_block(); + txn.set_best_block( + chain_header.height(), + chain_header.accumulated_data().hash, + chain_header.accumulated_data().total_accumulated_difficulty, + expected_block_hash, + chain_header.timestamp(), + ); + // Update metadata + debug!( + target: LOG_TARGET, + "Updating best block to height (#{}), total accumulated difficulty: {}", + chain_header.height(), + chain_header.accumulated_data().total_accumulated_difficulty + ); + db.write(txn)?; } if prune_past_horizon { @@ -1785,6 +1802,7 @@ fn rewind_to_height( // We don't have these complete blocks, so we don't push them to the channel for further processing such as the // mempool add reorg'ed tx. 
for h in 0..(last_block_height - steps_back) { + let mut txn = DbTransaction::new(); debug!( target: LOG_TARGET, "Deleting blocks and utxos {}", @@ -1792,27 +1810,10 @@ fn rewind_to_height( ); let header = fetch_header(db, last_block_height - h - steps_back)?; txn.delete_block(header.hash()); + db.write(txn)?; } } - let chain_header = db.fetch_chain_header_by_height(height)?; - // Update metadata - debug!( - target: LOG_TARGET, - "Updating best block to height (#{}), total accumulated difficulty: {}", - chain_header.height(), - chain_header.accumulated_data().total_accumulated_difficulty - ); - - txn.set_best_block( - chain_header.height(), - chain_header.accumulated_data().hash, - chain_header.accumulated_data().total_accumulated_difficulty, - expected_block_hash, - chain_header.timestamp(), - ); - db.write(txn)?; - Ok(removed_blocks) } @@ -2419,6 +2420,10 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) txn.prune_outputs_at_positions(output_mmr_positions.to_vec()); txn.delete_all_inputs_in_block(*header.hash()); + if txn.operations().len() >= 100 { + txn.set_pruned_height(block_to_prune); + db.write(mem::take(&mut txn))?; + } } txn.set_pruned_height(target_horizon_height); diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index 75e238b088..a1bc2dcd11 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ -445,3 +445,51 @@ pub fn lmdb_clear(txn: &WriteTransaction<'_>, db: &Database) -> Result( + txn: &WriteTransaction<'_>, + db: &Database, + f: F, +) -> Result<(), ChainStorageError> +where + F: Fn(V) -> Option, + V: DeserializeOwned, + R: Serialize, +{ + let mut access = txn.access(); + let mut cursor = txn.cursor(db).map_err(|e| { + error!(target: LOG_TARGET, "Could not get read cursor from lmdb: {:?}", e); + ChainStorageError::AccessError(e.to_string()) + })?; + let iter = CursorIter::new( + MaybeOwned::Borrowed(&mut 
cursor), + &access, + |c, a| c.first(a), + Cursor::next::<[u8], [u8]>, + )?; + let items = iter + .map(|r| r.map(|(k, v)| (k.to_vec(), v.to_vec()))) + .collect::, _>>()?; + + for (key, val) in items { + // let (key, val) = row?; + let val = deserialize::(&val)?; + if let Some(ret) = f(val) { + let ret_bytes = serialize(&ret)?; + access.put(db, &key, &ret_bytes, put::Flags::empty()).map_err(|e| { + if let lmdb_zero::Error::Code(code) = &e { + if *code == lmdb_zero::error::MAP_FULL { + return ChainStorageError::DbResizeRequired; + } + } + error!( + target: LOG_TARGET, + "Could not replace value in lmdb transaction: {:?}", e + ); + ChainStorageError::AccessError(e.to_string()) + })?; + } + } + Ok(()) +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 7cbb3f1ac3..abd74ceb2f 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -313,6 +313,8 @@ impl LMDBDatabase { consensus_manager, }; + run_migrations(&db)?; + Ok(db) } @@ -2751,6 +2753,7 @@ enum MetadataKey { HorizonData, DeletedBitmap, BestBlockTimestamp, + MigrationVersion, } impl MetadataKey { @@ -2763,14 +2766,15 @@ impl MetadataKey { impl fmt::Display for MetadataKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - MetadataKey::ChainHeight => f.write_str("Current chain height"), - MetadataKey::AccumulatedWork => f.write_str("Total accumulated work"), - MetadataKey::PruningHorizon => f.write_str("Pruning horizon"), - MetadataKey::PrunedHeight => f.write_str("Effective pruned height"), - MetadataKey::BestBlock => f.write_str("Chain tip block hash"), - MetadataKey::HorizonData => f.write_str("Database info"), - MetadataKey::DeletedBitmap => f.write_str("Deleted bitmap"), - MetadataKey::BestBlockTimestamp => f.write_str("Chain tip block timestamp"), + MetadataKey::ChainHeight => write!(f, "Current chain height"), + 
MetadataKey::AccumulatedWork => write!(f, "Total accumulated work"), + MetadataKey::PruningHorizon => write!(f, "Pruning horizon"), + MetadataKey::PrunedHeight => write!(f, "Effective pruned height"), + MetadataKey::BestBlock => write!(f, "Chain tip block hash"), + MetadataKey::HorizonData => write!(f, "Database info"), + MetadataKey::DeletedBitmap => write!(f, "Deleted bitmap"), + MetadataKey::BestBlockTimestamp => write!(f, "Chain tip block timestamp"), + MetadataKey::MigrationVersion => write!(f, "Migration version"), } } } @@ -2786,6 +2790,7 @@ enum MetadataValue { HorizonData(HorizonData), DeletedBitmap(DeletedBitmap), BestBlockTimestamp(u64), + MigrationVersion(u64), } impl fmt::Display for MetadataValue { @@ -2801,6 +2806,7 @@ impl fmt::Display for MetadataValue { write!(f, "Deleted Bitmap ({} indexes)", deleted.bitmap().cardinality()) }, MetadataValue::BestBlockTimestamp(timestamp) => write!(f, "Chain tip block timestamp is {}", timestamp), + MetadataValue::MigrationVersion(n) => write!(f, "Migration version {}", n), } } } @@ -2867,3 +2873,112 @@ impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { Ok(()) } } + +fn run_migrations(db: &LMDBDatabase) -> Result<(), ChainStorageError> { + const MIGRATION_VERSION: u64 = 1; + let txn = db.read_transaction()?; + + let k = MetadataKey::MigrationVersion; + let val = lmdb_get::<_, MetadataValue>(&*txn, &db.metadata_db, &k.as_u32())?; + let n = match val { + Some(MetadataValue::MigrationVersion(n)) => n, + Some(_) | None => 0, + }; + info!( + target: LOG_TARGET, + "Blockchain database is at v{} (required version: {})", n, MIGRATION_VERSION + ); + drop(txn); + + if n < MIGRATION_VERSION { + tari_script_execution_stack_bug_migration::migrate(db)?; + info!(target: LOG_TARGET, "Migrated database to version {}", MIGRATION_VERSION); + let txn = db.write_transaction()?; + lmdb_replace( + &txn, + &db.metadata_db, + &k.as_u32(), + &MetadataValue::MigrationVersion(MIGRATION_VERSION), + )?; + txn.commit()?; + } + + Ok(()) 
+} + +// TODO: this is a temporary fix, remove +mod tari_script_execution_stack_bug_migration { + use std::mem; + + use serde::{Deserialize, Serialize}; + use tari_common_types::types::{ComSignature, PublicKey}; + use tari_crypto::ristretto::{pedersen::PedersenCommitment, RistrettoPublicKey, RistrettoSchnorr}; + use tari_script::{ExecutionStack, HashValue, ScalarValue, StackItem}; + + use super::*; + use crate::{ + chain_storage::lmdb_db::lmdb::lmdb_map_inplace, + transactions::transaction_components::{SpentOutput, TransactionInputVersion}, + }; + + pub fn migrate(db: &LMDBDatabase) -> Result<(), ChainStorageError> { + { + let txn = db.read_transaction()?; + // Only perform migration if necessary + if lmdb_len(&txn, &db.inputs_db)? == 0 { + return Ok(()); + } + } + unsafe { + LMDBStore::resize(&db.env, &LMDBConfig::new(0, 1024 * 1024 * 1024, 0))?; + } + let txn = db.write_transaction()?; + lmdb_map_inplace(&txn, &db.inputs_db, |mut v: TransactionInputRowDataV0| { + let mut items = Vec::with_capacity(v.input.input_data.items.len()); + while let Some(item) = v.input.input_data.items.pop() { + if let StackItemV0::Commitment(ref commitment) = item { + let pk = PublicKey::from_bytes(commitment.as_bytes()).unwrap(); + items.push(StackItem::PublicKey(pk)); + } else { + items.push(unsafe { mem::transmute(item) }); + } + } + let mut v = unsafe { mem::transmute::<_, TransactionInputRowData>(v) }; + v.input.input_data = ExecutionStack::new(items); + Some(v) + })?; + txn.commit()?; + Ok(()) + } + + #[derive(Debug, Serialize, Deserialize)] + pub(crate) struct TransactionInputRowDataV0 { + pub input: TransactionInputV0, + pub header_hash: HashOutput, + pub mmr_position: u32, + pub hash: HashOutput, + } + + #[derive(Debug, Serialize, Deserialize)] + pub struct TransactionInputV0 { + version: TransactionInputVersion, + spent_output: SpentOutput, + input_data: ExecutionStackV0, + script_signature: ComSignature, + } + + #[derive(Debug, Serialize, Deserialize)] + struct 
ExecutionStackV0 { + items: Vec, + } + + #[derive(Debug, Serialize, Deserialize)] + enum StackItemV0 { + Number(i64), + Hash(HashValue), + Scalar(ScalarValue), + Commitment(PedersenCommitment), + PublicKey(RistrettoPublicKey), + Signature(RistrettoSchnorr), + } +} diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index e4cbcf6708..2b4badb98a 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -517,30 +517,54 @@ impl ConsensusConstants { target_time: 200, }); let (input_version_range, output_version_range, kernel_version_range) = version_zero(); - vec![ConsensusConstants { - effective_from_height: 0, - // Todo fix after test - coinbase_lock_height: 6, - blockchain_version: 0, - valid_blockchain_version_range: 0..=0, - future_time_limit: 540, - difficulty_block_window: 90, - max_block_transaction_weight: 127_795, - median_timestamp_count: 11, - emission_initial: 18_462_816_327 * uT, - emission_decay: &ESMERALDA_DECAY_PARAMS, - emission_tail: 800 * T, - max_randomx_seed_height: 3000, - proof_of_work: algos, - faucet_value: (10 * 4000) * T, - transaction_weight: TransactionWeight::v1(), - max_script_byte_size: 2048, - input_version_range, - output_version_range, - kernel_version_range, - permitted_output_types: Self::current_permitted_output_types(), - validator_node_timeout: 50, - }] + vec![ + ConsensusConstants { + effective_from_height: 0, + coinbase_lock_height: 6, + blockchain_version: 0, + valid_blockchain_version_range: 0..=0, + future_time_limit: 540, + difficulty_block_window: 90, + max_block_transaction_weight: 127_795, + median_timestamp_count: 11, + emission_initial: 18_462_816_327 * uT, + emission_decay: &ESMERALDA_DECAY_PARAMS, + emission_tail: 800 * T, + max_randomx_seed_height: 3000, + proof_of_work: algos.clone(), + faucet_value: (10 * 4000) * T, + transaction_weight: TransactionWeight::v1(), + 
max_script_byte_size: 2048, + input_version_range: input_version_range.clone(), + output_version_range: output_version_range.clone(), + kernel_version_range: kernel_version_range.clone(), + permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 50, + }, + ConsensusConstants { + effective_from_height: 23000, + coinbase_lock_height: 6, + blockchain_version: 1, + valid_blockchain_version_range: 0..=1, + future_time_limit: 540, + difficulty_block_window: 90, + max_block_transaction_weight: 127_795, + median_timestamp_count: 11, + emission_initial: 18_462_816_327 * uT, + emission_decay: &ESMERALDA_DECAY_PARAMS, + emission_tail: 800 * T, + max_randomx_seed_height: 3000, + proof_of_work: algos, + faucet_value: (10 * 4000) * T, + transaction_weight: TransactionWeight::v1(), + max_script_byte_size: 2048, + input_version_range, + output_version_range, + kernel_version_range, + permitted_output_types: Self::current_permitted_output_types(), + validator_node_timeout: 50, + }, + ] } pub fn mainnet() -> Vec { @@ -667,6 +691,11 @@ impl ConsensusConstantsBuilder { self } + pub fn with_blockchain_version(mut self, version: u16) -> Self { + self.consensus.blockchain_version = version; + self + } + pub fn build(self) -> ConsensusConstants { self.consensus } diff --git a/base_layer/core/src/consensus/consensus_encoding/script.rs b/base_layer/core/src/consensus/consensus_encoding/script.rs index 17e8aa7dce..ea11a27c33 100644 --- a/base_layer/core/src/consensus/consensus_encoding/script.rs +++ b/base_layer/core/src/consensus/consensus_encoding/script.rs @@ -31,7 +31,7 @@ use crate::consensus::{ConsensusDecoding, ConsensusEncoding, ConsensusEncodingSi impl ConsensusEncoding for TariScript { fn consensus_encode(&self, writer: &mut W) -> Result<(), io::Error> { - self.as_bytes().consensus_encode(writer) + self.to_bytes().consensus_encode(writer) } } @@ -54,7 +54,7 @@ impl ConsensusDecoding for TariScript { impl ConsensusEncoding for ExecutionStack { fn 
consensus_encode(&self, writer: &mut W) -> Result<(), io::Error> { - self.as_bytes().consensus_encode(writer) + self.to_bytes().consensus_encode(writer) } } diff --git a/base_layer/core/src/proof_of_work/sha3_pow.rs b/base_layer/core/src/proof_of_work/sha3_pow.rs index 4b79c29fa6..fe56685dd5 100644 --- a/base_layer/core/src/proof_of_work/sha3_pow.rs +++ b/base_layer/core/src/proof_of_work/sha3_pow.rs @@ -37,12 +37,19 @@ pub fn sha3_difficulty(header: &BlockHeader) -> Difficulty { } pub fn sha3_hash(header: &BlockHeader) -> Vec { - Sha3_256::new() - .chain(header.mining_hash()) - .chain(header.nonce.to_le_bytes()) - .chain(header.pow.to_bytes()) - .finalize() - .to_vec() + let sha = Sha3_256::new(); + match header.version { + 0 => sha + .chain(header.mining_hash()) + .chain(header.nonce.to_le_bytes()) + .chain(header.pow.to_bytes()), + _ => sha + .chain(header.nonce.to_le_bytes()) + .chain(header.mining_hash()) + .chain(header.pow.to_bytes()), + } + .finalize() + .to_vec() } fn sha3_difficulty_with_hash(header: &BlockHeader) -> (Difficulty, Vec) { diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs index 700e7cb3d5..5a8b2beff3 100644 --- a/base_layer/core/src/proto/transaction.rs +++ b/base_layer/core/src/proto/transaction.rs @@ -168,7 +168,7 @@ impl TryFrom for proto::types::TransactionInput { if input.is_compact() { let output_hash = input.output_hash(); Ok(Self { - input_data: input.input_data.as_bytes(), + input_data: input.input_data.to_bytes(), script_signature: Some(input.script_signature.into()), output_hash: output_hash.to_vec(), ..Default::default() @@ -192,8 +192,8 @@ impl TryFrom for proto::types::TransactionInput { script: input .script() .map_err(|_| "Non-compact Transaction input should contain script".to_string())? 
- .as_bytes(), - input_data: input.input_data.as_bytes(), + .to_bytes(), + input_data: input.input_data.to_bytes(), script_signature: Some(input.script_signature.clone().into()), sender_offset_public_key: input .sender_offset_public_key() @@ -277,7 +277,7 @@ impl From for proto::types::TransactionOutput { features: Some(output.features.into()), commitment: Some(output.commitment.into()), range_proof: output.proof.to_vec(), - script: output.script.as_bytes(), + script: output.script.to_bytes(), sender_offset_public_key: output.sender_offset_public_key.as_bytes().to_vec(), metadata_signature: Some(output.metadata_signature.into()), covenant: output.covenant.to_bytes(), diff --git a/base_layer/core/src/transactions/transaction_components/transaction_input.rs b/base_layer/core/src/transactions/transaction_components/transaction_input.rs index 9c3e664bdf..2a48ed400a 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_input.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_input.rs @@ -270,9 +270,11 @@ impl TransactionInput { SpentOutput::OutputData { ref script, .. } => { match script.execute_with_context(&self.input_data, &context)? { StackItem::PublicKey(pubkey) => Ok(pubkey), - _ => Err(TransactionError::ScriptExecutionError( - "The script executed successfully but it did not leave a public key on the stack".to_string(), - )), + item => Err(TransactionError::ScriptExecutionError(format!( + "The script executed successfully but it did not leave a public key on the stack. 
Remaining \ + stack item was {:?}", + item + ))), } }, } diff --git a/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs b/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs index b3c4e91a34..8820bc18bf 100644 --- a/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/proto/transaction_sender.rs @@ -138,7 +138,7 @@ impl From for proto::SingleRoundSenderData { metadata: Some(sender_data.metadata.into()), message: sender_data.message, features: Some(sender_data.features.into()), - script: sender_data.script.as_bytes(), + script: sender_data.script.to_bytes(), sender_offset_public_key: sender_data.sender_offset_public_key.to_vec(), public_commitment_nonce: sender_data.public_commitment_nonce.to_vec(), covenant: sender_data.covenant.to_consensus_bytes(), diff --git a/base_layer/core/tests/block_validation.rs b/base_layer/core/tests/block_validation.rs index 9659a99b55..01037db622 100644 --- a/base_layer/core/tests/block_validation.rs +++ b/base_layer/core/tests/block_validation.rs @@ -102,6 +102,7 @@ fn test_monero_blocks() { max_difficulty: 1.into(), target_time: 200, }) + .with_blockchain_version(0) .build(); let cm = ConsensusManager::builder(network).add_consensus_constants(cc).build(); let header_validator = HeaderValidator::new(cm.clone()); diff --git a/base_layer/core/tests/chain_storage_tests/chain_backend.rs b/base_layer/core/tests/chain_storage_tests/chain_backend.rs index fcdc74b6d7..822c456eee 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_backend.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_backend.rs @@ -33,7 +33,7 @@ use tari_test_utils::paths::create_temporary_data_path; use crate::helpers::database::create_orphan_block; #[test] -fn lmdb_insert_contains_delete_and_fetch_orphan() { +fn test_lmdb_insert_contains_delete_and_fetch_orphan() { let network = Network::LocalNet; let 
consensus = ConsensusManagerBuilder::new(network).build(); let mut db = create_test_db(); @@ -63,7 +63,7 @@ fn lmdb_insert_contains_delete_and_fetch_orphan() { } #[test] -fn lmdb_file_lock() { +fn test_lmdb_file_lock() { // Create temporary test folder let temp_path = create_temporary_data_path(); diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 4fd53d9758..a69c5a71f5 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -75,7 +75,7 @@ use crate::helpers::{ }; #[test] -fn fetch_nonexistent_header() { +fn test_fetch_nonexistent_header() { let network = Network::LocalNet; let _consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); @@ -84,7 +84,7 @@ fn fetch_nonexistent_header() { } #[test] -fn insert_and_fetch_header() { +fn test_insert_and_fetch_header() { let network = Network::LocalNet; let _consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); @@ -110,7 +110,7 @@ fn insert_and_fetch_header() { } #[test] -fn insert_and_fetch_orphan() { +fn test_insert_and_fetch_orphan() { let network = Network::LocalNet; let consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_test_blockchain_db(); @@ -127,7 +127,7 @@ fn insert_and_fetch_orphan() { } #[test] -fn store_and_retrieve_block() { +fn test_store_and_retrieve_block() { let (db, blocks, _, _) = create_new_blockchain(Network::LocalNet); let hash = blocks[0].hash(); // Check the metadata @@ -144,7 +144,7 @@ fn store_and_retrieve_block() { } #[test] -fn add_multiple_blocks() { +fn test_add_multiple_blocks() { // Create new database with genesis block let network = Network::LocalNet; let consensus_manager = ConsensusManagerBuilder::new(network).build(); @@ -201,7 +201,7 @@ fn test_checkpoints() { #[test] 
#[allow(clippy::identity_op)] -fn rewind_to_height() { +fn test_rewind_to_height() { let _ = env_logger::builder().is_test(true).try_init(); let network = Network::LocalNet; let (mut db, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); @@ -277,7 +277,7 @@ fn test_coverage_chain_storage() { } #[test] -fn rewind_past_horizon_height() { +fn test_rewind_past_horizon_height() { let network = Network::LocalNet; let block0 = genesis_block::get_esmeralda_genesis_block(); let consensus_manager = ConsensusManagerBuilder::new(network).with_block(block0.clone()).build(); @@ -320,7 +320,7 @@ fn rewind_past_horizon_height() { } #[test] -fn handle_tip_reorg() { +fn test_handle_tip_reorg() { // GB --> A1 --> A2(Low PoW) [Main Chain] // \--> B2(Highest PoW) [Forked Chain] // Initially, the main chain is GB->A1->A2. B2 has a higher accumulated PoW and when B2 is added the main chain is @@ -388,7 +388,7 @@ fn handle_tip_reorg() { #[test] #[allow(clippy::identity_op)] #[allow(clippy::too_many_lines)] -fn handle_reorg() { +fn test_handle_reorg() { // GB --> A1 --> A2 --> A3 -----> A4(Low PoW) [Main Chain] // \--> B2 --> B3(?) --> B4(Medium PoW) [Forked Chain 1] // \-----> C4(Highest PoW) [Forked Chain 2] @@ -561,7 +561,7 @@ fn handle_reorg() { #[test] #[allow(clippy::too_many_lines)] -fn reorgs_should_update_orphan_tips() { +fn test_reorgs_should_update_orphan_tips() { // Create a main chain GB -> A1 -> A2 // Create an orphan chain GB -> B1 // Add a block B2 that forces a reorg to B2 @@ -810,7 +810,7 @@ fn reorgs_should_update_orphan_tips() { } #[test] -fn handle_reorg_with_no_removed_blocks() { +fn test_handle_reorg_with_no_removed_blocks() { // GB --> A1 // \--> B2 (?) --> B3) // Initially, the main chain is GB->A1 with orphaned blocks B3. 
When B2 arrives late and is @@ -883,7 +883,7 @@ fn handle_reorg_with_no_removed_blocks() { } #[test] -fn handle_reorg_failure_recovery() { +fn test_handle_reorg_failure_recovery() { // GB --> A1 --> A2 --> A3 -----> A4(Low PoW) [Main Chain] // \--> B2 --> B3(double spend - rejected by db) [Forked Chain 1] // \--> B2 --> B3'(validation failed) [Forked Chain 1] @@ -1002,7 +1002,7 @@ fn handle_reorg_failure_recovery() { } #[test] -fn store_and_retrieve_blocks() { +fn test_store_and_retrieve_blocks() { let validators = Validators::new( MockValidator::new(true), MockValidator::new(true), @@ -1064,7 +1064,7 @@ fn store_and_retrieve_blocks() { #[test] #[allow(clippy::identity_op)] -fn store_and_retrieve_blocks_from_contents() { +fn test_store_and_retrieve_blocks_from_contents() { let network = Network::LocalNet; let (mut db, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); @@ -1102,7 +1102,7 @@ fn store_and_retrieve_blocks_from_contents() { } #[test] -fn restore_metadata_and_pruning_horizon_update() { +fn test_restore_metadata_and_pruning_horizon_update() { // Perform test let validators = Validators::new( MockValidator::new(true), @@ -1177,7 +1177,7 @@ fn restore_metadata_and_pruning_horizon_update() { } static EMISSION: [u64; 2] = [10, 10]; #[test] -fn invalid_block() { +fn test_invalid_block() { let factories = CryptoFactories::default(); let network = Network::LocalNet; let consensus_constants = ConsensusConstantsBuilder::new(network) @@ -1278,7 +1278,7 @@ fn invalid_block() { } #[test] -fn orphan_cleanup_on_block_add() { +fn test_orphan_cleanup_on_block_add() { let network = Network::LocalNet; let consensus_manager = ConsensusManagerBuilder::new(network).build(); let validators = Validators::new( @@ -1345,7 +1345,7 @@ fn orphan_cleanup_on_block_add() { } #[test] -fn horizon_height_orphan_cleanup() { +fn test_horizon_height_orphan_cleanup() { let network = Network::LocalNet; let block0 = genesis_block::get_esmeralda_genesis_block(); let 
consensus_manager = ConsensusManagerBuilder::new(network).with_block(block0.clone()).build(); @@ -1405,7 +1405,7 @@ fn horizon_height_orphan_cleanup() { #[test] #[allow(clippy::too_many_lines)] -fn orphan_cleanup_on_reorg() { +fn test_orphan_cleanup_on_reorg() { // Create Main Chain let network = Network::LocalNet; let factories = CryptoFactories::default(); @@ -1541,7 +1541,7 @@ fn orphan_cleanup_on_reorg() { } #[test] -fn orphan_cleanup_delete_all_orphans() { +fn test_orphan_cleanup_delete_all_orphans() { let path = create_temporary_data_path(); let network = Network::LocalNet; let validators = Validators::new( @@ -1646,7 +1646,7 @@ fn orphan_cleanup_delete_all_orphans() { } #[test] -fn fails_validation() { +fn test_fails_validation() { let network = Network::LocalNet; let factories = CryptoFactories::default(); let consensus_constants = ConsensusConstantsBuilder::new(network).build(); @@ -1757,8 +1757,7 @@ mod malleability { // This test hightlights that the "version" field is not being included in the input hash // so a consensus change is needed for the input to include it #[test] - #[ignore] - fn version() { + fn test_version() { check_input_malleability(|block: &mut Block| { let input = &mut block.body.inputs_mut()[0]; let mod_version = match input.version { @@ -1770,7 +1769,7 @@ mod malleability { } #[test] - fn spent_output() { + fn test_spent_output() { check_input_malleability(|block: &mut Block| { // to modify the spent output, we will substitue it for a copy of a different output // we will use one of the outputs of the current transaction @@ -1791,7 +1790,7 @@ mod malleability { } #[test] - fn input_data() { + fn test_input_data() { check_input_malleability(|block: &mut Block| { block.body.inputs_mut()[0] .input_data @@ -1801,7 +1800,7 @@ mod malleability { } #[test] - fn script_signature() { + fn test_script_signature() { check_input_malleability(|block: &mut Block| { let input = &mut block.body.inputs_mut()[0]; input.script_signature = 
ComSignature::default(); @@ -1813,7 +1812,7 @@ mod malleability { use super::*; #[test] - fn version() { + fn test_version() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_version = match output.version { @@ -1825,7 +1824,7 @@ mod malleability { } #[test] - fn features() { + fn test_features() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; output.features.maturity += 1; @@ -1833,7 +1832,7 @@ mod malleability { } #[test] - fn commitment() { + fn test_commitment() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_commitment = &output.commitment + &output.commitment; @@ -1842,7 +1841,7 @@ mod malleability { } #[test] - fn proof() { + fn test_proof() { check_witness_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_proof = RangeProof::from_hex(&(output.proof.to_hex() + "00")).unwrap(); @@ -1851,10 +1850,10 @@ mod malleability { } #[test] - fn script() { + fn test_script() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; - let mut script_bytes = output.script.as_bytes(); + let mut script_bytes = output.script.to_bytes(); Opcode::PushZero.to_bytes(&mut script_bytes); let mod_script = TariScript::from_bytes(&script_bytes).unwrap(); output.script = mod_script; @@ -1864,8 +1863,7 @@ mod malleability { // This test hightlights that the "sender_offset_public_key" field is not being included in the output hash // so a consensus change is needed for the output to include it #[test] - #[ignore] - fn sender_offset_public_key() { + fn test_sender_offset_public_key() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; @@ -1876,7 +1874,7 @@ mod malleability { } #[test] - fn metadata_signature() { + fn test_metadata_signature() { check_witness_malleability(|block: &mut Block| { let output = 
&mut block.body.outputs_mut()[0]; output.metadata_signature = ComSignature::default(); @@ -1884,7 +1882,7 @@ mod malleability { } #[test] - fn covenant() { + fn test_covenant() { check_output_malleability(|block: &mut Block| { let output = &mut block.body.outputs_mut()[0]; let mod_covenant = covenant!(absolute_height(@uint(42))); @@ -1903,7 +1901,7 @@ mod malleability { // the "features" field has only a constant value at the moment, so no malleability test possible #[test] - fn fee() { + fn test_fee() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; kernel.fee += MicroTari::from(1); @@ -1911,7 +1909,7 @@ mod malleability { } #[test] - fn lock_height() { + fn test_lock_height() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; kernel.lock_height += 1; @@ -1919,7 +1917,7 @@ mod malleability { } #[test] - fn excess() { + fn test_excess() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; let mod_excess = &kernel.excess + &kernel.excess; @@ -1928,7 +1926,7 @@ mod malleability { } #[test] - fn excess_sig() { + fn test_excess_sig() { check_kernel_malleability(|block: &mut Block| { let kernel = &mut block.body.kernels_mut()[0]; // "gerate_keys" should return a group of random keys, different from the ones in the field @@ -1941,7 +1939,7 @@ mod malleability { #[allow(clippy::identity_op)] #[test] -fn fetch_deleted_position_block_hash() { +fn test_fetch_deleted_position_block_hash() { // Create Main Chain let network = Network::LocalNet; let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); diff --git a/base_layer/key_manager/Cargo.toml b/base_layer/key_manager/Cargo.toml index 75d69a695f..843b4b3d50 100644 --- a/base_layer/key_manager/Cargo.toml +++ b/base_layer/key_manager/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet key 
management" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2021" [lib] diff --git a/base_layer/mmr/Cargo.toml b/base_layer/mmr/Cargo.toml index 5774c5b1ea..7cadc4811f 100644 --- a/base_layer/mmr/Cargo.toml +++ b/base_layer/mmr/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "A Merkle Mountain Range implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/base_layer/p2p/Cargo.toml b/base_layer/p2p/Cargo.toml index 665991d18a..d6c255d2f2 100644 --- a/base_layer/p2p/Cargo.toml +++ b/base_layer/p2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_p2p" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development community"] description = "Tari base layer-specific peer-to-peer communication features" repository = "https://github.com/tari-project/tari" diff --git a/base_layer/p2p/src/config.rs b/base_layer/p2p/src/config.rs index 41cd121d99..5fb4030411 100644 --- a/base_layer/p2p/src/config.rs +++ b/base_layer/p2p/src/config.rs @@ -20,11 +20,15 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::path::{Path, PathBuf}; +use std::{ + path::{Path, PathBuf}, + time::Duration, +}; use serde::{Deserialize, Serialize}; use tari_common::{ configuration::{ + serializers, utils::{deserialize_string_or_struct, serialize_string}, StringList, }, @@ -105,6 +109,9 @@ pub struct P2pConfig { /// Liveness sessions can be used by third party tooling to determine node liveness. /// A value of 0 will disallow any liveness sessions. 
pub listener_liveness_max_sessions: usize, + /// If Some, enables periodic socket-level liveness checks + #[serde(with = "serializers::optional_seconds")] + pub listener_liveness_check_interval: Option, /// CIDR for addresses allowed to enter into liveness check mode on the listener. pub listener_liveness_allowlist_cidrs: StringList, /// User agent string for this node @@ -137,6 +144,7 @@ impl Default for P2pConfig { }, allow_test_addresses: false, listener_liveness_max_sessions: 0, + listener_liveness_check_interval: None, listener_liveness_allowlist_cidrs: StringList::default(), user_agent: String::new(), auxiliary_tcp_listener_address: None, diff --git a/base_layer/p2p/src/initialization.rs b/base_layer/p2p/src/initialization.rs index 8f6d0c2147..43c6218018 100644 --- a/base_layer/p2p/src/initialization.rs +++ b/base_layer/p2p/src/initialization.rs @@ -543,7 +543,8 @@ impl ServiceInitializer for P2pInitializer { minor_version: MINOR_NETWORK_VERSION, network_byte: self.network.as_byte(), user_agent: config.user_agent.clone(), - }); + }) + .set_liveness_check(config.listener_liveness_check_interval); if config.allow_test_addresses || config.dht.allow_test_addresses { // The default is false, so ensure that both settings are true in this case diff --git a/base_layer/p2p/src/services/liveness/service.rs b/base_layer/p2p/src/services/liveness/service.rs index def15f5116..5da92ad100 100644 --- a/base_layer/p2p/src/services/liveness/service.rs +++ b/base_layer/p2p/src/services/liveness/service.rs @@ -161,7 +161,7 @@ where match ping_pong_msg.kind().ok_or(LivenessError::InvalidPingPongType)? 
{ PingPong::Ping => { self.state.inc_pings_received(); - self.send_pong(ping_pong_msg.nonce, public_key).await.unwrap(); + self.send_pong(ping_pong_msg.nonce, public_key).await?; self.state.inc_pongs_sent(); debug!( diff --git a/base_layer/service_framework/Cargo.toml b/base_layer/service_framework/Cargo.toml index f70eb71d7a..a210101ada 100644 --- a/base_layer/service_framework/Cargo.toml +++ b/base_layer/service_framework/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_service_framework" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] description = "The Tari communication stack service framework" repository = "https://github.com/tari-project/tari" diff --git a/base_layer/tari_mining_helper_ffi/Cargo.toml b/base_layer/tari_mining_helper_ffi/Cargo.toml index 53072f6487..847a26b6c3 100644 --- a/base_layer/tari_mining_helper_ffi/Cargo.toml +++ b/base_layer/tari_mining_helper_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_mining_helper_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency miningcore C FFI bindings" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/wallet/Cargo.toml b/base_layer/wallet/Cargo.toml index cf62400004..1637d03106 100644 --- a/base_layer/wallet/Cargo.toml +++ b/base_layer/wallet/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet library" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/wallet/src/config.rs b/base_layer/wallet/src/config.rs index ca5a472efe..00df3f5729 100644 --- a/base_layer/wallet/src/config.rs +++ b/base_layer/wallet/src/config.rs @@ -123,6 +123,7 @@ impl Default for WalletConfig { fn default() -> Self { let p2p = P2pConfig { datastore_path: PathBuf::from("peer_db/wallet"), + listener_liveness_check_interval: None, 
..Default::default() }; Self { diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs index 0710212f94..45968f6708 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs @@ -391,56 +391,54 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match op { - WriteOperation::Insert(kvp) => self.insert(kvp, &conn)?, + let mut msg = "".to_string(); + let result = match op { + WriteOperation::Insert(kvp) => { + msg.push_str("Insert"); + self.insert(kvp, &conn)?; + Ok(None) + }, WriteOperation::Remove(k) => match k { DbKey::AnyOutputByCommitment(commitment) => { - // Used by coinbase when mining. - match OutputSql::find_by_commitment(&commitment.to_vec(), &conn) { - Ok(mut o) => { - o.delete(&conn)?; - self.decrypt_if_necessary(&mut o)?; - if start.elapsed().as_millis() > 0 { - trace!( - target: LOG_TARGET, - "sqlite profile - write Remove: lock {} + db_op {} = {} ms", - acquire_lock.as_millis(), - (start.elapsed() - acquire_lock).as_millis(), - start.elapsed().as_millis() - ); - } - return Ok(Some(DbValue::AnyOutput(Box::new(DbUnblindedOutput::try_from(o)?)))); - }, - Err(e) => { - match e { - OutputManagerStorageError::DieselError(DieselError::NotFound) => (), - e => return Err(e), - }; - }, - } + conn.transaction::<_, _, _>(|| { + msg.push_str("Remove"); + // Used by coinbase when mining. 
+ match OutputSql::find_by_commitment(&commitment.to_vec(), &conn) { + Ok(mut o) => { + o.delete(&conn)?; + self.decrypt_if_necessary(&mut o)?; + Ok(Some(DbValue::AnyOutput(Box::new(DbUnblindedOutput::try_from(o)?)))) + }, + Err(e) => match e { + OutputManagerStorageError::DieselError(DieselError::NotFound) => Ok(None), + e => Err(e), + }, + } + }) }, - DbKey::SpentOutput(_s) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::UnspentOutputHash(_h) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::UnspentOutput(_k) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::UnspentOutputs => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::SpentOutputs => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::InvalidOutputs => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::TimeLockedUnspentOutputs(_) => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::KnownOneSidedPaymentScripts => return Err(OutputManagerStorageError::OperationNotSupported), - DbKey::OutputsByTxIdAndStatus(_, _) => return Err(OutputManagerStorageError::OperationNotSupported), + DbKey::SpentOutput(_s) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutputHash(_h) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutput(_k) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::UnspentOutputs => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::SpentOutputs => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::InvalidOutputs => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::TimeLockedUnspentOutputs(_) => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::KnownOneSidedPaymentScripts => Err(OutputManagerStorageError::OperationNotSupported), + DbKey::OutputsByTxIdAndStatus(_, _) => Err(OutputManagerStorageError::OperationNotSupported), }, - } + 
}; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, - "sqlite profile - write Insert: lock {} + db_op {} = {} ms", + "sqlite profile - write {}: lock {} + db_op {} = {} ms", + msg, acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), start.elapsed().as_millis() ); } - Ok(None) + result } fn fetch_pending_incoming_outputs(&self) -> Result, OutputManagerStorageError> { @@ -852,50 +850,55 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &conn)?; + conn.transaction::<_, _, _>(|| { + let outputs = OutputSql::find_by_tx_id_and_encumbered(tx_id, &conn)?; - if outputs.is_empty() { - return Err(OutputManagerStorageError::ValueNotFound); - } + if outputs.is_empty() { + return Err(OutputManagerStorageError::ValueNotFound); + } - for output in &outputs { - if output.received_in_tx_id == Some(tx_id.as_i64_wrapped()) { - info!( - target: LOG_TARGET, - "Cancelling pending inbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", - output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), - output.mined_mmr_position, - tx_id - ); - output.update( - UpdateOutput { - status: Some(OutputStatus::CancelledInbound), - ..Default::default() - }, - &conn, - )?; - } else if output.spent_in_tx_id == Some(tx_id.as_i64_wrapped()) { - info!( - target: LOG_TARGET, - "Cancelling pending outbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", - output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), - output.mined_mmr_position, - tx_id - ); - output.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - spent_in_tx_id: Some(None), - // We clear these so that the output will be revalidated the next time a validation is done. 
- mined_height: Some(None), - mined_in_block: Some(None), - ..Default::default() - }, - &conn, - )?; - } else { + for output in &outputs { + if output.received_in_tx_id == Some(tx_id.as_i64_wrapped()) { + info!( + target: LOG_TARGET, + "Cancelling pending inbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", + output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), + output.mined_mmr_position, + tx_id + ); + output.update( + UpdateOutput { + status: Some(OutputStatus::CancelledInbound), + ..Default::default() + }, + &conn, + )?; + } else if output.spent_in_tx_id == Some(tx_id.as_i64_wrapped()) { + info!( + target: LOG_TARGET, + "Cancelling pending outbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", + output.commitment.as_ref().unwrap_or(&vec![]).to_hex(), + output.mined_mmr_position, + tx_id + ); + output.update( + UpdateOutput { + status: Some(OutputStatus::Unspent), + spent_in_tx_id: Some(None), + // We clear these so that the output will be revalidated the next time a validation is done. 
+ mined_height: Some(None), + mined_in_block: Some(None), + ..Default::default() + }, + &conn, + )?; + } else { + } } - } + + Ok(()) + })?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -915,17 +918,22 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let db_output = OutputSql::find_by_commitment_and_cancelled(&output.commitment.to_vec(), false, &conn)?; - db_output.update( - // Note: Only the `nonce` and `u` portion needs to be updated at this time as the `v` portion is already - // correct - UpdateOutput { - metadata_signature_nonce: Some(output.metadata_signature.public_nonce().to_vec()), - metadata_signature_u_key: Some(output.metadata_signature.u().to_vec()), - ..Default::default() - }, - &conn, - )?; + + conn.transaction::<_, OutputManagerStorageError, _>(|| { + let db_output = OutputSql::find_by_commitment_and_cancelled(&output.commitment.to_vec(), false, &conn)?; + db_output.update( + // Note: Only the `nonce` and `u` portion needs to be updated at this time as the `v` portion is + // already correct + UpdateOutput { + metadata_signature_nonce: Some(output.metadata_signature.public_nonce().to_vec()), + metadata_signature_u_key: Some(output.metadata_signature.u().to_vec()), + ..Default::default() + }, + &conn, + )?; + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -943,18 +951,23 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let output = OutputSql::find_by_commitment_and_cancelled(&commitment.to_vec(), false, &conn)?; - if OutputStatus::try_from(output.status)? 
!= OutputStatus::Invalid { - return Err(OutputManagerStorageError::ValuesNotFound); - } - output.update( - UpdateOutput { - status: Some(OutputStatus::Unspent), - ..Default::default() - }, - &conn, - )?; + conn.transaction::<_, _, _>(|| { + let output = OutputSql::find_by_commitment_and_cancelled(&commitment.to_vec(), false, &conn)?; + + if OutputStatus::try_from(output.status)? != OutputStatus::Invalid { + return Err(OutputManagerStorageError::ValuesNotFound); + } + output.update( + UpdateOutput { + status: Some(OutputStatus::Unspent), + ..Default::default() + }, + &conn, + )?; + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1417,8 +1430,8 @@ impl From for KnownOneSidedPaymentScriptSql { let script_lock_height = known_script.script_lock_height as i64; let script_hash = known_script.script_hash; let private_key = known_script.private_key.as_bytes().to_vec(); - let script = known_script.script.as_bytes().to_vec(); - let input = known_script.input.as_bytes().to_vec(); + let script = known_script.script.to_bytes().to_vec(); + let input = known_script.input.to_bytes().to_vec(); KnownOneSidedPaymentScriptSql { script_hash, private_key, diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs index d3d2561ee8..2878e54a6c 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/new_output_sql.rs @@ -83,8 +83,8 @@ impl NewOutputSql { status: status as i32, received_in_tx_id: received_in_tx_id.map(|i| i.as_u64() as i64), hash: Some(output.hash.to_vec()), - script: output.unblinded_output.script.as_bytes(), - input_data: output.unblinded_output.input_data.as_bytes(), + script: output.unblinded_output.script.to_bytes(), + input_data: output.unblinded_output.input_data.to_bytes(), script_private_key: 
output.unblinded_output.script_private_key.to_vec(), metadata: Some(output.unblinded_output.features.metadata.clone()), sender_offset_public_key: output.unblinded_output.sender_offset_public_key.to_vec(), diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index f018ba3088..8a7eec4100 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -117,7 +117,7 @@ pub trait TransactionBackend: Send + Sync + Clone { /// Mark a pending transaction direct send attempt as a success fn mark_direct_send_success(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; /// Cancel coinbase transactions at a specific block height - fn cancel_coinbase_transaction_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError>; + fn cancel_coinbase_transactions_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError>; /// Find coinbase transaction at a specific block height for a given amount fn find_coinbase_transaction_at_block_height( &self, @@ -693,7 +693,7 @@ where T: TransactionBackend + 'static &self, block_height: u64, ) -> Result<(), TransactionStorageError> { - self.db.cancel_coinbase_transaction_at_block_height(block_height) + self.db.cancel_coinbase_transactions_at_block_height(block_height) } pub fn find_coinbase_transaction_at_block_height( diff --git a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs index 92a101cadc..7d244ca817 100644 --- a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs @@ -123,44 +123,50 @@ impl TransactionServiceSqliteDatabase { fn remove(&self, key: DbKey, conn: &SqliteConnection) -> Result, TransactionStorageError> { match key { - DbKey::PendingOutboundTransaction(k) => 
match OutboundTransactionSql::find_by_cancelled(k, false, conn) { - Ok(mut v) => { - v.delete(conn)?; - self.decrypt_if_necessary(&mut v)?; - Ok(Some(DbValue::PendingOutboundTransaction(Box::new( - OutboundTransaction::try_from(v)?, - )))) - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( - TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), - ), - Err(e) => Err(e), + DbKey::PendingOutboundTransaction(k) => { + conn.transaction::<_, _, _>(|| match OutboundTransactionSql::find_by_cancelled(k, false, conn) { + Ok(mut v) => { + v.delete(conn)?; + self.decrypt_if_necessary(&mut v)?; + Ok(Some(DbValue::PendingOutboundTransaction(Box::new( + OutboundTransaction::try_from(v)?, + )))) + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( + TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), + ), + Err(e) => Err(e), + }) }, - DbKey::PendingInboundTransaction(k) => match InboundTransactionSql::find_by_cancelled(k, false, conn) { - Ok(mut v) => { - v.delete(conn)?; - self.decrypt_if_necessary(&mut v)?; - Ok(Some(DbValue::PendingInboundTransaction(Box::new( - InboundTransaction::try_from(v)?, - )))) - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( - TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), - ), - Err(e) => Err(e), + DbKey::PendingInboundTransaction(k) => { + conn.transaction::<_, _, _>(|| match InboundTransactionSql::find_by_cancelled(k, false, conn) { + Ok(mut v) => { + v.delete(conn)?; + self.decrypt_if_necessary(&mut v)?; + Ok(Some(DbValue::PendingInboundTransaction(Box::new( + InboundTransaction::try_from(v)?, + )))) + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( + TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), + ), + Err(e) => Err(e), + }) }, - DbKey::CompletedTransaction(k) => match CompletedTransactionSql::find_by_cancelled(k, false, 
conn) { - Ok(mut v) => { - v.delete(conn)?; - self.decrypt_if_necessary(&mut v)?; - Ok(Some(DbValue::CompletedTransaction(Box::new( - CompletedTransaction::try_from(v)?, - )))) - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction(k))) - }, - Err(e) => Err(e), + DbKey::CompletedTransaction(k) => { + conn.transaction::<_, _, _>(|| match CompletedTransactionSql::find_by_cancelled(k, false, conn) { + Ok(mut v) => { + v.delete(conn)?; + self.decrypt_if_necessary(&mut v)?; + Ok(Some(DbValue::CompletedTransaction(Box::new( + CompletedTransaction::try_from(v)?, + )))) + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction(k))) + }, + Err(e) => Err(e), + }) }, DbKey::PendingOutboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::PendingInboundTransactions => Err(TransactionStorageError::OperationNotSupported), @@ -169,7 +175,7 @@ impl TransactionServiceSqliteDatabase { DbKey::CancelledPendingInboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledCompletedTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledPendingOutboundTransaction(k) => { - match OutboundTransactionSql::find_by_cancelled(k, true, conn) { + conn.transaction::<_, _, _>(|| match OutboundTransactionSql::find_by_cancelled(k, true, conn) { Ok(mut v) => { v.delete(conn)?; self.decrypt_if_necessary(&mut v)?; @@ -181,10 +187,10 @@ impl TransactionServiceSqliteDatabase { TransactionStorageError::ValueNotFound(DbKey::CancelledPendingOutboundTransaction(k)), ), Err(e) => Err(e), - } + }) }, DbKey::CancelledPendingInboundTransaction(k) => { - match InboundTransactionSql::find_by_cancelled(k, true, conn) { + conn.transaction::<_, _, _>(|| match InboundTransactionSql::find_by_cancelled(k, true, conn) { Ok(mut v) => { v.delete(conn)?; 
self.decrypt_if_necessary(&mut v)?; @@ -196,7 +202,7 @@ impl TransactionServiceSqliteDatabase { TransactionStorageError::ValueNotFound(DbKey::CancelledPendingOutboundTransaction(k)), ), Err(e) => Err(e), - } + }) }, DbKey::AnyTransaction(_) => Err(TransactionStorageError::OperationNotSupported), } @@ -579,20 +585,22 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { return Err(TransactionStorageError::TransactionAlreadyExists); } - match OutboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; - self.encrypt_if_necessary(&mut completed_tx_sql)?; - v.delete(&conn)?; - completed_tx_sql.commit(&conn)?; - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound( - DbKey::PendingOutboundTransaction(tx_id), - )) - }, - Err(e) => return Err(e), - }; + let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; + self.encrypt_if_necessary(&mut completed_tx_sql)?; + + conn.transaction::<_, _, _>(|| { + match OutboundTransactionSql::complete_outbound_transaction(tx_id, &conn) { + Ok(_) => completed_tx_sql.commit(&conn)?, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + return Err(TransactionStorageError::ValueNotFound( + DbKey::PendingOutboundTransaction(tx_id), + )) + }, + Err(e) => return Err(e), + } + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -618,20 +626,22 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { return Err(TransactionStorageError::TransactionAlreadyExists); } - match InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; - self.encrypt_if_necessary(&mut completed_tx_sql)?; - v.delete(&conn)?; - completed_tx_sql.commit(&conn)?; - }, - 
Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound( - DbKey::PendingInboundTransaction(tx_id), - )) - }, - Err(e) => return Err(e), - }; + let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; + self.encrypt_if_necessary(&mut completed_tx_sql)?; + + conn.transaction::<_, _, _>(|| { + match InboundTransactionSql::complete_inbound_transaction(tx_id, &conn) { + Ok(_) => completed_tx_sql.commit(&conn)?, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + return Err(TransactionStorageError::ValueNotFound( + DbKey::PendingInboundTransaction(tx_id), + )) + }, + Err(e) => return Err(e), + }; + + Ok(()) + })?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -649,25 +659,32 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - if TransactionStatus::try_from(v.status)? == TransactionStatus::Completed { - v.update( - UpdateCompletedTransactionSql { - status: Some(TransactionStatus::Broadcast as i32), - ..Default::default() - }, - &conn, - )?; - } - }, - Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { - return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( - tx_id, - ))) - }, - Err(e) => return Err(e), - }; + conn.transaction::<_, _, _>(|| { + match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { + Ok(v) => { + // Note: This status test that does not error if the status do not match makes it inefficient + // to combine the 'find' and 'update' queries. + if TransactionStatus::try_from(v.status)? 
== TransactionStatus::Completed { + v.update( + UpdateCompletedTransactionSql { + status: Some(TransactionStatus::Broadcast as i32), + ..Default::default() + }, + &conn, + )?; + } + }, + Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { + return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( + tx_id, + ))) + }, + Err(e) => return Err(e), + } + + Ok(()) + })?; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -688,17 +705,15 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - v.reject(reason, &conn)?; - }, + match CompletedTransactionSql::reject_completed_transaction(tx_id, reason, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), - }; + } if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -719,22 +734,20 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match InboundTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.set_cancelled(cancelled, &conn)?; - }, + + match InboundTransactionSql::find_and_set_cancelled(tx_id, cancelled, &conn) { + Ok(_) => {}, Err(_) => { - match OutboundTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.set_cancelled(cancelled, &conn)?; - }, + match OutboundTransactionSql::find_and_set_cancelled(tx_id, cancelled, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); }, Err(e) => return Err(e), }; }, - }; + } + if 
start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -751,33 +764,12 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - v.update( - UpdateInboundTransactionSql { - cancelled: None, - direct_send_success: Some(1i32), - receiver_protocol: None, - send_count: None, - last_send_timestamp: None, - }, - &conn, - )?; - }, + + match InboundTransactionSql::mark_direct_send_success(tx_id, &conn) { + Ok(_) => {}, Err(_) => { - match OutboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(v) => { - v.update( - UpdateOutboundTransactionSql { - cancelled: None, - direct_send_success: Some(1i32), - sender_protocol: None, - send_count: None, - last_send_timestamp: None, - }, - &conn, - )?; - }, + match OutboundTransactionSql::mark_direct_send_success(tx_id, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); }, @@ -785,6 +777,7 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { }; }, }; + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -808,55 +801,68 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let mut inbound_txs = InboundTransactionSql::index(&conn)?; - // If the db is already encrypted then the very first output we try to encrypt will fail. - for tx in &mut inbound_txs { - // Test if this transaction is encrypted or not to avoid a double encryption. 
- let _inbound_transaction = InboundTransaction::try_from(tx.clone()).map_err(|_| { - error!( - target: LOG_TARGET, - "Could not convert Inbound Transaction from database version, it might already be encrypted" - ); - TransactionStorageError::AlreadyEncrypted - })?; - tx.encrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut inbound_txs = InboundTransactionSql::index(&conn)?; + // If the db is already encrypted then the very first output we try to encrypt will fail. + for tx in &mut inbound_txs { + // Test if this transaction is encrypted or not to avoid a double encryption. + let _inbound_transaction = InboundTransaction::try_from(tx.clone()).map_err(|_| { + error!( + target: LOG_TARGET, + "Could not convert Inbound Transaction from database version, it might already be encrypted" + ); + TransactionStorageError::AlreadyEncrypted + })?; + tx.encrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } - let mut outbound_txs = OutboundTransactionSql::index(&conn)?; - // If the db is already encrypted then the very first output we try to encrypt will fail. - for tx in &mut outbound_txs { - // Test if this transaction is encrypted or not to avoid a double encryption. 
- let _outbound_transaction = OutboundTransaction::try_from(tx.clone()).map_err(|_| { - error!( - target: LOG_TARGET, - "Could not convert Inbound Transaction from database version, it might already be encrypted" - ); - TransactionStorageError::AlreadyEncrypted - })?; - tx.encrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + Ok(()) + })?; + + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut outbound_txs = OutboundTransactionSql::index(&conn)?; + // If the db is already encrypted then the very first output we try to encrypt will fail. + for tx in &mut outbound_txs { + // Test if this transaction is encrypted or not to avoid a double encryption. + let _outbound_transaction = OutboundTransaction::try_from(tx.clone()).map_err(|_| { + error!( + target: LOG_TARGET, + "Could not convert Inbound Transaction from database version, it might already be encrypted" + ); + TransactionStorageError::AlreadyEncrypted + })?; + tx.encrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } - let mut completed_txs = CompletedTransactionSql::index(&conn)?; - // If the db is already encrypted then the very first output we try to encrypt will fail. - for tx in &mut completed_txs { - // Test if this transaction is encrypted or not to avoid a double encryption. 
- let _completed_transaction = CompletedTransaction::try_from(tx.clone()).map_err(|_| { - error!( - target: LOG_TARGET, - "Could not convert Inbound Transaction from database version, it might already be encrypted" - ); - TransactionStorageError::AlreadyEncrypted - })?; - tx.encrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + Ok(()) + })?; + + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut completed_txs = CompletedTransactionSql::index(&conn)?; + // If the db is already encrypted then the very first output we try to encrypt will fail. + for tx in &mut completed_txs { + // Test if this transaction is encrypted or not to avoid a double encryption. + let _completed_transaction = CompletedTransaction::try_from(tx.clone()).map_err(|_| { + error!( + target: LOG_TARGET, + "Could not convert Inbound Transaction from database version, it might already be encrypted" + ); + TransactionStorageError::AlreadyEncrypted + })?; + tx.encrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } + + Ok(()) + })?; (*current_cipher) = Some(cipher); + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -882,31 +888,44 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let mut inbound_txs = InboundTransactionSql::index(&conn)?; + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut inbound_txs = InboundTransactionSql::index(&conn)?; - for tx in &mut inbound_txs { - tx.decrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + for tx in &mut inbound_txs { + tx.decrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; + 
tx.update_encryption(&conn)?; + } - let mut outbound_txs = OutboundTransactionSql::index(&conn)?; + Ok(()) + })?; - for tx in &mut outbound_txs { - tx.decrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut outbound_txs = OutboundTransactionSql::index(&conn)?; - let mut completed_txs = CompletedTransactionSql::index(&conn)?; - for tx in &mut completed_txs { - tx.decrypt(&cipher) - .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; - tx.update_encryption(&conn)?; - } + for tx in &mut outbound_txs { + tx.decrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } + + Ok(()) + })?; + + conn.transaction::<_, TransactionStorageError, _>(|| { + let mut completed_txs = CompletedTransactionSql::index(&conn)?; + for tx in &mut completed_txs { + tx.decrypt(&cipher) + .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; + tx.update_encryption(&conn)?; + } + + Ok(()) + })?; // Now that all the decryption has been completed we can safely remove the cipher fully std::mem::drop((*current_cipher).take()); + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -920,15 +939,16 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - fn cancel_coinbase_transaction_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError> { + fn cancel_coinbase_transactions_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError> { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - let coinbase_txs = CompletedTransactionSql::index_coinbase_at_block_height(block_height as i64, &conn)?; - for c in &coinbase_txs { - c.reject(TxCancellationReason::AbandonedCoinbase, 
&conn)?; - } + CompletedTransactionSql::reject_coinbases_at_block_height( + block_height as i64, + TxCancellationReason::AbandonedCoinbase, + &conn, + )?; if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -977,34 +997,13 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - if let Ok(tx) = CompletedTransactionSql::find(tx_id, &conn) { - let update = UpdateCompletedTransactionSql { - send_count: Some(tx.send_count + 1), - last_send_timestamp: Some(Some(Utc::now().naive_utc())), - ..Default::default() - }; - tx.update(update, &conn)?; - } else if let Ok(tx) = OutboundTransactionSql::find(tx_id, &conn) { - let update = UpdateOutboundTransactionSql { - cancelled: None, - direct_send_success: None, - sender_protocol: None, - send_count: Some(tx.send_count + 1), - last_send_timestamp: Some(Some(Utc::now().naive_utc())), - }; - tx.update(update, &conn)?; - } else if let Ok(tx) = InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { - let update = UpdateInboundTransactionSql { - cancelled: None, - direct_send_success: None, - receiver_protocol: None, - send_count: Some(tx.send_count + 1), - last_send_timestamp: Some(Some(Utc::now().naive_utc())), - }; - tx.update(update, &conn)?; - } else { + if CompletedTransactionSql::increment_send_count(tx_id, &conn).is_err() && + OutboundTransactionSql::increment_send_count(tx_id, &conn).is_err() && + InboundTransactionSql::increment_send_count(tx_id, &conn).is_err() + { return Err(TransactionStorageError::ValuesNotFound); } + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1031,25 +1030,36 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.update_mined_height( - 
mined_height, - mined_in_block, - mined_timestamp, - num_confirmations, - is_confirmed, - &conn, - is_faux, - )?; - }, + let status = if is_confirmed { + if is_faux { + TransactionStatus::FauxConfirmed + } else { + TransactionStatus::MinedConfirmed + } + } else if is_faux { + TransactionStatus::FauxUnconfirmed + } else { + TransactionStatus::MinedUnconfirmed + }; + + match CompletedTransactionSql::update_mined_height( + tx_id, + num_confirmations, + status, + mined_height, + mined_in_block, + mined_timestamp, + &conn, + ) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), - }; + } + if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1186,17 +1196,15 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { let start = Instant::now(); let conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - match CompletedTransactionSql::find(tx_id, &conn) { - Ok(v) => { - v.set_as_unmined(&conn)?; - }, + match CompletedTransactionSql::set_as_unmined(tx_id, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), - }; + } if start.elapsed().as_millis() > 0 { trace!( target: LOG_TARGET, @@ -1285,10 +1293,8 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { fn abandon_coinbase_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { let conn = self.database_connection.get_pooled_connection()?; - match CompletedTransactionSql::find_by_cancelled(tx_id, false, &conn) { - Ok(tx) => { - tx.abandon_coinbase(&conn)?; - }, + match CompletedTransactionSql::find_and_abandon_coinbase(tx_id, &conn) { + Ok(_) => {}, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return 
Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, @@ -1390,6 +1396,68 @@ impl InboundTransactionSql { .first::(conn)?) } + pub fn mark_direct_send_success(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update( + inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))), + ) + .set(UpdateInboundTransactionSql { + cancelled: None, + direct_send_success: Some(1i32), + receiver_protocol: None, + send_count: None, + last_send_timestamp: None, + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn complete_inbound_transaction(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::delete( + inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))), + ) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn increment_send_count(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update( + inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))), + ) + .set(UpdateInboundTransactionSql { + cancelled: None, + direct_send_success: None, + receiver_protocol: None, + send_count: Some( + if let Some(value) = inbound_transactions::table + .filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(inbound_transactions::cancelled.eq(i32::from(false))) + .select(inbound_transactions::send_count) + .load::(conn)? 
+ .first() + { + value + 1 + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + }, + ), + last_send_timestamp: Some(Some(Utc::now().naive_utc())), + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(inbound_transactions::table.filter(inbound_transactions::tx_id.eq(&self.tx_id))) @@ -1421,17 +1489,23 @@ impl InboundTransactionSql { Ok(()) } - pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateInboundTransactionSql { + pub fn find_and_set_cancelled( + tx_id: TxId, + cancelled: bool, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update(inbound_transactions::table.filter(inbound_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateInboundTransactionSql { cancelled: Some(i32::from(cancelled)), direct_send_success: None, receiver_protocol: None, send_count: None, last_send_timestamp: None, - }, - conn, - ) + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { @@ -1589,6 +1663,63 @@ impl OutboundTransactionSql { .first::(conn)?) 
} + pub fn mark_direct_send_success(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update( + outbound_transactions::table + .filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(outbound_transactions::cancelled.eq(i32::from(false))), + ) + .set(UpdateOutboundTransactionSql { + cancelled: None, + direct_send_success: Some(1i32), + sender_protocol: None, + send_count: None, + last_send_timestamp: None, + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn complete_outbound_transaction(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::delete( + outbound_transactions::table + .filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(outbound_transactions::cancelled.eq(i32::from(false))), + ) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn increment_send_count(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateOutboundTransactionSql { + cancelled: None, + direct_send_success: None, + sender_protocol: None, + send_count: Some( + if let Some(value) = outbound_transactions::table + .filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(outbound_transactions::send_count) + .load::(conn)? 
+ .first() + { + value + 1 + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + }, + ), + last_send_timestamp: Some(Some(Utc::now().naive_utc())), + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { diesel::delete(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) .execute(conn) @@ -1609,17 +1740,23 @@ impl OutboundTransactionSql { Ok(()) } - pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateOutboundTransactionSql { + pub fn find_and_set_cancelled( + tx_id: TxId, + cancelled: bool, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateOutboundTransactionSql { cancelled: Some(i32::from(cancelled)), direct_send_success: None, sender_protocol: None, send_count: None, last_send_timestamp: None, - }, - conn, - ) + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { @@ -1823,6 +1960,23 @@ impl CompletedTransactionSql { .load::(conn)?) 
} + pub fn find_and_abandon_coinbase(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + let _ = diesel::update( + completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(completed_transactions::cancelled.is_null()) + .filter(completed_transactions::coinbase_block_height.is_not_null()), + ) + .set(UpdateCompletedTransactionSql { + cancelled: Some(Some(TxCancellationReason::AbandonedCoinbase as i32)), + ..Default::default() + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + pub fn find(tx_id: TxId, conn: &SqliteConnection) -> Result { Ok(completed_transactions::table .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) @@ -1847,6 +2001,70 @@ impl CompletedTransactionSql { Ok(query.first::(conn)?) } + pub fn reject_completed_transaction( + tx_id: TxId, + reason: TxCancellationReason, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + diesel::update( + completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .filter(completed_transactions::cancelled.is_null()), + ) + .set(UpdateCompletedTransactionSql { + cancelled: Some(Some(reason as i32)), + status: Some(TransactionStatus::Rejected as i32), + ..Default::default() + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn increment_send_count(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + // This query uses a sub-query to retrieve an existing value in the table + diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateCompletedTransactionSql { + send_count: Some( + if let Some(value) = completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(completed_transactions::send_count) + .load::(conn)? 
+ .first() + { + value + 1 + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + }, + ), + last_send_timestamp: Some(Some(Utc::now().naive_utc())), + ..Default::default() + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; + + Ok(()) + } + + pub fn reject_coinbases_at_block_height( + block_height: i64, + reason: TxCancellationReason, + conn: &SqliteConnection, + ) -> Result { + Ok(diesel::update( + completed_transactions::table + .filter(completed_transactions::status.eq(TransactionStatus::Coinbase as i32)) + .filter(completed_transactions::coinbase_block_height.eq(block_height)), + ) + .set(UpdateCompletedTransactionSql { + cancelled: Some(Some(reason as i32)), + status: Some(TransactionStatus::Rejected as i32), + ..Default::default() + }) + .execute(conn)?) + } + pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(completed_transactions::table.filter(completed_transactions::tx_id.eq(&self.tx_id))) @@ -1871,58 +2089,70 @@ impl CompletedTransactionSql { Ok(()) } - pub fn reject(&self, reason: TxCancellationReason, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - self.update( - UpdateCompletedTransactionSql { - cancelled: Some(Some(reason as i32)), - status: Some(TransactionStatus::Rejected as i32), - ..Default::default() - }, - conn, - )?; - - Ok(()) - } - - pub fn abandon_coinbase(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - if self.coinbase_block_height.is_none() { - return Err(TransactionStorageError::NotCoinbase); - } - - self.update( - UpdateCompletedTransactionSql { - cancelled: Some(Some(TxCancellationReason::AbandonedCoinbase as i32)), + pub fn update_mined_height( + tx_id: TxId, + num_confirmations: u64, + status: TransactionStatus, + mined_height: u64, + mined_in_block: BlockHash, + mined_timestamp: u64, + conn: &SqliteConnection, + ) -> Result<(), TransactionStorageError> { + 
diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateCompletedTransactionSql { + confirmations: Some(Some(num_confirmations as i64)), + status: Some(status as i32), + mined_height: Some(Some(mined_height as i64)), + mined_in_block: Some(Some(mined_in_block.to_vec())), + mined_timestamp: Some(NaiveDateTime::from_timestamp(mined_timestamp as i64, 0)), + // If the tx is mined, then it can't be cancelled + cancelled: None, ..Default::default() - }, - conn, - )?; + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; Ok(()) } - pub fn set_as_unmined(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { - let status = if self.coinbase_block_height.is_some() { - Some(TransactionStatus::Coinbase as i32) - } else if self.status == TransactionStatus::FauxConfirmed as i32 { - Some(TransactionStatus::FauxUnconfirmed as i32) - } else if self.status == TransactionStatus::Broadcast as i32 { - Some(TransactionStatus::Broadcast as i32) - } else { - Some(TransactionStatus::Completed as i32) - }; - - self.update( - UpdateCompletedTransactionSql { - status, + pub fn set_as_unmined(tx_id: TxId, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + // This query uses two sub-queries to retrieve existing values in the table + diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64))) + .set(UpdateCompletedTransactionSql { + status: { + if let Some(Some(_coinbase_block_height)) = completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(completed_transactions::coinbase_block_height) + .load::>(conn)? + .first() + { + Some(TransactionStatus::Coinbase as i32) + } else if let Some(status) = completed_transactions::table + .filter(completed_transactions::tx_id.eq(tx_id.as_u64() as i64)) + .select(completed_transactions::status) + .load::(conn)? 
+ .first() + { + if *status == TransactionStatus::FauxConfirmed as i32 { + Some(TransactionStatus::FauxUnconfirmed as i32) + } else if *status == TransactionStatus::Broadcast as i32 { + Some(TransactionStatus::Broadcast as i32) + } else { + Some(TransactionStatus::Completed as i32) + } + } else { + return Err(TransactionStorageError::DieselError(DieselError::NotFound)); + } + }, mined_in_block: Some(None), mined_height: Some(None), confirmations: Some(None), // Turns out it should not be cancelled cancelled: Some(None), ..Default::default() - }, - conn, - )?; + }) + .execute(conn) + .num_rows_affected_or_not_found(1)?; // Ideally the outputs should be marked unmined here as well, but because of the separation of classes, // that will be done in the outputs service. @@ -1941,45 +2171,6 @@ impl CompletedTransactionSql { Ok(()) } - - pub fn update_mined_height( - &self, - mined_height: u64, - mined_in_block: BlockHash, - mined_timestamp: u64, - num_confirmations: u64, - is_confirmed: bool, - conn: &SqliteConnection, - is_faux: bool, - ) -> Result<(), TransactionStorageError> { - let status = if is_confirmed { - if is_faux { - TransactionStatus::FauxConfirmed as i32 - } else { - TransactionStatus::MinedConfirmed as i32 - } - } else if is_faux { - TransactionStatus::FauxUnconfirmed as i32 - } else { - TransactionStatus::MinedUnconfirmed as i32 - }; - - self.update( - UpdateCompletedTransactionSql { - confirmations: Some(Some(num_confirmations as i64)), - status: Some(status), - mined_height: Some(Some(mined_height as i64)), - mined_in_block: Some(Some(mined_in_block.to_vec())), - mined_timestamp: Some(NaiveDateTime::from_timestamp(mined_timestamp as i64, 0)), - // If the tx is mined, then it can't be cancelled - cancelled: None, - ..Default::default() - }, - conn, - )?; - - Ok(()) - } } impl Encryptable for CompletedTransactionSql { @@ -2240,6 +2431,7 @@ mod test { InboundTransactionSql, OutboundTransactionSql, TransactionServiceSqliteDatabase, + 
UpdateCompletedTransactionSql, }, }, util::encryption::Encryptable, @@ -2517,16 +2709,10 @@ mod test { .unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); - InboundTransactionSql::try_from(inbound_tx1.clone()) - .unwrap() - .set_cancelled(true, &conn) - .unwrap(); + InboundTransactionSql::find_and_set_cancelled(inbound_tx1.tx_id, true, &conn).unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_ok()); - InboundTransactionSql::try_from(inbound_tx1.clone()) - .unwrap() - .set_cancelled(false, &conn) - .unwrap(); + InboundTransactionSql::find_and_set_cancelled(inbound_tx1.tx_id, false, &conn).unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_ok()); OutboundTransactionSql::try_from(outbound_tx1.clone()) @@ -2535,16 +2721,10 @@ mod test { .unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); - OutboundTransactionSql::try_from(outbound_tx1.clone()) - .unwrap() - .set_cancelled(true, &conn) - .unwrap(); + OutboundTransactionSql::find_and_set_cancelled(outbound_tx1.tx_id, true, &conn).unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_ok()); - OutboundTransactionSql::try_from(outbound_tx1.clone()) - .unwrap() - .set_cancelled(false, &conn) - .unwrap(); + OutboundTransactionSql::find_and_set_cancelled(outbound_tx1.tx_id, false, &conn).unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_ok()); @@ -2556,7 +2736,14 @@ mod test { 
assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, true, &conn).is_err()); CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() - .reject(TxCancellationReason::Unknown, &conn) + .update( + UpdateCompletedTransactionSql { + cancelled: Some(Some(TxCancellationReason::Unknown as i32)), + status: Some(TransactionStatus::Rejected as i32), + ..Default::default() + }, + &conn, + ) .unwrap(); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, false, &conn).is_err()); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, true, &conn).is_ok()); diff --git a/base_layer/wallet/tests/contacts_service.rs b/base_layer/wallet/tests/contacts_service.rs index e31f5e5cd4..a37dba5e1c 100644 --- a/base_layer/wallet/tests/contacts_service.rs +++ b/base_layer/wallet/tests/contacts_service.rs @@ -98,6 +98,7 @@ pub fn setup_contacts_service( user_agent: "tari/test-wallet".to_string(), rpc_max_simultaneous_sessions: 0, rpc_max_sessions_per_peer: 0, + listener_liveness_check_interval: None, }; let peer_message_subscription_factory = Arc::new(subscription_factory); let shutdown = Shutdown::new(); diff --git a/base_layer/wallet/tests/wallet.rs b/base_layer/wallet/tests/wallet.rs index a0cae8e830..0041d92b90 100644 --- a/base_layer/wallet/tests/wallet.rs +++ b/base_layer/wallet/tests/wallet.rs @@ -145,6 +145,7 @@ async fn create_wallet( auxiliary_tcp_listener_address: None, rpc_max_simultaneous_sessions: 0, rpc_max_sessions_per_peer: 0, + listener_liveness_check_interval: None, }; let sql_database_path = comms_config @@ -679,6 +680,7 @@ async fn test_import_utxo() { auxiliary_tcp_listener_address: None, rpc_max_simultaneous_sessions: 0, rpc_max_sessions_per_peer: 0, + listener_liveness_check_interval: None, }; let config = WalletConfig { p2p: comms_config, diff --git a/base_layer/wallet_ffi/Cargo.toml b/base_layer/wallet_ffi/Cargo.toml index 66bc653af3..1ce077c8bc 100644 --- a/base_layer/wallet_ffi/Cargo.toml +++ 
b/base_layer/wallet_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet C FFI bindings" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 0b31851f5a..c0679ec359 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -3919,6 +3919,7 @@ pub unsafe extern "C" fn comms_config_create( user_agent: format!("tari/mobile_wallet/{}", env!("CARGO_PKG_VERSION")), rpc_max_simultaneous_sessions: 0, rpc_max_sessions_per_peer: 0, + listener_liveness_check_interval: None, }; Box::into_raw(Box::new(config)) diff --git a/changelog.md b/changelog.md index 1c8d231881..16b56371ef 100644 --- a/changelog.md +++ b/changelog.md @@ -2,6 +2,54 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+### [0.38.7](https://github.com/tari-project/tari/compare/v0.38.6...v0.38.7) (2022-10-11) + + +### Bug Fixes + +* **core:** only resize db if migration is required ([#4792](https://github.com/tari-project/tari/issues/4792)) ([4811a57](https://github.com/tari-project/tari/commit/4811a5772665af4e3b9007ccadedfc651e1d232e)) +* **miner:** clippy error ([#4793](https://github.com/tari-project/tari/issues/4793)) ([734db22](https://github.com/tari-project/tari/commit/734db22bbdd36b5371aa9c70f4342bb0d3c2f3a4)) + +### [0.38.6](https://github.com/tari-project/tari/compare/v0.38.5...v0.38.6) (2022-10-11) + + +### Features + +* **base-node:** add client connection count to status line ([#4774](https://github.com/tari-project/tari/issues/4774)) ([8339b1d](https://github.com/tari-project/tari/commit/8339b1de1bace96671d8eba0cf309adb9f78014a)) +* move nonce to first in sha hash ([#4778](https://github.com/tari-project/tari/issues/4778)) ([054a314](https://github.com/tari-project/tari/commit/054a314f015ab7a3f1e571f3ee0c7a58ad0ebb5a)) +* remove dalek ng ([#4769](https://github.com/tari-project/tari/issues/4769)) ([953b0b7](https://github.com/tari-project/tari/commit/953b0b7cfc371467e7d15e933e79c8d07712f666)) + + +### Bug Fixes + +* batch rewind operations ([#4752](https://github.com/tari-project/tari/issues/4752)) ([79d3c47](https://github.com/tari-project/tari/commit/79d3c47a86bc37be0117b33c869f9e04df068384)) +* **ci:** fix client path for nodejs ([#4765](https://github.com/tari-project/tari/issues/4765)) ([c7b5e68](https://github.com/tari-project/tari/commit/c7b5e68b400c79040f2dd92ee1cc779224e463ee)) +* **core:** only resize db if migration is required ([#4792](https://github.com/tari-project/tari/issues/4792)) ([4811a57](https://github.com/tari-project/tari/commit/4811a5772665af4e3b9007ccadedfc651e1d232e)) +* **dht:** remove some invalid saf failure cases ([#4787](https://github.com/tari-project/tari/issues/4787)) 
([86b4d94](https://github.com/tari-project/tari/commit/86b4d9437f87cb31ed922ff7a7dc73e7fe29eb69)) +* fix config.toml bug ([#4780](https://github.com/tari-project/tari/issues/4780)) ([f6043c1](https://github.com/tari-project/tari/commit/f6043c1f03f33a34e2612516ffca8a589e319001)) +* **miner:** clippy error ([#4793](https://github.com/tari-project/tari/issues/4793)) ([734db22](https://github.com/tari-project/tari/commit/734db22bbdd36b5371aa9c70f4342bb0d3c2f3a4)) +* **p2p/liveness:** remove fallible unwrap ([#4784](https://github.com/tari-project/tari/issues/4784)) ([e59be99](https://github.com/tari-project/tari/commit/e59be99401fc4b50f1b4f5a6a16948959e5c56a1)) +* **tari-script:** use tari script encoding for execution stack serde de/serialization ([#4791](https://github.com/tari-project/tari/issues/4791)) ([c62f7eb](https://github.com/tari-project/tari/commit/c62f7eb6c5b6b4336c7351bd89cb3a700fde1bb2)) + +### [0.38.6](https://github.com/tari-project/tari/compare/v0.38.5...v0.38.6) (2022-10-11) + + +### Features + +* **base-node:** add client connection count to status line ([#4774](https://github.com/tari-project/tari/issues/4774)) ([8339b1d](https://github.com/tari-project/tari/commit/8339b1de1bace96671d8eba0cf309adb9f78014a)) +* move nonce to first in sha hash ([#4778](https://github.com/tari-project/tari/issues/4778)) ([054a314](https://github.com/tari-project/tari/commit/054a314f015ab7a3f1e571f3ee0c7a58ad0ebb5a)) +* remove dalek ng ([#4769](https://github.com/tari-project/tari/issues/4769)) ([953b0b7](https://github.com/tari-project/tari/commit/953b0b7cfc371467e7d15e933e79c8d07712f666)) + + +### Bug Fixes + +* batch rewind operations ([#4752](https://github.com/tari-project/tari/issues/4752)) ([79d3c47](https://github.com/tari-project/tari/commit/79d3c47a86bc37be0117b33c869f9e04df068384)) +* **ci:** fix client path for nodejs ([#4765](https://github.com/tari-project/tari/issues/4765)) 
([c7b5e68](https://github.com/tari-project/tari/commit/c7b5e68b400c79040f2dd92ee1cc779224e463ee)) +* **dht:** remove some invalid saf failure cases ([#4787](https://github.com/tari-project/tari/issues/4787)) ([86b4d94](https://github.com/tari-project/tari/commit/86b4d9437f87cb31ed922ff7a7dc73e7fe29eb69)) +* fix config.toml bug ([#4780](https://github.com/tari-project/tari/issues/4780)) ([f6043c1](https://github.com/tari-project/tari/commit/f6043c1f03f33a34e2612516ffca8a589e319001)) +* **p2p/liveness:** remove fallible unwrap ([#4784](https://github.com/tari-project/tari/issues/4784)) ([e59be99](https://github.com/tari-project/tari/commit/e59be99401fc4b50f1b4f5a6a16948959e5c56a1)) +* **tari-script:** use tari script encoding for execution stack serde de/serialization ([#4791](https://github.com/tari-project/tari/issues/4791)) ([c62f7eb](https://github.com/tari-project/tari/commit/c62f7eb6c5b6b4336c7351bd89cb3a700fde1bb2)) + ### [0.38.5](https://github.com/tari-project/tari/compare/v0.38.4...v0.38.5) (2022-10-03) diff --git a/common/Cargo.toml b/common/Cargo.toml index 61350b5cb9..9bdb4ee1d8 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [features] diff --git a/common/config/presets/c_base_node.toml b/common/config/presets/c_base_node.toml index 26a5ae09c1..ca3c7aa19c 100644 --- a/common/config/presets/c_base_node.toml +++ b/common/config/presets/c_base_node.toml @@ -118,7 +118,7 @@ track_reorgs = true # The maximum number of transactions to sync in a single sync session Default: 10_000 #service.initial_sync_max_transactions = 10_000 # The maximum number of blocks added via sync or re-org to triggering a sync -#block_sync_trigger = 5 +#service.block_sync_trigger = 5 [base_node.state_machine] # The initial max sync latency. 
If a peer fails to stream a header/block within this deadline another sync peer will be @@ -178,6 +178,8 @@ track_reorgs = true # CIDR for addresses allowed to enter into liveness check mode on the listener. #listener_liveness_allowlist_cidrs = [] +# Enables periodic socket-level liveness checks. Default: Disabled +listener_liveness_check_interval = 15 # User agent string for this node #user_agent = "" diff --git a/common/config/presets/d_console_wallet.toml b/common/config/presets/d_console_wallet.toml index a44929a546..c479f95f75 100644 --- a/common/config/presets/d_console_wallet.toml +++ b/common/config/presets/d_console_wallet.toml @@ -201,6 +201,8 @@ event_channel_size = 3500 # CIDR for addresses allowed to enter into liveness check mode on the listener. #listener_liveness_allowlist_cidrs = [] +# Enables periodic socket-level liveness checks. Default: Disabled +# listener_liveness_check_interval = 15 # User agent string for this node #user_agent = "" diff --git a/common_sqlite/Cargo.toml b/common_sqlite/Cargo.toml index 0101cba1bf..55213455e9 100644 --- a/common_sqlite/Cargo.toml +++ b/common_sqlite/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_common_sqlite" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet library" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/comms/core/Cargo.toml b/comms/core/Cargo.toml index bfab6a6a11..c1684e6b47 100644 --- a/comms/core/Cargo.toml +++ b/comms/core/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/comms/core/src/builder/comms_node.rs b/comms/core/src/builder/comms_node.rs index 48e2da083f..1279e97d1b 100644 --- a/comms/core/src/builder/comms_node.rs +++ 
b/comms/core/src/builder/comms_node.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{iter, sync::Arc}; +use std::{iter, sync::Arc, time::Duration}; use log::*; use tari_shutdown::ShutdownSignal; @@ -125,6 +125,12 @@ impl UnspawnedCommsNode { self } + /// Set to true to enable self liveness checking for the configured public address + pub fn set_liveness_check(mut self, interval: Option) -> Self { + self.builder = self.builder.set_liveness_check(interval); + self + } + /// Spawn a new node using the specified [Transport](crate::transports::Transport). pub async fn spawn_with_transport(self, transport: TTransport) -> Result where @@ -317,6 +323,11 @@ impl CommsNode { self.listening_info.bind_address() } + /// Return [ListenerInfo] + pub fn listening_info(&self) -> &ListenerInfo { + &self.listening_info + } + /// Return the Ip/Tcp address that this node is listening on pub fn hidden_service(&self) -> Option<&tor::HiddenService> { self.hidden_service.as_ref() diff --git a/comms/core/src/builder/mod.rs b/comms/core/src/builder/mod.rs index 4d4695859c..1975665809 100644 --- a/comms/core/src/builder/mod.rs +++ b/comms/core/src/builder/mod.rs @@ -265,6 +265,12 @@ impl CommsBuilder { self } + /// Enable and set interval for self-liveness checks, or None to disable it (default) + pub fn set_liveness_check(mut self, check_interval: Option) -> Self { + self.connection_manager_config.liveness_self_check_interval = check_interval; + self + } + fn make_peer_manager(&mut self) -> Result, CommsBuilderError> { let file_lock = self.peer_storage_file_lock.take(); diff --git a/comms/core/src/connection_manager/dialer.rs b/comms/core/src/connection_manager/dialer.rs index 195139762d..c3154816b8 100644 --- a/comms/core/src/connection_manager/dialer.rs +++ b/comms/core/src/connection_manager/dialer.rs @@ -531,31 
+531,32 @@ where dial_state.peer().node_id.short_str() ); - let dial_fut = async move { - let mut socket = transport.dial(address.clone()).await.map_err(|err| { - ConnectionManagerError::TransportError { - address: address.to_string(), - details: err.to_string(), - } - })?; - debug!( - target: LOG_TARGET, - "Socket established on '{}'. Performing noise upgrade protocol", address - ); - - socket - .write(&[network_byte]) + let dial_fut = + async move { + let mut socket = transport.dial(address).await.map_err(|err| { + ConnectionManagerError::TransportError { + address: address.to_string(), + details: err.to_string(), + } + })?; + debug!( + target: LOG_TARGET, + "Socket established on '{}'. Performing noise upgrade protocol", address + ); + + socket + .write(&[network_byte]) + .await + .map_err(|_| ConnectionManagerError::WireFormatSendFailed)?; + + let noise_socket = time::timeout( + Duration::from_secs(40), + noise_config.upgrade_socket(socket, ConnectionDirection::Outbound), + ) .await - .map_err(|_| ConnectionManagerError::WireFormatSendFailed)?; - - let noise_socket = time::timeout( - Duration::from_secs(40), - noise_config.upgrade_socket(socket, ConnectionDirection::Outbound), - ) - .await - .map_err(|_| ConnectionManagerError::NoiseProtocolTimeout)??; - Result::<_, ConnectionManagerError>::Ok(noise_socket) - }; + .map_err(|_| ConnectionManagerError::NoiseProtocolTimeout)??; + Result::<_, ConnectionManagerError>::Ok(noise_socket) + }; pin_mut!(dial_fut); let either = future::select(dial_fut, cancel_signal.clone()).await; diff --git a/comms/core/src/connection_manager/listener.rs b/comms/core/src/connection_manager/listener.rs index bf58dddbf8..3df50b8696 100644 --- a/comms/core/src/connection_manager/listener.rs +++ b/comms/core/src/connection_manager/listener.rs @@ -36,7 +36,7 @@ use log::*; use tari_shutdown::{oneshot_trigger, oneshot_trigger::OneshotTrigger, ShutdownSignal}; use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, - sync::mpsc, 
+ sync::{mpsc, watch}, time, }; use tokio_stream::StreamExt; @@ -53,7 +53,7 @@ use super::{ use crate::{ bounded_executor::BoundedExecutor, connection_manager::{ - liveness::LivenessSession, + liveness::{LivenessCheck, LivenessSession, LivenessStatus}, metrics, wire_mode::{WireMode, LIVENESS_WIRE_MODE}, }, @@ -83,12 +83,12 @@ pub struct PeerListener { node_identity: Arc, our_supported_protocols: Vec, liveness_session_count: Arc, - on_listening: OneshotTrigger>, + on_listening: OneshotTrigger), ConnectionManagerError>>, } impl PeerListener where - TTransport: Transport + Send + Sync + 'static, + TTransport: Transport + Clone + Send + Sync + 'static, TTransport::Output: AsyncRead + AsyncWrite + Send + Unpin + 'static, { pub fn new( @@ -121,7 +121,10 @@ where /// in binding the listener socket // This returns an impl Future and is not async because we want to exclude &self from the future so that it has a // 'static lifetime as well as to flatten the oneshot result for ergonomics - pub fn on_listening(&self) -> impl Future> + 'static { + pub fn on_listening( + &self, + ) -> impl Future), ConnectionManagerError>> + 'static + { let signal = self.on_listening.to_signal(); signal.map(|r| r.ok_or(ConnectionManagerError::ListenerOneshotCancelled)?) } @@ -132,7 +135,7 @@ where self } - pub async fn listen(self) -> Result { + pub async fn listen(self) -> Result<(Multiaddr, watch::Receiver), ConnectionManagerError> { let on_listening = self.on_listening(); runtime::current().spawn(self.run()); on_listening.await @@ -145,7 +148,9 @@ where Ok((mut inbound, address)) => { info!(target: LOG_TARGET, "Listening for peer connections on '{}'", address); - self.on_listening.broadcast(Ok(address)); + let liveness_watch = self.spawn_liveness_check(); + + self.on_listening.broadcast(Ok((address, liveness_watch))); loop { tokio::select! 
{ @@ -229,6 +234,21 @@ where }); } + fn spawn_liveness_check(&self) -> watch::Receiver { + match self.config.liveness_self_check_interval { + Some(interval) => LivenessCheck::spawn( + self.transport.clone(), + self.node_identity.public_address(), + interval, + self.shutdown_signal.clone(), + ), + None => { + let (_, rx) = watch::channel(LivenessStatus::Disabled); + rx + }, + } + } + async fn spawn_listen_task(&self, mut socket: TTransport::Output, peer_addr: Multiaddr) { let node_identity = self.node_identity.clone(); let peer_manager = self.peer_manager.clone(); @@ -295,8 +315,9 @@ where let _result = socket.shutdown().await; }, Ok(WireMode::Liveness) => { - if liveness_session_count.load(Ordering::SeqCst) > 0 && - Self::is_address_in_liveness_cidr_range(&peer_addr, &config.liveness_cidr_allowlist) + if config.liveness_self_check_interval.is_some() || + (liveness_session_count.load(Ordering::SeqCst) > 0 && + Self::is_address_in_liveness_cidr_range(&peer_addr, &config.liveness_cidr_allowlist)) { debug!( target: LOG_TARGET, @@ -430,7 +451,7 @@ where let bind_address = self.bind_address.clone(); debug!(target: LOG_TARGET, "Attempting to listen on {}", bind_address); self.transport - .listen(bind_address.clone()) + .listen(&bind_address) .await .map_err(|err| ConnectionManagerError::ListenerError { address: bind_address.to_string(), diff --git a/comms/core/src/connection_manager/liveness.rs b/comms/core/src/connection_manager/liveness.rs index 39870dcf60..cf73f2a06f 100644 --- a/comms/core/src/connection_manager/liveness.rs +++ b/comms/core/src/connection_manager/liveness.rs @@ -20,14 +20,27 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::future::Future; +use std::{ + future::Future, + time::{Duration, Instant}, +}; -use futures::StreamExt; -use tokio::io::{AsyncRead, AsyncWrite}; +use futures::{future, SinkExt, StreamExt}; +use log::*; +use multiaddr::Multiaddr; +use tari_shutdown::ShutdownSignal; +use tokio::{ + io::{AsyncRead, AsyncWrite, AsyncWriteExt}, + sync::watch, + time, +}; use tokio_util::codec::{Framed, LinesCodec, LinesCodecError}; +use crate::{connection_manager::wire_mode::WireMode, transports::Transport}; + /// Max line length accepted by the liveness session. const MAX_LINE_LENGTH: usize = 50; +const LOG_TARGET: &str = "comms::connection_manager::liveness"; /// Echo server for liveness checks pub struct LivenessSession { @@ -49,6 +62,120 @@ where TSocket: AsyncRead + AsyncWrite + Unpin } } +#[derive(Debug, Clone, Copy)] +pub enum LivenessStatus { + Disabled, + Checking, + Unreachable, + Live(Duration), +} + +pub struct LivenessCheck { + transport: TTransport, + address: Multiaddr, + interval: Duration, + tx_watch: watch::Sender, + shutdown_signal: ShutdownSignal, +} + +impl LivenessCheck +where + TTransport: Transport + Send + Sync + 'static, + TTransport::Output: AsyncRead + AsyncWrite + Unpin + Send, +{ + pub fn spawn( + transport: TTransport, + address: Multiaddr, + interval: Duration, + shutdown_signal: ShutdownSignal, + ) -> watch::Receiver { + let (tx_watch, rx_watch) = watch::channel(LivenessStatus::Checking); + let check = Self { + transport, + address, + interval, + tx_watch, + shutdown_signal, + }; + tokio::spawn(check.run_until_shutdown()); + rx_watch + } + + pub async fn run_until_shutdown(self) { + let shutdown_signal = self.shutdown_signal.clone(); + let run_fut = self.run(); + tokio::pin!(run_fut); + future::select(run_fut, shutdown_signal).await; + } + + pub async fn run(mut self) { + info!( + target: LOG_TARGET, + "🔌️ Starting liveness self-check with interval {:.2?}", self.interval + ); + loop { + let timer = Instant::now(); + let _ = 
self.tx_watch.send(LivenessStatus::Checking); + match self.transport.dial(&self.address).await { + Ok(mut socket) => { + info!(target: LOG_TARGET, "🔌 liveness dial took {:.2?}", timer.elapsed()); + if let Err(err) = socket.write(&[WireMode::Liveness.as_byte()]).await { + warn!(target: LOG_TARGET, "🔌️ liveness failed to write byte: {}", err); + self.tx_watch.send_replace(LivenessStatus::Unreachable); + continue; + } + let mut framed = Framed::new(socket, LinesCodec::new_with_max_length(MAX_LINE_LENGTH)); + loop { + match self.ping_pong(&mut framed).await { + Ok(Some(latency)) => { + info!(target: LOG_TARGET, "⚡️️ liveness check latency {:.2?}", latency); + self.tx_watch.send_replace(LivenessStatus::Live(latency)); + }, + Ok(None) => { + info!(target: LOG_TARGET, "🔌️ liveness connection closed"); + self.tx_watch.send_replace(LivenessStatus::Unreachable); + break; + }, + Err(err) => { + warn!(target: LOG_TARGET, "🔌️ ping pong failed: {}", err); + self.tx_watch.send_replace(LivenessStatus::Unreachable); + // let _ = framed.close().await; + break; + }, + } + + time::sleep(self.interval).await; + } + }, + Err(err) => { + self.tx_watch.send_replace(LivenessStatus::Unreachable); + warn!( + target: LOG_TARGET, + "🔌️ Failed to dial public address for self check: {}", err + ); + }, + } + time::sleep(self.interval).await; + } + } + + async fn ping_pong( + &mut self, + framed: &mut Framed, + ) -> Result, LinesCodecError> { + let timer = Instant::now(); + framed.send("pingpong".to_string()).await?; + match framed.next().await { + Some(res) => { + let val = res?; + debug!(target: LOG_TARGET, "Received: {}", val); + Ok(Some(timer.elapsed())) + }, + None => Ok(None), + } + } +} + #[cfg(test)] mod test { use futures::SinkExt; diff --git a/comms/core/src/connection_manager/manager.rs b/comms/core/src/connection_manager/manager.rs index ecadf7109c..6e492ff187 100644 --- a/comms/core/src/connection_manager/manager.rs +++ b/comms/core/src/connection_manager/manager.rs @@ -28,7 +28,7 @@ 
use tari_shutdown::{Shutdown, ShutdownSignal}; use time::Duration; use tokio::{ io::{AsyncRead, AsyncWrite}, - sync::{broadcast, mpsc, oneshot}, + sync::{broadcast, mpsc, oneshot, watch}, task, time, }; @@ -43,7 +43,7 @@ use super::{ }; use crate::{ backoff::Backoff, - connection_manager::{metrics, ConnectionDirection, ConnectionId}, + connection_manager::{liveness::LivenessStatus, metrics, ConnectionDirection, ConnectionId}, multiplexing::Substream, noise::NoiseConfig, peer_manager::{NodeId, NodeIdentity, PeerManagerError}, @@ -111,6 +111,8 @@ pub struct ConnectionManagerConfig { pub liveness_max_sessions: usize, /// CIDR blocks that allowlist liveness checks. Default: Localhost only (127.0.0.1/32) pub liveness_cidr_allowlist: Vec, + /// Interval to perform self-liveness ping-pong tests. Default: None/disabled + pub liveness_self_check_interval: Option, /// If set, an additional TCP-only p2p listener will be started. This is useful for local wallet connections. /// Default: None (disabled) pub auxiliary_tcp_listener_address: Option, @@ -133,9 +135,10 @@ impl Default for ConnectionManagerConfig { // This must always be true for internal crate tests #[cfg(test)] allow_test_addresses: true, - liveness_max_sessions: 0, + liveness_max_sessions: 1, time_to_first_byte: Duration::from_secs(45), liveness_cidr_allowlist: vec![cidr::AnyIpCidr::V4("127.0.0.1/32".parse().unwrap())], + liveness_self_check_interval: None, auxiliary_tcp_listener_address: None, } } @@ -146,6 +149,7 @@ impl Default for ConnectionManagerConfig { pub struct ListenerInfo { bind_address: Multiaddr, aux_bind_address: Option, + liveness_watch: watch::Receiver, } impl ListenerInfo { @@ -159,6 +163,17 @@ impl ListenerInfo { pub fn auxiliary_bind_address(&self) -> Option<&Multiaddr> { self.aux_bind_address.as_ref() } + + /// Returns the current liveness status + pub fn liveness_status(&self) -> LivenessStatus { + *self.liveness_watch.borrow() + } + + /// Waits for liveness status to change from the last 
time the value was checked. + pub async fn liveness_status_changed(&mut self) -> Option { + self.liveness_watch.changed().await.ok()?; + Some(*self.liveness_watch.borrow()) + } } /// The actor responsible for connection management. @@ -211,8 +226,13 @@ where let aux_listener = config.auxiliary_tcp_listener_address.take().map(|addr| { info!(target: LOG_TARGET, "Starting auxiliary listener on {}", addr); + let aux_config = ConnectionManagerConfig { + // Disable liveness checks on the auxiliary listener + liveness_self_check_interval: None, + ..config.clone() + }; PeerListener::new( - config.clone(), + aux_config, addr, TcpTransport::new(), noise_config.clone(), @@ -325,21 +345,19 @@ where listener.set_supported_protocols(self.protocols.get_supported_protocols()); - let mut listener_info = ListenerInfo { - bind_address: Multiaddr::empty(), - aux_bind_address: None, - }; - match listener.listen().await { - Ok(addr) => { - listener_info.bind_address = addr; + let mut listener_info = match listener.listen().await { + Ok((bind_address, liveness_watch)) => ListenerInfo { + bind_address, + aux_bind_address: None, + liveness_watch, }, Err(err) => return Err(err), - } + }; if let Some(mut listener) = self.aux_listener.take() { listener.set_supported_protocols(self.protocols.get_supported_protocols()); - let addr = listener.listen().await?; - debug!(target: LOG_TARGET, "TCP listener bound to address {}", addr); + let (addr, _) = listener.listen().await?; + debug!(target: LOG_TARGET, "Aux TCP listener bound to address {}", addr); listener_info.aux_bind_address = Some(addr); } diff --git a/comms/core/src/connection_manager/mod.rs b/comms/core/src/connection_manager/mod.rs index 98c40dad64..6ae616669a 100644 --- a/comms/core/src/connection_manager/mod.rs +++ b/comms/core/src/connection_manager/mod.rs @@ -52,6 +52,8 @@ mod peer_connection; pub use peer_connection::{ConnectionId, NegotiatedSubstream, PeerConnection, PeerConnectionRequest}; mod liveness; +pub use 
liveness::LivenessStatus; + mod wire_mode; #[cfg(test)] diff --git a/comms/core/src/connection_manager/tests/listener_dialer.rs b/comms/core/src/connection_manager/tests/listener_dialer.rs index 5e0b26a92a..c2b71380bb 100644 --- a/comms/core/src/connection_manager/tests/listener_dialer.rs +++ b/comms/core/src/connection_manager/tests/listener_dialer.rs @@ -66,7 +66,7 @@ async fn listen() -> Result<(), Box> { shutdown.to_signal(), ); - let mut bind_addr = listener.listen().await?; + let (mut bind_addr, _) = listener.listen().await?; unpack_enum!(Protocol::Memory(port) = bind_addr.pop().unwrap()); assert!(port > 0); @@ -103,7 +103,7 @@ async fn smoke() { listener.set_supported_protocols(supported_protocols.clone()); // Get the listening address of the peer - let address = listener.listen().await.unwrap(); + let (address, _) = listener.listen().await.unwrap(); let node_identity2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let noise_config2 = NoiseConfig::new(node_identity2.clone()); @@ -207,7 +207,7 @@ async fn banned() { listener.set_supported_protocols(supported_protocols.clone()); // Get the listener address of the peer - let address = listener.listen().await.unwrap(); + let (address, _) = listener.listen().await.unwrap(); let node_identity2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); // The listener has banned the dialer peer diff --git a/comms/core/src/connection_manager/wire_mode.rs b/comms/core/src/connection_manager/wire_mode.rs index 2ae7477988..e42ff3d9b7 100644 --- a/comms/core/src/connection_manager/wire_mode.rs +++ b/comms/core/src/connection_manager/wire_mode.rs @@ -22,13 +22,23 @@ use std::convert::TryFrom; -pub(crate) const LIVENESS_WIRE_MODE: u8 = 0xa6; // E +pub(crate) const LIVENESS_WIRE_MODE: u8 = 0xa6; +#[derive(Debug, Clone, Copy)] pub enum WireMode { Comms(u8), Liveness, } +impl WireMode { + pub fn as_byte(self) -> u8 { + match self { + WireMode::Comms(byte) => byte, + WireMode::Liveness => LIVENESS_WIRE_MODE, + } + } 
+} + impl TryFrom for WireMode { type Error = (); diff --git a/comms/core/src/protocol/identity.rs b/comms/core/src/protocol/identity.rs index 60103490b3..582d723894 100644 --- a/comms/core/src/protocol/identity.rs +++ b/comms/core/src/protocol/identity.rs @@ -204,9 +204,9 @@ mod test { async fn identity_exchange() { let transport = MemoryTransport; let addr = "/memory/0".parse().unwrap(); - let (mut listener, addr) = transport.listen(addr).await.unwrap(); + let (mut listener, addr) = transport.listen(&addr).await.unwrap(); - let (out_sock, in_sock) = future::join(transport.dial(addr), listener.next()).await; + let (out_sock, in_sock) = future::join(transport.dial(&addr), listener.next()).await; let mut out_sock = out_sock.unwrap(); let (mut in_sock, _) = in_sock.unwrap().unwrap(); @@ -251,9 +251,9 @@ mod test { async fn fail_cases() { let transport = MemoryTransport; let addr = "/memory/0".parse().unwrap(); - let (mut listener, addr) = transport.listen(addr).await.unwrap(); + let (mut listener, addr) = transport.listen(&addr).await.unwrap(); - let (out_sock, in_sock) = future::join(transport.dial(addr), listener.next()).await; + let (out_sock, in_sock) = future::join(transport.dial(&addr), listener.next()).await; let mut out_sock = out_sock.unwrap(); let (mut in_sock, _) = in_sock.unwrap().unwrap(); diff --git a/comms/core/src/protocol/rpc/client/mod.rs b/comms/core/src/protocol/rpc/client/mod.rs index 257905bf64..e30d3a70b0 100644 --- a/comms/core/src/protocol/rpc/client/mod.rs +++ b/comms/core/src/protocol/rpc/client/mod.rs @@ -613,7 +613,6 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId debug!(target: LOG_TARGET, "Sending request: {}", req); - let mut timer = Some(Instant::now()); if reply.is_closed() { event!(Level::WARN, "Client request was cancelled before request was sent"); warn!( @@ -637,12 +636,14 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId let latency = metrics::request_response_latency(&self.node_id, 
&self.protocol_id); let mut metrics_timer = Some(latency.start_timer()); + let timer = Instant::now(); if let Err(err) = self.send_request(req).await { warn!(target: LOG_TARGET, "{}", err); metrics::client_errors(&self.node_id, &self.protocol_id).inc(); let _result = response_tx.send(Err(err.into())).await; return Ok(()); } + let partial_latency = timer.elapsed(); loop { if self.shutdown_signal.is_triggered() { @@ -679,9 +680,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId // let resp = match self.read_response(request_id).await { let resp = match resp_result { - Ok(resp) => { - if let Some(t) = timer.take() { - let _ = self.last_request_latency_tx.send(Some(t.elapsed())); + Ok((resp, time_to_first_msg)) => { + if let Some(t) = time_to_first_msg { + let _ = self.last_request_latency_tx.send(Some(partial_latency + t)); } event!(Level::TRACE, "Message received"); trace!( @@ -804,7 +805,10 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId Ok(()) } - async fn read_response(&mut self, request_id: u16) -> Result { + async fn read_response( + &mut self, + request_id: u16, + ) -> Result<(proto::rpc::RpcResponse, Option), RpcError> { let stream_id = self.stream_id(); let protocol_name = self.protocol_name().to_string(); @@ -822,7 +826,8 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + StreamId ); metrics::inbound_response_bytes(&self.node_id, &self.protocol_id) .observe(reader.bytes_read() as f64); - break resp; + let time_to_first_msg = reader.time_to_first_msg(); + break (resp, time_to_first_msg); }, Err(RpcError::ResponseIdDidNotMatchRequest { actual, expected }) if actual.wrapping_add(1) == request_id => @@ -888,6 +893,7 @@ struct RpcResponseReader<'a, TSubstream> { config: RpcClientConfig, request_id: u16, bytes_read: usize, + time_to_first_msg: Option, } impl<'a, TSubstream> RpcResponseReader<'a, TSubstream> @@ -899,6 +905,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin config, request_id, bytes_read: 0, + 
time_to_first_msg: None, } } @@ -906,8 +913,14 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin self.bytes_read } + pub fn time_to_first_msg(&self) -> Option { + self.time_to_first_msg + } + pub async fn read_response(&mut self) -> Result { + let timer = Instant::now(); let mut resp = self.next().await?; + self.time_to_first_msg = Some(timer.elapsed()); self.check_response(&resp)?; let mut chunk_count = 1; let mut last_chunk_flags = RpcMessageFlags::from_bits_truncate(u8::try_from(resp.flags).unwrap()); diff --git a/comms/core/src/protocol/rpc/server/error.rs b/comms/core/src/protocol/rpc/server/error.rs index ea3458b4e5..a829ff6035 100644 --- a/comms/core/src/protocol/rpc/server/error.rs +++ b/comms/core/src/protocol/rpc/server/error.rs @@ -60,8 +60,17 @@ pub enum RpcServerError { ServiceCallExceededDeadline, #[error("Stream read exceeded deadline")] ReadStreamExceededDeadline, - #[error("Early close error: {0}")] - EarlyCloseError(#[from] EarlyCloseError), + #[error("Early close: {0}")] + EarlyClose(#[from] EarlyCloseError), +} + +impl RpcServerError { + pub fn early_close_io(&self) -> Option<&io::Error> { + match self { + Self::EarlyClose(e) => e.io(), + _ => None, + } + } } impl From for RpcServerError { diff --git a/comms/core/src/protocol/rpc/server/mod.rs b/comms/core/src/protocol/rpc/server/mod.rs index 6690e31418..a05a40de4f 100644 --- a/comms/core/src/protocol/rpc/server/mod.rs +++ b/comms/core/src/protocol/rpc/server/mod.rs @@ -44,6 +44,7 @@ use std::{ convert::TryFrom, future::Future, io, + io::ErrorKind, pin::Pin, sync::Arc, task::Poll, @@ -353,7 +354,7 @@ where { Ok(_) => {}, Err(err @ RpcServerError::HandshakeError(_)) => { - debug!(target: LOG_TARGET, "{}", err); + debug!(target: LOG_TARGET, "Handshake error: {}", err); metrics::handshake_error_counter(&node_id, ¬ification.protocol).inc(); }, Err(err) => { @@ -530,7 +531,7 @@ where metrics::error_counter(&self.node_id, &self.protocol, &err).inc(); let level = match &err { RpcServerError::Io(e) => 
err_to_log_level(e), - RpcServerError::EarlyCloseError(e) => e.io().map(err_to_log_level).unwrap_or(log::Level::Error), + RpcServerError::EarlyClose(e) => e.io().map(err_to_log_level).unwrap_or(log::Level::Error), _ => log::Level::Error, }; log!( @@ -562,8 +563,10 @@ where err, ); } - error!( + let level = err.early_close_io().map(err_to_log_level).unwrap_or(log::Level::Error); + log!( target: LOG_TARGET, + level, "(peer: {}, protocol: {}) Failed to handle request: {}", self.node_id, self.protocol_name(), @@ -880,8 +883,13 @@ fn into_response(request_id: u32, result: Result) -> RpcRe } fn err_to_log_level(err: &io::Error) -> log::Level { + error!(target: LOG_TARGET, "KIND: {}", err.kind()); match err.kind() { - io::ErrorKind::BrokenPipe | io::ErrorKind::WriteZero => log::Level::Debug, + ErrorKind::ConnectionReset | + ErrorKind::ConnectionAborted | + ErrorKind::BrokenPipe | + ErrorKind::WriteZero | + ErrorKind::UnexpectedEof => log::Level::Debug, _ => log::Level::Error, } } diff --git a/comms/core/src/test_utils/transport.rs b/comms/core/src/test_utils/transport.rs index 7a770440fa..4dd4619c49 100644 --- a/comms/core/src/test_utils/transport.rs +++ b/comms/core/src/test_utils/transport.rs @@ -31,8 +31,8 @@ use crate::{ }; pub async fn build_connected_sockets() -> (Multiaddr, MemorySocket, MemorySocket) { - let (mut listener, addr) = MemoryTransport.listen("/memory/0".parse().unwrap()).await.unwrap(); - let (dial_sock, listen_sock) = future::join(MemoryTransport.dial(addr.clone()), listener.next()).await; + let (mut listener, addr) = MemoryTransport.listen(&"/memory/0".parse().unwrap()).await.unwrap(); + let (dial_sock, listen_sock) = future::join(MemoryTransport.dial(&addr), listener.next()).await; let (listen_sock, _) = listen_sock.unwrap().unwrap(); (addr, dial_sock.unwrap(), listen_sock) } diff --git a/comms/core/src/tor/control_client/client.rs b/comms/core/src/tor/control_client/client.rs index 5d0d0c4f1c..29663f7603 100644 --- 
a/comms/core/src/tor/control_client/client.rs +++ b/comms/core/src/tor/control_client/client.rs @@ -62,7 +62,7 @@ impl TorControlPortClient { ) -> Result { let mut tcp = TcpTransport::new(); tcp.set_nodelay(true); - let socket = tcp.dial(addr).await?; + let socket = tcp.dial(&addr).await?; Ok(Self::new(socket, event_tx)) } @@ -304,7 +304,7 @@ mod test { #[runtime::test] async fn connect() { let (mut listener, addr) = TcpTransport::default() - .listen("/ip4/127.0.0.1/tcp/0".parse().unwrap()) + .listen(&"/ip4/127.0.0.1/tcp/0".parse().unwrap()) .await .unwrap(); let (event_tx, _) = broadcast::channel(1); diff --git a/comms/core/src/transports/dns/mod.rs b/comms/core/src/transports/dns/mod.rs index d45f9f91ea..85b0d991bc 100644 --- a/comms/core/src/transports/dns/mod.rs +++ b/comms/core/src/transports/dns/mod.rs @@ -38,6 +38,7 @@ use crate::multiaddr::Multiaddr; pub type DnsResolverRef = Arc; +// TODO: use async_trait pub trait DnsResolver: Send + Sync + 'static { fn resolve(&self, addr: Multiaddr) -> BoxFuture<'static, Result>; } diff --git a/comms/core/src/transports/dns/tor.rs b/comms/core/src/transports/dns/tor.rs index 4663392f0c..aa9ce1c658 100644 --- a/comms/core/src/transports/dns/tor.rs +++ b/comms/core/src/transports/dns/tor.rs @@ -48,7 +48,7 @@ impl TorDnsResolver { } pub async fn connect(self) -> Result { - let mut client = connect_inner(self.socks_config.proxy_address) + let mut client = connect_inner(&self.socks_config.proxy_address) .await .map_err(DnsResolverError::ProxyConnectFailed)?; client.with_authentication(self.socks_config.authentication)?; @@ -56,7 +56,7 @@ impl TorDnsResolver { } } -async fn connect_inner(addr: Multiaddr) -> io::Result { +async fn connect_inner(addr: &Multiaddr) -> io::Result { let socket = SocksTransport::create_socks_tcp_transport().dial(addr).await?; Ok(Socks5Client::new(socket)) } diff --git a/comms/core/src/transports/memory.rs b/comms/core/src/transports/memory.rs index fc7c3552ca..4c3455966e 100644 --- 
a/comms/core/src/transports/memory.rs +++ b/comms/core/src/transports/memory.rs @@ -64,9 +64,9 @@ impl Transport for MemoryTransport { type Listener = Listener; type Output = MemorySocket; - async fn listen(&self, addr: Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { // parse_addr is not used in the async block because of a rust ICE (internal compiler error) - let port = parse_addr(&addr)?; + let port = parse_addr(addr)?; let listener = MemoryListener::bind(port)?; let actual_port = listener.local_addr(); let mut actual_addr = Multiaddr::empty(); @@ -74,9 +74,9 @@ impl Transport for MemoryTransport { Ok((Listener { inner: listener }, actual_addr)) } - async fn dial(&self, addr: Multiaddr) -> Result { + async fn dial(&self, addr: &Multiaddr) -> Result { // parse_addr is not used in the async block because of a rust ICE (internal compiler error) - let port = parse_addr(&addr)?; + let port = parse_addr(addr)?; Ok(MemorySocket::connect(port)?) 
} } @@ -140,7 +140,7 @@ mod test { async fn simple_listen_and_dial() -> Result<(), ::std::io::Error> { let t = MemoryTransport::default(); - let (listener, addr) = t.listen("/memory/0".parse().unwrap()).await?; + let (listener, addr) = t.listen(&"/memory/0".parse().unwrap()).await?; let listener = async move { let (item, _listener) = listener.into_future().await; @@ -151,7 +151,7 @@ mod test { assert_eq!(buf, b"hello world"); }; - let mut outbound = t.dial(addr).await?; + let mut outbound = t.dial(&addr).await?; let dialer = async move { outbound.write_all(b"hello world").await.unwrap(); @@ -166,10 +166,10 @@ mod test { async fn unsupported_multiaddrs() { let t = MemoryTransport::default(); - let err = t.listen("/ip4/127.0.0.1/tcp/0".parse().unwrap()).await.unwrap_err(); + let err = t.listen(&"/ip4/127.0.0.1/tcp/0".parse().unwrap()).await.unwrap_err(); assert!(matches!(err.kind(), io::ErrorKind::InvalidInput)); - let err = t.dial("/ip4/127.0.0.1/tcp/22".parse().unwrap()).await.unwrap_err(); + let err = t.dial(&"/ip4/127.0.0.1/tcp/22".parse().unwrap()).await.unwrap_err(); assert!(matches!(err.kind(), io::ErrorKind::InvalidInput)); } diff --git a/comms/core/src/transports/mod.rs b/comms/core/src/transports/mod.rs index 90e3de56de..45050f540d 100644 --- a/comms/core/src/transports/mod.rs +++ b/comms/core/src/transports/mod.rs @@ -61,8 +61,8 @@ pub trait Transport { type Listener: Stream> + Send + Unpin; /// Listen for connections on the given multiaddr - async fn listen(&self, addr: Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error>; + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error>; /// Connect (dial) to the given multiaddr - async fn dial(&self, addr: Multiaddr) -> Result; + async fn dial(&self, addr: &Multiaddr) -> Result; } diff --git a/comms/core/src/transports/socks.rs b/comms/core/src/transports/socks.rs index 754eddb0ae..aed81823b3 100644 --- a/comms/core/src/transports/socks.rs +++ 
b/comms/core/src/transports/socks.rs @@ -80,19 +80,19 @@ impl SocksTransport { async fn socks_connect( tcp: TcpTransport, - socks_config: SocksConfig, - dest_addr: Multiaddr, + socks_config: &SocksConfig, + dest_addr: &Multiaddr, ) -> io::Result { // Create a new connection to the SOCKS proxy - let socks_conn = tcp.dial(socks_config.proxy_address).await?; + let socks_conn = tcp.dial(&socks_config.proxy_address).await?; let mut client = Socks5Client::new(socks_conn); client - .with_authentication(socks_config.authentication) + .with_authentication(socks_config.authentication.clone()) .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; client - .connect(&dest_addr) + .connect(dest_addr) .await .map(|(socket, _)| socket) .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) @@ -105,18 +105,18 @@ impl Transport for SocksTransport { type Listener = ::Listener; type Output = ::Output; - async fn listen(&self, addr: Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { self.tcp_transport.listen(addr).await } - async fn dial(&self, addr: Multiaddr) -> Result { + async fn dial(&self, addr: &Multiaddr) -> Result { // Bypass the SOCKS proxy and connect to the address directly - if self.socks_config.proxy_bypass_predicate.check(&addr) { + if self.socks_config.proxy_bypass_predicate.check(addr) { debug!(target: LOG_TARGET, "SOCKS proxy bypassed for '{}'. 
Using TCP.", addr); return self.tcp_transport.dial(addr).await; } - let socket = Self::socks_connect(self.tcp_transport.clone(), self.socks_config.clone(), addr).await?; + let socket = Self::socks_connect(self.tcp_transport.clone(), &self.socks_config, addr).await?; Ok(socket) } } diff --git a/comms/core/src/transports/tcp.rs b/comms/core/src/transports/tcp.rs index aab9fd0f07..c5470a29b7 100644 --- a/comms/core/src/transports/tcp.rs +++ b/comms/core/src/transports/tcp.rs @@ -125,10 +125,10 @@ impl Transport for TcpTransport { type Listener = TcpInbound; type Output = TcpStream; - async fn listen(&self, addr: Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { let socket_addr = self .dns_resolver - .resolve(addr) + .resolve(addr.clone()) .await .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("Failed to resolve address: {}", err)))?; let listener = TcpListener::bind(&socket_addr).await?; @@ -136,10 +136,10 @@ impl Transport for TcpTransport { Ok((TcpInbound::new(self.clone(), listener), local_addr)) } - async fn dial(&self, addr: Multiaddr) -> Result { + async fn dial(&self, addr: &Multiaddr) -> Result { let socket_addr = self .dns_resolver - .resolve(addr) + .resolve(addr.clone()) .await .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("Address resolution failed: {}", err)))?; diff --git a/comms/core/src/transports/tcp_with_tor.rs b/comms/core/src/transports/tcp_with_tor.rs index 17f2c439bf..f6cea6e991 100644 --- a/comms/core/src/transports/tcp_with_tor.rs +++ b/comms/core/src/transports/tcp_with_tor.rs @@ -67,11 +67,11 @@ impl Transport for TcpWithTorTransport { type Listener = ::Listener; type Output = TcpStream; - async fn listen(&self, addr: Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { 
self.tcp_transport.listen(addr).await } - async fn dial(&self, addr: Multiaddr) -> Result { + async fn dial(&self, addr: &Multiaddr) -> Result { if addr.is_empty() { return Err(io::Error::new( io::ErrorKind::InvalidInput, @@ -79,7 +79,7 @@ impl Transport for TcpWithTorTransport { )); } - if is_onion_address(&addr) { + if is_onion_address(addr) { match self.socks_transport { Some(ref transport) => { let socket = transport.dial(addr).await?; diff --git a/comms/dht/Cargo.toml b/comms/dht/Cargo.toml index b644c51565..08bd5c0d88 100644 --- a/comms/dht/Cargo.toml +++ b/comms/dht/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_comms_dht" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] description = "Tari comms DHT module" repository = "https://github.com/tari-project/tari" diff --git a/comms/dht/src/dht.rs b/comms/dht/src/dht.rs index e3acdae98a..3fef7d43a8 100644 --- a/comms/dht/src/dht.rs +++ b/comms/dht/src/dht.rs @@ -31,6 +31,7 @@ use tari_comms::{ pipeline::PipelineError, }; use tari_shutdown::ShutdownSignal; +use tari_utilities::epoch_time::EpochTime; use thiserror::Error; use tokio::sync::{broadcast, mpsc}; use tower::{layer::Layer, Service, ServiceBuilder}; @@ -298,6 +299,7 @@ impl Dht { .layer(MetricsLayer::new(self.metrics_collector.clone())) .layer(inbound::DeserializeLayer::new(self.peer_manager.clone())) .layer(filter::FilterLayer::new(self.unsupported_saf_messages_filter())) + .layer(filter::FilterLayer::new(discard_expired_messages)) .layer(inbound::DecryptionLayer::new( self.config.clone(), self.node_identity.clone(), @@ -432,6 +434,20 @@ fn filter_messages_to_rebroadcast(msg: &DecryptedDhtMessage) -> bool { } } +/// Check message expiry and immediately discard if expired +fn discard_expired_messages(msg: &DhtInboundMessage) -> bool { + if let Some(expires) = msg.dht_header.expires { + if expires < EpochTime::now() { + debug!( + target: LOG_TARGET, + "[discard_expired_messages] Discarding expired message {}", msg + ); 
+ return false; + } + } + true +} + #[cfg(test)] mod test { use std::{sync::Arc, time::Duration}; diff --git a/comms/dht/src/envelope.rs b/comms/dht/src/envelope.rs index 3f4f2ef06e..6ac881cb80 100644 --- a/comms/dht/src/envelope.rs +++ b/comms/dht/src/envelope.rs @@ -43,7 +43,7 @@ use crate::version::DhtProtocolVersion; pub(crate) fn datetime_to_timestamp(datetime: DateTime) -> Timestamp { Timestamp { seconds: datetime.timestamp(), - nanos: datetime.timestamp_subsec_nanos().try_into().unwrap_or(std::i32::MAX), + nanos: datetime.timestamp_subsec_nanos().try_into().unwrap_or(i32::MAX), } } diff --git a/comms/dht/src/store_forward/database/stored_message.rs b/comms/dht/src/store_forward/database/stored_message.rs index b8d095d901..1913b5be02 100644 --- a/comms/dht/src/store_forward/database/stored_message.rs +++ b/comms/dht/src/store_forward/database/stored_message.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::convert::TryInto; - use chrono::NaiveDateTime; use tari_comms::message::MessageExt; use tari_utilities::{hex, hex::Hex}; @@ -50,7 +48,7 @@ pub struct NewStoredMessage { } impl NewStoredMessage { - pub fn try_construct(message: DecryptedDhtMessage, priority: StoredMessagePriority) -> Option { + pub fn new(message: DecryptedDhtMessage, priority: StoredMessagePriority) -> Self { let DecryptedDhtMessage { authenticated_origin, decryption_result, @@ -64,8 +62,8 @@ impl NewStoredMessage { }; let body_hash = hex::to_hex(&dedup::create_message_hash(&dht_header.message_signature, &body)); - Some(Self { - version: dht_header.version.as_major().try_into().ok()?, + Self { + version: dht_header.version.as_major() as i32, origin_pubkey: authenticated_origin.as_ref().map(|pk| pk.to_hex()), message_type: dht_header.message_type as i32, destination_pubkey: dht_header.destination.public_key().map(|pk| pk.to_hex()), @@ -81,7 +79,7 @@ impl NewStoredMessage { }, body_hash, body, - }) + } } } diff --git a/comms/dht/src/store_forward/error.rs b/comms/dht/src/store_forward/error.rs index 4a71b410eb..85fd5678c2 100644 --- a/comms/dht/src/store_forward/error.rs +++ b/comms/dht/src/store_forward/error.rs @@ -27,7 +27,7 @@ use tari_comms::{ message::MessageError, peer_manager::{NodeId, PeerManagerError}, }; -use tari_utilities::byte_array::ByteArrayError; +use tari_utilities::{byte_array::ByteArrayError, epoch_time::EpochTime}; use thiserror::Error; use crate::{ @@ -81,10 +81,10 @@ pub enum StoreAndForwardError { RequesterChannelClosed, #[error("The request was cancelled by the store and forward service")] RequestCancelled, - #[error("The message was not valid for store and forward")] - InvalidStoreMessage, - #[error("The envelope version is invalid")] - InvalidEnvelopeVersion, + #[error("The {field} field was not valid, discarding SAF response: {details}")] + InvalidSafResponseMessage { field: &'static str, details: String }, + #[error("The message has expired, not storing message 
in SAF db (expiry: {expired}, now: {now})")] + NotStoringExpiredMessage { expired: EpochTime, now: EpochTime }, #[error("MalformedNodeId: {0}")] MalformedNodeId(#[from] ByteArrayError), #[error("DHT message type should not have been forwarded")] diff --git a/comms/dht/src/store_forward/message.rs b/comms/dht/src/store_forward/message.rs index f753b9941b..f74af32c61 100644 --- a/comms/dht/src/store_forward/message.rs +++ b/comms/dht/src/store_forward/message.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::convert::{TryFrom, TryInto}; +use std::convert::TryFrom; use chrono::{DateTime, Utc}; use prost::Message; @@ -76,10 +76,7 @@ impl TryFrom for StoredMessage { let dht_header = DhtHeader::decode(message.header.as_slice())?; Ok(Self { stored_at: Some(datetime_to_timestamp(DateTime::from_utc(message.stored_at, Utc))), - version: message - .version - .try_into() - .map_err(|_| StoreAndForwardError::InvalidEnvelopeVersion)?, + version: message.version as u32, body: message.body, dht_header: Some(dht_header), }) diff --git a/comms/dht/src/store_forward/saf_handler/task.rs b/comms/dht/src/store_forward/saf_handler/task.rs index 7f5390d382..4bce651e68 100644 --- a/comms/dht/src/store_forward/saf_handler/task.rs +++ b/comms/dht/src/store_forward/saf_handler/task.rs @@ -36,7 +36,7 @@ use tari_comms::{ types::CommsPublicKey, BytesMut, }; -use tari_utilities::{convert::try_convert_all, ByteArray}; +use tari_utilities::ByteArray; use tokio::sync::mpsc; use tower::{Service, ServiceExt}; @@ -216,7 +216,7 @@ where S: Service let messages = self.saf_requester.fetch_messages(query.clone()).await?; let stored_messages = StoredMessagesResponse { - messages: try_convert_all(messages)?, + messages: messages.into_iter().map(TryInto::try_into).collect::>()?, request_id: retrieve_msgs.request_id, response_type: 
resp_type as i32, }; @@ -430,8 +430,13 @@ where S: Service .stored_at .map(|t| { Result::<_, StoreAndForwardError>::Ok(DateTime::from_utc( - NaiveDateTime::from_timestamp_opt(t.seconds, t.nanos.try_into().unwrap_or(u32::MAX)) - .ok_or(StoreAndForwardError::InvalidStoreMessage)?, + NaiveDateTime::from_timestamp_opt(t.seconds, 0).ok_or_else(|| { + StoreAndForwardError::InvalidSafResponseMessage { + field: "stored_at", + details: "number of seconds provided represents more days than can fit in a u32" + .to_string(), + } + })?, Utc, )) }) @@ -618,7 +623,7 @@ where S: Service mod test { use std::time::Duration; - use chrono::Utc; + use chrono::{Timelike, Utc}; use tari_comms::{message::MessageExt, runtime, wrap_in_envelope_body}; use tari_test_utils::collect_recv; use tari_utilities::{hex, hex::Hex}; @@ -932,7 +937,7 @@ mod test { .unwrap() .unwrap(); - assert_eq!(last_saf_received, msg2_time); + assert_eq!(last_saf_received.second(), msg2_time.second()); } #[runtime::test] diff --git a/comms/dht/src/store_forward/store.rs b/comms/dht/src/store_forward/store.rs index c0d2b8d224..70690bde94 100644 --- a/comms/dht/src/store_forward/store.rs +++ b/comms/dht/src/store_forward/store.rs @@ -437,13 +437,13 @@ where S: Service + Se ); if let Some(expires) = message.dht_header.expires { - if expires < EpochTime::now() { - return SafResult::Err(StoreAndForwardError::InvalidStoreMessage); + let now = EpochTime::now(); + if expires < now { + return Err(StoreAndForwardError::NotStoringExpiredMessage { expired: expires, now }); } } - let stored_message = - NewStoredMessage::try_construct(message, priority).ok_or(StoreAndForwardError::InvalidStoreMessage)?; + let stored_message = NewStoredMessage::new(message, priority); self.saf_requester.insert_message(stored_message).await } } diff --git a/comms/rpc_macros/Cargo.toml b/comms/rpc_macros/Cargo.toml index 81e33db8ea..f9aac328f1 100644 --- a/comms/rpc_macros/Cargo.toml +++ b/comms/rpc_macros/Cargo.toml @@ -6,7 +6,7 @@ repository = 
"https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [lib] diff --git a/infrastructure/derive/Cargo.toml b/infrastructure/derive/Cargo.toml index dbb5fe630c..f7c686eed4 100644 --- a/infrastructure/derive/Cargo.toml +++ b/infrastructure/derive/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [lib] diff --git a/infrastructure/shutdown/Cargo.toml b/infrastructure/shutdown/Cargo.toml index 9070aab5e4..cd2c41d80e 100644 --- a/infrastructure/shutdown/Cargo.toml +++ b/infrastructure/shutdown/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/infrastructure/storage/Cargo.toml b/infrastructure/storage/Cargo.toml index 181f62b20e..9a966cf497 100644 --- a/infrastructure/storage/Cargo.toml +++ b/infrastructure/storage/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.38.5" +version = "0.38.7" edition = "2018" [dependencies] diff --git a/infrastructure/storage/tests/lmdb.rs b/infrastructure/storage/tests/lmdb.rs index 38441e39e0..45740521c7 100644 --- a/infrastructure/storage/tests/lmdb.rs +++ b/infrastructure/storage/tests/lmdb.rs @@ -118,7 +118,7 @@ fn insert_all_users(name: &str) -> (Vec, LMDBDatabase) { } #[test] -fn single_thread() { +fn test_single_thread() { { let users = load_users(); let env = init("single_thread").unwrap(); @@ -136,7 +136,7 @@ fn single_thread() { } #[test] -fn multi_thread() { +fn 
test_multi_thread() { { let users_arc = Arc::new(load_users()); let env = init("multi_thread").unwrap(); @@ -167,7 +167,7 @@ fn multi_thread() { } #[test] -fn transactions() { +fn test_transactions() { { let (users, db) = insert_all_users("transactions"); // Test the `exists` and value retrieval functions @@ -186,7 +186,7 @@ fn transactions() { /// Simultaneous writes in different threads #[test] #[allow(clippy::same_item_push)] -fn multi_thread_writes() { +fn test_multi_thread_writes() { { let env = init("multi-thread-writes").unwrap(); let mut threads = Vec::new(); @@ -220,7 +220,7 @@ fn multi_thread_writes() { /// Multiple write transactions in a single thread #[test] -fn multi_writes() { +fn test_multi_writes() { { let env = init("multi-writes").unwrap(); for i in 0..2 { @@ -241,7 +241,7 @@ fn multi_writes() { } #[test] -fn pair_iterator() { +fn test_pair_iterator() { { let (users, db) = insert_all_users("pair_iterator"); let res = db.for_each::(|pair| { @@ -256,7 +256,7 @@ fn pair_iterator() { } #[test] -fn exists_and_delete() { +fn test_exists_and_delete() { { let (_, db) = insert_all_users("delete"); assert!(db.contains_key(&525u64).unwrap()); @@ -267,7 +267,7 @@ fn exists_and_delete() { } #[test] -fn lmdb_resize_on_create() { +fn test_lmdb_resize_on_create() { let db_env_name = "resize"; { let path = get_path(db_env_name); diff --git a/infrastructure/tari_script/src/lib.rs b/infrastructure/tari_script/src/lib.rs index e796c55a4d..81ef3d5e7f 100644 --- a/infrastructure/tari_script/src/lib.rs +++ b/infrastructure/tari_script/src/lib.rs @@ -24,7 +24,7 @@ mod serde; mod stack; pub use error::ScriptError; -pub use op_codes::{slice_to_boxed_hash, slice_to_hash, HashValue, Opcode}; +pub use op_codes::{slice_to_boxed_hash, slice_to_hash, HashValue, Message, Opcode, ScalarValue}; pub use script::TariScript; pub use script_commitment::{ScriptCommitment, ScriptCommitmentError, ScriptCommitmentFactory}; pub use script_context::ScriptContext; diff --git 
a/infrastructure/tari_script/src/op_codes.rs b/infrastructure/tari_script/src/op_codes.rs index 50350e0dbb..2eab0a48bd 100644 --- a/infrastructure/tari_script/src/op_codes.rs +++ b/infrastructure/tari_script/src/op_codes.rs @@ -118,6 +118,7 @@ pub const OP_HASH_BLAKE256: u8 = 0xb0; pub const OP_HASH_SHA256: u8 = 0xb1; pub const OP_HASH_SHA3: u8 = 0xb2; pub const OP_TO_RISTRETTO_POINT: u8 = 0xb3; +pub const OP_CHECK_MULTI_SIG_VERIFY_AGGREGATE_PUB_KEY: u8 = 0xb4; // Opcode constants: Miscellaneous pub const OP_RETURN: u8 = 0x60; @@ -234,6 +235,9 @@ pub enum Opcode { /// Identical to CheckMultiSig, except that nothing is pushed to the stack if the m signatures are valid, and the /// operation fails with VERIFY_FAILED if any of the signatures are invalid. CheckMultiSigVerify(u8, u8, Vec, Box), + /// Pop m signatures from the stack. If m signatures out of the provided n public keys sign the 32-byte message, + /// push the aggregate of the public keys to the stack, otherwise fails with VERIFY_FAILED. + CheckMultiSigVerifyAggregatePubKey(u8, u8, Vec, Box), /// Pops the top element which must be a valid 32-byte scalar or hash and calculates the corresponding Ristretto /// point, and pushes the result to the stack. Fails with EMPTY_STACK if the stack is empty. 
ToRistrettoPoint, @@ -355,6 +359,10 @@ impl Opcode { let (m, n, keys, msg, end) = Opcode::read_multisig_args(bytes)?; Ok((CheckMultiSigVerify(m, n, keys, msg), &bytes[end..])) }, + OP_CHECK_MULTI_SIG_VERIFY_AGGREGATE_PUB_KEY => { + let (m, n, keys, msg, end) = Opcode::read_multisig_args(bytes)?; + Ok((CheckMultiSigVerifyAggregatePubKey(m, n, keys, msg), &bytes[end..])) + }, OP_TO_RISTRETTO_POINT => Ok((ToRistrettoPoint, &bytes[1..])), OP_RETURN => Ok((Return, &bytes[1..])), OP_IF_THEN => Ok((IfThen, &bytes[1..])), @@ -464,6 +472,13 @@ impl Opcode { } array.extend_from_slice(msg.deref()); }, + CheckMultiSigVerifyAggregatePubKey(m, n, public_keys, msg) => { + array.extend_from_slice(&[OP_CHECK_MULTI_SIG_VERIFY_AGGREGATE_PUB_KEY, *m, *n]); + for public_key in public_keys { + array.extend(public_key.as_bytes()); + } + array.extend_from_slice(msg.deref()); + }, ToRistrettoPoint => array.push(OP_TO_RISTRETTO_POINT), Return => array.push(OP_RETURN), IfThen => array.push(OP_IF_THEN), @@ -530,6 +545,17 @@ impl fmt::Display for Opcode { (*msg).to_hex() ) }, + CheckMultiSigVerifyAggregatePubKey(m, n, public_keys, msg) => { + let keys: Vec = public_keys.iter().map(|p| p.to_hex()).collect(); + write!( + fmt, + "CheckMultiSigVerifyAggregatePubKey({}, {}, [{}], {})", + *m, + *n, + keys.join(", "), + (*msg).to_hex() + ) + }, ToRistrettoPoint => write!(fmt, "ToRistrettoPoint"), Return => write!(fmt, "Return"), IfThen => write!(fmt, "IfThen"), @@ -766,12 +792,20 @@ mod test { 6c9cb4d3e57351462122310fa22c90b1e6dfb528d64615363d1261a75da3e401)", ); test_checkmultisig( - &Opcode::CheckMultiSigVerify(1, 2, keys, Box::new(*msg)), + &Opcode::CheckMultiSigVerify(1, 2, keys.clone(), Box::new(*msg)), OP_CHECK_MULTI_SIG_VERIFY, "CheckMultiSigVerify(1, 2, [9c8bc5f90d221191748e8dd7686f09e1114b4bada4c367ed58ae199c51eb100b, \ 56e9f018b138ba843521b3243a29d81730c3a4c25108b108b1ca47c2132db569], \ 6c9cb4d3e57351462122310fa22c90b1e6dfb528d64615363d1261a75da3e401)", ); + test_checkmultisig( + 
&Opcode::CheckMultiSigVerifyAggregatePubKey(1, 2, keys, Box::new(*msg)), + OP_CHECK_MULTI_SIG_VERIFY_AGGREGATE_PUB_KEY, + "CheckMultiSigVerifyAggregatePubKey(1, 2, \ + [9c8bc5f90d221191748e8dd7686f09e1114b4bada4c367ed58ae199c51eb100b, \ + 56e9f018b138ba843521b3243a29d81730c3a4c25108b108b1ca47c2132db569], \ + 6c9cb4d3e57351462122310fa22c90b1e6dfb528d64615363d1261a75da3e401)", + ); } #[test] diff --git a/infrastructure/tari_script/src/script.rs b/infrastructure/tari_script/src/script.rs index b91fce8480..df7d91b945 100644 --- a/infrastructure/tari_script/src/script.rs +++ b/infrastructure/tari_script/src/script.rs @@ -119,7 +119,7 @@ impl TariScript { } } - pub fn as_bytes(&self) -> Vec { + pub fn to_bytes(&self) -> Vec { self.script.iter().fold(Vec::new(), |mut bytes, op| { op.to_bytes(&mut bytes); bytes @@ -137,7 +137,7 @@ impl TariScript { if D::output_size() < 32 { return Err(ScriptError::InvalidDigest); } - let h = D::digest(&self.as_bytes()); + let h = D::digest(&self.to_bytes()); Ok(slice_to_hash(&h.as_slice()[..32])) } @@ -178,7 +178,7 @@ impl TariScript { pub fn script_message(&self, pub_key: &RistrettoPublicKey) -> Result { let b = Blake256::new() .chain(pub_key.as_bytes()) - .chain(&self.as_bytes()) + .chain(&self.to_bytes()) .finalize(); RistrettoSecretKey::from_bytes(b.as_slice()).map_err(|_| ScriptError::InvalidSignature) } @@ -248,19 +248,26 @@ impl TariScript { } }, CheckMultiSig(m, n, public_keys, msg) => { - if self.check_multisig(stack, *m, *n, public_keys, *msg.deref())? { + if self.check_multisig(stack, *m, *n, public_keys, *msg.deref())?.is_some() { stack.push(Number(1)) } else { stack.push(Number(0)) } }, CheckMultiSigVerify(m, n, public_keys, msg) => { - if self.check_multisig(stack, *m, *n, public_keys, *msg.deref())? 
{ + if self.check_multisig(stack, *m, *n, public_keys, *msg.deref())?.is_some() { Ok(()) } else { Err(ScriptError::VerifyFailed) } }, + CheckMultiSigVerifyAggregatePubKey(m, n, public_keys, msg) => { + if let Some(agg_pub_key) = self.check_multisig(stack, *m, *n, public_keys, *msg.deref())? { + stack.push(PublicKey(agg_pub_key)) + } else { + Err(ScriptError::VerifyFailed) + } + }, ToRistrettoPoint => self.handle_to_ristretto_point(stack), Return => Err(ScriptError::Return), IfThen => TariScript::handle_if_then(stack, state), @@ -505,9 +512,9 @@ impl TariScript { n: u8, public_keys: &[RistrettoPublicKey], message: Message, - ) -> Result { - if m == 0 || n == 0 || m > n || n > MAX_MULTISIG_LIMIT { - return Err(ScriptError::InvalidData); + ) -> Result, ScriptError> { + if m == 0 || n == 0 || m > n || n > MAX_MULTISIG_LIMIT || public_keys.len() != n as usize { + return Err(ScriptError::ValueExceedsBounds); } // pop m sigs let m = m as usize; @@ -524,20 +531,25 @@ impl TariScript { #[allow(clippy::mutable_key_type)] let mut sig_set = HashSet::new(); + let mut agg_pub_key = RistrettoPublicKey::default(); for s in &signatures { for (i, pk) in public_keys.iter().enumerate() { if !sig_set.contains(s) && !key_signed[i] && s.verify_challenge(pk, &message) { key_signed[i] = true; sig_set.insert(s); + agg_pub_key = agg_pub_key + pk; break; } } if !sig_set.contains(s) { - return Ok(false); + return Ok(None); } } - - Ok(sig_set.len() == m) + if sig_set.len() == m { + Ok(Some(agg_pub_key)) + } else { + Ok(None) + } } fn handle_to_ristretto_point(&self, stack: &mut ExecutionStack) -> Result<(), ScriptError> { @@ -562,7 +574,7 @@ impl Hex for TariScript { } fn to_hex(&self) -> String { - to_hex(&self.as_bytes()) + to_hex(&self.to_bytes()) } } @@ -625,6 +637,7 @@ mod test { inputs, op_codes::{slice_to_boxed_hash, slice_to_boxed_message, HashValue, Message}, ExecutionStack, + Opcode::CheckMultiSigVerifyAggregatePubKey, ScriptContext, StackItem, StackItem::{Commitment, Hash, Number}, 
@@ -948,7 +961,7 @@ mod test { #[test] fn serialisation() { let script = script!(Add Sub Add); - assert_eq!(&script.as_bytes(), &[0x93, 0x94, 0x93]); + assert_eq!(&script.to_bytes(), &[0x93, 0x94, 0x93]); assert_eq!(TariScript::from_bytes(&[0x93, 0x94, 0x93]).unwrap(), script); assert_eq!(script.to_hex(), "939493"); assert_eq!(TariScript::from_hex("939493").unwrap(), script); @@ -1145,21 +1158,21 @@ mod test { let script = TariScript::new(ops); let inputs = inputs!(s_alice.clone()); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); let keys = vec![p_alice.clone(), p_bob.clone()]; let ops = vec![CheckMultiSig(1, 0, keys, msg.clone())]; let script = TariScript::new(ops); let inputs = inputs!(s_alice.clone()); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); let keys = vec![p_alice, p_bob]; let ops = vec![CheckMultiSig(2, 1, keys, msg)]; let script = TariScript::new(ops); let inputs = inputs!(s_alice); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); // max n is 32 let (msg, data) = multisig_data(33); @@ -1169,7 +1182,7 @@ mod test { let items = sigs.map(StackItem::Signature).collect(); let inputs = ExecutionStack::new(items); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); // 3 of 4 let (msg, data) = multisig_data(4); @@ -1258,7 +1271,7 @@ mod test { // 1 of 3 let keys = vec![p_alice.clone(), p_bob.clone(), p_carol.clone()]; - let ops = vec![CheckMultiSigVerify(1, 2, keys, msg.clone())]; + let ops = vec![CheckMultiSigVerify(1, 3, keys, msg.clone())]; let script = TariScript::new(ops); let inputs = inputs!(Number(1), s_alice.clone()); @@ -1292,6 +1305,31 @@ mod test { let err = 
script.execute(&inputs).unwrap_err(); assert_eq!(err, ScriptError::VerifyFailed); + // 2 of 3 (returning the aggregate public key of the signatories) + let keys = vec![p_alice.clone(), p_bob.clone(), p_carol.clone()]; + let ops = vec![CheckMultiSigVerifyAggregatePubKey(2, 3, keys, msg.clone())]; + let script = TariScript::new(ops); + + let inputs = inputs!(s_alice.clone(), s_bob.clone()); + let agg_pub_key = script.execute(&inputs).unwrap(); + assert_eq!(agg_pub_key, StackItem::PublicKey(p_alice.clone() + p_bob.clone())); + + let inputs = inputs!(s_alice.clone(), s_carol.clone()); + let agg_pub_key = script.execute(&inputs).unwrap(); + assert_eq!(agg_pub_key, StackItem::PublicKey(p_alice.clone() + p_carol.clone())); + + let inputs = inputs!(s_bob.clone(), s_carol.clone()); + let agg_pub_key = script.execute(&inputs).unwrap(); + assert_eq!(agg_pub_key, StackItem::PublicKey(p_bob.clone() + p_carol.clone())); + + let inputs = inputs!(s_alice.clone(), s_carol.clone(), s_bob.clone()); + let err = script.execute(&inputs).unwrap_err(); + assert_eq!(err, ScriptError::NonUnitLengthStack); + + let inputs = inputs!(p_bob.clone()); + let err = script.execute(&inputs).unwrap_err(); + assert_eq!(err, ScriptError::StackUnderflow); + // 3 of 3 let keys = vec![p_alice.clone(), p_bob.clone(), p_carol]; let ops = vec![CheckMultiSigVerify(3, 3, keys, msg.clone())]; @@ -1313,21 +1351,21 @@ mod test { let script = TariScript::new(ops); let inputs = inputs!(s_alice.clone()); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); let keys = vec![p_alice.clone(), p_bob.clone()]; let ops = vec![CheckMultiSigVerify(1, 0, keys, msg.clone())]; let script = TariScript::new(ops); let inputs = inputs!(s_alice.clone()); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); let keys = vec![p_alice, p_bob]; let ops = 
vec![CheckMultiSigVerify(2, 1, keys, msg)]; let script = TariScript::new(ops); let inputs = inputs!(s_alice); let err = script.execute(&inputs).unwrap_err(); - assert_eq!(err, ScriptError::InvalidData); + assert_eq!(err, ScriptError::ValueExceedsBounds); // 3 of 4 let (msg, data) = multisig_data(4); diff --git a/infrastructure/tari_script/src/serde.rs b/infrastructure/tari_script/src/serde.rs index 658eef02a9..b9379dae64 100644 --- a/infrastructure/tari_script/src/serde.rs +++ b/infrastructure/tari_script/src/serde.rs @@ -26,12 +26,12 @@ use serde::{ }; use tari_utilities::hex::{from_hex, Hex}; -use crate::TariScript; +use crate::{ExecutionStack, TariScript}; impl Serialize for TariScript { fn serialize(&self, ser: S) -> Result where S: Serializer { - let script_bin = self.as_bytes(); + let script_bin = self.to_bytes(); if ser.is_human_readable() { ser.serialize_str(&script_bin.to_hex()) } else { @@ -40,44 +40,99 @@ impl Serialize for TariScript { } } -struct ScriptVisitor; +impl<'de> Deserialize<'de> for TariScript { + fn deserialize(de: D) -> Result + where D: Deserializer<'de> { + struct ScriptVisitor; -impl<'de> Visitor<'de> for ScriptVisitor { - type Value = TariScript; + impl<'de> Visitor<'de> for ScriptVisitor { + type Value = TariScript; - fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str("Expecting a binary array or hex string") - } + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Expecting a binary array or hex string") + } - fn visit_str(self, v: &str) -> Result - where E: Error { - let bytes = from_hex(v).map_err(|e| E::custom(e.to_string()))?; - self.visit_bytes(&bytes) - } + fn visit_str(self, v: &str) -> Result + where E: Error { + let bytes = from_hex(v).map_err(|e| E::custom(e.to_string()))?; + self.visit_bytes(&bytes) + } - fn visit_string(self, v: String) -> Result - where E: Error { - self.visit_str(&v) - } + fn visit_string(self, v: String) -> Result + where E: Error { + 
self.visit_str(&v) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where E: Error { + TariScript::from_bytes(v).map_err(|e| E::custom(e.to_string())) + } + + fn visit_borrowed_bytes(self, v: &'de [u8]) -> Result + where E: Error { + self.visit_bytes(v) + } + } - fn visit_bytes(self, v: &[u8]) -> Result - where E: Error { - TariScript::from_bytes(v).map_err(|e| E::custom(e.to_string())) + if de.is_human_readable() { + de.deserialize_string(ScriptVisitor) + } else { + de.deserialize_bytes(ScriptVisitor) + } } +} - fn visit_borrowed_bytes(self, v: &'de [u8]) -> Result - where E: Error { - self.visit_bytes(v) +// -------------------------------- ExecutionStack -------------------------------- // +impl Serialize for ExecutionStack { + fn serialize(&self, ser: S) -> Result + where S: Serializer { + let stack_bin = self.to_bytes(); + if ser.is_human_readable() { + ser.serialize_str(&stack_bin.to_hex()) + } else { + ser.serialize_bytes(&stack_bin) + } } } -impl<'de> Deserialize<'de> for TariScript { +impl<'de> Deserialize<'de> for ExecutionStack { fn deserialize(de: D) -> Result where D: Deserializer<'de> { + struct ExecutionStackVisitor; + + impl<'de> Visitor<'de> for ExecutionStackVisitor { + type Value = ExecutionStack; + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("Expecting a binary array or hex string") + } + + fn visit_str(self, v: &str) -> Result + where E: Error { + let bytes = from_hex(v).map_err(|e| E::custom(e.to_string()))?; + self.visit_bytes(&bytes) + } + + fn visit_string(self, v: String) -> Result + where E: Error { + self.visit_str(&v) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where E: Error { + ExecutionStack::from_bytes(v).map_err(|e| E::custom(e.to_string())) + } + + fn visit_borrowed_bytes(self, v: &'de [u8]) -> Result + where E: Error { + self.visit_bytes(v) + } + } + if de.is_human_readable() { - de.deserialize_string(ScriptVisitor) + de.deserialize_string(ExecutionStackVisitor) } else { - 
de.deserialize_bytes(ScriptVisitor) + de.deserialize_bytes(ExecutionStackVisitor) } } } diff --git a/infrastructure/tari_script/src/stack.rs b/infrastructure/tari_script/src/stack.rs index 757988f9c3..f3b714b95c 100644 --- a/infrastructure/tari_script/src/stack.rs +++ b/infrastructure/tari_script/src/stack.rs @@ -17,7 +17,6 @@ use std::convert::TryFrom; -use serde::{Deserialize, Serialize}; use tari_crypto::ristretto::{pedersen::PedersenCommitment, RistrettoPublicKey, RistrettoSchnorr, RistrettoSecretKey}; use tari_utilities::{ hex::{from_hex, to_hex, Hex, HexError}, @@ -58,7 +57,7 @@ pub const TYPE_PUBKEY: u8 = 4; pub const TYPE_SIG: u8 = 5; pub const TYPE_SCALAR: u8 = 6; -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum StackItem { Number(i64), Hash(HashValue), @@ -178,7 +177,7 @@ stack_item_from!(RistrettoPublicKey => PublicKey); stack_item_from!(RistrettoSchnorr => Signature); stack_item_from!(ScalarValue => Scalar); -#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct ExecutionStack { items: Vec, } @@ -262,7 +261,7 @@ impl ExecutionStack { } /// Return a binary array representation of the input stack - pub fn as_bytes(&self) -> Vec { + pub fn to_bytes(&self) -> Vec { self.items.iter().fold(Vec::new(), |mut bytes, item| { item.to_bytes(&mut bytes); bytes @@ -317,7 +316,7 @@ impl Hex for ExecutionStack { } fn to_hex(&self) -> String { - to_hex(&self.as_bytes()) + to_hex(&self.to_bytes()) } } @@ -361,11 +360,21 @@ mod test { use tari_crypto::{ hash::blake2::Blake256, keys::{PublicKey, SecretKey}, - ristretto::{utils, utils::SignatureSet, RistrettoPublicKey, RistrettoSchnorr, RistrettoSecretKey}, + ristretto::{ + pedersen::PedersenCommitment, + utils, + utils::SignatureSet, + RistrettoPublicKey, + RistrettoSchnorr, + RistrettoSecretKey, + }, + }; + use tari_utilities::{ + hex::{from_hex, Hex}, + 
message_format::MessageFormat, }; - use tari_utilities::hex::{from_hex, Hex}; - use crate::{op_codes::ScalarValue, ExecutionStack, StackItem}; + use crate::{op_codes::ScalarValue, ExecutionStack, HashValue, StackItem}; #[test] fn as_bytes_roundtrip() { @@ -378,7 +387,7 @@ mod test { } = utils::sign::(&k, b"hi").unwrap(); let items = vec![Number(5432), Number(21), Signature(s), PublicKey(p)]; let stack = ExecutionStack::new(items); - let bytes = stack.as_bytes(); + let bytes = stack.to_bytes(); let stack2 = ExecutionStack::from_bytes(&bytes).unwrap(); assert_eq!(stack, stack2); } @@ -445,4 +454,37 @@ mod test { panic!("Expected scalar") } } + + #[test] + fn serde_serialization_non_breaking() { + const SERDE_ENCODED_BYTES: &str = "ce0000000000000006fdf9fc345d2cdd8aff624a55f824c7c9ce3cc9\ + 72e011b4e750e417a90ecc5da50456c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435f08ebbc\ + 7c0556c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435f08ebbc7c6db1023d5c46d78a97da8eb\ + 6c5a37e00d5f2fee182dcb38c1b6c65e90a43c10906fdf9fc345d2cdd8aff624a55f824c7c9ce3cc972e011b4e7\ + 50e417a90ecc5da501d2040000000000000356c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435\ + f08ebbc7c"; + let p = + RistrettoPublicKey::from_hex("56c0fa32558d6edc0916baa26b48e745de834571534ca253ea82435f08ebbc7c").unwrap(); + let s = + RistrettoSecretKey::from_hex("6db1023d5c46d78a97da8eb6c5a37e00d5f2fee182dcb38c1b6c65e90a43c109").unwrap(); + let sig = RistrettoSchnorr::new(p.clone(), s); + let m: HashValue = Blake256::digest(b"Hello Tari Script").into(); + let s: ScalarValue = m; + let commitment = PedersenCommitment::from_public_key(&p); + + // Includes all variants for StackItem + let mut expected_inputs = inputs!(s, p, sig, m, 1234, commitment); + let stack = ExecutionStack::from_binary(&from_hex(SERDE_ENCODED_BYTES).unwrap()).unwrap(); + + for (i, item) in stack.items.into_iter().enumerate().rev() { + assert_eq!( + item, + expected_inputs.pop().unwrap(), + "Stack items did not match at index {}", 
+ i + ); + } + + assert!(expected_inputs.is_empty()); + } } diff --git a/infrastructure/test_utils/Cargo.toml b/infrastructure/test_utils/Cargo.toml index 809e2b0f67..9a6262255a 100644 --- a/infrastructure/test_utils/Cargo.toml +++ b/infrastructure/test_utils/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "tari_test_utils" description = "Utility functions used in Tari test functions" -version = "0.38.5" +version = "0.38.7" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" diff --git a/integration_tests/config/config.toml b/integration_tests/config/config.toml deleted file mode 100644 index 569d3b05c8..0000000000 --- a/integration_tests/config/config.toml +++ /dev/null @@ -1,380 +0,0 @@ -######################################################################################################################## -# # -# Common Configuration Options # -# # -######################################################################################################################## - -[common] -#override_from="dibbler" -#base_path="/.tari" -#data_dir="data" - -[auto_update] -# This interval in seconds to check for software updates. Setting this to 0 disables checking. -check_interval = 300 - -[dibbler.auto_update] -# Customize the hosts that are used to check for updates. These hosts must contain update information in DNS TXT records. -update_uris = ["updates.dibbler.taripulse.com"] -# Customize the location of the update SHA hashes and maintainer-signed signature. -# "auto_update.hashes_url" = "https://
/hashes.txt" -# "auto_update.hashes_sig_url" = "https://
/hashes.txt.sig" - -[metrics] -# server_bind_address = "127.0.0.1:5577" -# push_endpoint = http://localhost:9091/metrics/job/base-node -# Configuration options for dibbler testnet - -[dibbler.p2p.seeds] -dns_seeds = ["seeds.dibbler.tari.com"] -peer_seeds = [ - # 333388d1cbe3e2bd17453d052f - "c2eca9cf32261a1343e21ed718e79f25bfc74386e9305350b06f62047f519347::/onion3/6yxqk2ybo43u73ukfhyc42qn25echn4zegjpod2ccxzr2jd5atipwzqd:18141", - # 555575715a49fc242d756e52ca - "42fcde82b44af1de95a505d858cb31a422c56c4ac4747fbf3da47d648d4fc346::/onion3/2l3e7ysmihc23zybapdrsbcfg6omtjtfkvwj65dstnfxkwtai2fawtyd:18141", - # 77771f53be07fab4be5f1e1ff7 - "50e6aa8f6c50f1b9d9b3d438dfd2a29cfe1f3e3a650bd9e6b1e10f96b6c38f4d::/onion3/7s6y3cz5bnewlj5ypm7sekhgvqjyrq4bpaj5dyvvo7vxydj7hsmyf5ad:18141", - # 9999016f1f3a6162dddf5a45aa - "36a9df45e1423b5315ffa7a91521924210c8e1d1537ad0968450f20f21e5200d::/onion3/v24qfheti2rztlwzgk6v4kdbes3ra7mo3i2fobacqkbfrk656e3uvnid:18141", - # bbbb8358387d81c388fadb4649 - "be128d570e8ec7b15c101ee1a56d6c56dd7d109199f0bd02f182b71142b8675f::/onion3/ha422qsy743ayblgolui5pg226u42wfcklhc5p7nbhiytlsp4ir2syqd:18141", - # eeeeb0a943ed143e613a135392 - "3e0321c0928ca559ab3c0a396272dfaea705efce88440611a38ff3898b097217::/onion3/sl5ledjoaisst6d4fh7kde746dwweuge4m4mf5nkzdhmy57uwgtb7qqd:18141", - # 66664a0f95ce468941bb9de228 - "b0f797e7413b39b6646fa370e8394d3993ead124b8ba24325c3c07a05e980e7e::/ip4/35.177.93.69/tcp/18189", - # 22221bf814d5e524fce9ba5787 - "0eefb45a4de9484eca74846a4f47d2c8d38e76be1fec63b0112bd00d297c0928::/ip4/13.40.98.39/tcp/18189", - # 4444a0efd8388739d563bdd979 - "544ed2baed414307e119d12894e27f9ddbdfa2fd5b6528dc843f27903e951c30::/ip4/13.40.189.176/tcp/18189" -] - -######################################################################################################################## -# # -# Base Node Configuration Options # -# # -######################################################################################################################## - -# If you are 
not running a Tari Base node, you can simply leave everything in this section commented out. Base nodes -# help maintain the security of the Tari token and are the surest way to preserve your privacy and be 100% sure that -# no-one is cheating you out of your money. - -[base_node] -# Selected network -network = "dibbler" -# The socket to expose for the gRPC base node server -grpc_address = "/ip4/127.0.0.1/tcp/18142" - -# Spin up and use a built-in Tor instance. This only works on macos/linux and you must comment out tor_control_address below. -# This requires that the base node was built with the optional "libtor" feature flag. -#use_libtor = true - -[dibbler.base_node] -# A path to the file that stores your node identity and secret key -identity_file = "config/base_node_id_dibbler.json" - -[base_node.p2p] -# The node's publicly-accessible hostname. This is the host name that is advertised on the network so that -# peers can find you. -# _NOTE_: If using the `tor` transport type, public_address will be ignored and an onion address will be -# automatically configured -public_address = "/ip4/172.2.3.4/tcp/18189" - -# Optionally bind an additional TCP socket for inbound Tari P2P protocol commms. -# Use cases include: -# - allowing wallets to locally connect to their base node, rather than through tor, when used in conjunction with `tor_proxy_bypass_addresses` -# - multiple P2P addresses, one public over DNS and one private over TOR -# - a "bridge" between TOR and TCP-only nodes -# auxiliary_tcp_listener_address = "/ip4/127.0.0.1/tcp/9998" - -[base_node.p2p.transport] -# -------------- Transport configuration -------------- -# Use TCP to connect to the Tari network. This transport can only communicate with TCP/IP addresses, so peers with -# e.g. tor onion addresses will not be contactable. -#transport = "tcp" -# The address and port to listen for peer connections over TCP. 
-tcp.listener_address = "/ip4/0.0.0.0/tcp/18189" -# Configures a tor proxy used to connect to onion addresses. All other traffic uses direct TCP connections. -# This setting is optional however, if it is not specified, this node will not be able to connect to nodes that -# only advertise an onion address. -tcp.tor_socks_address = "/ip4/127.0.0.1/tcp/36050" -tcp.tor_socks_auth = "none" - -# # Configures the node to run over a tor hidden service using the Tor proxy. This transport recognises ip/tcp, -# # onion v2, onion v3 and dns addresses. -#type = "tor" -# Address of the tor control server -tor.control_address = "/ip4/127.0.0.1/tcp/9051" -# Authentication to use for the tor control server -tor.control_auth = "none" # or "password=xxxxxx" -# The onion port to use. -tor.onion_port = 18141 -# When these peer addresses are encountered when dialing another peer, the tor proxy is bypassed and the connection is made -# directly over TCP. /ip4, /ip6, /dns, /dns4 and /dns6 are supported. -tor.proxy_bypass_addresses = [] -#tor.proxy_bypass_addresses = ["/dns4/my-foo-base-node/tcp/9998"] -# When using the tor transport and set to true, outbound TCP connections bypass the tor proxy. Defaults to false for better privacy -tor.proxy_bypass_for_outbound_tcp = false - -# Use a SOCKS5 proxy transport. This transport recognises any addresses supported by the proxy. -#type = "socks5" -# The address of the SOCKS5 proxy -# Traffic will be forwarded to tcp.listener_address -socks.proxy_address = "/ip4/127.0.0.1/tcp/9050" -socks.auth = "none" # or "username_password=username:xxxxxxx" - -[base_node.p2p.dht] -auto_join = true -database_url = "base_node_dht.db" -# do we allow test addresses to be accepted like 127.0.0.1 -allow_test_addresses = false - -[base_node.p2p.dht.saf] - -[base_node.lmdb] -#init_size_bytes = 1000000 -#grow_size_bytes = 1600000 -#resize_threshold_bytes = 1600000 - -[base_node.storage] -# Sets the pruning horizon. 
-#pruning_horizon = 0 -# Set to true to record all reorgs. Recorded reorgs can be viewed using the list-reorgs command. -track_reorgs = true - -######################################################################################################################## -# # -# Wallet Configuration Options # -# # -######################################################################################################################## - -[wallet] -# Override common.network for wallet -override_from = "dibbler" - -# The relative folder to store your local key data and transaction history. DO NOT EVER DELETE THIS FILE unless you -# a) have backed up your seed phrase and -# b) know what you are doing! -db_file = "wallet/wallet.dat" - -# The socket to expose for the gRPC wallet server. This value is ignored if grpc_enabled is false. -grpc_address = "/ip4/127.0.0.1/tcp/18143" - -# Console wallet password -# Should you wish to start your console wallet without typing in your password, the following options are available: -# 1. Start the console wallet with the --password=secret argument, or -# 2. Set the environment variable TARI_WALLET_PASSWORD=secret before starting the console wallet, or -# 3. Set the "password" key in this [wallet] section of the config -# password = "secret" - -# WalletNotify -# Allows you to execute a script or program when these transaction events are received by the console wallet: -# - transaction received -# - transaction sent -# - transaction cancelled -# - transaction mined but unconfirmed -# - transaction mined and confirmed -# An example script is available here: applications/tari_console_wallet/src/notifier/notify_example.sh -# notify = "/path/to/script" - -# This is the timeout period that will be used to monitor TXO queries to the base node (default = 60). Larger values -# are needed for wallets with many (>1000) TXOs to be validated. 
-#base_node_query_timeout = 180 -# The amount of seconds added to the current time (Utc) which will then be used to check if the message has -# expired or not when processing the message (default = 10800). -#saf_expiry_duration = 10800 -# This is the number of block confirmations required for a transaction to be considered completely mined and -# confirmed. (default = 3) -#transaction_num_confirmations_required = 3 -# This is the timeout period that will be used for base node broadcast monitoring tasks (default = 60) -#transaction_broadcast_monitoring_timeout = 180 -# This is the timeout period that will be used for chain monitoring tasks (default = 60) -#transaction_chain_monitoring_timeout = 60 -# This is the timeout period that will be used for sending transactions directly (default = 20) -#transaction_direct_send_timeout = 180 -# This is the timeout period that will be used for sending transactions via broadcast mode (default = 60) -#transaction_broadcast_send_timeout = 180 -# This is the size of the event channel used to communicate transaction status events to the wallet's UI. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>10000) (default = 1000). -#transaction_event_channel_size = 25000 -# This is the size of the event channel used to communicate base node events to the wallet. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>3000) (default = 250). -#base_node_event_channel_size = 3500 -# This is the size of the event channel used to communicate output manager events to the wallet. A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>3000) (default = 250). -#output_manager_event_channel_size = 3500 -# This is the size of the event channel used to communicate base node update events to the wallet. 
A busy console -# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>300) (default = 50). -#base_node_update_publisher_channel_size = 500 -# If a large amount of tiny valued uT UTXOs are used as inputs to a transaction, the fee may be larger than -# the transaction amount. Set this value to `false` to allow spending of "dust" UTXOs for small valued -# transactions (default = true). -#prevent_fee_gt_amount = false -# This option specifies the transaction routing mechanism as being directly between wallets, making -# use of store and forward or using any combination of these. -# (options: "DirectOnly", "StoreAndForwardOnly", DirectAndStoreAndForward". default: "DirectAndStoreAndForward"). -#transaction_routing_mechanism = "DirectAndStoreAndForward" - -# When running the console wallet in command mode, use these values to determine what "stage" and timeout to wait -# for sent transactions. -# The stages are: -# - "DirectSendOrSaf" - The transaction was initiated and was accepted via Direct Send or Store And Forward. -# - "Negotiated" - The recipient replied and the transaction was negotiated. -# - "Broadcast" - The transaction was broadcast to the base node mempool. -# - "MinedUnconfirmed" - The transaction was successfully detected as mined but unconfirmed on the blockchain. -# - "Mined" - The transaction was successfully detected as mined and confirmed on the blockchain. - -# The default values are: "Broadcast", 300 -#command_send_wait_stage = "Broadcast" -#command_send_wait_timeout = 300 - -# The base nodes that the wallet should use for service requests and tracking chain state. -# base_node_service_peers = ["public_key::net_address", ...] 
-# base_node_service_peers = ["e856839057aac496b9e25f10821116d02b58f20129e9b9ba681b830568e47c4d::/onion3/exe2zgehnw3tvrbef3ep6taiacr6sdyeb54be2s25fpru357r4skhtad:18141"] - -# Configuration for the wallet's base node service -# The refresh interval, defaults to 10 seconds -#base_node_service_refresh_interval = 30 -# The maximum age of service requests in seconds, requests older than this are discarded -#base_node_service_request_max_age = 180 - -#[base_node.transport.tor] -#control_address = "/ip4/127.0.0.1/tcp/9051" -#control_auth_type = "none" # or "password" -# Required for control_auth_type = "password" -#control_auth_password = "super-secure-password" - -[wallet.p2p] - -[wallet.p2p.transport] -# # Configures the node to run over a tor hidden service using the Tor proxy. This transport recognises ip/tcp, -# # onion v2, onion v3 and dns addresses. -type = "tor" -# Address of the tor control server -tor.control_address = "/ip4/127.0.0.1/tcp/9051" -# Authentication to use for the tor control server -tor.control_auth = "none" # or "password=xxxxxx" -# The onion port to use. -tor.onion_port = 18141 -# When these peer addresses are encountered when dialing another peer, the tor proxy is bypassed and the connection is made -# directly over TCP. /ip4, /ip6, /dns, /dns4 and /dns6 are supported. -tor.proxy_bypass_addresses = [] -# When using the tor transport and set to true, outbound TCP connections bypass the tor proxy. 
Defaults to false for better privacy -tor.proxy_bypass_for_outbound_tcp = false - -[dibbler.wallet] -network = "dibbler" - - - -######################################################################################################################## -# # -# Miner Configuration Options # -# # -######################################################################################################################## - -[miner] -# Number of mining threads -# Default: number of logical CPU cores -#num_mining_threads=8 - -# GRPC address of base node -#base_node_grpc_address = "127.0.0.1:18142" - -# GRPC address of console wallet -#wallet_grpc_address = "127.0.0.1:18143" - -# Start mining only when base node is bootstrapped -# and current block height is on the tip of network -# Default: true -#mine_on_tip_only=true - -# Will check tip with node every N seconds and restart mining -# if height already taken and option `mine_on_tip_only` is set -# to true -# Default: 30 seconds -#validate_tip_timeout_sec=30 - -# Stratum Mode configuration -# mining_pool_address = "miningcore.tari.com:3052" -# mining_wallet_address = "YOUR_WALLET_PUBLIC_KEY" -# mining_worker_name = "worker1" - -######################################################################################################################## -# # -# Merge Mining Configuration Options # -# # -######################################################################################################################## - -[merge_mining_proxy] -#override_from = "dibbler" -monerod_url = [# stagenet - "http://stagenet.xmr-tw.org:38081", - "http://stagenet.community.xmr.to:38081", - "http://monero-stagenet.exan.tech:38081", - "http://xmr-lux.boldsuck.org:38081", - "http://singapore.node.xmr.pm:38081", -] -base_node_grpc_address = "/ip4/127.0.0.1/tcp/18142" -console_wallet_grpc_address = "/ip4/127.0.0.1/tcp/18143" - -# Address of the tari_merge_mining_proxy application -listener_address = "/ip4/127.0.0.1/tcp/18081" - -# In sole merged 
mining, the block solution is usually submitted to the Monero blockchain -# (monerod) as well as to the Tari blockchain, then this setting should be "true". With pool -# merged mining, there is no sense in submitting the solution to the Monero blockchain as the -# pool does that, then this setting should be "false". (default = true). -submit_to_origin = true - -# The merge mining proxy can either wait for the base node to achieve initial sync at startup before it enables mining, -# or not. If merge mining starts before the base node has achieved initial sync, those Tari mined blocks will not be -# accepted. (Default value = true; will wait for base node initial sync). -#wait_for_initial_sync_at_startup = true - -# Monero auth params -monerod_username = "" -monerod_password = "" -monerod_use_auth = false - -#[dibbler.merge_mining_proxy] -# Put any network specific settings here - - - -######################################################################################################################## -# # -# Validator Node Configuration Options # -# # -######################################################################################################################## - -[validator_node] - -phase_timeout = 30 - -# If set to false, there will be no scanning at all. -scan_for_assets = true -# How often do we want to scan the base layer for changes. -new_asset_scanning_interval = 10 -# If set then only the specific assets will be checked. 
-# assets_allow_list = [""] - - -constitution_auto_accept = false -constitution_management_polling_interval_in_seconds = 10 -constitution_management_polling_interval = 5 -constitution_management_confirmation_time = 50 -######################################################################################################################## -# # -# Collectibles Configuration Options # -# # -######################################################################################################################## - -[collectibles] -# GRPC address of validator node -#validator_node_grpc_address = "/ip4/127.0.0.1/tcp/18144" - -# GRPC address of base node -#base_node_grpc_address = "/ip4/127.0.0.1/tcp/18142" - -# GRPC address of wallet -#wallet_grpc_address = "/ip4/127.0.0.1/tcp/18143" diff --git a/integration_tests/cucumber.js b/integration_tests/cucumber.js index 544030439c..5b5dd3baf7 100644 --- a/integration_tests/cucumber.js +++ b/integration_tests/cucumber.js @@ -1,8 +1,7 @@ module.exports = { - default: - "--tags 'not @long-running and not @wallet-ffi and not @broken' --fail-fast", + default: "--tags 'not @long-running and not @wallet-ffi and not @broken' ", none: " ", - ci: "--tags '@critical and not @long-running and not @broken ' --fail-fast", + ci: "--tags '@critical and not @long-running and not @broken '", critical: "--format @cucumber/pretty-formatter --tags @critical", "non-critical": "--tags 'not @critical and not @long-running and not @broken'", diff --git a/integration_tests/features/BaseNodeAutoUpdate.feature b/integration_tests/features/BaseNodeAutoUpdate.feature deleted file mode 100644 index bc05149f8f..0000000000 --- a/integration_tests/features/BaseNodeAutoUpdate.feature +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The Tari Project -# SPDX-License-Identifier: BSD-3-Clause - -@auto_update -Feature: AutoUpdate - - @broken - Scenario: Auto update finds a new update on base node - Given I have a node NODE_A with auto update enabled - Then NODE_A has a 
new software update - - @broken - Scenario: Auto update ignores update with invalid signature on base node - Given I have a node NODE_A with auto update configured with a bad signature - Then NODE_A does not have a new software update diff --git a/integration_tests/features/BaseNodeConnectivity.feature b/integration_tests/features/BaseNodeConnectivity.feature index 37300e227a..4dbd112c14 100644 --- a/integration_tests/features/BaseNodeConnectivity.feature +++ b/integration_tests/features/BaseNodeConnectivity.feature @@ -21,13 +21,11 @@ Feature: Base Node Connectivity Then SEED_A is connected to WALLET_A Scenario: Base node lists heights - Given I have 1 seed nodes - And I have a base node N1 connected to all seed nodes + Given I have a seed node N1 When I mine 5 blocks on N1 Then node N1 lists heights 1 to 5 Scenario: Base node lists headers - Given I have 1 seed nodes - And I have a base node BN1 connected to all seed nodes + Given I have a seed node BN1 When I mine 5 blocks on BN1 Then node BN1 lists headers 1 to 5 with correct heights diff --git a/integration_tests/features/WalletAutoUpdate.feature b/integration_tests/features/WalletAutoUpdate.feature deleted file mode 100644 index 2a9d89c000..0000000000 --- a/integration_tests/features/WalletAutoUpdate.feature +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2022 The Tari Project -# SPDX-License-Identifier: BSD-3-Clause - -@auto_update -Feature: AutoUpdate - - @broken - Scenario: Auto update finds a new update on wallet - Given I have a wallet WALLET with auto update enabled - Then WALLET has a new software update - - @broken - Scenario: Auto update ignores update with invalid signature on wallet - Given I have a wallet WALLET with auto update configured with a bad signature - Then WALLET does not have a new software update diff --git a/integration_tests/helpers/config.js b/integration_tests/helpers/config.js index a3396eb31d..51ae68f4a7 100644 --- a/integration_tests/helpers/config.js +++ 
b/integration_tests/helpers/config.js @@ -84,6 +84,9 @@ function baseEnvs(peerSeeds = [], forceSyncPeers = [], _committee = []) { ["localnet.base_node.p2p.dht.flood_ban_max_msg_count"]: "100000", ["localnet.base_node.p2p.dht.database_url"]: "localnet/dht.db", ["localnet.p2p.seeds.dns_seeds_use_dnssec"]: "false", + ["localnet.base_node.lmdb.init_size_bytes"]: 16000000, + ["localnet.base_node.lmdb.grow_size_bytes"]: 16000000, + ["localnet.base_node.lmdb.resize_threshold_bytes"]: 1024, ["localnet.wallet.identity_file"]: "walletid.json", ["localnet.wallet.contacts_auto_ping_interval"]: "5", @@ -101,9 +104,7 @@ function baseEnvs(peerSeeds = [], forceSyncPeers = [], _committee = []) { ["merge_mining_proxy.monerod_use_auth"]: false, ["merge_mining_proxy.monerod_username"]: "", ["merge_mining_proxy.monerod_password"]: "", - // ["localnet.base_node.storage_db_init_size"]: 100000000, - // ["localnet.base_node.storage.db_resize_threshold"]: 10000000, - // ["localnet.base_node.storage.db_grow_size"]: 20000000, + ["merge_mining_proxy.wait_for_initial_sync_at_startup"]: false, ["miner.num_mining_threads"]: "1", ["miner.mine_on_tip_only"]: true, diff --git a/integration_tests/package-lock.json b/integration_tests/package-lock.json index 2dd066682e..403f326a61 100644 --- a/integration_tests/package-lock.json +++ b/integration_tests/package-lock.json @@ -9,13 +9,18 @@ "version": "1.0.0", "license": "ISC", "dependencies": { + "@grpc/grpc-js": "^1.2.3", + "@grpc/proto-loader": "^0.5.5", "archiver": "^5.3.1", "axios": "^0.21.4", "clone-deep": "^4.0.1", "csv-parser": "^3.0.0", "dateformat": "^3.0.3", + "fs": "^0.0.1-security", "glob": "^7.2.3", + "grpc-promise": "^1.4.0", "json5": "^2.2.1", + "path": "^0.12.7", "sha3": "^2.1.3", "tari_crypto": "v0.14.0", "utf8": "^3.0.0", @@ -2332,6 +2337,11 @@ } } }, + "node_modules/fs": { + "version": "0.0.1-security", + "resolved": "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz", + "integrity": 
"sha512-3XY9e1pP0CVEUCdj5BmfIZxRBTSDycnbqhIOGec9QYtmVH2fbLpj86CFWkrNOkt/Fvty4KZG5lTglL9j/gJ87w==" + }, "node_modules/fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", @@ -3119,6 +3129,15 @@ "node": ">=6" } }, + "node_modules/path": { + "version": "0.12.7", + "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", + "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "dependencies": { + "process": "^0.11.1", + "util": "^0.10.3" + } + }, "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", @@ -3187,6 +3206,14 @@ "node": ">=6.0.0" } }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "engines": { + "node": ">= 0.6.0" + } + }, "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -3832,6 +3859,14 @@ "resolved": "https://registry.npmjs.org/utf8/-/utf8-3.0.0.tgz", "integrity": "sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==" }, + "node_modules/util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "dependencies": { + "inherits": "2.0.3" + } + }, "node_modules/util-arity": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/util-arity/-/util-arity-1.1.0.tgz", @@ -3843,6 +3878,11 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": 
"sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, + "node_modules/util/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, "node_modules/uuid": { "version": "3.4.0", "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", @@ -5778,6 +5818,11 @@ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.8.tgz", "integrity": "sha512-1x0S9UVJHsQprFcEC/qnNzBLcIxsjAV905f/UkQxbclCsoTWlacCNOpQa/anodLl2uaEKFhfWOvM2Qg77+15zA==" }, + "fs": { + "version": "0.0.1-security", + "resolved": "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz", + "integrity": "sha512-3XY9e1pP0CVEUCdj5BmfIZxRBTSDycnbqhIOGec9QYtmVH2fbLpj86CFWkrNOkt/Fvty4KZG5lTglL9j/gJ87w==" + }, "fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", @@ -6405,6 +6450,15 @@ "callsites": "^3.0.0" } }, + "path": { + "version": "0.12.7", + "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz", + "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==", + "requires": { + "process": "^0.11.1", + "util": "^0.10.3" + } + }, "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", @@ -6449,6 +6503,11 @@ "fast-diff": "^1.1.2" } }, + "process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==" + }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -6956,6 +7015,21 
@@ "resolved": "https://registry.npmjs.org/utf8/-/utf8-3.0.0.tgz", "integrity": "sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==" }, + "util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + } + } + }, "util-arity": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/util-arity/-/util-arity-1.1.0.tgz", diff --git a/package-lock.json b/package-lock.json index e2497b00af..30a0a96353 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "tari", - "version": "0.38.5", + "version": "0.38.7", "lockfileVersion": 2, "requires": true, "packages": {} From ce6c22f9eb02a7932afc5b71fd73e34da03791ff Mon Sep 17 00:00:00 2001 From: jorgeantonio21 Date: Tue, 25 Oct 2022 06:02:41 +0100 Subject: [PATCH 17/21] feat: add missing fields to grpc consensus constants interface (#4845) * add remaining logic to build on ConsensusConstants grpc interface * address some PR comments * correct grammar error * cargo clippy * further refactoring * add constructor to TransactionOutput type * cargo fmt --- applications/tari_app_grpc/proto/types.proto | 60 ++++++++++++++ .../src/conversions/consensus_constants.rs | 81 ++++++++++++++++++- .../src/grpc/base_node_grpc_server.rs | 5 +- base_layer/core/src/transactions/weight.rs | 5 ++ 4 files changed, 148 insertions(+), 3 deletions(-) diff --git a/applications/tari_app_grpc/proto/types.proto b/applications/tari_app_grpc/proto/types.proto index a353891e41..da44dda3d4 100644 --- a/applications/tari_app_grpc/proto/types.proto +++ 
b/applications/tari_app_grpc/proto/types.proto @@ -23,6 +23,12 @@ syntax = "proto3"; package tari.rpc; +/// An unsigned range interface to more accurately represent Rust native Range's +message Range { + uint64 min = 1; + uint64 max = 2; +} + /// An Empty placeholder for endpoints without request parameters message Empty {} @@ -41,6 +47,36 @@ message ComSignature { bytes signature_v = 3; } +/// PoW Algorithm constants +message PowAlgorithmConstants { + uint64 max_target_time = 1; + uint64 min_difficulty = 2; + uint64 max_difficulty = 3; + uint64 target_time = 4; +} + +/// Weight params +message WeightParams { + uint64 kernel_weight = 1; + uint64 input_weight = 2; + uint64 output_weight = 3; + uint64 metadata_bytes_per_gram = 4; +} + +/// Output version +message OutputsVersion { + Range outputs = 1; + Range features = 2; +} + +/// Output types +enum OutputType { + STANDARD = 0; + COINBASE = 1; + BURN = 2; + VALIDATOR_NODE_REGISTRATION = 3; + CODE_TEMPLATE_REGISTRATION = 4; +} /// Consensus Constants response message ConsensusConstants { @@ -76,4 +112,28 @@ message ConsensusConstants { uint64 block_weight_outputs = 15; /// Block weight for kernels uint64 block_weight_kernels = 16; + /// This is to keep track of the value inside of the genesis block + uint64 faucet_value = 17; + /// Maximum byte size of TariScript + uint64 max_script_byte_size = 18; + /// How long does it take to timeout validator node registration + uint64 validator_node_timeout = 19; + /// The height at which these constants become effective + uint64 effective_from_height = 20; + /// Current version of the blockchain + Range valid_blockchain_version_range = 21; + /// This is the maximum age a monero merge mined seed can be reused + uint64 max_randomx_seed_height = 22; + /// This keeps track of the block split targets and which algo is accepted + map proof_of_work = 23; + /// Transaction Weight params + WeightParams transaction_weight = 24; + /// Range of valid transaction input versions + Range 
input_version_range = 26; + /// Range of valid transaction output (and features) versions + OutputsVersion output_version_range = 27; + /// Range of valid transaction kernel versions + Range kernel_version_range = 28; + /// An allowlist of output types + repeated OutputType permitted_output_types = 29; } diff --git a/applications/tari_app_grpc/src/conversions/consensus_constants.rs b/applications/tari_app_grpc/src/conversions/consensus_constants.rs index be0b45e28a..e35256a08e 100644 --- a/applications/tari_app_grpc/src/conversions/consensus_constants.rs +++ b/applications/tari_app_grpc/src/conversions/consensus_constants.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::convert::TryFrom; +use std::{collections::HashMap, convert::TryFrom, iter::FromIterator}; use tari_core::{consensus::ConsensusConstants, proof_of_work::PowAlgorithm}; @@ -30,6 +30,73 @@ impl From for grpc::ConsensusConstants { fn from(cc: ConsensusConstants) -> Self { let (emission_initial, emission_decay, emission_tail) = cc.emission_amounts(); let weight_params = cc.transaction_weight().params(); + let input_version_range = cc.input_version_range().clone().into_inner(); + let input_version_range = grpc::Range { + min: u64::from(input_version_range.0.as_u8()), + max: u64::from(input_version_range.1.as_u8()), + }; + let kernel_version_range = cc.kernel_version_range().clone().into_inner(); + let kernel_version_range = grpc::Range { + min: u64::from(kernel_version_range.0.as_u8()), + max: u64::from(kernel_version_range.1.as_u8()), + }; + let valid_blockchain_version_range = cc.valid_blockchain_version_range().clone().into_inner(); + let valid_blockchain_version_range = grpc::Range { + min: u64::from(valid_blockchain_version_range.0), + max: u64::from(valid_blockchain_version_range.1), + }; + let transaction_weight = 
cc.transaction_weight(); + let metadata_bytes_per_gram = if let Some(val) = transaction_weight.params().metadata_bytes_per_gram { + u64::from(val) + } else { + 0u64 + }; + let transaction_weight = grpc::WeightParams { + kernel_weight: cc.transaction_weight().params().kernel_weight, + input_weight: cc.transaction_weight().params().input_weight, + output_weight: cc.transaction_weight().params().output_weight, + metadata_bytes_per_gram, + }; + let output_version_range = cc.output_version_range(); + let outputs = grpc::Range { + min: u64::from(output_version_range.outputs.start().as_u8()), + max: u64::from(output_version_range.outputs.end().as_u8()), + }; + let features = grpc::Range { + min: u64::from(output_version_range.features.start().as_u8()), + max: u64::from(output_version_range.features.end().as_u8()), + }; + + let output_version_range = grpc::OutputsVersion { + outputs: Some(outputs), + features: Some(features), + }; + + let permitted_output_types = cc.permitted_output_types(); + let permitted_output_types = permitted_output_types + .iter() + .map(|ot| i32::from(ot.as_byte())) + .collect::>(); + + let monero_pow = PowAlgorithm::Monero; + let sha3_pow = PowAlgorithm::Sha3; + + let monero_pow = grpc::PowAlgorithmConstants { + max_target_time: cc.get_difficulty_max_block_interval(monero_pow), + max_difficulty: cc.max_pow_difficulty(monero_pow).as_u64(), + min_difficulty: cc.min_pow_difficulty(monero_pow).as_u64(), + target_time: cc.get_diff_target_block_interval(monero_pow), + }; + + let sha3_pow = grpc::PowAlgorithmConstants { + max_target_time: cc.get_difficulty_max_block_interval(sha3_pow), + max_difficulty: cc.max_pow_difficulty(sha3_pow).as_u64(), + min_difficulty: cc.min_pow_difficulty(sha3_pow).as_u64(), + target_time: cc.get_diff_target_block_interval(sha3_pow), + }; + + let proof_of_work = HashMap::from_iter([(0u32, monero_pow), (1u32, sha3_pow)]); + Self { coinbase_lock_height: cc.coinbase_lock_height(), blockchain_version: 
cc.blockchain_version().into(), @@ -46,6 +113,18 @@ impl From for grpc::ConsensusConstants { block_weight_inputs: weight_params.input_weight, block_weight_outputs: weight_params.output_weight, block_weight_kernels: weight_params.kernel_weight, + validator_node_timeout: cc.validator_node_timeout(), + max_script_byte_size: cc.get_max_script_byte_size() as u64, + faucet_value: cc.faucet_value().as_u64(), + effective_from_height: cc.effective_from_height(), + input_version_range: Some(input_version_range), + kernel_version_range: Some(kernel_version_range), + valid_blockchain_version_range: Some(valid_blockchain_version_range), + proof_of_work, + transaction_weight: Some(transaction_weight), + max_randomx_seed_height: cc.max_randomx_seed_height(), + output_version_range: Some(output_version_range), + permitted_output_types, } } } diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 0eae5abb8f..adb59026cc 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1675,9 +1675,10 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let sidechain_outputs = utxos .into_iter() .filter(|u| u.features.output_type.is_sidechain_type()) - .map(TryInto::try_into); + .map(TryInto::try_into) + .collect::, _>>(); - match sidechain_outputs.collect() { + match sidechain_outputs { Ok(outputs) => { let resp = tari_rpc::GetSideChainUtxosResponse { block_info: Some(tari_rpc::BlockInfo { diff --git a/base_layer/core/src/transactions/weight.rs b/base_layer/core/src/transactions/weight.rs index aedb0c83c2..9c38dbbbde 100644 --- a/base_layer/core/src/transactions/weight.rs +++ b/base_layer/core/src/transactions/weight.rs @@ -52,6 +52,11 @@ impl WeightParams { pub struct TransactionWeight(WeightParams); impl TransactionWeight { + /// Constructor + pub fn new(weight_params: WeightParams) -> Self { + 
Self(weight_params) + } + /// Creates a new `TransactionWeight` with latest weight params pub fn latest() -> Self { Self(WeightParams::v1()) From df5d78eff10227834313ca2a90ade0c73e8c08e3 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Tue, 25 Oct 2022 09:50:14 +0200 Subject: [PATCH 18/21] fix: remove unused config for validator node (#4849) Description --- Removes unused config from validator node Motivation and Context --- Fixes #4814 How Has This Been Tested? --- Validator node starts up --- common/config/presets/e_validator_node.toml | 30 +++++++-------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/common/config/presets/e_validator_node.toml b/common/config/presets/e_validator_node.toml index bb4589f369..fc3385986a 100644 --- a/common/config/presets/e_validator_node.toml +++ b/common/config/presets/e_validator_node.toml @@ -20,38 +20,26 @@ # automatically configured (default = ) #public_address = -# The asset worker will adhere to this phased timeout for the asset (default = 30 s) -#phase_timeout = 30 - # The Tari base node's GRPC address. (default = "/ip4/127.0.0.1/tcp/18142") #base_node_grpc_address = "127.0.0.1/tcp/18142" # The Tari console wallet's GRPC address. (default = "/ip4/127.0.0.1/tcp/18143") #wallet_grpc_address = "127.0.0.1/tcp/18143" -# If set to false, there will be no scanning at all. (default = true) -#scan_for_assets = true - # How often do we want to scan the base layer for changes. (default = 10) -#new_asset_scanning_interval = 10 - -# If set then only the specific assets will be checked. 
(= [""]) (default = ) -# assets_allow_list = +#base_layer_scanning_interval = 10 # The relative path to store persistent data (default = "data/validator_node") #data_dir = "data/validator_node" -# The constitution will auto accept contracts if true (default = false) -#constitution_auto_accept = false - -# Constitution confirmation time in block height (default = 20) -#constitution_management_confirmation_time = 20 +# JSON-RPC listener address +# json_rpc_address = "127.0.0.1:18200" -# Constitution polling interval in block height (default = 120) -constitution_management_polling_interval = 5 +# HTTP UI listener address +# http_ui_address = "127.0.0.1:5000" -# Constitution polling interval in time (seconds) (default = 60) -#constitution_management_polling_interval_in_seconds = 60 +# Set to true to enable auto registration for each epoch. +# auto_register = true -# GRPC address of the validator node application (default = "/ip4/127.0.0.1/tcp/18144") -#grpc_address = "/ip4/127.0.0.1/tcp/18144" +[validator_node.p2p] +# transport = "tor" \ No newline at end of file From 23b43131102fbca030f825c7c8df7ec9f698932f Mon Sep 17 00:00:00 2001 From: jorgeantonio21 Date: Wed, 26 Oct 2022 15:00:00 +0100 Subject: [PATCH 19/21] feat: add block height to input request to get network consensus constants (#4856) Description --- In this PR, we refactor the method `get_constants` of `BaseNodeClient` to request a block height input. Motivation and Context --- In order to get `ConsensusConstants` values at the DAN layer, it is useful to be able to request the former using a block height, for synchronization purposes. This PR addresses this scenario. How Has This Been Tested? 
--- --- applications/tari_app_grpc/proto/base_node.proto | 2 +- applications/tari_app_grpc/proto/types.proto | 5 +++++ .../src/grpc/base_node_grpc_server.rs | 15 ++++++++++----- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index f18c39aefc..63b908bc87 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -40,7 +40,7 @@ service BaseNode { // Returns the block timing for the chain heights rpc GetBlockTiming(HeightRequest) returns (BlockTimingResponse); // Returns the network Constants - rpc GetConstants(Empty) returns (ConsensusConstants); + rpc GetConstants(BlockHeight) returns (ConsensusConstants); // Returns Block Sizes rpc GetBlockSize (BlockGroupRequest) returns (BlockGroupResponse); // Returns Block Fees diff --git a/applications/tari_app_grpc/proto/types.proto b/applications/tari_app_grpc/proto/types.proto index da44dda3d4..4afef72706 100644 --- a/applications/tari_app_grpc/proto/types.proto +++ b/applications/tari_app_grpc/proto/types.proto @@ -32,6 +32,11 @@ message Range { /// An Empty placeholder for endpoints without request parameters message Empty {} +/// Define an interface for block height +message BlockHeight { + uint64 block_height = 1; +} + // Define the explicit Signature implementation for the Tari base layer. A different signature scheme can be // employed by redefining this type. 
message Signature { diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index adb59026cc..822494415e 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1123,14 +1123,19 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { async fn get_constants( &self, - _request: Request, + request: Request, ) -> Result, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for GetConstants",); debug!(target: LOG_TARGET, "Sending GetConstants response to client"); - // TODO: Switch to request height - Ok(Response::new( - self.network.create_consensus_constants().pop().unwrap().into(), - )) + + let block_height = request.into_inner().block_height; + + let consensus_manager = ConsensusManager::builder(self.network.as_network()).build(); + let consensus_constants = consensus_manager.consensus_constants(block_height); + + Ok(Response::new(tari_rpc::ConsensusConstants::from( + consensus_constants.clone(), + ))) } async fn get_block_size( From bd49bf2dff921d05dc7ed969464d4b8eea0cb2ec Mon Sep 17 00:00:00 2001 From: Miguel Naveira <47919901+mrnaveira@users.noreply.github.com> Date: Mon, 7 Nov 2022 05:45:24 -0700 Subject: [PATCH 20/21] fix: correct value for validator_node_timeout consensus constant in localnet (#4879) Description --- Changed the `validator_node_timeout` consensus constant from `0` to `100` in the `localnet` network. Motivation and Context --- We use the `localnet` network in the `tari-dan` project `cucumber-rs` integration tests. The current value for the `validator_node_timeout` constant in that network is `0`, which makes the validator node registrations to never be valid. It should be a reasonable value, similar to the `igor` network. How Has This Been Tested? 
--- With this change, in the `tari-dan` project `cucumber-rs` integration tests does now list registered validator nodes --- base_layer/core/src/consensus/consensus_constants.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index 2b4badb98a..8c9ca34146 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -329,7 +329,7 @@ impl ConsensusConstants { output_version_range, kernel_version_range, permitted_output_types: OutputType::all(), - validator_node_timeout: 0, + validator_node_timeout: 100, }] } From 392d541285e0766ffaea872063a21f8968715b7c Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Mon, 7 Nov 2022 14:46:45 +0200 Subject: [PATCH 21/21] fix(core)!: remove unused get_committees call from base node (#4880) Description --- - Remove unused `get_committee` function from base layer - add test to KeyPrefixCursor to confirm it works as expected Motivation and Context --- `get_committee` would panic because it made simultaneous accesses to the same transaction by creating multiple KeyPrefixCursors. Since it is unused and implemented on the DAN layer, the decision was taken to remove it. How Has This Been Tested? 
--- Added new test to KeyPrefixCursor BREAKING CHANGE: grpc interface no longer has get_committee call --- .../tari_app_grpc/proto/base_node.proto | 11 --- .../src/grpc/base_node_grpc_server.rs | 21 ------ .../comms_interface/comms_request.rs | 7 -- .../comms_interface/comms_response.rs | 4 +- .../comms_interface/inbound_handlers.rs | 4 -- .../comms_interface/local_interface.rs | 17 +---- base_layer/core/src/chain_storage/async_db.rs | 4 +- .../src/chain_storage/blockchain_backend.rs | 3 +- .../src/chain_storage/blockchain_database.rs | 7 +- .../lmdb_db/key_prefix_cursor.rs | 72 ++++++++++++++++++- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 63 ---------------- .../core/src/test_helpers/blockchain.rs | 5 -- 12 files changed, 76 insertions(+), 142 deletions(-) diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 63b908bc87..94139c6b15 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -91,7 +91,6 @@ service BaseNode { rpc GetMempoolStats(Empty) returns (MempoolStatsResponse); // Get VNs rpc GetActiveValidatorNodes(GetActiveValidatorNodesRequest) returns (stream GetActiveValidatorNodesResponse); - rpc GetCommittee(GetCommitteeRequest) returns (GetCommitteeResponse); rpc GetShardKey(GetShardKeyRequest) returns (GetShardKeyResponse); // Get templates rpc GetTemplateRegistrations(GetTemplateRegistrationsRequest) returns (stream GetTemplateRegistrationResponse); @@ -451,16 +450,6 @@ message GetActiveValidatorNodesResponse { bytes public_key = 2; } - -message GetCommitteeRequest { - uint64 height = 1; - bytes shard_key = 2; -} - -message GetCommitteeResponse { - repeated bytes public_key = 1; -} - message GetShardKeyRequest { uint64 height = 1; bytes public_key = 2; diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 822494415e..626d942f8a 
100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1437,27 +1437,6 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Ok(Response::new(response)) } - async fn get_committee( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - let report_error_flag = self.report_error_flag(); - debug!(target: LOG_TARGET, "Incoming GRPC request for GetCommittee"); - let mut handler = self.node_service.clone(); - let response = handler - .get_committee(request.height, request.shard_key.try_into().unwrap()) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error {}", e); - obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) - })? - .iter() - .map(|a| a.shard_key.to_vec()) - .collect(); - Ok(Response::new(tari_rpc::GetCommitteeResponse { public_key: response })) - } - async fn get_shard_key( &self, request: Request, diff --git a/base_layer/core/src/base_node/comms_interface/comms_request.rs b/base_layer/core/src/base_node/comms_interface/comms_request.rs index f47b4a0859..b18e34f04e 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_request.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_request.rs @@ -67,10 +67,6 @@ pub enum NodeCommsRequest { FetchValidatorNodesKeys { height: u64, }, - FetchCommittee { - height: u64, - shard: [u8; 32], - }, GetShardKey { height: u64, public_key: PublicKey, @@ -125,9 +121,6 @@ impl Display for NodeCommsRequest { FetchValidatorNodesKeys { height } => { write!(f, "FetchValidatorNodesKeys ({})", height) }, - FetchCommittee { height, shard } => { - write!(f, "FetchCommittee height ({}), shard({:?})", height, shard) - }, GetShardKey { height, public_key } => { write!(f, "GetShardKey height ({}), public key ({:?})", height, public_key) }, diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs 
b/base_layer/core/src/base_node/comms_interface/comms_response.rs index 5ad0bbe052..c6714a6fe2 100644 --- a/base_layer/core/src/base_node/comms_interface/comms_response.rs +++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs @@ -32,7 +32,7 @@ use tari_common_types::{ use crate::{ blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, - chain_storage::{ActiveValidatorNode, TemplateRegistrationEntry}, + chain_storage::TemplateRegistrationEntry, proof_of_work::Difficulty, transactions::transaction_components::{Transaction, TransactionKernel, TransactionOutput}, }; @@ -57,7 +57,6 @@ pub enum NodeCommsResponse { MmrNodes(Vec, Vec), FetchMempoolTransactionsByExcessSigsResponse(FetchMempoolTransactionsResponse), FetchValidatorNodesKeysResponse(Vec<(PublicKey, [u8; 32])>), - FetchCommitteeResponse(Vec), GetShardKeyResponse(Option<[u8; 32]>), FetchTemplateRegistrationsResponse(Vec), } @@ -94,7 +93,6 @@ impl Display for NodeCommsResponse { resp.not_found.len() ), FetchValidatorNodesKeysResponse(_) => write!(f, "FetchValidatorNodesKeysResponse"), - FetchCommitteeResponse(_) => write!(f, "FetchCommitteeResponse"), GetShardKeyResponse(_) => write!(f, "GetShardKeyResponse"), FetchTemplateRegistrationsResponse(_) => write!(f, "FetchTemplateRegistrationsResponse"), } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index e8f455ab44..1f7716986f 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -369,10 +369,6 @@ where B: BlockchainBackend + 'static active_validator_nodes, )) }, - NodeCommsRequest::FetchCommittee { height, shard } => { - let validator_nodes = self.blockchain_db.fetch_committee(height, shard).await?; - Ok(NodeCommsResponse::FetchCommitteeResponse(validator_nodes)) - }, NodeCommsRequest::GetShardKey { height, public_key } => { let shard_key = 
self.blockchain_db.get_shard_key(height, public_key).await?; Ok(NodeCommsResponse::GetShardKeyResponse(shard_key)) diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index 48c093e757..5e9de71501 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -38,7 +38,7 @@ use crate::{ NodeCommsResponse, }, blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate}, - chain_storage::{ActiveValidatorNode, TemplateRegistrationEntry}, + chain_storage::TemplateRegistrationEntry, proof_of_work::PowAlgorithm, transactions::transaction_components::{TransactionKernel, TransactionOutput}, }; @@ -295,21 +295,6 @@ impl LocalNodeCommsInterface { } } - pub async fn get_committee( - &mut self, - height: u64, - shard: [u8; 32], - ) -> Result, CommsInterfaceError> { - match self - .request_sender - .call(NodeCommsRequest::FetchCommittee { height, shard }) - .await?? 
- { - NodeCommsResponse::FetchCommitteeResponse(validator_node) => Ok(validator_node), - _ => Err(CommsInterfaceError::UnexpectedApiResponse), - } - } - pub async fn get_shard_key( &mut self, height: u64, diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 12ee7a45d2..7dbef635d6 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -30,7 +30,7 @@ use tari_common_types::{ }; use tari_utilities::epoch_time::EpochTime; -use super::{ActiveValidatorNode, TemplateRegistrationEntry}; +use super::TemplateRegistrationEntry; use crate::{ blocks::{ Block, @@ -269,8 +269,6 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_active_validator_nodes(height: u64) -> Vec<(PublicKey, [u8;32])>, "fetch_active_validator_nodes"); - make_async_fn!(fetch_committee(height: u64, shard: [u8;32]) -> Vec, "fetch_committee"); - make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> Option<[u8;32]>, "get_shard_key"); make_async_fn!(fetch_template_registrations>(range: T) -> Vec, "fetch_template_registrations"); diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 05d8ca33d6..9bcff45612 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -7,7 +7,7 @@ use tari_common_types::{ types::{Commitment, HashOutput, PublicKey, Signature}, }; -use super::{ActiveValidatorNode, TemplateRegistrationEntry}; +use super::TemplateRegistrationEntry; use crate::{ blocks::{ Block, @@ -194,7 +194,6 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_all_reorgs(&self) -> Result, ChainStorageError>; fn fetch_active_validator_nodes(&self, height: u64) -> Result, ChainStorageError>; - fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError>; fn get_shard_key(&self, height: u64, public_key: PublicKey) -> 
Result, ChainStorageError>; fn fetch_template_registrations( &self, diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index ba442780d2..8affd89bd6 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -41,7 +41,7 @@ use tari_common_types::{ use tari_mmr::pruned_hashset::PrunedHashSet; use tari_utilities::{epoch_time::EpochTime, hex::Hex, ByteArray}; -use super::{ActiveValidatorNode, TemplateRegistrationEntry}; +use super::TemplateRegistrationEntry; use crate::{ blocks::{ Block, @@ -1188,11 +1188,6 @@ where B: BlockchainBackend db.fetch_active_validator_nodes(height) } - pub fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { - let db = self.db_read_access()?; - db.fetch_committee(height, shard) - } - pub fn fetch_template_registrations>( &self, range: T, diff --git a/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs b/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs index 0cf60dda93..a0401b4771 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs @@ -82,7 +82,14 @@ where V: DeserializeOwned } } - pub fn seek_gte(&mut self, key: &[u8]) -> Result, V)>, ChainStorageError> { + // This function could be used later in cases where multiple seeks are required. 
+ #[cfg(test)] + pub fn reset_to(&mut self, prefix_key: &'a [u8]) { + self.has_seeked = false; + self.prefix_key = prefix_key; + } + + fn seek_gte(&mut self, key: &[u8]) -> Result, V)>, ChainStorageError> { self.has_seeked = true; let seek_result = self.cursor.seek_range_k(&self.access, key).to_opt()?; let (k, v) = match seek_result { @@ -105,3 +112,66 @@ where V: DeserializeOwned Ok(Some((k.to_vec(), val))) } } + +#[cfg(test)] +mod tests { + use std::fs; + + use lmdb_zero::{db, ReadTransaction, WriteTransaction}; + use tari_storage::lmdb_store::{LMDBBuilder, LMDBConfig}; + use tari_test_utils::paths::create_temporary_data_path; + + use crate::chain_storage::lmdb_db::lmdb::{lmdb_get_prefix_cursor, lmdb_insert}; + + #[test] + fn test_lmdb_get_prefix_cursor() { + let temp_path = create_temporary_data_path(); + + let lmdb_store = LMDBBuilder::new() + .set_path(&temp_path) + .set_env_config(LMDBConfig::default()) + .set_max_number_of_databases(1) + .add_database("test", db::CREATE) + .build() + .unwrap(); + + let db = lmdb_store.get_handle("test").unwrap(); + { + let txn = WriteTransaction::new(lmdb_store.env()).unwrap(); + lmdb_insert(&txn, &db.db(), &[0xffu8, 0, 0, 0], &1u64, "test").unwrap(); + lmdb_insert(&txn, &db.db(), &[0x2bu8, 0, 0, 1], &2u64, "test").unwrap(); + lmdb_insert(&txn, &db.db(), &[0x2bu8, 0, 1, 1], &3u64, "test").unwrap(); + lmdb_insert(&txn, &db.db(), &[0x2bu8, 1, 1, 0], &4u64, "test").unwrap(); + lmdb_insert(&txn, &db.db(), &[0x2bu8, 1, 1, 1], &5u64, "test").unwrap(); + lmdb_insert(&txn, &db.db(), &[0x00u8, 1, 1, 1], &5u64, "test").unwrap(); + txn.commit().unwrap(); + } + + { + let txn = ReadTransaction::new(lmdb_store.env()).unwrap(); + let db = db.db(); + let mut cursor = lmdb_get_prefix_cursor::(&txn, &db, &[0x2b]).unwrap(); + let kv = cursor.next().unwrap().unwrap(); + assert_eq!(kv, (vec![0x2b, 0, 0, 1], 2)); + let kv = cursor.next().unwrap().unwrap(); + assert_eq!(kv, (vec![0x2b, 0, 1, 1], 3)); + let kv = cursor.next().unwrap().unwrap(); + 
assert_eq!(kv, (vec![0x2b, 1, 1, 0], 4)); + let kv = cursor.next().unwrap().unwrap(); + assert_eq!(kv, (vec![0x2b, 1, 1, 1], 5)); + assert_eq!(cursor.next().unwrap(), None); + + cursor.reset_to(&[0x2b, 1, 1]); + let kv = cursor.next().unwrap().unwrap(); + assert_eq!(kv, (vec![0x2b, 1, 1, 0], 4)); + let kv = cursor.next().unwrap().unwrap(); + assert_eq!(kv, (vec![0x2b, 1, 1, 1], 5)); + assert_eq!(cursor.next().unwrap(), None); + + cursor.reset_to(&[0x11]); + assert_eq!(cursor.next().unwrap(), None); + } + + fs::remove_dir_all(&temp_path).expect("Could not delete temporary file"); + } +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index abd74ceb2f..73002a57c5 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -2520,69 +2520,6 @@ impl BlockchainBackend for LMDBDatabase { .collect()) } - fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { - // TODO: I'm not sure how effective this is compared to getting all and selecting by yourself. Also if there is - // less validator nodes than committee size this gets weird. - let txn = self.read_transaction()?; - let mut cursor: KeyPrefixCursor = - lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &shard)?; - let mut result = vec![]; - let committee_half_size = 5u64; - let mut size = 0u64; - // Right side of the committee - while let Some((_, val)) = cursor.next()? { - if val.from_height <= height && height <= val.to_height { - result.push(val); - size += 1; - if size == committee_half_size { - break; - } - } - } - // Check if it wraps around - if size < committee_half_size { - let mut cursor: KeyPrefixCursor = - lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &[0; 32])?; - while let Some((_, val)) = cursor.next()? 
{ - if val.from_height <= height && height <= val.to_height { - result.push(val); - size += 1; - if size == committee_half_size { - break; - } - } - } - } - let mut cursor: KeyPrefixCursor = - lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &shard)?; - let mut size = 0u64; - // Left side of the committee - while let Some((_, val)) = cursor.prev()? { - if val.from_height <= height && height <= val.to_height { - result.push(val); - size += 1; - if size == committee_half_size { - break; - } - } - } - // Check if it wraps around - if size < committee_half_size { - let mut cursor: KeyPrefixCursor = - lmdb_get_prefix_cursor(&txn, &self.validator_nodes, &[255; 32])?; - while let Some((_, val)) = cursor.prev()? { - if val.from_height <= height && height <= val.to_height { - result.push(val); - size += 1; - if size == committee_half_size { - break; - } - } - } - } - Ok(result) - } - fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError> { let txn = self.read_transaction()?; let mut validator_nodes: Vec = diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index e12ee86d36..6641c2d86f 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -51,7 +51,6 @@ use crate::{ }, chain_storage::{ create_lmdb_database, - ActiveValidatorNode, BlockAddResult, BlockchainBackend, BlockchainDatabase, @@ -419,10 +418,6 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_active_validator_nodes(height) } - fn fetch_committee(&self, height: u64, shard: [u8; 32]) -> Result, ChainStorageError> { - self.db.as_ref().unwrap().fetch_committee(height, shard) - } - fn get_shard_key(&self, height: u64, public_key: PublicKey) -> Result, ChainStorageError> { self.db.as_ref().unwrap().get_shard_key(height, public_key) }