diff --git a/configure.ac b/configure.ac index 72f6244052..c9642cf988 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ AC_PREREQ([2.60]) define(_CLIENT_VERSION_MAJOR, 0) define(_CLIENT_VERSION_MINOR, 14) define(_CLIENT_VERSION_REVISION, 12) -define(_CLIENT_VERSION_BUILD, 1) +define(_CLIENT_VERSION_BUILD, 6) define(_CLIENT_VERSION_IS_RELEASE, true) define(_COPYRIGHT_YEAR, 2023) define(_COPYRIGHT_HOLDERS,[The %s developers]) @@ -92,7 +92,18 @@ AC_PATH_TOOL(OBJCOPY, objcopy) AC_ARG_VAR(PYTHONPATH, Augments the default search path for python module files) # Enable wallet -enable_wallet=yes +AC_ARG_ENABLE([wallet], + [AS_HELP_STRING([--disable-wallet], + [disable wallet (enabled by default)])], + [enable_wallet=$enableval], + [enable_wallet=yes]) + +AC_ARG_WITH([bdb], + [AS_HELP_STRING([--without-bdb], + [disable bdb wallet support (default is enabled if wallet is enabled)])], + [use_bdb=$withval], + [use_bdb=auto]) + AC_ARG_ENABLE([elysium], [AS_HELP_STRING([--enable-elysium],[enable elysium (disabled by default)])], @@ -651,9 +662,11 @@ AC_SUBST(LEVELDB_CPPFLAGS) AC_SUBST(LIBLEVELDB) AC_SUBST(LIBMEMENV) -if test x$enable_wallet != xno; then - dnl Check for libdb_cxx only if wallet enabled - BITCOIN_FIND_BDB48 +if test "$enable_wallet" != "no"; then + dnl Check for libdb_cxx only if wallet enabled + if test "$use_bdb" != "no"; then + BITCOIN_FIND_BDB48 + fi fi dnl Check for libminiupnpc (optional) @@ -1168,6 +1181,7 @@ AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes]) AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes]) AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes]) AM_CONDITIONAL([ENABLE_SSE42],[test x$enable_sse42 = xyes]) +AM_CONDITIONAL([USE_BDB], [test "$use_bdb" = "yes"]) AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version]) AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version]) @@ -1222,6 +1236,7 @@ AC_SUBST(ZMQ_LIBS) AC_SUBST(PROTOBUF_LIBS) AC_SUBST(QR_LIBS) AC_SUBST(DSYMUTIL_FLAT) 
+AC_SUBST(USE_BDB) AC_CONFIG_FILES([Makefile src/Makefile share/setup.nsi share/qt/Info.plist src/test/buildenv.py]) AC_CONFIG_FILES([qa/pull-tester/tests_config.py],[chmod +x qa/pull-tester/tests_config.py]) AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh]) @@ -1291,6 +1306,11 @@ if test x$bitcoin_enable_qt != xno; then echo " qt version = $bitcoin_qt_got_major_vers" echo " with qr = $use_qr" fi + +if test "$enable_wallet" != "no"; then + echo " with bdb = $use_bdb" +fi + echo " with zmq = $use_zmq" echo " with test = $use_tests" echo " with bench = $use_bench" diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index 322a259050..dd5a541074 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -101,13 +101,16 @@ raise testScripts = [ + 'spark_mint.py', + 'spark_spend_gettransaction.py', + 'spark_setmintstatus_validation.py', 'lelantus_mint.py', 'lelantus_setmintstatus_validation.py', 'lelantus_mintspend.py', 'lelantus_spend_gettransaction.py', 'elysium_create_denomination.py', 'elysium_property_creation_fee.py', - 'elysium_sendmint.py', +# 'elysium_sendmint.py', 'elysium_sendmint_wallet_encryption.py', 'elysium_sendspend.py', 'elysium_sendspend_wallet_encryption.py', diff --git a/qa/rpc-tests/bip47-sendreceive.py b/qa/rpc-tests/bip47-sendreceive.py index a2bfb701c0..0931da4d08 100755 --- a/qa/rpc-tests/bip47-sendreceive.py +++ b/qa/rpc-tests/bip47-sendreceive.py @@ -25,7 +25,7 @@ def setup_network(self, split=False): def run_test(self): - self.nodes[1].generate(1010) + self.nodes[1].generate(410) node0_pcode = self.nodes[0].createrapaddress("node0-pcode0") try: diff --git a/qa/rpc-tests/lelantus_mint.py b/qa/rpc-tests/lelantus_mint.py index eedc04a38d..929567b33b 100755 --- a/qa/rpc-tests/lelantus_mint.py +++ b/qa/rpc-tests/lelantus_mint.py @@ -13,7 +13,7 @@ def run_test(self): assert_raises_message( JSONRPCException, - "Lelantus is not activated yet", + "Lelantus is not 
active", self.nodes[0].mintlelantus, 1) self.nodes[0].generate(activation_block - self.nodes[0].getblockcount()) diff --git a/qa/rpc-tests/lelantus_mintspend.py b/qa/rpc-tests/lelantus_mintspend.py index 829e360a18..2bcda0bc01 100755 --- a/qa/rpc-tests/lelantus_mintspend.py +++ b/qa/rpc-tests/lelantus_mintspend.py @@ -14,7 +14,7 @@ def __init__(self): def run_test(self): # Decimal formating: 6 digits for balance will be enought 000.000 getcontext().prec = 6 - self.nodes[0].generate(401) + self.nodes[0].generate(201) self.sync_all() start_bal = self.nodes[0].getbalance() diff --git a/qa/rpc-tests/llmq-cl-evospork.py b/qa/rpc-tests/llmq-cl-evospork.py index 3aa06ac22d..c22f67c73f 100755 --- a/qa/rpc-tests/llmq-cl-evospork.py +++ b/qa/rpc-tests/llmq-cl-evospork.py @@ -39,7 +39,7 @@ def run_test(self): self.nodes[0].sendtoaddress(self.payment_address, 1) # mine many blocks, wait for chainlock - while self.nodes[0].getblockcount() < 1000: + while self.nodes[0].getblockcount() < 800: self.nodes[0].generate(20) self.wait_for_chainlock_tip_all_nodes() diff --git a/qa/rpc-tests/llmq-is-lelantus.py b/qa/rpc-tests/llmq-is-lelantus.py index 15e78d749c..39ec482dac 100755 --- a/qa/rpc-tests/llmq-is-lelantus.py +++ b/qa/rpc-tests/llmq-is-lelantus.py @@ -28,7 +28,7 @@ def run_test(self): self.mine_quorum() self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) - self.nodes[0].generate(1000 - self.nodes[0].getblockcount()) + self.nodes[0].generate(401 - self.nodes[0].getblockcount()) for i in range(0, 3): mintTxids = self.nodes[0].mintlelantus(1) diff --git a/qa/rpc-tests/llmq-is-spark.py b/qa/rpc-tests/llmq-is-spark.py new file mode 100755 index 0000000000..f45502f399 --- /dev/null +++ b/qa/rpc-tests/llmq-is-spark.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +from test_framework.mininode import * +from test_framework.test_framework import EvoZnodeTestFramework +from test_framework.util import sync_blocks, set_node_times, \ + isolate_node, reconnect_isolated_node, set_mocktime, get_mocktime +from test_framework.util import assert_equal, assert_raises_jsonrpc, \ + bitcoind_processes, start_nodes, start_node, connect_nodes_bi + +from decimal import Decimal + +''' +llmq-is-spark.py + +Testing Instantsend for Spark transactions +''' + +class LLMQ_IS_Lelantus(EvoZnodeTestFramework): + def __init__(self): + super().__init__(6, 5, extra_args=[['-debug=instantsend']] * 6 ) + self.sporkprivkey = "cW2YM2xaeCaebfpKguBahUAgEzLXgSserWRuD29kSyKHq1TTgwRQ" + + def run_test(self): + self.sporkAddress = self.nodes[0].getaccountaddress("") + self.mine_quorum() + self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash()) + + self.nodes[0].generate(1001 - self.nodes[0].getblockcount()) + + sparkaddress = self.nodes[0].getnewsparkaddress()[0] + for i in range(0, 3): + mintTxids = self.nodes[0].mintspark({sparkaddress: {"amount": 1, "memo":"Test memo"}}) + + for mintTxid in mintTxids: + mintTx = self.nodes[0].getrawtransaction(mintTxid, 1) + val = 0 + for vi in mintTx['vin']: + val += vi['valueSat'] + if val > 10000: + break; + val = Decimal((val - 10000) / 1e+8).quantize(Decimal('1e-7')) + + assert(self.wait_for_instantlock(mintTxid, self.nodes[0])) + + mintDspend = self.nodes[0].createrawtransaction(mintTx['vin'], {self.nodes[0].getnewaddress(): str(val)}) + assert_raises_jsonrpc(-26, 'tx-txlock-conflict', self.nodes[0].sendrawtransaction, mintDspend) + + self.nodes[0].generate(3) + assert (self.nodes[0].getrawtransaction(mintTxid, True)['confirmations'] > 0) + + spendTxid = self.nodes[0].spendspark({self.sporkAddress: {"amount": 0.1, "subtractFee": False}}) + assert(self.wait_for_instantlock(spendTxid, self.nodes[0])) + +if __name__ == '__main__': + LLMQ_IS_Lelantus().main() diff --git a/qa/rpc-tests/spark_mint.py 
b/qa/rpc-tests/spark_mint.py new file mode 100755 index 0000000000..a901251750 --- /dev/null +++ b/qa/rpc-tests/spark_mint.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_message, JSONRPCException + +class SparkMintTest(BitcoinTestFramework): + def __init__(self): + super().__init__() + self.num_nodes = 1 + self.setup_clean_chain = False + + def run_test(self): + assert_raises_message( + JSONRPCException, + "Spark is not activated yet", + self.nodes[0].mintspark, 1) + + self.nodes[0].generate(1001) + + # generate coins + amounts = [1, 1.1, 2, 10] + + # 10 confirmations + address = self.nodes[0].getnewsparkaddress()[0] + self.nodes[0].mintspark({address: {"amount": amounts[0], "memo":"Test memo"}}) + self.nodes[0].mintspark({address: {"amount": amounts[1], "memo": "Test memo"}}) + self.nodes[0].generate(5) + + # 5 confirmations + self.nodes[0].mintspark({address: {"amount": amounts[2], "memo": "Test memo"}}) + self.nodes[0].mintspark({address: {"amount": amounts[3], "memo": "Test memo"}}) + self.nodes[0].generate(5) + + # get all mints and utxos + mints = self.verify_listsparkmints(amounts) + self.verify_listunspentsparkmints(amounts) + assert_equal([False, False, False, False], list(map(lambda m : m["isUsed"], mints))) + + # state modification test + # mark two coins as used + self.nodes[0].setsparkmintstatus(mints[2]["lTagHash"], True) + self.nodes[0].setsparkmintstatus(mints[3]["lTagHash"], True) + + mints = self.verify_listsparkmints(amounts) + self.verify_listunspentsparkmints([1, 1.1]) + assert_equal([False, False, True, True], list(map(lambda m : m["isUsed"], mints))) + + # set a coin as unused + self.nodes[0].setsparkmintstatus(mints[3]["lTagHash"], False) + mints = self.verify_listsparkmints(amounts) + self.verify_listunspentsparkmints([1, 1.1, 10]) + assert_equal([False, False, True, False], list(map(lambda m : m["isUsed"], mints))) + + 
self.nodes[0].setsparkmintstatus(mints[0]["lTagHash"], False) + self.nodes[0].setsparkmintstatus(mints[1]["lTagHash"], False) + self.nodes[0].setsparkmintstatus(mints[2]["lTagHash"], False) + self.nodes[0].setsparkmintstatus(mints[3]["lTagHash"], False) + + mints = self.verify_listsparkmints(amounts) + self.verify_listunspentsparkmints(amounts) + assert_equal([False, False, False, False], list(map(lambda m : m["isUsed"], mints))) + + def verify_listsparkmints(self, expected_amounts): + mints = self.nodes[0].listsparkmints() + mints = sorted(mints, key = lambda u: u['amount']) + + assert_equal( + sorted(expected_amounts), + list(map(lambda u: float(u['amount']), mints))) + + return mints + + def verify_listunspentsparkmints(self, expected_amounts): + mints = self.nodes[0].listunspentsparkmints() + mints = sorted(mints, key = lambda u: float(u['amount'])) + + assert_equal( + sorted(expected_amounts), + list(map(lambda u: float(u['amount']), mints))) + + return mints + +if __name__ == '__main__': + SparkMintTest().main() diff --git a/qa/rpc-tests/spark_mintspend.py b/qa/rpc-tests/spark_mintspend.py new file mode 100755 index 0000000000..f7a183c484 --- /dev/null +++ b/qa/rpc-tests/spark_mintspend.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 +from decimal import * + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +from time import * + +class SparkMintSpendTest(BitcoinTestFramework): + def __init__(self): + super().__init__() + self.num_nodes = 4 + self.setup_clean_chain = False + + def run_test(self): + # Decimal formating: 6 digits for balance will be enought 000.000 + getcontext().prec = 6 + self.nodes[0].generate(801) + self.sync_all() + + start_bal = self.nodes[0].getbalance() + + sparkAddress = self.nodes[0].getsparkdefaultaddress()[0] + + mint_trans = list() + mint_trans.append(self.nodes[0].mintspark({sparkAddress: {"amount": 1, "memo": "Test memo"}})) + mint_trans.append(self.nodes[0].mintspark({sparkAddress: 
{"amount": 2, "memo":"Test memo"}})) + + # Get last added transaction and fee for it + info = self.nodes[0].gettransaction(mint_trans[-1][0]) + + # fee in transaction is negative + fee = -(info['fee'] * 2) + cur_bal = self.nodes[0].getbalance() + start_bal = float(start_bal) - float(fee) - 3 + start_bal = Decimal(format(start_bal, '.8f')) + + assert start_bal == cur_bal, \ + 'Unexpected current balance: {}, should be minus two mints and two fee, ' \ + 'but start was {}'.format(cur_bal, start_bal) + + for tr in mint_trans: + info = self.nodes[0].gettransaction(tr[0]) + confrms = info['confirmations'] + assert confrms == 0, \ + 'Confirmations should be {}, ' \ + 'due to {} blocks was generated after transaction was created,' \ + 'but was {}'.format(0, 0, confrms) + + tr_type = info['details'][0]['category'] + assert tr_type == 'mint', 'Unexpected transaction type: {}'.format(tr_type) + # assert(self.wait_for_instantlock(tr, self.nodes[0])) + + self.nodes[0].generate(1) + self.sync_all() + + res = False + firoAddress = self.nodes[0].getnewaddress() + try: + res = self.nodes[0].spendspark({firoAddress: {"amount": 1, "subtractFee": False}}, {}) + except JSONRPCException as ex: + assert ex.error['message'] == 'Spark spend creation failed.' + + assert not res, 'Did not raise spend exception, but should be.' 
+ + # generate last confirmation block - now all transactions should be confimed + self.nodes[0].generate(1) + self.sync_all() + + for tr in mint_trans: + info = self.nodes[0].gettransaction(tr[0]) + confrms = info['confirmations'] + assert confrms >= 1, \ + 'Confirmations should be 3, ' \ + 'due to 3 blocks was generated after transaction was created,' \ + 'but was {}.'.format(confrms) + tr_type = info['details'][0]['category'] + assert tr_type == 'mint', 'Unexpected transaction type' + + spend_trans = list() + spend_total = Decimal(0) + + self.sync_all() + + spend_trans.append(self.nodes[0].spendspark({firoAddress: {"amount": 1, "subtractFee": False}}, {})) + + info = self.nodes[0].gettransaction(spend_trans[-1]) + confrms = info['confirmations'] + tr_type = info['details'][0]['category'] + + assert confrms == 0, \ + 'Confirmations should be 0, ' \ + 'due to 0 blocks was generated after transaction was created,' \ + 'but was {}.'.format(confrms) + + assert tr_type == 'spend', 'Unexpected transaction type' + + before_bal = self.nodes[0].getbalance() + self.nodes[0].generate(2) + self.sync_all() + after_new = self.nodes[0].getbalance() + delta = after_new - before_bal + start_bal = before_bal + delta + cur_bal = Decimal(format(self.nodes[0].getbalance(), '.1f')) + spend_total = Decimal(format(1, '.8f')) + + assert start_bal == cur_bal, \ + 'Unexpected current balance: {}, should increase on {}, ' \ + 'but start was {}'.format(cur_bal, spend_total, start_bal) + + for tr in spend_trans: + info = self.nodes[0].gettransaction(tr) + + confrms = info['confirmations'] + tr_type = info['details'][0]['category'] + assert confrms >= 1, \ + 'Confirmations should be 1 or more, ' \ + 'due to 1 blocks was generated after transaction was created,' \ + 'but was {}.'.format(confrms) + assert tr_type == 'spend', 'Unexpected transaction type' + + +if __name__ == '__main__': + SparkMintSpendTest().main() diff --git a/qa/rpc-tests/spark_setmintstatus_validation.py 
b/qa/rpc-tests/spark_setmintstatus_validation.py new file mode 100755 index 0000000000..6f3ea77607 --- /dev/null +++ b/qa/rpc-tests/spark_setmintstatus_validation.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class SetSparkMintSatusValidation(BitcoinTestFramework): + def __init__(self): + super().__init__() + self.num_nodes = 4 + self.setup_clean_chain = False + + def setup_nodes(self): + # This test requires mocktime + enable_mocktime() + return start_nodes(self.num_nodes, self.options.tmpdir) + + def run_test(self): + self.nodes[0].generate(801) + self.sync_all() + + sparkAddress = self.nodes[0].getnewsparkaddress()[0] + txid = list() + txid.append(self.nodes[0].mintspark({sparkAddress: {"amount": 1, "memo":"Test memo"}})) + + spark_mint = self.nodes[0].listsparkmints() + + assert len(spark_mint) == len(txid), 'Should be same number.' + + mint_info = spark_mint[0] + + assert not mint_info['isUsed'], \ + 'This mint with txid: {} should not be Used.'.format(txid) + + print('Set mint status from False to True.') + + self.nodes[0].setsparkmintstatus(mint_info['lTagHash'], True) + + spark_mint = self.nodes[0].listsparkmints() + + assert len(spark_mint) == len(txid), 'Should be same number.' + + mint_info = spark_mint[0] + + assert mint_info['isUsed'], \ + 'This mint with txid: {} should be Used.'.format(txid) + + print('Set mint status from True to False back.') + + self.nodes[0].setsparkmintstatus(mint_info['lTagHash'], False) + + spark_mint = self.nodes[0].listsparkmints() + + assert len(spark_mint) == len(txid[0]), 'Should be same number.' 
+ + mint_info = spark_mint[0] + + assert not mint_info['isUsed'], \ + 'This mint with txid: {} should not be Used.'.format(txid) + + + assert_raises(JSONRPCException, self.nodes[0].setsparkmintstatus, [(mint_info['lTagHash'], "sometext")]) + assert_raises(JSONRPCException, self.nodes[0].setsparkmintstatus, [mint_info['lTagHash']]) + assert_raises(JSONRPCException, self.nodes[0].setsparkmintstatus, []) + assert_raises(JSONRPCException, self.nodes[0].setsparkmintstatus, ["sometext"]) + assert_raises(JSONRPCException, self.nodes[0].setsparkmintstatus, [123]) + + +if __name__ == '__main__': + SetSparkMintSatusValidation().main() \ No newline at end of file diff --git a/qa/rpc-tests/spark_spend_gettransaction.py b/qa/rpc-tests/spark_spend_gettransaction.py new file mode 100755 index 0000000000..1af5301a71 --- /dev/null +++ b/qa/rpc-tests/spark_spend_gettransaction.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +from decimal import * + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * + +class SpendGettransactionTest(BitcoinTestFramework): + def __init__(self): + super().__init__() + self.num_nodes = 4 + self.setup_clean_chain = True + + def setup_nodes(self): + # This test requires mocktime + enable_mocktime() + return start_nodes(self.num_nodes, self.options.tmpdir) + + def run_test(self): + self.nodes[0].generate(1001) + self.sync_all() + + + # get a watch only address + sparkAddress = self.nodes[0].getnewsparkaddress()[0] + watchonly_address = self.nodes[3].getnewaddress() + watchonly_pubkey = self.nodes[3].validateaddress(watchonly_address)["pubkey"] + self.nodes[0].importpubkey(watchonly_pubkey, "", True) + + valid_address = self.nodes[0].getnewaddress() + + for _ in range(10): + self.nodes[0].mintspark({sparkAddress: {"amount": 1, "memo": "Test memo"}}) + + self.nodes[0].generate(1) + self.sync_all() + + balance = self.nodes[0].getsparkbalance() + assert balance['availableBalance'] / 1e8 == 10 + + # case 1: Spend many 
with watchonly address + spendto_wo_id = self.nodes[0].spendspark({watchonly_address: {"amount": 1, "subtractFee": False}}) + spendto_wo_tx = self.nodes[0].gettransaction(spendto_wo_id) + + assert int(spendto_wo_tx['amount']) == int('-1') + assert spendto_wo_tx['fee'] < Decimal('0') + assert isinstance(spendto_wo_tx['details'], list) + assert len(spendto_wo_tx['details']) == 1 + assert spendto_wo_tx['details'][0]['involvesWatchonly'] + + # case 2: Spend many with watchonly address and valid address + spendto_wo_and_valid_id = self.nodes[0].spendspark({watchonly_address: {"amount": 1, "subtractFee": False}, sparkAddress: {"amount": 0.01, "memo": "Test", "subtractFee": False}}) + spendto_wo_and_valid_tx = self.nodes[0].gettransaction(spendto_wo_and_valid_id) + + assert int(spendto_wo_and_valid_tx['amount']) == int(-1) + assert spendto_wo_and_valid_tx['fee'] < Decimal('0') + assert isinstance(spendto_wo_and_valid_tx['details'], list) + assert len(spendto_wo_and_valid_tx['details']) == 3 + + involves_watch_only_count = 0 + for detial in spendto_wo_and_valid_tx['details']: + if 'involvesWatchonly' in detial and bool(detial['involvesWatchonly']): + involves_watch_only_count += 1 + + assert involves_watch_only_count == 1 + + # case 3: spend many with watchonly address and invalid address + assert_raises(JSONRPCException, self.nodes[0].spendspark, [{watchonly_address: 1, 'invalidaddress': 2}]) + +if __name__ == '__main__': + SpendGettransactionTest().main() diff --git a/src/Makefile.am b/src/Makefile.am index 419afefca3..005a93bc0e 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -55,6 +55,7 @@ LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a LIBBITCOIN_CLI=libbitcoin_cli.a LIBBITCOIN_UTIL=libbitcoin_util.a LIBLELANTUS=liblelantus.a +LIBSPARK=libspark.a LIBBITCOIN_CRYPTO=crypto/libbitcoin_crypto.a LIBBITCOINQT=qt/libfiroqt.a LIBSECP256K1=secp256k1/libsecp256k1.la @@ -86,7 +87,8 @@ EXTRA_LIBRARIES += \ $(LIBBITCOIN_WALLET) \ $(LIBBITCOIN_ZMQ) \ $(LIBFIRO_SIGMA) \ - 
$(LIBLELANTUS) + $(LIBLELANTUS) \ + $(LIBSPARK) lib_LTLIBRARIES = $(LIBBITCOINCONSENSUS) @@ -216,6 +218,8 @@ BITCOIN_CORE_H = \ wallet/sigmaspendbuilder.h \ wallet/txbuilder.h \ wallet/lelantusjoinsplitbuilder.h \ + spark/sparkwallet.h \ + spark/primitives.h \ wallet/wallet.h \ wallet/walletexcept.h \ wallet/walletdb.h \ @@ -239,6 +243,7 @@ BITCOIN_CORE_H = \ bip47/secretpoint.h \ sigma.h \ lelantus.h \ + spark/state.h \ blacklists.h \ coin_containers.h \ firo_params.h \ @@ -330,6 +335,7 @@ libbitcoin_util_a-clientversion.$(OBJEXT): obj/build.h libbitcoin_server_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) libbitcoin_server_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) libbitcoin_server_a_SOURCES = \ + activemasternode.cpp \ addrman.cpp \ addrdb.cpp \ batchedlogger.cpp \ @@ -401,6 +407,8 @@ libbitcoin_server_a_SOURCES = \ versionbits.cpp \ sigma.cpp \ lelantus.cpp \ + spark/state.cpp \ + spark/primitives.cpp \ coin_containers.cpp \ mtpstate.cpp \ $(BITCOIN_CORE_H) @@ -427,6 +435,7 @@ libbitcoin_wallet_a_SOURCES = \ hdmint/wallet.cpp \ sigma.cpp \ lelantus.cpp \ + spark/state.cpp \ wallet/crypter.cpp \ wallet/bip39.cpp \ wallet/mnemoniccontainer.cpp \ @@ -438,6 +447,8 @@ libbitcoin_wallet_a_SOURCES = \ wallet/lelantusjoinsplitbuilder.cpp \ wallet/walletexcept.cpp \ wallet/wallet.cpp \ + spark/sparkwallet.cpp \ + spark/primitives.cpp \ wallet/walletdb.cpp \ wallet/authhelper.cpp \ hdmint/tracker.cpp \ @@ -625,6 +636,46 @@ libbitcoin_util_a_SOURCES = \ crypto/MerkleTreeProof/merkle-tree.cpp \ $(BITCOIN_CORE_H) +libspark_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +libspark_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +libspark_a_SOURCES = \ + libspark/transcript.h \ + libspark/transcript.cpp \ + libspark/params.h \ + libspark/params.cpp \ + libspark/schnorr_proof.h \ + libspark/schnorr.h \ + libspark/schnorr.cpp \ + libspark/chaum_proof.h \ + libspark/chaum.h \ + libspark/chaum.cpp \ + libspark/coin.h \ + 
libspark/coin.cpp \ + libspark/bpplus_proof.h \ + libspark/bpplus.h \ + libspark/bpplus.cpp \ + libspark/grootle_proof.h \ + libspark/grootle.h \ + libspark/grootle.cpp \ + libspark/keys.h \ + libspark/keys.cpp \ + libspark/util.h \ + libspark/util.cpp \ + libspark/aead.h \ + libspark/aead.cpp \ + libspark/kdf.h \ + libspark/kdf.cpp \ + libspark/hash.h \ + libspark/hash.cpp \ + libspark/mint_transaction.h \ + libspark/mint_transaction.cpp \ + libspark/spend_transaction.h \ + libspark/spend_transaction.cpp \ + libspark/f4grumble.h \ + libspark/f4grumble.cpp \ + libspark/bech32.h \ + libspark/bech32.cpp + liblelantus_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) liblelantus_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) liblelantus_a_SOURCES = \ @@ -730,6 +781,7 @@ firod_LDADD = \ $(LIBBITCOIN_WALLET) \ $(LIBFIRO_SIGMA) \ $(LIBLELANTUS) \ + $(LIBSPARK) \ $(LIBBITCOIN_ZMQ) \ $(LIBBITCOIN_CONSENSUS) \ $(LIBBITCOIN_CRYPTO) \ diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 03461fc9a6..56d62cfaff 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -200,7 +200,8 @@ QT_MOC_CPP = \ qt/moc_lelantuscoincontroldialog.cpp \ qt/moc_automintmodel.cpp \ qt/moc_automintnotification.cpp \ - qt/moc_pcodemodel.cpp + qt/moc_pcodemodel.cpp \ + qt/moc_sparkmodel.cpp BITCOIN_MM = \ qt/macdockiconhandler.mm \ @@ -298,7 +299,8 @@ BITCOIN_QT_H = \ qt/lelantusmodel.h \ qt/lelantusdialog.h \ qt/lelantuscoincontroldialog.h \ - qt/automintmodel.h + qt/automintmodel.h \ + qt/sparkmodel.h RES_ICONS = \ qt/res/icons/add.png \ @@ -341,6 +343,8 @@ RES_ICONS = \ qt/res/icons/hd_disabled.png \ qt/res/icons/hd_enabled.png \ qt/res/icons/history.png \ + qt/res/icons/ic_info.png \ + qt/res/icons/ic_warning.png \ qt/res/icons/info.png \ qt/res/icons/key.png \ qt/res/icons/lock_closed.png \ @@ -361,8 +365,10 @@ RES_ICONS = \ qt/res/icons/overview.png \ qt/res/icons/quit.png \ qt/res/icons/receive.png \ + qt/res/icons/refresh.png \ qt/res/icons/remove.png \ 
qt/res/icons/send.png \ + qt/res/icons/spark.png \ qt/res/icons/synced.png \ qt/res/icons/tools.png \ qt/res/icons/transaction0.png \ @@ -491,7 +497,8 @@ BITCOIN_QT_WALLET_CPP = \ qt/lelantusmodel.cpp \ qt/lelantusdialog.cpp \ qt/lelantuscoincontroldialog.cpp \ - qt/automintmodel.cpp + qt/automintmodel.cpp \ + qt/sparkmodel.cpp FIRO_QT_ELYSIUM_CPP = \ qt/elyassetsdialog.cpp \ @@ -573,7 +580,7 @@ endif qt_firo_qt_LDADD += -ltor qt_firo_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) \ - $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBFIRO_SIGMA) $(LIBLELANTUS) \ + $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBFIRO_SIGMA) $(LIBLELANTUS) $(LIBSPARK)\ $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BACKTRACE_LIB) $(BOOST_LIBS) $(QT_LIBS) \ $(QT_DBUS_LIBS) $(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) \ $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) $(LIBBLSSIG_LIBS) $(LIBBLSSIG_DEPENDS) \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 2e1e951287..2abce15d3c 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -90,6 +90,21 @@ BITCOIN_TESTS = \ liblelantus/test/schnorr_test.cpp \ liblelantus/test/serialize_test.cpp \ liblelantus/test/sigma_extended_test.cpp \ + libspark/test/transcript_test.cpp \ + libspark/test/schnorr_test.cpp \ + libspark/test/chaum_test.cpp \ + libspark/test/bpplus_test.cpp \ + libspark/test/grootle_test.cpp \ + libspark/test/aead_test.cpp \ + libspark/test/encrypt_test.cpp \ + libspark/test/coin_test.cpp \ + libspark/test/mint_transaction_test.cpp \ + libspark/test/spend_transaction_test.cpp \ + libspark/test/f4grumble_test.cpp \ + libspark/test/address_test.cpp \ + test/spark_tests.cpp \ + test/spark_state_test.cpp \ + test/spark_mintspend_test.cpp \ sigma/test/coin_spend_tests.cpp \ sigma/test/coin_tests.cpp \ sigma/test/primitives_tests.cpp \ @@ -190,6 +205,7 @@ BITCOIN_TESTS += \ wallet/test/wallet_tests.cpp \ wallet/test/crypto_tests.cpp \ 
wallet/test/lelantus_tests.cpp \ + wallet/test/spark_tests.cpp \ wallet/test/sigma_tests.cpp \ wallet/test/mnemonic_tests.cpp \ wallet/test/txbuilder_tests.cpp @@ -199,7 +215,7 @@ test_test_bitcoin_LDADD = $(LIBBITCOIN_SERVER) -ltor test_test_bitcoin_SOURCES = $(BITCOIN_TESTS) $(JSON_TEST_FILES) $(RAW_TEST_FILES) test_test_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -I$(builddir)/test/ $(TESTDEFS) $(EVENT_CFLAGS) -test_test_bitcoin_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBFIRO_SIGMA) $(LIBLELANTUS) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \ +test_test_bitcoin_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBFIRO_SIGMA) $(LIBLELANTUS) $(LIBSPARK) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \ $(BACKTRACE_LIB) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_PTHREADS_LIBS) $(ZMQ_LIBS) $(ZLIB_LIBS) test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) if ENABLE_WALLET @@ -226,6 +242,7 @@ test_test_bitcoin_fuzzy_LDADD = \ $(LIBUNIVALUE) \ $(LIBBITCOIN_SERVER) \ $(LIBLELANTUS) \ + $(LIBSPARK) \ $(LIBBITCOIN_COMMON) \ $(LIBBITCOIN_UTIL) \ $(LIBBITCOIN_CONSENSUS) \ diff --git a/src/addresstype.h b/src/addresstype.h index ac4ba8ff3b..155508a131 100644 --- a/src/addresstype.h +++ b/src/addresstype.h @@ -14,6 +14,9 @@ enum struct AddressType , lelantusMint = 8 , lelantusJMint = 9 , lelantusJSplit = 10 + , sparkMint = 11 + , sparksMint = 12 + , sparkSpend = 13 }; namespace zerocoin { namespace utils { @@ -62,6 +65,22 @@ inline bool isLelantusJSplit(std::string const & str){ return str == "Lelantusjsplit"; } +inline bool isSpark(std::string const & str){ + return str == "Spark"; +} + +inline bool isSparkMint(std::string const & str){ + return str == "Sparkmint"; +} + +inline bool isSparkSMint(std::string const & str){ + return str == "Sparksmint"; +} + +inline 
bool isSparkSpend(std::string const & str){ + return str == "Sparkspend"; +} + }} #endif /* ADDRESSTYPE_H */ diff --git a/src/batchproof_container.cpp b/src/batchproof_container.cpp index 75f039189b..d82e4638ea 100644 --- a/src/batchproof_container.cpp +++ b/src/batchproof_container.cpp @@ -6,6 +6,7 @@ #include "sigma.h" #include "lelantus.h" #include "ui_interface.h" +#include "spark/state.h" std::unique_ptr BatchProofContainer::instance; @@ -22,6 +23,7 @@ void BatchProofContainer::init() { tempSigmaProofs.clear(); tempLelantusSigmaProofs.clear(); tempRangeProofs.clear(); + tempSparkTransactions.clear(); } void BatchProofContainer::finalize() { @@ -37,6 +39,8 @@ void BatchProofContainer::finalize() { for (const auto& itr : tempRangeProofs) { rangeProofs[itr.first].insert(rangeProofs[itr.first].begin(), itr.second.begin(), itr.second.end()); } + + sparkTransactions.insert(sparkTransactions.end(), tempSparkTransactions.begin(), tempSparkTransactions.end()); } fCollectProofs = false; } @@ -46,6 +50,7 @@ void BatchProofContainer::verify() { batch_sigma(); batch_lelantus(); batch_rangeProofs(); + batch_spark(); } fCollectProofs = false; } @@ -208,7 +213,7 @@ void BatchProofContainer::batch_sigma() { try { if (!sigmaVerifier.batch_verify(anonymity_set, serials, fPadding, setSizes, proofs)) return false; - } catch (...) { + } catch (const std::exception &) { return false; } return true; @@ -311,7 +316,7 @@ void BatchProofContainer::batch_lelantus() { try { if (!sigmaVerifier.batchverify(anonymity_set, challenges, serials, setSizes, proofs)) return false; - } catch (...) 
{ + } catch (const std::exception &) { return false; } return true; @@ -387,3 +392,55 @@ void BatchProofContainer::batch_rangeProofs() { rangeProofs.clear(); } + +void BatchProofContainer::add(const spark::SpendTransaction& tx) { + tempSparkTransactions.push_back(tx); +} + +void BatchProofContainer::remove(const spark::SpendTransaction& tx) { + sparkTransactions.erase(std::remove_if(sparkTransactions.begin(), + sparkTransactions.end(), + [tx](spark::SpendTransaction& transaction){return transaction.getUsedLTags() == tx.getUsedLTags();}), + sparkTransactions.end()); +} + +void BatchProofContainer::batch_spark() { + if (!sparkTransactions.empty()){ + LogPrintf("Spark batch verification started.\n"); + uiInterface.UpdateProgressBarLabel("Batch verifying Spark Proofs..."); + } else { + return; + } + + std::unordered_map> cover_sets; + spark::CSparkState* sparkState = spark::CSparkState::GetState(); + + for (auto& itr : sparkTransactions) { + auto& idAndBlockHashes = itr.getBlockHashes(); + for (const auto& idAndHash : idAndBlockHashes) { + int cover_set_id = idAndHash.first; + if (!cover_sets.count(cover_set_id)) { + std::vector cover_set; + sparkState->GetCoinSet(cover_set_id, cover_set); + cover_sets[cover_set_id] = cover_set; + } + } + } + auto* params = spark::Params::get_default(); + + bool passed; + try { + passed = spark::SpendTransaction::verify(params, sparkTransactions, cover_sets); + } catch (const std::exception &) { + passed = false; + } + + if (!passed) { + LogPrintf("Spark batch verification failed."); + throw std::invalid_argument("Spark batch verification failed, please run Firo with -reindex -batching=0"); + } + + if (!sparkTransactions.empty()) + LogPrintf("Spark batch verification finished successfully.\n"); + sparkTransactions.clear(); +} \ No newline at end of file diff --git a/src/batchproof_container.h b/src/batchproof_container.h index a4c7ff6211..4780c1c32e 100644 --- a/src/batchproof_container.h +++ b/src/batchproof_container.h @@ -5,6 +5,7 
@@ #include "chain.h" #include "sigma/coinspend.h" #include "liblelantus/joinsplit.h" +#include "libspark/spend_transaction.h" extern CChain chainActive; @@ -73,6 +74,9 @@ class BatchProofContainer { void batch_lelantus(); void batch_rangeProofs(); + void add(const spark::SpendTransaction& tx); + void remove(const spark::SpendTransaction& tx); + void batch_spark(); public: bool fCollectProofs = 0; @@ -85,12 +89,15 @@ class BatchProofContainer { std::map, bool>, std::vector> tempLelantusSigmaProofs; // map (version to (Range proof, Pubcoins)) std::map>>> tempRangeProofs; + // temp spark transaction proofs + std::vector tempSparkTransactions; // containers to keep proofs for batching std::map>, std::vector> sigmaProofs; std::map, bool>, std::vector> lelantusSigmaProofs; std::map>>> rangeProofs; - + // spark transaction proofs + std::vector sparkTransactions; }; #endif //FIRO_BATCHPROOF_CONTAINER_H diff --git a/src/bip47/account.cpp b/src/bip47/account.cpp index 09ef4ce7b2..adb0c7fac6 100644 --- a/src/bip47/account.cpp +++ b/src/bip47/account.cpp @@ -254,7 +254,7 @@ bool CAccountReceiver::acceptMaskedPayload(std::vector const & ma std::unique_ptr jsplit; try { jsplit = lelantus::ParseLelantusJoinSplit(tx); - }catch (...) { + }catch (const std::exception &) { return false; } if (!jsplit) diff --git a/src/bip47/bip47utils.cpp b/src/bip47/bip47utils.cpp index 6b2e482e0d..8dacc6359b 100644 --- a/src/bip47/bip47utils.cpp +++ b/src/bip47/bip47utils.cpp @@ -170,7 +170,7 @@ GroupElement GeFromPubkey(CPubKey const & pubKey) serializedGe.push_back(0x0); try { result.deserialize(&serializedGe[0]); - } catch (...) 
{ + } catch (const std::exception &) { result = GroupElement(); } return result; diff --git a/src/chain.h b/src/chain.h index c1a32fe19c..8c39b5c970 100644 --- a/src/chain.h +++ b/src/chain.h @@ -15,6 +15,7 @@ #include #include #include "sigma/coin.h" +#include "libspark/coin.h" #include "evo/spork.h" #include "firo_params.h" #include "util.h" @@ -244,12 +245,22 @@ class CBlockIndex std::map, std::vector> sigmaMintedPubCoins; //! Map id to std::map>> lelantusMintedPubCoins; + + std::unordered_map lelantusMintData; + //! Map id to std::map> anonymitySetHash; + //! Map id to spark coin + std::map> sparkMintedCoins; + //! Map id to + std::map> sparkSetHash; + //! map spark coin S to tx hash, this is used when you run with -mobile + std::unordered_map sparkTxHash; //! Values of coin serials spent in this block sigma::spend_info_container sigmaSpentSerials; std::unordered_map lelantusSpentSerials; + std::unordered_map spentLTags; //! list of disabling sporks active at this block height //! std::map {feature name} -> {block number when feature is re-enabled again, parameter} @@ -287,7 +298,12 @@ class CBlockIndex sigmaMintedPubCoins.clear(); lelantusMintedPubCoins.clear(); + lelantusMintData.clear(); anonymitySetHash.clear(); + sparkMintedCoins.clear(); + sparkSetHash.clear(); + spentLTags.clear(); + sparkTxHash.clear(); sigmaSpentSerials.clear(); lelantusSpentSerials.clear(); activeDisablingSporks.clear(); @@ -524,17 +540,33 @@ class CDiskBlockIndex : public CBlockIndex for(auto& itr : lelantusPubCoins) { if(!itr.second.empty()) { for(auto& coin : itr.second) - lelantusMintedPubCoins[itr.first].push_back(std::make_pair(coin, uint256())); + lelantusMintedPubCoins[itr.first].push_back(std::make_pair(coin,uint256())); } } } else READWRITE(lelantusMintedPubCoins); + if (GetBoolArg("-mobile", false)) { + READWRITE(lelantusMintData); + } + READWRITE(lelantusSpentSerials); if (nHeight >= params.nLelantusFixesStartBlock) READWRITE(anonymitySetHash); } + if (!(s.GetType() & 
SER_GETHASH) + && nHeight >= params.nSparkStartBlock) { + READWRITE(sparkMintedCoins); + READWRITE(sparkSetHash); + READWRITE(spentLTags); + + if (GetBoolArg("-mobile", false)) { + READWRITE(sparkTxHash); + } + } + + if (!(s.GetType() & SER_GETHASH) && nHeight >= params.nEvoSporkStartBlock) { if (nHeight < params.nEvoSporkStopBlock && // Workaround for late rollout of version 0.14.9.3 in which nEvoSporkStopBlock was extended diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 90f4bab3e8..d14b4a9d3e 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -401,6 +401,8 @@ class CMainParams : public CChainParams { consensus.nOldSigmaBanBlock = ZC_OLD_SIGMA_BAN_BLOCK; consensus.nLelantusStartBlock = ZC_LELANTUS_STARTING_BLOCK; consensus.nLelantusFixesStartBlock = ZC_LELANTUS_FIXES_START_BLOCK; + consensus.nSparkStartBlock = SPARK_START_BLOCK; + consensus.nLelantusGracefulPeriod = LELANTUS_GRACEFUL_PERIOD; consensus.nZerocoinV2MintMempoolGracefulPeriod = ZC_V2_MINT_GRACEFUL_MEMPOOL_PERIOD; consensus.nZerocoinV2MintGracefulPeriod = ZC_V2_MINT_GRACEFUL_PERIOD; consensus.nZerocoinV2SpendMempoolGracefulPeriod = ZC_V2_SPEND_GRACEFUL_MEMPOOL_PERIOD; @@ -414,13 +416,16 @@ class CMainParams : public CChainParams { consensus.nMaxLelantusInputPerTransaction = ZC_LELANTUS_INPUT_LIMIT_PER_TRANSACTION; consensus.nMaxValueLelantusSpendPerTransaction = ZC_LELANTUS_VALUE_SPEND_LIMIT_PER_TRANSACTION; consensus.nMaxValueLelantusMint = ZC_LELANTUS_MAX_MINT; + consensus.nMaxValueSparkSpendPerTransaction = SPARK_VALUE_SPEND_LIMIT_PER_TRANSACTION; + consensus.nMaxValueSparkSpendPerBlock = SPARK_VALUE_SPEND_LIMIT_PER_BLOCK; + consensus.nMaxSparkOutLimitPerTx = SPARK_OUT_LIMIT_PER_TX; consensus.nZerocoinToSigmaRemintWindowSize = 50000; for (const auto& str : lelantus::lelantus_blacklist) { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (...) 
{ + } catch (const std::exception &) { continue; } consensus.lelantusBlacklist.insert(coin); @@ -430,7 +435,7 @@ class CMainParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (...) { + } catch (const std::exception &) { continue; } consensus.sigmaBlacklist.insert(coin); @@ -438,7 +443,7 @@ class CMainParams : public CChainParams { consensus.evoSporkKeyID = "a78fERshquPsTv2TuKMSsxTeKom56uBwLP"; consensus.nEvoSporkStartBlock = ZC_LELANTUS_STARTING_BLOCK; - consensus.nEvoSporkStopBlock = AdjustEndingBlockNumberAfterSubsidyHalving(ZC_LELANTUS_STARTING_BLOCK, 3*24*12*365, 486221); // =818275, three years after lelantus + consensus.nEvoSporkStopBlock = AdjustEndingBlockNumberAfterSubsidyHalving(ZC_LELANTUS_STARTING_BLOCK, 4*24*12*365, 486221); // =1028515, four years after lelantus, one year after spark consensus.nEvoSporkStopBlockExtensionVersion = 140903; consensus.nEvoSporkStopBlockPrevious = ZC_LELANTUS_STARTING_BLOCK + 1*24*12*365; // one year after lelantus consensus.nEvoSporkStopBlockExtensionGracefulPeriod = 24*12*14; // two weeks @@ -698,6 +703,9 @@ class CTestNetParams : public CChainParams { consensus.nLelantusStartBlock = ZC_LELANTUS_TESTNET_STARTING_BLOCK; consensus.nLelantusFixesStartBlock = ZC_LELANTUS_TESTNET_FIXES_START_BLOCK; + consensus.nSparkStartBlock = SPARK_TESTNET_START_BLOCK; + consensus.nLelantusGracefulPeriod = LELANTUS_TESTNET_GRACEFUL_PERIOD; + consensus.nZerocoinV2MintMempoolGracefulPeriod = ZC_V2_MINT_TESTNET_GRACEFUL_MEMPOOL_PERIOD; consensus.nZerocoinV2MintGracefulPeriod = ZC_V2_MINT_TESTNET_GRACEFUL_PERIOD; consensus.nZerocoinV2SpendMempoolGracefulPeriod = ZC_V2_SPEND_TESTNET_GRACEFUL_MEMPOOL_PERIOD; @@ -711,13 +719,16 @@ class CTestNetParams : public CChainParams { consensus.nMaxLelantusInputPerTransaction = ZC_LELANTUS_INPUT_LIMIT_PER_TRANSACTION; consensus.nMaxValueLelantusSpendPerTransaction = 1001 * COIN; consensus.nMaxValueLelantusMint = 1001 * COIN; + 
consensus.nMaxValueSparkSpendPerTransaction = SPARK_VALUE_SPEND_LIMIT_PER_TRANSACTION; + consensus.nMaxValueSparkSpendPerBlock = SPARK_VALUE_SPEND_LIMIT_PER_BLOCK; + consensus.nMaxSparkOutLimitPerTx = SPARK_OUT_LIMIT_PER_TX; consensus.nZerocoinToSigmaRemintWindowSize = 0; for (const auto& str : lelantus::lelantus_testnet_blacklist) { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (...) { + } catch (const std::exception &) { continue; } consensus.lelantusBlacklist.insert(coin); @@ -767,22 +778,22 @@ class CDevNetParams : public CChainParams { consensus.chainType = Consensus::chainDevnet; - consensus.nSubsidyHalvingFirst = 120; + consensus.nSubsidyHalvingFirst = 1; consensus.nSubsidyHalvingSecond = 100000; consensus.nSubsidyHalvingInterval = 100000; consensus.nSubsidyHalvingStopBlock = 1000000; consensus.stage2DevelopmentFundShare = 15; consensus.stage2ZnodeShare = 35; - consensus.stage2DevelopmentFundAddress = "TixHByoJ21dmx5xfMAXTVC4V7k53U7RncU"; + consensus.stage2DevelopmentFundAddress = "Tq99tes2sRbQ1yNUJPJ7BforYnKcitgwWq"; consensus.stage3StartTime = 1653382800; consensus.stage3StartBlock = 1514; consensus.stage3DevelopmentFundShare = 15; consensus.stage3CommunityFundShare = 10; consensus.stage3MasternodeShare = 50; - consensus.stage3DevelopmentFundAddress = "TepVKkmUo1N6sazuM2wWwV7aiG4m1BUShU"; - consensus.stage3CommunityFundAddress = "TZpbhfvQE61USHsxd55XdPpWBqu3SXB1EP"; + consensus.stage3DevelopmentFundAddress = "TfvbHyGTo8hexoKBBS8fz9Gq7g9VZQQpcg"; + consensus.stage3CommunityFundAddress = "TgoL9nh8vDTz7UB5WkBbknBksBdUaD9qbT"; consensus.nStartBlacklist = 0; consensus.nStartDuplicationCheck = 0; @@ -887,17 +898,17 @@ class CDevNetParams : public CChainParams { nPruneAfterHeight = 1000; std::vector extraNonce(4); - extraNonce[0] = 0x0a; + extraNonce[0] = 0x1a; extraNonce[1] = 0x00; extraNonce[2] = 0x00; extraNonce[3] = 0x00; - genesis = CreateGenesisBlock(ZC_GENESIS_BLOCK_TIME, 459834, 0x1e0ffff0, 2, 0 * COIN, extraNonce); + genesis 
= CreateGenesisBlock(ZC_GENESIS_BLOCK_TIME, 440914, 0x1e0ffff0, 2, 0 * COIN, extraNonce); consensus.hashGenesisBlock = genesis.GetHash(); assert(consensus.hashGenesisBlock == - uint256S("0x1fcfe26873831662874b5358c4a28611be641fbb997e62d8bf9c80f799f5caff")); + uint256S("0xc4c408cfedb0a03a259d4b3046425a0ac9582f4a33960d6a34d1555538621961")); assert(genesis.hashMerkleRoot == - uint256S("0x3a0d54ae5549a8d75cd8d0cb73c6e3577ae6be8d5706fc9411bdebbe75c97210")); + uint256S("0xb84e4b6a3743eb4f24ed7e4b88355d7d5fc0aba0cbe8f04e96556ad35c52c873")); vFixedSeeds.clear(); vSeeds.clear(); // firo test seeds @@ -940,6 +951,9 @@ class CDevNetParams : public CChainParams { consensus.nLelantusStartBlock = 1; consensus.nLelantusFixesStartBlock = 1; + consensus.nSparkStartBlock = 1500; + consensus.nLelantusGracefulPeriod = 6000; + consensus.nMaxSigmaInputPerBlock = ZC_SIGMA_INPUT_LIMIT_PER_BLOCK; consensus.nMaxValueSigmaSpendPerBlock = ZC_SIGMA_VALUE_SPEND_LIMIT_PER_BLOCK; consensus.nMaxSigmaInputPerTransaction = ZC_SIGMA_INPUT_LIMIT_PER_TRANSACTION; @@ -949,9 +963,12 @@ class CDevNetParams : public CChainParams { consensus.nMaxLelantusInputPerTransaction = ZC_LELANTUS_INPUT_LIMIT_PER_TRANSACTION; consensus.nMaxValueLelantusSpendPerTransaction = 1001 * COIN; consensus.nMaxValueLelantusMint = 1001 * COIN; + consensus.nMaxValueSparkSpendPerTransaction = SPARK_VALUE_SPEND_LIMIT_PER_TRANSACTION; + consensus.nMaxValueSparkSpendPerBlock = SPARK_VALUE_SPEND_LIMIT_PER_BLOCK; + consensus.nMaxSparkOutLimitPerTx = SPARK_OUT_LIMIT_PER_TX; consensus.nZerocoinToSigmaRemintWindowSize = 0; - consensus.evoSporkKeyID = "TdxR3tfoHiQUkowcfjEGiMBfk6GXFdajUA"; + consensus.evoSporkKeyID = "Tg6CSyHKVTUhMGGNzUQMMDRk88nWW1MdHz"; consensus.nEvoSporkStartBlock = 1; consensus.nEvoSporkStopBlock = 40000; consensus.nEvoSporkStopBlockExtensionVersion = 0; @@ -1079,7 +1096,7 @@ class CRegTestParams : public CChainParams { consensus.llmqForInstantSend = Consensus::LLMQ_5_60; consensus.nInstantSendConfirmationsRequired = 2; 
consensus.nInstantSendKeepLock = 6; - consensus.nInstantSendBlockFilteringStartHeight = 800; + consensus.nInstantSendBlockFilteringStartHeight = 500; consensus.nMTPSwitchTime = INT_MAX; consensus.nMTPStartBlock = 0; @@ -1164,6 +1181,8 @@ class CRegTestParams : public CChainParams { consensus.nOldSigmaBanBlock = 1; consensus.nLelantusStartBlock = 400; consensus.nLelantusFixesStartBlock = 400; + consensus.nSparkStartBlock = 1000; + consensus.nLelantusGracefulPeriod = 1500; consensus.nZerocoinV2MintMempoolGracefulPeriod = 1; consensus.nZerocoinV2MintGracefulPeriod = 1; consensus.nZerocoinV2SpendMempoolGracefulPeriod = 1; @@ -1177,12 +1196,15 @@ class CRegTestParams : public CChainParams { consensus.nMaxLelantusInputPerTransaction = ZC_LELANTUS_INPUT_LIMIT_PER_TRANSACTION; consensus.nMaxValueLelantusSpendPerTransaction = ZC_LELANTUS_VALUE_SPEND_LIMIT_PER_TRANSACTION; consensus.nMaxValueLelantusMint = ZC_LELANTUS_MAX_MINT; + consensus.nMaxValueSparkSpendPerTransaction = SPARK_VALUE_SPEND_LIMIT_PER_TRANSACTION; + consensus.nMaxValueSparkSpendPerBlock = SPARK_VALUE_SPEND_LIMIT_PER_BLOCK; + consensus.nMaxSparkOutLimitPerTx = SPARK_OUT_LIMIT_PER_TX; consensus.nZerocoinToSigmaRemintWindowSize = 1000; // evo spork consensus.evoSporkKeyID = "TSpmHGzQT4KJrubWa4N2CRmpA7wKMMWDg4"; // private key is cW2YM2xaeCaebfpKguBahUAgEzLXgSserWRuD29kSyKHq1TTgwRQ - consensus.nEvoSporkStartBlock = 1000; - consensus.nEvoSporkStopBlock = 1500; + consensus.nEvoSporkStartBlock = 550; + consensus.nEvoSporkStopBlock = 950; consensus.nEvoSporkStopBlockExtensionVersion = 0; // reorg @@ -1200,7 +1222,7 @@ class CRegTestParams : public CChainParams { consensus.nMnemonicBlock = 0; // moving lelantus data to v3 payload - consensus.nLelantusV3PayloadStartBlock = 1000; + consensus.nLelantusV3PayloadStartBlock = 800; // ProgPow // this can be overridden with either -ppswitchtime or -ppswitchtimefromnow flags diff --git a/src/clientversion.h b/src/clientversion.h index c1c32b9575..a2d9aa8cbb 100644 --- 
a/src/clientversion.h +++ b/src/clientversion.h @@ -17,7 +17,7 @@ #define CLIENT_VERSION_MAJOR 0 #define CLIENT_VERSION_MINOR 14 #define CLIENT_VERSION_REVISION 12 -#define CLIENT_VERSION_BUILD 1 +#define CLIENT_VERSION_BUILD 6 //! Set to true for release, false for prerelease or test build #define CLIENT_VERSION_IS_RELEASE true diff --git a/src/coin_containers.h b/src/coin_containers.h index cfe5f7355c..15da1224a1 100644 --- a/src/coin_containers.h +++ b/src/coin_containers.h @@ -59,6 +59,25 @@ using spend_info_container = std::unordered_map encryptedValue; + uint256 txHash; + + ADD_SERIALIZE_METHODS; + template + void SerializationOp(Stream& s, Operation ser_action) + { + READWRITE(isJMint); + READWRITE(amount); + READWRITE(encryptedValue); + READWRITE(txHash); + } +}; + // Custom hash for the public coin. struct CPublicCoinHash { std::size_t operator()(const lelantus::PublicCoin& coin) const noexcept; diff --git a/src/coins.cpp b/src/coins.cpp index 3e1e2e77a1..1a0f29afe0 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -216,7 +216,7 @@ unsigned int CCoinsViewCache::GetCacheSize() const { CAmount CCoinsViewCache::GetValueIn(const CTransaction& tx) const { - if (tx.IsCoinBase() || tx.IsZerocoinSpend() || tx.IsSigmaSpend() || tx.IsZerocoinRemint() || tx.IsLelantusJoinSplit()) + if (tx.IsCoinBase() || tx.HasNoRegularInputs()) return 0; CAmount nResult = 0; @@ -228,7 +228,7 @@ CAmount CCoinsViewCache::GetValueIn(const CTransaction& tx) const bool CCoinsViewCache::HaveInputs(const CTransaction& tx) const { - if (!tx.IsCoinBase() && !tx.IsZerocoinSpend() && !tx.IsSigmaSpend() && !tx.IsZerocoinRemint() && !tx.IsLelantusJoinSplit()) { + if (!tx.IsCoinBase() && !tx.HasNoRegularInputs()) { for (unsigned int i = 0; i < tx.vin.size(); i++) { if (!HaveCoin(tx.vin[i].prevout)) { return false; diff --git a/src/compat/byteswap.h b/src/compat/byteswap.h index 3c5a5c0837..a9a96af252 100644 --- a/src/compat/byteswap.h +++ b/src/compat/byteswap.h @@ -6,7 +6,7 @@ #define 
BITCOIN_COMPAT_BYTESWAP_H #if defined(HAVE_CONFIG_H) -#include "config/bitcoin-config.h" +#include "../config/bitcoin-config.h" #endif #include diff --git a/src/compat/endian.h b/src/compat/endian.h index 79d6b2fdbb..c73470a5c0 100644 --- a/src/compat/endian.h +++ b/src/compat/endian.h @@ -6,12 +6,12 @@ #define BITCOIN_COMPAT_ENDIAN_H #if defined(HAVE_CONFIG_H) -#include "config/bitcoin-config.h" +#include "../config/bitcoin-config.h" #endif #include -#include "compat/byteswap.h" +#include "byteswap.h" #if defined(HAVE_ENDIAN_H) #include diff --git a/src/consensus/params.h b/src/consensus/params.h index 73d86013e7..fd19b1c63f 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -251,6 +251,10 @@ struct Params { int nLelantusFixesStartBlock; + int nSparkStartBlock; + + int nLelantusGracefulPeriod; + // Lelantus Blacklist std::unordered_set lelantusBlacklist; @@ -318,6 +322,14 @@ struct Params { // Value of maximum lelantus mint. int64_t nMaxValueLelantusMint; + // Value of maximum spark spend per transaction + int64_t nMaxValueSparkSpendPerTransaction; + + // Value of maximum spark spend per block. 
+ int64_t nMaxValueSparkSpendPerBlock; + + unsigned nMaxSparkOutLimitPerTx; + // Number of blocks with allowed zerocoin to sigma remint transaction (after nSigmaStartBlock) int nZerocoinToSigmaRemintWindowSize; diff --git a/src/core_write.cpp b/src/core_write.cpp index 6c6b21cb83..80a3815b65 100644 --- a/src/core_write.cpp +++ b/src/core_write.cpp @@ -112,7 +112,8 @@ std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDeco } if (opcode == opcodetype::OP_SIGMASPEND || opcode == opcodetype::OP_SIGMAMINT || - opcode == opcodetype::OP_LELANTUSMINT || opcode == opcodetype::OP_LELANTUSJOINSPLIT) { + opcode == opcodetype::OP_LELANTUSMINT || opcode == opcodetype::OP_LELANTUSJOINSPLIT || + opcode == opcodetype::OP_SPARKMINT || opcode == opcodetype::OP_SPARKSPEND) { str += " "; str += HexStr(vch); break; diff --git a/src/crypto/aes.h b/src/crypto/aes.h index e9f1b52e71..5a3d37863e 100644 --- a/src/crypto/aes.h +++ b/src/crypto/aes.h @@ -8,7 +8,7 @@ #define BITCOIN_CRYPTO_AES_H extern "C" { -#include "crypto/ctaes/ctaes.h" +#include "ctaes/ctaes.h" } static const int AES_BLOCKSIZE = 16; diff --git a/src/crypto/progpow.h b/src/crypto/progpow.h index ba484d287d..6b1f7b5e4a 100644 --- a/src/crypto/progpow.h +++ b/src/crypto/progpow.h @@ -45,4 +45,4 @@ uint256 progpow_hash_full(const CProgPowHeader& header, uint256& mix_hash); /* Performs a light progpow hash (DAG loops excluded) provided header has mix_hash */ uint256 progpow_hash_light(const CProgPowHeader& header); -#endif // FIRO_PROGPOW_H +#endif // FIRO_PROGPOW_H \ No newline at end of file diff --git a/src/elysium/elysium.cpp b/src/elysium/elysium.cpp index 0f30f1c643..4d8091f491 100644 --- a/src/elysium/elysium.cpp +++ b/src/elysium/elysium.cpp @@ -2329,7 +2329,7 @@ int elysium::WalletTxBuilder( case InputMode::SIGMA: try { if (!pwalletMain->CommitSigmaTransaction(wtxNew, sigmaSelected, sigmaChanges)) return MP_ERR_COMMIT_TX; - } catch (...) 
{ + } catch (const std::exception &) { return MP_ERR_COMMIT_TX; } break; diff --git a/src/elysium/rpctx.cpp b/src/elysium/rpctx.cpp index 723bdb3661..eb30d4a504 100644 --- a/src/elysium/rpctx.cpp +++ b/src/elysium/rpctx.cpp @@ -1689,7 +1689,7 @@ UniValue elysium_sendmint(const JSONRPCRequest& request) if (result != 0) { throw JSONRPCError(result, error_str(result)); } - } catch (...) { + } catch (const std::exception &) { for (auto& id : ids) { wallet->DeleteUnconfirmedSigmaMint(id); } diff --git a/src/elysium/wallet.cpp b/src/elysium/wallet.cpp index f1771ea0fe..f8686dfcbe 100644 --- a/src/elysium/wallet.cpp +++ b/src/elysium/wallet.cpp @@ -197,7 +197,7 @@ SigmaPrivateKey Wallet::GetKey(const SigmaMint &mint) // Try all mint wallets try { return mintWalletV1.GeneratePrivateKey(mint.seedId); - } catch (...) { + } catch (const std::exception &) { return mintWalletV0.GeneratePrivateKey(mint.seedId); } } diff --git a/src/evo/specialtx.cpp b/src/evo/specialtx.cpp index 35e044b37b..dc9b261887 100644 --- a/src/evo/specialtx.cpp +++ b/src/evo/specialtx.cpp @@ -42,6 +42,9 @@ bool CheckSpecialTx(const CTransaction& tx, const CBlockIndex* pindexPrev, CVali return llmq::CheckLLMQCommitment(tx, pindexPrev, state); case TRANSACTION_SPORK: return CheckSporkTx(tx, pindexPrev, state); + case TRANSACTION_SPARK: + // spark transaction checks are done in other places + return true; case TRANSACTION_LELANTUS: // lelantus transaction checks are done in other places return true; @@ -70,6 +73,8 @@ bool ProcessSpecialTx(const CTransaction& tx, const CBlockIndex* pindex, CValida return true; case TRANSACTION_LELANTUS: return true; + case TRANSACTION_SPARK: + return true; } return state.DoS(100, false, REJECT_INVALID, "bad-tx-type-proc"); @@ -95,6 +100,8 @@ bool UndoSpecialTx(const CTransaction& tx, const CBlockIndex* pindex) return true; case TRANSACTION_LELANTUS: return true; + case TRANSACTION_SPARK: + return true; } return false; diff --git a/src/firo_params.h b/src/firo_params.h index 
e8b8d26e72..e86189b2ee 100644 --- a/src/firo_params.h +++ b/src/firo_params.h @@ -98,6 +98,10 @@ static const int64_t DUST_HARD_LIMIT = 1000; // 0.00001 FIRO mininput #define ZC_LELANTUS_MAX_MINT_NUM 65000 #define ZC_LELANTUS_SET_START_SIZE 16000 +// limit of coins number per id in Spark +#define ZC_SPARK_MAX_MINT_NUM 32000 +#define ZC_SPARK_SET_START_SIZE 8000 + // Version of index that introduced storing accumulators and coin serials #define ZC_ADVANCED_INDEX_VERSION 130500 // Version of wallet.db entry that introduced storing extra information for mints @@ -143,6 +147,15 @@ static const int64_t DUST_HARD_LIMIT = 1000; // 0.00001 FIRO mininput // Amount of lelantus spends allowed per transaction #define ZC_LELANTUS_INPUT_LIMIT_PER_TRANSACTION 50 +// Number of shielded spark outputs per tx +#define SPARK_OUT_LIMIT_PER_TX 16 + +// Value of spark spends allowed per transaction +#define SPARK_VALUE_SPEND_LIMIT_PER_TRANSACTION (10000 * COIN) + +// Value of spark spends allowed per block +#define SPARK_VALUE_SPEND_LIMIT_PER_BLOCK (20000 * COIN) + // Maximum amount of lelantus mint #define ZC_LELANTUS_MAX_MINT (5001 * COIN) @@ -166,6 +179,12 @@ static const int64_t DUST_HARD_LIMIT = 1000; // 0.00001 FIRO mininput /** Probability (percentage) that a Dandelion transaction enters fluff phase */ #define DANDELION_FLUFF 10 +// Spark +#define SPARK_START_BLOCK 819300 // Approx Jan 18 2024 8:00 AM UTC +#define SPARK_TESTNET_START_BLOCK 107000 +#define LELANTUS_GRACEFUL_PERIOD 1223500 // Approx Jan 30 2026 +#define LELANTUS_TESTNET_GRACEFUL_PERIOD 140000 + // Versions of zerocoin mint/spend transactions #define ZEROCOIN_TX_VERSION_3 30 #define ZEROCOIN_TX_VERSION_3_1 31 diff --git a/src/fuzz/FuzzedDataProvider.h b/src/fuzz/FuzzedDataProvider.h new file mode 100644 index 0000000000..9f66afc9e7 --- /dev/null +++ b/src/fuzz/FuzzedDataProvider.h @@ -0,0 +1,398 @@ +//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===// +// +// Part of the LLVM Project, 
under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// A single header library providing an utility class to break up an array of +// bytes. Whenever run on the same input, provides the same output, as long as +// its methods are called in the same order, with the same arguments. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ +#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// In addition to the comments below, the API is also briefly documented at +// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider +class FuzzedDataProvider { + public: + // |data| is an array of length |size| that the FuzzedDataProvider wraps to + // provide more granular access. |data| must outlive the FuzzedDataProvider. + FuzzedDataProvider(const uint8_t *data, size_t size) + : data_ptr_(data), remaining_bytes_(size) {} + ~FuzzedDataProvider() = default; + + // See the implementation below (after the class definition) for more verbose + // comments for each of the methods. + + // Methods returning std::vector of bytes. These are the most popular choice + // when splitting fuzzing input into pieces, as every piece is put into a + // separate buffer (i.e. ASan would catch any under-/overflow) and the memory + // will be released automatically. + template std::vector ConsumeBytes(size_t num_bytes); + template + std::vector ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0); + template std::vector ConsumeRemainingBytes(); + + // Methods returning strings. Use only when you need a std::string or a null + // terminated C-string. 
Otherwise, prefer the methods returning std::vector. + std::string ConsumeBytesAsString(size_t num_bytes); + std::string ConsumeRandomLengthString(size_t max_length); + std::string ConsumeRandomLengthString(); + std::string ConsumeRemainingBytesAsString(); + + // Methods returning integer values. + template T ConsumeIntegral(); + template T ConsumeIntegralInRange(T min, T max); + + // Methods returning floating point values. + template T ConsumeFloatingPoint(); + template T ConsumeFloatingPointInRange(T min, T max); + + // 0 <= return value <= 1. + template T ConsumeProbability(); + + bool ConsumeBool(); + + // Returns a value chosen from the given enum. + template T ConsumeEnum(); + + // Returns a value from the given array. + template T PickValueInArray(const T (&array)[size]); + template + T PickValueInArray(const std::array &array); + template T PickValueInArray(std::initializer_list list); + + // Writes data to the given destination and returns number of bytes written. + size_t ConsumeData(void *destination, size_t num_bytes); + + // Reports the remaining bytes available for fuzzed input. + size_t remaining_bytes() { return remaining_bytes_; } + + private: + FuzzedDataProvider(const FuzzedDataProvider &) = delete; + FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete; + + void CopyAndAdvance(void *destination, size_t num_bytes); + + void Advance(size_t num_bytes); + + template + std::vector ConsumeBytes(size_t size, size_t num_bytes); + + template TS ConvertUnsignedToSigned(TU value); + + const uint8_t *data_ptr_; + size_t remaining_bytes_; +}; + +// Returns a std::vector containing |num_bytes| of input data. If fewer than +// |num_bytes| of data remain, returns a shorter std::vector containing all +// of the data that's left. Can be used with any byte sized type, such as +// char, unsigned char, uint8_t, etc. 
+template +std::vector FuzzedDataProvider::ConsumeBytes(size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + return ConsumeBytes(num_bytes, num_bytes); +} + +// Similar to |ConsumeBytes|, but also appends the terminator value at the end +// of the resulting vector. Useful, when a mutable null-terminated C-string is +// needed, for example. But that is a rare case. Better avoid it, if possible, +// and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods. +template +std::vector FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes, + T terminator) { + num_bytes = std::min(num_bytes, remaining_bytes_); + std::vector result = ConsumeBytes(num_bytes + 1, num_bytes); + result.back() = terminator; + return result; +} + +// Returns a std::vector containing all remaining bytes of the input data. +template +std::vector FuzzedDataProvider::ConsumeRemainingBytes() { + return ConsumeBytes(remaining_bytes_); +} + +// Returns a std::string containing |num_bytes| of input data. Using this and +// |.c_str()| on the resulting string is the best way to get an immutable +// null-terminated C string. If fewer than |num_bytes| of data remain, returns +// a shorter std::string containing all of the data that's left. +inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) { + static_assert(sizeof(std::string::value_type) == sizeof(uint8_t), + "ConsumeBytesAsString cannot convert the data to a string."); + + num_bytes = std::min(num_bytes, remaining_bytes_); + std::string result( + reinterpret_cast(data_ptr_), num_bytes); + Advance(num_bytes); + return result; +} + +// Returns a std::string of length from 0 to |max_length|. When it runs out of +// input data, returns what remains of the input. Designed to be more stable +// with respect to a fuzzer inserting characters than just picking a random +// length and then consuming that many bytes with |ConsumeBytes|. 
+inline std::string +FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) { + // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\" + // followed by anything else to the end of the string. As a result of this + // logic, a fuzzer can insert characters into the string, and the string + // will be lengthened to include those new characters, resulting in a more + // stable fuzzer than picking the length of a string independently from + // picking its contents. + std::string result; + + // Reserve the anticipated capaticity to prevent several reallocations. + result.reserve(std::min(max_length, remaining_bytes_)); + for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) { + char next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next == '\\' && remaining_bytes_ != 0) { + next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next != '\\') + break; + } + result += next; + } + + result.shrink_to_fit(); + return result; +} + +// Returns a std::string of length from 0 to |remaining_bytes_|. +inline std::string FuzzedDataProvider::ConsumeRandomLengthString() { + return ConsumeRandomLengthString(remaining_bytes_); +} + +// Returns a std::string containing all remaining bytes of the input data. +// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string +// object. +inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() { + return ConsumeBytesAsString(remaining_bytes_); +} + +// Returns a number in the range [Type's min, Type's max]. The value might +// not be uniformly distributed in the given range. If there's no input data +// left, always returns |min|. +template T FuzzedDataProvider::ConsumeIntegral() { + return ConsumeIntegralInRange(std::numeric_limits::min(), + std::numeric_limits::max()); +} + +// Returns a number in the range [min, max] by consuming bytes from the +// input data. The value might not be uniformly distributed in the given +// range. 
If there's no input data left, always returns |min|. |min| must +// be less than or equal to |max|. +template +T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) { + static_assert(std::is_integral::value, "An integral type is required."); + static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type."); + + if (min > max) + abort(); + + // Use the biggest type possible to hold the range and the result. + uint64_t range = static_cast(max) - min; + uint64_t result = 0; + size_t offset = 0; + + while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 && + remaining_bytes_ != 0) { + // Pull bytes off the end of the seed data. Experimentally, this seems to + // allow the fuzzer to more easily explore the input space. This makes + // sense, since it works by modifying inputs that caused new code to run, + // and this data is often used to encode length of data read by + // |ConsumeBytes|. Separating out read lengths makes it easier modify the + // contents of the data that is actually read. + --remaining_bytes_; + result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_]; + offset += CHAR_BIT; + } + + // Avoid division by 0, in case |range + 1| results in overflow. + if (range != std::numeric_limits::max()) + result = result % (range + 1); + + return static_cast(min + result); +} + +// Returns a floating point value in the range [Type's lowest, Type's max] by +// consuming bytes from the input data. If there's no input data left, always +// returns approximately 0. +template T FuzzedDataProvider::ConsumeFloatingPoint() { + return ConsumeFloatingPointInRange(std::numeric_limits::lowest(), + std::numeric_limits::max()); +} + +// Returns a floating point value in the given range by consuming bytes from +// the input data. If there's no input data left, returns |min|. Note that +// |min| must be less than or equal to |max|. 
+template +T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) { + if (min > max) + abort(); + + T range = .0; + T result = min; + constexpr T zero(.0); + if (max > zero && min < zero && max > min + std::numeric_limits::max()) { + // The diff |max - min| would overflow the given floating point type. Use + // the half of the diff as the range and consume a bool to decide whether + // the result is in the first of the second part of the diff. + range = (max / 2.0) - (min / 2.0); + if (ConsumeBool()) { + result += range; + } + } else { + range = max - min; + } + + return result + range * ConsumeProbability(); +} + +// Returns a floating point number in the range [0.0, 1.0]. If there's no +// input data left, always returns 0. +template T FuzzedDataProvider::ConsumeProbability() { + static_assert(std::is_floating_point::value, + "A floating point type is required."); + + // Use different integral types for different floating point types in order + // to provide better density of the resulting values. + using IntegralType = + typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t, + uint64_t>::type; + + T result = static_cast(ConsumeIntegral()); + result /= static_cast(std::numeric_limits::max()); + return result; +} + +// Reads one byte and returns a bool, or false when no data remains. +inline bool FuzzedDataProvider::ConsumeBool() { + return 1 & ConsumeIntegral(); +} + +// Returns an enum value. The enum must start at 0 and be contiguous. It must +// also contain |kMaxValue| aliased to its largest (inclusive) value. Such as: +// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue }; +template T FuzzedDataProvider::ConsumeEnum() { + static_assert(std::is_enum::value, "|T| must be an enum type."); + return static_cast( + ConsumeIntegralInRange(0, static_cast(T::kMaxValue))); +} + +// Returns a copy of the value selected from the given fixed-size |array|. 
+template +T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; +} + +template +T FuzzedDataProvider::PickValueInArray(const std::array &array) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; +} + +template +T FuzzedDataProvider::PickValueInArray(std::initializer_list list) { + // TODO(Dor1s): switch to static_assert once C++14 is allowed. + if (!list.size()) + abort(); + + return *(list.begin() + ConsumeIntegralInRange(0, list.size() - 1)); +} + +// Writes |num_bytes| of input data to the given destination pointer. If there +// is not enough data left, writes all remaining bytes. Return value is the +// number of bytes written. +// In general, it's better to avoid using this function, but it may be useful +// in cases when it's necessary to fill a certain buffer or object with +// fuzzing data. +inline size_t FuzzedDataProvider::ConsumeData(void *destination, + size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + CopyAndAdvance(destination, num_bytes); + return num_bytes; +} + +// Private methods. +inline void FuzzedDataProvider::CopyAndAdvance(void *destination, + size_t num_bytes) { + std::memcpy(destination, data_ptr_, num_bytes); + Advance(num_bytes); +} + +inline void FuzzedDataProvider::Advance(size_t num_bytes) { + if (num_bytes > remaining_bytes_) + abort(); + + data_ptr_ += num_bytes; + remaining_bytes_ -= num_bytes; +} + +template +std::vector FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) { + static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type."); + + // The point of using the size-based constructor below is to increase the + // odds of having a vector object with capacity being equal to the length. 
+ // That part is always implementation specific, but at least both libc++ and + // libstdc++ allocate the requested number of bytes in that constructor, + // which seems to be a natural choice for other implementations as well. + // To increase the odds even more, we also call |shrink_to_fit| below. + std::vector result(size); + if (size == 0) { + if (num_bytes != 0) + abort(); + return result; + } + + CopyAndAdvance(result.data(), num_bytes); + + // Even though |shrink_to_fit| is also implementation specific, we expect it + // to provide an additional assurance in case vector's constructor allocated + // a buffer which is larger than the actual amount of data we put inside it. + result.shrink_to_fit(); + return result; +} + +template +TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) { + static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types."); + static_assert(!std::numeric_limits::is_signed, + "Source type must be unsigned."); + + // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream. + if (std::numeric_limits::is_modulo) + return static_cast(value); + + // Avoid using implementation-defined unsigned to signed conversions. + // To learn more, see https://stackoverflow.com/questions/13150449. 
+ if (value <= std::numeric_limits::max()) { + return static_cast(value); + } else { + constexpr auto TS_min = std::numeric_limits::min(); + return TS_min + static_cast(value - TS_min); + } +} + +#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + diff --git a/src/fuzz/Makefile b/src/fuzz/Makefile new file mode 100644 index 0000000000..a2d2979e79 --- /dev/null +++ b/src/fuzz/Makefile @@ -0,0 +1,134 @@ +CXX := hfuzz-clang++ + +CXXFLAGS := -DHAVE_CONFIG_H -I../../src/ -iquote ../../src/config/ -iquote ../secp256k1/ -iquote ../secp256k1/src/ -iquote ../secp256k1/include/ +CXXFLAGS2 := -DHAVE_CONFIG_H + +LIBS := -lcrypto -lstdc++ -lboost_thread -lboost_filesystem -lboost_program_options -lboost_chrono +LIBS2 := -lstdc++ -lcrypto + +INCLUDE_HEADER := -include ../streams.h -include ../version.h + +BPPLUS_SRCS := libspark/bpplus_fuzz.cpp ../libspark/bpplus.cpp ../libspark/util.cpp fuzzing_utilities.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../crypto/aes.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../util.cpp ../utiltime.cpp ../utilstrencodings.cpp ../random.cpp ../chainparamsbase.cpp +BPPLUS_OUTPUT := libspark/bpplus_hfuzz +BPPLUS_OUTPUT_DEBUG := libspark/bpplus_debug + +BECH32_SRCS := libspark/bech32_fuzz_2.cpp ../libspark/bech32.cpp +BECH32_OUTPUT := libspark/bech32_hfuzz +BECH32_OUTPUT_DEBUG := libspark/bech32_debug + +AEAD_SRCS := libspark/aead_fuzz.cpp ../libspark/aead.cpp ../libspark/util.cpp ../libspark/kdf.cpp ../libspark/hash.cpp ../fuzz/fuzzing_utilities.cpp ../crypto/aes.cpp ../support/lockedpool.cpp ../support/cleanse.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp +AEAD_OUTPUT := libspark/aead_hfuzz +AEAD_OUTPUT_DEBUG := libspark/aead_debug + +GROOTLE_SRCS := libspark/grootle_fuzz.cpp ../libspark/grootle.cpp ../libspark/util.cpp fuzzing_utilities.cpp ../libspark/hash.cpp 
../libspark/kdf.cpp ../libspark/transcript.cpp ../crypto/aes.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../util.cpp ../utiltime.cpp ../utilstrencodings.cpp ../random.cpp ../chainparamsbase.cpp +GROOTLE_OUTPUT := libspark/grootle_hfuzz +GROOTLE_OUTPUT_DEBUG := libspark/grootle_debug + +CHAUM_SRCS := libspark/chaum_fuzz.cpp ../libspark/chaum.cpp ../libspark/transcript.cpp fuzzing_utilities.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp +CHAUM_OUTPUT := libspark/chaum_hfuzz +CHAUM_OUTPUT_DEBUG := libspark/chaum_debug + +SCHNORR_SRCS := libspark/schnorr_fuzz.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../libspark/transcript.cpp ../support/cleanse.cpp +SCHNORR_OUTPUT := libspark/schnorr_hfuzz +SCHNORR_OUTPUT_DEBUG := libspark/schnorr_debug + +COIN_SRCS := libspark/coin_fuzz.cpp ../libspark/coin.cpp ../libspark/params.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/*.cpp ../uint256.cpp ../utilstrencodings.cpp fuzzing_utilities.cpp ../libspark/aead.cpp ../libspark/util.cpp ../libspark/keys.cpp ../libspark/f4grumble.cpp ../libspark/hash.cpp ../libspark/bech32.cpp ../libspark/kdf.cpp +COIN_OUTPUT := libspark/coin_hfuzz +COIN_OUTPUT_DEBUG := libspark/coin_debug + +MINT_TRANSACTION_SRCS := libspark/mint_transaction_fuzz.cpp ../libspark/mint_transaction.cpp ../libspark/coin.cpp ../libspark/keys.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../libspark/util.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../libspark/f4grumble.cpp ../libspark/params.cpp 
../libspark/bech32.cpp ../libspark/aead.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../uint256.cpp ../utilstrencodings.cpp +MINT_TRANSACTION_OUTPUT := libspark/mint_transaction_hfuzz +MINT_TRANSACTION_OUTPUT_DEBUG := libspark/mint_transaction_debug + +SPEND_TRANSACTION_SRCS := libspark/spend_transaction_fuzz.cpp ../libspark/spend_transaction.cpp ../libspark/coin.cpp ../libspark/keys.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../libspark/util.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../libspark/f4grumble.cpp ../libspark/params.cpp ../libspark/bech32.cpp ../libspark/aead.cpp ../libspark/chaum.cpp ../libspark/bpplus.cpp ../libspark/grootle.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../uint256.cpp ../utilstrencodings.cpp ../util.cpp ../utiltime.cpp ../chainparamsbase.cpp ../random.cpp +SPEND_TRANSACTION_OUTPUT := libspark/spend_transaction_hfuzz +SPEND_TRANSACTION_OUTPUT_DEBUG := libspark/spend_transaction_debug + +F4GRUMBLE_SRCS := libspark/f4grumble_fuzz.cpp ../libspark/f4grumble.cpp ../libspark/util.cpp ../libspark/kdf.cpp ../libspark/hash.cpp ../crypto/aes.cpp ../support/lockedpool.cpp ../support/cleanse.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp +F4GRUMBLE_OUTPUT := libspark/f4grumble_hfuzz +F4GRUMBLE_OUTPUT_DEBUG := libspark/f4grumble_debug + +DEBUG_FLAGS := -g -O0 -ggdb + +bpplus: $(BPPLUS_OUTPUT) +$(BPPLUS_OUTPUT): $(BPPLUS_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +bpplus_debug: $(BPPLUS_OUTPUT_DEBUG) +$(BPPLUS_OUTPUT_DEBUG): $(BPPLUS_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +bech32: $(BECH32_OUTPUT) 
+$(BECH32_OUTPUT): $(BECH32_SRCS) + $(CXX) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +bech32_debug: $(BECH32_OUTPUT_DEBUG) +$(BECH32_OUTPUT_DEBUG): $(BECH32_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +aead: $(AEAD_OUTPUT) +$(AEAD_OUTPUT): $(AEAD_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +aead_debug: $(AEAD_OUTPUT_DEBUG) +$(AEAD_OUTPUT_DEBUG): $(AEAD_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +grootle: $(GROOTLE_OUTPUT) +$(GROOTLE_OUTPUT): $(GROOTLE_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +grootle_debug: $(GROOTLE_OUTPUT_DEBUG) +$(GROOTLE_OUTPUT_DEBUG): $(GROOTLE_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +chaum: $(CHAUM_OUTPUT) +$(CHAUM_OUTPUT): $(CHAUM_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +chaum_debug: $(CHAUM_OUTPUT_DEBUG) +$(CHAUM_OUTPUT_DEBUG): $(CHAUM_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +schnorr: $(SCHNORR_OUTPUT) +$(SCHNORR_OUTPUT): $(SCHNORR_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +schnorr_debug: $(SCHNORR_OUTPUT_DEBUG) +$(SCHNORR_OUTPUT_DEBUG): $(SCHNORR_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +coin: $(COIN_OUTPUT) +$(COIN_OUTPUT): $(COIN_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +coin_debug: $(COIN_OUTPUT_DEBUG) +$(COIN_OUTPUT_DEBUG): $(COIN_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +mint_transaction: $(MINT_TRANSACTION_OUTPUT) +$(MINT_TRANSACTION_OUTPUT): $(MINT_TRANSACTION_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +mint_transaction_debug: $(MINT_TRANSACTION_OUTPUT_DEBUG) +$(MINT_TRANSACTION_OUTPUT_DEBUG): $(MINT_TRANSACTION_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +spend_transaction: $(SPEND_TRANSACTION_OUTPUT) +$(SPEND_TRANSACTION_OUTPUT): $(SPEND_TRANSACTION_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +spend_transaction_debug: $(SPEND_TRANSACTION_OUTPUT_DEBUG) +$(SPEND_TRANSACTION_OUTPUT_DEBUG): 
$(SPEND_TRANSACTION_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +f4grumble: $(F4GRUMBLE_OUTPUT) +$(F4GRUMBLE_OUTPUT): $(F4GRUMBLE_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +f4grumble_debug: $(F4GRUMBLE_OUTPUT_DEBUG) +$(F4GRUMBLE_OUTPUT_DEBUG): $(F4GRUMBLE_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +clean: + rm -f $(BPPLUS_OUTPUT) $(BPPLUS_OUTPUT_DEBUG) $(BECH32_OUTPUT) $(BECH32_OUTPUT_DEBUG) $(AEAD_OUTPUT) $(AEAD_OUTPUT_DEBUG) $(GROOTLE_OUTPUT) $(GROOTLE_OUTPUT_DEBUG) $(CHAUM_OUTPUT) $(CHAUM_OUTPUT_DEBUG) $(SCHNORR_OUTPUT) $(SCHNORR_OUTPUT_DEBUG) $(COIN_OUTPUT) $(COIN_OUTPUT_DEBUG) $(MINT_TRANSACTION_OUTPUT) $(MINT_TRANSACTION_OUTPUT_DEBUG) $(SPEND_TRANSACTION_OUTPUT) $(SPEND_TRANSACTION_OUTPUT_DEBUG) *.o diff --git a/src/fuzz/README.md b/src/fuzz/README.md new file mode 100644 index 0000000000..e3c68835e9 --- /dev/null +++ b/src/fuzz/README.md @@ -0,0 +1,142 @@ +# Fuzzing libspark + +## Quickstart Guide +To quickly get started fuzzing libspark using honggfuzz: + +### Build firo +- clone this repo: +``` +git clone -b spark https://github.com/firoorg/firo.git +``` +- Build firo: Follow instruction from https://github.com/firoorg/firo/tree/spark#readme + +Once the build is successful, we have to install honggfuzz and required dependencies. 
+ +### Installing fuzzer and Dependencies +- Install honggfuzz (https://github.com/google/honggfuzz) +``` +sudo apt-get install binutils-dev libunwind-dev libblocksruntime-dev clang +git clone https://github.com/google/honggfuzz.git +cd honggfuzz +make +sudo make install +``` +For more information you can look at https://github.com/google/honggfuzz/blob/master/docs/USAGE.md + +You might also need to install the following boost and ssl dependencies in order to compile the fuzzing harness: + +``` +sudo apt install libboost-dev +sudo apt install libssl-dev +sudo apt install libstdc++-12-dev +sudo apt install libboost-filesystem-dev +sudo apt install libboost-thread-dev +sudo apt install libboost-program-options-dev +sudo apt install libboost-chrono-dev +``` + +### Fuzzing using honggfuzz +* In order to fuzz `firo/src/libpark` using Honggfuzz: + +``` +cd firo/src/fuzz/ +export CC=hfuzz-clang +export CXX=hfuzz-clang++ +``` + +To compile with `hfuzz-clang++`, inside src/fuzz run: + +``` +make +``` + +For example(for bpplus): +``` +make bpplus +``` +The above command will generate an instrumented binary with name `_hfuzz` (eg: bpplus_hfuzz) inside src/fuzz/libspark. + +The fuzzing harness of the following spark files is availabe: aead, bech32, bpplus, chaum, coin, f4grumble, grootle, mint_transaction, schnorr and spend_transaction. + +* To start fuzzing: + +1. create directories for input corpora and for saving all the crashes +``` +mkdir input crashes +``` +2. Inside the crashes directory run: +``` +honggfuzz -i input -- ./libspark/_hfuzz ___FILE___ +``` + +example: +1. `mkdir input crashes` +2. `cd crashes` +2. `honggfuzz -i ../input -- ./../libspark/bpplus_hfuzz ___FILE___` +3. To stop press `ctrl+c` + +Here we are providing an empty corpora. In case of an already available corpora, we can provide the availabe corpora. +The flag `-i` is for the input folder which we are providing `./../_hfuzz>` is the target binary which we want to fuzz. 
+ +### Analyzing the crashes + +If there is a crash, the reason for the crash can be found in HONGGFUZZ.REPORT.TXT or simply by running +``` +./libspark/ +``` + +Example: +``` +./libspark/bpplus_hfuzz SIGABRT.PC.7ffff7a8400b.STACK.1b5b5f0067.CODE.-6.ADDR.0.INSTR.mov____0x108(%rsp),%rax +``` + +To debug or to do the rootcause analysis, gdb debugger can be used. to debug using gdb debugger: + +1. First compile the harness using gdb flags `-g -O0 -ggdb`. To compile using gdb debugger, inside `src/fuzz` run: +``` +make _debug +``` +Example: +``` +make bpplus_debug +``` + +2. start the debugger by running: +``` +gdb --args +``` +Example: +``` +gdb --args bpplus_debug SIGABRT.PC.7ffff7a8400b.STACK.1b5b5f0067.CODE.-6.ADDR.0.INSTR.mov____0x108(%rsp),%rax +``` +This will start the debugger. + +3. You can do heap analysis by running `heap-analysis` inside the debugger and/or `bt` for backtracing. + + +### Generating a Coverage Report using kcov +* Install kcov (https://github.com/SimonKagstrom/kcov/tree/master) +``` +sudo apt-get install binutils-dev libssl-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev +git clone https://github.com/SimonKagstrom/kcov.git +cd /path/to/kcov/source/dir +mkdir build +cd build +cmake .. +make +sudo make install +``` +Once successfully installed, follow the below instructions to generate the code-coverage + +1. First compile the harness with gdb flag. run `make _debug` inside src/fuzz to compile using gdb debugger. +2. take the input_folder as the input corpora from fuzzing or one can also create it by running: `honggfuzz -i -– ./_hfuzz ___FILE___ @@`. This will start the fuzzer. Kill it by `ctrl+C`. The fuzzer will generate some random inputs inside the input_folder. Since kcov will generate coverage for each input inside the input_folder, it's preffered to have only a few inputs, otherwise it will take a long time to generate the entire coverage. + +3. 
inside the `generate_coverage.sh` replace the input_folder, output_folder and fuzz_exe by your inpur corpora, coverage output folder and harness binary. +4. run `./generate_coverage.sh`. This will generated a merged output for all the inputs present in the input_folder. +5. To view the result run run `firefox ./merged-output/index.html`. + +6. alternatively or if you are on a VM, go inside coverage output folder and then merged-output +7. run `python3 -m http.server`. This will start a http server at http://0.0.0.0:8000/ +8. open your browser and paste http://0.0.0.0:8000/ to see the result. + +NOTE: to view the coverage for every dependent file, `generate_coverage.sh` should be in the root folder. Also, you should either delete the previous port or start the server on new port by running `python3 -m http.server ` for different files. \ No newline at end of file diff --git a/src/fuzz/fuzzing_utilities.cpp b/src/fuzz/fuzzing_utilities.cpp new file mode 100644 index 0000000000..af9b1f2c65 --- /dev/null +++ b/src/fuzz/fuzzing_utilities.cpp @@ -0,0 +1,89 @@ +#include "fuzzing_utilities.h" + +FuzzedSecp256k1Object::FuzzedSecp256k1Object(FuzzedDataProvider *fdp) { + this->fdp = fdp; +} + +secp_primitives::GroupElement FuzzedSecp256k1Object::GetGroupElement() { + char* x = (char *)this->fdp->ConsumeBytes(256).data(); + char* y = (char *)this->fdp->ConsumeBytes(256).data(); + secp_primitives::GroupElement ge = secp_primitives::GroupElement(x, y); + + return ge; +} + +secp_primitives::Scalar FuzzedSecp256k1Object::GetScalar() { + uint64_t value = this->fdp->ConsumeIntegral(); + secp_primitives::Scalar s = secp_primitives::Scalar(value); + + return s; +} + +secp_primitives::GroupElement FuzzedSecp256k1Object::GetMemberGroupElement() { + secp_primitives::GroupElement ge; + ge.randomize(); + return ge; +} + +std::vector FuzzedSecp256k1Object::GetMemberGroupElements(size_t len) { + std::vector ge_vec; + ge_vec.resize(len); + for (size_t i = 0; i < len; i++) { + ge_vec[i] = 
(GetMemberGroupElement()); + } + return ge_vec; +} + +std::vector FuzzedSecp256k1Object::GetRandomGroupVector(size_t len) { + std::vector result; + result.resize(len); + for (size_t i = 0; i < len; i++) { + result[i].randomize(); + } + return result; +} + +std::vector FuzzedSecp256k1Object::GetGroupElements(int len) { + std::vector ge_vec; + ge_vec.reserve(len); + for (int i = 0; i < len; i++) { + ge_vec.push_back(GetGroupElement()); + } + + return ge_vec; +} + +std::vector FuzzedSecp256k1Object::GetScalars(size_t len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar()); + } + + return scalar_vec; +} + +std::vector FuzzedSecp256k1Object::GetScalarsVector(size_t len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar()); + } + + return scalar_vec; +} + +secp_primitives::Scalar FuzzedSecp256k1Object::GetScalar_modified() { + secp_primitives::Scalar s = secp_primitives::Scalar(this->fdp->ConsumeBytes(256).data()); + return s; +} + +std::vector FuzzedSecp256k1Object::GetScalars_modified(int len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar_modified()); + } + + return scalar_vec; +} \ No newline at end of file diff --git a/src/fuzz/fuzzing_utilities.h b/src/fuzz/fuzzing_utilities.h new file mode 100644 index 0000000000..360d27c7e3 --- /dev/null +++ b/src/fuzz/fuzzing_utilities.h @@ -0,0 +1,23 @@ +#include "FuzzedDataProvider.h" +#include "../secp256k1/include/Scalar.h" +#include "../secp256k1/include/GroupElement.h" + +class FuzzedSecp256k1Object { + public: + FuzzedSecp256k1Object(FuzzedDataProvider *fdp); + + FuzzedDataProvider *fdp; + + secp_primitives::GroupElement GetGroupElement(); + secp_primitives::Scalar GetScalar(); + secp_primitives::GroupElement GetMemberGroupElement(); + secp_primitives::Scalar GetScalar_modified(); + + std::vector 
GetGroupElements(int len); + std::vector GetScalars(size_t len); + std::vector GetMemberGroupElements(size_t len); + std::vector GetRandomGroupVector(size_t len); + std::vector GetScalars_modified(int len); + std::vector GetScalarsVector(size_t len); + +}; \ No newline at end of file diff --git a/src/fuzz/generate_coverage.sh b/src/fuzz/generate_coverage.sh new file mode 100755 index 0000000000..bd91f0aaa6 --- /dev/null +++ b/src/fuzz/generate_coverage.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +input_folder="../../src/fuzz/inputs/bpplus_inputs" +output_folder="../../src/fuzz/coverage_result/bpplus_coverage" +fuzz_exe="../../src/fuzz/libspark/bpplus_debug" + +mkdir $output_folder + +number_of_files=$(ls $input_folder | wc | awk '{print $1}') +echo "Number of input files to test: $number_of_files" + +count=0 + +for i in $(ls $input_folder); +do + kcov --include-path=. ./$output_folder/input_$count ./$fuzz_exe --stdout -d ./$input_folder/$i > /dev/null; + ((count++)); + echo "[++] Count of files processed: $count"; +done + +kcov --merge ./$output_folder/merged-output ./$output_folder/input_* \ No newline at end of file diff --git a/src/fuzz/libspark/aead_fuzz.cpp b/src/fuzz/libspark/aead_fuzz.cpp new file mode 100644 index 0000000000..b1b087dafb --- /dev/null +++ b/src/fuzz/libspark/aead_fuzz.cpp @@ -0,0 +1,24 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/aead.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + secp_primitives::GroupElement ge = fsp.GetGroupElement(); + std::string additional_data = fdp.ConsumeBytesAsString(len); + int fuzzed_message = fdp.ConsumeIntegral(); + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << fuzzed_message; + + spark::AEADEncryptedData aed = spark::AEAD::encrypt(ge, additional_data, ser); + ser = spark::AEAD::decrypt_and_verify(ge, additional_data, aed); + int 
received_fuzzed_message; + ser >> received_fuzzed_message; + assert(fuzzed_message == received_fuzzed_message); + + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/aead_fuzz_random_key.cpp b/src/fuzz/libspark/aead_fuzz_random_key.cpp new file mode 100644 index 0000000000..631f027dd7 --- /dev/null +++ b/src/fuzz/libspark/aead_fuzz_random_key.cpp @@ -0,0 +1,24 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/aead.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + secp_primitives::GroupElement ge = fsp.GetMemberGroupElement(); + std::string additional_data = fdp.ConsumeBytesAsString(len); + int fuzzed_message = fdp.ConsumeIntegral(); + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << fuzzed_message; + + spark::AEADEncryptedData aed = spark::AEAD::encrypt(ge, additional_data, ser); + ser = spark::AEAD::decrypt_and_verify(ge, additional_data, aed); + int received_fuzzed_message; + ser >> received_fuzzed_message; + assert(fuzzed_message == received_fuzzed_message); + + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/bech32_fuzz.cpp b/src/fuzz/libspark/bech32_fuzz.cpp new file mode 100644 index 0000000000..192b23b27d --- /dev/null +++ b/src/fuzz/libspark/bech32_fuzz.cpp @@ -0,0 +1,46 @@ +#include "../../libspark/bech32.h" +#include "../FuzzedDataProvider.h" +#include +#include + +enum class Bech32EncodingForFuzzing { + INVALID, + BECH32, + BECH32M, + kMaxValue = BECH32M +}; + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fuzzed_data(buf, len); + + std::string test_string = fuzzed_data.ConsumeBytesAsString(len); + std::vector test_vec = fuzzed_data.ConsumeBytes(len); + Bech32EncodingForFuzzing test_encoding_helper = fuzzed_data.ConsumeEnum(); + bech32::Encoding test_encoding; + switch (test_encoding_helper) { + case 
Bech32EncodingForFuzzing::INVALID: + test_encoding = bech32::Encoding::INVALID; + break; + case Bech32EncodingForFuzzing::BECH32: + test_encoding = bech32::Encoding::BECH32; + break; + case Bech32EncodingForFuzzing::BECH32M: + test_encoding = bech32::Encoding::BECH32M; + break; + } + std::string test_string_res; + test_string_res = bech32::encode(test_string, test_vec, test_encoding); + bech32::DecodeResult dr; + dr = bech32::decode(test_string_res); + assert(dr.hrp == test_string); + assert(dr.encoding == test_encoding); + assert(dr.data == test_vec); + + std::vector test_vec1 = fuzzed_data.ConsumeBytes(len); + std::vector test_vec2 = fuzzed_data.ConsumeBytes(len); + int test_frombits = fuzzed_data.ConsumeIntegral(); + int test_to_bits = fuzzed_data.ConsumeIntegral(); + bool test_pad = fuzzed_data.ConsumeBool(); + bech32::convertbits(test_vec1, test_vec2, test_frombits, test_to_bits, test_pad); + return 0; +} diff --git a/src/fuzz/libspark/bech32_fuzz_2.cpp b/src/fuzz/libspark/bech32_fuzz_2.cpp new file mode 100644 index 0000000000..bf71d3dd58 --- /dev/null +++ b/src/fuzz/libspark/bech32_fuzz_2.cpp @@ -0,0 +1,62 @@ +#include "../../libspark/bech32.h" +#include "../FuzzedDataProvider.h" +#include +#include +#include + +// enum class Bech32EncodingForFuzzing { +// INVALID, +// BECH32, +// BECH32M, +// kMaxValue = BECH32M +// }; + +bool CaseInsensitiveEqual(const std::string& s1, const std::string& s2) +{ + if (s1.size() != s2.size()) return false; + for (size_t i = 0; i < s1.size(); ++i) { + char c1 = s1[i]; + if (c1 >= 'A' && c1 <= 'Z') c1 -= ('A' - 'a'); + char c2 = s2[i]; + if (c2 >= 'A' && c2 <= 'Z') c2 -= ('A' - 'a'); + if (c1 != c2) return false; + } + return true; +} + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fuzzed_data(buf, len); + + std::string test_string = fuzzed_data.ConsumeBytesAsString(len); + + const auto r1 = bech32::decode(test_string); + if(r1.hrp.empty()) { + assert(r1.encoding == 
bech32::Encoding::INVALID); + assert(r1.data.empty()); + } else { + assert(r1.encoding != bech32::Encoding::INVALID); + const std::string reencoded = bech32::encode(r1.hrp, r1.data, r1.encoding); + assert(CaseInsensitiveEqual(test_string, reencoded)); + } + + std::vector input = fuzzed_data.ConsumeBytes(len); + std::vector test_vec2 = fuzzed_data.ConsumeBytes(len); + int test_frombits = fuzzed_data.ConsumeIntegral(); + int test_to_bits = fuzzed_data.ConsumeIntegral(); + bool test_pad = fuzzed_data.ConsumeBool(); + bech32::convertbits(input, test_vec2, test_frombits, test_to_bits, test_pad); + + if(input.size() + 3 + 6 <= 90) { + for (auto encoding: {bech32::Encoding::BECH32, bech32::Encoding::BECH32M}) { + const std::string encoded = bech32::encode("bc", input, encoding ); + assert(!encoded.empty()); + + const auto r2 = bech32::decode(encoded); + assert(r2.encoding == encoding); + assert(r2.hrp == "bc"); + assert(r2.data == input); + } + } + + return 0; +} diff --git a/src/fuzz/libspark/bpplus_fuzz.cpp b/src/fuzz/libspark/bpplus_fuzz.cpp new file mode 100644 index 0000000000..82f5504572 --- /dev/null +++ b/src/fuzz/libspark/bpplus_fuzz.cpp @@ -0,0 +1,112 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/bpplus.h" +#include "../../libspark/bpplus_proof.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Single Proof **/ + size_t N0 = fdp.ConsumeIntegralInRange(0,64); + size_t M0 = fdp.ConsumeIntegral(); + + N0 = 64; + M0 = 4; + // Generators + GroupElement G0, H0; + G0.randomize(); + H0.randomize(); + + std::vector Gi0, Hi0; + size_t generators_needed = N0*M0; + if (!spark::is_nonzero_power_of_2(generators_needed)) { + generators_needed = 1 << (spark::log2(N0*M0) + 1); + } + + Gi0.resize(generators_needed); + Hi0.resize(generators_needed); + for (size_t i=0; i < generators_needed; i++) { + 
Gi0[i].randomize(); + Hi0[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M0); + r.resize(M0); + // v = fsp.GetScalars(M0); + // r = fsp.GetScalars(M0); + for(int i = 0; i < M0; i++){ + v[i] = Scalar((uint64_t) rand()); + r[i].randomize(); + } + + std::vector C0; + C0.resize(M0); + for (size_t i=0; i < M0; i++) { + C0[i] = G0*v[i] + H0*r[i]; + } + + spark::BPPlus bpplus0(G0, H0, Gi0, Hi0, N0); + spark::BPPlusProof proof0; + bpplus0.prove(v, r, C0, proof0); + assert(bpplus0.verify(C0, proof0)); + /** End of Single proof fuzz test**/ + + /** Batch Proof **/ + + size_t N1 = fdp.ConsumeIntegralInRange(1,64); + size_t B = fdp.ConsumeIntegral(); + N1 = 64; + B = 5; + + std::vector sizes; + sizes.resize(B); + for(int i = 0; i < B; i++){ + sizes[i] = (fdp.ConsumeIntegral() % 8) + 1 ; // otherwise it's "Bad BPPlus statement!4" line 102 bpplus.cpp since B = 5.(checked) + } + // sizes = fdp.ConsumeRemainingBytes(); + + // Generators + GroupElement G1, H1; + G1.randomize(); + H1.randomize(); + + // std::size_t next_power = 1 << (uint(log2(B)) + 1); + std::vector Gi1, Hi1; + Gi1.resize(8*N1); + Hi1.resize(8*N1); + for (size_t i=0; i < 8*N1; i++) { + Hi1[i].randomize(); + Gi1[i].randomize(); + } + + spark::BPPlus bpplus1(G1, H1, Gi1, Hi1, N1); + std::vector proofs; + proofs.resize(B); + std::vector> C1; + + for (size_t i=0; i < B; i++) { + std::size_t M = sizes[i]; + std::vector v, r; + v.resize(M); + r.resize(M); + std::vector C_; + C_.resize(M); + for (size_t j=0; j < M; j++) { + v[j] = Scalar(uint64_t(j)); + r[j].randomize(); + C_[j] = G1*v[j] + H1*r[j]; + } + C1.emplace_back(C_); + bpplus1.prove(v, r, C_, proofs[i]); + } + assert(bpplus1.verify(C1, proofs)); + + /** End of Batch proof fuzz test **/ + + return 0; +} diff --git a/src/fuzz/libspark/chaum_fuzz.cpp b/src/fuzz/libspark/chaum_fuzz.cpp new file mode 100644 index 0000000000..e25a9a8b00 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz.cpp @@ -0,0 +1,229 @@ +#include "../fuzzing_utilities.h" 
+#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization tests **/ + GroupElement F0, G0, H0, U0; + F0.randomize(); + G0.randomize(); + H0.randomize(); + U0.randomize(); + + const std::size_t n = fdp.ConsumeIntegral(); + + Scalar mu0; + mu0.randomize(); + std::vector x0, y0, z0; + x0.resize(n); + y0.resize(n); + z0.resize(n); + std::vector S0, T0; + S0.resize(n); + T0.resize(n); + for (size_t i=0; i < n; i++) { + x0[i].randomize(); + y0[i].randomize(); + z0[i].randomize(); + + S0[i] = F0*x0[i] + G0*y0[i] + H0*z0[i]; + T0[i] = (U0 + G0*y0[i].negate())*x0[i].inverse(); + } + + spark::ChaumProof proof0; + + spark::Chaum chaum0(F0, G0, H0, U0); + chaum0.prove(mu0, x0, y0, z0, S0, T0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::ChaumProof deserialized_proof0; + serialized >> deserialized_proof0; + + assert(proof0.A1 == deserialized_proof0.A1); + assert(proof0.t2 == deserialized_proof0.t2); + assert(proof0.t3 == deserialized_proof0.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof0.A2[i] == deserialized_proof0.A2[i]); + assert(proof0.t1[i] == deserialized_proof0.t1[i]); + } + + /** Now fuzz all the things **/ + + GroupElement F1, G1, H1, U1; + F1 = fsp.GetMemberGroupElement(); + G1 = fsp.GetMemberGroupElement(); + H1 = fsp.GetMemberGroupElement(); + U1 = fsp.GetMemberGroupElement(); + //F1.randomize(); + //G1.randomize(); + //H1.randomize(); + //U1.randomize(); + + Scalar mu1; + mu1 = fsp.GetScalar(); + std::vector x1, y1, z1; + x1.resize(n); + x1 = fsp.GetScalars(n); + y1.resize(n); + y1 = fsp.GetScalars(n); + z1.resize(n); + z1 = fsp.GetScalars(n); + + std::vector S1, T1; + S1.resize(n); + T1.resize(n); + for (size_t i=0; i < n; i++) { + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + 
T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + serialized << proof1; + + spark::ChaumProof deserialized_proof1; + serialized >> deserialized_proof1; + + assert(proof1.A1 == deserialized_proof1.A1); + assert(proof1.t2 == deserialized_proof1.t2); + assert(proof1.t3 == deserialized_proof1.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof1.A2[i] == deserialized_proof1.A2[i]); + assert(proof1.t1[i] == deserialized_proof1.t1[i]); + } + /**End of serialization tests**/ + + /** Completeness tests **/ + + GroupElement F2, G2, H2, U2; + F2.randomize(); + G2.randomize(); + H2.randomize(); + U2.randomize(); + + Scalar mu2; + mu2.randomize(); + std::vector x2, y2, z2; + x2.resize(n); + y2.resize(n); + z2.resize(n); + std::vector S2, T2; + S2.resize(n); + T2.resize(n); + for (size_t i=0; i < n; i++) { + x2[i].randomize(); + y2[i].randomize(); + z2[i].randomize(); + + S2[i] = F2*x2[i] + G2*y2[i] + H2*z2[i]; + T2[i] = (U2 + G2*y2[i].negate())*x2[i].inverse(); + } + + spark::ChaumProof proof2; + + spark::Chaum chaum2(F2, G2, H2, U2); + chaum2.prove(mu2, x2, y2, z2, S2, T2, proof2); + assert(chaum2.verify(mu2, S2, T2, proof2)); + + /** Full all the things again**/ + + GroupElement F3, G3, H3, U3; + F3 = fsp.GetMemberGroupElement(); + G3 = fsp.GetMemberGroupElement(); + H3 = fsp.GetMemberGroupElement(); + U3 = fsp.GetMemberGroupElement(); + //F3.randomize(); + //G3.randomize(); + //H3.randomize(); + //U3.randomize(); + + + Scalar mu3; + mu3 = fsp.GetScalar(); + std::vector x3, y3, z3; + x3.resize(n); + x3 = fsp.GetScalars(n); + y3.resize(n); + y3 = fsp.GetScalars(n); + z3.resize(n); + z3 = fsp.GetScalars(n); + + std::vector S3, T3; + S3.resize(n); + T3.resize(n); + for (size_t i=0; i < n; i++) { + S3[i] = F3*x3[i] + G3*y3[i] + H3*z3[i]; + T3[i] = (U3 + G3*y3[i].negate())*x3[i].inverse(); + } + + spark::ChaumProof proof3; + + spark::Chaum chaum3(F3, 
G3, H3, U3); + chaum3.prove(mu3, x3, y3, z3, S3, T3, proof3); + assert(chaum3.verify(mu3, S3, T3, proof3)); + + /** End of completeness tests**/ + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum3.verify(evil_mu, S3, T3, proof3))); + + // Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S3); + evil_S[i].randomize(); + assert(!(chaum3.verify(mu3, evil_S, T3, proof3))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T3); + evil_T[i].randomize(); + assert(!(chaum3.verify(mu3, S3, evil_T, proof3))); + } + + // Bad A1 + spark::ChaumProof evil_proof = proof3; + evil_proof.A1.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad A2 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.A2[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/chaum_fuzz_2.cpp b/src/fuzz/libspark/chaum_fuzz_2.cpp new file mode 100644 index 0000000000..9015c93b76 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz_2.cpp @@ -0,0 +1,145 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + + if (len == 0) { + return 0; + } + + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + GroupElement F1, G1, H1, U1; + std::vector ge = fsp.GetGroupElements(4); + + F1 = ge[0]; + 
G1 = ge[1]; + H1 = ge[2]; + U1 = ge[3]; + + const std::size_t n = fdp.ConsumeIntegral(); + + Scalar mu1; + mu1 = fsp.GetScalar(); + std::vector x1, y1, z1; + x1.resize(n); + x1 = fsp.GetScalars(n); + y1.resize(n); + y1 = fsp.GetScalars(n); + z1.resize(n); + z1 = fsp.GetScalars(n); + + std::vector S1, T1; + S1.resize(n); + T1.resize(n); + for (size_t i=0; i < n; i++) { + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof1; + + spark::ChaumProof deserialized_proof1; + serialized >> deserialized_proof1; + + assert(proof1.A1 == deserialized_proof1.A1); + assert(proof1.t2 == deserialized_proof1.t2); + assert(proof1.t3 == deserialized_proof1.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof1.A2[i] == deserialized_proof1.A2[i]); + assert(proof1.t1[i] == deserialized_proof1.t1[i]); + } + + GroupElement F3, G3, H3, U3; + F3 = fsp.GetGroupElement(); + G3 = fsp.GetGroupElement(); + H3 = fsp.GetGroupElement(); + U3 = fsp.GetGroupElement(); + + Scalar mu3; + mu3 = fsp.GetScalar(); + std::vector x3, y3, z3; + x3.resize(n); + x3 = fsp.GetScalars(n); + y3.resize(n); + y3 = fsp.GetScalars(n); + z3.resize(n); + z3 = fsp.GetScalars(n); + + std::vector S3, T3; + S3.resize(n); + T3.resize(n); + for (size_t i=0; i < n; i++) { + S3[i] = F3*x3[i] + G3*y3[i] + H3*z3[i]; + T3[i] = (U3 + G3*y3[i].negate())*x3[i].inverse(); + } + + spark::ChaumProof proof3; + + spark::Chaum chaum3(F3, G3, H3, U3); + chaum3.prove(mu3, x3, y3, z3, S3, T3, proof3); + assert(chaum3.verify(mu3, S3, T3, proof3)); + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum3.verify(evil_mu, S3, T3, proof3))); + + // Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S3); + evil_S[i].randomize(); + 
assert(!(chaum3.verify(mu3, evil_S, T3, proof3))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T3); + evil_T[i].randomize(); + assert(!(chaum3.verify(mu3, S3, evil_T, proof3))); + } + + // Bad A1 + spark::ChaumProof evil_proof = proof3; + evil_proof.A1.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad A2 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.A2[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/chaum_fuzz_member.cpp b/src/fuzz/libspark/chaum_fuzz_member.cpp new file mode 100644 index 0000000000..f8625545e3 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz_member.cpp @@ -0,0 +1,143 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization tests **/ + GroupElement F0, G0, H0, U0; + F0.randomize(); + G0.randomize(); + H0.randomize(); + U0.randomize(); + + const std::size_t n = fdp.ConsumeIntegralInRange(1, INT_MAX); + + Scalar mu0; + mu0.randomize(); + std::vector x0, y0, z0; + x0.resize(n); + y0.resize(n); + z0.resize(n); + std::vector S0, T0; + S0.resize(n); + T0.resize(n); + for (size_t i=0; i < n; i++) { + x0[i].randomize(); + y0[i].randomize(); + z0[i].randomize(); + + S0[i] = F0*x0[i] + G0*y0[i] + H0*z0[i]; + 
T0[i] = (U0 + G0*y0[i].negate())*x0[i].inverse(); + } + + spark::ChaumProof proof0; + + spark::Chaum chaum0(F0, G0, H0, U0); + chaum0.prove(mu0, x0, y0, z0, S0, T0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::ChaumProof deserialized_proof0; + serialized >> deserialized_proof0; + + assert(proof0.A1 == deserialized_proof0.A1); + assert(proof0.t2 == deserialized_proof0.t2); + assert(proof0.t3 == deserialized_proof0.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof0.A2[i] == deserialized_proof0.A2[i]); + assert(proof0.t1[i] == deserialized_proof0.t1[i]); + } + + // fuzz completeness + GroupElement F1, G1, H1, U1; + F1.randomize(); + G1.randomize(); + H1.randomize(); + U1.randomize(); + + const std::size_t n1 = fdp.ConsumeIntegralInRange(1, INT_MAX); + + Scalar mu1; + mu1.randomize(); + std::vector x1, y1, z1; + x1.resize(n1); + y1.resize(n1); + z1.resize(n1); + std::vector S1, T1; + S1.resize(n1); + T1.resize(n1); + for (std::size_t i = 0; i < n; i++) { + x1[i].randomize(); + y1[i].randomize(); + z1[i].randomize(); + + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + assert(chaum1.verify(mu1, S1, T1, proof1)); + /** End of completeness tests**/ + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum1.verify(evil_mu, S1, T1, proof1))); + + // Bad S + for (std::size_t i = 0; i < n1; i++) { + std::vector evil_S(S1); + evil_S[i].randomize(); + assert(!(chaum1.verify(m1, evil_S, T1, proof1))); + } + + // Bad T + for (std::size_t i = 0; i < n1; i++) { + std::vector evil_T(T1); + evil_T[i].randomize(); + assert(!(chaum1.verify(mu1, S1, evil_T, proof1))); + } + + // Bad A1 + spark::ChaumProof evil_proof = proof1; + evil_proof.A1.randomize(); + assert(!(chaum1.verify(mu1, S1, T1, evil_proof))); + + // 
Bad A2 + for (std::size_t i = 0; i < n1; i++) { + evil_proof = proof1; + evil_proof.A2[i].randomize(); + assert(!(chaum1.verify(mu1, S1, T1, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n1; i++) { + evil_proof = proof1; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/coin_fuzz.cpp b/src/fuzz/libspark/coin_fuzz.cpp new file mode 100644 index 0000000000..79b0a6f0b4 --- /dev/null +++ b/src/fuzz/libspark/coin_fuzz.cpp @@ -0,0 +1,72 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/coin.h" +// #include "../../test/test_bitcoin.h" + +#include + +const std::size_t SCALAR_ENCODING = 32; +const char COIN_TYPE_MINT = 0; +const char COIN_TYPE_SPEND = 1; + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + // Scalar temp = fsp.GetScalar(); + Scalar temp; + temp.randomize(); + + std::vector result; + result.resize(SCALAR_ENCODING); + temp.serialize(result.data()); + + const spark::Params* params; + params = spark::Params::get_default(); + + const uint64_t i = len; + + // it will be better to choose s different way to generate the value + const uint64_t v = std::rand(); + const std::string memo = fdp.ConsumeBytesAsString(len); + + // Generate keys + spark::SpendKey spend_key(params); + spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + spark::Address address(incoming_view_key, i); + + // Generate coin + // Scalar k = fsp.GetScalar(); + Scalar k; + k.randomize(); + + spark::Coin coin = 
spark::Coin ( + params, + COIN_TYPE_MINT, + k, + address, + v, + memo, + result + ); + + // Identify coin + spark::IdentifiedCoinData i_data = coin.identify(incoming_view_key); + assert(i_data.i == i); + assert(i_data.d == address.get_d()); + assert(i_data.v == v); + assert(i_data.memo == memo); + + // Recover coin + spark::RecoveredCoinData r_data = coin.recover(full_view_key, i_data); + assert(params->get_F()*(spark::SparkUtils::hash_ser(k, coin.serial_context) + spark::SparkUtils::hash_Q2(incoming_view_key.get_s1(), i) + full_view_key.get_s2()) + full_view_key.get_D() == params->get_F()*r_data.s + full_view_key.get_D()); + + assert(r_data.T * r_data.s + full_view_key.get_D() == params->get_U()); + + + +} \ No newline at end of file diff --git a/src/fuzz/libspark/f4grumble_fuzz.cpp b/src/fuzz/libspark/f4grumble_fuzz.cpp new file mode 100644 index 0000000000..9d0f9b8d43 --- /dev/null +++ b/src/fuzz/libspark/f4grumble_fuzz.cpp @@ -0,0 +1,62 @@ +#include "../../libspark/f4grumble.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + std::string test_string = std::string((char *) buf); + std::vector test_char_vec; + test_char_vec.reserve(len); + + for (int i=0; i < len; i++) { + test_char_vec.push_back(test_string[i]); + } + + // too_long_size + bool exception_thrown_size = false; + bool exception_thrown_encode = false; + bool exception_thrown_decode = false; + + if(len > spark::F4Grumble::get_max_size()){ + + try { + spark::F4Grumble grumble(test_string[0], len); + } catch(const std::exception& ) { + exception_thrown_size = true; + } + assert(exception_thrown_size); + + spark::F4Grumble grumble = spark::F4Grumble(test_string[0], len); + + try { + grumble.encode(test_char_vec); + } catch (const std::exception& ) { + exception_thrown_encode = true; + } + + assert(exception_thrown_encode); + try { + grumble.decode(test_char_vec); + } catch (const std::exception& ) { + exception_thrown_decode = true; + } + assert(exception_thrown_decode); + 
return 0; + } + + spark::F4Grumble f4grumble_fuzz = spark::F4Grumble(test_string[0], len); + std::vector scrambled = f4grumble_fuzz.encode(test_char_vec); + std::vector unscrambled = f4grumble_fuzz.decode(scrambled); + + assert(scrambled.size() == test_char_vec.size()); + assert(unscrambled == test_char_vec); + + // bad_network + unsigned char evil_network = ~test_string[0]; + assert(test_string[0] != evil_network); + + spark::F4Grumble evil_grumble(evil_network, len); + //decoding with a different network + std::vector evil_unscrambled = evil_grumble.decode(scrambled); + assert(evil_unscrambled.size() == scrambled.size()); + assert(evil_unscrambled != test_char_vec); + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/grootle_fuzz.cpp b/src/fuzz/libspark/grootle_fuzz.cpp new file mode 100644 index 0000000000..de51fb0043 --- /dev/null +++ b/src/fuzz/libspark/grootle_fuzz.cpp @@ -0,0 +1,89 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/grootle.h" +#include "../../libspark/grootle_proof.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + size_t n = fdp.ConsumeIntegral(); + size_t m = fdp.ConsumeIntegral(); + size_t N = (size_t) std::pow(n, m); + + GroupElement H; + std::vector Gi = fsp.GetGroupElements(n*m); + std::vector Hi = fsp.GetGroupElements(n*m); + + size_t commit_size = fdp.ConsumeIntegral(); + std::vector S = fsp.GetGroupElements(commit_size); + std::vector V = fsp.GetGroupElements(commit_size); + + std::vector indexes = fdp.ConsumeBytes(len); + std::vector sizes; + sizes.resize(len); + for(size_t i=0; i < len; i++) { + sizes[i] = fdp.ConsumeIntegral(); + } + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_ = fsp.GetScalar(); + v_ = fsp.GetScalar(); + s.emplace_back(s_); + v.emplace_back(v_); + + 
S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + Scalar temp; + temp = fsp.GetScalar(); + std::vector root; + root.reserve(spark::SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + spark::Grootle grootle(H, Hi, Hi, n, m); + std::vector proofs; + + for (size_t i=0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + + ); + + assert(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + assert(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); + S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + assert(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/grootle_fuzz_member.cpp b/src/fuzz/libspark/grootle_fuzz_member.cpp new file mode 100644 index 0000000000..2f0b57fb0e --- /dev/null +++ b/src/fuzz/libspark/grootle_fuzz_member.cpp @@ -0,0 +1,90 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/grootle.h" +#include "../../libspark/grootle_proof.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + std::size_t n = fdp.ConsumeIntegralInRange(2, 65535); + std::size_t m = fdp.ConsumeIntegralInRange(2, 65535); + std::size_t N = (size_t) std::pow(n,m); + + GroupElement H; + H.randomize(); + std::vector Gi = fsp.GetRandomGroupVector(n*m); + std::vector Hi = fsp.GetRandomGroupVector(n*m); + + size_t commit_size = fdp.ConsumeIntegralInRange(1, N); 
+ std::vector S = fsp.GetRandomGroupVector(commit_size); + std::vector V = fsp.GetRandomGroupVector(commit_size); + + std::vector indexes = fdp.ConsumeBytes(N); + std::vector sizes; + sizes.resize(N); + for(size_t i=0; i < N; i++) { + sizes[i] = fdp.ConsumeIntegralInRange(0, N); + } + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_ = fsp.GetScalar(); + v_ = fsp.GetScalar(); + s.emplace_back(s_); + v.emplace_back(v_); + + S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + Scalar temp; + temp = fsp.GetScalar(); + std::vector root; + root.reserve(spark::SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + spark::Grootle grootle(H, Gi, Hi, n, m); + std::vector proofs; + + for (size_t i=0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + + ); + + assert(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + assert(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); + S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + assert(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/mint_transaction_fuzz.cpp b/src/fuzz/libspark/mint_transaction_fuzz.cpp new file mode 100644 index 0000000000..8e10795667 --- /dev/null +++ b/src/fuzz/libspark/mint_transaction_fuzz.cpp @@ -0,0 +1,34 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/mint_transaction.h" +#include + +extern "C" int 
LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + const spark::Params* params; + params = spark::Params::get_default(); + const size_t t = fdp.ConsumeIntegral(); + + spark::SpendKey spend_key(params); + spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + std::vector outputs; + + for (size_t i = 0; i < t; i++) { + spark::MintedCoinData output; + output.address = spark::Address(incoming_view_key, fdp.ConsumeIntegral()); + output.v = fdp.ConsumeIntegral(); + output.memo = fdp.ConsumeBytesAsString(len); + outputs.emplace_back(output); + } + + spark::MintTransaction mint(params, outputs, fdp.ConsumeBytes(spark::SCALAR_ENCODING)); + assert(mint.verify()); + + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/schnorr_fuzz.cpp b/src/fuzz/libspark/schnorr_fuzz.cpp new file mode 100644 index 0000000000..735c68d67f --- /dev/null +++ b/src/fuzz/libspark/schnorr_fuzz.cpp @@ -0,0 +1,95 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/schnorr_proof.h" +#include "../../libspark/schnorr.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization and Completeness tests **/ + GroupElement G0; + // G0 = fsp.GetGroupElement(); + + // NOTE: all GetGroupElement() is replaced by GetMemberGroupElement() + + // ensure that G0 is valid group element + // thus the crash of valid fieldElement and groupElement will not occur + G0.generate(buf); + + Scalar y0; + y0 = fsp.GetScalar(); + GroupElement Y0 = G0*y0; + + spark::SchnorrProof proof0; + + spark::Schnorr schnorr0(G0); + schnorr0.prove(y0, Y0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::SchnorrProof deserialized_proof0; + serialized >> deserialized_proof0; + + 
assert(proof0.A == deserialized_proof0.A); + assert(proof0.t == deserialized_proof0.t); + assert(schnorr0.verify(Y0, proof0)); + + /** End of serialization and completeness tests **/ + + /** Aggregation test **/ + + size_t n = fdp.ConsumeIntegral(); + + GroupElement G1; + G1 = fsp.GetMemberGroupElement(); + std::vector y1; + std::vector Y1; + + for(size_t i=0; i < n; i++) { + y1.emplace_back(); + y1.back() = fsp.GetScalar(); + + Y1.emplace_back(G1 * y1.back()); + } + + spark::SchnorrProof proof1; + spark::Schnorr schnorr1(G1); + schnorr1.prove(y1, Y1, proof1); + assert(schnorr1.verify(Y1, proof1)); + + /** End of aggregation test **/ + + /* + fuzzing bad proofs + */ + + // Bad Y + GroupElement evil_Y; + evil_Y.randomize(); + assert(!(schnorr1.verify(evil_Y, proof1))); + + // Bad A + spark::SchnorrProof evil_proof = proof1; + evil_proof.A.randomize(); + assert(!(schnorr1.verify(Y1, evil_proof))); + + // Bad t + evil_proof = proof1; + evil_proof.t.randomize(); + assert(!(schnorr1.verify(Y1, evil_proof))); + + // //checking empty proof + // std::vector y3; + // std::vector Y3; + // y3.resize(0); + // Y3.resize(0); + // spark::SchnorrProof proof3; + + // spark::Schnorr schnorr3(G1); + // schnorr3.prove(y3, Y3, proof3); + // assert(schnorr1.verify(Y3, proof3)); + +} \ No newline at end of file diff --git a/src/fuzz/libspark/spend_transaction_fuzz.cpp b/src/fuzz/libspark/spend_transaction_fuzz.cpp new file mode 100644 index 0000000000..14461e70b7 --- /dev/null +++ b/src/fuzz/libspark/spend_transaction_fuzz.cpp @@ -0,0 +1,110 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/spend_transaction.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + const spark::Params* params; + params = spark::Params::get_default(); + const std::string memo = fdp.ConsumeBytesAsString(len); + + spark::SpendKey spend_key(params); + 
spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + spark::Address address(incoming_view_key, fdp.ConsumeIntegral()); + + size_t N = (size_t) pow(params->get_n_grootle(), params->get_m_grootle()); + + bool exception_thrown = false; + if (memo.size() > params->get_memo_bytes()) { + try{ + Scalar k; + k.randomize(); + uint64_t v = rand(); + spark::Coin(params, spark::COIN_TYPE_MINT, k, address, v, memo, fdp.ConsumeBytes(spark::SCALAR_ENCODING)); + } catch(const std::exception& ) { + exception_thrown = true; + } + assert(exception_thrown); + return 0; + } + + std::vector in_coins; + for (size_t i = 0; i < N; i ++) { + secp_primitives::Scalar k = fsp.GetScalar(); + + uint64_t v = fdp.ConsumeIntegral(); + + in_coins.emplace_back(spark::Coin(params, spark::COIN_TYPE_MINT, k, address, v, memo, fdp.ConsumeBytes(spark::SCALAR_ENCODING))); + } + + uint64_t f = 0; + + std::vector spend_indices = fdp.ConsumeBytes(len); + if (spend_indices.size() < len) { + for (int i = spend_indices.size(); i < len; i++) { + spend_indices.push_back(std::rand()); + } + } + std::vector spend_coin_data; + std::unordered_map cover_set_data; + const size_t w = spend_indices.size(); + for (size_t u = 0; u < w; u++) { + spark::IdentifiedCoinData identified_coin_data = in_coins[spend_indices[u]].identify(incoming_view_key); + spark::RecoveredCoinData recovered_coin_data = in_coins[spend_indices[u]].recover(full_view_key, identified_coin_data); + + spend_coin_data.emplace_back(); + uint64_t cover_set_id = fdp.ConsumeIntegral(); + spend_coin_data.back().cover_set_id = cover_set_id; + + spark::CoverSetData set_data; + set_data.cover_set = in_coins; + set_data.cover_set_representation = fdp.ConsumeBytes(spark::SCALAR_ENCODING); + cover_set_data[cover_set_id] = set_data; + spend_coin_data.back().index = spend_indices[u]; + spend_coin_data.back().k = identified_coin_data.k; + spend_coin_data.back().s = recovered_coin_data.s; + spend_coin_data.back().T 
= recovered_coin_data.T; + spend_coin_data.back().v = identified_coin_data.v; + + f += identified_coin_data.v; + } + + const size_t t = fdp.ConsumeIntegral(); + std::vector out_coin_data; + for (size_t j = 0; j < t; j++) { + out_coin_data.emplace_back(); + out_coin_data.back().address = address; + out_coin_data.back().v = fdp.ConsumeIntegral(); + out_coin_data.back().memo = memo; + + f -= out_coin_data.back().v; + } + + uint64_t fee_test = f; + for (size_t j = 0; j < t; j++) { + fee_test += out_coin_data[j].v; + } + + for (size_t j = 0; j < t; j++) { + fee_test -= spend_coin_data[j].v; + } + assert(fee_test == 0); + + spark::SpendTransaction transaction(params, full_view_key, spend_key, spend_coin_data, cover_set_data, f, 0, out_coin_data); + + transaction.setCoverSets(cover_set_data); + std::unordered_map> cover_sets; + for (const auto set_data: cover_set_data) { + cover_sets[set_data.first] = set_data.second.cover_set; + } + assert(spark::SpendTransaction::verify(transaction, cover_sets)); + + + return 0; + +} \ No newline at end of file diff --git a/src/hdmint/tracker.cpp b/src/hdmint/tracker.cpp index c394e721d5..677e88c817 100644 --- a/src/hdmint/tracker.cpp +++ b/src/hdmint/tracker.cpp @@ -546,7 +546,7 @@ bool CHDMintTracker::IsMempoolSpendOurs(const std::set& setMempool, con uint32_t pubcoinId; try { std::tie(spend, pubcoinId) = sigma::ParseSigmaSpend(txin); - } catch (...) { + } catch (const std::exception &) { return false; } @@ -560,7 +560,7 @@ bool CHDMintTracker::IsMempoolSpendOurs(const std::set& setMempool, con std::unique_ptr joinsplit; try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); - } catch (...) 
{ + } catch (const std::exception &) { return false; } diff --git a/src/hdmint/wallet.cpp b/src/hdmint/wallet.cpp index 235995427c..98444fd720 100644 --- a/src/hdmint/wallet.cpp +++ b/src/hdmint/wallet.cpp @@ -1183,7 +1183,7 @@ bool CHDMintWallet::TxOutToPublicCoin(const CTxOut& txout, sigma::PublicCoin& pu secp_primitives::GroupElement publicSigma; try { publicSigma.deserialize(&coin_serialised[0]); - } catch (...) { + } catch (const std::exception &) { return state.DoS(100, error("TxOutToPublicCoin : deserialize failed")); } diff --git a/src/init.cpp b/src/init.cpp index b8f7234ba7..ef2d083275 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -972,6 +972,7 @@ void InitParameterInteraction() LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__); } +#ifdef ENABLE_WALLET // Forcing all mnemonic settings off if -usehd is off. if (!GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET)) { if (SoftSetBoolArg("-usemnemonic", false) && SoftSetArg("-mnemonic", "") && SoftSetArg("-mnemonicpassphrase", "") && SoftSetArg("-hdseed", "not hex")) @@ -983,6 +984,7 @@ void InitParameterInteraction() if (SoftSetArg("-mnemonic", "") && SoftSetArg("-mnemonicpassphrase", "") && SoftSetArg("-hdseed", "not hex")) LogPrintf("%s: Potential parameter interaction: -usemnemonic=0 -> setting -mnemonic=\"\", -mnemonicpassphrase=\"\"\n, -hdseed=\"not hex\"\n", __func__); } +#endif // ENABLE_WALLET } static std::string ResolveErrMsg(const char *const optname, const std::string &strBind) { diff --git a/src/lelantus.cpp b/src/lelantus.cpp index 0980e1807f..7b6df0a744 100644 --- a/src/lelantus.cpp +++ b/src/lelantus.cpp @@ -6,8 +6,11 @@ #include "base58.h" #include "definition.h" #include "txmempool.h" +#ifdef ENABLE_WALLET #include "wallet/wallet.h" #include "wallet/walletdb.h" +#endif // ENABLE_WALLET +#include "sigma.h" #include "crypto/sha256.h" #include "liblelantus/coin.h" #include "liblelantus/schnorr_prover.h" @@ -89,7 +92,14 @@ bool IsLelantusAllowed() 
bool IsLelantusAllowed(int height) { - return height >= ::Params().GetConsensus().nLelantusStartBlock; + return height >= ::Params().GetConsensus().nLelantusStartBlock && height < ::Params().GetConsensus().nSparkStartBlock; +} + +bool IsLelantusGraceFulPeriod() +{ + LOCK(cs_main); + int height = chainActive.Height(); + return height >= ::Params().GetConsensus().nLelantusStartBlock && height < ::Params().GetConsensus().nLelantusGracefulPeriod; } bool IsAvailableToMint(const CAmount& amount) @@ -335,6 +345,7 @@ bool CheckLelantusJMintTransaction( // Update public coin list in the info lelantusTxInfo->mints.push_back(std::make_pair(pubCoin, std::make_pair(amount, mintTag))); + lelantusTxInfo->encryptedJmintValues.insert(std::make_pair(pubCoin, encryptedValue)); lelantusTxInfo->zcTransactions.insert(hashTx); } @@ -391,7 +402,7 @@ bool CheckLelantusJoinSplitTransaction( REJECT_MALFORMED, "CheckLelantusJoinSplitTransaction: invalid joinsplit transaction"); } - catch (...) { + catch (const std::exception &) { return state.DoS(100, false, REJECT_MALFORMED, @@ -433,8 +444,13 @@ bool CheckLelantusJoinSplitTransaction( for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsLelantusJMint()) { - if (!CheckLelantusJMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, Cout, lelantusTxInfo)) - return false; + try { + if (!CheckLelantusJMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, Cout, lelantusTxInfo)) + return false; + } + catch (const std::exception &x) { + return state.Error(x.what()); + } } else if(txout.scriptPubKey.IsLelantusMint()) { return false; //putting regular mints at JoinSplit transactions is not allowed } else { @@ -726,6 +742,22 @@ bool CheckLelantusTransaction( realHeight = chainActive.Height(); } + // accept Lelantus mint tx into 5 more blocks, to allow mempool cleared + if (!isVerifyDB && realHeight >= (::Params().GetConsensus().nSparkStartBlock + 5)) { + if (tx.IsLelantusMint() && 
!tx.IsLelantusJoinSplit()) + return state.DoS(100, false, + REJECT_INVALID, + "Lelantus already is not available, start using Spark."); + } + + // accept lelantus spends until nLelantusGracefulPeriod passed, to allow migration of funds from lelantus to spark + if (!isVerifyDB && realHeight >= (::Params().GetConsensus().nLelantusGracefulPeriod)) { + if (tx.IsLelantusJoinSplit()) + return state.DoS(100, false, + REJECT_INVALID, + "Lelantus is fully disabled."); + } + bool const allowLelantus = (realHeight >= consensus.nLelantusStartBlock); if (!isVerifyDB && !isCheckWallet) { @@ -740,8 +772,13 @@ bool CheckLelantusTransaction( if (allowLelantus && !isVerifyDB) { for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsLelantusMint()) { - if (!CheckLelantusMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, lelantusTxInfo)) - return false; + try { + if (!CheckLelantusMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, lelantusTxInfo)) + return false; + } + catch (const std::exception &x) { + return state.Error(x.what()); + } } } } @@ -762,10 +799,15 @@ bool CheckLelantusTransaction( } if (!isVerifyDB) { - if (!CheckLelantusJoinSplitTransaction( - tx, state, hashTx, isVerifyDB, nHeight, realHeight, - isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo, lelantusTxInfo)) { - return false; + try { + if (!CheckLelantusJoinSplitTransaction( + tx, state, hashTx, isVerifyDB, nHeight, realHeight, + isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo, lelantusTxInfo)) { + return false; + } + } + catch (const std::exception &x) { + return state.Error(x.what()); } } } @@ -788,7 +830,7 @@ void RemoveLelantusJoinSplitReferencingBlock(CTxMemPool& pool, CBlockIndex* bloc try { joinsplit = ParseLelantusJoinSplit(tx); } - catch (...) 
{ + catch (const std::exception &) { txn_to_remove.push_back(tx); break; } @@ -827,7 +869,7 @@ std::vector GetLelantusJoinSplitSerialNumbers(const CTransaction &tx, co try { return ParseLelantusJoinSplit(tx)->getCoinSerialNumbers(); } - catch (...) { + catch (const std::exception &) { return std::vector(); } } @@ -839,7 +881,7 @@ std::vector GetLelantusJoinSplitIds(const CTransaction &tx, const CTxI try { return ParseLelantusJoinSplit(tx)->getCoinGroupIds(); } - catch (...) { + catch (const std::exception &) { return std::vector(); } } @@ -979,7 +1021,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const GroupElement &pubCoinValue, try { ParseLelantusMintScript(txout.scriptPubKey, txPubCoinValue); } - catch (...) { + catch (const std::exception &) { continue; } if(pubCoinValue==txPubCoinValue){ @@ -994,6 +1036,12 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const GroupElement &pubCoinValue, return false; } +uint256 GetTxHashFromPubcoin(const lelantus::PublicCoin& pubCoin) { + COutPoint outPoint; + GetOutPoint(outPoint, pubCoin.getValue()); + return outPoint.hash; +} + bool GetOutPoint(COutPoint& outPoint, const lelantus::PublicCoin &pubCoin) { lelantus::CLelantusState *lelantusState = lelantus::CLelantusState::GetState(); @@ -1098,13 +1146,11 @@ void CLelantusState::Containers::RemoveMint(lelantus::PublicCoin const & pubCoin } void CLelantusState::Containers::AddSpend(Scalar const & serial, int coinGroupId) { - if (!mintMetaInfo.count(coinGroupId)) { - throw std::invalid_argument("group id doesn't exist"); + if (mintMetaInfo.count(coinGroupId) > 0) { + usedCoinSerials[serial] = coinGroupId; + spendMetaInfo[coinGroupId] += 1; + CheckSurgeCondition(); } - - usedCoinSerials[serial] = coinGroupId; - spendMetaInfo[coinGroupId] += 1; - CheckSurgeCondition(); } void CLelantusState::Containers::RemoveSpend(Scalar const & serial) { @@ -1210,7 +1256,22 @@ void CLelantusState::AddMintsToStateAndBlockIndex( const CBlock* pblock) { std::vector> blockMints; + 
std::unordered_map lelantusMintData; + for (const auto& mint : pblock->lelantusTxInfo->mints) { + if (GetBoolArg("-mobile", false)) { + lelantus::MintValueData mintdata; + mintdata.amount = mint.second.first; + if (pblock->lelantusTxInfo->encryptedJmintValues.count(mint.first) > 0) { + mintdata.isJMint = true; + mintdata.encryptedValue = pblock->lelantusTxInfo->encryptedJmintValues[mint.first]; + } + + COutPoint outPoint; + GetOutPointFromBlock(outPoint, mint.first.getValue(), *pblock); + mintdata.txHash = outPoint.hash; + lelantusMintData[mint.first.getValue()] = mintdata; + } blockMints.push_back(std::make_pair(mint.first, mint.second.second)); } @@ -1250,6 +1311,10 @@ void CLelantusState::AddMintsToStateAndBlockIndex( LogPrintf("AddMintsToStateAndBlockIndex: Lelantus mint added id=%d\n", latestCoinId); index->lelantusMintedPubCoins[latestCoinId].push_back(mint); + + if (GetBoolArg("-mobile", false)) { + index->lelantusMintData[mint.first.getValue()] = lelantusMintData[mint.first.getValue()]; + } } } @@ -1416,7 +1481,8 @@ int CLelantusState::GetCoinSetForSpend( int coinGroupID, uint256& blockHash_out, std::vector& coins_out, - std::vector& setHash_out) { + std::vector& setHash_out, + std::string start_block_hash) { coins_out.clear(); @@ -1434,6 +1500,10 @@ int CLelantusState::GetCoinSetForSpend( continue; } + if (block->GetBlockHash().GetHex() == start_block_hash) { + break ; + } + // check coins in group coinGroupID - 1 in the case that using coins from prev group. 
int id = 0; if (CountCoinInBlock(block, coinGroupID)) { @@ -1472,6 +1542,76 @@ int CLelantusState::GetCoinSetForSpend( return numberOfCoins; } +void CLelantusState::GetCoinsForRecovery( + CChain *chain, + int maxHeight, + int coinGroupID, + std::string start_block_hash, + uint256& blockHash_out, + std::vector>>& coins, + std::vector& setHash_out) { + + coins.clear(); + if (coinGroups.count(coinGroupID) == 0) { + return; + } + + LelantusCoinGroupInfo &coinGroup = coinGroups[coinGroupID]; + + int numberOfCoins = 0; + for (CBlockIndex *block = coinGroup.lastBlock;; block = block->pprev) { + // ignore block heigher than max height + if (block->nHeight > maxHeight) { + continue; + } + + if (block->GetBlockHash().GetHex() == start_block_hash) { + break; + } + + // check coins in group coinGroupID - 1 in the case that using coins from prev group. + int id = 0; + if (CountCoinInBlock(block, coinGroupID)) { + id = coinGroupID; + } else if (CountCoinInBlock(block, coinGroupID - 1)) { + id = coinGroupID - 1; + } + + if (id) { + if (numberOfCoins == 0) { + // latest block satisfying given conditions + // remember block hash and set hash + blockHash_out = block->GetBlockHash(); + setHash_out = GetAnonymitySetHash(block, id); + } + + numberOfCoins += block->lelantusMintedPubCoins[id].size(); + if (block->lelantusMintedPubCoins.count(id) > 0) { + for (const auto &coin : block->lelantusMintedPubCoins[id]) { + LOCK(cs_main); + // skip mints from blacklist if nLelantusFixesStartBlock is passed + if (chainActive.Height() >= ::Params().GetConsensus().nLelantusFixesStartBlock) { + if (::Params().GetConsensus().lelantusBlacklist.count(coin.first.getValue()) > 0) { + continue; + } + } + + lelantus::MintValueData lelantusMintData; + if (block->lelantusMintData.count(coin.first.getValue())) + lelantusMintData = block->lelantusMintData[coin.first.getValue()]; + coins.push_back(std::make_pair(coin.first, std::make_pair(lelantusMintData, coin.second))); + + } + } + } + + if (block == 
coinGroup.firstBlock) { + break ; + } + } + +} + void CLelantusState::GetAnonymitySet( int coinGroupID, bool fStartLelantusBlacklist, diff --git a/src/lelantus.h b/src/lelantus.h index 8e0da50596..9d1d35ede0 100644 --- a/src/lelantus.h +++ b/src/lelantus.h @@ -27,6 +27,8 @@ class CLelantusTxInfo { // Vector of > for all the mints. std::vector>> mints; + std::unordered_map, lelantus::CPublicCoinHash> encryptedJmintValues; + // serial for every spend (map from serial to coin group id) std::unordered_map spentSerials; @@ -41,6 +43,7 @@ class CLelantusTxInfo { bool IsLelantusAllowed(); bool IsLelantusAllowed(int height); +bool IsLelantusGraceFulPeriod(); bool IsAvailableToMint(const CAmount& amount); @@ -78,6 +81,8 @@ bool ConnectBlockLelantus( const CBlock *pblock, bool fJustCheck=false); +uint256 GetTxHashFromPubcoin(const lelantus::PublicCoin& pubCoin); + /* * Get COutPoint(txHash, index) from the chain using pubcoin value alone. */ @@ -189,13 +194,23 @@ friend bool BuildLelantusStateFromIndex(CChain *, std::set &); int id, uint256& blockHash_out, std::vector& coins_out, - std::vector& setHash_out); + std::vector& setHash_out, + std::string start_block_hash = ""); void GetAnonymitySet( int coinGroupID, bool fStartLelantusBlacklist, std::vector& coins_out); + void GetCoinsForRecovery( + CChain *chain, + int maxHeight, + int coinGroupID, + std::string start_block_hash, + uint256& blockHash_out, + std::vector>>& coins, + std::vector& setHash_out); + // Return height of mint transaction and id of minted coin std::pair GetMintedCoinHeightAndId(const lelantus::PublicCoin& pubCoin); diff --git a/src/liblelantus/lelantus_prover.cpp b/src/liblelantus/lelantus_prover.cpp index 96d908b676..3154b51ea6 100644 --- a/src/liblelantus/lelantus_prover.cpp +++ b/src/liblelantus/lelantus_prover.cpp @@ -155,7 +155,7 @@ void LelantusProver::generate_sigma_proofs( parallelTasks.emplace_back(threadPool.PostTask([&]() { try { prover.sigma_commit(commits, index, rA_i, rB_i, rC_i, rD_i, a_i, 
Tk_i, Pk_i, Yk_i, sigma_i, proof); - } catch (...) { + } catch (const std::exception &) { return false; } return true; diff --git a/src/libspark/aead.cpp b/src/libspark/aead.cpp new file mode 100644 index 0000000000..ada79bcc2c --- /dev/null +++ b/src/libspark/aead.cpp @@ -0,0 +1,92 @@ +#include "aead.h" + +namespace spark { + +// Perform authenticated encryption with ChaCha20-Poly1305 using key commitment +AEADEncryptedData AEAD::encrypt(const GroupElement& prekey, const std::string additional_data, CDataStream& data) { + // Set up the result structure + AEADEncryptedData result; + + // Derive the key and commitment + std::vector key = SparkUtils::kdf_aead(prekey); + result.key_commitment = SparkUtils::commit_aead(prekey); + + // Internal size tracker; we know the size of the data already, and can ignore + int TEMP; + + // For our application, we can safely use a zero nonce since keys are never reused + std::vector iv; + iv.resize(AEAD_IV_SIZE); + + // Set up the cipher + EVP_CIPHER_CTX* ctx; + ctx = EVP_CIPHER_CTX_new(); + EVP_EncryptInit_ex(ctx, EVP_chacha20_poly1305(), NULL, key.data(), iv.data()); + + // Include the associated data + std::vector additional_data_bytes(additional_data.begin(), additional_data.end()); + EVP_EncryptUpdate(ctx, NULL, &TEMP, additional_data_bytes.data(), additional_data_bytes.size()); + + // Encrypt the plaintext + result.ciphertext.resize(data.size()); + EVP_EncryptUpdate(ctx, result.ciphertext.data(), &TEMP, reinterpret_cast(data.data()), data.size()); + EVP_EncryptFinal_ex(ctx, NULL, &TEMP); + + // Get the tag + result.tag.resize(AEAD_TAG_SIZE); + EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, AEAD_TAG_SIZE, result.tag.data()); + + // Clean up + EVP_CIPHER_CTX_free(ctx); + + return result; +} + +// Perform authenticated decryption with ChaCha20-Poly1305 using key commitment +CDataStream AEAD::decrypt_and_verify(const GroupElement& prekey, const std::string additional_data, AEADEncryptedData& data) { + // Derive the key and 
commitment + std::vector key = SparkUtils::kdf_aead(prekey); + std::vector key_commitment = SparkUtils::commit_aead(prekey); + + // Assert that the key commitment is valid + if (key_commitment != data.key_commitment) { + throw std::runtime_error("Bad AEAD key commitment"); + } + + // Set up the result + CDataStream result(SER_NETWORK, PROTOCOL_VERSION); + + // Internal size tracker; we know the size of the data already, and can ignore + int TEMP; + + // For our application, we can safely use a zero nonce since keys are never reused + std::vector iv; + iv.resize(AEAD_IV_SIZE); + + // Set up the cipher + EVP_CIPHER_CTX* ctx; + ctx = EVP_CIPHER_CTX_new(); + EVP_DecryptInit_ex(ctx, EVP_chacha20_poly1305(), NULL, key.data(), iv.data()); + + // Include the associated data + std::vector additional_data_bytes(additional_data.begin(), additional_data.end()); + EVP_DecryptUpdate(ctx, NULL, &TEMP, additional_data_bytes.data(), additional_data_bytes.size()); + + // Decrypt the ciphertext + result.resize(data.ciphertext.size()); + EVP_DecryptUpdate(ctx, reinterpret_cast(result.data()), &TEMP, data.ciphertext.data(), data.ciphertext.size()); + + // Set the expected tag + EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, AEAD_TAG_SIZE, data.tag.data()); + + // Decrypt and clean up + int ret = EVP_DecryptFinal_ex(ctx, NULL, &TEMP); + EVP_CIPHER_CTX_free(ctx); + if (ret != 1) { + throw std::runtime_error("Bad AEAD authentication"); + } + + return result; +} + +} diff --git a/src/libspark/aead.h b/src/libspark/aead.h new file mode 100644 index 0000000000..ce8470a17d --- /dev/null +++ b/src/libspark/aead.h @@ -0,0 +1,31 @@ +#ifndef FIRO_SPARK_AEAD_H +#define FIRO_SPARK_AEAD_H +#include +#include "util.h" + +namespace spark { + +struct AEADEncryptedData { + std::vector ciphertext; + std::vector tag; + std::vector key_commitment; + + ADD_SERIALIZE_METHODS; + + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(ciphertext); + READWRITE(tag); + 
READWRITE(key_commitment); + } +}; + +class AEAD { +public: + static AEADEncryptedData encrypt(const GroupElement& prekey, const std::string additional_data, CDataStream& data); + static CDataStream decrypt_and_verify(const GroupElement& prekey, const std::string associated_data, AEADEncryptedData& data); +}; + +} + +#endif diff --git a/src/libspark/bech32.cpp b/src/libspark/bech32.cpp new file mode 100644 index 0000000000..67b76c726b --- /dev/null +++ b/src/libspark/bech32.cpp @@ -0,0 +1,248 @@ +/* Copyright (c) 2017, 2021 Pieter Wuille + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +// Copyright (c) 2017 Pieter Wuille +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "bech32.h" + +#include +#include + +#include +#include + +namespace bech32 +{ + +namespace +{ + +typedef std::vector data; + +/** The Bech32 character set for encoding. */ +const char* CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"; + +/** The Bech32 character set for decoding. */ +const int8_t CHARSET_REV[128] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 15, -1, 10, 17, 21, 20, 26, 30, 7, 5, -1, -1, -1, -1, -1, -1, + -1, 29, -1, 24, 13, 25, 9, 8, 23, -1, 18, 22, 31, 27, 19, -1, + 1, 0, 3, 16, 11, 28, 12, 14, 6, 4, 2, -1, -1, -1, -1, -1, + -1, 29, -1, 24, 13, 25, 9, 8, 23, -1, 18, 22, 31, 27, 19, -1, + 1, 0, 3, 16, 11, 28, 12, 14, 6, 4, 2, -1, -1, -1, -1, -1 +}; + +/** Concatenate two byte arrays. */ +data cat(data x, const data& y) { + x.insert(x.end(), y.begin(), y.end()); + return x; +} + +/* Determine the final constant to use for the specified encoding. */ +uint32_t encoding_constant(Encoding encoding) { + assert(encoding == Encoding::BECH32 || encoding == Encoding::BECH32M); + return encoding == Encoding::BECH32 ? 1 : 0x2bc830a3; +} + +/** This function will compute what 6 5-bit values to XOR into the last 6 input values, in order to + * make the checksum 0. These 6 values are packed together in a single 30-bit integer. The higher + * bits correspond to earlier values. */ +uint32_t polymod(const data& values) +{ + // The input is interpreted as a list of coefficients of a polynomial over F = GF(32), with an + // implicit 1 in front. If the input is [v0,v1,v2,v3,v4], that polynomial is v(x) = + // 1*x^5 + v0*x^4 + v1*x^3 + v2*x^2 + v3*x + v4. The implicit 1 guarantees that + // [v0,v1,v2,...] has a distinct checksum from [0,v0,v1,v2,...]. 
+ + // The output is a 30-bit integer whose 5-bit groups are the coefficients of the remainder of + // v(x) mod g(x), where g(x) is the Bech32 generator, + // x^6 + {29}x^5 + {22}x^4 + {20}x^3 + {21}x^2 + {29}x + {18}. g(x) is chosen in such a way + // that the resulting code is a BCH code, guaranteeing detection of up to 3 errors within a + // window of 1023 characters. Among the various possible BCH codes, one was selected to in + // fact guarantee detection of up to 4 errors within a window of 89 characters. + + // Note that the coefficients are elements of GF(32), here represented as decimal numbers + // between {}. In this finite field, addition is just XOR of the corresponding numbers. For + // example, {27} + {13} = {27 ^ 13} = {22}. Multiplication is more complicated, and requires + // treating the bits of values themselves as coefficients of a polynomial over a smaller field, + // GF(2), and multiplying those polynomials mod a^5 + a^3 + 1. For example, {5} * {26} = + // (a^2 + 1) * (a^4 + a^3 + a) = (a^4 + a^3 + a) * a^2 + (a^4 + a^3 + a) = a^6 + a^5 + a^4 + a + // = a^3 + 1 (mod a^5 + a^3 + 1) = {9}. + + // During the course of the loop below, `c` contains the bitpacked coefficients of the + // polynomial constructed from just the values of v that were processed so far, mod g(x). In + // the above example, `c` initially corresponds to 1 mod g(x), and after processing 2 inputs of + // v, it corresponds to x^2 + v0*x + v1 mod g(x). As 1 mod g(x) = 1, that is the starting value + // for `c`. + uint32_t c = 1; + for (const auto v_i : values) { + // We want to update `c` to correspond to a polynomial with one extra term. If the initial + // value of `c` consists of the coefficients of c(x) = f(x) mod g(x), we modify it to + // correspond to c'(x) = (f(x) * x + v_i) mod g(x), where v_i is the next input to + // process. 
Simplifying: + // c'(x) = (f(x) * x + v_i) mod g(x) + // ((f(x) mod g(x)) * x + v_i) mod g(x) + // (c(x) * x + v_i) mod g(x) + // If c(x) = c0*x^5 + c1*x^4 + c2*x^3 + c3*x^2 + c4*x + c5, we want to compute + // c'(x) = (c0*x^5 + c1*x^4 + c2*x^3 + c3*x^2 + c4*x + c5) * x + v_i mod g(x) + // = c0*x^6 + c1*x^5 + c2*x^4 + c3*x^3 + c4*x^2 + c5*x + v_i mod g(x) + // = c0*(x^6 mod g(x)) + c1*x^5 + c2*x^4 + c3*x^3 + c4*x^2 + c5*x + v_i + // If we call (x^6 mod g(x)) = k(x), this can be written as + // c'(x) = (c1*x^5 + c2*x^4 + c3*x^3 + c4*x^2 + c5*x + v_i) + c0*k(x) + + // First, determine the value of c0: + uint8_t c0 = c >> 25; + + // Then compute c1*x^5 + c2*x^4 + c3*x^3 + c4*x^2 + c5*x + v_i: + c = ((c & 0x1ffffff) << 5) ^ v_i; + + // Finally, for each set bit n in c0, conditionally add {2^n}k(x): + if (c0 & 1) c ^= 0x3b6a57b2; // k(x) = {29}x^5 + {22}x^4 + {20}x^3 + {21}x^2 + {29}x + {18} + if (c0 & 2) c ^= 0x26508e6d; // {2}k(x) = {19}x^5 + {5}x^4 + x^3 + {3}x^2 + {19}x + {13} + if (c0 & 4) c ^= 0x1ea119fa; // {4}k(x) = {15}x^5 + {10}x^4 + {2}x^3 + {6}x^2 + {15}x + {26} + if (c0 & 8) c ^= 0x3d4233dd; // {8}k(x) = {30}x^5 + {20}x^4 + {4}x^3 + {12}x^2 + {30}x + {29} + if (c0 & 16) c ^= 0x2a1462b3; // {16}k(x) = {21}x^5 + x^4 + {8}x^3 + {24}x^2 + {21}x + {19} + } + return c; +} + +/** Convert to lower case. */ +unsigned char lc(unsigned char c) { + return (c >= 'A' && c <= 'Z') ? (c - 'A') + 'a' : c; +} + +/** Expand a HRP for use in checksum computation. */ +data expand_hrp(const std::string& hrp) { + data ret; + ret.resize(hrp.size() * 2 + 1); + for (size_t i = 0; i < hrp.size(); ++i) { + unsigned char c = hrp[i]; + ret[i] = c >> 5; + ret[i + hrp.size() + 1] = c & 0x1f; + } + ret[hrp.size()] = 0; + return ret; +} + +/** Verify a checksum. */ +Encoding verify_checksum(const std::string& hrp, const data& values) { + // PolyMod computes what value to xor into the final values to make the checksum 0. 
However, + // if we required that the checksum was 0, it would be the case that appending a 0 to a valid + // list of values would result in a new valid list. For that reason, Bech32 requires the + // resulting checksum to be 1 instead. In Bech32m, this constant was amended. + uint32_t check = polymod(cat(expand_hrp(hrp), values)); + if (check == encoding_constant(Encoding::BECH32)) return Encoding::BECH32; + if (check == encoding_constant(Encoding::BECH32M)) return Encoding::BECH32M; + return Encoding::INVALID; +} + +data create_checksum(const std::string& hrp, const data& values, Encoding encoding) { + data enc = cat(expand_hrp(hrp), values); + enc.resize(enc.size() + 6); + uint32_t mod = polymod(enc) ^ encoding_constant(encoding); + data ret; + ret.resize(6); + for (size_t i = 0; i < 6; ++i) { + // Convert the 5-bit groups in mod to checksum values. + ret[i] = (mod >> (5 * (5 - i))) & 31; + } + return ret; +} + +} // namespace + +/** Encode a Bech32 or Bech32m string. */ +std::string encode(const std::string& hrp, const data& values, Encoding encoding) { + // First ensure that the HRP is all lowercase. BIP-173 requires an encoder + // to return a lowercase Bech32 string, but if given an uppercase HRP, the + // result will always be invalid. + for (const char& c : hrp) assert(c < 'A' || c > 'Z'); + data checksum = create_checksum(hrp, values, encoding); + data combined = cat(values, checksum); + std::string ret = hrp + '1'; + ret.reserve(ret.size() + combined.size()); + for (const auto c : combined) { + ret += CHARSET[c]; + } + return ret; +} + +/** Decode a Bech32 or Bech32m string. 
*/ +DecodeResult decode(const std::string& str) { + bool lower = false, upper = false; + for (size_t i = 0; i < str.size(); ++i) { + unsigned char c = str[i]; + if (c >= 'a' && c <= 'z') lower = true; + else if (c >= 'A' && c <= 'Z') upper = true; + else if (c < 33 || c > 126) return {}; + } + if (lower && upper) return {}; + size_t pos = str.rfind('1'); + if (pos == str.npos || pos == 0 || pos + 7 > str.size()) { + return {}; + } + data values(str.size() - 1 - pos); + for (size_t i = 0; i < str.size() - 1 - pos; ++i) { + unsigned char c = str[i + pos + 1]; + int8_t rev = CHARSET_REV[c]; + + if (rev == -1) { + return {}; + } + values[i] = rev; + } + std::string hrp; + for (size_t i = 0; i < pos; ++i) { + hrp += lc(str[i]); + } + Encoding result = verify_checksum(hrp, values); + if (result == Encoding::INVALID) return {}; + return {result, std::move(hrp), data(values.begin(), values.end() - 6)}; +} + +/** Convert from one power-of-2 number base to another. */ +bool convertbits(std::vector& out, const std::vector& in, int frombits, int tobits, bool pad) { + int acc = 0; + int bits = 0; + const int maxv = (1 << tobits) - 1; + const int max_acc = (1 << (frombits + tobits - 1)) - 1; + for (size_t i = 0; i < in.size(); ++i) { + int value = in[i]; + acc = ((acc << frombits) | value) & max_acc; + bits += frombits; + while (bits >= tobits) { + bits -= tobits; + out.push_back((acc >> bits) & maxv); + } + } + if (pad) { + if (bits) out.push_back((acc << (tobits - bits)) & maxv); + } else if (bits >= frombits || ((acc << (tobits - bits)) & maxv)) { + return false; + } + return true; +} + +} // namespace bech32 diff --git a/src/libspark/bech32.h b/src/libspark/bech32.h new file mode 100644 index 0000000000..33dc430b59 --- /dev/null +++ b/src/libspark/bech32.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2017, 2021 Pieter Wuille + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to 
deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef BECH32_H_ +#define BECH32_H_ 1 + +#include +#include +#include + +#include + +namespace bech32 +{ + +enum class Encoding { + INVALID, + + BECH32, //! Bech32 encoding as defined in BIP173 + BECH32M, //! Bech32m encoding as defined in BIP350 +}; + +/** Encode a Bech32 or Bech32m string. If hrp contains uppercase characters, this will cause an + * assertion error. Encoding must be one of BECH32 or BECH32M. */ +std::string encode(const std::string& hrp, const std::vector& values, Encoding encoding); + +/** A type for the result of decoding. */ +struct DecodeResult +{ + Encoding encoding; //!< What encoding was detected in the result; Encoding::INVALID if failed. + std::string hrp; //!< The human readable part + std::vector data; //!< The payload (excluding checksum) + + DecodeResult() : encoding(Encoding::INVALID) {} + DecodeResult(Encoding enc, std::string&& h, std::vector&& d) : encoding(enc), hrp(std::move(h)), data(std::move(d)) {} +}; + +/** Decode a Bech32 or Bech32m string. 
*/ +DecodeResult decode(const std::string& str); + +/** Convert from one power-of-2 number base to another. */ +bool convertbits(std::vector& out, const std::vector& in, int frombits, int tobits, bool pad); +} // namespace bech32 + +#endif // BECH32_H_ diff --git a/src/libspark/bpplus.cpp b/src/libspark/bpplus.cpp new file mode 100644 index 0000000000..393b04425e --- /dev/null +++ b/src/libspark/bpplus.cpp @@ -0,0 +1,533 @@ +#include "bpplus.h" +#include "transcript.h" + +namespace spark { + +// Useful scalar constants +const Scalar ZERO = Scalar((uint64_t) 0); +const Scalar ONE = Scalar((uint64_t) 1); +const Scalar TWO = Scalar((uint64_t) 2); + +BPPlus::BPPlus( + const GroupElement& G_, + const GroupElement& H_, + const std::vector& Gi_, + const std::vector& Hi_, + const std::size_t N_) + : G (G_) + , H (H_) + , Gi (Gi_) + , Hi (Hi_) + , N (N_) +{ + if (Gi.size() != Hi.size()) { + throw std::invalid_argument("Bad BPPlus generator sizes!"); + } + + // Bit length must be a nonzero power of two + if (!is_nonzero_power_of_2(N)) { + throw std::invalid_argument("Bad BPPlus bit length!"); + } + + // Compute 2**N-1 for optimized verification + TWO_N_MINUS_ONE = TWO; + for (int i = 0; i < log2(N); i++) { + TWO_N_MINUS_ONE *= TWO_N_MINUS_ONE; + } + TWO_N_MINUS_ONE -= ONE; +} + +// The floor function of log2 +std::size_t log2(std::size_t n) { + std::size_t l = 0; + while ((n >>= 1) != 0) { + l++; + } + + return l; +} + +// Is this value a nonzero power of 2? 
+bool is_nonzero_power_of_2(std::size_t n) { + return n > 0 && (n & (n - 1)) == 0; +} + +void BPPlus::prove( + const std::vector& unpadded_v, + const std::vector& unpadded_r, + const std::vector& unpadded_C, + BPPlusProof& proof) { + // Bulletproofs+ are only defined when the input set size is a nonzero power of two + // To get around this, we can trivially pad the input set with zero commitments + // We make sure this is done canonically in a way that's transparent to the caller + + // Define the original and padded sizes + std::size_t unpadded_M = unpadded_C.size(); + if (unpadded_M == 0) { + throw std::invalid_argument("Bad BPPlus statement!1"); + } + std::size_t M = unpadded_M; + if (!is_nonzero_power_of_2(M)) { + M = 1 << (log2(unpadded_M) + 1); + } + + // Set up transcript, using the unpadded values + // This is fine since the verifier canonically generates the same transcript + Transcript transcript(LABEL_TRANSCRIPT_BPPLUS); + transcript.add("G", G); + transcript.add("H", H); + transcript.add("Gi", Gi); + transcript.add("Hi", Hi); + transcript.add("N", Scalar(N)); + transcript.add("C", unpadded_C); + + // Now pad the input set to produce a valid statement + std::vector v(unpadded_v); + std::vector r(unpadded_r); + std::vector C(unpadded_C); + for (std::size_t i = unpadded_M; i < M; i++) { + v.emplace_back(); // zero scalar + r.emplace_back(); // zero scalar + C.emplace_back(); // identity group element, a valid commitment using the corresponding scalars + } + + // Check statement validity + if (C.size() != M) { + throw std::invalid_argument("Bad BPPlus statement!2"); + } + if (!is_nonzero_power_of_2(M)) { + throw std::invalid_argument("Unexpected bad padding!3"); + } + if (N*M > Gi.size()) { + throw std::invalid_argument("Bad BPPlus statement!4"); + } + if (!(v.size() == M && r.size() == M)) { + throw std::invalid_argument("Bad BPPlus statement!5"); + } + for (std::size_t j = 0; j < M; j++) { + if (!(G*v[j] + H*r[j] == C[j])) { + throw 
std::invalid_argument("Bad BPPlus statement!6"); + } + } + + // Decompose bits + std::vector> bits; + bits.resize(M); + for (std::size_t j = 0; j < M; j++) { + v[j].get_bits(bits[j]); + } + + // Compute aL, aR + std::vector aL, aR; + aL.reserve(N*M); + aR.reserve(N*M); + for (std::size_t j = 0; j < M; ++j) + { + for (std::size_t i = 1; i <= N; ++i) + { + aL.emplace_back(uint64_t(bits[j][bits[j].size() - i])); + aR.emplace_back(Scalar(uint64_t(bits[j][bits[j].size() - i])) - ONE); + } + } + + // Compute A + Scalar alpha; + alpha.randomize(); + + std::vector A_points; + std::vector A_scalars; + A_points.reserve(2*N*M + 1); + A_scalars.reserve(2*N*M + 1); + + A_points.emplace_back(H); + A_scalars.emplace_back(alpha); + for (std::size_t i = 0; i < N*M; i++) { + A_points.emplace_back(Gi[i]); + A_scalars.emplace_back(aL[i]); + A_points.emplace_back(Hi[i]); + A_scalars.emplace_back(aR[i]); + } + secp_primitives::MultiExponent A_multiexp(A_points, A_scalars); + proof.A = A_multiexp.get_multiple(); + transcript.add("A", proof.A); + + // Challenges + Scalar y = transcript.challenge("y"); + Scalar z = transcript.challenge("z"); + Scalar z_square = z.square(); + + // Challenge powers + std::vector y_powers; + y_powers.resize(M*N + 2); + y_powers[0] = ZERO; + y_powers[1] = y; + for (std::size_t i = 2; i < M*N + 2; i++) { + y_powers[i] = y_powers[i-1]*y; + } + + // Compute d + std::vector d; + d.resize(M*N); + d[0] = z_square; + for (std::size_t i = 1; i < N; i++) { + d[i] = TWO*d[i-1]; + } + for (std::size_t j = 1; j < M; j++) { + for (std::size_t i = 0; i < N; i++) { + d[j*N+i] = d[(j-1)*N+i]*z_square; + } + } + + // Compute aL1, aR1 + std::vector aL1, aR1; + for (std::size_t i = 0; i < N*M; i++) { + aL1.emplace_back(aL[i] - z); + aR1.emplace_back(aR[i] + d[i]*y_powers[N*M - i] + z); + } + + // Compute alpha1 + Scalar alpha1 = alpha; + Scalar z_even_powers = 1; + for (std::size_t j = 0; j < M; j++) { + z_even_powers *= z_square; + alpha1 += z_even_powers*r[j]*y_powers[N*M+1]; 
+ } + + // Run the inner product rounds + std::vector Gi1(Gi); + std::vector Hi1(Hi); + std::vector a1(aL1); + std::vector b1(aR1); + std::size_t N1 = N*M; + + while (N1 > 1) { + N1 /= 2; + + Scalar dL, dR; + dL.randomize(); + dR.randomize(); + + // Compute cL, cR + Scalar cL, cR; + for (std::size_t i = 0; i < N1; i++) { + cL += a1[i]*y_powers[i+1]*b1[i+N1]; + cR += a1[i+N1]*y_powers[N1]*y_powers[i+1]*b1[i]; + } + + // Compute L, R + GroupElement L_, R_; + std::vector L_points, R_points; + std::vector L_scalars, R_scalars; + L_points.reserve(2*N1 + 2); + R_points.reserve(2*N1 + 2); + L_scalars.reserve(2*N1 + 2); + R_scalars.reserve(2*N1 + 2); + Scalar y_N1_inverse = y_powers[N1].inverse(); + for (std::size_t i = 0; i < N1; i++) { + L_points.emplace_back(Gi1[i+N1]); + L_scalars.emplace_back(a1[i]*y_N1_inverse); + L_points.emplace_back(Hi1[i]); + L_scalars.emplace_back(b1[i+N1]); + + R_points.emplace_back(Gi1[i]); + R_scalars.emplace_back(a1[i+N1]*y_powers[N1]); + R_points.emplace_back(Hi1[i+N1]); + R_scalars.emplace_back(b1[i]); + } + L_points.emplace_back(G); + L_scalars.emplace_back(cL); + L_points.emplace_back(H); + L_scalars.emplace_back(dL); + R_points.emplace_back(G); + R_scalars.emplace_back(cR); + R_points.emplace_back(H); + R_scalars.emplace_back(dR); + + secp_primitives::MultiExponent L_multiexp(L_points, L_scalars); + secp_primitives::MultiExponent R_multiexp(R_points, R_scalars); + L_ = L_multiexp.get_multiple(); + R_ = R_multiexp.get_multiple(); + proof.L.emplace_back(L_); + proof.R.emplace_back(R_); + + transcript.add("L", L_); + transcript.add("R", R_); + Scalar e = transcript.challenge("e"); + Scalar e_inverse = e.inverse(); + + // Compress round elements + for (std::size_t i = 0; i < N1; i++) { + Gi1[i] = Gi1[i]*e_inverse + Gi1[i+N1]*(e*y_N1_inverse); + Hi1[i] = Hi1[i]*e + Hi1[i+N1]*e_inverse; + a1[i] = a1[i]*e + a1[i+N1]*y_powers[N1]*e_inverse; + b1[i] = b1[i]*e_inverse + b1[i+N1]*e; + } + Gi1.resize(N1); + Hi1.resize(N1); + a1.resize(N1); + 
b1.resize(N1); + + // Update alpha1 + alpha1 = dL*e.square() + alpha1 + dR*e_inverse.square(); + } + + // Final proof elements + Scalar r_, s_, d_, eta_; + r_.randomize(); + s_.randomize(); + d_.randomize(); + eta_.randomize(); + + proof.A1 = Gi1[0]*r_ + Hi1[0]*s_ + G*(r_*y*b1[0] + s_*y*a1[0]) + H*d_; + proof.B = G*(r_*y*s_) + H*eta_; + + transcript.add("A1", proof.A1); + transcript.add("B", proof.B); + Scalar e1 = transcript.challenge("e1"); + + proof.r1 = r_ + a1[0]*e1; + proof.s1 = s_ + b1[0]*e1; + proof.d1 = eta_ + d_*e1 + alpha1*e1.square(); +} + +bool BPPlus::verify(const std::vector& unpadded_C, const BPPlusProof& proof) { + std::vector> unpadded_C_batch = {unpadded_C}; + std::vector proof_batch = {proof}; + + return verify(unpadded_C_batch, proof_batch); +} + +bool BPPlus::verify(const std::vector>& unpadded_C, const std::vector& proofs) { + // Preprocess all proofs + if (!(unpadded_C.size() == proofs.size())) { + return false; + } + std::size_t N_proofs = proofs.size(); + std::size_t max_M = 0; // maximum number of padded aggregated values across all proofs + + // Check aggregated input consistency + for (std::size_t k = 0; k < N_proofs; k++) { + std::size_t unpadded_M = unpadded_C[k].size(); + std::size_t M = unpadded_M; + + // Require a power of two + if (M == 0) { + return false; + } + if (!is_nonzero_power_of_2(M)) { + M = 1 << log2(unpadded_M) + 1; + } + + // Track the maximum value + if (M > max_M) { + max_M = M; + } + + // Check inner product round consistency + std::size_t rounds = proofs[k].L.size(); + if (proofs[k].R.size() != rounds) { + return false; + } + if (log2(N*M) != rounds) { + return false; + } + } + + // Check the bounds on the batch + if (max_M*N > Gi.size() || max_M*N > Hi.size()) { + return false; + } + + // Set up final multiscalar multiplication and common scalars + std::vector points; + std::vector scalars; + Scalar G_scalar, H_scalar; + + // Interleave the Gi and Hi scalars + for (std::size_t i = 0; i < max_M*N; i++) { + 
points.emplace_back(Gi[i]); + scalars.emplace_back(ZERO); + points.emplace_back(Hi[i]); + scalars.emplace_back(ZERO); + } + + // Process each proof and add to the batch + for (std::size_t k_proofs = 0; k_proofs < N_proofs; k_proofs++) { + const BPPlusProof proof = proofs[k_proofs]; + const std::size_t unpadded_M = unpadded_C[k_proofs].size(); + const std::size_t rounds = proof.L.size(); + + // Weight this proof in the batch + Scalar w = ZERO; + while (w == ZERO) { + w.randomize(); + } + + // Set up transcript + Transcript transcript(LABEL_TRANSCRIPT_BPPLUS); + transcript.add("G", G); + transcript.add("H", H); + transcript.add("Gi", Gi); + transcript.add("Hi", Hi); + transcript.add("N", Scalar(N)); + transcript.add("C", unpadded_C[k_proofs]); + transcript.add("A", proof.A); + + // Pad to a valid statement if needed + std::size_t M = unpadded_M; + if (!is_nonzero_power_of_2(M)) { + M = 1 << (log2(unpadded_M) + 1); + } + std::vector C(unpadded_C[k_proofs]); + for (std::size_t i = unpadded_M; i < M; i++) { + C.emplace_back(); + } + + // Get challenges + Scalar y = transcript.challenge("y"); + if (y == ZERO) { + return false; + } + Scalar y_inverse = y.inverse(); + Scalar y_NM = y; + for (std::size_t i = 0; i < rounds; i++) { + y_NM = y_NM.square(); + } + Scalar y_NM_1 = y_NM*y; + + Scalar z = transcript.challenge("z"); + if (z == ZERO) { + return false; + } + Scalar z_square = z.square(); + + std::vector e; + std::vector e_inverse; + for (std::size_t j = 0; j < rounds; j++) { + transcript.add("L", proof.L[j]); + transcript.add("R", proof.R[j]); + Scalar e_ = transcript.challenge("e"); + if (e_ == ZERO) { + return false; + } + e.emplace_back(e_); + e_inverse.emplace_back(e[j].inverse()); + } + + transcript.add("A1", proof.A1); + transcript.add("B", proof.B); + Scalar e1 = transcript.challenge("e1"); + if (e1 == ZERO) { + return false; + } + Scalar e1_square = e1.square(); + + // C_j: -e1**2 * z**(2*(j + 1)) * y**(N*M + 1) * w + Scalar C_scalar = 
e1_square.negate()*z_square*y_NM_1*w; + for (std::size_t j = 0; j < M; j++) { + points.emplace_back(C[j]); + scalars.emplace_back(C_scalar); + + C_scalar *= z.square(); + } + + // B: -w + points.emplace_back(proof.B); + scalars.emplace_back(w.negate()); + + // A1: -w*e1 + points.emplace_back(proof.A1); + scalars.emplace_back(w.negate()*e1); + + // A: -w*e1**2 + points.emplace_back(proof.A); + scalars.emplace_back(w.negate()*e1_square); + + // H: w*d1 + H_scalar += w*proof.d1; + + // Compute d + std::vector d; + d.resize(N*M); + d[0] = z_square; + for (std::size_t i = 1; i < N; i++) { + d[i] = d[i-1] + d[i-1]; + } + for (std::size_t j = 1; j < M; j++) { + for (std::size_t i = 0; i < N; i++) { + d[j*N + i] = d[(j - 1)*N + i]*z_square; + } + } + + // Sum the elements of d + Scalar sum_d = z_square; + Scalar temp_z = sum_d; + std::size_t temp_2M = 2*M; + while (temp_2M > 2) { + sum_d += sum_d*temp_z; + temp_z = temp_z.square(); + temp_2M /= 2; + } + sum_d *= TWO_N_MINUS_ONE; + + // Sum the powers of y + Scalar sum_y; + Scalar track = y; + for (std::size_t i = 0; i < N*M; i++) { + sum_y += track; + track *= y; + } + + // G: w*(r1*y*s1 + e1**2*(y**(N*M + 1)*z*sum_d + (z**2-z)*sum_y)) + G_scalar += w*(proof.r1*y*proof.s1 + e1_square*(y_NM_1*z*sum_d + (z_square - z)*sum_y)); + + // Track some iterated exponential terms + Scalar iter_y_inv = ONE; // y.inverse()**i + Scalar iter_y_NM = y_NM; // y**(N*M - i) + + // Gi, Hi + for (std::size_t i = 0; i < N*M; i++) { + Scalar g = proof.r1*e1*iter_y_inv; + Scalar h = proof.s1*e1; + for (std::size_t j = 0; j < rounds; j++) { + if ((i >> j) & 1) { + g *= e[rounds-j-1]; + h *= e_inverse[rounds-j-1]; + } else { + h *= e[rounds-j-1]; + g *= e_inverse[rounds-j-1]; + } + } + + // Gi + scalars[2*i] += w*(g + e1_square*z); + + // Hi + scalars[2*i+1] += w*(h - e1_square*(d[i]*iter_y_NM+z)); + + // Update the iterated values + iter_y_inv *= y_inverse; + iter_y_NM *= y_inverse; + } + + // L, R + for (std::size_t j = 0; j < rounds; j++) { + 
points.emplace_back(proof.L[j]); + scalars.emplace_back(w*(e1_square.negate()*e[j].square())); + points.emplace_back(proof.R[j]); + scalars.emplace_back(w*(e1_square.negate()*e_inverse[j].square())); + } + } + + // Add the common generators + points.emplace_back(G); + scalars.emplace_back(G_scalar); + points.emplace_back(H); + scalars.emplace_back(H_scalar); + + // Test the batch + secp_primitives::MultiExponent multiexp(points, scalars); + return multiexp.get_multiple().isInfinity(); +} + +} \ No newline at end of file diff --git a/src/libspark/bpplus.h b/src/libspark/bpplus.h new file mode 100644 index 0000000000..120b67528e --- /dev/null +++ b/src/libspark/bpplus.h @@ -0,0 +1,36 @@ +#ifndef FIRO_LIBSPARK_BPPLUS_H +#define FIRO_LIBSPARK_BPPLUS_H + +#include "bpplus_proof.h" +#include + +namespace spark { + +std::size_t log2(std::size_t n); +bool is_nonzero_power_of_2(std::size_t n); + +class BPPlus { +public: + BPPlus( + const GroupElement& G, + const GroupElement& H, + const std::vector& Gi, + const std::vector& Hi, + const std::size_t N); + + void prove(const std::vector& unpadded_v, const std::vector& unpadded_r, const std::vector& unpadded_C, BPPlusProof& proof); + bool verify(const std::vector& unpadded_C, const BPPlusProof& proof); // single proof + bool verify(const std::vector>& unpadded_C, const std::vector& proofs); // batch of proofs + +private: + GroupElement G; + GroupElement H; + std::vector Gi; + std::vector Hi; + std::size_t N; + Scalar TWO_N_MINUS_ONE; +}; + +} + +#endif diff --git a/src/libspark/bpplus_proof.h b/src/libspark/bpplus_proof.h new file mode 100644 index 0000000000..214dbc9a0e --- /dev/null +++ b/src/libspark/bpplus_proof.h @@ -0,0 +1,44 @@ +#ifndef FIRO_LIBSPARK_BPPLUS_PROOF_H +#define FIRO_LIBSPARK_BPPLUS_PROOF_H + +#include "params.h" + +namespace spark { + +class BPPlusProof{ +public: + + static inline int int_log2(std::size_t number) { + assert(number != 0); + + int l2 = 0; + while ((number >>= 1) != 0) + l2++; + + return l2; + 
} + + inline std::size_t memoryRequired() const { + return 3*GroupElement::memoryRequired() + 3*Scalar::memoryRequired() + L.size()*GroupElement::memoryRequired() + R.size()*GroupElement::memoryRequired(); + } + + ADD_SERIALIZE_METHODS; + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(A); + READWRITE(A1); + READWRITE(B); + READWRITE(r1); + READWRITE(s1); + READWRITE(d1); + READWRITE(L); + READWRITE(R); + } + + GroupElement A, A1, B; + Scalar r1, s1, d1; + std::vector L, R; +}; +} + +#endif diff --git a/src/libspark/chaum.cpp b/src/libspark/chaum.cpp new file mode 100644 index 0000000000..7f69852020 --- /dev/null +++ b/src/libspark/chaum.cpp @@ -0,0 +1,176 @@ +#include "chaum.h" +#include "transcript.h" + +namespace spark { + +Chaum::Chaum(const GroupElement& F_, const GroupElement& G_, const GroupElement& H_, const GroupElement& U_): + F(F_), G(G_), H(H_), U(U_) { +} + +Scalar Chaum::challenge( + const Scalar& mu, + const std::vector& S, + const std::vector& T, + const GroupElement& A1, + const std::vector& A2 +) { + Transcript transcript(LABEL_TRANSCRIPT_CHAUM); + transcript.add("F", F); + transcript.add("G", G); + transcript.add("H", H); + transcript.add("U", U); + transcript.add("mu", mu); + transcript.add("S", S); + transcript.add("T", T); + transcript.add("A1", A1); + transcript.add("A2", A2); + + return transcript.challenge("c"); +} + +void Chaum::prove( + const Scalar& mu, + const std::vector& x, + const std::vector& y, + const std::vector& z, + const std::vector& S, + const std::vector& T, + ChaumProof& proof +) { + // Check statement validity + std::size_t n = x.size(); + if (!(y.size() == n && z.size() == n && S.size() == n && T.size() == n)) { + throw std::invalid_argument("Bad Chaum statement!"); + } + for (std::size_t i = 0; i < n; i++) { + if (!(F*x[i] + G*y[i] + H*z[i] == S[i] && T[i]*x[i] + G*y[i] == U)) { + throw std::invalid_argument("Bad Chaum statement!"); + } + } + + std::vector r; + r.resize(n); + 
std::vector s; + s.resize(n); + for (std::size_t i = 0; i < n; i++) { + r[i].randomize(); + s[i].randomize(); + } + Scalar t; + t.randomize(); + + proof.A1 = H*t; + proof.A2.resize(n); + for (std::size_t i = 0; i < n; i++) { + proof.A1 += F*r[i] + G*s[i]; + proof.A2[i] = T[i]*r[i] + G*s[i]; + } + + Scalar c = challenge(mu, S, T, proof.A1, proof.A2); + + proof.t1.resize(n); + proof.t3 = t; + Scalar c_power(c); + for (std::size_t i = 0; i < n; i++) { + if (c_power.isZero()) { + throw std::invalid_argument("Unexpected challenge!"); + } + proof.t1[i] = r[i] + c_power*x[i]; + proof.t2 += s[i] + c_power*y[i]; + proof.t3 += c_power*z[i]; + c_power *= c; + } +} + +bool Chaum::verify( + const Scalar& mu, + const std::vector& S, + const std::vector& T, + ChaumProof& proof +) { + // Check proof semantics + std::size_t n = S.size(); + if (!(T.size() == n && proof.A2.size() == n && proof.t1.size() == n)) { + throw std::invalid_argument("Bad Chaum semantics!"); + } + + Scalar c = challenge(mu, S, T, proof.A1, proof.A2); + if (c.isZero()) { + throw std::invalid_argument("Unexpected challenge!"); + } + std::vector c_powers; + c_powers.emplace_back(c); + for (std::size_t i = 1; i < n; i++) { + c_powers.emplace_back(c_powers[i-1]*c); + if (c_powers[i].isZero()) { + throw std::invalid_argument("Unexpected challenge!"); + } + } + + // Weight the verification equations + Scalar w; + while (w.isZero()) { + w.randomize(); + } + + std::vector scalars; + std::vector points; + scalars.reserve(3*n + 5); + points.reserve(3*n + 5); + + // F + Scalar F_scalar; + for (std::size_t i = 0; i < n; i++) { + F_scalar -= proof.t1[i]; + } + scalars.emplace_back(F_scalar); + points.emplace_back(F); + + // G + scalars.emplace_back(proof.t2.negate() - w*proof.t2); + points.emplace_back(G); + + // H + scalars.emplace_back(proof.t3.negate()); + points.emplace_back(H); + + // U + Scalar U_scalar; + for (std::size_t i = 0; i < n; i++) { + U_scalar += c_powers[i]; + } + U_scalar *= w; + 
scalars.emplace_back(U_scalar); + points.emplace_back(U); + + // A1 + scalars.emplace_back(Scalar((uint64_t) 1)); + points.emplace_back(proof.A1); + + // {A2} + GroupElement A2_sum = proof.A2[0]; + for (std::size_t i = 1; i < n; i++) { + A2_sum += proof.A2[i]; + } + scalars.emplace_back(w); + points.emplace_back(A2_sum); + + // {S} + for (std::size_t i = 0; i < n; i++) { + scalars.emplace_back(c_powers[i]); + points.emplace_back(S[i]); + } + + // {T} + for (std::size_t i = 0; i < n; i++) { + scalars.emplace_back(w.negate()*proof.t1[i]); + points.emplace_back(T[i]); + } + + secp_primitives::MultiExponent multiexp(points, scalars); + // merged equalities and doing check in one multiexponentation, + // for weighting we use random w + return multiexp.get_multiple().isInfinity(); +} + +} diff --git a/src/libspark/chaum.h b/src/libspark/chaum.h new file mode 100644 index 0000000000..b15868b84c --- /dev/null +++ b/src/libspark/chaum.h @@ -0,0 +1,45 @@ +#ifndef FIRO_LIBSPARK_CHAUM_H +#define FIRO_LIBSPARK_CHAUM_H + +#include "chaum_proof.h" +#include + +namespace spark { + +class Chaum { +public: + Chaum(const GroupElement& F, const GroupElement& G, const GroupElement& H, const GroupElement& U); + + void prove( + const Scalar& mu, + const std::vector& x, + const std::vector& y, + const std::vector& z, + const std::vector& S, + const std::vector& T, + ChaumProof& proof + ); + bool verify( + const Scalar& mu, + const std::vector& S, + const std::vector& T, + ChaumProof& proof + ); + +private: + Scalar challenge( + const Scalar& mu, + const std::vector& S, + const std::vector& T, + const GroupElement& A1, + const std::vector& A2 + ); + const GroupElement& F; + const GroupElement& G; + const GroupElement& H; + const GroupElement& U; +}; + +} + +#endif diff --git a/src/libspark/chaum_proof.h b/src/libspark/chaum_proof.h new file mode 100644 index 0000000000..1885b883c1 --- /dev/null +++ b/src/libspark/chaum_proof.h @@ -0,0 +1,32 @@ +#ifndef FIRO_LIBSPARK_CHAUM_PROOF_H +#define 
FIRO_LIBSPARK_CHAUM_PROOF_H + +#include "params.h" + +namespace spark { + +class ChaumProof{ +public: + inline std::size_t memoryRequired() const { + return GroupElement::memoryRequired() + A2.size()*GroupElement::memoryRequired() + t1.size()*Scalar::memoryRequired() + 2*Scalar::memoryRequired(); + } + + ADD_SERIALIZE_METHODS; + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(A1); + READWRITE(A2); + READWRITE(t1); + READWRITE(t2); + READWRITE(t3); + } + +public: + GroupElement A1; + std::vector A2; + std::vector t1; + Scalar t2, t3; +}; +} + +#endif diff --git a/src/libspark/coin.cpp b/src/libspark/coin.cpp new file mode 100644 index 0000000000..27ca2f56e0 --- /dev/null +++ b/src/libspark/coin.cpp @@ -0,0 +1,209 @@ +#include "coin.h" +#include "../hash.h" + +namespace spark { + +using namespace secp_primitives; + +Coin::Coin() {} + +Coin::Coin(const Params* params) +{ + this->params = params; +} + +Coin::Coin( + const Params* params, + const char type, + const Scalar& k, + const Address& address, + const uint64_t& v, + const std::string& memo, + const std::vector& serial_context +) { + this->params = params; + this->serial_context = serial_context; + + // Validate the type + if (type != COIN_TYPE_MINT && type != COIN_TYPE_SPEND) { + throw std::invalid_argument("Bad coin type"); + } + this->type = type; + + + // + // Common elements to both coin types + // + + // Construct the recovery key + this->K = SparkUtils::hash_div(address.get_d())*SparkUtils::hash_k(k); + + // Construct the serial commitment + this->S = this->params->get_F()*SparkUtils::hash_ser(k, serial_context) + address.get_Q2(); + + // Construct the value commitment + this->C = this->params->get_G()*Scalar(v) + this->params->get_H()*SparkUtils::hash_val(k); + + // Check the memo validity, and pad if needed + if (memo.size() > this->params->get_memo_bytes()) { + throw std::invalid_argument("Memo is too large"); + } + std::vector memo_bytes(memo.begin(), memo.end()); 
+ std::vector padded_memo(memo_bytes); + padded_memo.resize(this->params->get_memo_bytes()); + + // + // Type-specific elements + // + + + if (this->type == COIN_TYPE_MINT) { + this->v = v; + // Encrypt recipient data + MintCoinRecipientData r; + r.d = address.get_d(); + r.k = k; + r.memo = std::string(padded_memo.begin(), padded_memo.end()); + CDataStream r_stream(SER_NETWORK, PROTOCOL_VERSION); + r_stream << r; + this->r_ = AEAD::encrypt(address.get_Q1()*SparkUtils::hash_k(k), "Mint coin data", r_stream); + } else { + // Encrypt recipient data + SpendCoinRecipientData r; + r.v = v; + r.d = address.get_d(); + r.k = k; + r.memo = std::string(padded_memo.begin(), padded_memo.end()); + CDataStream r_stream(SER_NETWORK, PROTOCOL_VERSION); + r_stream << r; + this->r_ = AEAD::encrypt(address.get_Q1()*SparkUtils::hash_k(k), "Spend coin data", r_stream); + } +} + +// Validate a coin for identification +// NOTE: This assumes the coin has a valid associated range proof, which MUST be separately checked as part of the valid transaction that produced it +bool Coin::validate( + const IncomingViewKey& incoming_view_key, + IdentifiedCoinData& data +) { + // Check recovery key + if (SparkUtils::hash_div(data.d)*SparkUtils::hash_k(data.k) != this->K) { + return false; + } + + // Check value commitment + if (this->params->get_G()*Scalar(data.v) + this->params->get_H()*SparkUtils::hash_val(data.k) != this->C) { + return false; + } + + // Check serial commitment + data.i = incoming_view_key.get_diversifier(data.d); + + if (this->params->get_F()*(SparkUtils::hash_ser(data.k, this->serial_context) + SparkUtils::hash_Q2(incoming_view_key.get_s1(), data.i)) + incoming_view_key.get_P2() != this->S) { + return false; + } + + return true; +} + +// Recover a coin +RecoveredCoinData Coin::recover(const FullViewKey& full_view_key, const IdentifiedCoinData& data) { + RecoveredCoinData recovered_data; + recovered_data.s = SparkUtils::hash_ser(data.k, this->serial_context) + 
SparkUtils::hash_Q2(full_view_key.get_s1(), data.i) + full_view_key.get_s2(); + recovered_data.T = (this->params->get_U() + full_view_key.get_D().inverse())*recovered_data.s.inverse(); + + return recovered_data; +} + +// Identify a coin +IdentifiedCoinData Coin::identify(const IncomingViewKey& incoming_view_key) { + IdentifiedCoinData data; + + // Deserialization means this process depends on the coin type + if (this->type == COIN_TYPE_MINT) { + MintCoinRecipientData r; + + try { + // Decrypt recipient data + CDataStream stream = AEAD::decrypt_and_verify(this->K*incoming_view_key.get_s1(), "Mint coin data", this->r_); + stream >> r; + } catch (const std::exception &) { + throw std::runtime_error("Unable to identify coin"); + } + + data.d = r.d; + data.v = this->v; + data.k = r.k; + data.memo = r.memo; + } else { + SpendCoinRecipientData r; + + try { + // Decrypt recipient data + CDataStream stream = AEAD::decrypt_and_verify(this->K*incoming_view_key.get_s1(), "Spend coin data", this->r_); + stream >> r; + } catch (const std::exception &) { + throw std::runtime_error("Unable to identify coin"); + } + + data.d = r.d; + data.v = r.v; + data.k = r.k; + data.memo = r.memo; + } + + // Validate the coin + if (!validate(incoming_view_key, data)) { + throw std::runtime_error("Malformed coin"); + } + + return data; +} + +std::size_t Coin::memoryRequired() { + secp_primitives::GroupElement groupElement; + return 1 + groupElement.memoryRequired() * 3 + 32 + AEAD_TAG_SIZE; +} + +bool Coin::operator==(const Coin& other) const { + if(this->S != other.S) + return false; + + if(this->K != other.K) + return false; + + if(this->C != other.C) + return false; + + if(this->r_.ciphertext != other.r_.ciphertext) + return false; + + if(this->r_.key_commitment != other.r_.key_commitment) + return false; + + if(this->r_.tag != other.r_.tag) + return false; + + return true; +} + +bool Coin::operator!=(const Coin& right) const { + return !operator==(right); +} + +uint256 Coin::getHash() const 
{ + CDataStream ss(SER_GETHASH, 0); + ss << "coin_hash"; + ss << *this; + return ::Hash(ss.begin(), ss.end()); +} + +void Coin::setSerialContext(const std::vector& serial_context_) { + serial_context = serial_context_; +} + +void Coin::setParams(const Params* params) { + this->params = params; +} + +} diff --git a/src/libspark/coin.h b/src/libspark/coin.h new file mode 100644 index 0000000000..cdb42d336f --- /dev/null +++ b/src/libspark/coin.h @@ -0,0 +1,125 @@ +#ifndef FIRO_SPARK_COIN_H +#define FIRO_SPARK_COIN_H +#include "bpplus.h" +#include "keys.h" +#include +#include "params.h" +#include "aead.h" +#include "util.h" +#include "../uint256.h" + +namespace spark { + +using namespace secp_primitives; + +// Flags for coin types: those generated from mints, and those generated from spends +const char COIN_TYPE_MINT = 0; +const char COIN_TYPE_SPEND = 1; + +struct IdentifiedCoinData { + uint64_t i; // diversifier + std::vector d; // encrypted diversifier + uint64_t v; // value + Scalar k; // nonce + std::string memo; // memo +}; + +struct RecoveredCoinData { + Scalar s; // serial + GroupElement T; // tag +}; + +// Data to be encrypted for the recipient of a coin generated in a mint transaction +struct MintCoinRecipientData { + std::vector d; // encrypted diversifier + Scalar k; // nonce + std::string memo; // memo + + ADD_SERIALIZE_METHODS; + + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(d); + READWRITE(k); + READWRITE(memo); + } +}; + +// Data to be encrypted for the recipient of a coin generated in a spend transaction +struct SpendCoinRecipientData { + uint64_t v; // value + std::vector d; // encrypted diversifier + Scalar k; // nonce + std::string memo; // memo + + ADD_SERIALIZE_METHODS; + + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(v); + READWRITE(d); + READWRITE(k); + READWRITE(memo); + } +}; + +class Coin { +public: + Coin(); + Coin(const Params* params); + Coin( + const 
Params* params, + const char type, + const Scalar& k, + const Address& address, + const uint64_t& v, + const std::string& memo, + const std::vector& serial_context + ); + + // Given an incoming view key, extract the coin's nonce, diversifier, value, and memo + IdentifiedCoinData identify(const IncomingViewKey& incoming_view_key); + + // Given a full view key, extract the coin's serial number and tag + RecoveredCoinData recover(const FullViewKey& full_view_key, const IdentifiedCoinData& data); + + static std::size_t memoryRequired(); + + bool operator==(const Coin& other) const; + bool operator!=(const Coin& other) const; + + // type and v are not included in hash + uint256 getHash() const; + + void setParams(const Params* params); + void setSerialContext(const std::vector& serial_context_); +protected: + bool validate(const IncomingViewKey& incoming_view_key, IdentifiedCoinData& data); + +public: + const Params* params; + char type; // type flag + GroupElement S, K, C; // serial commitment, recovery key, value commitment + AEADEncryptedData r_; // encrypted recipient data + uint64_t v; // value + std::vector serial_context; // context to which the serial commitment should be bound (not serialized, but inferred) + + // Serialization depends on the coin type + ADD_SERIALIZE_METHODS; + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(type); + READWRITE(S); + READWRITE(K); + READWRITE(C); + READWRITE(r_); + + if (type == COIN_TYPE_MINT) { + READWRITE(v); + } + } +}; + +} + +#endif diff --git a/src/libspark/f4grumble.cpp b/src/libspark/f4grumble.cpp new file mode 100644 index 0000000000..839c27e9de --- /dev/null +++ b/src/libspark/f4grumble.cpp @@ -0,0 +1,152 @@ +// A design for address scrambling based on `f4jumble`: https://zips.z.cash/zip-0316#jumbling +// This design differs from `f4jumble` to account for limitations on SHA512 +// These limitations are unfortunate, but such is life sometimes +// +// To account for these 
limitations, we do the following: +// - Place extra restrictions on length to avoid XOF input encoding (and because we don't need it) +// - Replace personalization with fixed-length inputs; note that length is NOT prepended +// - Truncate outputs to the proper length +// +// Additionally, we account for the number of rounds by limiting the round counter encoding + +#include "f4grumble.h" + +namespace spark { + +using namespace secp_primitives; + +// Compute the XOR of two byte vectors +std::vector F4Grumble::vec_xor(const std::vector& x, const std::vector& y) { + if (x.size() != y.size()) { + throw std::invalid_argument("Mismatched vector sizes"); + } + + std::vector result; + result.reserve(x.size()); + for (std::size_t i = 0; i < x.size(); i++) { + result.emplace_back(x[i] ^ y[i]); + } + + return result; +} + +// Return the maximum allowed input size in bytes +std::size_t F4Grumble::get_max_size() { + return 2 * EVP_MD_size(EVP_sha512()); +} + +// Instantiate with a given network identifier and expected input length +F4Grumble::F4Grumble(const unsigned char network, const int l_M) { + // Assert the length is valid + if (l_M > 2 * EVP_MD_size(EVP_sha512())) { + throw std::invalid_argument("Bad address size"); + } + + this->network = network; + this->l_M = l_M; + this->l_L = l_M / 2; + this->l_R = l_M - l_L; +} + +// Encode the input data +std::vector F4Grumble::encode(const std::vector& input) { + // Check the input size + if (input.size() != l_M) { + throw std::invalid_argument("Bad address size"); + } + + // Split the input + std::vector a = std::vector(input.begin(), input.begin() + this->l_M / 2); + std::vector b = std::vector(input.begin() + this->l_M / 2, input.end()); + + // Perform the Feistel operations + std::vector x = vec_xor(b, G(0, a)); + std::vector y = vec_xor(a, H(0, x)); + std::vector d = vec_xor(x, G(1, y)); + std::vector c = vec_xor(y, H(1, d)); + + // Return the concatenation + std::vector result(c); + result.insert(result.end(), d.begin(), 
d.end()); + return result; +} + +// Decode the input data +std::vector F4Grumble::decode(const std::vector& input) { + // Check the input size + if (input.size() != l_M) { + throw std::invalid_argument("Bad address size"); + } + + // Split the input + std::vector c = std::vector(input.begin(), input.begin() + this->l_M / 2); + std::vector d = std::vector(input.begin() + this->l_M / 2, input.end()); + + // Perform the Feistel operations + std::vector y = vec_xor(c, H(1, d)); + std::vector x = vec_xor(d, G(1, y)); + std::vector a = vec_xor(y, H(0, x)); + std::vector b = vec_xor(x, G(0, a)); + + // Return the concatenation + std::vector result(a); + result.insert(result.end(), b.begin(), b.end()); + return result; +} + +// Feistel round functions +std::vector F4Grumble::G(const unsigned char i, const std::vector& u) { + EVP_MD_CTX* ctx = EVP_MD_CTX_new(); + EVP_DigestInit_ex(ctx, EVP_sha512(), NULL); + + // Bind the domain separator and network + std::vector domain(LABEL_F4GRUMBLE_G.begin(), LABEL_F4GRUMBLE_G.end()); + EVP_DigestUpdate(ctx, domain.data(), domain.size()); + EVP_DigestUpdate(ctx, &this->network, sizeof(this->network)); + + // Include the round index + EVP_DigestUpdate(ctx, &i, sizeof(i)); + + // Include the input data + EVP_DigestUpdate(ctx, u.data(), u.size()); + + // Finalize the hash and resize + std::vector result; + result.resize(EVP_MD_size(EVP_sha512())); + + unsigned int TEMP; + EVP_DigestFinal_ex(ctx, result.data(), &TEMP); + EVP_MD_CTX_free(ctx); + result.resize(this->l_R); + + return result; +} + +std::vector F4Grumble::H(const unsigned char i, const std::vector& u) { + EVP_MD_CTX* ctx = EVP_MD_CTX_new(); + EVP_DigestInit_ex(ctx, EVP_sha512(), NULL); + + // Bind the domain separator and network + std::vector domain(LABEL_F4GRUMBLE_H.begin(), LABEL_F4GRUMBLE_H.end()); + EVP_DigestUpdate(ctx, domain.data(), domain.size()); + EVP_DigestUpdate(ctx, &this->network, sizeof(this->network)); + + // Include the round index + EVP_DigestUpdate(ctx, &i, 
sizeof(i)); + + // Include the input data + EVP_DigestUpdate(ctx, u.data(), u.size()); + + // Finalize the hash and resize + std::vector result; + result.resize(EVP_MD_size(EVP_sha512())); + + unsigned int TEMP; + EVP_DigestFinal_ex(ctx, result.data(), &TEMP); + EVP_MD_CTX_free(ctx); + result.resize(this->l_L); + + return result; +} + +} diff --git a/src/libspark/f4grumble.h b/src/libspark/f4grumble.h new file mode 100644 index 0000000000..6c9fbb47bd --- /dev/null +++ b/src/libspark/f4grumble.h @@ -0,0 +1,32 @@ +#ifndef FIRO_SPARK_F4GRUMBLE_H +#define FIRO_SPARK_F4GRUMBLE_H +#include +#include "util.h" + +namespace spark { + +using namespace secp_primitives; + +class F4Grumble { +public: + F4Grumble(const unsigned char network, const int l_M); + + std::vector encode(const std::vector& input); + std::vector decode(const std::vector& input); + + static std::size_t get_max_size(); + +private: + static std::vector vec_xor(const std::vector& x, const std::vector& y); + + // The internal Feistel round functions + std::vector G(const unsigned char i, const std::vector& u); + std::vector H(const unsigned char i, const std::vector& u); + + unsigned char network; + int l_M, l_L, l_R; +}; + +} + +#endif diff --git a/src/libspark/grootle.cpp b/src/libspark/grootle.cpp new file mode 100644 index 0000000000..67ca20eced --- /dev/null +++ b/src/libspark/grootle.cpp @@ -0,0 +1,573 @@ +#include "grootle.h" +#include "transcript.h" + +namespace spark { + +// Useful scalar constants +const Scalar ZERO = Scalar(uint64_t(0)); +const Scalar ONE = Scalar(uint64_t(1)); +const Scalar TWO = Scalar(uint64_t(2)); + +Grootle::Grootle( + const GroupElement& H_, + const std::vector& Gi_, + const std::vector& Hi_, + const std::size_t n_, + const std::size_t m_) + : H (H_) + , Gi (Gi_) + , Hi (Hi_) + , n (n_) + , m (m_) +{ + if (!(n > 1 && m > 1)) { + throw std::invalid_argument("Bad Grootle size parameters!"); + } + if (Gi.size() != n*m || Hi.size() != n*m) { + throw std::invalid_argument("Bad 
Grootle generator size!"); + } +} + +// Compute a delta function vector +static inline std::vector convert_to_sigma(std::size_t num, const std::size_t n, const std::size_t m) { + std::vector result; + result.reserve(n*m); + + for (std::size_t j = 0; j < m; j++) { + for (std::size_t i = 0; i < n; i++) { + if (i == (num % n)) { + result.emplace_back(ONE); + } else { + result.emplace_back(ZERO); + } + } + num /= n; + } + + return result; +} + +// Decompose an integer with arbitrary base and padded size +static inline std::vector decompose(std::size_t num, const std::size_t n, const std::size_t m) { + std::vector result; + result.reserve(m); + + while (num != 0) { + result.emplace_back(num % n); + num /= n; + } + result.resize(m); + + return result; +} + +// Compute a double Pedersen vector commitment +static inline GroupElement vector_commit(const std::vector& Gi, const std::vector& Hi, const std::vector& a, const std::vector& b, const GroupElement& H, const Scalar& r) { + if (Gi.size() != a.size() || Hi.size() != b.size()) { + throw std::runtime_error("Vector commitment size mismatch!"); + } + return secp_primitives::MultiExponent(Gi, a).get_multiple() + secp_primitives::MultiExponent(Hi, b).get_multiple() + H*r; +} + +// Compute a convolution with a degree-one polynomial +static inline void convolve(const Scalar& x_1, const Scalar& x_0, std::vector& coefficients) { + if (coefficients.empty()) { + throw std::runtime_error("Empty convolution coefficient vector!"); + } + + std::size_t degree = coefficients.size() - 1; + coefficients.emplace_back(x_1*coefficients[degree]); + for (std::size_t i = degree; i >=1; i--) { + coefficients[i] = x_0*coefficients[i] + x_1*coefficients[i-1]; + } + coefficients[0] *= x_0; +} + +static bool compute_fs( + const GrootleProof& proof, + const Scalar& x, + std::vector& f_, + const std::size_t n, + const std::size_t m) { + for (std::size_t j = 0; j < proof.f.size(); ++j) { + if(proof.f[j] == x) + return false; + } + + f_.reserve(n * m); + 
for (std::size_t j = 0; j < m; ++j) + { + f_.push_back(Scalar(uint64_t(0))); + Scalar temp; + std::size_t k = n - 1; + for (std::size_t i = 0; i < k; ++i) + { + temp += proof.f[j * k + i]; + f_.emplace_back(proof.f[j * k + i]); + } + f_[j * n] = x - temp; + } + return true; +} + +static void compute_batch_fis( + Scalar& f_sum, + const Scalar& f_i, + int j, + const std::vector& f, + const Scalar& y, + std::vector::iterator& ptr, + std::vector::iterator start_ptr, + std::vector::iterator end_ptr, + const std::size_t n) { + j--; + if (j == -1) + { + if(ptr >= start_ptr && ptr < end_ptr){ + *ptr++ += f_i * y; + f_sum += f_i; + } + return; + } + + Scalar t; + + for (std::size_t i = 0; i < n; i++) + { + t = f[j * n + i]; + t *= f_i; + + compute_batch_fis(f_sum, t, j, f, y, ptr, start_ptr, end_ptr, n); + } +} + +void Grootle::prove( + const std::size_t l, + const Scalar& s, + const std::vector& S, + const GroupElement& S1, + const Scalar& v, + const std::vector& V, + const GroupElement& V1, + const std::vector& root, + GrootleProof& proof) { + // Check statement validity + std::size_t N = (std::size_t) pow(n, m); // padded input size + std::size_t size = S.size(); // actual input size + if (l >= size) { + throw std::invalid_argument("Bad Grootle secret index!"); + } + if (V.size() != S.size()) { + throw std::invalid_argument("Bad Grootle input vector sizes!"); + } + if (size > N || size == 0) { + throw std::invalid_argument("Bad Grootle size parameter!"); + } + if (S[l] + S1.inverse() != H*s) { + throw std::invalid_argument("Bad Grootle proof statement!"); + } + if (V[l] + V1.inverse() != H*v) { + throw std::invalid_argument("Bad Grootle proof statement!"); + } + + // Set up transcript + Transcript transcript(LABEL_TRANSCRIPT_GROOTLE); + transcript.add("H", H); + transcript.add("Gi", Gi); + transcript.add("Hi", Hi); + transcript.add("n", Scalar(n)); + transcript.add("m", Scalar(m)); + transcript.add("root", root); + transcript.add("S1", S1); + transcript.add("V1", V1); + 
+ // Compute A + std::vector a; + a.resize(n*m); + for (std::size_t j = 0; j < m; j++) { + for (std::size_t i = 1; i < n; i++) { + a[j*n + i].randomize(); + a[j*n] -= a[j*n + i]; + } + } + std::vector d; + d.resize(n*m); + for (std::size_t i = 0; i < n*m; i++) { + d[i] = a[i].square().negate(); + } + Scalar rA; + rA.randomize(); + proof.A = vector_commit(Gi, Hi, a, d, H, rA); + + // Compute B + std::vector sigma = convert_to_sigma(l, n, m); + std::vector c; + c.resize(n*m); + for (std::size_t i = 0; i < n*m; i++) { + c[i] = a[i]*(ONE - TWO*sigma[i]); + } + Scalar rB; + rB.randomize(); + proof.B = vector_commit(Gi, Hi, sigma, c, H, rB); + + // Compute convolution terms + std::vector> P_i_j; + P_i_j.resize(size); + for (std::size_t i = 0; i < size - 1; ++i) + { + std::vector& coefficients = P_i_j[i]; + std::vector I = decompose(i, n, m); + coefficients.push_back(a[I[0]]); + coefficients.push_back(sigma[I[0]]); + for (std::size_t j = 1; j < m; ++j) { + convolve(sigma[j*n + I[j]], a[j*n + I[j]], coefficients); + } + } + + /* + * To optimize calculation of sum of all polynomials indices 's' = size-1 through 'n^m-1' we use the + * fact that sum of all of elements in each row of 'a' array is zero. Computation is done by going + * through n-ary representation of 's' and increasing "digit" at each position to 'n-1' one by one. + * During every step digits at higher positions are fixed and digits at lower positions go through all + * possible combinations with a total corresponding polynomial sum of 'x^j'. 
+ * + * The math behind optimization (TeX notation): + * + * \sum_{i=s+1}^{N-1}p_i(x) = + * \sum_{j=0}^{m-1} + * \left[ + * \left( \sum_{i=s_j+1}^{n-1}(\delta_{l_j,i}x+a_{j,i}) \right) + * \left( \prod_{k=j}^{m-1}(\delta_{l_k,s_k}x+a_{k,s_k}) \right) + * x^j + * \right] + */ + + std::vector I = decompose(size - 1, n, m); + std::vector lj = decompose(l, n, m); + + std::vector p_i_sum; + p_i_sum.emplace_back(ONE); + std::vector> partial_p_s; + + // Pre-calculate product parts and calculate p_s(x) at the same time, put the latter into p_i_sum + for (std::ptrdiff_t j = m - 1; j >= 0; j--) { + partial_p_s.push_back(p_i_sum); + convolve(sigma[j*n + I[j]], a[j*n + I[j]], p_i_sum); + } + + for (std::size_t j = 0; j < m; j++) { + // \sum_{i=s_j+1}^{n-1}(\delta_{l_j,i}x+a_{j,i}) + Scalar a_sum(uint64_t(0)); + for (std::size_t i = I[j] + 1; i < n; i++) + a_sum += a[j * n + i]; + Scalar x_sum(uint64_t(lj[j] >= I[j]+1 ? 1 : 0)); + + // Multiply by \prod_{k=j}^{m-1}(\delta_{l_k,s_k}x+a_{k,s_k}) + std::vector &polynomial = partial_p_s[m - j - 1]; + convolve(x_sum, a_sum, polynomial); + + // Multiply by x^j and add to the result + for (std::size_t k = 0; k < m - j; k++) + p_i_sum[j + k] += polynomial[k]; + } + + P_i_j[size - 1] = p_i_sum; + + // Perform the commitment offsets + std::vector S_offset(S); + std::vector V_offset(V); + GroupElement S1_inverse = S1.inverse(); + GroupElement V1_inverse = V1.inverse(); + for (std::size_t k = 0; k < S_offset.size(); k++) { + S_offset[k] += S1_inverse; + V_offset[k] += V1_inverse; + } + + // Generate masks + std::vector rho_S, rho_V; + rho_S.resize(m); + rho_V.resize(m); + for (std::size_t j = 0; j < m; j++) { + rho_S[j].randomize(); + rho_V[j].randomize(); + } + + proof.X.reserve(m); + proof.X1.reserve(m); + for (std::size_t j = 0; j < m; ++j) + { + std::vector P_i; + P_i.reserve(size); + for (std::size_t i = 0; i < size; ++i){ + P_i.emplace_back(P_i_j[i][j]); + } + + // S + secp_primitives::MultiExponent mult_S(S_offset, P_i); + 
proof.X.emplace_back(mult_S.get_multiple() + H*rho_S[j]); + + // V + secp_primitives::MultiExponent mult_V(V_offset, P_i); + proof.X1.emplace_back(mult_V.get_multiple() + H*rho_V[j]); + } + + // Challenge + transcript.add("A", proof.A); + transcript.add("B", proof.B); + transcript.add("X", proof.X); + transcript.add("X1", proof.X1); + Scalar x = transcript.challenge("x"); + + // Compute f + proof.f.reserve(m*(n - 1)); + for (std::size_t j = 0; j < m; j++) + { + for (std::size_t i = 1; i < n; i++) { + proof.f.emplace_back(sigma[(j * n) + i] * x + a[(j * n) + i]); + } + } + + // Compute zA, zC + proof.z = rB * x + rA; + + // Compute zS, zV + proof.zS = s * x.exponent(uint64_t(m)); + proof.zV = v * x.exponent(uint64_t(m)); + Scalar sumS, sumV; + + Scalar x_powers(uint64_t(1)); + for (std::size_t j = 0; j < m; ++j) { + sumS += (rho_S[j] * x_powers); + sumV += (rho_V[j] * x_powers); + x_powers *= x; + } + proof.zS -= sumS; + proof.zV -= sumV; +} + +// Verify a single proof +bool Grootle::verify( + const std::vector& S, + const GroupElement& S1, + const std::vector& V, + const GroupElement& V1, + const std::vector& root, + const std::size_t size, + const GrootleProof& proof) { + std::vector S1_batch = {S1}; + std::vector V1_batch = {V1}; + std::vector size_batch = {size}; + std::vector> root_batch = {root}; + std::vector proof_batch = {proof}; + + return verify(S, S1_batch, V, V1_batch, root_batch, size_batch, proof_batch); +} + +// Verify a batch of proofs +bool Grootle::verify( + const std::vector& S, + const std::vector& S1, + const std::vector& V, + const std::vector& V1, + const std::vector>& roots, + const std::vector& sizes, + const std::vector& proofs) { + // Sanity checks + if (n < 2 || m < 2) { + LogPrintf("Verifier parameters are invalid"); + return false; + } + std::size_t M = proofs.size(); + std::size_t N = (std::size_t)pow(n, m); + + if (S.size() == 0) { + LogPrintf("Cannot have empty commitment set"); + return false; + } + if (S.size() > N) { + 
LogPrintf("Commitment set is too large"); + return false; + } + if (S.size() != V.size()) { + LogPrintf("Commitment set sizes do not match"); + return false; + } + if (S1.size() != M || V1.size() != M) { + LogPrintf("Invalid number of offsets provided"); + return false; + } + if (sizes.size() != M) { + LogPrintf("Invalid set size vector size"); + return false; + } + if (roots.size() != M) { + LogPrintf("Invalid root vector size"); + return false; + } + + // Check proof semantics + for (std::size_t t = 0; t < M; t++) { + GrootleProof proof = proofs[t]; + if (proof.X.size() != m || proof.X1.size() != m) { + LogPrintf("Bad proof vector size!"); + return false; + } + if (proof.f.size() != m*(n-1)) { + LogPrintf("Bad proof vector size!"); + return false; + } + } + + // Commitment binding weight; intentionally restricted range for efficiency, but must be nonzero + // NOTE: this may initialize with a PRNG, which should be sufficient for this use + std::random_device generator; + std::uniform_int_distribution distribution; + Scalar bind_weight(ZERO); + while (bind_weight == ZERO) { + bind_weight = Scalar(distribution(generator)); + } + + // Bind the commitment lists + std::vector commits; + commits.reserve(S.size()); + for (std::size_t i = 0; i < S.size(); i++) { + commits.emplace_back(S[i] + V[i]*bind_weight); + } + + // Final batch multiscalar multiplication + Scalar H_scalar; + std::vector Gi_scalars; + std::vector Hi_scalars; + std::vector commit_scalars; + Gi_scalars.resize(n*m); + Hi_scalars.resize(n*m); + commit_scalars.resize(commits.size()); + + // Set up the final batch elements + std::vector points; + std::vector scalars; + std::size_t final_size = 1 + 2*m*n + commits.size(); // F, (Gi), (Hi), (commits) + for (std::size_t t = 0; t < M; t++) { + final_size += 2 + proofs[t].X.size() + proofs[t].X1.size(); // A, B, (Gs), (Gv) + } + points.reserve(final_size); + scalars.reserve(final_size); + + // Index decomposition, which is common among all proofs + std::vector > 
I_; + I_.reserve(commits.size()); + I_.resize(commits.size()); + for (std::size_t i = 0; i < commits.size(); i++) { + I_[i] = decompose(i, n, m); + } + + // Process all proofs + for (std::size_t t = 0; t < M; t++) { + GrootleProof proof = proofs[t]; + + // Reconstruct the challenge + Transcript transcript(LABEL_TRANSCRIPT_GROOTLE); + transcript.add("H", H); + transcript.add("Gi", Gi); + transcript.add("Hi", Hi); + transcript.add("n", Scalar(n)); + transcript.add("m", Scalar(m)); + transcript.add("root", roots[t]); + transcript.add("S1", S1[t]); + transcript.add("V1", V1[t]); + transcript.add("A", proof.A); + transcript.add("B", proof.B); + transcript.add("X", proof.X); + transcript.add("X1", proof.X1); + Scalar x = transcript.challenge("x"); + + // Generate nonzero random verifier weights (the randomization already asserts nonzero) + Scalar w1, w2; + w1.randomize(); + w2.randomize(); + + // Reconstruct f-matrix + std::vector f_; + if (!compute_fs(proof, x, f_, n, m)) { + LogPrintf("Invalid matrix reconstruction"); + return false; + } + + // Effective set size + const std::size_t size = sizes[t]; + + // A, B (and associated commitments) + points.emplace_back(proof.A); + scalars.emplace_back(w1.negate()); + points.emplace_back(proof.B); + scalars.emplace_back(x.negate() * w1); + + H_scalar += proof.z * w1; + for (std::size_t i = 0; i < m * n; i++) { + Gi_scalars[i] += f_[i] * w1; + Hi_scalars[i] += f_[i]*(x - f_[i]) * w1; + } + + // Input sets + H_scalar += (proof.zS + bind_weight * proof.zV) * w2.negate(); + + Scalar f_sum; + Scalar f_i(uint64_t(1)); + std::vector::iterator ptr = commit_scalars.begin() + commits.size() - size; + compute_batch_fis(f_sum, f_i, m, f_, w2, ptr, ptr, ptr + size - 1, n); + + Scalar pow(uint64_t(1)); + std::vector f_part_product; + for (std::ptrdiff_t j = m - 1; j >= 0; j--) { + f_part_product.push_back(pow); + pow *= f_[j*n + I_[size - 1][j]]; + } + + Scalar x_powers(uint64_t(1)); + for (std::size_t j = 0; j < m; j++) { + Scalar 
fi_sum(uint64_t(0)); + for (std::size_t i = I_[size - 1][j] + 1; i < n; i++) + fi_sum += f_[j*n + i]; + pow += fi_sum * x_powers * f_part_product[m - j - 1]; + x_powers *= x; + } + + f_sum += pow; + commit_scalars[commits.size() - 1] += pow * w2; + + // S1, V1 + points.emplace_back(S1[t] + V1[t] * bind_weight); + scalars.emplace_back(f_sum * w2.negate()); + + // (X), (X1) + x_powers = Scalar(uint64_t(1)); + for (std::size_t j = 0; j < m; j++) { + points.emplace_back(proof.X[j] + proof.X1[j] * bind_weight); + scalars.emplace_back(x_powers.negate() * w2); + x_powers *= x; + } + } + + // Add common generators + points.emplace_back(H); + scalars.emplace_back(H_scalar); + for (std::size_t i = 0; i < m * n; i++) { + points.emplace_back(Gi[i]); + scalars.emplace_back(Gi_scalars[i]); + points.emplace_back(Hi[i]); + scalars.emplace_back(Hi_scalars[i]); + } + for (std::size_t i = 0; i < commits.size(); i++) { + points.emplace_back(commits[i]); + scalars.emplace_back(commit_scalars[i]); + } + + // Verify the batch + secp_primitives::MultiExponent result(points, scalars); + if (result.get_multiple().isInfinity()) { + return true; + } + return false; +} + +} \ No newline at end of file diff --git a/src/libspark/grootle.h b/src/libspark/grootle.h new file mode 100644 index 0000000000..f0d5dd4b27 --- /dev/null +++ b/src/libspark/grootle.h @@ -0,0 +1,56 @@ +#ifndef FIRO_LIBSPARK_GROOTLE_H +#define FIRO_LIBSPARK_GROOTLE_H + +#include "grootle_proof.h" +#include +#include +#include "util.h" + +namespace spark { + +class Grootle { + +public: + Grootle( + const GroupElement& H, + const std::vector& Gi, + const std::vector& Hi, + const std::size_t n, + const std::size_t m + ); + + void prove(const std::size_t l, + const Scalar& s, + const std::vector& S, + const GroupElement& S1, + const Scalar& v, + const std::vector& V, + const GroupElement& V1, + const std::vector& root, + GrootleProof& proof); + bool verify(const std::vector& S, + const GroupElement& S1, + const std::vector& V, + 
const GroupElement& V1, + const std::vector& root, + const std::size_t size, + const GrootleProof& proof); // single proof + bool verify(const std::vector& S, + const std::vector& S1, + const std::vector& V, + const std::vector& V1, + const std::vector>& roots, + const std::vector& sizes, + const std::vector& proofs); // batch of proofs + +private: + GroupElement H; + std::vector Gi; + std::vector Hi; + std::size_t n; + std::size_t m; +}; + +} + +#endif diff --git a/src/libspark/grootle_proof.h b/src/libspark/grootle_proof.h new file mode 100644 index 0000000000..3530f7e343 --- /dev/null +++ b/src/libspark/grootle_proof.h @@ -0,0 +1,45 @@ +#ifndef FIRO_LIBSPARK_GROOTLE_PROOF_H +#define FIRO_LIBSPARK_GROOTLE_PROOF_H + +#include "params.h" + +namespace spark { + +class GrootleProof { +public: + + inline std::size_t memoryRequired() const { + return 2*GroupElement::memoryRequired() + X.size()*GroupElement::memoryRequired() + X1.size()*GroupElement::memoryRequired() + f.size()*Scalar::memoryRequired() + 3*Scalar::memoryRequired(); + } + + inline std::size_t memoryRequired(int n, int m) const { + return 2*GroupElement::memoryRequired() + 2*m*GroupElement::memoryRequired() + m*(n-1)*Scalar::memoryRequired() + 3*Scalar::memoryRequired(); + } + + ADD_SERIALIZE_METHODS; + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(A); + READWRITE(B); + READWRITE(X); + READWRITE(X1); + READWRITE(f); + READWRITE(z); + READWRITE(zS); + READWRITE(zV); + } + +public: + GroupElement A; + GroupElement B; + std::vector X; + std::vector X1; + std::vector f; + Scalar z; + Scalar zS; + Scalar zV; +}; + +} + +#endif diff --git a/src/libspark/hash.cpp b/src/libspark/hash.cpp new file mode 100644 index 0000000000..2c6d71317d --- /dev/null +++ b/src/libspark/hash.cpp @@ -0,0 +1,160 @@ +#include "hash.h" + +namespace spark { + +using namespace secp_primitives; + +// Set up a labeled hash function +Hash::Hash(const std::string label) { + this->ctx = 
EVP_MD_CTX_new(); + EVP_DigestInit_ex(this->ctx, EVP_sha512(), NULL); + + // Write the protocol and mode information + std::vector protocol(LABEL_PROTOCOL.begin(), LABEL_PROTOCOL.end()); + EVP_DigestUpdate(this->ctx, protocol.data(), protocol.size()); + EVP_DigestUpdate(this->ctx, &HASH_MODE_FUNCTION, sizeof(HASH_MODE_FUNCTION)); + + // Include the label with size + include_size(label.size()); + std::vector label_bytes(label.begin(), label.end()); + EVP_DigestUpdate(this->ctx, label_bytes.data(), label_bytes.size()); +} + +// Clean up +Hash::~Hash() { + EVP_MD_CTX_free(this->ctx); +} + +// Include serialized data in the hash function +void Hash::include(CDataStream& data) { + include_size(data.size()); + EVP_DigestUpdate(this->ctx, reinterpret_cast(data.data()), data.size()); +} + +// Finalize the hash function to a byte array +std::vector Hash::finalize() { + // Use the full output size of the hash function + std::vector result; + result.resize(EVP_MD_size(EVP_sha512())); + + unsigned int TEMP; + EVP_DigestFinal_ex(this->ctx, result.data(), &TEMP); + + return result; +} + +// Finalize the hash function to a scalar +Scalar Hash::finalize_scalar() { + // Ensure we can properly populate a scalar + if (EVP_MD_size(EVP_sha512()) < SCALAR_ENCODING) { + throw std::runtime_error("Bad hash size!"); + } + + std::vector hash; + hash.resize(EVP_MD_size(EVP_sha512())); + unsigned char counter = 0; + + EVP_MD_CTX* state_counter; + state_counter = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_counter, EVP_sha512(), NULL); + + EVP_MD_CTX* state_finalize; + state_finalize = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_finalize, EVP_sha512(), NULL); + + while (1) { + // Prepare temporary state for counter testing + EVP_MD_CTX_copy_ex(state_counter, this->ctx); + + // Embed the counter + EVP_DigestUpdate(state_counter, &counter, sizeof(counter)); + + // Finalize the hash with a temporary state + EVP_MD_CTX_copy_ex(state_finalize, state_counter); + unsigned int TEMP; // We already know 
the digest length! + EVP_DigestFinal_ex(state_finalize, hash.data(), &TEMP); + + // Check for scalar validity + Scalar candidate; + try { + candidate.deserialize(hash.data()); + + EVP_MD_CTX_free(state_counter); + EVP_MD_CTX_free(state_finalize); + + return candidate; + } catch (const std::exception &) { + counter++; + } + } +} + +// Finalize the hash function to a group element +GroupElement Hash::finalize_group() { + const int GROUP_ENCODING = 34; + const unsigned char ZERO = 0; + + // Ensure we can properly populate a + if (EVP_MD_size(EVP_sha512()) < GROUP_ENCODING) { + throw std::runtime_error("Bad hash size!"); + } + + std::vector hash; + hash.resize(EVP_MD_size(EVP_sha512())); + unsigned char counter = 0; + + EVP_MD_CTX* state_counter; + state_counter = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_counter, EVP_sha512(), NULL); + + EVP_MD_CTX* state_finalize; + state_finalize = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_finalize, EVP_sha512(), NULL); + + while (1) { + // Prepare temporary state for counter testing + EVP_MD_CTX_copy_ex(state_counter, this->ctx); + + // Embed the counter + EVP_DigestUpdate(state_counter, &counter, sizeof(counter)); + + // Finalize the hash with a temporary state + EVP_MD_CTX_copy_ex(state_finalize, state_counter); + unsigned int TEMP; // We already know the digest length! 
+ EVP_DigestFinal_ex(state_finalize, hash.data(), &TEMP); + + // Assemble the serialized input: + // bytes 0..31: x coordinate + // byte 32: even/odd + // byte 33: zero (this point is not infinity) + unsigned char candidate_bytes[GROUP_ENCODING]; + memcpy(candidate_bytes, hash.data(), 33); + memcpy(candidate_bytes + 33, &ZERO, 1); + GroupElement candidate; + try { + candidate.deserialize(candidate_bytes); + + // Deserialization can succeed even with an invalid result + if (!candidate.isMember()) { + counter++; + continue; + } + + EVP_MD_CTX_free(state_counter); + EVP_MD_CTX_free(state_finalize); + + return candidate; + } catch (const std::exception &) { + counter++; + } + } +} + +// Include a serialized size in the hash function +void Hash::include_size(std::size_t size) { + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << (uint64_t)size; + EVP_DigestUpdate(this->ctx, reinterpret_cast(stream.data()), stream.size()); +} + +} \ No newline at end of file diff --git a/src/libspark/hash.h b/src/libspark/hash.h new file mode 100644 index 0000000000..39cd250bfa --- /dev/null +++ b/src/libspark/hash.h @@ -0,0 +1,26 @@ +#ifndef FIRO_SPARK_HASH_H +#define FIRO_SPARK_HASH_H +#include +#include "util.h" + +namespace spark { + +using namespace secp_primitives; + +class Hash { +public: + Hash(const std::string label); + ~Hash(); + void include(CDataStream& data); + std::vector finalize(); + Scalar finalize_scalar(); + GroupElement finalize_group(); + +private: + void include_size(std::size_t size); + EVP_MD_CTX* ctx; +}; + +} + +#endif diff --git a/src/libspark/kdf.cpp b/src/libspark/kdf.cpp new file mode 100644 index 0000000000..238b922df4 --- /dev/null +++ b/src/libspark/kdf.cpp @@ -0,0 +1,58 @@ +#include "kdf.h" + +namespace spark { + +// Set up a labeled KDF +KDF::KDF(const std::string label, std::size_t derived_key_size) { + this->ctx = EVP_MD_CTX_new(); + EVP_DigestInit_ex(this->ctx, EVP_sha512(), NULL); + + // Write the protocol and mode information + 
std::vector protocol(LABEL_PROTOCOL.begin(), LABEL_PROTOCOL.end()); + EVP_DigestUpdate(this->ctx, protocol.data(), protocol.size()); + EVP_DigestUpdate(this->ctx, &HASH_MODE_KDF, sizeof(HASH_MODE_KDF)); + + // Include the label with size + include_size(label.size()); + std::vector label_bytes(label.begin(), label.end()); + EVP_DigestUpdate(this->ctx, label_bytes.data(), label_bytes.size()); + + // Embed and set the derived key size + if (derived_key_size > EVP_MD_size(EVP_sha512())) { + throw std::invalid_argument("Requested KDF size is too large"); + } + include_size(derived_key_size); + this->derived_key_size = derived_key_size; +} + +// Clean up +KDF::~KDF() { + EVP_MD_CTX_free(this->ctx); +} + +// Include serialized data in the KDF +void KDF::include(CDataStream& data) { + include_size(data.size()); + EVP_DigestUpdate(this->ctx, reinterpret_cast(data.data()), data.size()); +} + +// Finalize the KDF with arbitrary size +std::vector KDF::finalize() { + std::vector result; + result.resize(EVP_MD_size(EVP_sha512())); + + unsigned int TEMP; + EVP_DigestFinal_ex(this->ctx, result.data(), &TEMP); + result.resize(this->derived_key_size); + + return result; +} + +// Include a serialized size in the KDF +void KDF::include_size(std::size_t size) { + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << (uint64_t)size; + EVP_DigestUpdate(this->ctx, reinterpret_cast(stream.data()), stream.size()); +} + +} \ No newline at end of file diff --git a/src/libspark/kdf.h b/src/libspark/kdf.h new file mode 100644 index 0000000000..6484c2c362 --- /dev/null +++ b/src/libspark/kdf.h @@ -0,0 +1,23 @@ +#ifndef FIRO_SPARK_KDF_H +#define FIRO_SPARK_KDF_H +#include +#include "util.h" + +namespace spark { + +class KDF { +public: + KDF(const std::string label, std::size_t derived_key_size); + ~KDF(); + void include(CDataStream& data); + std::vector finalize(); + +private: + void include_size(std::size_t size); + EVP_MD_CTX* ctx; + std::size_t derived_key_size; +}; + +} + +#endif 
diff --git a/src/libspark/keys.cpp b/src/libspark/keys.cpp new file mode 100644 index 0000000000..791c05a2bf --- /dev/null +++ b/src/libspark/keys.cpp @@ -0,0 +1,246 @@ +#include "keys.h" +#include "../hash.h" + +namespace spark { + +using namespace secp_primitives; + +SpendKey::SpendKey(const Params* params) { + this->params = params; + this->s1.randomize(); + this->s2.randomize(); + this->r.randomize(); +} + +SpendKey::SpendKey(const Params* params, const Scalar& r_) { + this->params = params; + this->r = r_; + std::vector data; + data.resize(32); + r.serialize(data.data()); + std::vector result(CSHA256().OUTPUT_SIZE); + + CHash256 hash256; + std::string prefix1 = "s1_generation"; + hash256.Write(reinterpret_cast(prefix1.c_str()), prefix1.size()); + hash256.Write(data.data(), data.size()); + hash256.Finalize(&result[0]); + this->s1.memberFromSeed(&result[0]); + + data.clear(); + result.clear(); + hash256.Reset(); + s1.serialize(data.data()); + + std::string prefix2 = "s2_generation"; + hash256.Write(reinterpret_cast(prefix2.c_str()), prefix2.size()); + hash256.Write(data.data(), data.size()); + hash256.Finalize(&result[0]); + this->s2.memberFromSeed(&result[0]); +} + +const Params* SpendKey::get_params() const { + return this->params; +} + +const Scalar& SpendKey::get_s1() const { + return this->s1; +} + +const Scalar& SpendKey::get_s2() const { + return this->s2; +} + +const Scalar& SpendKey::get_r() const { + return this->r; +} + +SpendKey& SpendKey::operator=(const SpendKey& other) { + this->s1 = other.s1; + this->s2 = other.s2; + this->r = other.r; + return *this; +} + +bool SpendKey::operator==(const SpendKey& other) const { + if (this->s1 != other.s1 || + this->s2 != other.s2 || + this->r != other.r) + return false; + return true; +} + +FullViewKey::FullViewKey() {} +FullViewKey::FullViewKey(const Params* params) { + this->params = params; +} +FullViewKey::FullViewKey(const SpendKey& spend_key) { + this->params = spend_key.get_params(); + this->s1 = 
spend_key.get_s1(); + this->s2 = spend_key.get_s2(); + this->D = this->params->get_G()*spend_key.get_r(); + this->P2 = this->params->get_F()*this->s2 + this->D; +} + +const Params* FullViewKey::get_params() const { + return this->params; +} + +const Scalar& FullViewKey::get_s1() const { + return this->s1; +} + +const Scalar& FullViewKey::get_s2() const { + return this->s2; +} + +const GroupElement& FullViewKey::get_D() const { + return this->D; +} + +const GroupElement& FullViewKey::get_P2() const { + return this->P2; +} + +IncomingViewKey::IncomingViewKey() {} + +IncomingViewKey::IncomingViewKey(const Params* params) { + this->params = params; +} + +IncomingViewKey::IncomingViewKey(const FullViewKey& full_view_key) { + this->params = full_view_key.get_params(); + this->s1 = full_view_key.get_s1(); + this->P2 = full_view_key.get_P2(); +} + +const Params* IncomingViewKey::get_params() const { + return this->params; +} + +const Scalar& IncomingViewKey::get_s1() const { + return this->s1; +} + +const GroupElement& IncomingViewKey::get_P2() const { + return this->P2; +} + +uint64_t IncomingViewKey::get_diversifier(const std::vector& d) const { + // Assert proper size + if (d.size() != AES_BLOCKSIZE) { + throw std::invalid_argument("Bad encrypted diversifier"); + } + + // Decrypt the diversifier; this is NOT AUTHENTICATED and MUST be externally checked for validity against a claimed address + std::vector key = SparkUtils::kdf_diversifier(this->s1); + uint64_t i = SparkUtils::diversifier_decrypt(key, d); + + return i; +} + +Address::Address() {} + +Address::Address(const Params* params) { + this->params = params; +} + +Address::Address(const IncomingViewKey& incoming_view_key, const uint64_t i) { + // Encrypt the diversifier + std::vector key = SparkUtils::kdf_diversifier(incoming_view_key.get_s1()); + this->params = incoming_view_key.get_params(); + this->d = SparkUtils::diversifier_encrypt(key, i); + this->Q1 = SparkUtils::hash_div(this->d)*incoming_view_key.get_s1(); 
+ this->Q2 = this->params->get_F()*SparkUtils::hash_Q2(incoming_view_key.get_s1(), i) + incoming_view_key.get_P2(); +} + +const Params* Address::get_params() const { + return this->params; +} + +const std::vector& Address::get_d() const { + return this->d; +} + +const GroupElement& Address::get_Q1() const { + return this->Q1; +} + +const GroupElement& Address::get_Q2() const { + return this->Q2; +} + +// Encode the address to string, given a network identifier +std::string Address::encode(const unsigned char network) const { + // Serialize the address components + std::vector raw; + raw.reserve(2 * GroupElement::serialize_size + AES_BLOCKSIZE); + + raw.insert(raw.end(), this->d.begin(), this->d.end()); + + std::vector component; + component.resize(GroupElement::serialize_size); + + this->get_Q1().serialize(component.data()); + raw.insert(raw.end(), component.begin(), component.end()); + + this->get_Q2().serialize(component.data()); + raw.insert(raw.end(), component.begin(), component.end()); + + // Apply the scramble encoding and prepend the network byte + std::vector scrambled = F4Grumble(network, raw.size()).encode(raw); + + // Encode using `bech32m` + std::string hrp; + hrp.push_back(ADDRESS_ENCODING_PREFIX); + hrp.push_back(network); + + std::vector bit_converted; + bech32::convertbits(bit_converted, scrambled, 8, 5, true); + + return bech32::encode(hrp, bit_converted, bech32::Encoding::BECH32M); +} + +// Decode an address (if possible) from a string, returning the network identifier +unsigned char Address::decode(const std::string& str) { + // Decode using `bech32m` + bech32::DecodeResult decoded = bech32::decode(str); + + // Check the encoding + if (decoded.encoding != bech32::Encoding::BECH32M) { + throw std::invalid_argument("Bad address encoding"); + } + + // Check the encoding prefix + if (decoded.hrp[0] != ADDRESS_ENCODING_PREFIX) { + throw std::invalid_argument("Bad address prefix"); + } + + // Get the network identifier + unsigned char network = 
decoded.hrp[1]; + + // Convert the address components to bytes + std::vector scrambled; + bech32::convertbits(scrambled, decoded.data, 5, 8, false); + + // Assert the proper address size + if (scrambled.size() != 2 * GroupElement::serialize_size + AES_BLOCKSIZE) { + throw std::invalid_argument("Bad address size"); + } + + // Apply the scramble decoding + std::vector raw = F4Grumble(network, scrambled.size()).decode(scrambled); + + // Deserialize the adddress components + this->d = std::vector(raw.begin(), raw.begin() + AES_BLOCKSIZE); + + std::vector component(raw.begin() + AES_BLOCKSIZE, raw.begin() + AES_BLOCKSIZE + GroupElement::serialize_size); + this->Q1.deserialize(component.data()); + + component = std::vector(raw.begin() + AES_BLOCKSIZE + GroupElement::serialize_size, raw.end()); + this->Q2.deserialize(component.data()); + + return network; +} + +} diff --git a/src/libspark/keys.h b/src/libspark/keys.h new file mode 100644 index 0000000000..4af8b25687 --- /dev/null +++ b/src/libspark/keys.h @@ -0,0 +1,95 @@ +#ifndef FIRO_SPARK_KEYS_H +#define FIRO_SPARK_KEYS_H +#include "bech32.h" +#include "f4grumble.h" +#include "params.h" +#include "util.h" + +namespace spark { + +using namespace secp_primitives; + +class SpendKey { +public: + SpendKey(const Params* params); + SpendKey(const Params* params, const Scalar& r_); + const Params* get_params() const; + const Scalar& get_s1() const; + const Scalar& get_s2() const; + const Scalar& get_r() const; + + SpendKey& operator=(const SpendKey& other); + bool operator==(const SpendKey& other) const; + +private: + const Params* params; + Scalar s1, s2, r; +}; + +class FullViewKey { +public: + FullViewKey(); + FullViewKey(const Params* params); + FullViewKey(const SpendKey& spend_key); + const Params* get_params() const; + const Scalar& get_s1() const; + const Scalar& get_s2() const; + const GroupElement& get_D() const; + const GroupElement& get_P2() const; + + + ADD_SERIALIZE_METHODS; + template + inline void 
SerializationOp(Stream& s, Operation ser_action) { + READWRITE(s1); + READWRITE(s2); + READWRITE(D); + READWRITE(P2); + } + +private: + const Params* params; + Scalar s1, s2; + GroupElement D, P2; +}; + +class IncomingViewKey { +public: + IncomingViewKey(); + IncomingViewKey(const Params* params); + IncomingViewKey(const FullViewKey& full_view_key); + const Params* get_params() const; + const Scalar& get_s1() const; + const GroupElement& get_P2() const; + uint64_t get_diversifier(const std::vector& d) const; + +private: + const Params* params; + Scalar s1; + GroupElement P2; +}; + +class Address { +public: + Address(); + Address(const Params* params); + Address(const IncomingViewKey& incoming_view_key, const uint64_t i); + const Params* get_params() const; + const std::vector& get_d() const; + const GroupElement& get_Q1() const; + const GroupElement& get_Q2() const; + + std::string encode(const unsigned char network) const; + unsigned char decode(const std::string& str); + +private: + const Params* params; + std::vector d; + GroupElement Q1, Q2; + + static std::string get_checksum(const std::string data); +}; + +} + +#endif diff --git a/src/libspark/mint_transaction.cpp b/src/libspark/mint_transaction.cpp new file mode 100644 index 0000000000..7acac3cb73 --- /dev/null +++ b/src/libspark/mint_transaction.cpp @@ -0,0 +1,110 @@ +#include "mint_transaction.h" + +namespace spark { + +MintTransaction::MintTransaction(const Params* params) { + this->params = params; +} + +MintTransaction::MintTransaction( + const Params* params, + const std::vector& outputs, + const std::vector& serial_context, + bool generate +) { + // Important note: This construction assumes that the public coin values are correct according to higher-level consensus rules! 
+ // Important note: For pool transition transactions, the serial context should contain unique references to all base-layer spent assets, in order to ensure the resulting serial commitment is bound to this transaction + + this->params = params; + Schnorr schnorr(this->params->get_H()); + + std::vector value_statement; + std::vector value_witness; + + for (std::size_t j = 0; j < outputs.size(); j++) { + if (generate) { + MintedCoinData output = outputs[j]; + + // Generate the coin + Scalar k; + k.randomize(); + this->coins.emplace_back(Coin( + this->params, + COIN_TYPE_MINT, + k, + output.address, + output.v, + output.memo, + serial_context + )); + + // Prepare the value proof + value_statement.emplace_back(this->coins[j].C + this->params->get_G().inverse()*Scalar(this->coins[j].v)); + value_witness.emplace_back(SparkUtils::hash_val(k)); + } else { + Coin coin; + coin.type = 0; + coin.r_.ciphertext.resize(82); // max possible size + coin.r_.key_commitment.resize(64); + coin.r_.tag.resize(16); + coin.v = 0; + this->coins.emplace_back(coin); + } + } + + // Complete the value proof + if (generate) + schnorr.prove(value_witness, value_statement, this->value_proof); + else + value_proof = SchnorrProof(); +} + +bool MintTransaction::verify() { + // Verify the value proof + Schnorr schnorr(this->params->get_H()); + std::vector value_statement; + + for (std::size_t j = 0; j < this->coins.size(); j++) { + value_statement.emplace_back(this->coins[j].C + this->params->get_G().inverse()*Scalar(this->coins[j].v)); + } + + return schnorr.verify(value_statement, this->value_proof); +} + +std::vector MintTransaction::getMintedCoinsSerialized() { + std::vector serializedCoins; + bool first = true; + for (const auto& coin : coins) { + CDataStream serializedCoin(SER_NETWORK, 0); + serializedCoin << coin; + if (first) { + serializedCoin << value_proof; + first = false; + } + serializedCoins.push_back(serializedCoin); + } + return serializedCoins; +} + +void 
MintTransaction::setMintTransaction(std::vector& serializedCoins) { + bool first = true; + coins.reserve(serializedCoins.size()); + size_t i = 0; + for (auto& stream : serializedCoins) { + Coin coin(params); + stream >> coin; + coins.push_back(coin); + i++; + if (first) { + stream >> value_proof; + first = false; + } + } +} + +void MintTransaction::getCoins(std::vector& coins_) { + coins_.insert(coins_.end(), coins.begin(), coins.end()); +} + + +} // namespace spark diff --git a/src/libspark/mint_transaction.h b/src/libspark/mint_transaction.h new file mode 100644 index 0000000000..4a41b00f42 --- /dev/null +++ b/src/libspark/mint_transaction.h @@ -0,0 +1,45 @@ +#ifndef FIRO_SPARK_MINT_TRANSACTION_H +#define FIRO_SPARK_MINT_TRANSACTION_H +#include "keys.h" +#include "coin.h" +#include "schnorr.h" +#include "util.h" + +namespace spark { + +using namespace secp_primitives; + +struct MintedCoinData { + Address address; + uint64_t v; + std::string memo; +}; + +class MintTransaction { +public: + MintTransaction(const Params* params); + MintTransaction( + const Params* params, + const std::vector& outputs, + const std::vector& serial_context, + bool generate = true + ); + bool verify(); + + // returns the vector of serialized coins, with first one it puts also the chnorr proof; + std::vector getMintedCoinsSerialized(); + + // deserialize from the vector of CDataStreams + void setMintTransaction(std::vector& serializedCoins); + + void getCoins(std::vector& coins_); + +private: + const Params* params; + std::vector coins; + SchnorrProof value_proof; +}; + +} + +#endif diff --git a/src/libspark/params.cpp b/src/libspark/params.cpp new file mode 100644 index 0000000000..466de7be00 --- /dev/null +++ b/src/libspark/params.cpp @@ -0,0 +1,137 @@ +#include "params.h" +#include "chainparams.h" +#include "util.h" + +namespace spark { + + CCriticalSection Params::cs_instance; + std::unique_ptr Params::instance; + +// Protocol parameters for deployment +Params const* 
Params::get_default() { + if (instance) { + return instance.get(); + } else { + LOCK(cs_instance); + if (instance) { + return instance.get(); + } + + std::size_t memo_bytes = 32; + std::size_t max_M_range = 16; + std::size_t n_grootle = 8; + std::size_t m_grootle = 5; + + instance.reset(new Params(memo_bytes, max_M_range, n_grootle, m_grootle)); + return instance.get(); + } +} + +// Protocol parameters for testing +Params const* Params::get_test() { + if (instance) { + return instance.get(); + } else { + LOCK(cs_instance); + if (instance) { + return instance.get(); + } + + std::size_t memo_bytes = 32; + std::size_t max_M_range = 16; + std::size_t n_grootle = 2; + std::size_t m_grootle = 4; + + instance.reset(new Params(memo_bytes, max_M_range, n_grootle, m_grootle)); + return instance.get(); + } +} + +Params::Params( + const std::size_t memo_bytes, + const std::size_t max_M_range, + const std::size_t n_grootle, + const std::size_t m_grootle +) +{ + // Global generators + this->F = SparkUtils::hash_generator(LABEL_GENERATOR_F); + this->G.set_base_g(); + this->H = SparkUtils::hash_generator(LABEL_GENERATOR_H); + this->U = SparkUtils::hash_generator(LABEL_GENERATOR_U); + + // Coin parameters + this->memo_bytes = memo_bytes; + + // Range proof parameters + this->max_M_range = max_M_range; + this->G_range.resize(64*max_M_range); + this->H_range.resize(64*max_M_range); + for (std::size_t i = 0; i < 64*max_M_range; i++) { + this->G_range[i] = SparkUtils::hash_generator(LABEL_GENERATOR_G_RANGE + " " + std::to_string(i)); + this->H_range[i] = SparkUtils::hash_generator(LABEL_GENERATOR_H_RANGE + " " + std::to_string(i)); + } + + // One-of-many parameters + if (n_grootle < 2 || m_grootle < 3) { + throw std::invalid_argument("Bad Grootle parameteres"); + } + this->n_grootle = n_grootle; + this->m_grootle = m_grootle; + this->G_grootle.resize(n_grootle * m_grootle); + this->H_grootle.resize(n_grootle * m_grootle); + for (std::size_t i = 0; i < n_grootle * m_grootle; i++) { + 
this->G_grootle[i] = SparkUtils::hash_generator(LABEL_GENERATOR_G_GROOTLE + " " + std::to_string(i)); + this->H_grootle[i] = SparkUtils::hash_generator(LABEL_GENERATOR_H_GROOTLE + " " + std::to_string(i)); + } +} + +const GroupElement& Params::get_F() const { + return this->F; +} + +const GroupElement& Params::get_G() const { + return this->G; +} + +const GroupElement& Params::get_H() const { + return this->H; +} + +const GroupElement& Params::get_U() const { + return this->U; +} + +const std::size_t Params::get_memo_bytes() const { + return this->memo_bytes; +} + +const std::vector& Params::get_G_range() const { + return this->G_range; +} + +const std::vector& Params::get_H_range() const { + return this->H_range; +} + +const std::vector& Params::get_G_grootle() const { + return this->G_grootle; +} + +const std::vector& Params::get_H_grootle() const { + return this->H_grootle; +} + +std::size_t Params::get_max_M_range() const { + return this->max_M_range; +} + +std::size_t Params::get_n_grootle() const { + return this->n_grootle; +} + +std::size_t Params::get_m_grootle() const { + return this->m_grootle; +} + +} diff --git a/src/libspark/params.h b/src/libspark/params.h new file mode 100644 index 0000000000..e37855dab4 --- /dev/null +++ b/src/libspark/params.h @@ -0,0 +1,67 @@ +#ifndef FIRO_LIBSPARK_PARAMS_H +#define FIRO_LIBSPARK_PARAMS_H + +#include +#include +#include +#include + +using namespace secp_primitives; + +namespace spark { + +class Params { +public: + static Params const* get_default(); + static Params const* get_test(); + + const GroupElement& get_F() const; + const GroupElement& get_G() const; + const GroupElement& get_H() const; + const GroupElement& get_U() const; + + const std::size_t get_memo_bytes() const; + + std::size_t get_max_M_range() const; + const std::vector& get_G_range() const; + const std::vector& get_H_range() const; + + std::size_t get_n_grootle() const; + std::size_t get_m_grootle() const; + const std::vector& get_G_grootle() 
const; + const std::vector& get_H_grootle() const; + +private: + Params( + const std::size_t memo_bytes, + const std::size_t max_M_range, + const std::size_t n_grootle, + const std::size_t m_grootle + ); + +private: + static CCriticalSection cs_instance; + static std::unique_ptr instance; + + // Global generators + GroupElement F; + GroupElement G; + GroupElement H; + GroupElement U; + + // Coin parameters + std::size_t memo_bytes; + + // Range proof parameters + std::size_t max_M_range; + std::vector G_range, H_range; + + // One-of-many parameters + std::size_t n_grootle, m_grootle; + std::vector G_grootle; + std::vector H_grootle; +}; + +} + +#endif diff --git a/src/libspark/schnorr.cpp b/src/libspark/schnorr.cpp new file mode 100644 index 0000000000..4657fece77 --- /dev/null +++ b/src/libspark/schnorr.cpp @@ -0,0 +1,93 @@ +#include "schnorr.h" +#include "transcript.h" + +namespace spark { + +Schnorr::Schnorr(const GroupElement& G_): + G(G_) { +} + +Scalar Schnorr::challenge( + const std::vector& Y, + const GroupElement& A) { + Transcript transcript(LABEL_TRANSCRIPT_SCHNORR); + transcript.add("G", G); + transcript.add("Y", Y); + transcript.add("A", A); + + return transcript.challenge("c"); +} + +void Schnorr::prove(const Scalar& y, const GroupElement& Y, SchnorrProof& proof) { + const std::vector y_vector = { y }; + const std::vector Y_vector = { Y }; + prove(y_vector, Y_vector, proof); +} + +void Schnorr::prove(const std::vector& y, const std::vector& Y, SchnorrProof& proof) { + const std::size_t n = y.size(); + + // Check statement validity + if (y.size() != Y.size()) { + throw std::invalid_argument("Bad Schnorr statement!"); + } + + for (std::size_t i = 0; i < n; i++) { + if (G*y[i] != Y[i]) { + throw std::invalid_argument("Bad Schnorr statement!"); + } + } + + Scalar r; + r.randomize(); + proof.A = G*r; + + const Scalar c = challenge(Y, proof.A); + Scalar c_power(c); + + proof.t = r; + for (std::size_t i = 0; i < n; i++) { + if (c_power.isZero()) { + throw 
std::invalid_argument("Unexpected challenge!"); + } + + proof.t += y[i].negate()*c_power; + c_power *= c; + } +} + +bool Schnorr::verify(const GroupElement& Y, const SchnorrProof& proof) { + const std::vector Y_vector = { Y }; + return verify(Y_vector, proof); +} + +bool Schnorr::verify(const std::vector& Y, const SchnorrProof& proof) { + const std::size_t n = Y.size(); + + std::vector points; + points.reserve(n + 2); + std::vector scalars; + scalars.reserve(n + 2); + + points.emplace_back(G); + scalars.emplace_back(proof.t); + points.emplace_back(proof.A); + scalars.emplace_back(Scalar(uint64_t(1)).negate()); + + const Scalar c = challenge(Y, proof.A); + Scalar c_power(c); + for (std::size_t i = 0; i < n; i++) { + if (c_power.isZero()) { + throw std::invalid_argument("Unexpected challenge!"); + } + + points.emplace_back(Y[i]); + scalars.emplace_back(c_power); + c_power *= c; + } + + MultiExponent result(points, scalars); + return result.get_multiple().isInfinity(); +} + +} diff --git a/src/libspark/schnorr.h b/src/libspark/schnorr.h new file mode 100644 index 0000000000..a760e5a56b --- /dev/null +++ b/src/libspark/schnorr.h @@ -0,0 +1,25 @@ +#ifndef FIRO_LIBSPARK_SCHNORR_H +#define FIRO_LIBSPARK_SCHNORR_H + +#include "schnorr_proof.h" +#include + +namespace spark { + +class Schnorr { +public: + Schnorr(const GroupElement& G); + + void prove(const Scalar& y, const GroupElement& Y, SchnorrProof& proof); + void prove(const std::vector& y, const std::vector& Y, SchnorrProof& proof); + bool verify(const GroupElement& Y, const SchnorrProof& proof); + bool verify(const std::vector& Y, const SchnorrProof& proof); + +private: + Scalar challenge(const std::vector& Y, const GroupElement& A); + const GroupElement& G; +}; + +} + +#endif diff --git a/src/libspark/schnorr_proof.h b/src/libspark/schnorr_proof.h new file mode 100644 index 0000000000..a3fe067d01 --- /dev/null +++ b/src/libspark/schnorr_proof.h @@ -0,0 +1,27 @@ +#ifndef FIRO_LIBSPARK_SCHNORR_PROOF_H +#define 
FIRO_LIBSPARK_SCHNORR_PROOF_H + +#include "params.h" + +namespace spark { + +class SchnorrProof{ +public: + inline std::size_t memoryRequired() const { + return Scalar::memoryRequired() + GroupElement::memoryRequired(); + } + + ADD_SERIALIZE_METHODS; + template + inline void SerializationOp(Stream& s, Operation ser_action) { + READWRITE(A); + READWRITE(t); + } + +public: + GroupElement A; + Scalar t; +}; +} + +#endif diff --git a/src/libspark/spend_transaction.cpp b/src/libspark/spend_transaction.cpp new file mode 100644 index 0000000000..b2d3561e79 --- /dev/null +++ b/src/libspark/spend_transaction.cpp @@ -0,0 +1,454 @@ +#include "spend_transaction.h" + +namespace spark { + +// Generate a spend transaction that consumes existing coins and generates new ones +SpendTransaction::SpendTransaction( + const Params* params) { + this->params = params; +} + +SpendTransaction::SpendTransaction( + const Params* params, + const FullViewKey& full_view_key, + const SpendKey& spend_key, + const std::vector& inputs, + const std::unordered_map& cover_set_data, + const uint64_t f, + const uint64_t vout, + const std::vector& outputs +) { + this->params = params; + + // Size parameters + const std::size_t w = inputs.size(); // number of consumed coins + const std::size_t t = outputs.size(); // number of generated coins + const std::size_t N = (std::size_t) std::pow(params->get_n_grootle(), params->get_m_grootle()); // size of cover sets + + // Prepare input-related vectors + this->cover_set_ids.reserve(w); // cover set data and metadata + this->setCoverSets(cover_set_data); + this->S1.reserve(w); // serial commitment offsets + this->C1.reserve(w); // value commitment offsets + this->grootle_proofs.reserve(w); // Grootle one-of-many proofs + this->T.reserve(w); // linking tags + + this->f = f; // fee + this->vout = vout; // transparent output value + + // Prepare Chaum vectors + std::vector chaum_x, chaum_y, chaum_z; + + // Prepare output vector + this->out_coins.reserve(t); // coins 
+ std::vector k; // nonces + + // Prepare inputs + Grootle grootle( + this->params->get_H(), + this->params->get_G_grootle(), + this->params->get_H_grootle(), + this->params->get_n_grootle(), + this->params->get_m_grootle() + ); + for (std::size_t u = 0; u < w; u++) { + // Parse out cover set data for this spend + uint64_t set_id = inputs[u].cover_set_id; + this->cover_set_ids.emplace_back(set_id); + if (cover_set_data.count(set_id) == 0) + throw std::invalid_argument("Required set is not passed"); + + const auto& cover_set = cover_set_data.at(set_id).cover_set; + std::size_t set_size = cover_set.size(); + if (set_size > N) + throw std::invalid_argument("Wrong set size"); + + std::vector S, C; + S.reserve(set_size); + C.reserve(set_size); + for (std::size_t i = 0; i < set_size; i++) { + S.emplace_back(cover_set[i].S); + C.emplace_back(cover_set[i].C); + } + + // Serial commitment offset + this->S1.emplace_back( + this->params->get_F()*inputs[u].s + + this->params->get_H().inverse()*SparkUtils::hash_ser1(inputs[u].s, full_view_key.get_D()) + + full_view_key.get_D() + ); + + // Value commitment offset + this->C1.emplace_back( + this->params->get_G()*Scalar(inputs[u].v) + + this->params->get_H()*SparkUtils::hash_val1(inputs[u].s, full_view_key.get_D()) + ); + + // Tags + this->T.emplace_back(inputs[u].T); + + // Grootle proof + this->grootle_proofs.emplace_back(); + std::size_t l = inputs[u].index; + grootle.prove( + l, + SparkUtils::hash_ser1(inputs[u].s, full_view_key.get_D()), + S, + this->S1.back(), + SparkUtils::hash_val(inputs[u].k) - SparkUtils::hash_val1(inputs[u].s, full_view_key.get_D()), + C, + this->C1.back(), + this->cover_set_representations[set_id], + this->grootle_proofs.back() + ); + + // Chaum data + chaum_x.emplace_back(inputs[u].s); + chaum_y.emplace_back(spend_key.get_r()); + chaum_z.emplace_back(SparkUtils::hash_ser1(inputs[u].s, full_view_key.get_D()).negate()); + } + + // Generate output coins and prepare range proof vectors + std::vector 
range_v; + std::vector range_r; + std::vector range_C; + + // Serial context for all outputs is the set of linking tags for this transaction, which must always be in a fixed order + CDataStream serial_context(SER_NETWORK, PROTOCOL_VERSION); + serial_context << this->T; + + for (std::size_t j = 0; j < t; j++) { + // Nonce + k.emplace_back(); + k.back().randomize(); + + // Output coin + this->out_coins.emplace_back(); + this->out_coins.back() = Coin( + this->params, + COIN_TYPE_SPEND, + k.back(), + outputs[j].address, + outputs[j].v, + outputs[j].memo, + std::vector(serial_context.begin(), serial_context.end()) + ); + + // Range data + range_v.emplace_back(outputs[j].v); + range_r.emplace_back(SparkUtils::hash_val(k.back())); + range_C.emplace_back(this->out_coins.back().C); + } + + // Generate range proof + BPPlus range( + this->params->get_G(), + this->params->get_H(), + this->params->get_G_range(), + this->params->get_H_range(), + 64 + ); + range.prove( + range_v, + range_r, + range_C, + this->range_proof + ); + + // Generate the balance proof + Schnorr schnorr(this->params->get_H()); + GroupElement balance_statement; + Scalar balance_witness; + for (std::size_t u = 0; u < w; u++) { + balance_statement += this->C1[u]; + balance_witness += SparkUtils::hash_val1(inputs[u].s, full_view_key.get_D()); + } + for (std::size_t j = 0; j < t; j++) { + balance_statement += this->out_coins[j].C.inverse(); + balance_witness -= SparkUtils::hash_val(k[j]); + } + balance_statement += (this->params->get_G()*Scalar(f + vout)).inverse(); + schnorr.prove( + balance_witness, + balance_statement, + this->balance_proof + ); + + // Compute the binding hash + Scalar mu = hash_bind( + hash_bind_inner( + this->cover_set_representations, + this->C1, + this->grootle_proofs, + this->balance_proof, + this->range_proof + ), + this->out_coins, + this->f + vout + ); + + // Compute the authorizing Chaum proof + Chaum chaum( + this->params->get_F(), + this->params->get_G(), + this->params->get_H(), 
+ this->params->get_U() + ); + chaum.prove( + mu, + chaum_x, + chaum_y, + chaum_z, + this->S1, + this->T, + this->chaum_proof + ); +} + +uint64_t SpendTransaction::getFee() { + return f; +} + +const std::vector& SpendTransaction::getUsedLTags() const { + return T; +} + +const std::vector& SpendTransaction::getCoinGroupIds() { + return cover_set_ids; +} + +const std::vector& SpendTransaction::getOutCoins() { + return out_coins; +} + +// Convenience wrapper for verifying a single spend transaction +bool SpendTransaction::verify( + const SpendTransaction& transaction, + const std::unordered_map>& cover_sets) { + std::vector transactions = { transaction }; + return verify(transaction.params, transactions, cover_sets); +} + +// Determine if a set of spend transactions is collectively valid +// NOTE: This assumes that the relationship between a `cover_set_id` and the provided `cover_set` is already valid and canonical! +// NOTE: This assumes that validity criteria relating to chain context have been externally checked! 
+bool SpendTransaction::verify( + const Params* params, + const std::vector& transactions, + const std::unordered_map>& cover_sets) { + // The idea here is to perform batching as broadly as possible + // - Grootle proofs can be batched if they share a (partial) cover set + // - Range proofs can always be batched arbitrarily + // - Other parts of the transaction can be checked separately + // - We try to verify in order of likely computational complexity, to fail early + + // Track range proofs to batch + std::vector> range_proofs_C; // commitments for all range proofs + std::vector range_proofs; // all range proofs + + // Track cover sets across Grootle proofs to batch + std::unordered_map>> grootle_buckets; + + // Process each transaction + for (std::size_t i = 0; i < transactions.size(); i++) { + SpendTransaction tx = transactions[i]; + + // Assert common parameters + if (params != tx.params) { + return false; + } + + // Size parameters for this transaction + const std::size_t w = tx.cover_set_ids.size(); // number of consumed coins + const std::size_t t = tx.out_coins.size(); // number of generated coins + const std::size_t N = (std::size_t) std::pow(params->get_n_grootle(), params->get_m_grootle()); // size of cover sets + + // Consumed coin semantics + if (tx.S1.size() != w || + tx.C1.size() != w || + tx.T.size() != w || + tx.grootle_proofs.size() != w, + tx.cover_set_sizes.size() != tx.cover_set_representations.size()) { + throw std::invalid_argument("Bad spend transaction semantics"); + } + + // Cover set semantics + for (const auto& set : cover_sets) { + if (set.second.size() > N) { + throw std::invalid_argument("Bad spend transaction semantics"); + } + } + + // Store range proof with commitments + range_proofs_C.emplace_back(); + for (std::size_t j = 0; j < t; j++) { + range_proofs_C.back().emplace_back(tx.out_coins[j].C); + } + range_proofs.emplace_back(tx.range_proof); + + // Sort all Grootle proofs into buckets for batching based on common input sets + 
for (std::size_t u = 0; u < w; u++) { + grootle_buckets[tx.cover_set_ids[u]].emplace_back(std::pair(i, u)); + } + + // Compute the binding hash + Scalar mu = hash_bind( + tx.hash_bind_inner( + tx.cover_set_representations, + tx.C1, + tx.grootle_proofs, + tx.balance_proof, + tx.range_proof + ), + tx.out_coins, + tx.f + tx.vout + ); + + // Verify the authorizing Chaum-Pedersen proof + Chaum chaum( + tx.params->get_F(), + tx.params->get_G(), + tx.params->get_H(), + tx.params->get_U() + ); + if (!chaum.verify(mu, tx.S1, tx.T, tx.chaum_proof)) { + return false; + } + + // Verify the balance proof + Schnorr schnorr(tx.params->get_H()); + GroupElement balance_statement; + for (std::size_t u = 0; u < w; u++) { + balance_statement += tx.C1[u]; + } + for (std::size_t j = 0; j < t; j++) { + balance_statement += tx.out_coins[j].C.inverse(); + } + balance_statement += (tx.params->get_G()*Scalar(tx.f + tx.vout)).inverse(); + + if(!schnorr.verify( + balance_statement, + tx.balance_proof + )) { + return false; + } + } + + // Verify all range proofs in a batch + BPPlus range( + params->get_G(), + params->get_H(), + params->get_G_range(), + params->get_H_range(), + 64 + ); + if (!range.verify(range_proofs_C, range_proofs)) { + return false; + } + + // Verify all Grootle proofs in batches (based on cover set) + // TODO: Finish this + Grootle grootle( + params->get_H(), + params->get_G_grootle(), + params->get_H_grootle(), + params->get_n_grootle(), + params->get_m_grootle() + ); + for (auto grootle_bucket : grootle_buckets) { + std::size_t cover_set_id = grootle_bucket.first; + std::vector> proof_indexes = grootle_bucket.second; + + // Build the proof statement and metadata vectors from these proofs + std::vector S, S1, V, V1; + std::vector> cover_set_representations; + std::vector sizes; + std::vector proofs; + + std::size_t full_cover_set_size = cover_sets.at(cover_set_id).size(); + for (std::size_t i = 0; i < full_cover_set_size; i++) { + 
S.emplace_back(cover_sets.at(cover_set_id)[i].S); + V.emplace_back(cover_sets.at(cover_set_id)[i].C); + } + + for (auto proof_index : proof_indexes) { + const auto& tx = transactions[proof_index.first]; + if (!cover_sets.count(cover_set_id)) + throw std::invalid_argument("Cover set missing"); + // Because we assume all proofs in this list share a monotonic cover set, the largest such set is the one to use for verification + if (!tx.cover_set_sizes.count(cover_set_id)) + throw std::invalid_argument("Cover set size missing"); + + std::size_t this_cover_set_size = tx.cover_set_sizes.at(cover_set_id); + + // We always use the other elements + S1.emplace_back(tx.S1[proof_index.second]); + V1.emplace_back(tx.C1[proof_index.second]); + if (!tx.cover_set_representations.count(cover_set_id)) + throw std::invalid_argument("Cover set representation missing"); + + cover_set_representations.emplace_back(tx.cover_set_representations.at(cover_set_id)); + sizes.emplace_back(this_cover_set_size); + proofs.emplace_back(tx.grootle_proofs[proof_index.second]); + } + + // Verify the batch + if (!grootle.verify(S, S1, V, V1, cover_set_representations, sizes, proofs)) { + return false; + } + } + + // Any failures have been identified already, so the batch is valid + return true; +} + +// Hash function H_bind_inner +// This function pre-hashes auxiliary data that makes things easier for a limited signer who cannot process the data directly +// Its value is then used as part of the binding hash, which a limited signer can verify as part of the signing process +std::vector SpendTransaction::hash_bind_inner( + const std::unordered_map>& cover_set_representations, + const std::vector& C1, + const std::vector& grootle_proofs, + const SchnorrProof& balance_proof, + const BPPlusProof& range_proof +) { + Hash hash(LABEL_HASH_BIND_INNER); + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << cover_set_representations; + stream << S1; + stream << C1; + stream << T; + stream << 
grootle_proofs; + stream << balance_proof; + stream << range_proof; + hash.include(stream); + + return hash.finalize(); +} + +// Hash-to-scalar function H_bind +// This function must accept pre-hashed data from `H_bind_inner` intended to correspond to the signing operation +Scalar SpendTransaction::hash_bind( + const std::vector hash_bind_inner, + const std::vector& out_coins, + const uint64_t f_ +) { + Hash hash(LABEL_HASH_BIND); + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << hash_bind_inner, + stream << out_coins; + stream << f_; + hash.include(stream); + + return hash.finalize_scalar(); +} + +void SpendTransaction::setBlockHashes(const std::map& idAndHashes) { + set_id_blockHash = idAndHashes; +} + +const std::map& SpendTransaction::getBlockHashes() { + return set_id_blockHash; +} + +} diff --git a/src/libspark/spend_transaction.h b/src/libspark/spend_transaction.h new file mode 100644 index 0000000000..6b2803da5a --- /dev/null +++ b/src/libspark/spend_transaction.h @@ -0,0 +1,131 @@ +#ifndef FIRO_SPARK_SPEND_TRANSACTION_H +#define FIRO_SPARK_SPEND_TRANSACTION_H +#include "keys.h" +#include "coin.h" +#include "schnorr.h" +#include "util.h" +#include "grootle.h" +#include "bpplus.h" +#include "chaum.h" + +namespace spark { + +using namespace secp_primitives; + +// Note that cover sets are treated as monotonic, meaning they grow over time (up to some implementation-defined limit) +// To support efficient batching, we track which set each spend references +// If spends share a `cover_set_id`, we assume the corresponding `cover_set` vectors have a subset relationship +// This relationship _must_ be checked elsewhere, as we simply use the largest `cover_set` for each `cover_set_id`! 
+struct InputCoinData { + uint64_t cover_set_id; // an identifier for the monotonically-growing set of which `cover_set` is a subset + std::size_t index; // index of the coin in the cover set + Scalar s; // serial number + GroupElement T; // tag + uint64_t v; // value + Scalar k; // nonce +}; + +struct CoverSetData { + std::vector cover_set; // set of coins used as a cover set for the spend + std::vector cover_set_representation; // a unique representation for the ordered elements of the partial `cover_set` used in the spend +}; + +struct OutputCoinData { + Address address; + uint64_t v; + std::string memo; +}; + +class SpendTransaction { +public: + SpendTransaction( + const Params* params); + + SpendTransaction( + const Params* params, + const FullViewKey& full_view_key, + const SpendKey& spend_key, + const std::vector& inputs, + const std::unordered_map& cover_set_data, + const uint64_t f, + const uint64_t vout, + const std::vector& outputs + ); + + uint64_t getFee(); + const std::vector& getUsedLTags() const; + const std::vector& getOutCoins(); + const std::vector& getCoinGroupIds(); + + static bool verify(const Params* params, const std::vector& transactions, const std::unordered_map>& cover_sets); + static bool verify(const SpendTransaction& transaction, const std::unordered_map>& cover_sets); + + std::vector hash_bind_inner( + const std::unordered_map>& cover_set_representations, + const std::vector& C1, + const std::vector& grootle_proofs, + const SchnorrProof& balance_proof, + const BPPlusProof& range_proof + ); + static Scalar hash_bind( + const std::vector hash_bind_inner, + const std::vector& out_coins, + const uint64_t f_ + ); + + ADD_SERIALIZE_METHODS; + template + void SerializationOp(Stream& s, Operation ser_action) + { + READWRITE(cover_set_ids); + READWRITE(set_id_blockHash); + READWRITE(f); + READWRITE(S1); + READWRITE(C1); + READWRITE(T); + READWRITE(grootle_proofs); + READWRITE(chaum_proof); + READWRITE(balance_proof); + READWRITE(range_proof); 
+ } + + void setOutCoins(const std::vector& out_coins_) { + this->out_coins = out_coins_; + } + + void setCoverSets(const std::unordered_map& cover_set_data) { + for (const auto& data : cover_set_data) { + this->cover_set_sizes[data.first] = data.second.cover_set.size(); + this->cover_set_representations[data.first] = data.second.cover_set_representation; + } + } + + void setVout(const uint64_t& vout_) { + this->vout = vout_; + } + + void setBlockHashes(const std::map& idAndHashes); + + const std::map& getBlockHashes(); +private: + const Params* params; + // We need to construct and pass this data before running verification + std::unordered_map cover_set_sizes; + std::unordered_map> cover_set_representations; + std::vector out_coins; + + // All this data we need to serialize + std::map set_id_blockHash; + std::vector cover_set_ids; + uint64_t f; + uint64_t vout; + std::vector S1, C1, T; + std::vector grootle_proofs; + ChaumProof chaum_proof; + SchnorrProof balance_proof; + BPPlusProof range_proof; +}; + +} + +#endif diff --git a/src/libspark/test/address_test.cpp b/src/libspark/test/address_test.cpp new file mode 100644 index 0000000000..8f50d910b3 --- /dev/null +++ b/src/libspark/test/address_test.cpp @@ -0,0 +1,124 @@ +#include "../keys.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +using namespace secp_primitives; + +BOOST_FIXTURE_TEST_SUITE(spark_address_tests, BasicTestingSetup) + +// Check that correct encoding and decoding succeed +BOOST_AUTO_TEST_CASE(correctness) +{ + // Parameters + const Params* params; + params = Params::get_test(); + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + const uint64_t i = 12345; + Address address(incoming_view_key, i); + + // Encode address + std::string encoded = address.encode(ADDRESS_NETWORK_TESTNET); + + // Decode address + Address decoded; + decoded.decode(encoded); + + // 
Check correctness + BOOST_CHECK_EQUAL_COLLECTIONS(address.get_d().begin(), address.get_d().end(), decoded.get_d().begin(), decoded.get_d().end()); + BOOST_CHECK_EQUAL(address.get_Q1(), decoded.get_Q1()); + BOOST_CHECK_EQUAL(address.get_Q2(), decoded.get_Q2()); +} + +// Check that a bad checksum fails +BOOST_AUTO_TEST_CASE(evil_checksum) +{ + // Parameters + const Params* params; + params = Params::get_test(); + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + const uint64_t i = 12345; + Address address(incoming_view_key, i); + + // Encode address + std::string encoded = address.encode(ADDRESS_NETWORK_TESTNET); + + // Malleate the checksum + encoded[encoded.size() - 1] = ~encoded[encoded.size() - 1]; + + // Decode address + Address decoded; + BOOST_CHECK_THROW(decoded.decode(encoded), std::invalid_argument); +} + +// Check that a bad prefix fails +BOOST_AUTO_TEST_CASE(evil_prefix) +{ + // Parameters + const Params* params; + params = Params::get_test(); + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + const uint64_t i = 12345; + Address address(incoming_view_key, i); + + // Encode address + std::string encoded = address.encode(ADDRESS_NETWORK_TESTNET); + + // Malleate the prefix + encoded[0] = 'x'; + + // Decode address + Address decoded; + BOOST_CHECK_THROW(decoded.decode(encoded), std::invalid_argument); +} + +// Check that a bad network fails +BOOST_AUTO_TEST_CASE(evil_network) +{ + // Parameters + const Params* params; + params = Params::get_test(); + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + const uint64_t i = 12345; + Address address(incoming_view_key, i); + + // Encode address + std::string encoded = 
address.encode(ADDRESS_NETWORK_TESTNET); + + // Malleate the network + encoded[1] = 'x'; + + // Decode address + Address decoded; + BOOST_CHECK_THROW(decoded.decode(encoded), std::invalid_argument); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/aead_test.cpp b/src/libspark/test/aead_test.cpp new file mode 100644 index 0000000000..2a3901326d --- /dev/null +++ b/src/libspark/test/aead_test.cpp @@ -0,0 +1,146 @@ +#include "../aead.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +BOOST_FIXTURE_TEST_SUITE(spark_aead_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(complete) +{ + // Key + GroupElement prekey; + prekey.randomize(); + + // Serialize + int message = 12345; + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << message; + + // Encrypt + AEADEncryptedData data = AEAD::encrypt(prekey, "Associated data", ser); + + // Decrypt + ser = AEAD::decrypt_and_verify(prekey, "Associated data", data); + + // Deserialize + int message_; + ser >> message_; + + BOOST_CHECK_EQUAL(message_, message); +} + +BOOST_AUTO_TEST_CASE(bad_tag) +{ + // Key + GroupElement prekey; + prekey.randomize(); + + // Serialize and encrypt a message + int message = 12345; + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << message; + AEADEncryptedData data = AEAD::encrypt(prekey, "Associated data", ser); + + // Serialize and encrypt an evil message + ser.clear(); + int evil_message = 666; + ser << evil_message; + AEADEncryptedData evil_data = AEAD::encrypt(prekey, "Associated data", ser); + + // Replace tag + data.tag = evil_data.tag; + + // Decrypt; this should fail + BOOST_CHECK_THROW(ser = AEAD::decrypt_and_verify(prekey, "Associated data", data), std::runtime_error); +} + +BOOST_AUTO_TEST_CASE(bad_ciphertext) +{ + // Key + GroupElement prekey; + prekey.randomize(); + + // Serialize and encrypt a message + int message = 12345; + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << message; + AEADEncryptedData data = 
AEAD::encrypt(prekey, "Associated data", ser); + + // Serialize and encrypt an evil message + ser.clear(); + int evil_message = 666; + ser << evil_message; + AEADEncryptedData evil_data = AEAD::encrypt(prekey, "Associated data", ser); + + // Replace ciphertext + data.ciphertext = evil_data.ciphertext; + + // Decrypt; this should fail + BOOST_CHECK_THROW(ser = AEAD::decrypt_and_verify(prekey, "Associated data", data), std::runtime_error); +} + +BOOST_AUTO_TEST_CASE(bad_associated_data) +{ + // Key + GroupElement prekey; + prekey.randomize(); + + // Serialize and encrypt a message + int message = 12345; + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << message; + AEADEncryptedData data = AEAD::encrypt(prekey, "Associated data", ser); + + // Decrypt; this should fail + BOOST_CHECK_THROW(ser = AEAD::decrypt_and_verify(prekey, "Evil associated data", data), std::runtime_error); +} + +BOOST_AUTO_TEST_CASE(bad_key) +{ + // Key + GroupElement prekey; + prekey.randomize(); + + // Evil key + GroupElement evil_prekey; + evil_prekey.randomize(); + + // Serialize and encrypt a message + int message = 12345; + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << message; + AEADEncryptedData data = AEAD::encrypt(prekey, "Associated data", ser); + + // Decrypt; this should fail + BOOST_CHECK_THROW(ser = AEAD::decrypt_and_verify(evil_prekey, "Associated data", data), std::runtime_error); +} + +BOOST_AUTO_TEST_CASE(bad_key_commitment) +{ + // Key + GroupElement prekey; + prekey.randomize(); + + // Evil key and key commitment + GroupElement evil_prekey; + evil_prekey.randomize(); + std::vector evil_key_commitment = SparkUtils::commit_aead(evil_prekey); + + // Serialize and encrypt a message + int message = 12345; + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << message; + AEADEncryptedData data = AEAD::encrypt(prekey, "Associated data", ser); + + // Replace key commitment + data.key_commitment = evil_key_commitment; + + // Decrypt; this should fail + 
BOOST_CHECK_THROW(ser = AEAD::decrypt_and_verify(prekey, "Associated data", data), std::runtime_error); +} + +BOOST_AUTO_TEST_SUITE_END() + +} \ No newline at end of file diff --git a/src/libspark/test/bpplus_test.cpp b/src/libspark/test/bpplus_test.cpp new file mode 100644 index 0000000000..492d40ce11 --- /dev/null +++ b/src/libspark/test/bpplus_test.cpp @@ -0,0 +1,303 @@ +#include "../bpplus.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +BOOST_FIXTURE_TEST_SUITE(spark_bpplus_tests, BasicTestingSetup) + +// Generate and verify a single aggregated proof with no padding +BOOST_AUTO_TEST_CASE(completeness_single_unpadded) +{ + // Parameters + std::size_t N = 64; // bit length + std::size_t M = 4; // aggregation + + // Generators + GroupElement G, H; + G.randomize(); + H.randomize(); + + std::vector Gi, Hi; + std::size_t gens_needed = N*M; + if (!is_nonzero_power_of_2(gens_needed)) { + gens_needed = 1 << (log2(N*M) + 1); + } + BOOST_CHECK_EQUAL(gens_needed, N*M); + Gi.resize(gens_needed); + Hi.resize(gens_needed); + for (std::size_t i = 0; i < gens_needed; i++) { + Gi[i].randomize(); + Hi[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M); + v[0] = Scalar(uint64_t(0)); + v[1] = Scalar(uint64_t(1)); + v[2] = Scalar(uint64_t(2)); + v[3] = Scalar(uint64_t(3)); + r.resize(M); + std::vector C; + C.resize(M); + for (std::size_t j = 0; j < M; j++) { + r[j].randomize(); + C[j] = G*v[j] + H*r[j]; + } + + BPPlus bpplus(G, H, Gi, Hi, N); + BPPlusProof proof; + bpplus.prove(v, r, C, proof); + + BOOST_CHECK(bpplus.verify(C, proof)); +} + +// Generate and verify a single aggregated proof with padding +BOOST_AUTO_TEST_CASE(completeness_single_padded) +{ + // Parameters + std::size_t N = 64; // bit length + std::size_t M = 5; // aggregation + + // Generators + GroupElement G, H; + G.randomize(); + H.randomize(); + + std::vector Gi, Hi; + std::size_t gens_needed = N*M; + if (!is_nonzero_power_of_2(gens_needed)) { + gens_needed = 1 << 
(log2(N*M) + 1); + } + BOOST_CHECK_EQUAL(gens_needed, 8*N); // hardcoded for this test + Gi.resize(gens_needed); + Hi.resize(gens_needed); + for (std::size_t i = 0; i < gens_needed; i++) { + Gi[i].randomize(); + Hi[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M); + v[0] = Scalar(uint64_t(0)); + v[1] = Scalar(uint64_t(1)); + v[2] = Scalar(uint64_t(2)); + v[3] = Scalar(uint64_t(3)); + v[4] = Scalar(std::numeric_limits::max()); + r.resize(M); + std::vector C; + C.resize(M); + for (std::size_t j = 0; j < M; j++) { + r[j].randomize(); + C[j] = G*v[j] + H*r[j]; + } + + BPPlus bpplus(G, H, Gi, Hi, N); + BPPlusProof proof; + bpplus.prove(v, r, C, proof); + + BOOST_CHECK(bpplus.verify(C, proof)); +} + +// A single proof with invalid value and no padding +BOOST_AUTO_TEST_CASE(invalid_single_unpadded) +{ + // Parameters + std::size_t N = 64; // bit length + std::size_t M = 4; // aggregation + + // Generators + GroupElement G, H; + G.randomize(); + H.randomize(); + + std::vector Gi, Hi; + std::size_t gens_needed = N*M; + if (!is_nonzero_power_of_2(gens_needed)) { + gens_needed = 1 << (log2(N*M) + 1); + } + BOOST_CHECK_EQUAL(gens_needed, N*M); + Gi.resize(gens_needed); + Hi.resize(gens_needed); + for (std::size_t i = 0; i < gens_needed; i++) { + Gi[i].randomize(); + Hi[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M); + v[0] = Scalar(uint64_t(0)); + v[1] = Scalar(uint64_t(1)); + v[2] = Scalar(uint64_t(2)); + v[3] = Scalar(std::numeric_limits::max()) + Scalar(uint64_t(1)); // out of range + r.resize(M); + std::vector C; + C.resize(M); + for (std::size_t j = 0; j < M; j++) { + r[j].randomize(); + C[j] = G*v[j] + H*r[j]; + } + + BPPlus bpplus(G, H, Gi, Hi, N); + BPPlusProof proof; + bpplus.prove(v, r, C, proof); + + BOOST_CHECK(!bpplus.verify(C, proof)); +} + +// A single proof with invalid value and padding +BOOST_AUTO_TEST_CASE(invalid_single_padded) +{ + // Parameters + std::size_t N = 64; // bit length + std::size_t M = 5; // 
aggregation + + // Generators + GroupElement G, H; + G.randomize(); + H.randomize(); + + std::vector Gi, Hi; + std::size_t gens_needed = N*M; + if (!is_nonzero_power_of_2(gens_needed)) { + gens_needed = 1 << (log2(N*M) + 1); + } + BOOST_CHECK_EQUAL(gens_needed, 8*N); // hardcoded for this test + Gi.resize(gens_needed); + Hi.resize(gens_needed); + for (std::size_t i = 0; i < gens_needed; i++) { + Gi[i].randomize(); + Hi[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M); + v[0] = Scalar(uint64_t(0)); + v[1] = Scalar(uint64_t(1)); + v[2] = Scalar(uint64_t(2)); + v[3] = Scalar(uint64_t(3)); + v[4] = Scalar(std::numeric_limits::max()) + Scalar(uint64_t(1)); // out of range + r.resize(M); + std::vector C; + C.resize(M); + for (std::size_t j = 0; j < M; j++) { + r[j].randomize(); + C[j] = G*v[j] + H*r[j]; + } + + BPPlus bpplus(G, H, Gi, Hi, N); + BPPlusProof proof; + bpplus.prove(v, r, C, proof); + + BOOST_CHECK(!bpplus.verify(C, proof)); +} +// Generate and verify a batch of proofs with variable aggregation +BOOST_AUTO_TEST_CASE(completeness_batch) +{ + // Parameters + std::size_t N = 64; // bit length + std::size_t B = 5; // number of proofs in batch + std::vector sizes = {1, 2, 3, 4, 5}; + BOOST_CHECK_EQUAL(sizes.size(), B); + + // Generators + GroupElement G, H; + G.randomize(); + H.randomize(); + + std::vector Gi, Hi; + Gi.resize(8*N); + Hi.resize(8*N); + for (std::size_t i = 0; i < 8*N; i++) { + Gi[i].randomize(); + Hi[i].randomize(); + } + + BPPlus bpplus(G, H, Gi, Hi, N); + std::vector proofs; + proofs.resize(B); + std::vector> C; + + // Build each proof + for (std::size_t i = 0; i < B; i++) { + // Commitments + std::size_t M = sizes[i]; + std::vector v, r; + v.resize(M); + r.resize(M); + std::vector C_; + C_.resize(M); + for (std::size_t j = 0; j < M; j++) { + v[j] = Scalar(uint64_t(j)); + r[j].randomize(); + C_[j] = G*v[j] + H*r[j]; + } + C.emplace_back(C_); + + bpplus.prove(v, r, C_, proofs[i]); + } + + BOOST_CHECK(bpplus.verify(C, 
proofs)); +} + +// An invalid batch of proofs +BOOST_AUTO_TEST_CASE(invalid_batch) +{ + // Parameters + std::size_t N = 64; // bit length + std::size_t B = 5; // number of proofs in batch + std::vector sizes = {1, 2, 3, 4, 5}; + BOOST_CHECK_EQUAL(sizes.size(), B); + + // Generators + GroupElement G, H; + G.randomize(); + H.randomize(); + + std::vector Gi, Hi; + Gi.resize(8*N); + Hi.resize(8*N); + for (std::size_t i = 0; i < 8*N; i++) { + Gi[i].randomize(); + Hi[i].randomize(); + } + + BPPlus bpplus(G, H, Gi, Hi, N); + std::vector proofs; + proofs.resize(B); + std::vector> C; + + // Build each proof + for (std::size_t i = 0; i < B; i++) { + // Commitments + std::size_t M = sizes[i]; + std::vector v, r; + v.resize(M); + r.resize(M); + std::vector C_; + C_.resize(M); + for (std::size_t j = 0; j < M; j++) { + v[j] = Scalar(uint64_t(j)); + // Set one proof to an out-of-range value; + if (i == 0 && j == 0) { + v[j] = Scalar(std::numeric_limits::max()) + Scalar(uint64_t(1)); + } + r[j].randomize(); + C_[j] = G*v[j] + H*r[j]; + } + C.emplace_back(C_); + + bpplus.prove(v, r, C_, proofs[i]); + } + + BOOST_CHECK(!bpplus.verify(C, proofs)); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/chaum_test.cpp b/src/libspark/test/chaum_test.cpp new file mode 100644 index 0000000000..26281438bd --- /dev/null +++ b/src/libspark/test/chaum_test.cpp @@ -0,0 +1,180 @@ +#include "../chaum.h" +#include "../../streams.h" +#include "../../version.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +BOOST_FIXTURE_TEST_SUITE(spark_chaum_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(serialization) +{ + GroupElement F, G, H, U; + F.randomize(); + G.randomize(); + H.randomize(); + U.randomize(); + + const std::size_t n = 3; + + Scalar mu; + mu.randomize(); + std::vector x, y, z; + x.resize(n); + y.resize(n); + z.resize(n); + std::vector S, T; + S.resize(n); + T.resize(n); + for (std::size_t i = 0; i < n; i++) { + x[i].randomize(); + 
y[i].randomize(); + z[i].randomize(); + + S[i] = F*x[i] + G*y[i] + H*z[i]; + T[i] = (U + G*y[i].negate())*x[i].inverse(); + } + + ChaumProof proof; + + Chaum chaum(F, G, H, U); + chaum.prove(mu, x, y, z, S, T, proof); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof; + + ChaumProof deserialized; + serialized >> deserialized; + + BOOST_CHECK(proof.A1 == deserialized.A1); + BOOST_CHECK(proof.t2 == deserialized.t2); + BOOST_CHECK(proof.t3 == deserialized.t3); + for (std::size_t i = 0; i < n; i++) { + BOOST_CHECK(proof.A2[i] == deserialized.A2[i]); + BOOST_CHECK(proof.t1[i] == deserialized.t1[i]); + } +} + +BOOST_AUTO_TEST_CASE(completeness) +{ + GroupElement F, G, H, U; + F.randomize(); + G.randomize(); + H.randomize(); + U.randomize(); + + const std::size_t n = 3; + + Scalar mu; + mu.randomize(); + std::vector x, y, z; + x.resize(n); + y.resize(n); + z.resize(n); + std::vector S, T; + S.resize(n); + T.resize(n); + for (std::size_t i = 0; i < n; i++) { + x[i].randomize(); + y[i].randomize(); + z[i].randomize(); + + S[i] = F*x[i] + G*y[i] + H*z[i]; + T[i] = (U + G*y[i].negate())*x[i].inverse(); + } + + ChaumProof proof; + + Chaum chaum(F, G, H, U); + chaum.prove(mu, x, y, z, S, T, proof); + + BOOST_CHECK(chaum.verify(mu, S, T, proof)); +} + +BOOST_AUTO_TEST_CASE(bad_proofs) +{ + GroupElement F, G, H, U; + F.randomize(); + G.randomize(); + H.randomize(); + U.randomize(); + + const std::size_t n = 3; + + Scalar mu; + mu.randomize(); + std::vector x, y, z; + x.resize(n); + y.resize(n); + z.resize(n); + std::vector S, T; + S.resize(n); + T.resize(n); + for (std::size_t i = 0; i < n; i++) { + x[i].randomize(); + y[i].randomize(); + z[i].randomize(); + + S[i] = F*x[i] + G*y[i] + H*z[i]; + T[i] = (U + G*y[i].negate())*x[i].inverse(); + } + + ChaumProof proof; + + Chaum chaum(F, G, H, U); + chaum.prove(mu, x, y, z, S, T, proof); + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + BOOST_CHECK(!(chaum.verify(evil_mu, S, T, proof))); + + // 
Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S); + evil_S[i].randomize(); + BOOST_CHECK(!(chaum.verify(mu, evil_S, T, proof))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T); + evil_T[i].randomize(); + BOOST_CHECK(!(chaum.verify(mu, S, evil_T, proof))); + } + + // Bad A1 + ChaumProof evil_proof = proof; + evil_proof.A1.randomize(); + BOOST_CHECK(!(chaum.verify(mu, S, T, evil_proof))); + + // Bad A2 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof; + evil_proof.A2[i].randomize(); + BOOST_CHECK(!(chaum.verify(mu, S, T, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof; + evil_proof.t1[i].randomize(); + BOOST_CHECK(!(chaum.verify(mu, S, T, evil_proof))); + } + + // Bad t2 + evil_proof = proof; + evil_proof.t2.randomize(); + BOOST_CHECK(!(chaum.verify(mu, S, T, evil_proof))); + + // Bad t3 + evil_proof = proof; + evil_proof.t3.randomize(); + BOOST_CHECK(!(chaum.verify(mu, S, T, evil_proof))); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/coin_test.cpp b/src/libspark/test/coin_test.cpp new file mode 100644 index 0000000000..522ea29f2a --- /dev/null +++ b/src/libspark/test/coin_test.cpp @@ -0,0 +1,121 @@ +#include "../coin.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +using namespace secp_primitives; + +// Generate a random char vector from a random scalar +static std::vector random_char_vector() { + Scalar temp; + temp.randomize(); + std::vector result; + result.resize(SCALAR_ENCODING); + temp.serialize(result.data()); + + return result; +} + +BOOST_FIXTURE_TEST_SUITE(spark_coin_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(mint_identify_recover) +{ + // Parameters + const Params* params; + params = Params::get_default(); + + const uint64_t i = 12345; + const uint64_t v = 86; + const std::string memo = "Spam and eggs"; + + // Generate keys + SpendKey spend_key(params); + FullViewKey 
full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + Address address(incoming_view_key, i); + + // Generate coin + Scalar k; + k.randomize(); + Coin coin = Coin( + params, + COIN_TYPE_MINT, + k, + address, + v, + memo, + random_char_vector() + ); + + // Identify coin + IdentifiedCoinData i_data = coin.identify(incoming_view_key); + BOOST_CHECK_EQUAL(i_data.i, i); + BOOST_CHECK_EQUAL_COLLECTIONS(i_data.d.begin(), i_data.d.end(), address.get_d().begin(), address.get_d().end()); + BOOST_CHECK_EQUAL(i_data.v, v); + BOOST_CHECK_EQUAL(i_data.k, k); + BOOST_CHECK_EQUAL(strcmp(memo.c_str(), i_data.memo.c_str()), 0); // compare strings in a lexicographical manner, as we pad the memo in the coin + BOOST_CHECK_EQUAL(i_data.memo.size(), params->get_memo_bytes()); // check that it is padded + // Recover coin + RecoveredCoinData r_data = coin.recover(full_view_key, i_data); + BOOST_CHECK_EQUAL( + params->get_F()*(SparkUtils::hash_ser(k, coin.serial_context) + SparkUtils::hash_Q2(incoming_view_key.get_s1(), i) + full_view_key.get_s2()) + full_view_key.get_D(), + params->get_F()*r_data.s + full_view_key.get_D() + ); + BOOST_CHECK_EQUAL(r_data.T*r_data.s + full_view_key.get_D(), params->get_U()); +} + +BOOST_AUTO_TEST_CASE(spend_identify_recover) +{ + // Parameters + const Params* params; + params = Params::get_default(); + + const uint64_t i = 12345; + const uint64_t v = 86; + const std::string memo = "Spam and eggs"; + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + Address address(incoming_view_key, i); + + // Generate coin + Scalar k; + k.randomize(); + Coin coin = Coin( + params, + COIN_TYPE_SPEND, + k, + address, + v, + memo, + random_char_vector() + ); + + // Identify coin + IdentifiedCoinData i_data = coin.identify(incoming_view_key); + BOOST_CHECK_EQUAL(i_data.i, i); + 
BOOST_CHECK_EQUAL_COLLECTIONS(i_data.d.begin(), i_data.d.end(), address.get_d().begin(), address.get_d().end()); + BOOST_CHECK_EQUAL(i_data.v, v); + BOOST_CHECK_EQUAL(i_data.k, k); + BOOST_CHECK_EQUAL(strcmp(memo.c_str(), i_data.memo.c_str()), 0); // compare strings in a lexicographical manner, as we pad the memo in the coin + BOOST_CHECK_EQUAL(i_data.memo.size(), params->get_memo_bytes()); // check that it is padded + + // Recover coin + RecoveredCoinData r_data = coin.recover(full_view_key, i_data); + BOOST_CHECK_EQUAL( + params->get_F()*(SparkUtils::hash_ser(k, coin.serial_context) + SparkUtils::hash_Q2(incoming_view_key.get_s1(), i) + full_view_key.get_s2()) + full_view_key.get_D(), + params->get_F()*r_data.s + full_view_key.get_D() + ); + BOOST_CHECK_EQUAL(r_data.T*r_data.s + full_view_key.get_D(), params->get_U()); +} +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/encrypt_test.cpp b/src/libspark/test/encrypt_test.cpp new file mode 100644 index 0000000000..d0849b81c8 --- /dev/null +++ b/src/libspark/test/encrypt_test.cpp @@ -0,0 +1,52 @@ +#include "../util.h" +#include + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +BOOST_FIXTURE_TEST_SUITE(spark_encrypt_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(complete) +{ + // Key + std::string key_string = "Key prefix"; + std::vector key(key_string.begin(), key_string.end()); + key.resize(AES256_KEYSIZE); + + // Encrypt + uint64_t i = 12345; + std::vector d = SparkUtils::diversifier_encrypt(key, i); + + // Decrypt + uint64_t i_ = SparkUtils::diversifier_decrypt(key, d); + + BOOST_CHECK_EQUAL(i_, i); +} + +BOOST_AUTO_TEST_CASE(bad_key) +{ + // Key + std::string key_string = "Key prefix"; + std::vector key(key_string.begin(), key_string.end()); + key.resize(AES256_KEYSIZE); + + // Evil key + std::string evil_key_string = "Evil key prefix"; + std::vector evil_key(evil_key_string.begin(), evil_key_string.end()); + evil_key.resize(AES256_KEYSIZE); + + // Encrypt + uint64_t i = 
12345; + std::vector d = SparkUtils::diversifier_encrypt(key, i); + + // Decrypt + uint64_t i_ = SparkUtils::diversifier_decrypt(evil_key, d); + + BOOST_CHECK_NE(i_, i); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/f4grumble_test.cpp b/src/libspark/test/f4grumble_test.cpp new file mode 100644 index 0000000000..15348d8749 --- /dev/null +++ b/src/libspark/test/f4grumble_test.cpp @@ -0,0 +1,125 @@ +#include "../f4grumble.h" + +#include "../../test/test_bitcoin.h" +#include + +#include + +namespace spark { + +BOOST_FIXTURE_TEST_SUITE(spark_f4grumble_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(complete) +{ + // Test all sizes of interest + const int MIN_SIZE = 0; // sure, why not + const int MAX_SIZE = 128; + + // Set up the randomizer + std::random_device rand; + std::uniform_int_distribution dist(0, 0xFF); + + for (int i = MIN_SIZE; i <= MAX_SIZE; i++) { + // Generate a random byte array + std::vector input; + input.reserve(i); + + for (int j = 0; j < i; j++) { + input.emplace_back(static_cast(dist(rand))); + } + + // Pick a network byte and set up the encoder + unsigned char network = static_cast(dist(rand)); + F4Grumble grumble(network, i); + + // Encode the byte array + std::vector scrambled = grumble.encode(input); + + // Check that the length has not changed + BOOST_CHECK_EQUAL(scrambled.size(), input.size()); + + // Decode and check correctness + std::vector unscrambled = grumble.decode(scrambled); + BOOST_CHECK_EQUAL_COLLECTIONS(unscrambled.begin(), unscrambled.end(), input.begin(), input.end()); + } +} + +BOOST_AUTO_TEST_CASE(too_long) +{ + // This size is invalid! 
+ int size = F4Grumble::get_max_size() + 1; + + // Set up the randomizer + std::random_device rand; + std::uniform_int_distribution dist(0, 0xFF); + + // Generate a random byte array + std::vector input; + input.reserve(size); + + for (int j = 0; j < size; j++) { + input.emplace_back(static_cast(dist(rand))); + } + + // Pick a network byte + unsigned char network = static_cast(dist(rand)); + + // We can't even instantiate this! + BOOST_CHECK_THROW(F4Grumble grumble(network, size), std::invalid_argument); + + // But pretend we can + F4Grumble grumble(network, F4Grumble::get_max_size()); + + // We should not be able to encode this... + BOOST_CHECK_THROW(grumble.encode(input), std::invalid_argument); + + // ... nor decode it + BOOST_CHECK_THROW(grumble.decode(input), std::invalid_argument); +} + +BOOST_AUTO_TEST_CASE(bad_network) +{ + // Choose a large input size (such that collisions are unlikely) + int size = F4Grumble::get_max_size(); + + // Set up the randomizer + std::random_device rand; + std::uniform_int_distribution dist(0, 0xFF); + + // Generate a random byte array + std::vector input; + input.reserve(size); + + for (int j = 0; j < size; j++) { + input.emplace_back(static_cast(dist(rand))); + } + + // Pick a network byte + unsigned char network = static_cast(dist(rand)); + + // Pick an evil network byte + unsigned char evil_network = ~network; + BOOST_CHECK_NE(network, evil_network); + + // Encode with the original network + F4Grumble grumble(network, size); + std::vector scrambled = grumble.encode(input); + + // Encode with the evil network + F4Grumble evil_grumble(evil_network, size); + std::vector evil_scrambled = evil_grumble.decode(input); + + // They should be distinct + bool equal = true; + BOOST_CHECK_EQUAL(scrambled.size(), evil_scrambled.size()); + for (std::size_t i = 0; i < scrambled.size(); i++) { + if (scrambled[i] != evil_scrambled[i]) { + equal = false; + } + } + BOOST_CHECK(!equal); +} + +BOOST_AUTO_TEST_SUITE_END() + +} \ No newline at end 
of file diff --git a/src/libspark/test/grootle_test.cpp b/src/libspark/test/grootle_test.cpp new file mode 100644 index 0000000000..57d82635de --- /dev/null +++ b/src/libspark/test/grootle_test.cpp @@ -0,0 +1,173 @@ +#include "../grootle.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +static std::vector random_group_vector(const std::size_t n) { + std::vector result; + result.resize(n); + for (std::size_t i = 0; i < n; i++) { + result[i].randomize(); + } + return result; +} + +BOOST_FIXTURE_TEST_SUITE(spark_grootle_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(batch) +{ + // Parameters + const std::size_t n = 4; + const std::size_t m = 3; + const std::size_t N = (std::size_t) std::pow(n, m); // N = 64 + + // Generators + GroupElement H; + H.randomize(); + std::vector Gi = random_group_vector(n*m); + std::vector Hi = random_group_vector(n*m); + + // Commitments + std::size_t commit_size = 60; // require padding + std::vector S = random_group_vector(commit_size); + std::vector V = random_group_vector(commit_size); + + // Generate valid commitments to zero + std::vector indexes = { 0, 1, 3, 59 }; + std::vector sizes = { 60, 60, 59, 16 }; + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_.randomize(); + v_.randomize(); + s.emplace_back(s_); + v.emplace_back(v_); + + S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + // Prepare random data in place of Merkle root + Scalar temp; + temp.randomize(); + std::vector root; + root.reserve(SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + // Prepare proving system + Grootle grootle(H, Gi, Hi, n, m); + std::vector proofs; + + for (std::size_t i = 0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + grootle.prove( 
+ indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + ); + + // Verify single proof + BOOST_CHECK(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + BOOST_CHECK(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); +} + +BOOST_AUTO_TEST_CASE(invalid_batch) +{ + // Parameters + const std::size_t n = 4; + const std::size_t m = 3; + const std::size_t N = (std::size_t) std::pow(n, m); // N = 64 + + // Generators + GroupElement H; + H.randomize(); + std::vector Gi = random_group_vector(n*m); + std::vector Hi = random_group_vector(n*m); + + // Commitments + std::size_t commit_size = 60; // require padding + std::vector S = random_group_vector(commit_size); + std::vector V = random_group_vector(commit_size); + + // Generate valid commitments to zero + std::vector indexes = { 0, 1, 3, 59 }; + std::vector sizes = { 60, 60, 59, 16 }; + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_.randomize(); + v_.randomize(); + s.emplace_back(s_); + v.emplace_back(v_); + + S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + // Prepare random data in place of Merkle root + Scalar temp; + temp.randomize(); + std::vector root; + root.reserve(SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + // Prepare proving system + Grootle grootle(H, Gi, Hi, n, m); + std::vector proofs; + + for (std::size_t i = 0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + ); + } + + BOOST_CHECK(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); 
+ S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + + BOOST_CHECK(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); +} + +BOOST_AUTO_TEST_SUITE_END() + +} \ No newline at end of file diff --git a/src/libspark/test/mint_transaction_test.cpp b/src/libspark/test/mint_transaction_test.cpp new file mode 100644 index 0000000000..cbf562415f --- /dev/null +++ b/src/libspark/test/mint_transaction_test.cpp @@ -0,0 +1,60 @@ +#include "../mint_transaction.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +using namespace secp_primitives; + +// Generate a random char vector from a random scalar +static std::vector random_char_vector() { + Scalar temp; + temp.randomize(); + std::vector result; + result.resize(SCALAR_ENCODING); + temp.serialize(result.data()); + + return result; +} + +BOOST_FIXTURE_TEST_SUITE(spark_mint_transaction_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(generate_verify) +{ + // Parameters + const Params* params; + params = Params::get_default(); + const std::size_t t = 3; // number of coins to generate + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + std::vector outputs; + + // Generate addresses and coins + for (std::size_t j = 0; j < t; j++) { + MintedCoinData output; + output.address = Address(incoming_view_key, 12345 + j); + output.v = 678 + j; + output.memo = "Spam and eggs"; + + outputs.emplace_back(output); + } + + // Generate mint transaction + MintTransaction mint( + params, + outputs, + random_char_vector() + ); + + // Verify + BOOST_CHECK(mint.verify()); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/schnorr_test.cpp b/src/libspark/test/schnorr_test.cpp new file mode 100644 index 0000000000..261ed383ba --- /dev/null +++ b/src/libspark/test/schnorr_test.cpp @@ -0,0 +1,110 @@ +#include "../schnorr.h" +#include 
"../../streams.h" +#include "../../version.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +BOOST_FIXTURE_TEST_SUITE(spark_schnorr_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(serialization) +{ + GroupElement G; + G.randomize(); + + Scalar y; + y.randomize(); + GroupElement Y = G*y; + + SchnorrProof proof; + + Schnorr schnorr(G); + schnorr.prove(y, Y, proof); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof; + + SchnorrProof deserialized; + serialized >> deserialized; + + BOOST_CHECK(proof.A == deserialized.A); + BOOST_CHECK(proof.t == deserialized.t); +} + +BOOST_AUTO_TEST_CASE(completeness) +{ + GroupElement G; + G.randomize(); + + Scalar y; + y.randomize(); + GroupElement Y = G*y; + + SchnorrProof proof; + + Schnorr schnorr(G); + schnorr.prove(y, Y, proof); + + BOOST_CHECK(schnorr.verify(Y, proof)); +} + +BOOST_AUTO_TEST_CASE(completeness_aggregate) +{ + const std::size_t n = 3; + + GroupElement G; + G.randomize(); + + std::vector y; + std::vector Y; + + for (std::size_t i = 0; i < n; i++) { + y.emplace_back(); + y.back().randomize(); + + Y.emplace_back(G*y.back()); + } + + SchnorrProof proof; + + Schnorr schnorr(G); + schnorr.prove(y, Y, proof); + + BOOST_CHECK(schnorr.verify(Y, proof)); +} + +BOOST_AUTO_TEST_CASE(bad_proofs) +{ + GroupElement G; + G.randomize(); + + Scalar y; + y.randomize(); + GroupElement Y = G*y; + + SchnorrProof proof; + + Schnorr schnorr(G); + schnorr.prove(y, Y, proof); + + // Bad Y + GroupElement evil_Y; + evil_Y.randomize(); + BOOST_CHECK(!(schnorr.verify(evil_Y, proof))); + + // Bad A + SchnorrProof evil_proof = proof; + evil_proof.A.randomize(); + BOOST_CHECK(!(schnorr.verify(Y, evil_proof))); + + // Bad t + evil_proof = proof; + evil_proof.t.randomize(); + BOOST_CHECK(!(schnorr.verify(Y, evil_proof))); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/spend_transaction_test.cpp b/src/libspark/test/spend_transaction_test.cpp new file mode 100644 
index 0000000000..4527b26893 --- /dev/null +++ b/src/libspark/test/spend_transaction_test.cpp @@ -0,0 +1,136 @@ +#include "../spend_transaction.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + +using namespace secp_primitives; + +// Generate a random char vector from a random scalar +static std::vector random_char_vector() { + Scalar temp; + temp.randomize(); + std::vector result; + result.resize(SCALAR_ENCODING); + temp.serialize(result.data()); + + return result; +} + +BOOST_FIXTURE_TEST_SUITE(spark_spend_transaction_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(generate_verify) +{ + // Parameters + const Params* params; + params = Params::get_test(); + + const std::string memo = "Spam and eggs"; // arbitrary memo + + // Generate keys + SpendKey spend_key(params); + FullViewKey full_view_key(spend_key); + IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + const uint64_t i = 12345; + Address address(incoming_view_key, i); + + // Mint some coins to the address + std::size_t N = (std::size_t) pow(params->get_n_grootle(), params->get_m_grootle()); + std::vector in_coins; + for (std::size_t i = 0; i < N; i++) { + Scalar k; + k.randomize(); + + uint64_t v = 123 + i; // arbitrary value + + in_coins.emplace_back(Coin( + params, + COIN_TYPE_MINT, + k, + address, + v, + memo, + random_char_vector() + )); + } + + // Track values so we can set the fee to make the transaction balance + uint64_t f = 0; + + // Choose coins to spend, recover them, and prepare them for spending + std::vector spend_indices = { 1, 3, 5 }; + std::vector spend_coin_data; + std::unordered_map cover_set_data; + const std::size_t w = spend_indices.size(); + for (std::size_t u = 0; u < w; u++) { + IdentifiedCoinData identified_coin_data = in_coins[spend_indices[u]].identify(incoming_view_key); + RecoveredCoinData recovered_coin_data = in_coins[spend_indices[u]].recover(full_view_key, identified_coin_data); + + spend_coin_data.emplace_back(); + 
uint64_t cover_set_id = 31415; + spend_coin_data.back().cover_set_id = cover_set_id; + + CoverSetData setData; + setData.cover_set = in_coins; + setData.cover_set_representation = random_char_vector(); + cover_set_data[cover_set_id] = setData; + spend_coin_data.back().index = spend_indices[u]; + spend_coin_data.back().k = identified_coin_data.k; + spend_coin_data.back().s = recovered_coin_data.s; + spend_coin_data.back().T = recovered_coin_data.T; + spend_coin_data.back().v = identified_coin_data.v; + + f += identified_coin_data.v; + } + + // Generate new output coins and compute the fee + const std::size_t t = 3; + std::vector out_coin_data; + for (std::size_t j = 0; j < t; j++) { + out_coin_data.emplace_back(); + out_coin_data.back().address = address; + out_coin_data.back().v = 12 + j; // arbitrary value + out_coin_data.back().memo = memo; + + f -= out_coin_data.back().v; + } + + // Assert the fee is correct + uint64_t fee_test = f; + for (std::size_t j = 0; j < t; j++) { + fee_test += out_coin_data[j].v; + } + for (std::size_t u = 0; u < w; u++) { + fee_test -= spend_coin_data[u].v; + } + + if (fee_test != 0) { + throw std::runtime_error("Bad fee assertion"); + } + + // Generate spend transaction + SpendTransaction transaction( + params, + full_view_key, + spend_key, + spend_coin_data, + cover_set_data, + f, + 0, + out_coin_data + ); + + // Verify + transaction.setCoverSets(cover_set_data); + std::unordered_map> cover_sets; + for (const auto set_data : cover_set_data) + cover_sets[set_data.first] = set_data.second.cover_set; + BOOST_CHECK(SpendTransaction::verify(transaction, cover_sets)); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/test/transcript_test.cpp b/src/libspark/test/transcript_test.cpp new file mode 100644 index 0000000000..4ef9e1131d --- /dev/null +++ b/src/libspark/test/transcript_test.cpp @@ -0,0 +1,174 @@ +#include "../transcript.h" + +#include "../../test/test_bitcoin.h" +#include + +namespace spark { + 
+BOOST_FIXTURE_TEST_SUITE(spark_transcript_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(init) +{ + // Identical domain separators + Transcript transcript_1("Spam"); + Transcript transcript_2("Spam"); + BOOST_CHECK_EQUAL(transcript_1.challenge("x"), transcript_2.challenge("x")); + + // Distinct domain separators + transcript_1 = Transcript("Spam"); + transcript_2 = Transcript("Eggs"); + BOOST_CHECK_NE(transcript_1.challenge("x"), transcript_2.challenge("x")); +} + +BOOST_AUTO_TEST_CASE(challenge_labels) +{ + Transcript transcript_1("Spam"); + Transcript transcript_2("Spam"); + + // Identical challenge labels + BOOST_CHECK_EQUAL(transcript_1.challenge("x"), transcript_2.challenge("x")); + + // Distinct challenge labels + BOOST_CHECK_NE(transcript_1.challenge("x"), transcript_2.challenge("y")); +} + +BOOST_AUTO_TEST_CASE(add_types) +{ + // Add all fixed types and assert distinct challenges + const std::string domain = "Spam"; + Transcript transcript(domain); + + Scalar scalar; + scalar.randomize(); + transcript.add("Scalar", scalar); + Scalar ch_1 = transcript.challenge("x"); + + GroupElement group; + group.randomize(); + transcript.add("Group", group); + Scalar ch_2 = transcript.challenge("x"); + BOOST_CHECK_NE(ch_1, ch_2); + + std::vector scalars; + for (std::size_t i = 0; i < 3; i++) { + scalar.randomize(); + scalars.emplace_back(scalar); + } + Scalar ch_3 = transcript.challenge("x"); + BOOST_CHECK_NE(ch_2, ch_3); + + std::vector groups; + for (std::size_t i = 0; i < 3; i++) { + group.randomize(); + groups.emplace_back(group); + } + Scalar ch_4 = transcript.challenge("x"); + BOOST_CHECK_NE(ch_3, ch_4); + + const std::string data = "Arbitrary string"; + const std::vector data_char(data.begin(), data.end()); + transcript.add("Data", data_char); + Scalar ch_5 = transcript.challenge("x"); + BOOST_CHECK_NE(ch_4, ch_5); +} + +BOOST_AUTO_TEST_CASE(repeated_challenge) +{ + // Repeated challenges must be distinct, even with the same label + Transcript 
transcript("Eggs"); + + Scalar ch_1 = transcript.challenge("x"); + Scalar ch_2 = transcript.challenge("x"); + + BOOST_CHECK_NE(ch_1, ch_2); +} + +BOOST_AUTO_TEST_CASE(repeated_challenge_ordering) +{ + // Repeated challenges must respect ordering + Transcript prover("Spam"); + Transcript verifier("Spam"); + + Scalar prover_x = prover.challenge("x"); + Scalar prover_y = prover.challenge("y"); + + // Oh no, we mixed up the order + Scalar verifier_y = verifier.challenge("y"); + Scalar verifier_x = verifier.challenge("x"); + + BOOST_CHECK_NE(prover_x, verifier_x); + BOOST_CHECK_NE(prover_y, verifier_y); +} + +BOOST_AUTO_TEST_CASE(identical_transcripts) +{ + // Ensure that identical transcripts yield identical challenges + Transcript prover("Beer"); + Transcript verifier("Beer"); + + Scalar scalar; + scalar.randomize(); + GroupElement group; + group.randomize(); + + prover.add("Scalar", scalar); + verifier.add("Scalar", scalar); + prover.add("Group", group); + verifier.add("Group", group); + + BOOST_CHECK_EQUAL(prover.challenge("x"), verifier.challenge("x")); +} + +BOOST_AUTO_TEST_CASE(distinct_values) +{ + // Ensure that distinct transcript values yield distinct challenges + Transcript prover("Soda"); + Transcript verifier("Soda"); + + Scalar prover_scalar; + prover_scalar.randomize(); + Scalar verifier_scalar; + verifier_scalar.randomize(); + + prover.add("Scalar", prover_scalar); + verifier.add("Scalar", verifier_scalar); + + BOOST_CHECK_NE(prover.challenge("x"), verifier.challenge("x")); +} + +BOOST_AUTO_TEST_CASE(distinct_labels) +{ + // Ensure that distinct transcript labels yield distinct challenges + Transcript prover("Soda"); + Transcript verifier("Soda"); + + Scalar scalar; + scalar.randomize(); + + prover.add("Prover scalar", scalar); + verifier.add("Verifier scalar", scalar); + + BOOST_CHECK_NE(prover.challenge("x"), verifier.challenge("y")); +} + +BOOST_AUTO_TEST_CASE(converging) +{ + // Transcripts with distinct initial states but common post-challenge 
elements + Transcript transcript_1("Spam"); + Transcript transcript_2("Eggs"); + + Scalar ch_1 = transcript_1.challenge("x"); + Scalar ch_2 = transcript_1.challenge("x"); + + // Add a common element and assert the states still differ + Scalar scalar; + scalar.randomize(); + transcript_1.add("Scalar", scalar); + transcript_2.add("Scalar", scalar); + + BOOST_CHECK_NE(transcript_1.challenge("x"), transcript_2.challenge("x")); +} + +BOOST_AUTO_TEST_SUITE_END() + +} diff --git a/src/libspark/transcript.cpp b/src/libspark/transcript.cpp new file mode 100644 index 0000000000..5cd67c63c0 --- /dev/null +++ b/src/libspark/transcript.cpp @@ -0,0 +1,177 @@ +#include "transcript.h" + +namespace spark { + +using namespace secp_primitives; + +// Flags for transcript operations +const unsigned char FLAG_DOMAIN = 0; +const unsigned char FLAG_DATA = 1; +const unsigned char FLAG_VECTOR = 2; +const unsigned char FLAG_CHALLENGE = 3; + +// Initialize a transcript with a domain separator +Transcript::Transcript(const std::string domain) { + // Prepare the state + this->ctx = EVP_MD_CTX_new(); + EVP_DigestInit_ex(this->ctx, EVP_sha512(), NULL); + + // Write the protocol and mode information + std::vector protocol(LABEL_PROTOCOL.begin(), LABEL_PROTOCOL.end()); + EVP_DigestUpdate(this->ctx, protocol.data(), protocol.size()); + EVP_DigestUpdate(this->ctx, &HASH_MODE_TRANSCRIPT, sizeof(HASH_MODE_TRANSCRIPT)); + + // Domain separator + include_flag(FLAG_DOMAIN); + include_label(domain); +} + +Transcript::~Transcript() { + EVP_MD_CTX_free(this->ctx); +} + +Transcript& Transcript::operator=(const Transcript& t) { + if (this == &t) { + return *this; + } + + EVP_MD_CTX_copy_ex(this->ctx, t.ctx); + + return *this; +} + +// Add a group element +void Transcript::add(const std::string label, const GroupElement& group_element) { + std::vector data; + data.resize(GroupElement::serialize_size); + group_element.serialize(data.data()); + + include_flag(FLAG_DATA); + include_label(label); + 
include_data(data); +} + +// Add a vector of group elements +void Transcript::add(const std::string label, const std::vector& group_elements) { + include_flag(FLAG_VECTOR); + size(group_elements.size()); + include_label(label); + for (std::size_t i = 0; i < group_elements.size(); i++) { + std::vector data; + data.resize(GroupElement::serialize_size); + group_elements[i].serialize(data.data()); + include_data(data); + } +} + +// Add a scalar +void Transcript::add(const std::string label, const Scalar& scalar) { + std::vector data; + data.resize(SCALAR_ENCODING); + scalar.serialize(data.data()); + + include_flag(FLAG_DATA); + include_label(label); + include_data(data); +} + +// Add a vector of scalars +void Transcript::add(const std::string label, const std::vector& scalars) { + include_flag(FLAG_VECTOR); + size(scalars.size()); + include_label(label); + for (std::size_t i = 0; i < scalars.size(); i++) { + std::vector data; + data.resize(SCALAR_ENCODING); + scalars[i].serialize(data.data()); + include_data(data); + } +} + +// Add arbitrary data +void Transcript::add(const std::string label, const std::vector& data) { + include_flag(FLAG_DATA); + include_label(label); + include_data(data); +} + +// Produce a challenge +Scalar Transcript::challenge(const std::string label) { + // Ensure we can properly populate a scalar + if (EVP_MD_size(EVP_sha512()) < SCALAR_ENCODING) { + throw std::runtime_error("Bad hash size!"); + } + + std::vector hash; + hash.resize(EVP_MD_size(EVP_sha512())); + unsigned char counter = 0; + + EVP_MD_CTX* state_counter; + state_counter = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_counter, EVP_sha512(), NULL); + + EVP_MD_CTX* state_finalize; + state_finalize = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_finalize, EVP_sha512(), NULL); + + include_flag(FLAG_CHALLENGE); + include_label(label); + + while (1) { + // Prepare temporary state for counter testing + EVP_MD_CTX_copy_ex(state_counter, this->ctx); + + // Embed the counter + 
EVP_DigestUpdate(state_counter, &counter, sizeof(counter)); + + // Finalize the hash with a temporary state + EVP_MD_CTX_copy_ex(state_finalize, state_counter); + unsigned int TEMP; // We already know the digest length! + EVP_DigestFinal_ex(state_finalize, hash.data(), &TEMP); + + // Check for scalar validity + Scalar candidate; + try { + candidate.deserialize(hash.data()); + EVP_MD_CTX_copy_ex(this->ctx, state_counter); + + EVP_MD_CTX_free(state_counter); + EVP_MD_CTX_free(state_finalize); + + return candidate; + } catch (const std::exception &) { + counter++; + } + } +} + +// Encode and include a size +void Transcript::size(const std::size_t size_) { + Scalar size_scalar(size_); + std::vector size_data; + size_data.resize(SCALAR_ENCODING); + size_scalar.serialize(size_data.data()); + EVP_DigestUpdate(this->ctx, size_data.data(), size_data.size()); +} + +// Include a flag +void Transcript::include_flag(const unsigned char flag) { + EVP_DigestUpdate(this->ctx, &flag, sizeof(flag)); +} + +// Encode and include a label +void Transcript::include_label(const std::string label) { + std::vector bytes(label.begin(), label.end()); + include_data(bytes); +} + +// Encode and include data +void Transcript::include_data(const std::vector& data) { + // Include size + size(data.size()); + + // Include data + EVP_DigestUpdate(this->ctx, data.data(), data.size()); +} + +} diff --git a/src/libspark/transcript.h b/src/libspark/transcript.h new file mode 100644 index 0000000000..eef2f9f59b --- /dev/null +++ b/src/libspark/transcript.h @@ -0,0 +1,32 @@ +#ifndef FIRO_SPARK_TRANSCRIPT_H +#define FIRO_SPARK_TRANSCRIPT_H +#include +#include "util.h" + +namespace spark { + +using namespace secp_primitives; + +class Transcript { +public: + Transcript(const std::string); + Transcript& operator=(const Transcript&); + ~Transcript(); + void add(const std::string, const Scalar&); + void add(const std::string, const std::vector&); + void add(const std::string, const GroupElement&); + void 
add(const std::string, const std::vector&); + void add(const std::string, const std::vector&); + Scalar challenge(const std::string); + +private: + void size(const std::size_t size_); + void include_flag(const unsigned char); + void include_label(const std::string); + void include_data(const std::vector&); + EVP_MD_CTX* ctx; +}; + +} + +#endif diff --git a/src/libspark/util.cpp b/src/libspark/util.cpp new file mode 100644 index 0000000000..4547251320 --- /dev/null +++ b/src/libspark/util.cpp @@ -0,0 +1,249 @@ +#include "util.h" + +namespace spark { + +using namespace secp_primitives; + +// Encrypt a diversifier using AES-256 +std::vector SparkUtils::diversifier_encrypt(const std::vector& key, const uint64_t i) { + // Serialize the diversifier + CDataStream i_stream(SER_NETWORK, PROTOCOL_VERSION); + i_stream << i; + + // Assert proper sizes + if (key.size() != AES256_KEYSIZE) { + throw std::invalid_argument("Bad diversifier encryption key size"); + } + + // Encrypt using padded AES-256 (CBC) using a zero IV + std::vector ciphertext; + ciphertext.resize(AES_BLOCKSIZE); + std::vector iv; + iv.resize(AES_BLOCKSIZE); + + AES256CBCEncrypt aes(key.data(), iv.data(), true); + std::vector plaintext; + plaintext.insert(plaintext.begin(), i_stream.begin(), i_stream.end()); + plaintext.resize(AES_BLOCKSIZE); + aes.Encrypt(plaintext.data(), i_stream.size(), ciphertext.data()); + + return ciphertext; +} + +// Decrypt a diversifier using AES-256 +uint64_t SparkUtils::diversifier_decrypt(const std::vector& key, const std::vector& d) { + // Assert proper sizes + if (key.size() != AES256_KEYSIZE) { + throw std::invalid_argument("Bad diversifier decryption key size"); + } + + std::vector iv; + iv.resize(AES_BLOCKSIZE); + + AES256CBCDecrypt aes(key.data(), iv.data(), true); + std::vector plaintext; + plaintext.resize(AES_BLOCKSIZE); + aes.Decrypt(d.data(), d.size(), plaintext.data()); + + // Decrypt using padded AES-256 (CBC) using a zero IV + CDataStream i_stream(SER_NETWORK, 
PROTOCOL_VERSION); + i_stream.write((const char *)plaintext.data(), sizeof(uint64_t)); + // Deserialize the diversifier + uint64_t i; + i_stream >> i; + + return i; +} + +// Produce a uniformly-sampled group element from a label +GroupElement SparkUtils::hash_generator(const std::string label) { + const int GROUP_ENCODING = 34; + const unsigned char ZERO = 0; + + // Ensure we can properly populate a group element encoding + if (EVP_MD_size(EVP_sha512()) < GROUP_ENCODING) { + throw std::runtime_error("Bad hash size!"); + } + + EVP_MD_CTX* ctx; + ctx = EVP_MD_CTX_new(); + EVP_DigestInit_ex(ctx, EVP_sha512(), NULL); + + // Write the protocol and mode + std::vector protocol(LABEL_PROTOCOL.begin(), LABEL_PROTOCOL.end()); + EVP_DigestUpdate(ctx, protocol.data(), protocol.size()); + EVP_DigestUpdate(ctx, &HASH_MODE_GROUP_GENERATOR, sizeof(HASH_MODE_GROUP_GENERATOR)); + + // Write the label + std::vector bytes(label.begin(), label.end()); + EVP_DigestUpdate(ctx, bytes.data(), bytes.size()); + + std::vector hash; + hash.resize(EVP_MD_size(EVP_sha512())); + unsigned char counter = 0; + + EVP_MD_CTX* state_counter; + state_counter = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_counter, EVP_sha512(), NULL); + + EVP_MD_CTX* state_finalize; + state_finalize = EVP_MD_CTX_new(); + EVP_DigestInit_ex(state_finalize, EVP_sha512(), NULL); + + // Finalize the hash + while (1) { + // Prepare temporary state for counter testing + EVP_MD_CTX_copy_ex(state_counter, ctx); + + // Embed the counter + EVP_DigestUpdate(state_counter, &counter, sizeof(counter)); + + // Finalize the hash with a temporary state + EVP_MD_CTX_copy_ex(state_finalize, state_counter); + unsigned int TEMP; // We already know the digest length! 
+ EVP_DigestFinal_ex(state_finalize, hash.data(), &TEMP); + + // Assemble the serialized input: + // bytes 0..31: x coordinate + // byte 32: even/odd + // byte 33: zero (this point is not infinity) + unsigned char candidate_bytes[GROUP_ENCODING]; + memcpy(candidate_bytes, hash.data(), 33); + memcpy(candidate_bytes + 33, &ZERO, 1); + GroupElement candidate; + try { + candidate.deserialize(candidate_bytes); + + // Deserialization can succeed even with an invalid result + if (!candidate.isMember()) { + counter++; + continue; + } + + EVP_MD_CTX_free(ctx); + EVP_MD_CTX_free(state_counter); + EVP_MD_CTX_free(state_finalize); + + return candidate; + } catch (const std::exception &) { + counter++; + } + } +} + +// Derive an AES key for diversifier encryption/decryption +std::vector SparkUtils::kdf_diversifier(const Scalar& s1) { + KDF kdf(LABEL_KDF_DIVERSIFIER, AES256_KEYSIZE); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << s1; + kdf.include(stream); + + return kdf.finalize(); +} + +// Derive a ChaCha20 key for AEAD operations +std::vector SparkUtils::kdf_aead(const GroupElement& K_der) { + KDF kdf(LABEL_KDF_AEAD, AEAD_KEY_SIZE); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << K_der; + kdf.include(stream); + + return kdf.finalize(); +} + +// Derive a ChaCha20 key commitment for AEAD operations +std::vector SparkUtils::commit_aead(const GroupElement& K_der) { + // We use a KDF here because of the output size + KDF kdf(LABEL_COMMIT_AEAD, AEAD_COMMIT_SIZE); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << K_der; + kdf.include(stream); + + return kdf.finalize(); +} + +// Hash-to-group function H_div +GroupElement SparkUtils::hash_div(const std::vector& d) { + Hash hash(LABEL_HASH_DIV); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << d; + hash.include(stream); + + return hash.finalize_group(); +} + +// Hash-to-scalar function H_Q2 +Scalar SparkUtils::hash_Q2(const Scalar& s1, const Scalar& i) { + Hash 
hash(LABEL_HASH_Q2); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << s1; + stream << i; + hash.include(stream); + + return hash.finalize_scalar(); +} + +// Hash-to-scalar function H_k +Scalar SparkUtils::hash_k(const Scalar& k) { + Hash hash(LABEL_HASH_K); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << k; + hash.include(stream); + + return hash.finalize_scalar(); +} + +// Hash-to-scalar function H_ser +Scalar SparkUtils::hash_ser(const Scalar& k, const std::vector& serial_context) { + Hash hash(LABEL_HASH_SER); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << k; + stream << serial_context; + hash.include(stream); + + return hash.finalize_scalar(); +} + +// Hash-to-scalar function H_val +Scalar SparkUtils::hash_val(const Scalar& k) { + Hash hash(LABEL_HASH_VAL); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << k; + hash.include(stream); + + return hash.finalize_scalar(); +} + +// Hash-to-scalar function H_ser1 +Scalar SparkUtils::hash_ser1(const Scalar& s, const GroupElement& D) { + Hash hash(LABEL_HASH_SER1); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << s; + stream << D; + hash.include(stream); + + return hash.finalize_scalar(); +} + +// Hash-to-scalar function H_val1 +Scalar SparkUtils::hash_val1(const Scalar& s, const GroupElement& D) { + Hash hash(LABEL_HASH_VAL1); + + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); + stream << s; + stream << D; + hash.include(stream); + + return hash.finalize_scalar(); +} + +} diff --git a/src/libspark/util.h b/src/libspark/util.h new file mode 100644 index 0000000000..015c67e074 --- /dev/null +++ b/src/libspark/util.h @@ -0,0 +1,103 @@ +#ifndef FIRO_SPARK_UTIL_H +#define FIRO_SPARK_UTIL_H +#include "../secp256k1/include/Scalar.h" +#include "../secp256k1/include/GroupElement.h" +#include "../crypto/aes.h" +#include "../streams.h" +#include "../version.h" +#include "../util.h" +#include "kdf.h" +#include "hash.h" + +namespace 
spark { + +using namespace secp_primitives; + +// Useful serialization constant +const std::size_t SCALAR_ENCODING = 32; + +// Base protocol separator +const std::string LABEL_PROTOCOL = "SPARK"; + +// All hash operations have a mode flag to separate their use cases +const unsigned char HASH_MODE_TRANSCRIPT = 0; // a Fiat-Shamir transcript +const unsigned char HASH_MODE_GROUP_GENERATOR = 1; // a prime-order group generator derived from a label +const unsigned char HASH_MODE_FUNCTION = 2; // a hash function derived from a label +const unsigned char HASH_MODE_KDF = 3; // a key derivation function derived from a label + +// Transcript labels +const std::string LABEL_TRANSCRIPT_BPPLUS = "BULLETPROOF_PLUS_V1"; +const std::string LABEL_TRANSCRIPT_CHAUM = "CHAUM_V1"; +const std::string LABEL_TRANSCRIPT_GROOTLE = "GROOTLE_V1"; +const std::string LABEL_TRANSCRIPT_SCHNORR = "SCHNORR_V1"; + +// Generator labels +const std::string LABEL_GENERATOR_F = "F"; +const std::string LABEL_GENERATOR_H = "H"; +const std::string LABEL_GENERATOR_U = "U"; +const std::string LABEL_GENERATOR_G_RANGE = "G_RANGE"; +const std::string LABEL_GENERATOR_H_RANGE = "H_RANGE"; +const std::string LABEL_GENERATOR_G_GROOTLE = "G_GROOTLE"; +const std::string LABEL_GENERATOR_H_GROOTLE = "H_GROOTLE"; + +// Hash function labels +const std::string LABEL_HASH_DIV = "DIV"; +const std::string LABEL_HASH_Q2 = "Q2"; +const std::string LABEL_HASH_K = "K"; +const std::string LABEL_HASH_SER = "SER"; +const std::string LABEL_HASH_VAL = "VAL"; +const std::string LABEL_HASH_SER1 = "SER1"; +const std::string LABEL_HASH_VAL1 = "VAL1"; +const std::string LABEL_HASH_BIND_INNER = "BIND_INNER"; +const std::string LABEL_HASH_BIND = "BIND"; +const std::string LABEL_F4GRUMBLE_G = "SPARK_F4GRUMBLE_G"; +const std::string LABEL_F4GRUMBLE_H = "SPARK_F4GRUMBLE_H"; + +// KDF labels +const std::string LABEL_KDF_DIVERSIFIER = "DIVERSIFIER"; +const std::string LABEL_KDF_AEAD = "AEAD"; +const std::string LABEL_COMMIT_AEAD = "COMMIT_AEAD"; 
+ +// AEAD constants +const int AEAD_IV_SIZE = 12; // byte length of the IV +const int AEAD_KEY_SIZE = 32; // byte length of the key +const int AEAD_TAG_SIZE = 16; // byte length of the tag +const int AEAD_COMMIT_SIZE = 32; // byte length of the key commitment + +// Address encoding prefix +const unsigned char ADDRESS_ENCODING_PREFIX = 's'; + +// Address encoding network identifiers +// TODO: Extend/update/replace these as needed! These are just initial examples +const unsigned char ADDRESS_NETWORK_MAINNET = 'm'; +const unsigned char ADDRESS_NETWORK_TESTNET = 't'; +const unsigned char ADDRESS_NETWORK_REGTEST = 'r'; +const unsigned char ADDRESS_NETWORK_DEVNET = 'd'; + +class SparkUtils { +public: + // Protocol-level hash functions + static GroupElement hash_generator(const std::string label); + + // Hash functions + static GroupElement hash_div(const std::vector& d); + static Scalar hash_Q2(const Scalar& s1, const Scalar& i); + static Scalar hash_k(const Scalar& k); + static Scalar hash_ser(const Scalar& k, const std::vector& serial_context); + static Scalar hash_val(const Scalar& k); + static Scalar hash_ser1(const Scalar& s, const GroupElement& D); + static Scalar hash_val1(const Scalar& s, const GroupElement& D); + + // Key derivation functions + static std::vector kdf_diversifier(const Scalar& s1); + static std::vector kdf_aead(const GroupElement& K_der); + static std::vector commit_aead(const GroupElement& K_der); + + // Diversifier encryption/decryption + static std::vector diversifier_encrypt(const std::vector& key, const uint64_t i); + static uint64_t diversifier_decrypt(const std::vector& key, const std::vector& d); +}; + +} + +#endif diff --git a/src/llmq/quorums_instantsend.cpp b/src/llmq/quorums_instantsend.cpp index a9e9310f38..9db3cade75 100644 --- a/src/llmq/quorums_instantsend.cpp +++ b/src/llmq/quorums_instantsend.cpp @@ -18,7 +18,7 @@ #ifdef ENABLE_WALLET #include "wallet/wallet.h" #endif - +#include "primitives/mint_spend.h" #include #include 
#include @@ -42,7 +42,7 @@ uint256 CInstantSendLock::GetRequestId() const namespace isutils { static int16_t const INSTANTSEND_ADAPTED_TX = std::numeric_limits::min(); -CTransaction AdaptJsplitTx(CTransaction const & tx); +CTransaction AdaptPrivateTx(CTransaction const & tx); } @@ -491,12 +491,22 @@ bool CInstantSendManager::CheckCanLock(const CTransaction& tx, bool printDebug, } if (tx.nType == isutils::INSTANTSEND_ADAPTED_TX ) { - for (CTxIn const & in : tx.vin) { - Scalar serial; - serial.deserialize(&in.scriptSig.front()); - LOCK(cs_main); - if (lelantus::CLelantusState::GetState()->IsUsedCoinSerial(serial)) - return false; + if (tx.IsLelantusJoinSplit()) { + for (CTxIn const & in : tx.vin) { + Scalar serial; + serial.deserialize(&in.scriptSig.front()); + LOCK(cs_main); + if (lelantus::CLelantusState::GetState()->IsUsedCoinSerial(serial)) + return false; + } + } else if (tx.IsSparkSpend()) { + for (CTxIn const & in : tx.vin) { + GroupElement lTag; + lTag.deserialize(&in.scriptSig.front()); + LOCK(cs_main); + if (spark::CSparkState::GetState()->IsUsedLTag(lTag)) + return false; + } } return true; } @@ -607,8 +617,8 @@ void CInstantSendManager::HandleNewInputLockRecoveredSig(const CRecoveredSig& re return; } - if(tx && tx->IsLelantusJoinSplit()) { - tx = MakeTransactionRef(isutils::AdaptJsplitTx(*tx)); + if (tx && (tx->IsLelantusJoinSplit() || tx->IsSparkSpend())) { + tx = MakeTransactionRef(isutils::AdaptPrivateTx(*tx)); } if (LogAcceptCategory("instantsend")) { @@ -1013,7 +1023,7 @@ void CInstantSendManager::SyncTransaction(const CTransaction& tx_, const CBlockI return; } - CTransaction const & tx{tx_.IsLelantusJoinSplit() ? isutils::AdaptJsplitTx(tx_) : tx_}; + CTransaction const & tx{(tx_.IsLelantusJoinSplit() || tx_.IsSparkSpend()) ? 
isutils::AdaptPrivateTx(tx_) : tx_}; bool inMempool = mempool.get(tx.GetHash()) != nullptr; bool isDisconnect = pindex && posInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK; @@ -1497,7 +1507,7 @@ CInstantSendLockPtr CInstantSendManager::GetConflictingLock(const CTransaction& return nullptr; } - CTransaction const & tx{tx_.IsLelantusJoinSplit() ? isutils::AdaptJsplitTx(tx_) : tx_}; + CTransaction const & tx{(tx_.IsLelantusJoinSplit() || tx_.IsSparkSpend()) ? isutils::AdaptPrivateTx(tx_) : tx_}; LOCK(cs); for (const auto& in : tx.vin) { @@ -1567,6 +1577,43 @@ CTransaction AdaptJsplitTx(CTransaction const & tx) assert(result.GetHash() == tx.GetHash()); return result; } + +CTransaction AdaptSparkTx(CTransaction const & tx) +{ + static size_t const lTagSerialSize = 34; + + CTransaction result{tx}; + std::unique_ptr spend; + try { + spend = std::make_unique(spark::ParseSparkSpend(tx)); + } + catch (...) { + return result; + } + + const_cast*>(&result.vin)->clear(); //This const_cast was done intentionally as the current design allows for this way only + for (GroupElement const & lTag : spend->getUsedLTags()) { + CTxIn newin; + newin.scriptSig.resize(lTagSerialSize); + lTag.serialize(&newin.scriptSig.front()); + newin.prevout.hash = primitives::GetLTagHash(lTag); + newin.prevout.n = 0; + const_cast*>(&result.vin)->push_back(newin); + } + *const_cast(&result.nType) = INSTANTSEND_ADAPTED_TX; + assert(result.GetHash() == tx.GetHash()); + return result; +} + +CTransaction AdaptPrivateTx(CTransaction const & tx) +{ + if (tx.IsLelantusJoinSplit()) { + return AdaptJsplitTx(tx); + } else if (tx.IsSparkSpend()) { + return AdaptSparkTx(tx); + } + return tx; +} } } diff --git a/src/miner.cpp b/src/miner.cpp index bf29aec84e..fee39aaff0 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -25,7 +25,9 @@ #include "util.h" #include "utilmoneystr.h" #include "validationinterface.h" +#ifdef ENABLE_WALLET #include "wallet/wallet.h" +#endif // ENABLE_WALLET #include "definition.h" 
#include "crypto/scrypt.h" #include "crypto/MerkleTreeProof/mtp.h" @@ -153,6 +155,9 @@ void BlockAssembler::resetBlock() nLelantusSpendAmount = 0; nLelantusSpendInputs = 0; + + nSparkSpendAmount = 0; + nSparkSpendInputs = 0; } std::unique_ptr BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn, bool fMineWitnessTx) @@ -457,6 +462,18 @@ bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter) return false; } + // Check transaction against spark limits + if(tx.IsSparkSpend()) { + CAmount spendAmount = spark::GetSpendTransparentAmount(tx); + const auto ¶ms = chainparams.GetConsensus(); + + if (spendAmount > params.nMaxValueSparkSpendPerTransaction) + return false; + + if (spendAmount + nSparkSpendAmount > params.nMaxValueSparkSpendPerBlock) + return false; + } + return true; } @@ -489,6 +506,17 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter) return; } + if(tx.IsSparkSpend()) { + CAmount spendAmount = spark::GetSpendTransparentAmount(tx); + const auto ¶ms = chainparams.GetConsensus(); + + if (spendAmount > params.nMaxValueSparkSpendPerTransaction) + return; + + if ((nSparkSpendAmount += spendAmount) > params.nMaxValueSparkSpendPerBlock) + return; + } + pblock->vtx.emplace_back(iter->GetSharedTx()); pblocktemplate->vTxFees.push_back(iter->GetFee()); pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost()); @@ -914,7 +942,7 @@ void BlockAssembler::FillBlackListForBlockTemplate() { // transactions depending (directly or not) on sigma spends in the mempool cannot be included in the // same block with spend transaction - if (tx.IsSigmaSpend() || tx.IsLelantusJoinSplit()) { + if (tx.IsSigmaSpend() || tx.IsLelantusJoinSplit() || tx.IsSparkSpend()) { mempool.CalculateDescendants(mi, txBlackList); // remove privacy transaction itself txBlackList.erase(mi); diff --git a/src/miner.h b/src/miner.h index aec48edb0c..b6348ab94e 100644 --- a/src/miner.h +++ b/src/miner.h @@ -174,6 +174,10 @@ class BlockAssembler CAmount nLelantusSpendAmount; size_t 
nLelantusSpendInputs; + // spark spend limits + CAmount nSparkSpendAmount; + size_t nSparkSpendInputs; + // transactions we cannot include in this block CTxMemPool::setEntries txBlackList; diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 5f13132cd1..ef3463589c 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -91,7 +91,7 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnes return false; } - if (!txin.scriptSig.IsZerocoinSpend() && !txin.scriptSig.IsSigmaSpend() && !txin.scriptSig.IsLelantusJoinSplit() && !txin.IsZerocoinRemint()) { + if (!txin.scriptSig.IsZerocoinSpend() && !txin.scriptSig.IsSigmaSpend() && !txin.scriptSig.IsLelantusJoinSplit() && !txin.scriptSig.IsSparkSpend() && !txin.IsZerocoinRemint()) { if (!txin.scriptSig.IsPushOnly()) { reason = "scriptsig-not-pushonly"; return false; @@ -133,7 +133,12 @@ bool IsStandardTx(const CTransaction& tx, std::string& reason, const bool witnes bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { - if (tx.IsCoinBase() || tx.IsZerocoinSpend() || tx.IsSigmaSpend() || tx.IsZerocoinRemint() || tx.IsLelantusJoinSplit()) + if (tx.IsCoinBase() + || tx.IsZerocoinSpend() + || tx.IsSigmaSpend() + || tx.IsZerocoinRemint() + || tx.IsLelantusJoinSplit() + || tx.IsSparkSpend()) return true; // Coinbases don't use vin normally for (unsigned int i = 0; i < tx.vin.size(); i++) @@ -167,7 +172,12 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { - if (tx.IsCoinBase() || tx.IsZerocoinSpend() || tx.IsSigmaSpend() || tx.IsZerocoinRemint() || tx.IsLelantusJoinSplit()) + if (tx.IsCoinBase() + || tx.IsZerocoinSpend() + || tx.IsSigmaSpend() + || tx.IsZerocoinRemint() + || tx.IsLelantusJoinSplit() + || tx.IsSparkSpend()) return true; // Coinbases are skipped for (unsigned int i = 0; i < tx.vin.size(); i++) diff --git
a/src/primitives/block.h b/src/primitives/block.h index a54adda409..d7ce6ce111 100644 --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -29,6 +29,11 @@ class CLelantusTxInfo; } // namespace lelantus +namespace spark { + class CSparkTxInfo; + +} // namespace spark + unsigned char GetNfactor(int64_t nTimestamp); /** Nodes collect new transactions into a block, hash them into a hash tree, @@ -289,6 +294,8 @@ class CBlock : public CBlockHeader mutable std::shared_ptr lelantusTxInfo; + mutable std::shared_ptr sparkTxInfo; + CBlock() { SetNull(); diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp index c03fe6cc83..e36aaa986d 100644 --- a/src/primitives/transaction.cpp +++ b/src/primitives/transaction.cpp @@ -210,6 +210,27 @@ bool CTransaction::IsLelantusMint() const return false; } +bool CTransaction::IsSparkTransaction() const +{ + return IsSparkMint() || IsSparkSpend(); +} + +bool CTransaction::IsSparkSpend() const +{ + if (nVersion >= 3 && nType == TRANSACTION_SPARK) + return true; + return false; +} + +bool CTransaction::IsSparkMint() const +{ + for (const CTxOut &txout: vout) { + if (txout.scriptPubKey.IsSparkMint()) + return true; + } + return false; +} + bool CTransaction::IsZerocoinTransaction() const { return IsZerocoinSpend() || IsZerocoinMint(); @@ -235,7 +256,11 @@ bool CTransaction::IsZerocoinRemint() const } bool CTransaction::HasNoRegularInputs() const { - return IsZerocoinSpend() || IsSigmaSpend() || IsZerocoinRemint() || IsLelantusJoinSplit(); + return IsZerocoinSpend() || IsSigmaSpend() || IsZerocoinRemint() || IsLelantusJoinSplit() || IsSparkSpend(); +} + +bool CTransaction::HasPrivateInputs() const { + return IsSigmaSpend() || IsLelantusJoinSplit() || IsSparkSpend(); } unsigned int CTransaction::CalculateModifiedSize(unsigned int nTxSize) const diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index ae04b04deb..8ade56f1d5 100644 --- a/src/primitives/transaction.h +++ 
b/src/primitives/transaction.h @@ -35,7 +35,8 @@ enum { TRANSACTION_COINBASE = 5, TRANSACTION_QUORUM_COMMITMENT = 6, TRANSACTION_SPORK = 7, - TRANSACTION_LELANTUS = 8 + TRANSACTION_LELANTUS = 8, + TRANSACTION_SPARK = 9 }; /** An outpoint - a combination of a transaction hash and an index n into its vout */ @@ -453,7 +454,12 @@ class CTransaction bool IsZerocoinRemint() const; + bool IsSparkTransaction() const; + bool IsSparkSpend() const; + bool IsSparkMint() const; + bool HasNoRegularInputs() const; + bool HasPrivateInputs() const; /** * Get the total transaction size in bytes, including witness data. @@ -468,7 +474,8 @@ class CTransaction || (vin[0].scriptSig[0] != OP_ZEROCOINSPEND && vin[0].scriptSig[0] != OP_ZEROCOINTOSIGMAREMINT && vin[0].scriptSig[0] != OP_LELANTUSJOINSPLIT - && vin[0].scriptSig[0] != OP_LELANTUSJOINSPLITPAYLOAD))); + && vin[0].scriptSig[0] != OP_LELANTUSJOINSPLITPAYLOAD + && vin[0].scriptSig[0] != OP_SPARKSPEND))); } friend bool operator==(const CTransaction& a, const CTransaction& b) diff --git a/src/qt/addressbookpage.cpp b/src/qt/addressbookpage.cpp index 34c2d80330..7ba41d25e6 100644 --- a/src/qt/addressbookpage.cpp +++ b/src/qt/addressbookpage.cpp @@ -23,7 +23,7 @@ #include #include -AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, Tabs _tab, QWidget *parent) : +AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, Tabs _tab, QWidget *parent, bool isReused) : QDialog(parent), ui(new Ui::AddressBookPage), model(0), @@ -32,6 +32,19 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, { ui->setupUi(this); + if (tab == SendingTab) { + ui->addressType->addItem(tr("Spark"), Spark); + ui->addressType->addItem(tr("Transparent"), Transparent); + ui->addressType->addItem(tr("RAP"), RAP); + } else if(tab == ReceivingTab && !isReused) { + ui->addressType->addItem(tr("Spark"), Spark); + ui->addressType->addItem(tr("Transparent"), Transparent); + } else { + 
ui->addressType->addItem(tr(""), Transparent); + ui->addressType->addItem(tr("Transparent"), Transparent); + ui->addressType->hide(); + } + if (!platformStyle->getImagesOnButtons()) { ui->newAddress->setIcon(QIcon()); ui->copyAddress->setIcon(QIcon()); @@ -53,7 +66,6 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, case ReceivingTab: setWindowTitle(tr("Choose the address to receive coins with")); break; } connect(ui->tableView, &QTableView::doubleClicked, this, &QDialog::accept); - connect(ui->tableViewPcodes, &QTableView::doubleClicked, this, &QDialog::accept); ui->tableView->setEditTriggers(QAbstractItemView::NoEditTriggers); ui->tableView->setFocus(); ui->closeButton->setText(tr("C&hoose")); @@ -72,14 +84,10 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, case SendingTab: ui->labelExplanation->setText(tr("These are your Firo addresses for sending payments. Always check the amount and the receiving address before sending coins.")); ui->deleteAddress->setVisible(true); - connect(ui->tabWidget, &QTabWidget::currentChanged, this, &AddressBookPage::selectionChanged); - connect(ui->tableViewPcodes, &QWidget::customContextMenuRequested, this, &AddressBookPage::contextualMenu); break; case ReceivingTab: ui->labelExplanation->setText(tr("These are your Firo addresses for receiving payments. 
It is recommended to use a new receiving address for each transaction.")); ui->deleteAddress->setVisible(false); - ui->tabWidget->removeTab(1); //RAP Pcodes tab - ui->tabWidget->tabBar()->setVisible(false); break; } @@ -121,11 +129,8 @@ void AddressBookPage::setModel(AddressTableModel *_model) return; proxyModel = new QSortFilterProxyModel(this); - proxyModel->setSourceModel(_model); - proxyModel->setDynamicSortFilter(true); - proxyModel->setSortCaseSensitivity(Qt::CaseInsensitive); - proxyModel->setFilterCaseSensitivity(Qt::CaseInsensitive); - + fproxyModel = new AddressBookFilterProxy(this); + proxyModel->setSourceModel(model); switch(tab) { case ReceivingTab: @@ -137,59 +142,46 @@ void AddressBookPage::setModel(AddressTableModel *_model) // Send filter proxyModel->setFilterRole(AddressTableModel::TypeRole); proxyModel->setFilterFixedString(AddressTableModel::Send); - - proxyModelPcode = new QSortFilterProxyModel(this); - proxyModelPcode->setSourceModel(_model->getPcodeAddressTableModel()); - proxyModelPcode->setDynamicSortFilter(true); - proxyModelPcode->setSortCaseSensitivity(Qt::CaseInsensitive); - proxyModelPcode->setFilterCaseSensitivity(Qt::CaseInsensitive); - ui->tableViewPcodes->setModel(proxyModelPcode); - ui->tableViewPcodes->sortByColumn(0, Qt::AscendingOrder); - connect(ui->tableViewPcodes->selectionModel(), &QItemSelectionModel::selectionChanged, this, &AddressBookPage::selectionChanged); - -#if QT_VERSION < 0x050000 - ui->tableViewPcodes->horizontalHeader()->setResizeMode(AddressTableModel::Label, QHeaderView::Stretch); - ui->tableViewPcodes->horizontalHeader()->setResizeMode(AddressTableModel::Address, QHeaderView::ResizeToContents); -#else - ui->tableViewPcodes->horizontalHeader()->setSectionResizeMode(AddressTableModel::Label, QHeaderView::Stretch); - ui->tableViewPcodes->horizontalHeader()->setSectionResizeMode(AddressTableModel::Address, QHeaderView::ResizeToContents); -#endif break; } - ui->tableView->setModel(proxyModel); - 
ui->tableView->sortByColumn(0, Qt::AscendingOrder); - + proxyModel->setDynamicSortFilter(true); + proxyModel->setSortCaseSensitivity(Qt::CaseInsensitive); + proxyModel->setFilterCaseSensitivity(Qt::CaseInsensitive); + + fproxyModel->setSourceModel(proxyModel); + fproxyModel->setDynamicSortFilter(true); + fproxyModel->setSortCaseSensitivity(Qt::CaseInsensitive); + fproxyModel->setFilterCaseSensitivity(Qt::CaseInsensitive); + ui->tableView->setModel(fproxyModel); // Set column widths -#if QT_VERSION < 0x050000 - ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::Label, QHeaderView::Stretch); - ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::Address, QHeaderView::ResizeToContents); -#else - ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::Label, QHeaderView::Stretch); - ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::Address, QHeaderView::ResizeToContents); -#endif - + #if QT_VERSION < 0x050000 + ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::Label, QHeaderView::Stretch); + ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::Address, QHeaderView::Stretch); + ui->tableView->horizontalHeader()->setResizeMode(AddressTableModel::AddressType, QHeaderView::Stretch); + #else + ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::Label, QHeaderView::Stretch); + ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::Address, QHeaderView::Stretch); + ui->tableView->horizontalHeader()->setSectionResizeMode(AddressTableModel::AddressType, QHeaderView::Stretch); + #endif + ui->tableView->setTextElideMode(Qt::ElideMiddle); connect(ui->tableView->selectionModel(), &QItemSelectionModel::selectionChanged, this, &AddressBookPage::selectionChanged); // Select row for newly created address - connect(_model, &AddressTableModel::rowsInserted, this, &AddressBookPage::selectNewAddress); + connect(model, 
&AddressTableModel::rowsInserted, this, &AddressBookPage::selectNewAddress); selectionChanged(); + chooseAddressType(0); + connect(ui->addressType, qOverload(&QComboBox::activated), this, &AddressBookPage::chooseAddressType); } void AddressBookPage::on_copyAddress_clicked() { - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - GUIUtil::copyEntryData(ui->tableView, AddressTableModel::Address); - else - GUIUtil::copyEntryData(ui->tableViewPcodes, AddressTableModel::Address); + GUIUtil::copyEntryData(ui->tableView, AddressTableModel::Address); } void AddressBookPage::onCopyLabelAction() { - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - GUIUtil::copyEntryData(ui->tableView, AddressTableModel::Label); - else - GUIUtil::copyEntryData(ui->tableViewPcodes, AddressTableModel::Label); + GUIUtil::copyEntryData(ui->tableView, AddressTableModel::Label); } void AddressBookPage::onEditAction() @@ -198,32 +190,27 @@ void AddressBookPage::onEditAction() EditAddressDialog::Mode mode; AddressTableModel * pmodel; - QSortFilterProxyModel *pproxyModel; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - { - mode = tab == SendingTab ? EditAddressDialog::EditSendingAddress : EditAddressDialog::EditReceivingAddress; - pmodel = model; - pproxyModel = proxyModel; - if(!ui->tableView->selectionModel()) - return; - indexes = ui->tableView->selectionModel()->selectedRows(); - } - else - { + pmodel = model; + if (ui->addressType->currentText() == AddressTableModel::RAP) { mode = EditAddressDialog::EditPcode; - pmodel = model->getPcodeAddressTableModel(); - pproxyModel = proxyModelPcode; - if(!ui->tableViewPcodes->selectionModel()) - return; - indexes = ui->tableViewPcodes->selectionModel()->selectedRows(); + } else if (ui->addressType->currentText() == AddressTableModel::Transparent) { + mode = tab == SendingTab ? EditAddressDialog::EditSendingAddress : EditAddressDialog::EditReceivingAddress; + } else { + mode = tab == SendingTab ? 
EditAddressDialog::EditSparkSendingAddress : EditAddressDialog::EditSparkReceivingAddress; } - if(!pmodel || indexes.isEmpty()) + + if (!ui->tableView->selectionModel()) + return; + indexes = ui->tableView->selectionModel()->selectedRows(); + if (!pmodel || indexes.isEmpty()) return; EditAddressDialog dlg(mode, this); dlg.setModel(pmodel); - QModelIndex origIndex = pproxyModel->mapToSource(indexes.at(0)); - dlg.loadRow(origIndex.row()); + QModelIndex origIndex1, origIndex2; + origIndex1 = fproxyModel->mapToSource(indexes.at(0)); + origIndex2 = proxyModel->mapToSource(origIndex1); + dlg.loadRow(origIndex2.row()); dlg.exec(); } @@ -234,15 +221,13 @@ void AddressBookPage::on_newAddress_clicked() AddressTableModel *pmodel; EditAddressDialog::Mode mode; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - { - pmodel = model; - mode = tab == SendingTab ? EditAddressDialog::NewSendingAddress : EditAddressDialog::NewReceivingAddress; - } - else - { - pmodel = model->getPcodeAddressTableModel(); + pmodel = model; + if (ui->addressType->currentText() == AddressTableModel::Spark) { + mode = tab == SendingTab ? EditAddressDialog::NewSparkSendingAddress : EditAddressDialog::NewSparkReceivingAddress; + } else if (ui->addressType->currentText() == AddressTableModel::RAP) { mode = EditAddressDialog::NewPcode; + } else { + mode = tab == SendingTab ? 
EditAddressDialog::NewSendingAddress : EditAddressDialog::NewReceivingAddress; } EditAddressDialog dlg(mode, this); @@ -256,15 +241,13 @@ void AddressBookPage::on_newAddress_clicked() void AddressBookPage::on_deleteAddress_clicked() { QTableView *table; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - table = ui->tableView; - else - table = ui->tableViewPcodes; + table = ui->tableView; if(!table->selectionModel()) return; QModelIndexList indexes = table->selectionModel()->selectedRows(); + if(!indexes.isEmpty()) { table->model()->removeRow(indexes.at(0).row()); @@ -275,10 +258,7 @@ void AddressBookPage::selectionChanged() { // Set button states based on selected tab and selection QTableView *table; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - table = ui->tableView; - else - table = ui->tableViewPcodes; + table = ui->tableView; if(!table->selectionModel()) return; @@ -300,6 +280,7 @@ void AddressBookPage::selectionChanged() deleteAction->setEnabled(false); break; } + ui->copyAddress->setEnabled(true); } else @@ -312,10 +293,7 @@ void AddressBookPage::selectionChanged() void AddressBookPage::done(int retval) { QTableView *table; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - table = ui->tableView; - else - table = ui->tableViewPcodes; + table = ui->tableView; if(!table->selectionModel() || !table->model()) return; @@ -350,17 +328,19 @@ void AddressBookPage::on_exportButton_clicked() CSVModelWriter writer(filename); QTableView *table; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - { - writer.setModel(proxyModel); + writer.setModel(proxyModel); + if (ui->addressType->currentText() == AddressTableModel::Transparent) { writer.addColumn("Label", AddressTableModel::Label, Qt::EditRole); - writer.addColumn("Address", AddressTableModel::Address, Qt::EditRole); - } - else - { - writer.setModel(proxyModelPcode); + writer.addColumn("Transparent Address", AddressTableModel::Address, Qt::EditRole); + writer.addColumn("Address Type", 
AddressTableModel::AddressType, Qt::EditRole); + } else if (ui->addressType->currentText() == AddressTableModel::RAP) { writer.addColumn("Label", AddressTableModel::Label, Qt::EditRole); writer.addColumn("PaymentCode", AddressTableModel::Address, Qt::EditRole); + writer.addColumn("Address Type", AddressTableModel::AddressType, Qt::EditRole); + } else { + writer.addColumn("Label", AddressTableModel::Label, Qt::EditRole); + writer.addColumn("Spark Address", AddressTableModel::Address, Qt::EditRole); + writer.addColumn("Address Type", AddressTableModel::AddressType, Qt::EditRole); } if(!writer.write()) { @@ -372,15 +352,14 @@ void AddressBookPage::on_exportButton_clicked() void AddressBookPage::contextualMenu(const QPoint &point) { QModelIndex index; - if(ui->tabWidget->currentWidget() == ui->tabAddresses) - { - index = ui->tableView->indexAt(point); - copyAddressAction->setText(tr("&Copy Address")); - } - else - { - index = ui->tableViewPcodes->indexAt(point); + index = ui->tableView->indexAt(point); + + if (ui->addressType->currentText() == "Spark") { + copyAddressAction->setText(tr("&Copy Spark Address")); + } else if (ui->addressType->currentText() == "RAP") { copyAddressAction->setText(tr("&Copy RAP address")); + } else { + copyAddressAction->setText(tr("&Copy Transparent Address")); } if(index.isValid()) { @@ -399,3 +378,38 @@ void AddressBookPage::selectNewAddress(const QModelIndex &parent, int begin, int newAddressToSelect.clear(); } } + +void AddressBookPage::chooseAddressType(int idx) +{ + if(!proxyModel) + return; + fproxyModel->setTypeFilter( + ui->addressType->itemData(idx).toInt()); +} + +AddressBookFilterProxy::AddressBookFilterProxy(QObject *parent) : + QSortFilterProxyModel(parent) +{ +} + +bool AddressBookFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const +{ + QModelIndex index = sourceModel()->index(sourceRow, 2, sourceParent); + bool res0 = sourceModel()->data(index).toString().contains("spark"); + bool res1 = 
sourceModel()->data(index).toString().contains("transparent"); + bool res2 = sourceModel()->data(index).toString().contains("RAP"); + + if(res0 && typeFilter == 0) + return true; + if(res1 && typeFilter == 1) + return true; + if(res2 && typeFilter == 2) + return true; + return false; +} + +void AddressBookFilterProxy::setTypeFilter(quint32 modes) +{ + this->typeFilter = modes; + invalidateFilter(); +} diff --git a/src/qt/addressbookpage.h b/src/qt/addressbookpage.h index 0a209cf47c..e89d1402f8 100644 --- a/src/qt/addressbookpage.h +++ b/src/qt/addressbookpage.h @@ -6,10 +6,12 @@ #define BITCOIN_QT_ADDRESSBOOKPAGE_H #include +#include class AddressTableModel; class OptionsModel; class PlatformStyle; +class AddressBookFilterProxy; namespace Ui { class AddressBookPage; @@ -40,7 +42,14 @@ class AddressBookPage : public QDialog ForEditing /**< Open address book for editing */ }; - explicit AddressBookPage(const PlatformStyle *platformStyle, Mode mode, Tabs tab, QWidget *parent); + enum AddressTypeEnum + { + Spark, + Transparent, + RAP + }; + + explicit AddressBookPage(const PlatformStyle *platformStyle, Mode mode, Tabs tab, QWidget *parent, bool isReused = true); ~AddressBookPage(); void setModel(AddressTableModel *model); @@ -55,7 +64,8 @@ public Q_SLOTS: Mode mode; Tabs tab; QString returnValue; - QSortFilterProxyModel *proxyModel, *proxyModelPcode; + QSortFilterProxyModel *proxyModel; + AddressBookFilterProxy *fproxyModel; QMenu *contextMenu; QAction *copyAddressAction; QAction *deleteAction; // to be able to explicitly disable it @@ -82,8 +92,31 @@ private Q_SLOTS: /** New entry/entries were added to address table */ void selectNewAddress(const QModelIndex &parent, int begin, int /*end*/); + void chooseAddressType(int idx); + Q_SIGNALS: void sendCoins(QString addr); }; +class AddressBookFilterProxy : public QSortFilterProxyModel +{ + Q_OBJECT + +public: + explicit AddressBookFilterProxy(QObject *parent = 0); + + // static const quint32 RECEIVE_TYPE = 0xFFFFFFFF; + 
static const quint32 RECEIVE_TYPE = 8; + + static quint32 TYPE(int type) { return 1<