 -rw-r--r--  configure.ac                           |  33
 -rw-r--r--  depends/packages/qt.mk                 |   2
 -rw-r--r--  depends/packages/zlib.mk               |  21
 -rw-r--r--  doc/release-notes-16512.md             |   4
 -rw-r--r--  src/Makefile.am                        |   1
 -rw-r--r--  src/compat/glibc_sanity.cpp            |  19
 -rw-r--r--  src/compat/glibc_sanity_fdelt.cpp      |  26
 -rw-r--r--  src/consensus/tx_check.cpp             |   2
 -rw-r--r--  src/net_processing.cpp                 |   2
 -rw-r--r--  src/rpc/rawtransaction.cpp             |  62
 -rw-r--r--  src/script/interpreter.cpp             |   4
 -rw-r--r--  src/test/data/script_tests.json        |   7
 -rw-r--r--  src/test/merkle_tests.cpp              | 100
 -rw-r--r--  src/validation.cpp                     | 848
 -rw-r--r--  test/functional/data/invalid_txs.py    |  56
 -rwxr-xr-x  test/functional/feature_block.py       |   2
 -rwxr-xr-x  test/functional/feature_segwit.py      |   2
 -rwxr-xr-x  test/functional/mempool_accept.py      |   2
 -rwxr-xr-x  test/functional/p2p_invalid_block.py   |  44
 -rwxr-xr-x  test/functional/rpc_psbt.py            |  10
 -rwxr-xr-x  test/functional/rpc_rawtransaction.py  |  32
 -rwxr-xr-x  test/functional/wallet_basic.py        |   2
 22 files changed, 863 insertions(+), 418 deletions(-)
diff --git a/configure.ac b/configure.ac
index e1b7281c30..2445b72683 100644
--- a/configure.ac
+++ b/configure.ac
@@ -777,6 +777,39 @@ fi
AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h])
+# FD_ZERO may be dependent on a declaration of memcpy, e.g. in SmartOS
+# check that it fails to build without memcpy, then that it builds with
+AC_MSG_CHECKING(FD_ZERO memcpy dependence)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <cstddef>
+ #if HAVE_SYS_SELECT_H
+ #include <sys/select.h>
+ #endif
+ ]],[[
+ #if HAVE_SYS_SELECT_H
+ fd_set fds;
+ FD_ZERO(&fds);
+ #endif
+ ]])],
+ [ AC_MSG_RESULT(no) ],
+ [
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <cstring>
+ #if HAVE_SYS_SELECT_H
+ #include <sys/select.h>
+ #endif
+ ]], [[
+ #if HAVE_SYS_SELECT_H
+ fd_set fds;
+ FD_ZERO(&fds);
+ #endif
+ ]])],
+ [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_CSTRING_DEPENDENT_FD_ZERO, 1, [Define this symbol if FD_ZERO is dependent of a memcpy declaration being available]) ],
+ [ AC_MSG_ERROR(failed with cstring include) ]
+ )
+ ]
+)
+
AC_CHECK_DECLS([getifaddrs, freeifaddrs],,,
[#include <sys/types.h>
#include <ifaddrs.h>]
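Note: the new configure check above boils down to compiling one small program twice, first without and then with <cstring>. A rough standalone equivalent (assuming a POSIX <sys/select.h>; the real probe is generated by autoconf and guarded by HAVE_SYS_SELECT_H) is:

    // Minimal sketch of the probed program, not part of the patch itself.
    #include <cstring>      // the first probe omits this include; the second adds it
    #include <sys/select.h>

    int main()
    {
        fd_set fds;
        FD_ZERO(&fds);      // on some platforms this expands to code needing memcpy declared
        FD_SET(0, &fds);
        return FD_ISSET(0, &fds) ? 0 : 1;
    }

On platforms where FD_ZERO expands to a call that requires a memcpy declaration (the SmartOS case mentioned in the comment), only the <cstring> variant compiles, and HAVE_CSTRING_DEPENDENT_FD_ZERO is defined so glibc_sanity_fdelt.cpp can include <cstring> first.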
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 57208a678a..56045ade50 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -156,9 +156,7 @@ define $(package)_preprocess_cmds
sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \
sed -i.old "/updateqm.depends =/d" qttranslations/translations/translations.pro && \
sed -i.old "s/src_plugins.depends = src_sql src_network/src_plugins.depends = src_network/" qtbase/src/src.pro && \
- sed -i.old "s|X11/extensions/XIproto.h|X11/X.h|" qtbase/src/plugins/platforms/xcb/qxcbxsettings.cpp && \
sed -i.old -e 's/if \[ "$$$$XPLATFORM_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/if \[ "$$$$BUILD_ON_MAC" = "yes" \]; then xspecvals=$$$$(macSDKify/' -e 's|/bin/pwd|pwd|' qtbase/configure && \
- sed -i.old 's/CGEventCreateMouseEvent(0, kCGEventMouseMoved, pos, 0)/CGEventCreateMouseEvent(0, kCGEventMouseMoved, pos, kCGMouseButtonLeft)/' qtbase/src/plugins/platforms/cocoa/qcocoacursor.mm && \
mkdir -p qtbase/mkspecs/macx-clang-linux &&\
cp -f qtbase/mkspecs/macx-clang/Info.plist.lib qtbase/mkspecs/macx-clang-linux/ &&\
cp -f qtbase/mkspecs/macx-clang/Info.plist.app qtbase/mkspecs/macx-clang-linux/ &&\
diff --git a/depends/packages/zlib.mk b/depends/packages/zlib.mk
index 1600b11a01..168f85e65e 100644
--- a/depends/packages/zlib.mk
+++ b/depends/packages/zlib.mk
@@ -5,23 +5,26 @@ $(package)_file_name=$(package)-$($(package)_version).tar.gz
$(package)_sha256_hash=c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1
define $(package)_set_vars
-$(package)_build_opts= CC="$($(package)_cc)"
-$(package)_build_opts+=CFLAGS="$($(package)_cflags) $($(package)_cppflags) -fPIC"
-$(package)_build_opts+=RANLIB="$($(package)_ranlib)"
-$(package)_build_opts+=AR="$($(package)_ar)"
-$(package)_build_opts_darwin+=AR="$($(package)_libtool)"
-$(package)_build_opts_darwin+=ARFLAGS="-o"
+$(package)_config_opts= CC="$($(package)_cc)"
+$(package)_config_opts+=CFLAGS="$($(package)_cflags) $($(package)_cppflags) -fPIC"
+$(package)_config_opts+=RANLIB="$($(package)_ranlib)"
+$(package)_config_opts+=AR="$($(package)_ar)"
+$(package)_config_opts_darwin+=AR="$($(package)_libtool)"
+$(package)_config_opts_darwin+=ARFLAGS="-o"
endef
+# zlib has its own custom configure script that takes in options like CC,
+# CFLAGS, RANLIB, AR, and ARFLAGS from the environment rather than from
+# command-line arguments.
define $(package)_config_cmds
- ./configure --static --prefix=$(host_prefix)
+ env $($(package)_config_opts) ./configure --static --prefix=$(host_prefix)
endef
define $(package)_build_cmds
- $(MAKE) $($(package)_build_opts) libz.a
+ $(MAKE) libz.a
endef
define $(package)_stage_cmds
- $(MAKE) DESTDIR=$($(package)_staging_dir) install $($(package)_build_opts)
+ $(MAKE) DESTDIR=$($(package)_staging_dir) install
endef
diff --git a/doc/release-notes-16512.md b/doc/release-notes-16512.md
new file mode 100644
index 0000000000..9aa9cf36f9
--- /dev/null
+++ b/doc/release-notes-16512.md
@@ -0,0 +1,4 @@
+RPC changes
+-----------
+The RPC `joinpsbts` will shuffle the order of the inputs and outputs of the resulting joined psbt.
+Previously inputs and outputs were added in the order that the PSBTs were provided which makes correlating inputs to outputs extremely easy.
diff --git a/src/Makefile.am b/src/Makefile.am
index 8fc7f61d4b..1ef62a656d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -484,6 +484,7 @@ libbitcoin_util_a_SOURCES = \
support/lockedpool.cpp \
chainparamsbase.cpp \
clientversion.cpp \
+ compat/glibc_sanity_fdelt.cpp \
compat/glibc_sanity.cpp \
compat/glibcxx_sanity.cpp \
compat/strnlen.cpp \
diff --git a/src/compat/glibc_sanity.cpp b/src/compat/glibc_sanity.cpp
index 1ef66e27b4..cc74f28899 100644
--- a/src/compat/glibc_sanity.cpp
+++ b/src/compat/glibc_sanity.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2009-2018 The Bitcoin Core developers
+// Copyright (c) 2009-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -9,7 +9,7 @@
#include <cstddef>
#if defined(HAVE_SYS_SELECT_H)
-#include <sys/select.h>
+bool sanity_test_fdelt();
#endif
extern "C" void* memcpy(void* a, const void* b, size_t c);
@@ -41,21 +41,6 @@ bool sanity_test_memcpy()
}
return true;
}
-
-#if defined(HAVE_SYS_SELECT_H)
-// trigger: Call FD_SET to trigger __fdelt_chk. FORTIFY_SOURCE must be defined
-// as >0 and optimizations must be set to at least -O2.
-// test: Add a file descriptor to an empty fd_set. Verify that it has been
-// correctly added.
-bool sanity_test_fdelt()
-{
- fd_set fds;
- FD_ZERO(&fds);
- FD_SET(0, &fds);
- return FD_ISSET(0, &fds);
-}
-#endif
-
} // namespace
bool glibc_sanity_test()
diff --git a/src/compat/glibc_sanity_fdelt.cpp b/src/compat/glibc_sanity_fdelt.cpp
new file mode 100644
index 0000000000..87140d0c71
--- /dev/null
+++ b/src/compat/glibc_sanity_fdelt.cpp
@@ -0,0 +1,26 @@
+// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#if defined(HAVE_CONFIG_H)
+#include <config/bitcoin-config.h>
+#endif
+
+#if defined(HAVE_SYS_SELECT_H)
+#ifdef HAVE_CSTRING_DEPENDENT_FD_ZERO
+#include <cstring>
+#endif
+#include <sys/select.h>
+
+// trigger: Call FD_SET to trigger __fdelt_chk. FORTIFY_SOURCE must be defined
+// as >0 and optimizations must be set to at least -O2.
+// test: Add a file descriptor to an empty fd_set. Verify that it has been
+// correctly added.
+bool sanity_test_fdelt()
+{
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(0, &fds);
+ return FD_ISSET(0, &fds);
+}
+#endif
diff --git a/src/consensus/tx_check.cpp b/src/consensus/tx_check.cpp
index 23ed3ecb53..00ebbbd1ab 100644
--- a/src/consensus/tx_check.cpp
+++ b/src/consensus/tx_check.cpp
@@ -18,7 +18,7 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fChe
if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-oversize");
- // Check for negative or overflow output values
+ // Check for negative or overflow output values (see CVE-2010-5139)
CAmount nValueOut = 0;
for (const auto& txout : tx.vout)
{
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 93a98974eb..34d349e8e9 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -2559,7 +2559,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
AddOrphanTx(ptx, pfrom->GetId());
- // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
+ // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
if (nEvicted > 0) {
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index fb8ea8c227..461e8025da 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -14,9 +14,11 @@
#include <node/coin.h>
#include <node/psbt.h>
#include <node/transaction.h>
+#include <policy/policy.h>
#include <policy/rbf.h>
#include <primitives/transaction.h>
#include <psbt.h>
+#include <random.h>
#include <rpc/rawtransaction_util.h>
#include <rpc/server.h>
#include <rpc/util.h>
@@ -37,11 +39,11 @@
#include <univalue.h>
-/** High fee for sendrawtransaction and testmempoolaccept.
- * By default, transaction with a fee higher than this will be rejected by the
- * RPCs. This can be overridden with the maxfeerate argument.
+/** High fee rate for sendrawtransaction and testmempoolaccept.
+ * By default, transaction with a fee rate higher than this will be rejected by
+ * the RPCs. This can be overridden with the maxfeerate argument.
*/
-constexpr static CAmount DEFAULT_MAX_RAW_TX_FEE{COIN / 10};
+static const CFeeRate DEFAULT_MAX_RAW_TX_FEE_RATE{COIN / 10};
static void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry)
{
@@ -774,7 +776,7 @@ static UniValue sendrawtransaction(const JSONRPCRequest& request)
"\nAlso see createrawtransaction and signrawtransactionwithkey calls.\n",
{
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex string of the raw transaction"},
- {"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE),
+ {"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK()),
"Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT +
"/kB.\nSet to 0 to accept any fee rate.\n"},
},
@@ -804,19 +806,17 @@ static UniValue sendrawtransaction(const JSONRPCRequest& request)
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
CTransactionRef tx(MakeTransactionRef(std::move(mtx)));
- CAmount max_raw_tx_fee = DEFAULT_MAX_RAW_TX_FEE;
+ CFeeRate max_raw_tx_fee_rate = DEFAULT_MAX_RAW_TX_FEE_RATE;
// TODO: temporary migration code for old clients. Remove in v0.20
if (request.params[1].isBool()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Second argument must be numeric (maxfeerate) and no longer supports a boolean. To allow a transaction with high fees, set maxfeerate to 0.");
} else if (!request.params[1].isNull()) {
- size_t weight = GetTransactionWeight(*tx);
- CFeeRate fr(AmountFromValue(request.params[1]));
- // the +3/4 part rounds the value up, and is the same formula used when
- // calculating the fee for a transaction
- // (see GetVirtualTransactionSize)
- max_raw_tx_fee = fr.GetFee((weight+3)/4);
+ max_raw_tx_fee_rate = CFeeRate(AmountFromValue(request.params[1]));
}
+ int64_t virtual_size = GetVirtualTransactionSize(*tx);
+ CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size);
+
std::string err_string;
AssertLockNotHeld(cs_main);
const TransactionError err = BroadcastTransaction(tx, err_string, max_raw_tx_fee, /*relay*/ true, /*wait_callback*/ true);
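The hunk above replaces the old weight-based formula with CFeeRate::GetFee over GetVirtualTransactionSize. A rough standalone illustration of the arithmetic follows; these are simplified stand-ins, not Bitcoin Core's CFeeRate, which has additional rounding rules not reproduced here:

    #include <cstdint>
    #include <iostream>

    // vsize rounds the weight up, matching the (weight + 3) / 4 formula the old code used inline.
    int64_t VirtualSize(int64_t weight) { return (weight + 3) / 4; }

    // max fee = fee rate (satoshis per 1000 vbytes) * vsize / 1000
    int64_t MaxFee(int64_t sat_per_kvb, int64_t vsize) { return sat_per_kvb * vsize / 1000; }

    int main()
    {
        const int64_t weight = 800;           // example transaction weight
        const int64_t rate = 10'000'000;      // 0.1 BTC per kvB, the default cap (COIN / 10)
        std::cout << MaxFee(rate, VirtualSize(weight)) << " satoshis\n"; // prints 2000000
        return 0;
    }

With the default cap of COIN/10 per kvB, a 200-vbyte transaction may pay at most 0.02 BTC before sendrawtransaction rejects it.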
@@ -840,7 +840,7 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request)
{"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""},
},
},
- {"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE), "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"},
+ {"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK()), "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"},
},
RPCResult{
"[ (array) The result of the mempool acceptance test for each raw transaction in the input array.\n"
@@ -880,19 +880,17 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request)
CTransactionRef tx(MakeTransactionRef(std::move(mtx)));
const uint256& tx_hash = tx->GetHash();
- CAmount max_raw_tx_fee = DEFAULT_MAX_RAW_TX_FEE;
+ CFeeRate max_raw_tx_fee_rate = DEFAULT_MAX_RAW_TX_FEE_RATE;
// TODO: temporary migration code for old clients. Remove in v0.20
if (request.params[1].isBool()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Second argument must be numeric (maxfeerate) and no longer supports a boolean. To allow a transaction with high fees, set maxfeerate to 0.");
} else if (!request.params[1].isNull()) {
- size_t weight = GetTransactionWeight(*tx);
- CFeeRate fr(AmountFromValue(request.params[1]));
- // the +3/4 part rounds the value up, and is the same formula used when
- // calculating the fee for a transaction
- // (see GetVirtualTransactionSize)
- max_raw_tx_fee = fr.GetFee((weight+3)/4);
+ max_raw_tx_fee_rate = CFeeRate(AmountFromValue(request.params[1]));
}
+ int64_t virtual_size = GetVirtualTransactionSize(*tx);
+ CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size);
+
UniValue result(UniValue::VARR);
UniValue result_0(UniValue::VOBJ);
result_0.pushKV("txid", tx_hash.GetHex());
@@ -1615,8 +1613,30 @@ UniValue joinpsbts(const JSONRPCRequest& request)
merged_psbt.unknown.insert(psbt.unknown.begin(), psbt.unknown.end());
}
+ // Generate list of shuffled indices for shuffling inputs and outputs of the merged PSBT
+ std::vector<int> input_indices(merged_psbt.inputs.size());
+ std::iota(input_indices.begin(), input_indices.end(), 0);
+ std::vector<int> output_indices(merged_psbt.outputs.size());
+ std::iota(output_indices.begin(), output_indices.end(), 0);
+
+ // Shuffle input and output indicies lists
+ Shuffle(input_indices.begin(), input_indices.end(), FastRandomContext());
+ Shuffle(output_indices.begin(), output_indices.end(), FastRandomContext());
+
+ PartiallySignedTransaction shuffled_psbt;
+ shuffled_psbt.tx = CMutableTransaction();
+ shuffled_psbt.tx->nVersion = merged_psbt.tx->nVersion;
+ shuffled_psbt.tx->nLockTime = merged_psbt.tx->nLockTime;
+ for (int i : input_indices) {
+ shuffled_psbt.AddInput(merged_psbt.tx->vin[i], merged_psbt.inputs[i]);
+ }
+ for (int i : output_indices) {
+ shuffled_psbt.AddOutput(merged_psbt.tx->vout[i], merged_psbt.outputs[i]);
+ }
+ shuffled_psbt.unknown.insert(merged_psbt.unknown.begin(), merged_psbt.unknown.end());
+
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
- ssTx << merged_psbt;
+ ssTx << shuffled_psbt;
return EncodeBase64((unsigned char*)ssTx.data(), ssTx.size());
}
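joinpsbts now shuffles index lists rather than the inputs and outputs themselves, so each txin/txout stays paired with its PSBT metadata while the ordering is randomized. A standalone sketch of that pattern, using std::shuffle in place of Bitcoin Core's Shuffle/FastRandomContext:

    #include <algorithm>
    #include <iostream>
    #include <numeric>
    #include <random>
    #include <string>
    #include <vector>

    int main()
    {
        // Two parallel vectors that must be reordered consistently,
        // standing in for merged_psbt.tx->vin and merged_psbt.inputs.
        std::vector<std::string> txins  = {"in0", "in1", "in2"};
        std::vector<std::string> psbtin = {"meta0", "meta1", "meta2"};

        // Build 0..n-1, then shuffle the indices instead of the data.
        std::vector<size_t> order(txins.size());
        std::iota(order.begin(), order.end(), 0);
        std::shuffle(order.begin(), order.end(), std::mt19937{std::random_device{}()});

        // Emit both vectors in the same shuffled order.
        for (size_t i : order) {
            std::cout << txins[i] << " / " << psbtin[i] << "\n";
        }
        return 0;
    }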
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index f8701b6d01..20fae2eebf 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -334,7 +334,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript&
opcode == OP_MOD ||
opcode == OP_LSHIFT ||
opcode == OP_RSHIFT)
- return set_error(serror, SCRIPT_ERR_DISABLED_OPCODE); // Disabled opcodes.
+ return set_error(serror, SCRIPT_ERR_DISABLED_OPCODE); // Disabled opcodes (CVE-2010-5137).
// With SCRIPT_VERIFY_CONST_SCRIPTCODE, OP_CODESEPARATOR in non-segwit script is rejected even in an unexecuted branch
if (opcode == OP_CODESEPARATOR && sigversion == SigVersion::BASE && (flags & SCRIPT_VERIFY_CONST_SCRIPTCODE))
@@ -1483,6 +1483,8 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C
return set_error(serror, SCRIPT_ERR_SIG_PUSHONLY);
}
+ // scriptSig and scriptPubKey must be evaluated sequentially on the same stack
+ // rather than being simply concatenated (see CVE-2010-5141)
std::vector<std::vector<unsigned char> > stack, stackCopy;
if (!EvalScript(stack, scriptSig, flags, checker, SigVersion::BASE, serror))
// serror is set
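The new VerifyScript comment points at a structural property: scriptSig and scriptPubKey are run as two EvalScript calls over one shared stack, never concatenated into a single script. The toy interpreter below (a hypothetical two-opcode machine, not Bitcoin's script engine) illustrates why concatenation is dangerous: an early-exit opcode placed in the scriptSig can prevent the scriptPubKey from ever being evaluated.

    #include <iostream>
    #include <vector>

    // A hypothetical miniature script machine, only for illustration.
    enum class Op { PushTrue, PushFalse, StopIfTrue };

    // Runs one script over a shared boolean stack.
    void Eval(const std::vector<Op>& script, std::vector<bool>& stack)
    {
        for (Op op : script) {
            switch (op) {
                case Op::PushTrue:  stack.push_back(true);  break;
                case Op::PushFalse: stack.push_back(false); break;
                case Op::StopIfTrue:
                    if (!stack.empty() && stack.back()) return; // ends this evaluation early
                    break;
            }
        }
    }

    bool StackSaysValid(const std::vector<bool>& stack)
    {
        return !stack.empty() && stack.back();
    }

    int main()
    {
        // "scriptPubKey" that should always fail: its last push is false.
        const std::vector<Op> script_pubkey = {Op::PushFalse};
        // Malicious "scriptSig": push true, then stop while true is on top.
        const std::vector<Op> script_sig = {Op::PushTrue, Op::StopIfTrue};

        // Unsafe: concatenate and evaluate once. The early exit fires before
        // the scriptPubKey opcodes run, so the spend looks valid.
        std::vector<Op> concatenated = script_sig;
        concatenated.insert(concatenated.end(), script_pubkey.begin(), script_pubkey.end());
        std::vector<bool> stack1;
        Eval(concatenated, stack1);
        std::cout << "concatenated: " << (StackSaysValid(stack1) ? "accepted" : "rejected") << "\n";

        // Safer: evaluate sequentially on the same stack; the scriptPubKey always runs.
        std::vector<bool> stack2;
        Eval(script_sig, stack2);
        Eval(script_pubkey, stack2);
        std::cout << "sequential:   " << (StackSaysValid(stack2) ? "accepted" : "rejected") << "\n";
        return 0;
    }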
diff --git a/src/test/data/script_tests.json b/src/test/data/script_tests.json
index 9b320b6943..3241f32f56 100644
--- a/src/test/data/script_tests.json
+++ b/src/test/data/script_tests.json
@@ -829,15 +829,16 @@
["NOP", "2SWAP 1", "P2SH,STRICTENC", "INVALID_STACK_OPERATION"],
["1", "2 3 2SWAP 1", "P2SH,STRICTENC", "INVALID_STACK_OPERATION"],
+
+["NOP", "SIZE 1", "P2SH,STRICTENC", "INVALID_STACK_OPERATION"],
+
+["TEST DISABLED OP CODES (CVE-2010-5137)"],
["'a' 'b'", "CAT", "P2SH,STRICTENC", "DISABLED_OPCODE", "CAT disabled"],
["'a' 'b' 0", "IF CAT ELSE 1 ENDIF", "P2SH,STRICTENC", "DISABLED_OPCODE", "CAT disabled"],
["'abc' 1 1", "SUBSTR", "P2SH,STRICTENC", "DISABLED_OPCODE", "SUBSTR disabled"],
["'abc' 1 1 0", "IF SUBSTR ELSE 1 ENDIF", "P2SH,STRICTENC", "DISABLED_OPCODE", "SUBSTR disabled"],
["'abc' 2 0", "IF LEFT ELSE 1 ENDIF", "P2SH,STRICTENC", "DISABLED_OPCODE", "LEFT disabled"],
["'abc' 2 0", "IF RIGHT ELSE 1 ENDIF", "P2SH,STRICTENC", "DISABLED_OPCODE", "RIGHT disabled"],
-
-["NOP", "SIZE 1", "P2SH,STRICTENC", "INVALID_STACK_OPERATION"],
-
["'abc'", "IF INVERT ELSE 1 ENDIF", "P2SH,STRICTENC", "DISABLED_OPCODE", "INVERT disabled"],
["1 2 0 IF AND ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "DISABLED_OPCODE", "AND disabled"],
["1 2 0 IF OR ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "DISABLED_OPCODE", "OR disabled"],
diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp
index 1684258c9f..dc38a1a818 100644
--- a/src/test/merkle_tests.cpp
+++ b/src/test/merkle_tests.cpp
@@ -249,4 +249,104 @@ BOOST_AUTO_TEST_CASE(merkle_test)
}
}
+
+BOOST_AUTO_TEST_CASE(merkle_test_empty_block)
+{
+ bool mutated = false;
+ CBlock block;
+ uint256 root = BlockMerkleRoot(block, &mutated);
+
+ BOOST_CHECK_EQUAL(root.IsNull(), true);
+ BOOST_CHECK_EQUAL(mutated, false);
+}
+
+BOOST_AUTO_TEST_CASE(merkle_test_oneTx_block)
+{
+ bool mutated = false;
+ CBlock block;
+
+ block.vtx.resize(1);
+ CMutableTransaction mtx;
+ mtx.nLockTime = 0;
+ block.vtx[0] = MakeTransactionRef(std::move(mtx));
+ uint256 root = BlockMerkleRoot(block, &mutated);
+ BOOST_CHECK_EQUAL(root, block.vtx[0]->GetHash());
+ BOOST_CHECK_EQUAL(mutated, false);
+}
+
+BOOST_AUTO_TEST_CASE(merkle_test_OddTxWithRepeatedLastTx_block)
+{
+ bool mutated;
+ CBlock block, blockWithRepeatedLastTx;
+
+ block.vtx.resize(3);
+
+ for (std::size_t pos = 0; pos < block.vtx.size(); pos++) {
+ CMutableTransaction mtx;
+ mtx.nLockTime = pos;
+ block.vtx[pos] = MakeTransactionRef(std::move(mtx));
+ }
+
+ blockWithRepeatedLastTx = block;
+ blockWithRepeatedLastTx.vtx.push_back(blockWithRepeatedLastTx.vtx.back());
+
+ uint256 rootofBlock = BlockMerkleRoot(block, &mutated);
+ BOOST_CHECK_EQUAL(mutated, false);
+
+ uint256 rootofBlockWithRepeatedLastTx = BlockMerkleRoot(blockWithRepeatedLastTx, &mutated);
+ BOOST_CHECK_EQUAL(rootofBlock, rootofBlockWithRepeatedLastTx);
+ BOOST_CHECK_EQUAL(mutated, true);
+}
+
+BOOST_AUTO_TEST_CASE(merkle_test_LeftSubtreeRightSubtree)
+{
+ CBlock block, leftSubtreeBlock, rightSubtreeBlock;
+
+ block.vtx.resize(4);
+ std::size_t pos;
+ for (pos = 0; pos < block.vtx.size(); pos++) {
+ CMutableTransaction mtx;
+ mtx.nLockTime = pos;
+ block.vtx[pos] = MakeTransactionRef(std::move(mtx));
+ }
+
+ for (pos = 0; pos < block.vtx.size() / 2; pos++)
+ leftSubtreeBlock.vtx.push_back(block.vtx[pos]);
+
+ for (pos = block.vtx.size() / 2; pos < block.vtx.size(); pos++)
+ rightSubtreeBlock.vtx.push_back(block.vtx[pos]);
+
+ uint256 root = BlockMerkleRoot(block);
+ uint256 rootOfLeftSubtree = BlockMerkleRoot(leftSubtreeBlock);
+ uint256 rootOfRightSubtree = BlockMerkleRoot(rightSubtreeBlock);
+ std::vector<uint256> leftRight;
+ leftRight.push_back(rootOfLeftSubtree);
+ leftRight.push_back(rootOfRightSubtree);
+ uint256 rootOfLR = ComputeMerkleRoot(leftRight);
+
+ BOOST_CHECK_EQUAL(root, rootOfLR);
+}
+
+BOOST_AUTO_TEST_CASE(merkle_test_BlockWitness)
+{
+ CBlock block;
+
+ block.vtx.resize(2);
+ for (std::size_t pos = 0; pos < block.vtx.size(); pos++) {
+ CMutableTransaction mtx;
+ mtx.nLockTime = pos;
+ block.vtx[pos] = MakeTransactionRef(std::move(mtx));
+ }
+
+ uint256 blockWitness = BlockWitnessMerkleRoot(block);
+
+ std::vector<uint256> hashes;
+ hashes.resize(block.vtx.size());
+ hashes[0].SetNull();
+ hashes[1] = block.vtx[1]->GetHash();
+
+ uint256 merkelRootofHashes = ComputeMerkleRoot(hashes);
+
+ BOOST_CHECK_EQUAL(merkelRootofHashes, blockWitness);
+}
BOOST_AUTO_TEST_SUITE_END()
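merkle_test_OddTxWithRepeatedLastTx_block exercises the known mutation property of Bitcoin-style merkle trees (CVE-2012-2459): a level with an odd number of hashes pairs its last hash with itself, so duplicating the final transaction leaves the root unchanged, which is why BlockMerkleRoot reports it through the mutated flag. The toy below shows just that duplication rule, with a stand-in combiner instead of double-SHA256:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for double-SHA256: any deterministic combiner shows the structure.
    uint64_t Combine(uint64_t left, uint64_t right)
    {
        return std::hash<std::string>{}(std::to_string(left) + ":" + std::to_string(right));
    }

    // Bitcoin-style merkle root: odd levels duplicate their last element.
    uint64_t ToyMerkleRoot(std::vector<uint64_t> level)
    {
        if (level.empty()) return 0;
        while (level.size() > 1) {
            if (level.size() % 2 == 1) level.push_back(level.back()); // duplicate last hash
            std::vector<uint64_t> next;
            for (size_t i = 0; i < level.size(); i += 2) {
                next.push_back(Combine(level[i], level[i + 1]));
            }
            level = std::move(next);
        }
        return level[0];
    }

    int main()
    {
        std::vector<uint64_t> three = {11, 22, 33};
        std::vector<uint64_t> four  = {11, 22, 33, 33}; // last leaf repeated

        // Both lists produce the same root, mirroring what the unit test asserts
        // for a block whose last transaction is duplicated.
        std::cout << (ToyMerkleRoot(three) == ToyMerkleRoot(four) ? "equal" : "different") << "\n";
        return 0;
    }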
diff --git a/src/validation.cpp b/src/validation.cpp
index 6a9b0c95fb..9696b65ea7 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -428,21 +428,134 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt
return CheckInputs(tx, state, view, flags, cacheSigStore, true, txdata);
}
-/**
- * @param[out] coins_to_uncache Return any outpoints which were not previously present in the
- * coins cache, but were added as a result of validating the tx
- * for mempool acceptance. This allows the caller to optionally
- * remove the cache additions if the associated transaction ends
- * up being rejected by the mempool.
- */
-static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
- bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+namespace {
+
+class MemPoolAccept
{
- const CTransaction& tx = *ptx;
- const uint256 hash = tx.GetHash();
- AssertLockHeld(cs_main);
- LOCK(pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
+public:
+ MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
+ m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
+ m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
+ m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
+ m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {}
+
+ // We put the arguments we're handed into a struct, so we can pass them
+ // around easier.
+ struct ATMPArgs {
+ const CChainParams& m_chainparams;
+ CValidationState &m_state;
+ bool* m_missing_inputs;
+ const int64_t m_accept_time;
+ std::list<CTransactionRef>* m_replaced_transactions;
+ const bool m_bypass_limits;
+ const CAmount& m_absurd_fee;
+ /*
+ * Return any outpoints which were not previously present in the coins
+ * cache, but were added as a result of validating the tx for mempool
+ * acceptance. This allows the caller to optionally remove the cache
+ * additions if the associated transaction ends up being rejected by
+ * the mempool.
+ */
+ std::vector<COutPoint>& m_coins_to_uncache;
+ const bool m_test_accept;
+ };
+
+ // Single transaction acceptance
+ bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+private:
+ // All the intermediate state that gets passed between the various levels
+ // of checking a given transaction.
+ struct Workspace {
+ Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
+ std::set<uint256> m_conflicts;
+ CTxMemPool::setEntries m_all_conflicting;
+ CTxMemPool::setEntries m_ancestors;
+ std::unique_ptr<CTxMemPoolEntry> m_entry;
+
+ bool m_replacement_transaction;
+ CAmount m_modified_fees;
+ CAmount m_conflicting_fees;
+ size_t m_conflicting_size;
+
+ const CTransactionRef& m_ptx;
+ const uint256& m_hash;
+ };
+
+ // Run the policy checks on a given transaction, excluding any script checks.
+ // Looks up inputs, calculates feerate, considers replacement, evaluates
+ // package limits, etc. As this function can be invoked for "free" by a peer,
+ // only tests that are fast should be done here (to avoid CPU DoS).
+ bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
+
+ // Run the script checks using our policy flags. As this can be slow, we should
+ // only invoke this on transactions that have otherwise passed policy checks.
+ bool PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ // Re-run the script checks, using consensus flags, and try to cache the
+ // result in the scriptcache. This should be done after
+ // PolicyScriptChecks(). This requires that all inputs either be in our
+ // utxo set or in the mempool.
+ bool ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ // Try to add the transaction to the mempool, removing any conflicts first.
+ // Returns true if the transaction is in the mempool after any size
+ // limiting is performed, false otherwise.
+ bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
+
+ // Compare a package's feerate against minimum allowed.
+ bool CheckFeeRate(size_t package_size, CAmount package_fee, CValidationState& state)
+ {
+ CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
+ if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
+ }
+
+ if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
+ }
+ return true;
+ }
+
+private:
+ CTxMemPool& m_pool;
+ CCoinsViewCache m_view;
+ CCoinsViewMemPool m_viewmempool;
+ CCoinsView m_dummy;
+
+ // The package limits in effect at the time of invocation.
+ const size_t m_limit_ancestors;
+ const size_t m_limit_ancestor_size;
+ // These may be modified while evaluating a transaction (eg to account for
+ // in-mempool conflicts; see below).
+ size_t m_limit_descendants;
+ size_t m_limit_descendant_size;
+};
+
+bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
+{
+ const CTransactionRef& ptx = ws.m_ptx;
+ const CTransaction& tx = *ws.m_ptx;
+ const uint256& hash = ws.m_hash;
+
+ // Copy/alias what we need out of args
+ CValidationState &state = args.m_state;
+ bool* pfMissingInputs = args.m_missing_inputs;
+ const int64_t nAcceptTime = args.m_accept_time;
+ const bool bypass_limits = args.m_bypass_limits;
+ const CAmount& nAbsurdFee = args.m_absurd_fee;
+ std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
+
+ // Alias what we need out of ws
+ std::set<uint256>& setConflicts = ws.m_conflicts;
+ CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
+ CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
+ std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
+ bool& fReplacementTransaction = ws.m_replacement_transaction;
+ CAmount& nModifiedFees = ws.m_modified_fees;
+ CAmount& nConflictingFees = ws.m_conflicting_fees;
+ size_t& nConflictingSize = ws.m_conflicting_size;
+
if (pfMissingInputs) {
*pfMissingInputs = false;
}
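The class declaration above only shows the stages; the wiring lives in AcceptSingleTransaction further down in the patch. As orientation, here is a heavily simplified, hypothetical model of how those stages are expected to compose (stub types and assumed ordering only, for example that m_test_accept short-circuits before Finalize; the real code in this patch is authoritative):

    #include <iostream>

    // Hypothetical stand-ins; not Bitcoin Core types.
    struct ATMPArgsModel { bool test_accept = false; };
    struct WorkspaceModel { /* per-transaction intermediate state */ };

    bool PreChecks(ATMPArgsModel&, WorkspaceModel&)             { return true; } // cheap policy, fee and RBF checks
    bool PolicyScriptChecks(ATMPArgsModel&, WorkspaceModel&)    { return true; } // standard-flag script checks
    bool ConsensusScriptChecks(ATMPArgsModel&, WorkspaceModel&) { return true; } // consensus-flag re-check and caching
    bool Finalize(ATMPArgsModel&, WorkspaceModel&)              { return true; } // evict conflicts, add to pool, trim

    bool AcceptSingleTransactionModel(ATMPArgsModel& args)
    {
        WorkspaceModel ws;
        if (!PreChecks(args, ws)) return false;
        if (!PolicyScriptChecks(args, ws)) return false;
        if (!ConsensusScriptChecks(args, ws)) return false;
        if (args.test_accept) return true;   // testmempoolaccept stops before touching the pool
        return Finalize(args, ws);
    }

    int main()
    {
        ATMPArgsModel args;
        std::cout << (AcceptSingleTransactionModel(args) ? "accepted" : "rejected") << "\n";
        return 0;
    }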
@@ -472,15 +585,14 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
- if (pool.exists(hash)) {
+ if (m_pool.exists(hash)) {
return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
- std::set<uint256> setConflicts;
for (const CTxIn &txin : tx.vin)
{
- const CTransaction* ptxConflicting = pool.GetConflictTx(txin.prevout);
+ const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
if (ptxConflicting) {
if (!setConflicts.count(ptxConflicting->GetHash()))
{
@@ -514,395 +626,436 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
- {
- CCoinsView dummy;
- CCoinsViewCache view(&dummy);
-
- LockPoints lp;
- CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
- CCoinsViewMemPool viewMemPool(&coins_cache, pool);
- view.SetBackend(viewMemPool);
-
- // do all inputs exist?
- for (const CTxIn& txin : tx.vin) {
- if (!coins_cache.HaveCoinInCache(txin.prevout)) {
- coins_to_uncache.push_back(txin.prevout);
- }
+ LockPoints lp;
+ m_view.SetBackend(m_viewmempool);
- // Note: this call may add txin.prevout to the coins cache
- // (CoinsTip().cacheCoins) by way of FetchCoin(). It should be removed
- // later (via coins_to_uncache) if this tx turns out to be invalid.
- if (!view.HaveCoin(txin.prevout)) {
- // Are inputs missing because we already have the tx?
- for (size_t out = 0; out < tx.vout.size(); out++) {
- // Optimistically just do efficient check of cache for outputs
- if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
- return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
- }
- }
- // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
- if (pfMissingInputs) {
- *pfMissingInputs = true;
+ CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
+ // do all inputs exist?
+ for (const CTxIn& txin : tx.vin) {
+ if (!coins_cache.HaveCoinInCache(txin.prevout)) {
+ coins_to_uncache.push_back(txin.prevout);
+ }
+
+ // Note: this call may add txin.prevout to the coins cache
+ // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
+ // later (via coins_to_uncache) if this tx turns out to be invalid.
+ if (!m_view.HaveCoin(txin.prevout)) {
+ // Are inputs missing because we already have the tx?
+ for (size_t out = 0; out < tx.vout.size(); out++) {
+ // Optimistically just do efficient check of cache for outputs
+ if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
+ return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
}
- return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
}
+ // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
+ if (pfMissingInputs) {
+ *pfMissingInputs = true;
+ }
+ return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
}
+ }
- // Bring the best block into scope
- view.GetBestBlock();
+ // Bring the best block into scope
+ m_view.GetBestBlock();
- // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
- view.SetBackend(dummy);
+ // we have all inputs cached now, so switch back to dummy (to protect
+ // against bugs where we pull more inputs from disk that miss being added
+ // to coins_to_uncache)
+ m_view.SetBackend(m_dummy);
- // Only accept BIP68 sequence locked transactions that can be mined in the next
- // block; we don't want our mempool filled up with transactions that can't
- // be mined yet.
- // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
- // CoinsViewCache instead of create its own
- if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
- return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
+ // Only accept BIP68 sequence locked transactions that can be mined in the next
+ // block; we don't want our mempool filled up with transactions that can't
+ // be mined yet.
+ // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
+ // CoinsViewCache instead of create its own
+ if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
+ return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
- CAmount nFees = 0;
- if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
- }
+ CAmount nFees = 0;
+ if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ }
- // Check for non-standard pay-to-script-hash in inputs
- if (fRequireStandard && !AreInputsStandard(tx, view))
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
+ // Check for non-standard pay-to-script-hash in inputs
+ if (fRequireStandard && !AreInputsStandard(tx, m_view))
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
- // Check for non-standard witness in P2WSH
- if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
- return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
+ // Check for non-standard witness in P2WSH
+ if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
+ return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
- int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
+ int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
- // nModifiedFees includes any fee deltas from PrioritiseTransaction
- CAmount nModifiedFees = nFees;
- pool.ApplyDelta(hash, nModifiedFees);
+ // nModifiedFees includes any fee deltas from PrioritiseTransaction
+ nModifiedFees = nFees;
+ m_pool.ApplyDelta(hash, nModifiedFees);
- // Keep track of transactions that spend a coinbase, which we re-scan
- // during reorgs to ensure COINBASE_MATURITY is still met.
- bool fSpendsCoinbase = false;
- for (const CTxIn &txin : tx.vin) {
- const Coin &coin = view.AccessCoin(txin.prevout);
- if (coin.IsCoinBase()) {
- fSpendsCoinbase = true;
- break;
- }
+ // Keep track of transactions that spend a coinbase, which we re-scan
+ // during reorgs to ensure COINBASE_MATURITY is still met.
+ bool fSpendsCoinbase = false;
+ for (const CTxIn &txin : tx.vin) {
+ const Coin &coin = m_view.AccessCoin(txin.prevout);
+ if (coin.IsCoinBase()) {
+ fSpendsCoinbase = true;
+ break;
}
+ }
- CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
- fSpendsCoinbase, nSigOpsCost, lp);
- unsigned int nSize = entry.GetTxSize();
+ entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
+ fSpendsCoinbase, nSigOpsCost, lp));
+ unsigned int nSize = entry->GetTxSize();
- if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
+ if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
strprintf("%d", nSigOpsCost));
- CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
- if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
- }
-
- // No transactions are allowed below minRelayTxFee except from disconnected blocks
- if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
- }
+ // No transactions are allowed below minRelayTxFee except from disconnected
+ // blocks
+ if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
- if (nAbsurdFee && nFees > nAbsurdFee)
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
+ if (nAbsurdFee && nFees > nAbsurdFee)
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
REJECT_HIGHFEE, "absurdly-high-fee",
strprintf("%d > %d", nFees, nAbsurdFee));
- const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts);
- // Calculate in-mempool ancestors, up to a limit.
- CTxMemPool::setEntries setAncestors;
- size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
- size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
- size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
-
- if (setConflicts.size() == 1) {
- // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
- // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
- // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
- // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
- // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
- // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
- // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
- // for off-chain contract systems (see link in the comment below).
- //
- // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
- // conflict directly with exactly one other transaction (but may evict children of said transaction),
- // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
- // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
- // amended, we may need to move that check to here instead of removing it wholesale.
- //
- // Such transactions are clearly not merging any existing packages, so we are only concerned with
- // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
- // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
- // to.
- //
- // To check these we first check if we meet the RBF criteria, above, and increment the descendant
- // limits by the direct conflict and its descendants (as these are recalculated in
- // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
- // removals, of each parent's existing dependant set). The ancestor count limits are unmodified (as
- // the ancestor limits should be the same for both our new transaction and any conflicts).
- // We don't bother incrementing nLimitDescendants by the full removal count as that limit never comes
- // into force here (as we're only adding a single transaction).
- assert(setIterConflicting.size() == 1);
- CTxMemPool::txiter conflict = *setIterConflicting.begin();
-
- nLimitDescendants += 1;
- nLimitDescendantSize += conflict->GetSizeWithDescendants();
- }
-
- std::string errString;
- if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
- setAncestors.clear();
- // If CalculateMemPoolAncestors fails second time, we want the original error string.
- std::string dummy_err_string;
- // Contracting/payment channels CPFP carve-out:
- // If the new transaction is relatively small (up to 40k weight)
- // and has at most one ancestor (ie ancestor limit of 2, including
- // the new transaction), allow it if its parent has exactly the
- // descendant limit descendants.
- //
- // This allows protocols which rely on distrusting counterparties
- // being able to broadcast descendants of an unconfirmed transaction
- // to be secure by simply only having two immediately-spendable
- // outputs - one for each counterparty. For more info on the uses for
- // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
- if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
- !pool.CalculateMemPoolAncestors(entry, setAncestors, 2, nLimitAncestorSize, nLimitDescendants + 1, nLimitDescendantSize + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
- }
- }
-
- // A transaction that spends outputs that would be replaced by it is invalid. Now
- // that we have the set of all ancestors we can detect this
- // pathological case by making sure setConflicts and setAncestors don't
- // intersect.
- for (CTxMemPool::txiter ancestorIt : setAncestors)
+ const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
+ // Calculate in-mempool ancestors, up to a limit.
+ if (setConflicts.size() == 1) {
+ // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
+ // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
+ // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
+ // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
+ // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
+ // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
+ // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
+ // for off-chain contract systems (see link in the comment below).
+ //
+ // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
+ // conflict directly with exactly one other transaction (but may evict children of said transaction),
+ // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
+ // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
+ // amended, we may need to move that check to here instead of removing it wholesale.
+ //
+ // Such transactions are clearly not merging any existing packages, so we are only concerned with
+ // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
+ // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
+ // to.
+ //
+ // To check these we first check if we meet the RBF criteria, above, and increment the descendant
+ // limits by the direct conflict and its descendants (as these are recalculated in
+ // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
+ // removals, of each parent's existing dependant set). The ancestor count limits are unmodified (as
+ // the ancestor limits should be the same for both our new transaction and any conflicts).
+ // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
+ // into force here (as we're only adding a single transaction).
+ assert(setIterConflicting.size() == 1);
+ CTxMemPool::txiter conflict = *setIterConflicting.begin();
+
+ m_limit_descendants += 1;
+ m_limit_descendant_size += conflict->GetSizeWithDescendants();
+ }
+
+ std::string errString;
+ if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
+ setAncestors.clear();
+ // If CalculateMemPoolAncestors fails second time, we want the original error string.
+ std::string dummy_err_string;
+ // Contracting/payment channels CPFP carve-out:
+ // If the new transaction is relatively small (up to 40k weight)
+ // and has at most one ancestor (ie ancestor limit of 2, including
+ // the new transaction), allow it if its parent has exactly the
+ // descendant limit descendants.
+ //
+ // This allows protocols which rely on distrusting counterparties
+ // being able to broadcast descendants of an unconfirmed transaction
+ // to be secure by simply only having two immediately-spendable
+ // outputs - one for each counterparty. For more info on the uses for
+ // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
+ if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
+ !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
+ }
+ }
+
+ // A transaction that spends outputs that would be replaced by it is invalid. Now
+ // that we have the set of all ancestors we can detect this
+ // pathological case by making sure setConflicts and setAncestors don't
+ // intersect.
+ for (CTxMemPool::txiter ancestorIt : setAncestors)
+ {
+ const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
+ if (setConflicts.count(hashAncestor))
{
- const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
- if (setConflicts.count(hashAncestor))
- {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
- strprintf("%s spends conflicting transaction %s",
- hash.ToString(),
- hashAncestor.ToString()));
- }
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
+ strprintf("%s spends conflicting transaction %s",
+ hash.ToString(),
+ hashAncestor.ToString()));
}
+ }
- // Check if it's economically rational to mine this transaction rather
- // than the ones it replaces.
- CAmount nConflictingFees = 0;
- size_t nConflictingSize = 0;
- uint64_t nConflictingCount = 0;
- CTxMemPool::setEntries allConflicting;
-
- // If we don't hold the lock allConflicting might be incomplete; the
- // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
- // mempool consistency for us.
- const bool fReplacementTransaction = setConflicts.size();
- if (fReplacementTransaction)
- {
- CFeeRate newFeeRate(nModifiedFees, nSize);
- std::set<uint256> setConflictsParents;
- const int maxDescendantsToVisit = 100;
- for (const auto& mi : setIterConflicting) {
- // Don't allow the replacement to reduce the feerate of the
- // mempool.
- //
- // We usually don't want to accept replacements with lower
- // feerates than what they replaced as that would lower the
- // feerate of the next block. Requiring that the feerate always
- // be increased is also an easy-to-reason about way to prevent
- // DoS attacks via replacements.
- //
- // We only consider the feerates of transactions being directly
- // replaced, not their indirect descendants. While that does
- // mean high feerate children are ignored when deciding whether
- // or not to replace, we do require the replacement to pay more
- // overall fees too, mitigating most cases.
- CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
- if (newFeeRate <= oldFeeRate)
- {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
- strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
- hash.ToString(),
- newFeeRate.ToString(),
- oldFeeRate.ToString()));
- }
-
- for (const CTxIn &txin : mi->GetTx().vin)
- {
- setConflictsParents.insert(txin.prevout.hash);
- }
+ // Check if it's economically rational to mine this transaction rather
+ // than the ones it replaces.
+ nConflictingFees = 0;
+ nConflictingSize = 0;
+ uint64_t nConflictingCount = 0;
- nConflictingCount += mi->GetCountWithDescendants();
- }
- // This potentially overestimates the number of actual descendants
- // but we just want to be conservative to avoid doing too much
- // work.
- if (nConflictingCount <= maxDescendantsToVisit) {
- // If not too many to replace, then calculate the set of
- // transactions that would have to be evicted
- for (CTxMemPool::txiter it : setIterConflicting) {
- pool.CalculateDescendants(it, allConflicting);
- }
- for (CTxMemPool::txiter it : allConflicting) {
- nConflictingFees += it->GetModifiedFee();
- nConflictingSize += it->GetTxSize();
- }
- } else {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
- strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
+ // If we don't hold the lock allConflicting might be incomplete; the
+ // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
+ // mempool consistency for us.
+ fReplacementTransaction = setConflicts.size();
+ if (fReplacementTransaction)
+ {
+ CFeeRate newFeeRate(nModifiedFees, nSize);
+ std::set<uint256> setConflictsParents;
+ const int maxDescendantsToVisit = 100;
+ for (const auto& mi : setIterConflicting) {
+ // Don't allow the replacement to reduce the feerate of the
+ // mempool.
+ //
+ // We usually don't want to accept replacements with lower
+ // feerates than what they replaced as that would lower the
+ // feerate of the next block. Requiring that the feerate always
+ // be increased is also an easy-to-reason about way to prevent
+ // DoS attacks via replacements.
+ //
+ // We only consider the feerates of transactions being directly
+ // replaced, not their indirect descendants. While that does
+ // mean high feerate children are ignored when deciding whether
+ // or not to replace, we do require the replacement to pay more
+ // overall fees too, mitigating most cases.
+ CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
+ if (newFeeRate <= oldFeeRate)
+ {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
+ strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
hash.ToString(),
- nConflictingCount,
- maxDescendantsToVisit));
+ newFeeRate.ToString(),
+ oldFeeRate.ToString()));
}
- for (unsigned int j = 0; j < tx.vin.size(); j++)
+ for (const CTxIn &txin : mi->GetTx().vin)
{
- // We don't want to accept replacements that require low
- // feerate junk to be mined first. Ideally we'd keep track of
- // the ancestor feerates and make the decision based on that,
- // but for now requiring all new inputs to be confirmed works.
- //
- // Note that if you relax this to make RBF a little more useful,
- // this may break the CalculateMempoolAncestors RBF relaxation,
- // above. See the comment above the first CalculateMempoolAncestors
- // call for more info.
- if (!setConflictsParents.count(tx.vin[j].prevout.hash))
- {
- // Rather than check the UTXO set - potentially expensive -
- // it's cheaper to just check if the new input refers to a
- // tx that's in the mempool.
- if (pool.exists(tx.vin[j].prevout.hash)) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
- strprintf("replacement %s adds unconfirmed input, idx %d",
- hash.ToString(), j));
- }
- }
+ setConflictsParents.insert(txin.prevout.hash);
}
- // The replacement must pay greater fees than the transactions it
- // replaces - if we did the bandwidth used by those conflicting
- // transactions would not be paid for.
- if (nModifiedFees < nConflictingFees)
- {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
- strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
- hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
+ nConflictingCount += mi->GetCountWithDescendants();
+ }
+ // This potentially overestimates the number of actual descendants
+ // but we just want to be conservative to avoid doing too much
+ // work.
+ if (nConflictingCount <= maxDescendantsToVisit) {
+ // If not too many to replace, then calculate the set of
+ // transactions that would have to be evicted
+ for (CTxMemPool::txiter it : setIterConflicting) {
+ m_pool.CalculateDescendants(it, allConflicting);
}
-
- // Finally in addition to paying more fees than the conflicts the
- // new transaction must pay for its own bandwidth.
- CAmount nDeltaFees = nModifiedFees - nConflictingFees;
- if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
- {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
- strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
- hash.ToString(),
- FormatMoney(nDeltaFees),
- FormatMoney(::incrementalRelayFee.GetFee(nSize))));
+ for (CTxMemPool::txiter it : allConflicting) {
+ nConflictingFees += it->GetModifiedFee();
+ nConflictingSize += it->GetTxSize();
}
+ } else {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
+ strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
+ hash.ToString(),
+ nConflictingCount,
+ maxDescendantsToVisit));
}
- constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
-
- // Check against previous transactions
- // The first loop above does all the inexpensive checks.
- // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
- // Helps prevent CPU exhaustion denial-of-service attacks.
- PrecomputedTransactionData txdata(tx);
- if (!CheckInputs(tx, state, view, scriptVerifyFlags, true, false, txdata)) {
- // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
- // need to turn both off, and compare against just turning off CLEANSTACK
- // to see if the failure is specifically due to witness validation.
- CValidationState stateDummy; // Want reported failures to be from first CheckInputs
- if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
- !CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
- // Only the witness is missing, so the transaction itself may be fine.
- state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
- state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
+ for (unsigned int j = 0; j < tx.vin.size(); j++)
+ {
+ // We don't want to accept replacements that require low
+ // feerate junk to be mined first. Ideally we'd keep track of
+ // the ancestor feerates and make the decision based on that,
+ // but for now requiring all new inputs to be confirmed works.
+ //
+ // Note that if you relax this to make RBF a little more useful,
+ // this may break the CalculateMempoolAncestors RBF relaxation,
+ // above. See the comment above the first CalculateMempoolAncestors
+ // call for more info.
+ if (!setConflictsParents.count(tx.vin[j].prevout.hash))
+ {
+ // Rather than check the UTXO set - potentially expensive -
+ // it's cheaper to just check if the new input refers to a
+ // tx that's in the mempool.
+ if (m_pool.exists(tx.vin[j].prevout.hash)) {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
+ strprintf("replacement %s adds unconfirmed input, idx %d",
+ hash.ToString(), j));
+ }
}
- assert(IsTransactionReason(state.GetReason()));
- return false; // state filled in by CheckInputs
- }
-
- // Check again against the current block tip's script verification
- // flags to cache our script execution flags. This is, of course,
- // useless if the next block has different script flags from the
- // previous one, but because the cache tracks script flags for us it
- // will auto-invalidate and we'll just have a few blocks of extra
- // misses on soft-fork activation.
- //
- // This is also useful in case of bugs in the standard flags that cause
- // transactions to pass as valid when they're actually invalid. For
- // instance the STRICTENC flag was incorrectly allowing certain
- // CHECKSIG NOT scripts to pass, even though they were invalid.
- //
- // There is a similar check in CreateNewBlock() to prevent creating
- // invalid blocks (using TestBlockValidity), however allowing such
- // transactions into the mempool can be exploited as a DoS attack.
- unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
- if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
- return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
}
- if (test_accept) {
- // Tx was accepted, but not added
- return true;
+    // The replacement must pay greater fees than the transactions it
+    // replaces - otherwise the bandwidth used by those conflicting
+    // transactions would not be paid for.
+ if (nModifiedFees < nConflictingFees)
+ {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
+ strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
+ hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
}
- // Remove conflicting transactions from the mempool
- for (CTxMemPool::txiter it : allConflicting)
+ // Finally in addition to paying more fees than the conflicts the
+ // new transaction must pay for its own bandwidth.
+ CAmount nDeltaFees = nModifiedFees - nConflictingFees;
+ if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
{
- LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
- it->GetTx().GetHash().ToString(),
- hash.ToString(),
- FormatMoney(nModifiedFees - nConflictingFees),
- (int)nSize - (int)nConflictingSize);
- if (plTxnReplaced)
- plTxnReplaced->push_back(it->GetSharedTx());
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
+ strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
+ hash.ToString(),
+ FormatMoney(nDeltaFees),
+ FormatMoney(::incrementalRelayFee.GetFee(nSize))));
}
- pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
+ }
+ return true;
+}
+
+bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
+{
+ const CTransaction& tx = *ws.m_ptx;
- // This transaction should only count for fee estimation if:
- // - it isn't a BIP 125 replacement transaction (may not be widely supported)
- // - it's not being re-added during a reorg which bypasses typical mempool fee limits
- // - the node is not behind
- // - the transaction is not dependent on any other transactions in the mempool
- bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
+ CValidationState &state = args.m_state;
- // Store transaction in memory
- pool.addUnchecked(entry, setAncestors, validForFeeEstimation);
+ constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
- // trim mempool and check if tx was trimmed
- if (!bypass_limits) {
- LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
- if (!pool.exists(hash))
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
+ // Check against previous transactions
+ // This is done last to help prevent CPU exhaustion denial-of-service attacks.
+ if (!CheckInputs(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
+ // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
+ // need to turn both off, and compare against just turning off CLEANSTACK
+ // to see if the failure is specifically due to witness validation.
+ CValidationState stateDummy; // Want reported failures to be from first CheckInputs
+ if (!tx.HasWitness() && CheckInputs(tx, stateDummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
+ !CheckInputs(tx, stateDummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
+ // Only the witness is missing, so the transaction itself may be fine.
+ state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
}
+ assert(IsTransactionReason(state.GetReason()));
+ return false; // state filled in by CheckInputs
+ }
+
+ return true;
+}
+
+bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
+{
+ const CTransaction& tx = *ws.m_ptx;
+ const uint256& hash = ws.m_hash;
+
+ CValidationState &state = args.m_state;
+ const CChainParams& chainparams = args.m_chainparams;
+
+ // Check again against the current block tip's script verification
+ // flags to cache our script execution flags. This is, of course,
+ // useless if the next block has different script flags from the
+ // previous one, but because the cache tracks script flags for us it
+ // will auto-invalidate and we'll just have a few blocks of extra
+ // misses on soft-fork activation.
+ //
+ // This is also useful in case of bugs in the standard flags that cause
+ // transactions to pass as valid when they're actually invalid. For
+ // instance the STRICTENC flag was incorrectly allowing certain
+ // CHECKSIG NOT scripts to pass, even though they were invalid.
+ //
+ // There is a similar check in CreateNewBlock() to prevent creating
+ // invalid blocks (using TestBlockValidity), however allowing such
+ // transactions into the mempool can be exploited as a DoS attack.
+ unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
+ if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, true, txdata)) {
+ return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
+ __func__, hash.ToString(), FormatStateMessage(state));
+ }
+
+ return true;
+}
+
+bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
+{
+ const CTransaction& tx = *ws.m_ptx;
+ const uint256& hash = ws.m_hash;
+ CValidationState &state = args.m_state;
+ const bool bypass_limits = args.m_bypass_limits;
+
+ CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
+ CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
+ const CAmount& nModifiedFees = ws.m_modified_fees;
+ const CAmount& nConflictingFees = ws.m_conflicting_fees;
+ const size_t& nConflictingSize = ws.m_conflicting_size;
+ const bool fReplacementTransaction = ws.m_replacement_transaction;
+ std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
+
+ // Remove conflicting transactions from the mempool
+ for (CTxMemPool::txiter it : allConflicting)
+ {
+ LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
+ it->GetTx().GetHash().ToString(),
+ hash.ToString(),
+ FormatMoney(nModifiedFees - nConflictingFees),
+ (int)entry->GetTxSize() - (int)nConflictingSize);
+ if (args.m_replaced_transactions)
+ args.m_replaced_transactions->push_back(it->GetSharedTx());
+ }
+ m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
+
+ // This transaction should only count for fee estimation if:
+ // - it isn't a BIP 125 replacement transaction (may not be widely supported)
+ // - it's not being re-added during a reorg which bypasses typical mempool fee limits
+ // - the node is not behind
+ // - the transaction is not dependent on any other transactions in the mempool
+ bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx);
+
+ // Store transaction in memory
+ m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
+
+ // trim mempool and check if tx was trimmed
+ if (!bypass_limits) {
+ LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
+ if (!m_pool.exists(hash))
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
}
+ return true;
+}
+
+bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
+{
+ AssertLockHeld(cs_main);
+ LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
+
+ Workspace workspace(ptx);
+
+ if (!PreChecks(args, workspace)) return false;
+
+ // Only compute the precomputed transaction data if we need to verify
+ // scripts (ie, other policy checks pass). We perform the inexpensive
+ // checks first and avoid hashing and signature verification unless those
+ // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
+ PrecomputedTransactionData txdata(*ptx);
+
+ if (!PolicyScriptChecks(args, workspace, txdata)) return false;
+
+ if (!ConsensusScriptChecks(args, workspace, txdata)) return false;
+
+ // Tx was accepted, but not added
+ if (args.m_test_accept) return true;
+
+ if (!Finalize(args, workspace)) return false;
GetMainSignals().TransactionAddedToMempool(ptx);
return true;
}
+} // anon namespace
+
/** (try to) add transaction to memory pool with a specified acceptance time **/
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
std::vector<COutPoint> coins_to_uncache;
- bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);
+ MemPoolAccept::ATMPArgs args { chainparams, state, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept };
+ bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
if (!res) {
// Remove coins that were not present in the coins cache before calling ATMPW;
// this is to prevent memory DoS in case we receive a large number of
@@ -1828,7 +1981,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
- // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
+ // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely.
// This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
@@ -3100,6 +3253,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-multiple", "more than one coinbase");
// Check transactions
+ // Must check for duplicate inputs (see CVE-2018-17144)
for (const auto& tx : block.vtx)
if (!CheckTransaction(*tx, state, true))
return state.Invalid(state.GetReason(), false, state.GetRejectCode(), state.GetRejectReason(),
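
A note on the replacement (RBF) fee rules that MemPoolAccept::PreChecks enforces above: a replacement may not pay less total fees than everything it would evict, and the fee delta must additionally cover relay of the replacement's own size at the incremental relay feerate. A minimal Python sketch of that arithmetic, assuming the default -incrementalrelayfee of 1000 sat/kvB and ignoring CFeeRate rounding details (the helper name is illustrative, not part of the codebase):

def replacement_pays_enough(modified_fees, conflicting_fees, vsize,
                            incremental_relay_sat_per_kvb=1000):
    """Sketch of the two fee checks at the end of MemPoolAccept::PreChecks."""
    # The replacement may not pay less than the transactions it evicts.
    if modified_fees < conflicting_fees:
        return False
    # On top of that, the fee delta must pay for the replacement's own bandwidth.
    incremental_fee = incremental_relay_sat_per_kvb * vsize // 1000
    return modified_fees - conflicting_fees >= incremental_fee

# Evicting 1500 sat of conflicting fees with a 110-vbyte replacement needs
# at least 1500 + 110 = 1610 sat in (modified) fees.
assert replacement_pays_enough(1610, 1500, 110)
assert not replacement_pays_enough(1609, 1500, 110)

The "too many potential replacements" branch above rejects before any of this fee accounting happens, whenever evicting the conflicts would require visiting too many mempool descendants.
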
diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py
index 454eb583f7..9dc06422c4 100644
--- a/test/functional/data/invalid_txs.py
+++ b/test/functional/data/invalid_txs.py
@@ -24,7 +24,24 @@ import abc
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint
from test_framework import script as sc
from test_framework.blocktools import create_tx_with_script, MAX_BLOCK_SIGOPS
-
+from test_framework.script import (
+ CScript,
+ OP_CAT,
+ OP_SUBSTR,
+ OP_LEFT,
+ OP_RIGHT,
+ OP_INVERT,
+ OP_AND,
+ OP_OR,
+ OP_XOR,
+ OP_2MUL,
+ OP_2DIV,
+ OP_MUL,
+ OP_DIV,
+ OP_MOD,
+ OP_LSHIFT,
+ OP_RSHIFT
+)
basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
@@ -178,7 +195,44 @@ class TooManySigops(BadTxTemplate):
script_pub_key=lotsa_checksigs,
amount=1)
+def getDisabledOpcodeTemplate(opcode):
+ """ Creates disabled opcode tx template class"""
+ def get_tx(self):
+ tx = CTransaction()
+ vin = self.valid_txin
+ vin.scriptSig = CScript([opcode])
+ tx.vin.append(vin)
+ tx.vout.append(CTxOut(1, basic_p2sh))
+ tx.calc_sha256()
+ return tx
+
+ return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
+ 'reject_reason': "disabled opcode",
+ 'expect_disconnect': True,
+ 'get_tx': get_tx,
+ 'valid_in_block' : True
+ })
+
+# Disabled opcode tx templates (CVE-2010-5137)
+DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
+ OP_CAT,
+ OP_SUBSTR,
+ OP_LEFT,
+ OP_RIGHT,
+ OP_INVERT,
+ OP_AND,
+ OP_OR,
+ OP_XOR,
+ OP_2MUL,
+ OP_2DIV,
+ OP_MUL,
+ OP_DIV,
+ OP_MOD,
+ OP_LSHIFT,
+ OP_RSHIFT]]
+
def iter_all_templates():
"""Iterate through all bad transaction template types."""
return BadTxTemplate.__subclasses__()
+
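
Because getDisabledOpcodeTemplate() builds each template with type() as a direct BadTxTemplate subclass, the generated DisabledOpcode_* classes are picked up by iter_all_templates() through __subclasses__() exactly like the hand-written templates. A small sketch, reusing only names defined or imported in this file; the asserts mirror the attributes the factory above sets:

cat_template = getDisabledOpcodeTemplate(OP_CAT)

assert cat_template.__name__.startswith('DisabledOpcode_')
assert cat_template.reject_reason == "disabled opcode"
assert cat_template.expect_disconnect
assert issubclass(cat_template, BadTxTemplate)
assert cat_template in iter_all_templates()
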
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 377e6c82cd..c74270febc 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -806,7 +806,7 @@ class FullBlockTest(BitcoinTestFramework):
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
- # the second one should be rejected.
+ # the second one should be rejected. See also CVE-2012-1909.
#
self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
self.move_tip(60)
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index b9db618575..d2826dd1b7 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -257,7 +257,7 @@ class SegWitTest(BitcoinTestFramework):
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
- txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
+ txid3 = self.nodes[0].sendrawtransaction(ToHex(tx), 0)
assert tx.wit.is_null()
assert txid3 in self.nodes[0].getrawmempool()
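
For context on the ", 0" added above (and to the wallet_basic.py call at the end of this diff): sendrawtransaction's optional second argument is now a fee-rate cap in BTC/kB rather than the old allowhighfees boolean, and 0 disables the cap, which these deliberately high-fee test transactions rely on. A hedged usage sketch, where node stands for any test framework RPC handle:

# Previously the optional second argument was the allowhighfees boolean, e.g.
#   node.sendrawtransaction(ToHex(tx), True)
# Now it is maxfeerate, a cap in BTC/kB; 0 means "accept any fee rate".
txid = node.sendrawtransaction(ToHex(tx), 0)
# The same call with keyword arguments, as used elsewhere in this diff:
txid = node.sendrawtransaction(hexstring=ToHex(tx), maxfeerate=0)
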
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 209a222004..dee7a04516 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -183,6 +183,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True}],
rawtxs=[tx.serialize().hex()],
+ maxfeerate=0,
)
self.log.info('A transaction with no outputs')
@@ -211,6 +212,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
rawtxs=[tx.serialize().hex()],
)
+ # The following two validations prevent overflow of the output amounts (see CVE-2010-5139).
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = 21000000 * COIN + 1
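
The "too large output value" cases referenced above exercise the amount checks in CheckTransaction(): every output must lie in [0, MAX_MONEY], and the running sum of outputs must stay in range as well, which is what closes the CVE-2010-5139 style overflow. A rough Python rendering of that rule (the reject strings follow the consensus checks and are shown here for illustration):

MAX_MONEY = 21000000 * 100000000  # in satoshis

def check_output_values(output_values):
    """Sketch of the output-amount range checks in CheckTransaction()."""
    total = 0
    for value in output_values:
        if value < 0:
            return False, "bad-txns-vout-negative"
        if value > MAX_MONEY:
            return False, "bad-txns-vout-toolarge"
        total += value
        if total > MAX_MONEY:
            return False, "bad-txns-txouttotal-toolarge"
    return True, ""

# A single output above MAX_MONEY, or outputs that only overflow when summed,
# are both rejected, matching the two cases tested above.
assert check_output_values([MAX_MONEY + 1])[1] == "bad-txns-vout-toolarge"
assert check_output_values([MAX_MONEY, 1])[1] == "bad-txns-txouttotal-toolarge"
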
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index 1e0b876593..905534b862 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -53,10 +53,11 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
block_time = best_block["time"] + 1
# Use merkle-root malleability to generate an invalid block with
- # same blockheader.
+ # same blockheader (CVE-2012-2459).
# Manufacture a block with 3 transactions (coinbase, spend of prior
# coinbase, spend of that spend). Duplicate the 3rd transaction to
# leave merkle root and blockheader unchanged but invalidate the block.
+ # For more information on merkle-root malleability see src/consensus/merkle.cpp.
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
@@ -81,15 +82,16 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
- # Check transactions for duplicate inputs
+ # Check transactions for duplicate inputs (CVE-2018-17144)
self.log.info("Test duplicate input block.")
- block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0])
- block2_orig.vtx[2].rehash()
- block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
- block2_orig.rehash()
- block2_orig.solve()
- node.p2p.send_blocks_and_test([block2_orig], node, success=False, reject_reason='bad-txns-inputs-duplicate')
+ block2_dup = copy.deepcopy(block2_orig)
+ block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
+ block2_dup.vtx[2].rehash()
+ block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
+ block2_dup.rehash()
+ block2_dup.solve()
+ node.p2p.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
@@ -105,5 +107,31 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
+ # Complete testing of CVE-2012-2459 by sending the original block.
+ # It should be accepted even though it has the same hash as the mutated one.
+
+ self.log.info("Test accepting original block after rejecting its mutated version.")
+ node.p2p.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
+
+ # Update tip info
+ height += 1
+ block_time += 1
+ tip = int(block2_orig.hash, 16)
+
+    # Complete testing of CVE-2018-17144 by checking for the inflation bug.
+ # Create a block that spends the output of a tx in a previous block.
+ block4 = create_block(tip, create_coinbase(height), block_time)
+ tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)
+
+ # Duplicates input
+ tx3.vin.append(tx3.vin[0])
+ tx3.rehash()
+ block4.vtx.append(tx3)
+ block4.hashMerkleRoot = block4.calc_merkle_root()
+ block4.rehash()
+ block4.solve()
+ self.log.info("Test inflation by duplicating input")
+ node.p2p.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
+
if __name__ == '__main__':
InvalidBlockRequestTest().main()
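
The merkle-root malleability exercised above (CVE-2012-2459) exists because Bitcoin's merkle computation duplicates the last hash of any level with an odd number of entries, so the transaction lists [a, b, c] and [a, b, c, c] hash to the same root. A self-contained sketch of that property using the same double-SHA256 construction (illustrative only, not the test framework's own helper):

import hashlib

def dsha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def merkle_root(hashes):
    """Bitcoin-style merkle root: odd-length levels duplicate their last entry."""
    hashes = list(hashes)
    while len(hashes) > 1:
        if len(hashes) % 2:
            hashes.append(hashes[-1])
        hashes = [dsha256(hashes[i] + hashes[i + 1]) for i in range(0, len(hashes), 2)]
    return hashes[0]

a, b, c = (dsha256(bytes([i])) for i in range(3))
# Duplicating the final transaction leaves the root unchanged, which is what
# lets block2 above be mutated without touching its header.
assert merkle_root([a, b, c]) == merkle_root([a, b, c, c])
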
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 5a04e0c8d8..cb8320a6c2 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -382,6 +382,16 @@ class PSBTTest(BitcoinTestFramework):
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
+ # Check that joining shuffles the inputs and outputs
+ # 10 attempts should be enough to get a shuffled join
+ shuffled = False
+ for i in range(0, 10):
+ shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
+ shuffled |= joined != shuffled_joined
+ if shuffled:
+ break
+ assert shuffled
+
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
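
On the joinpsbts shuffle check further up: the loop retries because a random shuffle can coincidentally reproduce the first ordering, so a single comparison could fail spuriously. A back-of-the-envelope estimate, assuming (this is an assumption, not something the diff states) that joinpsbts shuffles the 4 inputs and 2 outputs uniformly and independently on each call:

from math import factorial

# Chance that one attempt reproduces the exact ordering of the first join:
p_same = 1 / (factorial(4) * factorial(2))   # 1/48
# Chance that all 10 attempts do, i.e. the assert above fails by bad luck:
p_never_shuffled = p_same ** 10              # roughly 1.5e-17
print(p_same, p_never_shuffled)
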
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 4338675270..d1ce97efff 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -432,17 +432,18 @@ class RawTransactionsTest(BitcoinTestFramework):
self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate')
+    # Test a transaction with a small fee
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{ "txid" : txId, "vout" : vout['n'] }]
- outputs = { self.nodes[0].getnewaddress() : Decimal("0.99999000") } # 1000 sat fee
+    outputs = { self.nodes[0].getnewaddress() : Decimal("0.99990000") } # 10000 sat fee
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
- # 1000 sat fee, ~100 b transaction, fee rate should land around 10 sat/b = 0.00010000 BTC/kB
+ # 10000 sat fee, ~100 b transaction, fee rate should land around 100 sat/b = 0.00100000 BTC/kB
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']], 0.00001000)[0]
assert_equal(testres['allowed'], False)
@@ -450,9 +451,32 @@ class RawTransactionsTest(BitcoinTestFramework):
# and sendrawtransaction should throw
assert_raises_rpc_error(-26, "absurdly-high-fee", self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
# And below calls should both succeed
- testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.00070000')[0]
+ testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']])[0]
assert_equal(testres['allowed'], True)
- self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.00070000')
+ self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'])
+
+    # Test a transaction with a large fee
+ txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
+ rawTx = self.nodes[0].getrawtransaction(txId, True)
+ vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
+
+ self.sync_all()
+ inputs = [{ "txid" : txId, "vout" : vout['n'] }]
+ outputs = { self.nodes[0].getnewaddress() : Decimal("0.98000000") } # 2000000 sat fee
+ rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
+ assert_equal(rawTxSigned['complete'], True)
+ # 2000000 sat fee, ~100 b transaction, fee rate should land around 20000 sat/b = 0.20000000 BTC/kB
+ # Thus, testmempoolaccept should reject
+ testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']])[0]
+ assert_equal(testres['allowed'], False)
+ assert_equal(testres['reject-reason'], '256: absurdly-high-fee')
+ # and sendrawtransaction should throw
+ assert_raises_rpc_error(-26, "absurdly-high-fee", self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
+ # And below calls should both succeed
+ testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.20000000')[0]
+ assert_equal(testres['allowed'], True)
+ self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.20000000')
if __name__ == '__main__':
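
The maxfeerate cases above come down to simple arithmetic: the fee is the input total minus the output total, and the rate compared against maxfeerate is expressed in BTC/kB. A small sketch of the two scenarios, using the approximate ~100-byte size from the comments above:

from decimal import Decimal

def feerate_btc_per_kb(input_btc, output_btc, tx_size_bytes):
    """Fee rate in BTC/kB, the unit the maxfeerate RPC arguments use."""
    fee = Decimal(input_btc) - Decimal(output_btc)
    return fee * 1000 / tx_size_bytes

# Small-fee case: 10000 sat over ~100 bytes is about 0.001 BTC/kB, so it is
# rejected with an explicit maxfeerate of 0.00001000 but accepted otherwise.
assert feerate_btc_per_kb("1.0", "0.9999", 100) == Decimal("0.001")
# Large-fee case: 2000000 sat over ~100 bytes is about 0.2 BTC/kB, rejected as
# absurdly-high-fee by default and accepted once maxfeerate is raised to 0.2.
assert feerate_btc_per_kb("1.0", "0.98", 100) == Decimal("0.2")
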
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index ce04110240..2283bff2e6 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -433,7 +433,7 @@ class WalletTest(BitcoinTestFramework):
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
- singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
+ singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"], 0)
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit