-rw-r--r--  depends/packages/miniupnpc.mk | 4
-rw-r--r--  doc/fuzzing.md | 8
-rw-r--r--  src/Makefile.am | 2
-rw-r--r--  src/Makefile.test.include | 1
-rw-r--r--  src/bench/bench.cpp | 1
-rw-r--r--  src/bench/bench.h | 5
-rw-r--r--  src/bloom.cpp | 8
-rw-r--r--  src/bloom.h | 8
-rw-r--r--  src/consensus/tx_verify.cpp | 246
-rw-r--r--  src/consensus/tx_verify.h | 78
-rw-r--r--  src/fs.h | 2
-rw-r--r--  src/httprpc.cpp | 4
-rw-r--r--  src/init.cpp | 2
-rw-r--r--  src/miner.cpp | 1
-rw-r--r--  src/net.cpp | 9
-rw-r--r--  src/netbase.cpp | 15
-rw-r--r--  src/netbase.h | 2
-rw-r--r--  src/policy/fees.cpp | 619
-rw-r--r--  src/policy/fees.h | 190
-rw-r--r--  src/policy/policy.cpp | 8
-rw-r--r--  src/qt/coincontroldialog.cpp | 4
-rw-r--r--  src/qt/forms/sendcoinsdialog.ui | 13
-rw-r--r--  src/qt/rpcconsole.cpp | 6
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 26
-rw-r--r--  src/qt/sendcoinsdialog.h | 3
-rw-r--r--  src/qt/transactionview.cpp | 25
-rw-r--r--  src/qt/transactionview.h | 2
-rw-r--r--  src/qt/walletmodel.cpp | 83
-rw-r--r--  src/qt/walletmodel.h | 3
-rw-r--r--  src/random.cpp | 33
-rw-r--r--  src/random.h | 7
-rw-r--r--  src/rpc/blockchain.cpp | 22
-rw-r--r--  src/rpc/client.cpp | 4
-rw-r--r--  src/rpc/mining.cpp | 105
-rw-r--r--  src/rpc/rawtransaction.cpp | 5
-rw-r--r--  src/rpc/server.cpp | 8
-rw-r--r--  src/rpc/server.h | 12
-rw-r--r--  src/scheduler.cpp | 6
-rw-r--r--  src/scheduler.h | 4
-rw-r--r--  src/test/addrman_tests.cpp | 10
-rw-r--r--  src/test/miner_tests.cpp | 1
-rw-r--r--  src/test/policyestimator_tests.cpp | 47
-rw-r--r--  src/test/script_P2SH_tests.cpp | 1
-rw-r--r--  src/test/sighash_tests.cpp | 2
-rw-r--r--  src/test/sigopcount_tests.cpp | 2
-rw-r--r--  src/test/test_bitcoin_fuzzy.cpp | 23
-rw-r--r--  src/test/torcontrol_tests.cpp | 199
-rw-r--r--  src/test/transaction_tests.cpp | 3
-rw-r--r--  src/torcontrol.cpp | 91
-rw-r--r--  src/txdb.cpp | 2
-rw-r--r--  src/txdb.h | 4
-rw-r--r--  src/txmempool.cpp | 4
-rw-r--r--  src/txmempool.h | 2
-rw-r--r--  src/validation.cpp | 250
-rw-r--r--  src/validation.h | 53
-rw-r--r--  src/versionbits.cpp | 36
-rw-r--r--  src/versionbits.h | 10
-rw-r--r--  src/wallet/coincontrol.h | 3
-rw-r--r--  src/wallet/feebumper.cpp | 50
-rw-r--r--  src/wallet/feebumper.h | 3
-rw-r--r--  src/wallet/rpcdump.cpp | 9
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 62
-rw-r--r--  src/wallet/wallet.cpp | 5
-rw-r--r--  src/wallet/walletdb.cpp | 2
-rwxr-xr-x  test/functional/bip9-softforks.py | 51
-rwxr-xr-x  test/functional/prioritise_transaction.py | 15
-rwxr-xr-x  test/functional/signrawtransactions.py | 27
-rwxr-xr-x  test/functional/smartfees.py | 4
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 13
-rwxr-xr-x  test/functional/test_runner.py | 29
-rwxr-xr-x  test/functional/wallet.py | 6
71 files changed, 1906 insertions, 697 deletions
diff --git a/depends/packages/miniupnpc.mk b/depends/packages/miniupnpc.mk
index e34cf7be2f..1bb8cb5d26 100644
--- a/depends/packages/miniupnpc.mk
+++ b/depends/packages/miniupnpc.mk
@@ -1,8 +1,8 @@
package=miniupnpc
-$(package)_version=2.0
+$(package)_version=2.0.20170509
$(package)_download_path=http://miniupnp.free.fr/files
$(package)_file_name=$(package)-$($(package)_version).tar.gz
-$(package)_sha256_hash=d434ceb8986efbe199c5ca53f90ed53eab290b1e6d0530b717eb6fa49d61f93b
+$(package)_sha256_hash=d3c368627f5cdfb66d3ebd64ca39ba54d6ff14a61966dbecb8dd296b7039f16a
define $(package)_set_vars
$(package)_build_opts=CC="$($(package)_cc)"
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index bf3ad17861..5dedcb51c8 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -32,6 +32,13 @@ We disable ccache because we don't want to pollute the ccache with instrumented
objects, and similarly don't want to use non-instrumented cached objects linked
in.
+The fuzzing can be sped up significantly (~200x) by using `afl-clang-fast` and
+`afl-clang-fast++` in place of `afl-gcc` and `afl-g++` when compiling. When
+compiling using `afl-clang-fast`/`afl-clang-fast++` the resulting
+`test_bitcoin_fuzzy` binary will be instrumented in such a way that the AFL
+features "persistent mode" and "deferred forkserver" can be used. See
+https://github.com/mcarpenter/afl/tree/master/llvm_mode for details.
+
Preparing fuzzing
------------------
@@ -63,4 +70,3 @@ $AFLPATH/afl-fuzz -i ${AFLIN} -o ${AFLOUT} -m52 -- test/test_bitcoin_fuzzy
You may have to change a few kernel parameters to test optimally - `afl-fuzz`
will print an error and suggestion if so.
-
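For context on the AFL features mentioned above, a minimal persistent-mode harness with a deferred forkserver typically looks like the sketch below. This is a generic illustration, not the actual test_bitcoin_fuzzy code: ProcessOneInput() is a hypothetical stand-in for the harness's real input handler, and the __AFL_* macros only exist when compiling with afl-clang-fast/afl-clang-fast++.

// Generic AFL persistent-mode harness sketch (ProcessOneInput is hypothetical).
#include <unistd.h>
#include <vector>

static bool ProcessOneInput(const std::vector<unsigned char>& buffer)
{
    // Hypothetical stand-in for the harness's real input handler.
    return !buffer.empty();
}

int main()
{
#ifdef __AFL_HAVE_MANUAL_CONTROL
    __AFL_INIT();              // deferred forkserver: fork only after startup work is done
    while (__AFL_LOOP(1000)) { // persistent mode: reuse the process for up to 1000 inputs
#else
    {
#endif
        std::vector<unsigned char> buffer(1 << 20);
        ssize_t n = read(0, buffer.data(), buffer.size());
        if (n > 0) {
            buffer.resize(n);
            ProcessOneInput(buffer);
        }
    }
    return 0;
}

When built without afl-clang-fast the block simply runs once per process, which matches the ordinary fork-per-input behaviour described earlier.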
diff --git a/src/Makefile.am b/src/Makefile.am
index 14d55a944f..ae2eb29c94 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -95,6 +95,7 @@ BITCOIN_CORE_H = \
compat/sanity.h \
compressor.h \
consensus/consensus.h \
+ consensus/tx_verify.h \
core_io.h \
core_memusage.h \
cuckoocache.h \
@@ -185,6 +186,7 @@ libbitcoin_server_a_SOURCES = \
blockencodings.cpp \
chain.cpp \
checkpoints.cpp \
+ consensus/tx_verify.cpp \
httprpc.cpp \
httpserver.cpp \
init.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 10cb7e775a..ee1c11ff1f 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -78,6 +78,7 @@ BITCOIN_TESTS =\
test/testutil.cpp \
test/testutil.h \
test/timedata_tests.cpp \
+ test/torcontrol_tests.cpp \
test/transaction_tests.cpp \
test/txvalidationcache_tests.cpp \
test/versionbits_tests.cpp \
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index b0df3d2b04..33631d2d15 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -5,6 +5,7 @@
#include "bench.h"
#include "perf.h"
+#include <assert.h>
#include <iostream>
#include <iomanip>
#include <sys/time.h>
diff --git a/src/bench/bench.h b/src/bench/bench.h
index f12a41126c..1f36f2a4bc 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -5,10 +5,11 @@
#ifndef BITCOIN_BENCH_BENCH_H
#define BITCOIN_BENCH_BENCH_H
+#include <functional>
+#include <limits>
#include <map>
#include <string>
-#include <boost/function.hpp>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/stringize.hpp>
@@ -59,7 +60,7 @@ namespace benchmark {
bool KeepRunning();
};
- typedef boost::function<void(State&)> BenchFunction;
+ typedef std::function<void(State&)> BenchFunction;
class BenchRunner
{
diff --git a/src/bloom.cpp b/src/bloom.cpp
index ac3e565721..7ed982c984 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -19,7 +19,7 @@
#define LN2SQUARED 0.4804530139182014246671025263266649717305529515945455
#define LN2 0.6931471805599453094172321214581765680755001343602552
-CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweakIn, unsigned char nFlagsIn) :
+CBloomFilter::CBloomFilter(const unsigned int nElements, const double nFPRate, const unsigned int nTweakIn, unsigned char nFlagsIn) :
/**
* The ideal size for a bloom filter with a given number of elements and false positive rate is:
* - nElements * log(fp rate) / ln(2)^2
@@ -40,7 +40,7 @@ CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int
}
// Private constructor used by CRollingBloomFilter
-CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweakIn) :
+CBloomFilter::CBloomFilter(const unsigned int nElements, const double nFPRate, const unsigned int nTweakIn) :
vData((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)) / 8),
isFull(false),
isEmpty(true),
@@ -120,7 +120,7 @@ void CBloomFilter::clear()
isEmpty = true;
}
-void CBloomFilter::reset(unsigned int nNewTweak)
+void CBloomFilter::reset(const unsigned int nNewTweak)
{
clear();
nTweak = nNewTweak;
@@ -214,7 +214,7 @@ void CBloomFilter::UpdateEmptyFull()
isEmpty = empty;
}
-CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate)
+CRollingBloomFilter::CRollingBloomFilter(const unsigned int nElements, const double fpRate)
{
double logFpRate = log(fpRate);
/* The optimal number of hash functions is log(fpRate) / log(0.5), but
diff --git a/src/bloom.h b/src/bloom.h
index 5ad727c330..7ca9682239 100644
--- a/src/bloom.h
+++ b/src/bloom.h
@@ -54,7 +54,7 @@ private:
unsigned int Hash(unsigned int nHashNum, const std::vector<unsigned char>& vDataToHash) const;
// Private constructor for CRollingBloomFilter, no restrictions on size
- CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+ CBloomFilter(const unsigned int nElements, const double nFPRate, const unsigned int nTweak);
friend class CRollingBloomFilter;
public:
@@ -67,7 +67,7 @@ public:
* It should generally always be a random value (and is largely only exposed for unit testing)
* nFlags should be one of the BLOOM_UPDATE_* enums (not _MASK)
*/
- CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak, unsigned char nFlagsIn);
+ CBloomFilter(const unsigned int nElements, const double nFPRate, const unsigned int nTweak, unsigned char nFlagsIn);
CBloomFilter() : isFull(true), isEmpty(false), nHashFuncs(0), nTweak(0), nFlags(0) {}
ADD_SERIALIZE_METHODS;
@@ -89,7 +89,7 @@ public:
bool contains(const uint256& hash) const;
void clear();
- void reset(unsigned int nNewTweak);
+ void reset(const unsigned int nNewTweak);
//! True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS
//! (catch a filter which was just deserialized which was too big)
@@ -122,7 +122,7 @@ public:
// A random bloom filter calls GetRand() at creation time.
// Don't create global CRollingBloomFilter objects, as they may be
// constructed before the randomizer is properly initialized.
- CRollingBloomFilter(unsigned int nElements, double nFPRate);
+ CRollingBloomFilter(const unsigned int nElements, const double nFPRate);
void insert(const std::vector<unsigned char>& vKey);
void insert(const uint256& hash);
diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp
new file mode 100644
index 0000000000..043f4cf95c
--- /dev/null
+++ b/src/consensus/tx_verify.cpp
@@ -0,0 +1,246 @@
+// Copyright (c) 2017-2017 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "tx_verify.h"
+
+#include "consensus.h"
+#include "primitives/transaction.h"
+#include "script/interpreter.h"
+#include "validation.h"
+
+// TODO remove the following dependencies
+#include "chain.h"
+#include "coins.h"
+#include "utilmoneystr.h"
+
+bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
+{
+ if (tx.nLockTime == 0)
+ return true;
+ if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
+ return true;
+ for (const auto& txin : tx.vin) {
+ if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
+ return false;
+ }
+ return true;
+}
+
+std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
+{
+ assert(prevHeights->size() == tx.vin.size());
+
+ // Will be set to the equivalent height- and time-based nLockTime
+ // values that would be necessary to satisfy all relative lock-
+ // time constraints given our view of block chain history.
+ // The semantics of nLockTime are the last invalid height/time, so
+ // use -1 to have the effect of any height or time being valid.
+ int nMinHeight = -1;
+ int64_t nMinTime = -1;
+
+ // tx.nVersion is signed integer so requires cast to unsigned otherwise
+ // we would be doing a signed comparison and half the range of nVersion
+ // wouldn't support BIP 68.
+ bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
+ && flags & LOCKTIME_VERIFY_SEQUENCE;
+
+ // Do not enforce sequence numbers as a relative lock time
+ // unless we have been instructed to
+ if (!fEnforceBIP68) {
+ return std::make_pair(nMinHeight, nMinTime);
+ }
+
+ for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
+ const CTxIn& txin = tx.vin[txinIndex];
+
+ // Sequence numbers with the most significant bit set are not
+ // treated as relative lock-times, nor are they given any
+ // consensus-enforced meaning at this point.
+ if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
+ // The height of this input is not relevant for sequence locks
+ (*prevHeights)[txinIndex] = 0;
+ continue;
+ }
+
+ int nCoinHeight = (*prevHeights)[txinIndex];
+
+ if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
+ int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
+ // NOTE: Subtract 1 to maintain nLockTime semantics
+ // BIP 68 relative lock times have the semantics of calculating
+ // the first block or time at which the transaction would be
+ // valid. When calculating the effective block time or height
+ // for the entire transaction, we switch to using the
+ // semantics of nLockTime which is the last invalid block
+ // time or height. Thus we subtract 1 from the calculated
+ // time or height.
+
+ // Time-based relative lock-times are measured from the
+ // smallest allowed timestamp of the block containing the
+ // txout being spent, which is the median time past of the
+ // block prior.
+ nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
+ } else {
+ nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
+ }
+ }
+
+ return std::make_pair(nMinHeight, nMinTime);
+}
+
+bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
+{
+ assert(block.pprev);
+ int64_t nBlockTime = block.pprev->GetMedianTimePast();
+ if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
+ return false;
+
+ return true;
+}
+
+bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
+{
+ return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
+}
+
+unsigned int GetLegacySigOpCount(const CTransaction& tx)
+{
+ unsigned int nSigOps = 0;
+ for (const auto& txin : tx.vin)
+ {
+ nSigOps += txin.scriptSig.GetSigOpCount(false);
+ }
+ for (const auto& txout : tx.vout)
+ {
+ nSigOps += txout.scriptPubKey.GetSigOpCount(false);
+ }
+ return nSigOps;
+}
+
+unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
+{
+ if (tx.IsCoinBase())
+ return 0;
+
+ unsigned int nSigOps = 0;
+ for (unsigned int i = 0; i < tx.vin.size(); i++)
+ {
+ const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
+ if (prevout.scriptPubKey.IsPayToScriptHash())
+ nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
+ }
+ return nSigOps;
+}
+
+int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
+{
+ int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;
+
+ if (tx.IsCoinBase())
+ return nSigOps;
+
+ if (flags & SCRIPT_VERIFY_P2SH) {
+ nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
+ }
+
+ for (unsigned int i = 0; i < tx.vin.size(); i++)
+ {
+ const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
+ nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, &tx.vin[i].scriptWitness, flags);
+ }
+ return nSigOps;
+}
+
+bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fCheckDuplicateInputs)
+{
+ // Basic checks that don't depend on any context
+ if (tx.vin.empty())
+ return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
+ if (tx.vout.empty())
+ return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
+ // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
+ if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
+
+ // Check for negative or overflow output values
+ CAmount nValueOut = 0;
+ for (const auto& txout : tx.vout)
+ {
+ if (txout.nValue < 0)
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
+ if (txout.nValue > MAX_MONEY)
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
+ nValueOut += txout.nValue;
+ if (!MoneyRange(nValueOut))
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
+ }
+
+ // Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock
+ if (fCheckDuplicateInputs) {
+ std::set<COutPoint> vInOutPoints;
+ for (const auto& txin : tx.vin)
+ {
+ if (!vInOutPoints.insert(txin.prevout).second)
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
+ }
+ }
+
+ if (tx.IsCoinBase())
+ {
+ if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
+ return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
+ }
+ else
+ {
+ for (const auto& txin : tx.vin)
+ if (txin.prevout.IsNull())
+ return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
+ }
+
+ return true;
+}
+
+bool Consensus::CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
+{
+ // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
+ // for an attacker to attempt to split the network.
+ if (!inputs.HaveInputs(tx))
+ return state.Invalid(false, 0, "", "Inputs unavailable");
+
+ CAmount nValueIn = 0;
+ CAmount nFees = 0;
+ for (unsigned int i = 0; i < tx.vin.size(); i++)
+ {
+ const COutPoint &prevout = tx.vin[i].prevout;
+ const CCoins *coins = inputs.AccessCoins(prevout.hash);
+ assert(coins);
+
+ // If prev is coinbase, check that it's matured
+ if (coins->IsCoinBase()) {
+ if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
+ return state.Invalid(false,
+ REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
+ strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
+ }
+
+ // Check for negative or overflow input values
+ nValueIn += coins->vout[prevout.n].nValue;
+ if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");
+
+ }
+
+ if (nValueIn < tx.GetValueOut())
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
+ strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));
+
+ // Tally transaction fees
+ CAmount nTxFee = nValueIn - tx.GetValueOut();
+ if (nTxFee < 0)
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
+ nFees += nTxFee;
+ if (!MoneyRange(nFees))
+ return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
+ return true;
+}
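To make the BIP 68 arithmetic in CalculateSequenceLocks concrete, here is a standalone sketch (not part of the patch) that decodes a sequence number the same way the code above does. The constants mirror the CTxIn::SEQUENCE_* values referenced in the diff; the example input is made up.

#include <cstdint>
#include <cstdio>

// Mirrors of the CTxIn::SEQUENCE_* constants used by CalculateSequenceLocks above.
static const uint32_t DISABLE_FLAG = 1u << 31;   // SEQUENCE_LOCKTIME_DISABLE_FLAG
static const uint32_t TYPE_FLAG    = 1u << 22;   // SEQUENCE_LOCKTIME_TYPE_FLAG
static const uint32_t MASK         = 0x0000ffff; // SEQUENCE_LOCKTIME_MASK
static const int      GRANULARITY  = 9;          // SEQUENCE_LOCKTIME_GRANULARITY (units of 512 seconds)

int main()
{
    uint32_t nSequence = TYPE_FLAG | 10; // example: a time-based lock of 10 units
    if (nSequence & DISABLE_FLAG) {
        printf("no relative lock-time enforced\n");
    } else if (nSequence & TYPE_FLAG) {
        // Time-based: measured from the median time past of the block before the
        // one containing the spent output; the code then subtracts 1 to convert
        // "first valid time" into nLockTime's "last invalid time" semantics.
        int64_t seconds = (int64_t)(nSequence & MASK) << GRANULARITY;
        printf("time-based lock: %lld seconds (10 * 512)\n", (long long)seconds);
    } else {
        printf("height-based lock: %u blocks\n", (unsigned)(nSequence & MASK));
    }
    return 0;
}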
diff --git a/src/consensus/tx_verify.h b/src/consensus/tx_verify.h
new file mode 100644
index 0000000000..d46d3294ca
--- /dev/null
+++ b/src/consensus/tx_verify.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2017-2017 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_CONSENSUS_TX_VERIFY_H
+#define BITCOIN_CONSENSUS_TX_VERIFY_H
+
+#include <stdint.h>
+#include <vector>
+
+class CBlockIndex;
+class CCoinsViewCache;
+class CTransaction;
+class CValidationState;
+
+/** Transaction validation functions */
+
+/** Context-independent validity checks */
+bool CheckTransaction(const CTransaction& tx, CValidationState& state, bool fCheckDuplicateInputs=true);
+
+namespace Consensus {
+/**
+ * Check whether all inputs of this transaction are valid (no double spends and amounts)
+ * This does not modify the UTXO set. This does not check scripts and sigs.
+ * Preconditions: tx.IsCoinBase() is false.
+ */
+bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight);
+} // namespace Consensus
+
+/** Auxiliary functions for transaction validation (ideally should not be exposed) */
+
+/**
+ * Count ECDSA signature operations the old-fashioned (pre-0.6) way
+ * @return number of sigops this transaction's outputs will produce when spent
+ * @see CTransaction::FetchInputs
+ */
+unsigned int GetLegacySigOpCount(const CTransaction& tx);
+
+/**
+ * Count ECDSA signature operations in pay-to-script-hash inputs.
+ *
+ * @param[in] mapInputs Map of previous transactions that have outputs we're spending
+ * @return maximum number of sigops required to validate this transaction's inputs
+ * @see CTransaction::FetchInputs
+ */
+unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& mapInputs);
+
+/**
+ * Compute total signature operation cost of a transaction.
+ * @param[in] tx Transaction for which we are computing the cost
+ * @param[in] inputs Map of previous transactions that have outputs we're spending
+ * @param[in] flags Script verification flags
+ * @return Total signature operation cost of tx
+ */
+int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags);
+
+/**
+ * Check if transaction is final and can be included in a block with the
+ * specified height and time. Consensus critical.
+ */
+bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime);
+
+/**
+ * Calculates the block height and previous block's median time past at
+ * which the transaction will be considered final in the context of BIP 68.
+ * Also removes from the vector of input heights any entries which did not
+ * correspond to sequence locked inputs as they do not affect the calculation.
+ */
+std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block);
+
+bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair);
+/**
+ * Check if transaction is final per BIP 68 sequence numbers and can be included in a block.
+ * Consensus critical. Takes as input a list of heights at which tx's inputs (in order) confirmed.
+ */
+bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block);
+
+#endif // BITCOIN_CONSENSUS_TX_VERIFY_H
diff --git a/src/fs.h b/src/fs.h
index 585cbf9c38..abb4be254b 100644
--- a/src/fs.h
+++ b/src/fs.h
@@ -21,4 +21,4 @@ namespace fsbridge {
FILE *freopen(const fs::path& p, const char *mode, FILE *stream);
};
-#endif
+#endif // BITCOIN_FS_H
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 18a9819edd..5ab6d8d732 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -30,7 +30,7 @@ static const char* WWW_AUTH_HEADER_DATA = "Basic realm=\"jsonrpc\"";
class HTTPRPCTimer : public RPCTimerBase
{
public:
- HTTPRPCTimer(struct event_base* eventBase, boost::function<void(void)>& func, int64_t millis) :
+ HTTPRPCTimer(struct event_base* eventBase, std::function<void(void)>& func, int64_t millis) :
ev(eventBase, false, func)
{
struct timeval tv;
@@ -52,7 +52,7 @@ public:
{
return "HTTP";
}
- RPCTimerBase* NewTimer(boost::function<void(void)>& func, int64_t millis)
+ RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis)
{
return new HTTPRPCTimer(base, func, millis);
}
diff --git a/src/init.cpp b/src/init.cpp
index 93b4c80c01..3bbdb16c3b 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -59,7 +59,6 @@
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/bind.hpp>
-#include <boost/function.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <boost/thread.hpp>
#include <openssl/crypto.h>
@@ -215,6 +214,7 @@ void Shutdown()
if (fFeeEstimatesInitialized)
{
+ ::feeEstimator.FlushUnconfirmed(::mempool);
fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION);
if (!est_fileout.IsNull())
diff --git a/src/miner.cpp b/src/miner.cpp
index 9d2959723a..28b6f23d56 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -10,6 +10,7 @@
#include "chainparams.h"
#include "coins.h"
#include "consensus/consensus.h"
+#include "consensus/tx_verify.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "hash.h"
diff --git a/src/net.cpp b/src/net.cpp
index dd375e580f..198d8f5fff 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -148,7 +148,7 @@ static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn
// one by discovery.
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
{
- CAddress ret(CService(CNetAddr(),GetListenPort()), NODE_NONE);
+ CAddress ret(CService(CNetAddr(),GetListenPort()), nLocalServices);
CService addr;
if (GetLocal(addr, paddrPeer))
{
@@ -1071,12 +1071,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
// According to the internet TCP_NODELAY is not carried into accepted sockets
// on all platforms. Set it again here just to be sure.
- int set = 1;
-#ifdef WIN32
- setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (const char*)&set, sizeof(int));
-#else
- setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (void*)&set, sizeof(int));
-#endif
+ SetSocketNoDelay(hSocket);
if (IsBanned(addr) && !whitelisted)
{
diff --git a/src/netbase.cpp b/src/netbase.cpp
index bdc725359a..2584f571ee 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -428,18 +428,14 @@ bool static ConnectSocketDirectly(const CService &addrConnect, SOCKET& hSocketRe
if (hSocket == INVALID_SOCKET)
return false;
- int set = 1;
#ifdef SO_NOSIGPIPE
+ int set = 1;
// Different way of disabling SIGPIPE on BSD
setsockopt(hSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&set, sizeof(int));
#endif
//Disable Nagle's algorithm
-#ifdef WIN32
- setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (const char*)&set, sizeof(int));
-#else
- setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (void*)&set, sizeof(int));
-#endif
+ SetSocketNoDelay(hSocket);
// Set to non-blocking
if (!SetSocketNonBlocking(hSocket, true))
@@ -728,6 +724,13 @@ bool SetSocketNonBlocking(SOCKET& hSocket, bool fNonBlocking)
return true;
}
+bool SetSocketNoDelay(SOCKET& hSocket)
+{
+ int set = 1;
+ int rc = setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (const char*)&set, sizeof(int));
+ return rc == 0;
+}
+
void InterruptSocks5(bool interrupt)
{
interruptSocks5Recv = interrupt;
diff --git a/src/netbase.h b/src/netbase.h
index dd33b6e47e..c9d108aadd 100644
--- a/src/netbase.h
+++ b/src/netbase.h
@@ -59,6 +59,8 @@ std::string NetworkErrorString(int err);
bool CloseSocket(SOCKET& hSocket);
/** Disable or enable blocking-mode for a socket */
bool SetSocketNonBlocking(SOCKET& hSocket, bool fNonBlocking);
+/** Set the TCP_NODELAY flag on a socket */
+bool SetSocketNoDelay(SOCKET& hSocket);
/**
* Convert milliseconds to a struct timeval for e.g. select.
*/
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index bd169f875a..bad3de4b44 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -14,6 +14,8 @@
#include "txmempool.h"
#include "util.h"
+static constexpr double INF_FEERATE = 1e99;
+
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block. We will lump transactions into a bucket according to their
@@ -26,40 +28,43 @@ class TxConfirmStats
{
private:
//Define the buckets we will group transactions into
- std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive)
- std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
+ const std::vector<double>& buckets; // The upper-bound of the range for the bucket (inclusive)
+ const std::map<double, unsigned int>& bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
// For each bucket X:
// Count the total # of txs in each bucket
// Track the historical moving average of this total over blocks
std::vector<double> txCtAvg;
- // and calculate the total for the current block to update the moving average
- std::vector<int> curBlockTxCt;
// Count the total # of txs confirmed within Y blocks in each bucket
// Track the historical moving average of these totals over blocks
- std::vector<std::vector<double> > confAvg; // confAvg[Y][X]
- // and calculate the totals for the current block to update the moving averages
- std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X]
+ std::vector<std::vector<double>> confAvg; // confAvg[Y][X]
+
+ // Track moving avg of txs which have been evicted from the mempool
+ // after failing to be confirmed within Y blocks
+ std::vector<std::vector<double>> failAvg; // failAvg[Y][X]
// Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std::vector<double> avg;
- // and calculate the total for the current block to update the moving average
- std::vector<double> curBlockVal;
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
// Combine the total value with the tx counts to calculate the avg feerate per bucket
double decay;
+ // Resolution (# of blocks) with which confirmations are tracked
+ unsigned int scale;
+
// Mempool counts of outstanding transactions
// For each bucket X, track the number of transactions in the mempool
// that are unconfirmed for each possible confirmation value Y
std::vector<std::vector<int> > unconfTxs; //unconfTxs[Y][X]
- // transactions still unconfirmed after MAX_CONFIRMS for each bucket
+ // transactions still unconfirmed after GetMaxConfirms for each bucket
std::vector<int> oldUnconfTxs;
+ void resizeInMemoryCounters(size_t newbuckets);
+
public:
/**
* Create new TxConfirmStats. This is called by BlockPolicyEstimator's
@@ -68,9 +73,10 @@ public:
* @param maxConfirms max number of confirms to track
* @param decay how much to decay the historical moving average per block
*/
- TxConfirmStats(const std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay);
+ TxConfirmStats(const std::vector<double>& defaultBuckets, const std::map<double, unsigned int>& defaultBucketMap,
+ unsigned int maxPeriods, double decay, unsigned int scale);
- /** Clear the state of the curBlock variables to start counting for the new block */
+ /** Roll the circular buffer for unconfirmed txs */
void ClearCurrent(unsigned int nBlockHeight);
/**
@@ -86,7 +92,7 @@ public:
/** Remove a transaction from mempool tracking stats*/
void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight,
- unsigned int bucketIndex);
+ unsigned int bucketIndex, bool inBlock);
/** Update our estimates by decaying our historical moving average and updating
with the data gathered from the current block */
@@ -104,10 +110,11 @@ public:
* @param nBlockHeight the current block height
*/
double EstimateMedianVal(int confTarget, double sufficientTxVal,
- double minSuccess, bool requireGreater, unsigned int nBlockHeight) const;
+ double minSuccess, bool requireGreater, unsigned int nBlockHeight,
+ EstimationResult *result = nullptr) const;
/** Return the max number of confirms we're tracking */
- unsigned int GetMaxConfirms() const { return confAvg.size(); }
+ unsigned int GetMaxConfirms() const { return scale * confAvg.size(); }
/** Write state of estimation data to a file*/
void Write(CAutoFile& fileout) const;
@@ -116,44 +123,47 @@ public:
* Read saved state of estimation data from a file and replace all internal data structures and
* variables with this state.
*/
- void Read(CAutoFile& filein);
+ void Read(CAutoFile& filein, int nFileVersion, size_t numBuckets);
};
TxConfirmStats::TxConfirmStats(const std::vector<double>& defaultBuckets,
- unsigned int maxConfirms, double _decay)
+ const std::map<double, unsigned int>& defaultBucketMap,
+ unsigned int maxPeriods, double _decay, unsigned int _scale)
+ : buckets(defaultBuckets), bucketMap(defaultBucketMap)
{
decay = _decay;
- for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
- buckets.push_back(defaultBuckets[i]);
- bucketMap[defaultBuckets[i]] = i;
- }
- confAvg.resize(maxConfirms);
- curBlockConf.resize(maxConfirms);
- unconfTxs.resize(maxConfirms);
- for (unsigned int i = 0; i < maxConfirms; i++) {
+ scale = _scale;
+ confAvg.resize(maxPeriods);
+ for (unsigned int i = 0; i < maxPeriods; i++) {
confAvg[i].resize(buckets.size());
- curBlockConf[i].resize(buckets.size());
- unconfTxs[i].resize(buckets.size());
+ }
+ failAvg.resize(maxPeriods);
+ for (unsigned int i = 0; i < maxPeriods; i++) {
+ failAvg[i].resize(buckets.size());
}
- oldUnconfTxs.resize(buckets.size());
- curBlockTxCt.resize(buckets.size());
txCtAvg.resize(buckets.size());
- curBlockVal.resize(buckets.size());
avg.resize(buckets.size());
+
+ resizeInMemoryCounters(buckets.size());
}
-// Zero out the data for the current block
+void TxConfirmStats::resizeInMemoryCounters(size_t newbuckets) {
+ // newbuckets must be passed in because the buckets referred to during Read have not been updated yet.
+ unconfTxs.resize(GetMaxConfirms());
+ for (unsigned int i = 0; i < unconfTxs.size(); i++) {
+ unconfTxs[i].resize(newbuckets);
+ }
+ oldUnconfTxs.resize(newbuckets);
+}
+
+// Roll the unconfirmed txs circular buffer
void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight)
{
for (unsigned int j = 0; j < buckets.size(); j++) {
oldUnconfTxs[j] += unconfTxs[nBlockHeight%unconfTxs.size()][j];
unconfTxs[nBlockHeight%unconfTxs.size()][j] = 0;
- for (unsigned int i = 0; i < curBlockConf.size(); i++)
- curBlockConf[i][j] = 0;
- curBlockTxCt[j] = 0;
- curBlockVal[j] = 0;
}
}
@@ -163,33 +173,38 @@ void TxConfirmStats::Record(int blocksToConfirm, double val)
// blocksToConfirm is 1-based
if (blocksToConfirm < 1)
return;
+ int periodsToConfirm = (blocksToConfirm + scale - 1)/scale;
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
- for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) {
- curBlockConf[i - 1][bucketindex]++;
+ for (size_t i = periodsToConfirm; i <= confAvg.size(); i++) {
+ confAvg[i - 1][bucketindex]++;
}
- curBlockTxCt[bucketindex]++;
- curBlockVal[bucketindex] += val;
+ txCtAvg[bucketindex]++;
+ avg[bucketindex] += val;
}
void TxConfirmStats::UpdateMovingAverages()
{
for (unsigned int j = 0; j < buckets.size(); j++) {
for (unsigned int i = 0; i < confAvg.size(); i++)
- confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j];
- avg[j] = avg[j] * decay + curBlockVal[j];
- txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j];
+ confAvg[i][j] = confAvg[i][j] * decay;
+ for (unsigned int i = 0; i < failAvg.size(); i++)
+ failAvg[i][j] = failAvg[i][j] * decay;
+ avg[j] = avg[j] * decay;
+ txCtAvg[j] = txCtAvg[j] * decay;
}
}
// returns -1 on error conditions
double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
double successBreakPoint, bool requireGreater,
- unsigned int nBlockHeight) const
+ unsigned int nBlockHeight, EstimationResult *result) const
{
// Counters for a bucket (or range of buckets)
double nConf = 0; // Number of tx's confirmed within the confTarget
double totalNum = 0; // Total number of tx's that were ever confirmed
int extraNum = 0; // Number of tx's still in mempool for confTarget or longer
+ double failNum = 0; // Number of tx's that were never confirmed but removed from the mempool after confTarget
+ int periodTarget = (confTarget + scale - 1)/scale;
int maxbucketindex = buckets.size() - 1;
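The periodsToConfirm and periodTarget expressions above group block counts into coarser tracking periods by ceiling division. A standalone illustration follows; the scale value of 2 is made up for the example and is not necessarily what any of the estimator instances use.

#include <cstdio>

int main()
{
    const unsigned int scale = 2; // illustrative value only
    for (unsigned int confTarget = 1; confTarget <= 6; ++confTarget) {
        // Same rounding as TxConfirmStats: targets 1-2 -> period 1, 3-4 -> period 2, ...
        unsigned int periodTarget = (confTarget + scale - 1) / scale;
        printf("confTarget %u -> periodTarget %u\n", confTarget, periodTarget);
    }
    return 0;
}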
@@ -212,12 +227,21 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
bool foundAnswer = false;
unsigned int bins = unconfTxs.size();
+ bool newBucketRange = true;
+ bool passing = true;
+ EstimatorBucket passBucket;
+ EstimatorBucket failBucket;
// Start counting from highest(default) or lowest feerate transactions
for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
+ if (newBucketRange) {
+ curNearBucket = bucket;
+ newBucketRange = false;
+ }
curFarBucket = bucket;
- nConf += confAvg[confTarget - 1][bucket];
+ nConf += confAvg[periodTarget - 1][bucket];
totalNum += txCtAvg[bucket];
+ failNum += failAvg[periodTarget - 1][bucket];
for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++)
extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket];
extraNum += oldUnconfTxs[bucket];
@@ -226,24 +250,41 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
// (Only count the confirmed data points, so that each confirmation count
// will be looking at the same amount of data and same bucket breaks)
if (totalNum >= sufficientTxVal / (1 - decay)) {
- double curPct = nConf / (totalNum + extraNum);
+ double curPct = nConf / (totalNum + failNum + extraNum);
// Check to see if we are no longer getting confirmed at the success rate
- if (requireGreater && curPct < successBreakPoint)
- break;
- if (!requireGreater && curPct > successBreakPoint)
- break;
-
+ if ((requireGreater && curPct < successBreakPoint) || (!requireGreater && curPct > successBreakPoint)) {
+ if (passing == true) {
+ // First time we hit a failure record the failed bucket
+ unsigned int failMinBucket = std::min(curNearBucket, curFarBucket);
+ unsigned int failMaxBucket = std::max(curNearBucket, curFarBucket);
+ failBucket.start = failMinBucket ? buckets[failMinBucket - 1] : 0;
+ failBucket.end = buckets[failMaxBucket];
+ failBucket.withinTarget = nConf;
+ failBucket.totalConfirmed = totalNum;
+ failBucket.inMempool = extraNum;
+ failBucket.leftMempool = failNum;
+ passing = false;
+ }
+ continue;
+ }
// Otherwise update the cumulative stats, and the bucket variables
// and reset the counters
else {
+ failBucket = EstimatorBucket(); // Reset any failed bucket, currently passing
foundAnswer = true;
+ passing = true;
+ passBucket.withinTarget = nConf;
nConf = 0;
+ passBucket.totalConfirmed = totalNum;
totalNum = 0;
+ passBucket.inMempool = extraNum;
+ passBucket.leftMempool = failNum;
+ failNum = 0;
extraNum = 0;
bestNearBucket = curNearBucket;
bestFarBucket = curFarBucket;
- curNearBucket = bucket + step;
+ newBucketRange = true;
}
}
}
@@ -255,8 +296,8 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
// Find the bucket with the median transaction and then report the average feerate from that bucket
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
- unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
- unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket;
+ unsigned int minBucket = std::min(bestNearBucket, bestFarBucket);
+ unsigned int maxBucket = std::max(bestNearBucket, bestFarBucket);
for (unsigned int j = minBucket; j <= maxBucket; j++) {
txSum += txCtAvg[j];
}
@@ -270,83 +311,109 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
break;
}
}
+
+ passBucket.start = minBucket ? buckets[minBucket-1] : 0;
+ passBucket.end = buckets[maxBucket];
}
- LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
- confTarget, requireGreater ? ">" : "<", successBreakPoint,
- requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
- 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
+ // If we were passing until we reached last few buckets with insufficient data, then report those as failed
+ if (passing && !newBucketRange) {
+ unsigned int failMinBucket = std::min(curNearBucket, curFarBucket);
+ unsigned int failMaxBucket = std::max(curNearBucket, curFarBucket);
+ failBucket.start = failMinBucket ? buckets[failMinBucket - 1] : 0;
+ failBucket.end = buckets[failMaxBucket];
+ failBucket.withinTarget = nConf;
+ failBucket.totalConfirmed = totalNum;
+ failBucket.inMempool = extraNum;
+ failBucket.leftMempool = failNum;
+ }
+ LogPrint(BCLog::ESTIMATEFEE, "FeeEst: %d %s%.0f%% decay %.5f: feerate: %g from (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
+ confTarget, requireGreater ? ">" : "<", 100.0 * successBreakPoint, decay,
+ median, passBucket.start, passBucket.end,
+ 100 * passBucket.withinTarget / (passBucket.totalConfirmed + passBucket.inMempool + passBucket.leftMempool),
+ passBucket.withinTarget, passBucket.totalConfirmed, passBucket.inMempool, passBucket.leftMempool,
+ failBucket.start, failBucket.end,
+ 100 * failBucket.withinTarget / (failBucket.totalConfirmed + failBucket.inMempool + failBucket.leftMempool),
+ failBucket.withinTarget, failBucket.totalConfirmed, failBucket.inMempool, failBucket.leftMempool);
+
+
+ if (result) {
+ result->pass = passBucket;
+ result->fail = failBucket;
+ result->decay = decay;
+ result->scale = scale;
+ }
return median;
}
void TxConfirmStats::Write(CAutoFile& fileout) const
{
fileout << decay;
- fileout << buckets;
+ fileout << scale;
fileout << avg;
fileout << txCtAvg;
fileout << confAvg;
+ fileout << failAvg;
}
-void TxConfirmStats::Read(CAutoFile& filein)
+void TxConfirmStats::Read(CAutoFile& filein, int nFileVersion, size_t numBuckets)
{
- // Read data file into temporary variables and do some very basic sanity checking
- std::vector<double> fileBuckets;
- std::vector<double> fileAvg;
- std::vector<std::vector<double> > fileConfAvg;
- std::vector<double> fileTxCtAvg;
- double fileDecay;
- size_t maxConfirms;
- size_t numBuckets;
-
- filein >> fileDecay;
- if (fileDecay <= 0 || fileDecay >= 1)
- throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
- filein >> fileBuckets;
- numBuckets = fileBuckets.size();
- if (numBuckets <= 1 || numBuckets > 1000)
- throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
- filein >> fileAvg;
- if (fileAvg.size() != numBuckets)
+ // Read data file and do some very basic sanity checking
+ // buckets and bucketMap are not updated yet, so don't access them
+ // If there is a read failure, we'll just discard this entire object anyway
+ size_t maxConfirms, maxPeriods;
+
+ // The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
+ if (nFileVersion >= 149900) {
+ filein >> decay;
+ if (decay <= 0 || decay >= 1) {
+ throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
+ }
+ filein >> scale;
+ }
+
+ filein >> avg;
+ if (avg.size() != numBuckets) {
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
- filein >> fileTxCtAvg;
- if (fileTxCtAvg.size() != numBuckets)
+ }
+ filein >> txCtAvg;
+ if (txCtAvg.size() != numBuckets) {
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
- filein >> fileConfAvg;
- maxConfirms = fileConfAvg.size();
- if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) // one week
- throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
- for (unsigned int i = 0; i < maxConfirms; i++) {
- if (fileConfAvg[i].size() != numBuckets)
- throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
}
- // Now that we've processed the entire feerate estimate data file and not
- // thrown any errors, we can copy it to our data structures
- decay = fileDecay;
- buckets = fileBuckets;
- avg = fileAvg;
- confAvg = fileConfAvg;
- txCtAvg = fileTxCtAvg;
- bucketMap.clear();
+ filein >> confAvg;
+ maxPeriods = confAvg.size();
+ maxConfirms = scale * maxPeriods;
- // Resize the current block variables which aren't stored in the data file
- // to match the number of confirms and buckets
- curBlockConf.resize(maxConfirms);
- for (unsigned int i = 0; i < maxConfirms; i++) {
- curBlockConf[i].resize(buckets.size());
+ if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week
+ throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
+ }
+ for (unsigned int i = 0; i < maxPeriods; i++) {
+ if (confAvg[i].size() != numBuckets) {
+ throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
+ }
}
- curBlockTxCt.resize(buckets.size());
- curBlockVal.resize(buckets.size());
- unconfTxs.resize(maxConfirms);
- for (unsigned int i = 0; i < maxConfirms; i++) {
- unconfTxs[i].resize(buckets.size());
+ if (nFileVersion >= 149900) {
+ filein >> failAvg;
+ if (maxPeriods != failAvg.size()) {
+ throw std::runtime_error("Corrupt estimates file. Mismatch in confirms tracked for failures");
+ }
+ for (unsigned int i = 0; i < maxPeriods; i++) {
+ if (failAvg[i].size() != numBuckets) {
+ throw std::runtime_error("Corrupt estimates file. Mismatch in one of failure average bucket counts");
+ }
+ }
+ } else {
+ failAvg.resize(confAvg.size());
+ for (unsigned int i = 0; i < failAvg.size(); i++) {
+ failAvg[i].resize(numBuckets);
+ }
}
- oldUnconfTxs.resize(buckets.size());
- for (unsigned int i = 0; i < buckets.size(); i++)
- bucketMap[buckets[i]] = i;
+ // Resize the current block variables which aren't stored in the data file
+ // to match the number of confirms and buckets
+ resizeInMemoryCounters(numBuckets);
LogPrint(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n",
numBuckets, maxConfirms);
@@ -360,7 +427,7 @@ unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
return bucketindex;
}
-void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex)
+void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex, bool inBlock)
{
//nBestSeenHeight is not updated yet for the new block
int blocksAgo = nBestSeenHeight - entryHeight;
@@ -388,6 +455,12 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
blockIndex, bucketindex);
}
}
+ if (!inBlock && (unsigned int)blocksAgo >= scale) { // Only counts as a failure if not confirmed for entire period
+ unsigned int periodsAgo = blocksAgo / scale;
+ for (size_t i = 0; i < periodsAgo && i < failAvg.size(); i++) {
+ failAvg[i][bucketindex]++;
+ }
+ }
}
// This function is called from CTxMemPool::removeUnchecked to ensure
@@ -395,12 +468,14 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
// tracked. Txs that were part of a block have already been removed in
// processBlockTx to ensure they are never double tracked, but it is
// of no harm to try to remove them again.
-bool CBlockPolicyEstimator::removeTx(uint256 hash)
+bool CBlockPolicyEstimator::removeTx(uint256 hash, bool inBlock)
{
LOCK(cs_feeEstimator);
std::map<uint256, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash);
if (pos != mapMemPoolTxs.end()) {
- feeStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex);
+ feeStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex, inBlock);
+ shortStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex, inBlock);
+ longStats->removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex, inBlock);
mapMemPoolTxs.erase(hash);
return true;
} else {
@@ -409,21 +484,28 @@ bool CBlockPolicyEstimator::removeTx(uint256 hash)
}
CBlockPolicyEstimator::CBlockPolicyEstimator()
- : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0)
+ : nBestSeenHeight(0), firstRecordedHeight(0), historicalFirst(0), historicalBest(0), trackedTxs(0), untrackedTxs(0)
{
static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
- minTrackedFee = CFeeRate(MIN_BUCKET_FEERATE);
- std::vector<double> vfeelist;
- for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING) {
- vfeelist.push_back(bucketBoundary);
+ size_t bucketIndex = 0;
+ for (double bucketBoundary = MIN_BUCKET_FEERATE; bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING, bucketIndex++) {
+ buckets.push_back(bucketBoundary);
+ bucketMap[bucketBoundary] = bucketIndex;
}
- vfeelist.push_back(INF_FEERATE);
- feeStats = new TxConfirmStats(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
+ buckets.push_back(INF_FEERATE);
+ bucketMap[INF_FEERATE] = bucketIndex;
+ assert(bucketMap.size() == buckets.size());
+
+ feeStats = new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE);
+ shortStats = new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE);
+ longStats = new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE);
}
CBlockPolicyEstimator::~CBlockPolicyEstimator()
{
delete feeStats;
+ delete shortStats;
+ delete longStats;
}
void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate)
@@ -457,12 +539,17 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
mapMemPoolTxs[hash].blockHeight = txHeight;
- mapMemPoolTxs[hash].bucketIndex = feeStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
+ unsigned int bucketIndex = feeStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
+ mapMemPoolTxs[hash].bucketIndex = bucketIndex;
+ unsigned int bucketIndex2 = shortStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
+ assert(bucketIndex == bucketIndex2);
+ unsigned int bucketIndex3 = longStats->NewTx(txHeight, (double)feeRate.GetFeePerK());
+ assert(bucketIndex == bucketIndex3);
}
bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry)
{
- if (!removeTx(entry->GetTx().GetHash())) {
+ if (!removeTx(entry->GetTx().GetHash(), true)) {
// This transaction wasn't being tracked for fee estimation
return false;
}
@@ -482,6 +569,8 @@ bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());
feeStats->Record(blocksToConfirm, (double)feeRate.GetFeePerK());
+ shortStats->Record(blocksToConfirm, (double)feeRate.GetFeePerK());
+ longStats->Record(blocksToConfirm, (double)feeRate.GetFeePerK());
return true;
}
@@ -503,21 +592,32 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
// of unconfirmed txs to remove from tracking.
nBestSeenHeight = nBlockHeight;
- // Clear the current block state and update unconfirmed circular buffer
+ // Update unconfirmed circular buffer
feeStats->ClearCurrent(nBlockHeight);
+ shortStats->ClearCurrent(nBlockHeight);
+ longStats->ClearCurrent(nBlockHeight);
+
+ // Decay all exponential averages
+ feeStats->UpdateMovingAverages();
+ shortStats->UpdateMovingAverages();
+ longStats->UpdateMovingAverages();
unsigned int countedTxs = 0;
- // Repopulate the current block states
+ // Update averages with data points from current block
for (unsigned int i = 0; i < entries.size(); i++) {
if (processBlockTx(nBlockHeight, entries[i]))
countedTxs++;
}
- // Update all exponential averages with the current block state
- feeStats->UpdateMovingAverages();
+ if (firstRecordedHeight == 0 && countedTxs > 0) {
+ firstRecordedHeight = nBestSeenHeight;
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy first recorded height %u\n", firstRecordedHeight);
+ }
+
- LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
- countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size());
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy estimates updated by %u of %u block txs, since last block %u of %u tracked, mempool map size %u, max target %u from %s\n",
+ countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size(),
+ MaxUsableEstimate(), HistoricalBlockSpan() > BlockSpan() ? "historical" : "current");
trackedTxs = 0;
untrackedTxs = 0;
@@ -525,13 +625,44 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) const
{
+ // It's not possible to get reasonable estimates for confTarget of 1
+ if (confTarget <= 1)
+ return CFeeRate(0);
+
+ return estimateRawFee(confTarget, DOUBLE_SUCCESS_PCT, FeeEstimateHorizon::MED_HALFLIFE);
+}
+
+CFeeRate CBlockPolicyEstimator::estimateRawFee(int confTarget, double successThreshold, FeeEstimateHorizon horizon, EstimationResult* result) const
+{
+ TxConfirmStats* stats;
+ double sufficientTxs = SUFFICIENT_FEETXS;
+ switch (horizon) {
+ case FeeEstimateHorizon::SHORT_HALFLIFE: {
+ stats = shortStats;
+ sufficientTxs = SUFFICIENT_TXS_SHORT;
+ break;
+ }
+ case FeeEstimateHorizon::MED_HALFLIFE: {
+ stats = feeStats;
+ break;
+ }
+ case FeeEstimateHorizon::LONG_HALFLIFE: {
+ stats = longStats;
+ break;
+ }
+ default: {
+ return CFeeRate(0);
+ }
+ }
+
LOCK(cs_feeEstimator);
// Return failure if trying to analyze a target we're not tracking
- // It's not possible to get reasonable estimates for confTarget of 1
- if (confTarget <= 1 || (unsigned int)confTarget > feeStats->GetMaxConfirms())
+ if (confTarget <= 0 || (unsigned int)confTarget > stats->GetMaxConfirms())
+ return CFeeRate(0);
+ if (successThreshold > 1)
return CFeeRate(0);
- double median = feeStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
+ double median = stats->EstimateMedianVal(confTarget, sufficientTxs, successThreshold, true, nBestSeenHeight, result);
if (median < 0)
return CFeeRate(0);
@@ -539,31 +670,148 @@ CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) const
return CFeeRate(median);
}
-CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool) const
+unsigned int CBlockPolicyEstimator::BlockSpan() const
+{
+ if (firstRecordedHeight == 0) return 0;
+ assert(nBestSeenHeight >= firstRecordedHeight);
+
+ return nBestSeenHeight - firstRecordedHeight;
+}
+
+unsigned int CBlockPolicyEstimator::HistoricalBlockSpan() const
+{
+ if (historicalFirst == 0) return 0;
+ assert(historicalBest >= historicalFirst);
+
+ if (nBestSeenHeight - historicalBest > OLDEST_ESTIMATE_HISTORY) return 0;
+
+ return historicalBest - historicalFirst;
+}
+
+unsigned int CBlockPolicyEstimator::MaxUsableEstimate() const
+{
+ // Block spans are divided by 2 to make sure there are enough potential failing data points for the estimate
+ return std::min(longStats->GetMaxConfirms(), std::max(BlockSpan(), HistoricalBlockSpan()) / 2);
+}
+
+/** Return a fee estimate at the required successThreshold from the shortest
+ * time horizon which tracks confirmations up to the desired target. If
+ * checkShorterHorizon is requested, also allow short time horizon estimates
+ * for a lower target to reduce the given answer */
+double CBlockPolicyEstimator::estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon) const
+{
+ double estimate = -1;
+ if (confTarget >= 1 && confTarget <= longStats->GetMaxConfirms()) {
+ // Find estimate from shortest time horizon possible
+ if (confTarget <= shortStats->GetMaxConfirms()) { // short horizon
+ estimate = shortStats->EstimateMedianVal(confTarget, SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight);
+ }
+ else if (confTarget <= feeStats->GetMaxConfirms()) { // medium horizon
+ estimate = feeStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
+ }
+ else { // long horizon
+ estimate = longStats->EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
+ }
+ if (checkShorterHorizon) {
+ // If a lower confTarget from a more recent horizon returns a lower answer use it.
+ if (confTarget > feeStats->GetMaxConfirms()) {
+ double medMax = feeStats->EstimateMedianVal(feeStats->GetMaxConfirms(), SUFFICIENT_FEETXS, successThreshold, true, nBestSeenHeight);
+ if (medMax > 0 && (estimate == -1 || medMax < estimate))
+ estimate = medMax;
+ }
+ if (confTarget > shortStats->GetMaxConfirms()) {
+ double shortMax = shortStats->EstimateMedianVal(shortStats->GetMaxConfirms(), SUFFICIENT_TXS_SHORT, successThreshold, true, nBestSeenHeight);
+ if (shortMax > 0 && (estimate == -1 || shortMax < estimate))
+ estimate = shortMax;
+ }
+ }
+ }
+ return estimate;
+}
+
+/** Ensure that for a conservative estimate, the DOUBLE_SUCCESS_PCT is also met
+ * at 2 * target for any longer time horizons.
+ */
+double CBlockPolicyEstimator::estimateConservativeFee(unsigned int doubleTarget) const
+{
+ double estimate = -1;
+ if (doubleTarget <= shortStats->GetMaxConfirms()) {
+ estimate = feeStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight);
+ }
+ if (doubleTarget <= feeStats->GetMaxConfirms()) {
+ double longEstimate = longStats->EstimateMedianVal(doubleTarget, SUFFICIENT_FEETXS, DOUBLE_SUCCESS_PCT, true, nBestSeenHeight);
+ if (longEstimate > estimate) {
+ estimate = longEstimate;
+ }
+ }
+ return estimate;
+}
+
+/** estimateSmartFee returns the max of the feerates calculated with a 60%
+ * threshold required at target / 2, an 85% threshold required at target and a
+ * 95% threshold required at 2 * target. Each calculation is performed at the
+ * shortest time horizon which tracks the required target. Conservative
+ * estimates, however, require the 95% threshold at 2 * target to be met for any
+ * longer time horizons also.
+ */
+CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative) const
{
if (answerFoundAtTarget)
*answerFoundAtTarget = confTarget;
double median = -1;
-
{
LOCK(cs_feeEstimator);
// Return failure if trying to analyze a target we're not tracking
- if (confTarget <= 0 || (unsigned int)confTarget > feeStats->GetMaxConfirms())
+ if (confTarget <= 0 || (unsigned int)confTarget > longStats->GetMaxConfirms())
return CFeeRate(0);
// It's not possible to get reasonable estimates for confTarget of 1
if (confTarget == 1)
confTarget = 2;
- while (median < 0 && (unsigned int)confTarget <= feeStats->GetMaxConfirms()) {
- median = feeStats->EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
+ unsigned int maxUsableEstimate = MaxUsableEstimate();
+ if (maxUsableEstimate <= 1)
+ return CFeeRate(0);
+
+ if ((unsigned int)confTarget > maxUsableEstimate) {
+ confTarget = maxUsableEstimate;
+ }
+
+ assert(confTarget > 0); //estimateCombinedFee and estimateConservativeFee take unsigned ints
+
+ /** true is passed to estimateCombinedFee for target/2 and target so
+ * that we check the max confirms for shorter time horizons as well.
+ * This is necessary to preserve monotonically increasing estimates.
+ * For non-conservative estimates we do the same thing for 2*target, but
+ * for conservative estimates we want to skip these shorter horizon
+ * checks for 2*target because we are taking the max over all time
+ * horizons so we already have monotonically increasing estimates and
+ * the purpose of conservative estimates is not to let short term
+ * fluctuations lower our estimates by too much.
+ */
+ double halfEst = estimateCombinedFee(confTarget/2, HALF_SUCCESS_PCT, true);
+ double actualEst = estimateCombinedFee(confTarget, SUCCESS_PCT, true);
+ double doubleEst = estimateCombinedFee(2 * confTarget, DOUBLE_SUCCESS_PCT, !conservative);
+ median = halfEst;
+ if (actualEst > median) {
+ median = actualEst;
+ }
+ if (doubleEst > median) {
+ median = doubleEst;
+ }
+
+ if (conservative || median == -1) {
+ double consEst = estimateConservativeFee(2 * confTarget);
+ if (consEst > median) {
+ median = consEst;
+ }
}
} // Must unlock cs_feeEstimator before taking mempool locks
if (answerFoundAtTarget)
- *answerFoundAtTarget = confTarget - 1;
+ *answerFoundAtTarget = confTarget;
// If mempool is limiting txs , return at least the min feerate from the mempool
CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
@@ -576,14 +824,24 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoun
return CFeeRate(median);
}
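
A minimal caller-side sketch of the new estimateSmartFee() signature, mirroring how the RPC code later in this patch invokes it; the helper name GetFeeForTarget is hypothetical, and ::feeEstimator and ::mempool are assumed to be the usual node globals:

    // Hypothetical helper showing the new 'conservative' parameter in use.
    CFeeRate GetFeeForTarget(int confTarget, bool conservative)
    {
        int answerFound = 0;
        CFeeRate rate = ::feeEstimator.estimateSmartFee(confTarget, &answerFound,
                                                        ::mempool, conservative);
        // CFeeRate(0) means no estimate could be given; answerFound reports the
        // target actually used (it may have been lowered to MaxUsableEstimate()).
        return rate;
    }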
+
bool CBlockPolicyEstimator::Write(CAutoFile& fileout) const
{
try {
LOCK(cs_feeEstimator);
- fileout << 139900; // version required to read: 0.13.99 or later
+ fileout << 149900; // version required to read: 0.14.99 or later
fileout << CLIENT_VERSION; // version that wrote the file
fileout << nBestSeenHeight;
+ if (BlockSpan() > HistoricalBlockSpan()/2) {
+ fileout << firstRecordedHeight << nBestSeenHeight;
+ }
+ else {
+ fileout << historicalFirst << historicalBest;
+ }
+ fileout << buckets;
feeStats->Write(fileout);
+ shortStats->Write(fileout);
+ longStats->Write(fileout);
}
catch (const std::exception&) {
LogPrintf("CBlockPolicyEstimator::Write(): unable to write policy estimator data (non-fatal)\n");
@@ -596,27 +854,104 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein)
{
try {
LOCK(cs_feeEstimator);
- int nVersionRequired, nVersionThatWrote, nFileBestSeenHeight;
+ int nVersionRequired, nVersionThatWrote;
+ unsigned int nFileBestSeenHeight, nFileHistoricalFirst, nFileHistoricalBest;
filein >> nVersionRequired >> nVersionThatWrote;
if (nVersionRequired > CLIENT_VERSION)
return error("CBlockPolicyEstimator::Read(): up-version (%d) fee estimate file", nVersionRequired);
+
+ // Read fee estimates file into temporary variables so existing data
+ // structures aren't corrupted if there is an exception.
filein >> nFileBestSeenHeight;
- feeStats->Read(filein);
- nBestSeenHeight = nFileBestSeenHeight;
- // if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
+
+ if (nVersionThatWrote < 149900) {
+ // Read the old fee estimates file for temporary use, but then discard. Will start collecting data from scratch.
+ // decay is stored before buckets in old versions, so pre-read decay and pass into TxConfirmStats constructor
+ double tempDecay;
+ filein >> tempDecay;
+ if (tempDecay <= 0 || tempDecay >= 1)
+ throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
+
+ std::vector<double> tempBuckets;
+ filein >> tempBuckets;
+ size_t tempNum = tempBuckets.size();
+ if (tempNum <= 1 || tempNum > 1000)
+ throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
+
+ std::map<double, unsigned int> tempMap;
+
+ std::unique_ptr<TxConfirmStats> tempFeeStats(new TxConfirmStats(tempBuckets, tempMap, MED_BLOCK_PERIODS, tempDecay, 1));
+ tempFeeStats->Read(filein, nVersionThatWrote, tempNum);
+ // if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
+
+ tempMap.clear();
+ for (unsigned int i = 0; i < tempBuckets.size(); i++) {
+ tempMap[tempBuckets[i]] = i;
+ }
+ }
+ else { // nVersionThatWrote >= 149900
+ filein >> nFileHistoricalFirst >> nFileHistoricalBest;
+ if (nFileHistoricalFirst > nFileHistoricalBest || nFileHistoricalBest > nFileBestSeenHeight) {
+ throw std::runtime_error("Corrupt estimates file. Historical block range for estimates is invalid");
+ }
+ std::vector<double> fileBuckets;
+ filein >> fileBuckets;
+ size_t numBuckets = fileBuckets.size();
+ if (numBuckets <= 1 || numBuckets > 1000)
+ throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
+
+ std::unique_ptr<TxConfirmStats> fileFeeStats(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE));
+ std::unique_ptr<TxConfirmStats> fileShortStats(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
+ std::unique_ptr<TxConfirmStats> fileLongStats(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
+ fileFeeStats->Read(filein, nVersionThatWrote, numBuckets);
+ fileShortStats->Read(filein, nVersionThatWrote, numBuckets);
+ fileLongStats->Read(filein, nVersionThatWrote, numBuckets);
+
+ // Fee estimates file parsed correctly
+ // Copy buckets from file and refresh our bucketmap
+ buckets = fileBuckets;
+ bucketMap.clear();
+ for (unsigned int i = 0; i < buckets.size(); i++) {
+ bucketMap[buckets[i]] = i;
+ }
+
+ // Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap
+ delete feeStats;
+ delete shortStats;
+ delete longStats;
+ feeStats = fileFeeStats.release();
+ shortStats = fileShortStats.release();
+ longStats = fileLongStats.release();
+
+ nBestSeenHeight = nFileBestSeenHeight;
+ historicalFirst = nFileHistoricalFirst;
+ historicalBest = nFileHistoricalBest;
+ }
}
- catch (const std::exception&) {
- LogPrintf("CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal)\n");
+ catch (const std::exception& e) {
+ LogPrintf("CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal): %s\n",e.what());
return false;
}
return true;
}
+void CBlockPolicyEstimator::FlushUnconfirmed(CTxMemPool& pool) {
+ int64_t startclear = GetTimeMicros();
+ std::vector<uint256> txids;
+ pool.queryHashes(txids);
+ LOCK(cs_feeEstimator);
+ for (auto& txid : txids) {
+ removeTx(txid, false);
+ }
+ int64_t endclear = GetTimeMicros();
+ LogPrint(BCLog::ESTIMATEFEE, "Recorded %u unconfirmed txs from mempool in %ld micros\n",txids.size(), endclear - startclear);
+}
+
FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee)
{
CAmount minFeeLimit = std::max(CAmount(1), minIncrementalFee.GetFeePerK() / 2);
feeset.insert(0);
- for (double bucketBoundary = minFeeLimit; bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING) {
+ for (double bucketBoundary = minFeeLimit; bucketBoundary <= MAX_FILTER_FEERATE; bucketBoundary *= FEE_FILTER_SPACING) {
feeset.insert(bucketBoundary);
}
}
diff --git a/src/policy/fees.h b/src/policy/fees.h
index 15876574d2..f527c85004 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -42,53 +42,57 @@ class TxConfirmStats;
* within your desired 5 blocks.
*
* Here is a brief description of the implementation:
- * When a transaction enters the mempool, we
- * track the height of the block chain at entry. Whenever a block comes in,
- * we count the number of transactions in each bucket and the total amount of feerate
- * paid in each bucket. Then we calculate how many blocks Y it took each
- * transaction to be mined and we track an array of counters in each bucket
- * for how long it to took transactions to get confirmed from 1 to a max of 25
- * and we increment all the counters from Y up to 25. This is because for any
- * number Z>=Y the transaction was successfully mined within Z blocks. We
- * want to save a history of this information, so at any time we have a
- * counter of the total number of transactions that happened in a given feerate
- * bucket and the total number that were confirmed in each number 1-25 blocks
- * or less for any bucket. We save this history by keeping an exponentially
- * decaying moving average of each one of these stats. Furthermore we also
- * keep track of the number unmined (in mempool) transactions in each bucket
- * and for how many blocks they have been outstanding and use that to increase
- * the number of transactions we've seen in that feerate bucket when calculating
- * an estimate for any number of confirmations below the number of blocks
- * they've been outstanding.
+ * When a transaction enters the mempool, we track the height of the block chain
+ * at entry. All further calculations are conducted only on this set of "seen"
+ * transactions. Whenever a block comes in, we count the number of transactions
+ * in each bucket and the total amount of feerate paid in each bucket. Then we
+ * calculate how many blocks Y it took each transaction to be mined. We convert
+ * from a number of blocks to a number of periods Y' each encompassing "scale"
+ * blocks. This is tracked in 3 different data sets, each up to a maximum
+ * number of periods. Within each data set we have an array of counters in each
+ * feerate bucket and we increment all the counters from Y' up to max periods
+ * representing that a tx was successfully confirmed in less than or equal to
+ * that many periods. We want to save a history of this information, so at any
+ * time we have a counter of the total number of transactions that happened in a
+ * given feerate bucket and the total number that were confirmed in each of the
+ * periods or less for any bucket. We save this history by keeping an
+ * exponentially decaying moving average of each one of these stats. This is
+ * done for a different decay in each of the 3 data sets to keep relevant data
+ * from different time horizons. Furthermore we also keep track of the number of
+ * unmined (still in the mempool, or left the mempool without being included in a block)
+ * transactions in each bucket and for how many blocks they have been
+ * outstanding and use both of these numbers to increase the number of transactions
+ * we've seen in that feerate bucket when calculating an estimate for any number
+ * of confirmations below the number of blocks they've been outstanding.
*/
-/** Track confirm delays up to 25 blocks, can't estimate beyond that */
-static const unsigned int MAX_BLOCK_CONFIRMS = 25;
-
-/** Decay of .998 is a half-life of 346 blocks or about 2.4 days */
-static const double DEFAULT_DECAY = .998;
-
-/** Require greater than 95% of X feerate transactions to be confirmed within Y blocks for X to be big enough */
-static const double MIN_SUCCESS_PCT = .95;
-
-/** Require an avg of 1 tx in the combined feerate bucket per block to have stat significance */
-static const double SUFFICIENT_FEETXS = 1;
+/* Identifier for each of the 3 different TxConfirmStats which will track
+ * history over different time horizons. */
+enum FeeEstimateHorizon {
+ SHORT_HALFLIFE = 0,
+ MED_HALFLIFE = 1,
+ LONG_HALFLIFE = 2
+};
-// Minimum and Maximum values for tracking feerates
-// The MIN_BUCKET_FEERATE should just be set to the lowest reasonable feerate we
-// might ever want to track. Historically this has been 1000 since it was
-// inheriting DEFAULT_MIN_RELAY_TX_FEE and changing it is disruptive as it
-// invalidates old estimates files. So leave it at 1000 unless it becomes
-// necessary to lower it, and then lower it substantially.
-static constexpr double MIN_BUCKET_FEERATE = 1000;
-static const double MAX_BUCKET_FEERATE = 1e7;
-static const double INF_FEERATE = MAX_MONEY;
+/* Used to return detailed information about a feerate bucket */
+struct EstimatorBucket
+{
+ double start = -1;
+ double end = -1;
+ double withinTarget = 0;
+ double totalConfirmed = 0;
+ double inMempool = 0;
+ double leftMempool = 0;
+};
-// We have to lump transactions into buckets based on feerate, but we want to be able
-// to give accurate estimates over a large range of potential feerates
-// Therefore it makes sense to exponentially space the buckets
-/** Spacing of FeeRate buckets */
-static const double FEE_SPACING = 1.1;
+/* Used to return detailed information about a fee estimate calculation */
+struct EstimationResult
+{
+ EstimatorBucket pass;
+ EstimatorBucket fail;
+ double decay;
+ unsigned int scale;
+};
/**
* We want to be able to estimate feerates that are needed on tx's to be included in
@@ -97,6 +101,55 @@ static const double FEE_SPACING = 1.1;
*/
class CBlockPolicyEstimator
{
+private:
+ /** Track confirm delays up to 12 blocks for short horizon */
+ static constexpr unsigned int SHORT_BLOCK_PERIODS = 12;
+ static constexpr unsigned int SHORT_SCALE = 1;
+ /** Track confirm delays up to 48 blocks for medium horizon */
+ static constexpr unsigned int MED_BLOCK_PERIODS = 24;
+ static constexpr unsigned int MED_SCALE = 2;
+ /** Track confirm delays up to 1008 blocks for long horizon */
+ static constexpr unsigned int LONG_BLOCK_PERIODS = 42;
+ static constexpr unsigned int LONG_SCALE = 24;
+ /** Historical estimates that are older than this aren't valid */
+ static const unsigned int OLDEST_ESTIMATE_HISTORY = 6 * 1008;
+
+ /** Decay of .962 is a half-life of 18 blocks or about 3 hours */
+ static constexpr double SHORT_DECAY = .962;
+ /** Decay of .9952 is a half-life of 144 blocks or about 1 day */
+ static constexpr double MED_DECAY = .9952;
+ /** Decay of .99931 is a half-life of 1008 blocks or about 1 week */
+ static constexpr double LONG_DECAY = .99931;
+
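
As a quick sanity check on the half-life comments above, decay^half_life should come out to roughly 0.5 for each horizon:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        std::printf("%.3f %.3f %.3f\n",
                    std::pow(0.962, 18),      // short horizon:  ~0.498
                    std::pow(0.9952, 144),    // medium horizon: ~0.500
                    std::pow(0.99931, 1008)); // long horizon:   ~0.499
        return 0;
    }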
+ /** Require greater than 60% of X feerate transactions to be confirmed within Y/2 blocks*/
+ static constexpr double HALF_SUCCESS_PCT = .6;
+ /** Require greater than 85% of X feerate transactions to be confirmed within Y blocks*/
+ static constexpr double SUCCESS_PCT = .85;
+ /** Require greater than 95% of X feerate transactions to be confirmed within 2 * Y blocks*/
+ static constexpr double DOUBLE_SUCCESS_PCT = .95;
+
+ /** Require an avg of 0.1 tx in the combined feerate bucket per block to have stat significance */
+ static constexpr double SUFFICIENT_FEETXS = 0.1;
+ /** Require an avg of 0.5 tx when using short decay since there are fewer blocks considered*/
+ static constexpr double SUFFICIENT_TXS_SHORT = 0.5;
+
+ /** Minimum and Maximum values for tracking feerates
+ * The MIN_BUCKET_FEERATE should just be set to the lowest reasonable feerate we
+ * might ever want to track. Historically this has been 1000 since it was
+ * inheriting DEFAULT_MIN_RELAY_TX_FEE and changing it is disruptive as it
+ * invalidates old estimates files. So leave it at 1000 unless it becomes
+ * necessary to lower it, and then lower it substantially.
+ */
+ static constexpr double MIN_BUCKET_FEERATE = 1000;
+ static constexpr double MAX_BUCKET_FEERATE = 1e7;
+
+ /** Spacing of FeeRate buckets
+ * We have to lump transactions into buckets based on feerate, but we want to be able
+ * to give accurate estimates over a large range of potential feerates
+ * Therefore it makes sense to exponentially space the buckets
+ */
+ static constexpr double FEE_SPACING = 1.05;
+
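
For a sense of scale, the constants above imply on the order of 190 exponentially spaced bucket boundaries between MIN_BUCKET_FEERATE and MAX_BUCKET_FEERATE; a throwaway count (not part of the patch):

    #include <cstdio>

    int main()
    {
        const double minFee = 1000.0, maxFee = 1e7, spacing = 1.05;
        int nBuckets = 0;
        for (double b = minFee; b <= maxFee; b *= spacing) ++nBuckets;
        std::printf("%d\n", nBuckets); // prints 189
        return 0;
    }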
public:
/** Create new BlockPolicyEstimator and initialize stats tracking classes with default values */
CBlockPolicyEstimator();
@@ -110,16 +163,23 @@ public:
void processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate);
/** Remove a transaction from the mempool tracking stats*/
- bool removeTx(uint256 hash);
+ bool removeTx(uint256 hash, bool inBlock);
- /** Return a feerate estimate */
+ /** DEPRECATED. Return a feerate estimate */
CFeeRate estimateFee(int confTarget) const;
- /** Estimate feerate needed to get be included in a block within
- * confTarget blocks. If no answer can be given at confTarget, return an
- * estimate at the lowest target where one can be given.
+ /** Estimate feerate needed to be included in a block within confTarget
+ * blocks. If no answer can be given at confTarget, return an estimate at
+ * the closest target where one can be given. 'conservative' estimates are
+ * valid over longer time horizons also.
+ */
+ CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative = true) const;
+
+ /** Return a specific fee estimate calculation with a given success
+ * threshold and time horizon, and optionally return detailed data about
+ * the calculation
*/
- CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool) const;
+ CFeeRate estimateRawFee(int confTarget, double successThreshold, FeeEstimateHorizon horizon, EstimationResult *result = nullptr) const;
/** Write estimation data to a file */
bool Write(CAutoFile& fileout) const;
@@ -127,9 +187,15 @@ public:
/** Read estimation data from a file */
bool Read(CAutoFile& filein);
+ /** Empty mempool transactions on shutdown to record failure to confirm for txs still in mempool */
+ void FlushUnconfirmed(CTxMemPool& pool);
+
private:
- CFeeRate minTrackedFee; //!< Passed to constructor to avoid dependency on main
unsigned int nBestSeenHeight;
+ unsigned int firstRecordedHeight;
+ unsigned int historicalFirst;
+ unsigned int historicalBest;
+
struct TxStatsInfo
{
unsigned int blockHeight;
@@ -142,19 +208,42 @@ private:
/** Classes to track historical data on transaction confirmations */
TxConfirmStats* feeStats;
+ TxConfirmStats* shortStats;
+ TxConfirmStats* longStats;
unsigned int trackedTxs;
unsigned int untrackedTxs;
+ std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive)
+ std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket
+
mutable CCriticalSection cs_feeEstimator;
/** Process a transaction confirmed in a block*/
bool processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry);
+ /** Helper for estimateSmartFee */
+ double estimateCombinedFee(unsigned int confTarget, double successThreshold, bool checkShorterHorizon) const;
+ /** Helper for estimateSmartFee */
+ double estimateConservativeFee(unsigned int doubleTarget) const;
+ /** Number of blocks of data recorded while fee estimates have been running */
+ unsigned int BlockSpan() const;
+ /** Number of blocks of recorded fee estimate data represented in saved data file */
+ unsigned int HistoricalBlockSpan() const;
+ /** Calculation of highest target that reasonable estimate can be provided for */
+ unsigned int MaxUsableEstimate() const;
};
class FeeFilterRounder
{
+private:
+ static constexpr double MAX_FILTER_FEERATE = 1e7;
+ /** FEE_FILTER_SPACING is just used to provide some quantization of fee
+ * filter results. Historically it reused FEE_SPACING, but it is completely
+ * unrelated, and was made a separate constant so the two concepts are not
+ * tied together */
+ static constexpr double FEE_FILTER_SPACING = 1.1;
+
public:
/** Create new FeeFilterRounder */
FeeFilterRounder(const CFeeRate& minIncrementalFee);
@@ -166,4 +255,5 @@ private:
std::set<double> feeset;
FastRandomContext insecure_rand;
};
+
#endif /*BITCOIN_POLICYESTIMATOR_H */
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index 2b19a6714b..f4fffd6578 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -15,7 +15,7 @@
#include <boost/foreach.hpp>
-CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFee)
+CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
{
// "Dust" is defined in terms of dustRelayFee,
// which has units satoshis-per-kilobyte.
@@ -44,12 +44,12 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFee)
nSize += (32 + 4 + 1 + 107 + 4); // the 148 mentioned above
}
- return 3 * dustRelayFee.GetFee(nSize);
+ return 3 * dustRelayFeeIn.GetFee(nSize);
}
-bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFee)
+bool IsDust(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
{
- return (txout.nValue < GetDustThreshold(txout, dustRelayFee));
+ return (txout.nValue < GetDustThreshold(txout, dustRelayFeeIn));
}
/**
diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp
index 2a331d4fae..135cf6f701 100644
--- a/src/qt/coincontroldialog.cpp
+++ b/src/qt/coincontroldialog.cpp
@@ -513,8 +513,6 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
// Fee
nPayFee = CWallet::GetMinimumFee(nBytes, nTxConfirmTarget, ::mempool, ::feeEstimator);
- if (nPayFee > 0 && coinControl->nMinimumTotalFee > nPayFee)
- nPayFee = coinControl->nMinimumTotalFee;
if (nPayAmount > 0)
{
@@ -573,7 +571,7 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
l5->setText(((nBytes > 0) ? ASYMP_UTF8 : "") + QString::number(nBytes)); // Bytes
l7->setText(fDust ? tr("yes") : tr("no")); // Dust
l8->setText(BitcoinUnits::formatWithUnit(nDisplayUnit, nChange)); // Change
- if (nPayFee > 0 && (coinControl->nMinimumTotalFee < nPayFee))
+ if (nPayFee > 0)
{
l3->setText(ASYMP_UTF8 + l3->text());
l4->setText(ASYMP_UTF8 + l4->text());
diff --git a/src/qt/forms/sendcoinsdialog.ui b/src/qt/forms/sendcoinsdialog.ui
index 52256ca5c4..89f9c25d14 100644
--- a/src/qt/forms/sendcoinsdialog.ui
+++ b/src/qt/forms/sendcoinsdialog.ui
@@ -862,19 +862,6 @@
</widget>
</item>
<item>
- <widget class="QRadioButton" name="radioCustomAtLeast">
- <property name="toolTip">
- <string>If the custom fee is set to 1000 satoshis and the transaction is only 250 bytes, then &quot;per kilobyte&quot; only pays 250 satoshis in fee, while &quot;total at least&quot; pays 1000 satoshis. For transactions bigger than a kilobyte both pay by kilobyte.</string>
- </property>
- <property name="text">
- <string>total at least</string>
- </property>
- <attribute name="buttonGroup">
- <string notr="true">groupCustomFee</string>
- </attribute>
- </widget>
- </item>
- <item>
<widget class="BitcoinAmountField" name="customFee"/>
</item>
<item>
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 2ff78c75cb..b200cb1127 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -98,7 +98,7 @@ class QtRPCTimerBase: public QObject, public RPCTimerBase
{
Q_OBJECT
public:
- QtRPCTimerBase(boost::function<void(void)>& _func, int64_t millis):
+ QtRPCTimerBase(std::function<void(void)>& _func, int64_t millis):
func(_func)
{
timer.setSingleShot(true);
@@ -110,7 +110,7 @@ private Q_SLOTS:
void timeout() { func(); }
private:
QTimer timer;
- boost::function<void(void)> func;
+ std::function<void(void)> func;
};
class QtRPCTimerInterface: public RPCTimerInterface
@@ -118,7 +118,7 @@ class QtRPCTimerInterface: public RPCTimerInterface
public:
~QtRPCTimerInterface() {}
const char *Name() { return "Qt"; }
- RPCTimerBase* NewTimer(boost::function<void(void)>& func, int64_t millis)
+ RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis)
{
return new QtRPCTimerBase(func, millis);
}
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 098cda6d32..272ab9486a 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -31,8 +31,6 @@
#include <QTextDocument>
#include <QTimer>
-#define SEND_CONFIRM_DELAY 3
-
SendCoinsDialog::SendCoinsDialog(const PlatformStyle *_platformStyle, QWidget *parent) :
QDialog(parent),
ui(new Ui::SendCoinsDialog),
@@ -111,7 +109,6 @@ SendCoinsDialog::SendCoinsDialog(const PlatformStyle *_platformStyle, QWidget *p
ui->groupFee->setId(ui->radioCustomFee, 1);
ui->groupFee->button((int)std::max(0, std::min(1, settings.value("nFeeRadio").toInt())))->setChecked(true);
ui->groupCustomFee->setId(ui->radioCustomPerKilobyte, 0);
- ui->groupCustomFee->setId(ui->radioCustomAtLeast, 1);
ui->groupCustomFee->button((int)std::max(0, std::min(1, settings.value("nCustomFeeRadio").toInt())))->setChecked(true);
ui->customFee->setValue(settings.value("nTransactionFee").toLongLong());
ui->checkBoxMinimumFee->setChecked(settings.value("fPayOnlyMinFee").toBool());
@@ -608,7 +605,6 @@ void SendCoinsDialog::updateFeeSectionControls()
ui->checkBoxMinimumFee ->setEnabled(ui->radioCustomFee->isChecked());
ui->labelMinFeeWarning ->setEnabled(ui->radioCustomFee->isChecked());
ui->radioCustomPerKilobyte ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
- ui->radioCustomAtLeast ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked() && CoinControlDialog::coinControl->HasSelected());
ui->customFee ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
}
@@ -619,19 +615,12 @@ void SendCoinsDialog::updateGlobalFeeVariables()
int nConfirmTarget = ui->sliderSmartFee->maximum() - ui->sliderSmartFee->value() + 2;
payTxFee = CFeeRate(0);
- // set nMinimumTotalFee to 0 to not accidentally pay a custom fee
- CoinControlDialog::coinControl->nMinimumTotalFee = 0;
-
// show the estimated required time for confirmation
ui->confirmationTargetLabel->setText(GUIUtil::formatDurationStr(nConfirmTarget * Params().GetConsensus().nPowTargetSpacing) + " / " + tr("%n block(s)", "", nConfirmTarget));
}
else
{
payTxFee = CFeeRate(ui->customFee->value());
-
- // if user has selected to set a minimum absolute fee, pass the value to coincontrol
- // set nMinimumTotalFee to 0 in case of user has selected that the fee is per KB
- CoinControlDialog::coinControl->nMinimumTotalFee = ui->radioCustomAtLeast->isChecked() ? ui->customFee->value() : 0;
}
}
@@ -830,21 +819,6 @@ void SendCoinsDialog::coinControlUpdateLabels()
if (!model || !model->getOptionsModel())
return;
- if (model->getOptionsModel()->getCoinControlFeatures())
- {
- // enable minimum absolute fee UI controls
- ui->radioCustomAtLeast->setVisible(true);
-
- // only enable the feature if inputs are selected
- ui->radioCustomAtLeast->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked() &&CoinControlDialog::coinControl->HasSelected());
- }
- else
- {
- // in case coin control is disabled (=default), hide minimum absolute fee UI controls
- ui->radioCustomAtLeast->setVisible(false);
- return;
- }
-
// set pay amounts
CoinControlDialog::payAmounts.clear();
CoinControlDialog::fSubtractFeeFromAmount = false;
diff --git a/src/qt/sendcoinsdialog.h b/src/qt/sendcoinsdialog.h
index a402edc961..a932f129be 100644
--- a/src/qt/sendcoinsdialog.h
+++ b/src/qt/sendcoinsdialog.h
@@ -100,13 +100,14 @@ Q_SIGNALS:
};
+#define SEND_CONFIRM_DELAY 3
class SendConfirmationDialog : public QMessageBox
{
Q_OBJECT
public:
- SendConfirmationDialog(const QString &title, const QString &text, int secDelay = 0, QWidget *parent = 0);
+ SendConfirmationDialog(const QString &title, const QString &text, int secDelay = SEND_CONFIRM_DELAY, QWidget *parent = 0);
int exec();
private Q_SLOTS:
diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp
index 30f4db9450..9008c81634 100644
--- a/src/qt/transactionview.cpp
+++ b/src/qt/transactionview.cpp
@@ -11,6 +11,7 @@
#include "guiutil.h"
#include "optionsmodel.h"
#include "platformstyle.h"
+#include "sendcoinsdialog.h"
#include "transactiondescdialog.h"
#include "transactionfilterproxy.h"
#include "transactionrecord.h"
@@ -37,7 +38,7 @@
TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *parent) :
QWidget(parent), model(0), transactionProxyModel(0),
- transactionView(0), abandonAction(0), columnResizingFixer(0)
+ transactionView(0), abandonAction(0), bumpFeeAction(0), columnResizingFixer(0)
{
// Build filter row
setContentsMargins(0,0,0,0);
@@ -138,6 +139,7 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa
// Actions
abandonAction = new QAction(tr("Abandon transaction"), this);
+ bumpFeeAction = new QAction(tr("Increase transaction fee"), this);
QAction *copyAddressAction = new QAction(tr("Copy address"), this);
QAction *copyLabelAction = new QAction(tr("Copy label"), this);
QAction *copyAmountAction = new QAction(tr("Copy amount"), this);
@@ -156,6 +158,7 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa
contextMenu->addAction(copyTxPlainText);
contextMenu->addAction(showDetailsAction);
contextMenu->addSeparator();
+ contextMenu->addAction(bumpFeeAction);
contextMenu->addAction(abandonAction);
contextMenu->addAction(editLabelAction);
@@ -173,6 +176,7 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa
connect(view, SIGNAL(doubleClicked(QModelIndex)), this, SIGNAL(doubleClicked(QModelIndex)));
connect(view, SIGNAL(customContextMenuRequested(QPoint)), this, SLOT(contextualMenu(QPoint)));
+ connect(bumpFeeAction, SIGNAL(triggered()), this, SLOT(bumpFee()));
connect(abandonAction, SIGNAL(triggered()), this, SLOT(abandonTx()));
connect(copyAddressAction, SIGNAL(triggered()), this, SLOT(copyAddress()));
connect(copyLabelAction, SIGNAL(triggered()), this, SLOT(copyLabel()));
@@ -372,6 +376,7 @@ void TransactionView::contextualMenu(const QPoint &point)
uint256 hash;
hash.SetHex(selection.at(0).data(TransactionTableModel::TxHashRole).toString().toStdString());
abandonAction->setEnabled(model->transactionCanBeAbandoned(hash));
+ bumpFeeAction->setEnabled(model->transactionSignalsRBF(hash));
if(index.isValid())
{
@@ -397,6 +402,24 @@ void TransactionView::abandonTx()
model->getTransactionTableModel()->updateTransaction(hashQStr, CT_UPDATED, false);
}
+void TransactionView::bumpFee()
+{
+ if(!transactionView || !transactionView->selectionModel())
+ return;
+ QModelIndexList selection = transactionView->selectionModel()->selectedRows(0);
+
+ // get the hash from the TxHashRole (QVariant / QString)
+ uint256 hash;
+ QString hashQStr = selection.at(0).data(TransactionTableModel::TxHashRole).toString();
+ hash.SetHex(hashQStr.toStdString());
+
+ // Bump tx fee over the walletModel
+ if (model->bumpFee(hash)) {
+ // Update the table
+ model->getTransactionTableModel()->updateTransaction(hashQStr, CT_UPDATED, false);
+ }
+}
+
void TransactionView::copyAddress()
{
GUIUtil::copyEntryData(transactionView, 0, TransactionTableModel::AddressRole);
diff --git a/src/qt/transactionview.h b/src/qt/transactionview.h
index 595701cdd9..52e57cae4c 100644
--- a/src/qt/transactionview.h
+++ b/src/qt/transactionview.h
@@ -76,6 +76,7 @@ private:
QDateTimeEdit *dateFrom;
QDateTimeEdit *dateTo;
QAction *abandonAction;
+ QAction *bumpFeeAction;
QWidget *createDateRangeWidget();
@@ -99,6 +100,7 @@ private Q_SLOTS:
void openThirdPartyTxUrl(QString url);
void updateWatchOnlyColumn(bool fHaveWatchOnly);
void abandonTx();
+ void bumpFee();
Q_SIGNALS:
void doubleClicked(const QModelIndex&);
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 719089d05a..33b407ae57 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -8,8 +8,10 @@
#include "consensus/validation.h"
#include "guiconstants.h"
#include "guiutil.h"
+#include "optionsmodel.h"
#include "paymentserver.h"
#include "recentrequeststablemodel.h"
+#include "sendcoinsdialog.h"
#include "transactiontablemodel.h"
#include "base58.h"
@@ -17,15 +19,18 @@
#include "keystore.h"
#include "validation.h"
#include "net.h" // for g_connman
+#include "policy/rbf.h"
#include "sync.h"
#include "ui_interface.h"
#include "util.h" // for GetBoolArg
+#include "wallet/feebumper.h"
#include "wallet/wallet.h"
#include "wallet/walletdb.h" // for BackupWallet
#include <stdint.h>
#include <QDebug>
+#include <QMessageBox>
#include <QSet>
#include <QTimer>
@@ -651,6 +656,84 @@ bool WalletModel::abandonTransaction(uint256 hash) const
return wallet->AbandonTransaction(hash);
}
+bool WalletModel::transactionSignalsRBF(uint256 hash) const
+{
+ LOCK2(cs_main, wallet->cs_wallet);
+ const CWalletTx *wtx = wallet->GetWalletTx(hash);
+ return wtx && SignalsOptInRBF(*wtx);
+}
+
+bool WalletModel::bumpFee(uint256 hash)
+{
+ std::unique_ptr<CFeeBumper> feeBump;
+ {
+ LOCK2(cs_main, wallet->cs_wallet);
+ feeBump.reset(new CFeeBumper(wallet, hash, nTxConfirmTarget, false, 0, true));
+ }
+ if (feeBump->getResult() != BumpFeeResult::OK)
+ {
+ QMessageBox::critical(0, tr("Fee bump error"), tr("Increasing transaction fee failed") + "<br />(" +
+ (feeBump->getErrors().size() ? QString::fromStdString(feeBump->getErrors()[0]) : "") +")");
+ return false;
+ }
+
+ // allow the user to verify the new fee before proceeding
+ QString questionString = tr("Do you want to increase the fee?");
+ questionString.append("<br />");
+ CAmount oldFee = feeBump->getOldFee();
+ CAmount newFee = feeBump->getNewFee();
+ questionString.append("<table style=\"text-align: left;\">");
+ questionString.append("<tr><td>");
+ questionString.append(tr("Current fee:"));
+ questionString.append("</td><td>");
+ questionString.append(BitcoinUnits::formatHtmlWithUnit(getOptionsModel()->getDisplayUnit(), oldFee));
+ questionString.append("</td></tr><tr><td>");
+ questionString.append(tr("Increase:"));
+ questionString.append("</td><td>");
+ questionString.append(BitcoinUnits::formatHtmlWithUnit(getOptionsModel()->getDisplayUnit(), newFee - oldFee));
+ questionString.append("</td></tr><tr><td>");
+ questionString.append(tr("New fee:"));
+ questionString.append("</td><td>");
+ questionString.append(BitcoinUnits::formatHtmlWithUnit(getOptionsModel()->getDisplayUnit(), newFee));
+ questionString.append("</td></tr></table>");
+ SendConfirmationDialog confirmationDialog(tr("Confirm fee bump"), questionString);
+ confirmationDialog.exec();
+ QMessageBox::StandardButton retval = (QMessageBox::StandardButton)confirmationDialog.result();
+
+ // cancel sign&broadcast if the user doesn't want to bump the fee
+ if (retval != QMessageBox::Yes) {
+ return false;
+ }
+
+ WalletModel::UnlockContext ctx(requestUnlock());
+ if(!ctx.isValid())
+ {
+ return false;
+ }
+
+ // sign bumped transaction
+ bool res = false;
+ {
+ LOCK2(cs_main, wallet->cs_wallet);
+ res = feeBump->signTransaction(wallet);
+ }
+ if (!res) {
+ QMessageBox::critical(0, tr("Fee bump error"), tr("Can't sign transaction."));
+ return false;
+ }
+ // commit the bumped transaction
+ {
+ LOCK2(cs_main, wallet->cs_wallet);
+ res = feeBump->commit(wallet);
+ }
+ if(!res) {
+ QMessageBox::critical(0, tr("Fee bump error"), tr("Could not commit transaction") + "<br />(" +
+ QString::fromStdString(feeBump->getErrors()[0])+")");
+ return false;
+ }
+ return true;
+}
+
bool WalletModel::isWalletEnabled()
{
return !GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET);
diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h
index 78e45dc369..df5acaf684 100644
--- a/src/qt/walletmodel.h
+++ b/src/qt/walletmodel.h
@@ -207,6 +207,9 @@ public:
bool transactionCanBeAbandoned(uint256 hash) const;
bool abandonTransaction(uint256 hash) const;
+ bool transactionSignalsRBF(uint256 hash) const;
+ bool bumpFee(uint256 hash);
+
static bool isWalletEnabled();
bool hdEnabled() const;
diff --git a/src/random.cpp b/src/random.cpp
index 3b9df3edaa..de7553c825 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -203,10 +203,43 @@ void GetRandBytes(unsigned char* buf, int num)
}
}
+static void AddDataToRng(void* data, size_t len);
+
+void RandAddSeedSleep()
+{
+ int64_t nPerfCounter1 = GetPerformanceCounter();
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ int64_t nPerfCounter2 = GetPerformanceCounter();
+
+ // Combine with and update state
+ AddDataToRng(&nPerfCounter1, sizeof(nPerfCounter1));
+ AddDataToRng(&nPerfCounter2, sizeof(nPerfCounter2));
+
+ memory_cleanse(&nPerfCounter1, sizeof(nPerfCounter1));
+ memory_cleanse(&nPerfCounter2, sizeof(nPerfCounter2));
+}
+
+
static std::mutex cs_rng_state;
static unsigned char rng_state[32] = {0};
static uint64_t rng_counter = 0;
+static void AddDataToRng(void* data, size_t len) {
+ CSHA512 hasher;
+ hasher.Write((const unsigned char*)&len, sizeof(len));
+ hasher.Write((const unsigned char*)data, len);
+ unsigned char buf[64];
+ {
+ std::unique_lock<std::mutex> lock(cs_rng_state);
+ hasher.Write(rng_state, sizeof(rng_state));
+ hasher.Write((const unsigned char*)&rng_counter, sizeof(rng_counter));
+ ++rng_counter;
+ hasher.Finalize(buf);
+ memcpy(rng_state, buf + 32, 32);
+ }
+ memory_cleanse(buf, 64);
+}
+
void GetStrongRandBytes(unsigned char* out, int num)
{
assert(num <= 32);
diff --git a/src/random.h b/src/random.h
index 9551e1c461..6a63d57429 100644
--- a/src/random.h
+++ b/src/random.h
@@ -24,6 +24,13 @@ int GetRandInt(int nMax);
uint256 GetRandHash();
/**
+ * Add a little bit of randomness to the output of GetStrongRandBytes.
+ * This sleeps for a millisecond, so should only be called when there is
+ * no other work to be done.
+ */
+void RandAddSeedSleep();
+
+/**
* Function to gather random data from multiple sources, failing whenever any
* of those source fail to provide a result.
*/
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 9d72a23e6d..b4b160aac9 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -187,7 +187,7 @@ void RPCNotifyBlockChange(bool ibd, const CBlockIndex * pindex)
latestblock.hash = pindex->GetBlockHash();
latestblock.height = pindex->nHeight;
}
- cond_blockchange.notify_all();
+ cond_blockchange.notify_all();
}
UniValue waitfornewblock(const JSONRPCRequest& request)
@@ -1074,6 +1074,17 @@ static UniValue BIP9SoftForkDesc(const Consensus::Params& consensusParams, Conse
rv.push_back(Pair("startTime", consensusParams.vDeployments[id].nStartTime));
rv.push_back(Pair("timeout", consensusParams.vDeployments[id].nTimeout));
rv.push_back(Pair("since", VersionBitsTipStateSinceHeight(consensusParams, id)));
+ if (THRESHOLD_STARTED == thresholdState)
+ {
+ UniValue statsUV(UniValue::VOBJ);
+ BIP9Stats statsStruct = VersionBitsTipStatistics(consensusParams, id);
+ statsUV.push_back(Pair("period", statsStruct.period));
+ statsUV.push_back(Pair("threshold", statsStruct.threshold));
+ statsUV.push_back(Pair("elapsed", statsStruct.elapsed));
+ statsUV.push_back(Pair("count", statsStruct.count));
+ statsUV.push_back(Pair("possible", statsStruct.possible));
+ rv.push_back(Pair("statistics", statsUV));
+ }
return rv;
}
@@ -1119,7 +1130,14 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
" \"bit\": xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n"
" \"startTime\": xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n"
" \"timeout\": xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n"
- " \"since\": xx (numeric) height of the first block to which the status applies\n"
+ " \"since\": xx, (numeric) height of the first block to which the status applies\n"
+ " \"statistics\": { (object) numeric statistics about BIP9 signalling for a softfork (only for \"started\" status)\n"
+ " \"period\": xx, (numeric) the length in blocks of the BIP9 signalling period \n"
+ " \"threshold\": xx, (numeric) the number of blocks with the version bit set required to activate the feature \n"
+ " \"elapsed\": xx, (numeric) the number of blocks elapsed since the beginning of the current period \n"
+ " \"count\": xx, (numeric) the number of blocks with the version bit set in the current period \n"
+ " \"possible\": xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n"
+ " }\n"
" }\n"
" }\n"
"}\n"
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index df017d89cb..a3a692c14d 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -108,6 +108,10 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getrawmempool", 0, "verbose" },
{ "estimatefee", 0, "nblocks" },
{ "estimatesmartfee", 0, "nblocks" },
+ { "estimatesmartfee", 1, "conservative" },
+ { "estimaterawfee", 0, "nblocks" },
+ { "estimaterawfee", 1, "threshold" },
+ { "estimaterawfee", 2, "horizon" },
{ "prioritisetransaction", 1, "fee_delta" },
{ "setban", 2, "bantime" },
{ "setban", 3, "absolute" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 4ce52a6c7f..d744269df1 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -797,6 +797,7 @@ UniValue estimatefee(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() != 1)
throw std::runtime_error(
"estimatefee nblocks\n"
+ "\nDEPRECATED. Please use estimatesmartfee for more intelligent estimates."
"\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
"confirmation within nblocks blocks. Uses virtual transaction size of transaction\n"
"as defined in BIP 141 (witness data is discounted).\n"
@@ -828,16 +829,19 @@ UniValue estimatefee(const JSONRPCRequest& request)
UniValue estimatesmartfee(const JSONRPCRequest& request)
{
- if (request.fHelp || request.params.size() != 1)
+ if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
throw std::runtime_error(
- "estimatesmartfee nblocks\n"
- "\nWARNING: This interface is unstable and may disappear or change!\n"
+ "estimatesmartfee nblocks (conservative)\n"
"\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
"confirmation within nblocks blocks if possible and return the number of blocks\n"
"for which the estimate is valid. Uses virtual transaction size as defined\n"
"in BIP 141 (witness data is discounted).\n"
"\nArguments:\n"
- "1. nblocks (numeric)\n"
+ "1. nblocks (numeric)\n"
+ "2. conservative (bool, optional, default=true) Whether to return a more conservative estimate which\n"
+ " also satisfies a longer history. A conservative estimate potentially returns a higher\n"
+ " feerate and is more likely to be sufficient for the desired target, but is not as\n"
+ " responsive to short term drops in the prevailing fee market\n"
"\nResult:\n"
"{\n"
" \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in BTC)\n"
@@ -854,15 +858,102 @@ UniValue estimatesmartfee(const JSONRPCRequest& request)
RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VNUM));
int nBlocks = request.params[0].get_int();
+ bool conservative = true;
+ if (request.params.size() > 1 && !request.params[1].isNull()) {
+ RPCTypeCheckArgument(request.params[1], UniValue::VBOOL);
+ conservative = request.params[1].get_bool();
+ }
UniValue result(UniValue::VOBJ);
int answerFound;
- CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocks, &answerFound, ::mempool);
+ CFeeRate feeRate = ::feeEstimator.estimateSmartFee(nBlocks, &answerFound, ::mempool, conservative);
result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
result.push_back(Pair("blocks", answerFound));
return result;
}
+UniValue estimaterawfee(const JSONRPCRequest& request)
+{
+ if (request.fHelp || request.params.size() < 1 || request.params.size() > 3)
+ throw std::runtime_error(
+ "estimaterawfee nblocks (threshold horizon)\n"
+ "\nWARNING: This interface is unstable and may disappear or change!\n"
+ "\nWARNING: This is an advanced API call that is tightly coupled to the specific\n"
+ " implementation of fee estimation. The parameters it can be called with\n"
+ " and the results it returns will change if the internal implementation changes.\n"
+ "\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
+ "confirmation within nblocks blocks if possible. Uses virtual transaction size as defined\n"
+ "in BIP 141 (witness data is discounted).\n"
+ "\nArguments:\n"
+ "1. nblocks (numeric)\n"
+ "2. threshold (numeric, optional) The proportion of transactions in a given feerate range that must have been\n"
+ " confirmed within nblocks in order to consider those feerates as high enough and proceed to check\n"
+ " lower buckets. Default: 0.95\n"
+ "3. horizon (numeric, optional) How long a history of estimates to consider. 0=short, 1=medium, 2=long.\n"
+ " Default: 1\n"
+ "\nResult:\n"
+ "{\n"
+ " \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in BTC)\n"
+ " \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n"
+ " \"scale\" : x, (numeric) The resolution of confirmation targets at this time horizon\n"
+ " \"pass\" : { (json object) information about the lowest range of feerates to succeed in meeting the threshold\n"
+ " \"startrange\" : x.x, (numeric) start of feerate range\n"
+ " \"endrange\" : x.x, (numeric) end of feerate range\n"
+ " \"withintarget\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed within target\n"
+ " \"totalconfirmed\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed at any point\n"
+ " \"inmempool\" : x.x, (numeric) current number of txs in mempool in the feerate range unconfirmed for at least target blocks\n"
+ " \"leftmempool\" : x.x, (numeric) number of txs over history horizon in the feerate range that left mempool unconfirmed after target\n"
+ " }\n"
+ " \"fail\" : { ... } (json object) information about the highest range of feerates to fail to meet the threshold\n"
+ "}\n"
+ "\n"
+ "A negative feerate is returned if no answer can be given.\n"
+ "\nExample:\n"
+ + HelpExampleCli("estimaterawfee", "6 0.9 1")
+ );
+
+ RPCTypeCheck(request.params, boost::assign::list_of(UniValue::VNUM)(UniValue::VNUM)(UniValue::VNUM), true);
+ RPCTypeCheckArgument(request.params[0], UniValue::VNUM);
+ int nBlocks = request.params[0].get_int();
+ double threshold = 0.95;
+ if (!request.params[1].isNull())
+ threshold = request.params[1].get_real();
+ FeeEstimateHorizon horizon = FeeEstimateHorizon::MED_HALFLIFE;
+ if (!request.params[2].isNull()) {
+ int horizonInt = request.params[2].get_int();
+ if (horizonInt < 0 || horizonInt > 2) {
+ throw JSONRPCError(RPC_TYPE_ERROR, "Invalid horizon for fee estimates");
+ } else {
+ horizon = (FeeEstimateHorizon)horizonInt;
+ }
+ }
+ UniValue result(UniValue::VOBJ);
+ CFeeRate feeRate;
+ EstimationResult buckets;
+ feeRate = ::feeEstimator.estimateRawFee(nBlocks, threshold, horizon, &buckets);
+
+ result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
+ result.push_back(Pair("decay", buckets.decay));
+ result.push_back(Pair("scale", (int)buckets.scale));
+ UniValue passbucket(UniValue::VOBJ);
+ passbucket.push_back(Pair("startrange", round(buckets.pass.start)));
+ passbucket.push_back(Pair("endrange", round(buckets.pass.end)));
+ passbucket.push_back(Pair("withintarget", round(buckets.pass.withinTarget * 100.0) / 100.0));
+ passbucket.push_back(Pair("totalconfirmed", round(buckets.pass.totalConfirmed * 100.0) / 100.0));
+ passbucket.push_back(Pair("inmempool", round(buckets.pass.inMempool * 100.0) / 100.0));
+ passbucket.push_back(Pair("leftmempool", round(buckets.pass.leftMempool * 100.0) / 100.0));
+ result.push_back(Pair("pass", passbucket));
+ UniValue failbucket(UniValue::VOBJ);
+ failbucket.push_back(Pair("startrange", round(buckets.fail.start)));
+ failbucket.push_back(Pair("endrange", round(buckets.fail.end)));
+ failbucket.push_back(Pair("withintarget", round(buckets.fail.withinTarget * 100.0) / 100.0));
+ failbucket.push_back(Pair("totalconfirmed", round(buckets.fail.totalConfirmed * 100.0) / 100.0));
+ failbucket.push_back(Pair("inmempool", round(buckets.fail.inMempool * 100.0) / 100.0));
+ failbucket.push_back(Pair("leftmempool", round(buckets.fail.leftMempool * 100.0) / 100.0));
+ result.push_back(Pair("fail", failbucket));
+ return result;
+}
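
For completeness, the estimator call that the RPC above wraps can also be made directly from C++; a minimal sketch with a hypothetical helper name, assuming the usual ::feeEstimator global:

    // Hypothetical helper mirroring the estimaterawfee RPC above.
    CFeeRate GetRawEstimate()
    {
        EstimationResult buckets;
        CFeeRate rate = ::feeEstimator.estimateRawFee(/* confTarget */ 6,
                                                      /* successThreshold */ 0.95,
                                                      FeeEstimateHorizon::MED_HALFLIFE,
                                                      &buckets);
        // buckets.pass describes the lowest feerate range that met the threshold,
        // buckets.fail the highest range that did not; CFeeRate(0) means no answer.
        return rate;
    }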
+
static const CRPCCommand commands[] =
{ // category name actor (function) okSafeMode
// --------------------- ------------------------ ----------------------- ----------
@@ -876,7 +967,9 @@ static const CRPCCommand commands[] =
{ "generating", "generatetoaddress", &generatetoaddress, true, {"nblocks","address","maxtries"} },
{ "util", "estimatefee", &estimatefee, true, {"nblocks"} },
- { "util", "estimatesmartfee", &estimatesmartfee, true, {"nblocks"} },
+ { "util", "estimatesmartfee", &estimatesmartfee, true, {"nblocks", "conservative"} },
+
+ { "hidden", "estimaterawfee", &estimaterawfee, true, {"nblocks", "threshold", "horizon"} },
};
void RegisterMiningRPCCommands(CRPCTable &t)
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 3947fb3f7d..683bb25246 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -523,6 +523,11 @@ static void TxInErrorToJSON(const CTxIn& txin, UniValue& vErrorsRet, const std::
UniValue entry(UniValue::VOBJ);
entry.push_back(Pair("txid", txin.prevout.hash.ToString()));
entry.push_back(Pair("vout", (uint64_t)txin.prevout.n));
+ UniValue witness(UniValue::VARR);
+ for (unsigned int i = 0; i < txin.scriptWitness.stack.size(); i++) {
+ witness.push_back(HexStr(txin.scriptWitness.stack[i].begin(), txin.scriptWitness.stack[i].end()));
+ }
+ entry.push_back(Pair("witness", witness));
entry.push_back(Pair("scriptSig", HexStr(txin.scriptSig.begin(), txin.scriptSig.end())));
entry.push_back(Pair("sequence", (uint64_t)txin.nSequence));
entry.push_back(Pair("error", strMessage));
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index ea93e8cc74..c5fbff0077 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -44,17 +44,17 @@ static struct CRPCSignals
boost::signals2::signal<void (const CRPCCommand&)> PreCommand;
} g_rpcSignals;
-void RPCServer::OnStarted(boost::function<void ()> slot)
+void RPCServer::OnStarted(std::function<void ()> slot)
{
g_rpcSignals.Started.connect(slot);
}
-void RPCServer::OnStopped(boost::function<void ()> slot)
+void RPCServer::OnStopped(std::function<void ()> slot)
{
g_rpcSignals.Stopped.connect(slot);
}
-void RPCServer::OnPreCommand(boost::function<void (const CRPCCommand&)> slot)
+void RPCServer::OnPreCommand(std::function<void (const CRPCCommand&)> slot)
{
g_rpcSignals.PreCommand.connect(boost::bind(slot, _1));
}
@@ -536,7 +536,7 @@ void RPCUnsetTimerInterface(RPCTimerInterface *iface)
timerInterface = NULL;
}
-void RPCRunLater(const std::string& name, boost::function<void(void)> func, int64_t nSeconds)
+void RPCRunLater(const std::string& name, std::function<void(void)> func, int64_t nSeconds)
{
if (!timerInterface)
throw JSONRPCError(RPC_INTERNAL_ERROR, "No timer handler registered for RPC");
diff --git a/src/rpc/server.h b/src/rpc/server.h
index 34a9d3c24c..1e984cbc0d 100644
--- a/src/rpc/server.h
+++ b/src/rpc/server.h
@@ -15,8 +15,6 @@
#include <stdint.h>
#include <string>
-#include <boost/function.hpp>
-
#include <univalue.h>
static const unsigned int DEFAULT_RPC_SERIALIZE_VERSION = 1;
@@ -25,9 +23,9 @@ class CRPCCommand;
namespace RPCServer
{
- void OnStarted(boost::function<void ()> slot);
- void OnStopped(boost::function<void ()> slot);
- void OnPreCommand(boost::function<void (const CRPCCommand&)> slot);
+ void OnStarted(std::function<void ()> slot);
+ void OnStopped(std::function<void ()> slot);
+ void OnPreCommand(std::function<void (const CRPCCommand&)> slot);
}
class CBlockIndex;
@@ -115,7 +113,7 @@ public:
* This is needed to cope with the case in which there is no HTTP server, but
* only GUI RPC console, and to break the dependency of pcserver on httprpc.
*/
- virtual RPCTimerBase* NewTimer(boost::function<void(void)>& func, int64_t millis) = 0;
+ virtual RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis) = 0;
};
/** Set the factory function for timers */
@@ -129,7 +127,7 @@ void RPCUnsetTimerInterface(RPCTimerInterface *iface);
* Run func nSeconds from now.
* Overrides previous timer <name> (if any).
*/
-void RPCRunLater(const std::string& name, boost::function<void(void)> func, int64_t nSeconds);
+void RPCRunLater(const std::string& name, std::function<void(void)> func, int64_t nSeconds);
typedef UniValue(*rpcfn_type)(const JSONRPCRequest& jsonRequest);
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 0c1cfa2718..923ba2c231 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -4,6 +4,7 @@
#include "scheduler.h"
+#include "random.h"
#include "reverselock.h"
#include <assert.h>
@@ -39,6 +40,11 @@ void CScheduler::serviceQueue()
// is called.
while (!shouldStop()) {
try {
+ if (!shouldStop() && taskQueue.empty()) {
+ reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ // Use this chance to get a tiny bit more entropy
+ RandAddSeedSleep();
+ }
while (!shouldStop() && taskQueue.empty()) {
// Wait until there is something to do.
newTaskScheduled.wait(lock);
diff --git a/src/scheduler.h b/src/scheduler.h
index 613fc1653f..27412a15b4 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -7,8 +7,8 @@
//
// NOTE:
-// boost::thread / boost::function / boost::chrono should be ported to
-// std::thread / std::function / std::chrono when we support C++11.
+// boost::thread / boost::chrono should be ported to std::thread / std::chrono
+// when we support C++11.
//
#include <boost/chrono/chrono.hpp>
#include <boost/thread.hpp>
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index 3812490ec0..dc5372a070 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -104,10 +104,14 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
// Test: New table has one addr and we add a diff addr we should
- // have two addrs.
+ // have at least one addr.
+ // Note that addrman's size cannot be tested reliably after insertion, as
+ // hash collisions may occur. But we can always be sure of at least one
+ // success.
+
CService addr2 = ResolveService("250.1.1.2", 8333);
BOOST_CHECK(addrman.Add(CAddress(addr2, NODE_NONE), source));
- BOOST_CHECK_EQUAL(addrman.size(), 2);
+ BOOST_CHECK(addrman.size() >= 1);
// Test: AddrMan::Clear() should empty the new table.
addrman.Clear();
@@ -120,7 +124,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE));
vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE));
BOOST_CHECK(addrman.Add(vAddr, source));
- BOOST_CHECK_EQUAL(addrman.size(), 2);
+ BOOST_CHECK(addrman.size() >= 1);
}
BOOST_AUTO_TEST_CASE(addrman_ports)
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index fadff612d4..a40060e657 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -6,6 +6,7 @@
#include "coins.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
+#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "validation.h"
#include "miner.h"
diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp
index ed6782ea34..6bfd315647 100644
--- a/src/test/policyestimator_tests.cpp
+++ b/src/test/policyestimator_tests.cpp
@@ -50,8 +50,8 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
int blocknum = 0;
// Loop through 200 blocks
- // At a decay .998 and 4 fee transactions per block
- // This makes the tx count about 1.33 per bucket, above the 1 threshold
+ // At a decay .9952 and 4 fee transactions per block
+ // This makes the tx count about 2.5 per bucket, well above the 0.1 threshold
while (blocknum < 200) {
for (int j = 0; j < 10; j++) { // For each fee
for (int k = 0; k < 4; k++) { // add 4 fee txs
@@ -75,20 +75,14 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
}
mpool.removeForBlock(block, ++blocknum);
block.clear();
- if (blocknum == 30) {
- // At this point we should need to combine 5 buckets to get enough data points
- // So estimateFee(1,2,3) should fail and estimateFee(4) should return somewhere around
- // 8*baserate. estimateFee(4) %'s are 100,100,100,100,90 = average 98%
+ // Check after just a few txs that combining buckets works as expected
+ if (blocknum == 3) {
+ // At this point we should need to combine 3 buckets to get enough data points
+ // So estimateFee(1) should fail and estimateFee(2) should return somewhere around
+ // 9*baserate. estimateFee(2) %'s are 100,100,90 = average 97%
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
- BOOST_CHECK(feeEst.estimateFee(2) == CFeeRate(0));
- BOOST_CHECK(feeEst.estimateFee(3) == CFeeRate(0));
- BOOST_CHECK(feeEst.estimateFee(4).GetFeePerK() < 8*baseRate.GetFeePerK() + deltaFee);
- BOOST_CHECK(feeEst.estimateFee(4).GetFeePerK() > 8*baseRate.GetFeePerK() - deltaFee);
- int answerFound;
- BOOST_CHECK(feeEst.estimateSmartFee(1, &answerFound, mpool) == feeEst.estimateFee(4) && answerFound == 4);
- BOOST_CHECK(feeEst.estimateSmartFee(3, &answerFound, mpool) == feeEst.estimateFee(4) && answerFound == 4);
- BOOST_CHECK(feeEst.estimateSmartFee(4, &answerFound, mpool) == feeEst.estimateFee(4) && answerFound == 4);
- BOOST_CHECK(feeEst.estimateSmartFee(8, &answerFound, mpool) == feeEst.estimateFee(8) && answerFound == 8);
+ BOOST_CHECK(feeEst.estimateFee(2).GetFeePerK() < 9*baseRate.GetFeePerK() + deltaFee);
+ BOOST_CHECK(feeEst.estimateFee(2).GetFeePerK() > 9*baseRate.GetFeePerK() - deltaFee);
}
}
@@ -105,13 +99,14 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
}
int mult = 11-i;
- if (i > 1) {
+ if (i % 2 == 0) { // At scale 2, test logic is only correct for even targets
BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
}
- else {
- BOOST_CHECK(origFeeEst[i-1] == CFeeRate(0).GetFeePerK());
- }
+ }
+ // Fill out rest of the original estimates
+ for (int i = 10; i <= 48; i++) {
+ origFeeEst.push_back(feeEst.estimateFee(i).GetFeePerK());
}
// Mine 50 more blocks with no transactions happening, estimates shouldn't change
@@ -140,10 +135,8 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
mpool.removeForBlock(block, ++blocknum);
}
- int answerFound;
for (int i = 1; i < 10;i++) {
BOOST_CHECK(feeEst.estimateFee(i) == CFeeRate(0) || feeEst.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
- BOOST_CHECK(feeEst.estimateSmartFee(i, &answerFound, mpool).GetFeePerK() > origFeeEst[answerFound-1] - deltaFee);
}
// Mine all those transactions
@@ -156,16 +149,16 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
txHashes[j].pop_back();
}
}
- mpool.removeForBlock(block, 265);
+ mpool.removeForBlock(block, 266);
block.clear();
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
for (int i = 2; i < 10;i++) {
- BOOST_CHECK(feeEst.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
+ BOOST_CHECK(feeEst.estimateFee(i) == CFeeRate(0) || feeEst.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
}
- // Mine 200 more blocks where everything is mined every block
+ // Mine 400 more blocks where everything is mined every block
// Estimates should be below original estimates
- while (blocknum < 465) {
+ while (blocknum < 665) {
for (int j = 0; j < 10; j++) { // For each fee multiple
for (int k = 0; k < 4; k++) { // add 4 fee txs
tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
@@ -181,7 +174,7 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
block.clear();
}
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
- for (int i = 2; i < 10; i++) {
+ for (int i = 2; i < 9; i++) { // At 9, the original estimate was already at the bottom (b/c scale = 2)
BOOST_CHECK(feeEst.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
}
@@ -191,7 +184,7 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
mpool.TrimToSize(1);
BOOST_CHECK(mpool.GetMinFee(1).GetFeePerK() > feeV[5]);
for (int i = 1; i < 10; i++) {
- BOOST_CHECK(feeEst.estimateSmartFee(i, NULL, mpool).GetFeePerK() >= feeEst.estimateFee(i).GetFeePerK());
+ BOOST_CHECK(feeEst.estimateSmartFee(i, NULL, mpool).GetFeePerK() >= feeEst.estimateRawFee(i, 0.85, FeeEstimateHorizon::MED_HALFLIFE).GetFeePerK());
BOOST_CHECK(feeEst.estimateSmartFee(i, NULL, mpool).GetFeePerK() >= mpool.GetMinFee(1).GetFeePerK());
}
}
diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_P2SH_tests.cpp
index f8fd8cc30c..ede68f23d7 100644
--- a/src/test/script_P2SH_tests.cpp
+++ b/src/test/script_P2SH_tests.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include "consensus/tx_verify.h"
#include "core_io.h"
#include "key.h"
#include "keystore.h"
diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp
index 5279cb243b..2f7c22084e 100644
--- a/src/test/sighash_tests.cpp
+++ b/src/test/sighash_tests.cpp
@@ -2,10 +2,10 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "data/sighash.json.h"
#include "hash.h"
-#include "validation.h" // For CheckTransaction
#include "script/interpreter.h"
#include "script/script.h"
#include "serialize.h"
diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp
index 13d8911f03..92781d763d 100644
--- a/src/test/sigopcount_tests.cpp
+++ b/src/test/sigopcount_tests.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include "validation.h"
+#include "consensus/tx_verify.h"
#include "pubkey.h"
#include "key.h"
#include "script/script.h"
diff --git a/src/test/test_bitcoin_fuzzy.cpp b/src/test/test_bitcoin_fuzzy.cpp
index c4983f6f5c..e11e46bb02 100644
--- a/src/test/test_bitcoin_fuzzy.cpp
+++ b/src/test/test_bitcoin_fuzzy.cpp
@@ -59,9 +59,8 @@ bool read_stdin(std::vector<char> &data) {
return length==0;
}
-int main(int argc, char **argv)
+int do_fuzz()
{
- ECCVerifyHandle globalVerifyHandle;
std::vector<char> buffer;
if (!read_stdin(buffer)) return 0;
@@ -256,3 +255,23 @@ int main(int argc, char **argv)
return 0;
}
+int main(int argc, char **argv)
+{
+ ECCVerifyHandle globalVerifyHandle;
+#ifdef __AFL_INIT
+ // Enable AFL deferred forkserver mode. Requires compilation using
+ // afl-clang-fast++. See fuzzing.md for details.
+ __AFL_INIT();
+#endif
+
+#ifdef __AFL_LOOP
+ // Enable AFL persistent mode. Requires compilation using afl-clang-fast++.
+ // See fuzzing.md for details.
+ while (__AFL_LOOP(1000)) {
+ do_fuzz();
+ }
+ return 0;
+#else
+ return do_fuzz();
+#endif
+}
diff --git a/src/test/torcontrol_tests.cpp b/src/test/torcontrol_tests.cpp
new file mode 100644
index 0000000000..b7affaacde
--- /dev/null
+++ b/src/test/torcontrol_tests.cpp
@@ -0,0 +1,199 @@
+// Copyright (c) 2017 The Zcash developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include "test/test_bitcoin.h"
+#include "torcontrol.cpp"
+
+#include <boost/test/unit_test.hpp>
+
+
+BOOST_FIXTURE_TEST_SUITE(torcontrol_tests, BasicTestingSetup)
+
+void CheckSplitTorReplyLine(std::string input, std::string command, std::string args)
+{
+ BOOST_TEST_MESSAGE(std::string("CheckSplitTorReplyLine(") + input + ")");
+ auto ret = SplitTorReplyLine(input);
+ BOOST_CHECK_EQUAL(ret.first, command);
+ BOOST_CHECK_EQUAL(ret.second, args);
+}
+
+BOOST_AUTO_TEST_CASE(util_SplitTorReplyLine)
+{
+ // Data we should receive during normal usage
+ CheckSplitTorReplyLine(
+ "PROTOCOLINFO PIVERSION",
+ "PROTOCOLINFO", "PIVERSION");
+ CheckSplitTorReplyLine(
+ "AUTH METHODS=COOKIE,SAFECOOKIE COOKIEFILE=\"/home/x/.tor/control_auth_cookie\"",
+ "AUTH", "METHODS=COOKIE,SAFECOOKIE COOKIEFILE=\"/home/x/.tor/control_auth_cookie\"");
+ CheckSplitTorReplyLine(
+ "AUTH METHODS=NULL",
+ "AUTH", "METHODS=NULL");
+ CheckSplitTorReplyLine(
+ "AUTH METHODS=HASHEDPASSWORD",
+ "AUTH", "METHODS=HASHEDPASSWORD");
+ CheckSplitTorReplyLine(
+ "VERSION Tor=\"0.2.9.8 (git-a0df013ea241b026)\"",
+ "VERSION", "Tor=\"0.2.9.8 (git-a0df013ea241b026)\"");
+ CheckSplitTorReplyLine(
+ "AUTHCHALLENGE SERVERHASH=aaaa SERVERNONCE=bbbb",
+ "AUTHCHALLENGE", "SERVERHASH=aaaa SERVERNONCE=bbbb");
+
+ // Other valid inputs
+ CheckSplitTorReplyLine("COMMAND", "COMMAND", "");
+ CheckSplitTorReplyLine("COMMAND SOME ARGS", "COMMAND", "SOME ARGS");
+
+ // These inputs are valid because PROTOCOLINFO accepts an OtherLine that is
+ // just an OptArguments, which enables multiple spaces to be present
+ // between the command and arguments.
+ CheckSplitTorReplyLine("COMMAND ARGS", "COMMAND", " ARGS");
+ CheckSplitTorReplyLine("COMMAND EVEN+more ARGS", "COMMAND", " EVEN+more ARGS");
+}
+
+void CheckParseTorReplyMapping(std::string input, std::map<std::string,std::string> expected)
+{
+ BOOST_TEST_MESSAGE(std::string("CheckParseTorReplyMapping(") + input + ")");
+ auto ret = ParseTorReplyMapping(input);
+ BOOST_CHECK_EQUAL(ret.size(), expected.size());
+ auto r_it = ret.begin();
+ auto e_it = expected.begin();
+ while (r_it != ret.end() && e_it != expected.end()) {
+ BOOST_CHECK_EQUAL(r_it->first, e_it->first);
+ BOOST_CHECK_EQUAL(r_it->second, e_it->second);
+ r_it++;
+ e_it++;
+ }
+}
+
+BOOST_AUTO_TEST_CASE(util_ParseTorReplyMapping)
+{
+ // Data we should receive during normal usage
+ CheckParseTorReplyMapping(
+ "METHODS=COOKIE,SAFECOOKIE COOKIEFILE=\"/home/x/.tor/control_auth_cookie\"", {
+ {"METHODS", "COOKIE,SAFECOOKIE"},
+ {"COOKIEFILE", "/home/x/.tor/control_auth_cookie"},
+ });
+ CheckParseTorReplyMapping(
+ "METHODS=NULL", {
+ {"METHODS", "NULL"},
+ });
+ CheckParseTorReplyMapping(
+ "METHODS=HASHEDPASSWORD", {
+ {"METHODS", "HASHEDPASSWORD"},
+ });
+ CheckParseTorReplyMapping(
+ "Tor=\"0.2.9.8 (git-a0df013ea241b026)\"", {
+ {"Tor", "0.2.9.8 (git-a0df013ea241b026)"},
+ });
+ CheckParseTorReplyMapping(
+ "SERVERHASH=aaaa SERVERNONCE=bbbb", {
+ {"SERVERHASH", "aaaa"},
+ {"SERVERNONCE", "bbbb"},
+ });
+ CheckParseTorReplyMapping(
+ "ServiceID=exampleonion1234", {
+ {"ServiceID", "exampleonion1234"},
+ });
+ CheckParseTorReplyMapping(
+ "PrivateKey=RSA1024:BLOB", {
+ {"PrivateKey", "RSA1024:BLOB"},
+ });
+ CheckParseTorReplyMapping(
+ "ClientAuth=bob:BLOB", {
+ {"ClientAuth", "bob:BLOB"},
+ });
+
+ // Other valid inputs
+ CheckParseTorReplyMapping(
+ "Foo=Bar=Baz Spam=Eggs", {
+ {"Foo", "Bar=Baz"},
+ {"Spam", "Eggs"},
+ });
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar=Baz\"", {
+ {"Foo", "Bar=Baz"},
+ });
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar Baz\"", {
+ {"Foo", "Bar Baz"},
+ });
+
+ // Escapes
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar\\ Baz\"", {
+ {"Foo", "Bar Baz"},
+ });
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar\\Baz\"", {
+ {"Foo", "BarBaz"},
+ });
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar\\@Baz\"", {
+ {"Foo", "Bar@Baz"},
+ });
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar\\\"Baz\" Spam=\"\\\"Eggs\\\"\"", {
+ {"Foo", "Bar\"Baz"},
+ {"Spam", "\"Eggs\""},
+ });
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar\\\\Baz\"", {
+ {"Foo", "Bar\\Baz"},
+ });
+
+ // C escapes
+ CheckParseTorReplyMapping(
+ "Foo=\"Bar\\nBaz\\t\" Spam=\"\\rEggs\" Octals=\"\\1a\\11\\17\\18\\81\\377\\378\\400\\2222\" Final=Check", {
+ {"Foo", "Bar\nBaz\t"},
+ {"Spam", "\rEggs"},
+ {"Octals", "\1a\11\17\1" "881\377\37" "8\40" "0\222" "2"},
+ {"Final", "Check"},
+ });
+ CheckParseTorReplyMapping(
+ "Valid=Mapping Escaped=\"Escape\\\\\"", {
+ {"Valid", "Mapping"},
+ {"Escaped", "Escape\\"},
+ });
+ CheckParseTorReplyMapping(
+ "Valid=Mapping Bare=\"Escape\\\"", {});
+ CheckParseTorReplyMapping(
+ "OneOctal=\"OneEnd\\1\" TwoOctal=\"TwoEnd\\11\"", {
+ {"OneOctal", "OneEnd\1"},
+ {"TwoOctal", "TwoEnd\11"},
+ });
+
+ // Special handling for null case
+ // (needed because string comparison reads the null as end-of-string)
+ BOOST_TEST_MESSAGE(std::string("CheckParseTorReplyMapping(Null=\"\\0\")"));
+ auto ret = ParseTorReplyMapping("Null=\"\\0\"");
+ BOOST_CHECK_EQUAL(ret.size(), 1);
+ auto r_it = ret.begin();
+ BOOST_CHECK_EQUAL(r_it->first, "Null");
+ BOOST_CHECK_EQUAL(r_it->second.size(), 1);
+ BOOST_CHECK_EQUAL(r_it->second[0], '\0');
+
+ // A more complex valid grammar. PROTOCOLINFO accepts a VersionLine that
+ // takes a key=value pair followed by an OptArguments, making this valid.
+ // Because an OptArguments contains no semantic data, there is no point in
+ // parsing it.
+ CheckParseTorReplyMapping(
+ "SOME=args,here MORE optional=arguments here", {
+ {"SOME", "args,here"},
+ });
+
+ // Inputs that are effectively invalid under the target grammar.
+ // PROTOCOLINFO accepts an OtherLine that is just an OptArguments, which
+ // would make these inputs valid. However,
+ // - This parser is never used in that situation, because the
+ // SplitTorReplyLine parser enables OtherLine to be skipped.
+ // - Even if these were valid, an OptArguments contains no semantic data,
+ // so there is no point in parsing it.
+ CheckParseTorReplyMapping("ARGS", {});
+ CheckParseTorReplyMapping("MORE ARGS", {});
+ CheckParseTorReplyMapping("MORE ARGS", {});
+ CheckParseTorReplyMapping("EVEN more=ARGS", {});
+ CheckParseTorReplyMapping("EVEN+more ARGS", {});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 3b5da4980b..67610301d7 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -8,11 +8,12 @@
#include "clientversion.h"
#include "checkqueue.h"
+#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "core_io.h"
#include "key.h"
#include "keystore.h"
-#include "validation.h" // For CheckTransaction
+#include "validation.h"
#include "policy/policy.h"
#include "script/script.h"
#include "script/sign.h"
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index c1bd95b00f..8a37139f1d 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -1,4 +1,5 @@
// Copyright (c) 2015-2016 The Bitcoin Core developers
+// Copyright (c) 2017 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -14,7 +15,6 @@
#include <set>
#include <stdlib.h>
-#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/signals2/signal.hpp>
#include <boost/foreach.hpp>
@@ -73,8 +73,8 @@ public:
class TorControlConnection
{
public:
- typedef boost::function<void(TorControlConnection&)> ConnectionCB;
- typedef boost::function<void(TorControlConnection &,const TorControlReply &)> ReplyHandlerCB;
+ typedef std::function<void(TorControlConnection&)> ConnectionCB;
+ typedef std::function<void(TorControlConnection &,const TorControlReply &)> ReplyHandlerCB;
/** Create a new TorControlConnection.
*/
@@ -105,9 +105,9 @@ public:
boost::signals2::signal<void(TorControlConnection &,const TorControlReply &)> async_handler;
private:
/** Callback when ready for use */
- boost::function<void(TorControlConnection&)> connected;
+ std::function<void(TorControlConnection&)> connected;
/** Callback when connection lost */
- boost::function<void(TorControlConnection&)> disconnected;
+ std::function<void(TorControlConnection&)> disconnected;
/** Libevent event base */
struct event_base *base;
/** Connection to control socket */
@@ -250,6 +250,8 @@ bool TorControlConnection::Command(const std::string &cmd, const ReplyHandlerCB&
/* Split reply line in the form 'AUTH METHODS=...' into a type
* 'AUTH' and arguments 'METHODS=...'.
+ * Grammar is implicitly defined in https://spec.torproject.org/control-spec by
+ * the server reply formats for PROTOCOLINFO (S3.21) and AUTHCHALLENGE (S3.24).
*/
static std::pair<std::string,std::string> SplitTorReplyLine(const std::string &s)
{
@@ -265,6 +267,10 @@ static std::pair<std::string,std::string> SplitTorReplyLine(const std::string &s
}
/** Parse reply arguments in the form 'METHODS=COOKIE,SAFECOOKIE COOKIEFILE=".../control_auth_cookie"'.
+ * Returns a map of keys to values, or an empty map if there was an error.
+ * Grammar is implicitly defined in https://spec.torproject.org/control-spec by
+ * the server reply formats for PROTOCOLINFO (S3.21), AUTHCHALLENGE (S3.24),
+ * and ADD_ONION (S3.27). See also sections 2.1 and 2.3.
*/
static std::map<std::string,std::string> ParseTorReplyMapping(const std::string &s)
{
@@ -272,28 +278,74 @@ static std::map<std::string,std::string> ParseTorReplyMapping(const std::string
size_t ptr=0;
while (ptr < s.size()) {
std::string key, value;
- while (ptr < s.size() && s[ptr] != '=') {
+ while (ptr < s.size() && s[ptr] != '=' && s[ptr] != ' ') {
key.push_back(s[ptr]);
++ptr;
}
if (ptr == s.size()) // unexpected end of line
return std::map<std::string,std::string>();
+ if (s[ptr] == ' ') // The remaining string is an OptArguments
+ break;
++ptr; // skip '='
if (ptr < s.size() && s[ptr] == '"') { // Quoted string
- ++ptr; // skip '='
+ ++ptr; // skip opening '"'
bool escape_next = false;
- while (ptr < s.size() && (!escape_next && s[ptr] != '"')) {
- escape_next = (s[ptr] == '\\');
+ while (ptr < s.size() && (escape_next || s[ptr] != '"')) {
+ // Repeated backslashes must be interpreted as pairs
+ escape_next = (s[ptr] == '\\' && !escape_next);
value.push_back(s[ptr]);
++ptr;
}
if (ptr == s.size()) // unexpected end of line
return std::map<std::string,std::string>();
++ptr; // skip closing '"'
- /* TODO: unescape value - according to the spec this depends on the
- * context, some strings use C-LogPrintf style escape codes, some
- * don't. So may be better handled at the call site.
+ /**
+ * Unescape value. Per https://spec.torproject.org/control-spec section 2.1.1:
+ *
+ * For future-proofing, controller implementors MAY use the following
+ * rules to be compatible with buggy Tor implementations and with
+ * future ones that implement the spec as intended:
+ *
+ * Read \n \t \r and \0 ... \377 as C escapes.
+ * Treat a backslash followed by any other character as that character.
*/
+ std::string escaped_value;
+ for (size_t i = 0; i < value.size(); ++i) {
+ if (value[i] == '\\') {
+ // This will always be valid, because if the QuotedString
+ // ended in an odd number of backslashes, then the parser
+ // would already have returned above, due to a missing
+ // terminating double-quote.
+ ++i;
+ if (value[i] == 'n') {
+ escaped_value.push_back('\n');
+ } else if (value[i] == 't') {
+ escaped_value.push_back('\t');
+ } else if (value[i] == 'r') {
+ escaped_value.push_back('\r');
+ } else if ('0' <= value[i] && value[i] <= '7') {
+ size_t j;
+ // Octal escape sequences have a limit of three octal digits,
+ // but terminate at the first character that is not a valid
+ // octal digit if encountered sooner.
+ for (j = 1; j < 3 && (i+j) < value.size() && '0' <= value[i+j] && value[i+j] <= '7'; ++j) {}
+ // Tor restricts first digit to 0-3 for three-digit octals.
+ // A leading digit of 4-7 would therefore be interpreted as
+ // a two-digit octal.
+ if (j == 3 && value[i] > '3') {
+ j--;
+ }
+ escaped_value.push_back(strtol(value.substr(i, j).c_str(), NULL, 8));
+ // Account for automatic incrementing at loop end
+ i += j - 1;
+ } else {
+ escaped_value.push_back(value[i]);
+ }
+ } else {
+ escaped_value.push_back(value[i]);
+ }
+ }
+ value = escaped_value;
} else { // Unquoted value. Note that values can contain '=' at will, just no spaces
while (ptr < s.size() && s[ptr] != ' ') {
value.push_back(s[ptr]);
@@ -323,6 +375,10 @@ static std::pair<bool,std::string> ReadBinaryFile(const fs::path &filename, size
char buffer[128];
size_t n;
while ((n=fread(buffer, 1, sizeof(buffer), f)) > 0) {
+ // Check for reading errors so we don't return any data if we couldn't
+ // read the entire file (or up to maxsize)
+ if (ferror(f))
+ return std::make_pair(false,"");
retval.append(buffer, buffer+n);
if (retval.size() > maxsize)
break;
@@ -439,6 +495,13 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
if ((i = m.find("PrivateKey")) != m.end())
private_key = i->second;
}
+ if (service_id.empty()) {
+ LogPrintf("tor: Error parsing ADD_ONION parameters:\n");
+ for (const std::string &s : reply.lines) {
+ LogPrintf(" %s\n", SanitizeString(s));
+ }
+ return;
+ }
service = LookupNumeric(std::string(service_id+".onion").c_str(), GetListenPort());
LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
@@ -516,6 +579,10 @@ void TorController::authchallenge_cb(TorControlConnection& _conn, const TorContr
std::pair<std::string,std::string> l = SplitTorReplyLine(reply.lines[0]);
if (l.first == "AUTHCHALLENGE") {
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
+ if (m.empty()) {
+ LogPrintf("tor: Error parsing AUTHCHALLENGE parameters: %s\n", SanitizeString(l.second));
+ return;
+ }
std::vector<uint8_t> serverHash = ParseHex(m["SERVERHASH"]);
std::vector<uint8_t> serverNonce = ParseHex(m["SERVERNONCE"]);
LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
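The unescaping loop added to ParseTorReplyMapping above implements the control-spec rules quoted in its comment. The following is a minimal standalone sketch of the same rules, useful for seeing the octal cases in isolation; UnescapeTorValue is a hypothetical name used only for illustration, it is not part of this patch, and it assumes the input has already passed the quoted-string scan so escapes arrive in well-formed pairs:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    static std::string UnescapeTorValue(const std::string& value)
    {
        std::string out;
        for (size_t i = 0; i < value.size(); ++i) {
            if (value[i] != '\\') { out.push_back(value[i]); continue; }
            ++i;                          // character following the backslash
            if (i >= value.size()) break; // malformed; the real parser rejects this earlier
            if (value[i] == 'n') {
                out.push_back('\n');
            } else if (value[i] == 't') {
                out.push_back('\t');
            } else if (value[i] == 'r') {
                out.push_back('\r');
            } else if ('0' <= value[i] && value[i] <= '7') {
                // Up to three octal digits; a leading digit of 4-7 limits it to two.
                size_t j = 1;
                while (j < 3 && i + j < value.size() && '0' <= value[i + j] && value[i + j] <= '7') ++j;
                if (j == 3 && value[i] > '3') --j;
                out.push_back(static_cast<char>(std::strtol(value.substr(i, j).c_str(), nullptr, 8)));
                i += j - 1;               // account for the extra digits consumed
            } else {
                out.push_back(value[i]);  // "\x" is just "x" for any other character
            }
        }
        return out;
    }

    int main()
    {
        printf("%zu\n", UnescapeTorValue("\\377").size());       // 1: a single 0xff byte
        printf("[%s]\n", UnescapeTorValue("\\400").c_str());     // [ 0]: "\40" (a space) then a literal '0'
        printf("%d\n", UnescapeTorValue("Bar\\nBaz")[3] == '\n'); // 1: "\n" became a real newline
        return 0;
    }

The "\400" case is why the Octals entry in torcontrol_tests.cpp above expects "\40" followed by a literal '0'.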
diff --git a/src/txdb.cpp b/src/txdb.cpp
index a3889fdf79..42dc31760b 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -169,7 +169,7 @@ bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
return true;
}
-bool CBlockTreeDB::LoadBlockIndexGuts(boost::function<CBlockIndex*(const uint256&)> insertBlockIndex)
+bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
{
std::unique_ptr<CDBIterator> pcursor(NewIterator());
diff --git a/src/txdb.h b/src/txdb.h
index d9214ba618..117e7201fb 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -15,8 +15,6 @@
#include <utility>
#include <vector>
-#include <boost/function.hpp>
-
class CBlockIndex;
class CCoinsViewDBCursor;
class uint256;
@@ -122,7 +120,7 @@ public:
bool WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> > &list);
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
- bool LoadBlockIndexGuts(boost::function<CBlockIndex*(const uint256&)> insertBlockIndex);
+ bool LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
};
#endif // BITCOIN_TXDB_H
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index ac842da6bf..33df0536d0 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -6,6 +6,7 @@
#include "txmempool.h"
#include "consensus/consensus.h"
+#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "validation.h"
#include "policy/policy.h"
@@ -448,7 +449,7 @@ void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason)
mapLinks.erase(it);
mapTx.erase(it);
nTransactionsUpdated++;
- if (minerPolicyEstimator) {minerPolicyEstimator->removeTx(hash);}
+ if (minerPolicyEstimator) {minerPolicyEstimator->removeTx(hash, false);}
}
// Calculates descendants of entry that are not already in setDescendants, and adds to
@@ -865,6 +866,7 @@ void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeD
BOOST_FOREACH(txiter descendantIt, setDescendants) {
mapTx.modify(descendantIt, update_ancestor_state(0, nFeeDelta, 0, 0));
}
+ ++nTransactionsUpdated;
}
}
LogPrintf("PrioritiseTransaction: %s feerate += %s\n", hash.ToString(), FormatMoney(nFeeDelta));
diff --git a/src/txmempool.h b/src/txmempool.h
index 9cb73fcc0c..a91eb5be54 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -397,7 +397,7 @@ class CTxMemPool
{
private:
uint32_t nCheckFrequency; //!< Value n means that n times in 2^32 we check.
- unsigned int nTransactionsUpdated;
+ unsigned int nTransactionsUpdated; //!< Used by getblocktemplate to trigger CreateNewBlock() invocation
CBlockPolicyEstimator* minerPolicyEstimator;
uint64_t totalTxSize; //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141.
diff --git a/src/validation.cpp b/src/validation.cpp
index ccc14a91f1..89358f9603 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -12,6 +12,7 @@
#include "checkqueue.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
+#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "fs.h"
#include "hash.h"
@@ -188,19 +189,6 @@ enum FlushStateMode {
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
-bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
-{
- if (tx.nLockTime == 0)
- return true;
- if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
- return true;
- for (const auto& txin : tx.vin) {
- if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
- return false;
- }
- return true;
-}
-
bool CheckFinalTx(const CTransaction &tx, int flags)
{
AssertLockHeld(cs_main);
@@ -233,89 +221,6 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
-/**
- * Calculates the block height and previous block's median time past at
- * which the transaction will be considered final in the context of BIP 68.
- * Also removes from the vector of input heights any entries which did not
- * correspond to sequence locked inputs as they do not affect the calculation.
- */
-static std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
-{
- assert(prevHeights->size() == tx.vin.size());
-
- // Will be set to the equivalent height- and time-based nLockTime
- // values that would be necessary to satisfy all relative lock-
- // time constraints given our view of block chain history.
- // The semantics of nLockTime are the last invalid height/time, so
- // use -1 to have the effect of any height or time being valid.
- int nMinHeight = -1;
- int64_t nMinTime = -1;
-
- // tx.nVersion is signed integer so requires cast to unsigned otherwise
- // we would be doing a signed comparison and half the range of nVersion
- // wouldn't support BIP 68.
- bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
- && flags & LOCKTIME_VERIFY_SEQUENCE;
-
- // Do not enforce sequence numbers as a relative lock time
- // unless we have been instructed to
- if (!fEnforceBIP68) {
- return std::make_pair(nMinHeight, nMinTime);
- }
-
- for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
- const CTxIn& txin = tx.vin[txinIndex];
-
- // Sequence numbers with the most significant bit set are not
- // treated as relative lock-times, nor are they given any
- // consensus-enforced meaning at this point.
- if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
- // The height of this input is not relevant for sequence locks
- (*prevHeights)[txinIndex] = 0;
- continue;
- }
-
- int nCoinHeight = (*prevHeights)[txinIndex];
-
- if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
- int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
- // NOTE: Subtract 1 to maintain nLockTime semantics
- // BIP 68 relative lock times have the semantics of calculating
- // the first block or time at which the transaction would be
- // valid. When calculating the effective block time or height
- // for the entire transaction, we switch to using the
- // semantics of nLockTime which is the last invalid block
- // time or height. Thus we subtract 1 from the calculated
- // time or height.
-
- // Time-based relative lock-times are measured from the
- // smallest allowed timestamp of the block containing the
- // txout being spent, which is the median time past of the
- // block prior.
- nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
- } else {
- nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
- }
- }
-
- return std::make_pair(nMinHeight, nMinTime);
-}
-
-static bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
-{
- assert(block.pprev);
- int64_t nBlockTime = block.pprev->GetMedianTimePast();
- if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
- return false;
-
- return true;
-}
-
-bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
-{
- return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
-}
-
bool TestLockPointValidity(const LockPoints* lp)
{
AssertLockHeld(cs_main);
@@ -405,107 +310,6 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool
}
-unsigned int GetLegacySigOpCount(const CTransaction& tx)
-{
- unsigned int nSigOps = 0;
- for (const auto& txin : tx.vin)
- {
- nSigOps += txin.scriptSig.GetSigOpCount(false);
- }
- for (const auto& txout : tx.vout)
- {
- nSigOps += txout.scriptPubKey.GetSigOpCount(false);
- }
- return nSigOps;
-}
-
-unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
-{
- if (tx.IsCoinBase())
- return 0;
-
- unsigned int nSigOps = 0;
- for (unsigned int i = 0; i < tx.vin.size(); i++)
- {
- const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
- if (prevout.scriptPubKey.IsPayToScriptHash())
- nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
- }
- return nSigOps;
-}
-
-int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
-{
- int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;
-
- if (tx.IsCoinBase())
- return nSigOps;
-
- if (flags & SCRIPT_VERIFY_P2SH) {
- nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
- }
-
- for (unsigned int i = 0; i < tx.vin.size(); i++)
- {
- const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
- nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, &tx.vin[i].scriptWitness, flags);
- }
- return nSigOps;
-}
-
-
-
-
-
-bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fCheckDuplicateInputs)
-{
- // Basic checks that don't depend on any context
- if (tx.vin.empty())
- return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
- if (tx.vout.empty())
- return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
- // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
- if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
-
- // Check for negative or overflow output values
- CAmount nValueOut = 0;
- for (const auto& txout : tx.vout)
- {
- if (txout.nValue < 0)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
- if (txout.nValue > MAX_MONEY)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
- nValueOut += txout.nValue;
- if (!MoneyRange(nValueOut))
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
- }
-
- // Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock
- if (fCheckDuplicateInputs) {
- std::set<COutPoint> vInOutPoints;
- for (const auto& txin : tx.vin)
- {
- if (!vInOutPoints.insert(txin.prevout).second)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
- }
- }
-
- if (tx.IsCoinBase())
- {
- if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
- }
- else
- {
- for (const auto& txin : tx.vin)
- if (txin.prevout.IsNull())
- return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
- }
-
- return true;
-}
-
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
int expired = pool.Expire(GetTime() - age);
if (expired != 0) {
@@ -1310,52 +1114,6 @@ int GetSpendHeight(const CCoinsViewCache& inputs)
return pindexPrev->nHeight + 1;
}
-namespace Consensus {
-bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
-{
- // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
- // for an attacker to attempt to split the network.
- if (!inputs.HaveInputs(tx))
- return state.Invalid(false, 0, "", "Inputs unavailable");
-
- CAmount nValueIn = 0;
- CAmount nFees = 0;
- for (unsigned int i = 0; i < tx.vin.size(); i++)
- {
- const COutPoint &prevout = tx.vin[i].prevout;
- const CCoins *coins = inputs.AccessCoins(prevout.hash);
- assert(coins);
-
- // If prev is coinbase, check that it's matured
- if (coins->IsCoinBase()) {
- if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
- return state.Invalid(false,
- REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
- strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
- }
-
- // Check for negative or overflow input values
- nValueIn += coins->vout[prevout.n].nValue;
- if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");
-
- }
-
- if (nValueIn < tx.GetValueOut())
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
- strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));
-
- // Tally transaction fees
- CAmount nTxFee = nValueIn - tx.GetValueOut();
- if (nTxFee < 0)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
- nFees += nTxFee;
- if (!MoneyRange(nFees))
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
- return true;
-}
-}// namespace Consensus
-
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
@@ -4177,6 +3935,12 @@ ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::D
return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}
+BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
+{
+ LOCK(cs_main);
+ return VersionBitsStatistics(chainActive.Tip(), params, pos);
+}
+
int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
diff --git a/src/validation.h b/src/validation.h
index 8ddceb2306..b19c3ff4f7 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -339,33 +339,12 @@ std::string FormatStateMessage(const CValidationState &state);
/** Get the BIP9 state for a given deployment at the current tip. */
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos);
+/** Get the numerical statistics for the BIP9 state for a given deployment at the current tip. */
+BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos);
+
/** Get the block height at which the BIP9 deployment switched into the state for the block building on the current tip. */
int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos);
-/**
- * Count ECDSA signature operations the old-fashioned (pre-0.6) way
- * @return number of sigops this transaction's outputs will produce when spent
- * @see CTransaction::FetchInputs
- */
-unsigned int GetLegacySigOpCount(const CTransaction& tx);
-
-/**
- * Count ECDSA signature operations in pay-to-script-hash inputs.
- *
- * @param[in] mapInputs Map of previous transactions that have outputs we're spending
- * @return maximum number of sigops required to validate this transaction's inputs
- * @see CTransaction::FetchInputs
- */
-unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& mapInputs);
-
-/**
- * Compute total signature operation cost of a transaction.
- * @param[in] tx Transaction for which we are computing the cost
- * @param[in] inputs Map of previous transactions that have outputs we're spending
- * @param[out] flags Script verification flags
- * @return Total signature operation cost of tx
- */
-int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags);
/**
* Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
@@ -380,26 +359,6 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight);
/** Transaction validation functions */
-/** Context-independent validity checks */
-bool CheckTransaction(const CTransaction& tx, CValidationState& state, bool fCheckDuplicateInputs=true);
-
-namespace Consensus {
-
-/**
- * Check whether all inputs of this transaction are valid (no double spends and amounts)
- * This does not modify the UTXO set. This does not check scripts and sigs.
- * Preconditions: tx.IsCoinBase() is false.
- */
-bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight);
-
-} // namespace Consensus
-
-/**
- * Check if transaction is final and can be included in a block with the
- * specified height and time. Consensus critical.
- */
-bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime);
-
/**
* Check if transaction will be final in the next block to be created.
*
@@ -415,12 +374,6 @@ bool CheckFinalTx(const CTransaction &tx, int flags = -1);
bool TestLockPointValidity(const LockPoints* lp);
/**
- * Check if transaction is final per BIP 68 sequence numbers and can be included in a block.
- * Consensus critical. Takes as input a list of heights at which tx's inputs (in order) confirmed.
- */
-bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block);
-
-/**
* Check if transaction will be BIP 68 final in the next block to be created.
*
* Simulates calling SequenceLocks() with data from the tip of the current active chain.
diff --git a/src/versionbits.cpp b/src/versionbits.cpp
index 8a7cce7485..80786233f5 100644
--- a/src/versionbits.cpp
+++ b/src/versionbits.cpp
@@ -3,7 +3,6 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "versionbits.h"
-
#include "consensus/params.h"
const struct BIP9DeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] = {
@@ -105,6 +104,36 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex*
return state;
}
+// return the numerical statistics of blocks signalling the specified BIP9 condition in this current period
+BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params) const
+{
+ BIP9Stats stats;
+
+ stats.period = Period(params);
+ stats.threshold = Threshold(params);
+
+ if (pindex == NULL)
+ return stats;
+
+ // Find beginning of period
+ const CBlockIndex* pindexEndOfPrevPeriod = pindex->GetAncestor(pindex->nHeight - ((pindex->nHeight + 1) % stats.period));
+ stats.elapsed = pindex->nHeight - pindexEndOfPrevPeriod->nHeight;
+
+ // Count from current block to beginning of period
+ int count = 0;
+ const CBlockIndex* currentIndex = pindex;
+ while (pindexEndOfPrevPeriod->nHeight != currentIndex->nHeight){
+ if (Condition(currentIndex, params))
+ count++;
+ currentIndex = currentIndex->pprev;
+ }
+
+ stats.count = count;
+ stats.possible = (stats.period - stats.threshold) >= (stats.elapsed - count);
+
+ return stats;
+}
+
int AbstractThresholdConditionChecker::GetStateSinceHeightFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const
{
const ThresholdState initialState = GetStateFor(pindexPrev, params, cache);
@@ -167,6 +196,11 @@ ThresholdState VersionBitsState(const CBlockIndex* pindexPrev, const Consensus::
return VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, cache.caches[pos]);
}
+BIP9Stats VersionBitsStatistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
+{
+ return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindexPrev, params);
+}
+
int VersionBitsStateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache)
{
return VersionBitsConditionChecker(pos).GetStateSinceHeightFor(pindexPrev, params, cache.caches[pos]);
diff --git a/src/versionbits.h b/src/versionbits.h
index 7a929266aa..f1d31ea0af 100644
--- a/src/versionbits.h
+++ b/src/versionbits.h
@@ -37,6 +37,14 @@ struct BIP9DeploymentInfo {
bool gbt_force;
};
+struct BIP9Stats {
+ int period;
+ int threshold;
+ int elapsed;
+ int count;
+ bool possible;
+};
+
extern const struct BIP9DeploymentInfo VersionBitsDeploymentInfo[];
/**
@@ -51,6 +59,7 @@ protected:
virtual int Threshold(const Consensus::Params& params) const =0;
public:
+ BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params) const;
// Note that the functions below take a pindexPrev as input: they compute information for block B based on its parent.
ThresholdState GetStateFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const;
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const;
@@ -64,6 +73,7 @@ struct VersionBitsCache
};
ThresholdState VersionBitsState(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache);
+BIP9Stats VersionBitsStatistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);
int VersionBitsStateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache);
uint32_t VersionBitsMask(const Consensus::Params& params, Consensus::DeploymentPos pos);
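The new BIP9Stats structure and the VersionBitsStatistics()/VersionBitsTipStatistics() helpers expose per-period signalling counts to callers. A minimal caller sketch, not part of this patch: it assumes the usual Params() chainparams accessor and the DEPLOYMENT_TESTDUMMY deployment, and relies on VersionBitsTipStatistics() taking cs_main internally as shown in the validation.cpp hunk above:

    #include "chainparams.h"
    #include "consensus/params.h"
    #include "util.h"
    #include "validation.h"
    #include "versionbits.h"

    void LogBip9Stats()
    {
        const Consensus::Params& consensus = Params().GetConsensus();
        // Statistics for the current (incomplete) period at the active tip.
        const BIP9Stats stats = VersionBitsTipStatistics(consensus, Consensus::DEPLOYMENT_TESTDUMMY);
        LogPrintf("bip9: %d of %d blocks signalled this period (threshold %d of %d, lock-in still possible: %d)\n",
                  stats.count, stats.elapsed, stats.threshold, stats.period, stats.possible);
    }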
diff --git a/src/wallet/coincontrol.h b/src/wallet/coincontrol.h
index 2aa26fb00a..cb4719ae90 100644
--- a/src/wallet/coincontrol.h
+++ b/src/wallet/coincontrol.h
@@ -18,8 +18,6 @@ public:
bool fAllowOtherInputs;
//! Includes watch only addresses which match the ISMINE_WATCH_SOLVABLE criteria
bool fAllowWatchOnly;
- //! Minimum absolute fee (not per kilobyte)
- CAmount nMinimumTotalFee;
//! Override estimated feerate
bool fOverrideFeeRate;
//! Feerate to use if overrideFeeRate is true
@@ -40,7 +38,6 @@ public:
fAllowOtherInputs = false;
fAllowWatchOnly = false;
setSelected.clear();
- nMinimumTotalFee = 0;
nFeeRate = CFeeRate(0);
fOverrideFeeRate = false;
nConfirmTarget = 0;
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index b3cb6a718c..c10a9eccd9 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -41,6 +41,31 @@ int64_t CalculateMaximumSignedTxSize(const CTransaction &tx, const CWallet *pWal
return GetVirtualTransactionSize(txNew);
}
+bool CFeeBumper::preconditionChecks(const CWallet *pWallet, const CWalletTx& wtx) {
+ if (pWallet->HasWalletSpend(wtx.GetHash())) {
+ vErrors.push_back("Transaction has descendants in the wallet");
+ currentResult = BumpFeeResult::INVALID_PARAMETER;
+ return false;
+ }
+
+ {
+ LOCK(mempool.cs);
+ auto it_mp = mempool.mapTx.find(wtx.GetHash());
+ if (it_mp != mempool.mapTx.end() && it_mp->GetCountWithDescendants() > 1) {
+ vErrors.push_back("Transaction has descendants in the mempool");
+ currentResult = BumpFeeResult::INVALID_PARAMETER;
+ return false;
+ }
+ }
+
+ if (wtx.GetDepthInMainChain() != 0) {
+ vErrors.push_back("Transaction has been mined, or is conflicted with a mined transaction");
+ currentResult = BumpFeeResult::WALLET_ERROR;
+ return false;
+ }
+ return true;
+}
+
CFeeBumper::CFeeBumper(const CWallet *pWallet, const uint256 txidIn, int newConfirmTarget, bool specifiedConfirmTarget, CAmount totalFee, bool newTxReplaceable)
:
txid(std::move(txidIn)),
@@ -58,25 +83,7 @@ CFeeBumper::CFeeBumper(const CWallet *pWallet, const uint256 txidIn, int newConf
auto it = pWallet->mapWallet.find(txid);
const CWalletTx& wtx = it->second;
- if (pWallet->HasWalletSpend(txid)) {
- vErrors.push_back("Transaction has descendants in the wallet");
- currentResult = BumpFeeResult::INVALID_PARAMETER;
- return;
- }
-
- {
- LOCK(mempool.cs);
- auto it_mp = mempool.mapTx.find(txid);
- if (it_mp != mempool.mapTx.end() && it_mp->GetCountWithDescendants() > 1) {
- vErrors.push_back("Transaction has descendants in the mempool");
- currentResult = BumpFeeResult::INVALID_PARAMETER;
- return;
- }
- }
-
- if (wtx.GetDepthInMainChain() != 0) {
- vErrors.push_back("Transaction has been mined, or is conflicted with a mined transaction");
- currentResult = BumpFeeResult::WALLET_ERROR;
+ if (!preconditionChecks(pWallet, wtx)) {
return;
}
@@ -248,6 +255,11 @@ bool CFeeBumper::commit(CWallet *pWallet)
}
CWalletTx& oldWtx = pWallet->mapWallet[txid];
+ // make sure the transaction still has no descendants and hasn't been mined in the meantime
+ if (!preconditionChecks(pWallet, oldWtx)) {
+ return false;
+ }
+
CWalletTx wtxBumped(pWallet, MakeTransactionRef(std::move(mtx)));
// commit/broadcast the tx
CReserveKey reservekey(pWallet);
diff --git a/src/wallet/feebumper.h b/src/wallet/feebumper.h
index 681f55e4e5..f40d05da28 100644
--- a/src/wallet/feebumper.h
+++ b/src/wallet/feebumper.h
@@ -8,6 +8,7 @@
#include <primitives/transaction.h>
class CWallet;
+class CWalletTx;
class uint256;
enum class BumpFeeResult
@@ -44,6 +45,8 @@ public:
bool commit(CWallet *pWalletNonConst);
private:
+ bool preconditionChecks(const CWallet *pWallet, const CWalletTx& wtx);
+
const uint256 txid;
uint256 bumpedTxid;
CMutableTransaction mtx;
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 82708dab26..d46cf69efb 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -536,14 +536,11 @@ UniValue importwallet(const JSONRPCRequest& request)
}
file.close();
pwallet->ShowProgress("", 100); // hide progress dialog in GUI
-
- CBlockIndex *pindex = chainActive.Tip();
- while (pindex && pindex->pprev && pindex->GetBlockTime() > nTimeBegin - TIMESTAMP_WINDOW)
- pindex = pindex->pprev;
-
pwallet->UpdateTimeFirstKey(nTimeBegin);
- LogPrintf("Rescanning last %i blocks\n", chainActive.Height() - pindex->nHeight + 1);
+ CBlockIndex *pindex = chainActive.FindEarliestAtLeast(nTimeBegin - TIMESTAMP_WINDOW);
+
+ LogPrintf("Rescanning last %i blocks\n", pindex ? chainActive.Height() - pindex->nHeight + 1 : 0);
pwallet->ScanForWalletTransactions(pindex);
pwallet->MarkDirty();
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 8e56f1efeb..5c7359fdce 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -20,6 +20,8 @@
#include <univalue.h>
extern UniValue importmulti(const JSONRPCRequest& request);
+extern UniValue dumpwallet(const JSONRPCRequest& request);
+extern UniValue importwallet(const JSONRPCRequest& request);
// how many times to run all the tests to have a chance to catch errors that only show up with particular random shuffles
#define RUN_TESTS 100
@@ -438,6 +440,66 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
}
}
+// Verify importwallet RPC starts rescan at earliest block with timestamp
+// greater than or equal to key birthday. Previously there was a bug where
+// importwallet RPC would start the scan at the latest block with timestamp less
+// than or equal to key birthday.
+BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
+{
+ CWallet *pwalletMainBackup = ::pwalletMain;
+ LOCK(cs_main);
+
+ // Create two blocks with same timestamp to verify that importwallet rescan
+ // will pick up both blocks, not just the first.
+ const int64_t BLOCK_TIME = chainActive.Tip()->GetBlockTimeMax() + 5;
+ SetMockTime(BLOCK_TIME);
+ coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
+ coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
+
+ // Set key birthday to block time increased by the timestamp window, so
+ // rescan will start at the block time.
+ const int64_t KEY_TIME = BLOCK_TIME + TIMESTAMP_WINDOW;
+ SetMockTime(KEY_TIME);
+ coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
+
+ // Import key into wallet and call dumpwallet to create backup file.
+ {
+ CWallet wallet;
+ LOCK(wallet.cs_wallet);
+ wallet.mapKeyMetadata[coinbaseKey.GetPubKey().GetID()].nCreateTime = KEY_TIME;
+ wallet.AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey());
+
+ JSONRPCRequest request;
+ request.params.setArray();
+ request.params.push_back("wallet.backup");
+ ::pwalletMain = &wallet;
+ ::dumpwallet(request);
+ }
+
+ // Call importwallet RPC and verify all blocks with timestamps >= BLOCK_TIME
+ // were scanned, and no prior blocks were scanned.
+ {
+ CWallet wallet;
+
+ JSONRPCRequest request;
+ request.params.setArray();
+ request.params.push_back("wallet.backup");
+ ::pwalletMain = &wallet;
+ ::importwallet(request);
+
+ BOOST_CHECK_EQUAL(wallet.mapWallet.size(), 3);
+ BOOST_CHECK_EQUAL(coinbaseTxns.size(), 103);
+ for (size_t i = 0; i < coinbaseTxns.size(); ++i) {
+ bool found = wallet.GetWalletTx(coinbaseTxns[i].GetHash());
+ bool expected = i >= 100;
+ BOOST_CHECK_EQUAL(found, expected);
+ }
+ }
+
+ SetMockTime(0);
+ ::pwalletMain = pwalletMainBackup;
+}
+
// Check that GetImmatureCredit() returns a newly calculated value instead of
// the cached value after a MarkDirty() call.
//
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index ca620b5af1..997515a04b 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2396,6 +2396,8 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
bool CWallet::SignTransaction(CMutableTransaction &tx)
{
+ AssertLockHeld(cs_wallet); // mapWallet
+
// sign the new tx
CTransaction txNewConst(tx);
int nIn = 0;
@@ -2718,9 +2720,6 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
currentConfirmationTarget = coinControl->nConfirmTarget;
CAmount nFeeNeeded = GetMinimumFee(nBytes, currentConfirmationTarget, ::mempool, ::feeEstimator);
- if (coinControl && nFeeNeeded > 0 && coinControl->nMinimumTotalFee > nFeeNeeded) {
- nFeeNeeded = coinControl->nMinimumTotalFee;
- }
if (coinControl && coinControl->fOverrideFeeRate)
nFeeNeeded = coinControl->nFeeRate.GetFee(nBytes);
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index a90fa6dbbd..342c797dd3 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -6,9 +6,9 @@
#include "wallet/walletdb.h"
#include "base58.h"
+#include "consensus/tx_verify.h"
#include "consensus/validation.h"
#include "fs.h"
-#include "validation.h" // For CheckTransaction
#include "protocol.h"
#include "serialize.h"
#include "sync.h"
diff --git a/test/functional/bip9-softforks.py b/test/functional/bip9-softforks.py
index 1b2dff63d2..fff47fcca9 100755
--- a/test/functional/bip9-softforks.py
+++ b/test/functional/bip9-softforks.py
@@ -99,12 +99,43 @@ class BIP9SoftForksTest(ComparisonTestFramework):
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
+ # Test 1-A
+ # check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
+ test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not)
+ test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
+
+ # Test 1-B
+ # check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
+ test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
+
+ # Test 1-C
+ # finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
+ test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
+ assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
@@ -116,6 +147,8 @@ class BIP9SoftForksTest(ComparisonTestFramework):
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
@@ -125,14 +158,24 @@ class BIP9SoftForksTest(ComparisonTestFramework):
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
- test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
+ test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
+ # check counting stats and "possible" flag before last block of this period achieves LOCKED_IN...
+ assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
+ assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
+ assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+
+ # ...continue with Test 3
+ test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
- assert_equal(self.get_bip9_status(bipName)['since'], 432)
+ assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
@@ -142,7 +185,7 @@ class BIP9SoftForksTest(ComparisonTestFramework):
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
- assert_equal(self.get_bip9_status(bipName)['since'], 432)
+ assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
@@ -168,7 +211,7 @@ class BIP9SoftForksTest(ComparisonTestFramework):
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
- assert_equal(self.get_bip9_status(bipName)['since'], 576)
+ assert_equal(self.get_bip9_status(bipName)['since'], 720)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
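For reference, the statistics asserted in Tests 1-A and 1-B above follow directly from the formula added in versionbits.cpp, possible = (period - threshold) >= (elapsed - count), with the regtest parameters this test relies on (period 144, threshold 108):

  Test 1-A: elapsed 46, count 10, so 144 - 108 = 36 >= 46 - 10 = 36 and LOCKED_IN is still reachable this period.
  Test 1-B: one more non-signalling block gives elapsed 47, count 10; 36 >= 37 fails, so it no longer is.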
diff --git a/test/functional/prioritise_transaction.py b/test/functional/prioritise_transaction.py
index a07923edba..9c3b3fd5d9 100755
--- a/test/functional/prioritise_transaction.py
+++ b/test/functional/prioritise_transaction.py
@@ -13,8 +13,8 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
- self.num_nodes = 1
- self.extra_args = [["-printpriority=1"]]
+ self.num_nodes = 2
+ self.extra_args = [["-printpriority=1"], ["-printpriority=1"]]
def run_test(self):
self.txouts = gen_return_txouts()
@@ -115,5 +115,16 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert(tx_id in self.nodes[0].getrawmempool())
+ # Test that calling prioritisetransaction is sufficient to trigger
+ # getblocktemplate to (eventually) return a new block.
+ mock_time = int(time.time())
+ self.nodes[0].setmocktime(mock_time)
+ template = self.nodes[0].getblocktemplate()
+ self.nodes[0].prioritisetransaction(tx_id, -int(self.relayfee*COIN))
+ self.nodes[0].setmocktime(mock_time+10)
+ new_template = self.nodes[0].getblocktemplate()
+
+ assert(template != new_template)
+
if __name__ == '__main__':
PrioritiseTransactionTest().main()
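The new check leans on two details: the fee delta passed to prioritisetransaction is denominated in satoshis while the node reports its relay fee in BTC/kB, and getblocktemplate appears to hand out a fresh template only after enough (mock) time has passed, which is why the test advances the clock by 10 seconds. A small standalone sketch of the unit conversion (toy relay fee; Decimal mirrors how the framework parses RPC amounts):

from decimal import Decimal

COIN = 100_000_000                 # satoshis per BTC
relayfee = Decimal("0.00001000")   # example minrelaytxfee in BTC/kB

fee_delta = -int(relayfee * COIN)  # -1000 satoshis, enough to push the tx
print(fee_delta)                   # below the block-inclusion cutoff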
diff --git a/test/functional/signrawtransactions.py b/test/functional/signrawtransactions.py
index fc49c23b9f..437905e764 100755
--- a/test/functional/signrawtransactions.py
+++ b/test/functional/signrawtransactions.py
@@ -114,6 +114,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
+ assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
@@ -123,6 +124,32 @@ class SignRawTransactionsTest(BitcoinTestFramework):
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
+ assert not rawTxSigned['errors'][0]['witness']
+
+ # Now test signing failure for a transaction with input witnesses
+ p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
+
+ rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)
+
+ # 7) The transaction has no complete set of signatures
+ assert 'complete' in rawTxSigned
+ assert_equal(rawTxSigned['complete'], False)
+
+ # 8) Two script verification errors occurred
+ assert 'errors' in rawTxSigned
+ assert_equal(len(rawTxSigned['errors']), 2)
+
+ # 9) Script verification errors have certain properties
+ assert 'txid' in rawTxSigned['errors'][0]
+ assert 'vout' in rawTxSigned['errors'][0]
+ assert 'witness' in rawTxSigned['errors'][0]
+ assert 'scriptSig' in rawTxSigned['errors'][0]
+ assert 'sequence' in rawTxSigned['errors'][0]
+ assert 'error' in rawTxSigned['errors'][0]
+
+ # Non-empty witness checked here
+ assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
+ assert not rawTxSigned['errors'][0]['witness']
def run_test(self):
self.successful_signing_test()
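The extra assertions pin down the shape of the 'errors' entries, including the new per-input 'witness' field. A mocked, self-contained sketch of that shape (placeholder values, not real RPC output):

rawTxSigned = {
    "complete": False,
    "errors": [
        {"txid": "ff" * 32, "vout": 0, "witness": [],
         "scriptSig": "", "sequence": 4294967294, "error": "..."},
        {"txid": "ee" * 32, "vout": 1,
         "witness": ["3044...signature...01", "02...pubkey..."],
         "scriptSig": "", "sequence": 4294967295, "error": "..."},
    ],
}

assert not rawTxSigned["errors"][0]["witness"]        # legacy input: empty stack
assert len(rawTxSigned["errors"][1]["witness"]) == 2  # P2WPKH input: [signature, pubkey]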
diff --git a/test/functional/smartfees.py b/test/functional/smartfees.py
index 19ffe9e851..4124f8025e 100755
--- a/test/functional/smartfees.py
+++ b/test/functional/smartfees.py
@@ -116,8 +116,8 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
for i,e in enumerate(all_estimates): # estimate is for i+1
if e >= 0:
valid_estimate = True
- # estimatesmartfee should return the same result
- assert_equal(node.estimatesmartfee(i+1)["feerate"], e)
+ if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
+ assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta)
else:
invalid_estimates += 1
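The relaxed check only relies on estimates being roughly non-increasing in the confirmation target: the smart estimate at half the target should not come out lower than the raw estimate at the full target, within a small tolerance. A toy illustration with synthetic, monotonically decreasing estimates (not the node's estimator):

delta = 1e-6
estimates = {n: 0.001 / n for n in range(1, 26)}   # fake estimatefee(n), decreasing in n

def estimatesmartfee(target):
    return estimates[target]                        # stand-in for the RPC

for n in range(14, 26):                             # targets where the check applies
    assert estimatesmartfee(n // 2) > estimates[n] - delta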
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 8d8139e4e4..4b5b311385 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -109,8 +109,7 @@ class BitcoinTestFramework(object):
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
- parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
- help="Root directory for datadirs")
+ parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
@@ -124,9 +123,6 @@ class BitcoinTestFramework(object):
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
- # backup dir variable for removal at cleanup
- self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
-
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
@@ -137,7 +133,10 @@ class BitcoinTestFramework(object):
check_json_precision()
# Set up temp directory and start logging
- os.makedirs(self.options.tmpdir, exist_ok=False)
+ if self.options.tmpdir:
+ os.makedirs(self.options.tmpdir, exist_ok=False)
+ else:
+ self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = False
@@ -167,8 +166,6 @@ class BitcoinTestFramework(object):
if not self.options.nocleanup and not self.options.noshutdown and success:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
- if not os.listdir(self.options.root):
- os.rmdir(self.options.root)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
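The tmpdir handling above boils down to: an explicitly requested directory must be creatable (and must not already exist), otherwise a throwaway directory is made on demand, so there is no longer a shared root to clean up afterwards. A self-contained sketch of that behaviour (argparse here instead of the framework's optparse):

import argparse
import os
import tempfile

parser = argparse.ArgumentParser()
parser.add_argument("--tmpdir", help="Root directory for datadirs")
options = parser.parse_args()

if options.tmpdir:
    os.makedirs(options.tmpdir, exist_ok=False)   # fail loudly if it already exists
else:
    options.tmpdir = tempfile.mkdtemp(prefix="test")
print("datadirs under", options.tmpdir)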
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 6174ca1d88..b2aee7c739 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -16,6 +16,7 @@ For a description of arguments recognized by test scripts, see
import argparse
import configparser
+import datetime
import os
import time
import shutil
@@ -170,6 +171,7 @@ def main():
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
+ parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
@@ -187,6 +189,12 @@ def main():
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
+ # Create base test directory
+ tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
+ os.makedirs(tmpdir)
+
+ logging.debug("Temporary test directory at %s" % tmpdir)
+
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
@@ -247,9 +255,9 @@ def main():
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
- run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
+ run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
-def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]):
+def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
@@ -280,10 +288,10 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
if len(test_list) > 1 and jobs > 1:
# Populate cache
- subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
+ subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
#Run Tests
- job_queue = TestHandler(jobs, tests_dir, test_list, flags)
+ job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
@@ -310,6 +318,10 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
logging.debug("Cleaning up coverage data")
coverage.cleanup()
+ # Clear up the temp directory if all subdirectories are gone
+ if not os.listdir(tmpdir):
+ os.rmdir(tmpdir)
+
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
@@ -337,10 +349,11 @@ class TestHandler:
Trigger the testscripts passed in via the list.
"""
- def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None):
+ def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
+ self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
@@ -355,13 +368,15 @@ class TestHandler:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
- port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
+ portseed = len(self.test_list) + self.portseed_offset
+ portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
+ tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
self.jobs.append((t,
time.time(),
- subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed,
+ subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
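Taken together, the runner changes give every test its own datadir under one timestamped base directory, derived from the script name and the port seed. A standalone sketch of the naming scheme (example values only):

import datetime
import re
import tempfile

tmpdirprefix = tempfile.gettempdir()
base = "%s/bitcoin_test_runner_%s" % (
    tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

def test_tmpdir(test_argv, portseed):
    """Per-test datadir: <base>/<script name without .py>_<portseed>."""
    return "%s/%s_%s" % (base, re.sub(".py$", "", test_argv[0]), portseed)

print(test_tmpdir(["wallet.py"], 17))   # e.g. /tmp/bitcoin_test_runner_20170601_120000/wallet_17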
diff --git a/test/functional/wallet.py b/test/functional/wallet.py
index 57f6dfdaa9..efde2e005e 100755
--- a/test/functional/wallet.py
+++ b/test/functional/wallet.py
@@ -57,8 +57,13 @@ class WalletTest(BitcoinTestFramework):
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
+ # Locked memory should use at least 32 bytes to sign each transaction
+ self.log.info("test getmemoryinfo")
+ memory_before = self.nodes[0].getmemoryinfo()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
+ memory_after = self.nodes[0].getmemoryinfo()
+ assert(memory_before['locked']['used'] + 64 <= memory_after['locked']['used'])
self.log.info("test gettxout")
# utxo spent in mempool should be visible if you exclude mempool
@@ -78,7 +83,6 @@ class WalletTest(BitcoinTestFramework):
# but 10 will go to node2 and the rest will go to node0
balance = self.nodes[0].getbalance()
assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
-
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)