-rwxr-xr-x  contrib/devtools/check-doc.py                        2
-rw-r--r--  contrib/gitian-keys/laanwj-key.pgp                   bin 1713 -> 17689 bytes
-rw-r--r--  src/chain.cpp                                        19
-rw-r--r--  src/chain.h                                          3
-rw-r--r--  src/coins.cpp                                        17
-rw-r--r--  src/coins.h                                          24
-rw-r--r--  src/httprpc.cpp                                      57
-rw-r--r--  src/httpserver.cpp                                   22
-rw-r--r--  src/init.cpp                                         104
-rw-r--r--  src/keystore.h                                       24
-rw-r--r--  src/net.cpp                                          5
-rw-r--r--  src/net_processing.cpp                               19
-rw-r--r--  src/qt/clientmodel.cpp                               3
-rw-r--r--  src/rpc/blockchain.cpp                               2
-rw-r--r--  src/rpc/server.cpp                                   17
-rw-r--r--  src/script/interpreter.h                             6
-rw-r--r--  src/script/sigcache.h                                2
-rw-r--r--  src/script/sign.cpp                                  2
-rw-r--r--  src/script/sign.h                                    8
-rw-r--r--  src/support/lockedpool.cpp                           12
-rw-r--r--  src/test/addrman_tests.cpp                           2
-rw-r--r--  src/test/allocator_tests.cpp                         6
-rw-r--r--  src/test/coins_tests.cpp                             22
-rw-r--r--  src/test/net_tests.cpp                               4
-rw-r--r--  src/test/versionbits_tests.cpp                       10
-rw-r--r--  src/txdb.cpp                                         51
-rw-r--r--  src/txdb.h                                           17
-rw-r--r--  src/txmempool.cpp                                    6
-rw-r--r--  src/txmempool.h                                      17
-rw-r--r--  src/util.cpp                                         12
-rw-r--r--  src/util.h                                           5
-rw-r--r--  src/validation.cpp                                   130
-rw-r--r--  src/validation.h                                     5
-rw-r--r--  src/versionbits.cpp                                  10
-rw-r--r--  src/wallet/crypter.h                                 10
-rw-r--r--  src/wallet/wallet.h                                  2
-rw-r--r--  src/zmq/zmqpublishnotifier.h                         12
-rw-r--r--  test/README.md                                       154
-rw-r--r--  test/functional/README.md                            174
-rwxr-xr-x  test/functional/blockchain.py                        9
-rwxr-xr-x  test/functional/dbcrash.py                           268
-rwxr-xr-x  test/functional/example_test.py                      219
-rwxr-xr-x  test/functional/test_framework/test_framework.py     6
-rw-r--r--  test/functional/test_framework/util.py               2
-rwxr-xr-x  test/functional/test_runner.py                       3
-rwxr-xr-x  test/functional/uptime.py                            32
46 files changed, 1185 insertions, 351 deletions
diff --git a/contrib/devtools/check-doc.py b/contrib/devtools/check-doc.py
index 249214e931..150f368513 100755
--- a/contrib/devtools/check-doc.py
+++ b/contrib/devtools/check-doc.py
@@ -21,7 +21,7 @@ CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_RO
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
-SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize'])
+SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
diff --git a/contrib/gitian-keys/laanwj-key.pgp b/contrib/gitian-keys/laanwj-key.pgp
index 559295109d..eed232a872 100644
--- a/contrib/gitian-keys/laanwj-key.pgp
+++ b/contrib/gitian-keys/laanwj-key.pgp
Binary files differ
diff --git a/src/chain.cpp b/src/chain.cpp
index 8d4c4e7dea..ffd58d471d 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -148,3 +148,22 @@ int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& fr
}
return sign * r.GetLow64();
}
+
+/** Find the last common ancestor two blocks have.
+ * Both pa and pb must be non-NULL. */
+const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb) {
+ if (pa->nHeight > pb->nHeight) {
+ pa = pa->GetAncestor(pb->nHeight);
+ } else if (pb->nHeight > pa->nHeight) {
+ pb = pb->GetAncestor(pa->nHeight);
+ }
+
+ while (pa != pb && pa && pb) {
+ pa = pa->pprev;
+ pb = pb->pprev;
+ }
+
+ // Eventually all chain branches meet at the genesis block.
+ assert(pa == pb);
+ return pa;
+}
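
LastCommonAncestor(), moved here from net_processing.cpp, first walks the higher tip down to the height of the lower one and then steps both pointers back in lockstep until they meet. A minimal usage sketch, assuming pindexA and pindexB are two hypothetical (non-NULL) tips from the same block index:

    const CBlockIndex* pindexFork = LastCommonAncestor(pindexA, pindexB);
    // pindexFork lies on both branches, at height <= min(pindexA->nHeight, pindexB->nHeight).
    // If one tip is an ancestor of the other, the fork point is that ancestor itself;
    // in the worst case the walk ends at the genesis block, which all branches share.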
diff --git a/src/chain.h b/src/chain.h
index de120d2d75..c5304b7d6f 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -362,6 +362,9 @@ public:
arith_uint256 GetBlockProof(const CBlockIndex& block);
/** Return the time it would take to redo the work difference between from and to, assuming the current hashrate corresponds to the difficulty at tip, in seconds. */
int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params&);
+/** Find the forking point between two chain tips. */
+const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb);
+
/** Used to marshal pointers into hashes for db storage. */
class CDiskBlockIndex : public CBlockIndex
diff --git a/src/coins.cpp b/src/coins.cpp
index f8df835e9f..b5dc6197bd 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -11,16 +11,22 @@
#include <assert.h>
bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return false; }
-bool CCoinsView::HaveCoin(const COutPoint &outpoint) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
+std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); }
bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; }
CCoinsViewCursor *CCoinsView::Cursor() const { return 0; }
+bool CCoinsView::HaveCoin(const COutPoint &outpoint) const
+{
+ Coin coin;
+ return GetCoin(outpoint, coin);
+}
CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { }
bool CCoinsViewBacked::GetCoin(const COutPoint &outpoint, Coin &coin) const { return base->GetCoin(outpoint, coin); }
bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->HaveCoin(outpoint); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
+std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); }
CCoinsViewCursor *CCoinsViewBacked::Cursor() const { return base->Cursor(); }
@@ -55,7 +61,7 @@ bool CCoinsViewCache::GetCoin(const COutPoint &outpoint, Coin &coin) const {
CCoinsMap::const_iterator it = FetchCoin(outpoint);
if (it != cacheCoins.end()) {
coin = it->second.coin;
- return true;
+ return !coin.IsSpent();
}
return false;
}
@@ -81,13 +87,14 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi
cachedCoinsUsage += it->second.coin.DynamicMemoryUsage();
}
-void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight) {
+void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool check) {
bool fCoinbase = tx.IsCoinBase();
const uint256& txid = tx.GetHash();
for (size_t i = 0; i < tx.vout.size(); ++i) {
- // Pass fCoinbase as the possible_overwrite flag to AddCoin, in order to correctly
+ bool overwrite = check ? cache.HaveCoin(COutPoint(txid, i)) : fCoinbase;
+ // Always set the possible_overwrite flag to AddCoin for coinbase txn, in order to correctly
// deal with the pre-BIP30 occurrences of duplicate coinbase transactions.
- cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase), fCoinbase);
+ cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase), overwrite);
}
}
diff --git a/src/coins.h b/src/coins.h
index 4774c9f6a6..efb5ce869c 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -145,16 +145,24 @@ private:
class CCoinsView
{
public:
- //! Retrieve the Coin (unspent transaction output) for a given outpoint.
+ /** Retrieve the Coin (unspent transaction output) for a given outpoint.
+ * Returns true only when an unspent coin was found, which is returned in coin.
+ * When false is returned, coin's value is unspecified.
+ */
virtual bool GetCoin(const COutPoint &outpoint, Coin &coin) const;
- //! Just check whether we have data for a given outpoint.
- //! This may (but cannot always) return true for spent outputs.
+ //! Just check whether a given outpoint is unspent.
virtual bool HaveCoin(const COutPoint &outpoint) const;
//! Retrieve the block hash whose state this CCoinsView currently represents
virtual uint256 GetBestBlock() const;
+ //! Retrieve the range of blocks that may have been only partially written.
+ //! If the database is in a consistent state, the result is the empty vector.
+ //! Otherwise, a two-element vector is returned consisting of the new and
+ //! the old block hash, in that order.
+ virtual std::vector<uint256> GetHeadBlocks() const;
+
//! Do a bulk modification (multiple Coin changes + BestBlock change).
//! The passed mapCoins can be modified.
virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock);
@@ -181,6 +189,7 @@ public:
bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
+ std::vector<uint256> GetHeadBlocks() const override;
void SetBackend(CCoinsView &viewIn);
bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
CCoinsViewCursor *Cursor() const override;
@@ -289,12 +298,17 @@ private:
};
//! Utility function to add all of a transaction's outputs to a cache.
-// It assumes that overwrites are only possible for coinbase transactions,
+// When check is false, this assumes that overwrites are only possible for coinbase transactions.
+// When check is true, the underlying view may be queried to determine whether an addition is
+// an overwrite.
// TODO: pass in a boolean to limit these possible overwrites to known
// (pre-BIP34) cases.
-void AddCoins(CCoinsViewCache& cache, const CTransaction& tx, int nHeight);
+void AddCoins(CCoinsViewCache& cache, const CTransaction& tx, int nHeight, bool check = false);
//! Utility function to find any unspent output with a given txid.
+// This function can be quite expensive because in the event of a transaction
+// which is not found in the cache, it can cause up to MAX_OUTPUTS_PER_BLOCK
+// lookups to database, so it should be used with care.
const Coin& AccessByTxid(const CCoinsViewCache& cache, const uint256& txid);
#endif // BITCOIN_COINS_H
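
A short sketch of how a caller can act on the GetHeadBlocks() contract documented above; view here is a hypothetical pointer to any CCoinsView implementation:

    std::vector<uint256> heads = view->GetHeadBlocks();
    if (heads.empty()) {
        // Consistent state: GetBestBlock() describes the fully written chainstate.
    } else {
        assert(heads.size() == 2);
        const uint256& new_tip = heads[0]; // flush target that was interrupted
        const uint256& old_tip = heads[1]; // last tip known to be fully written
        // The chainstate must be replayed from old_tip to new_tip before use.
    }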
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 053702f843..8c2e0da32f 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -47,11 +47,11 @@ public:
HTTPRPCTimerInterface(struct event_base* _base) : base(_base)
{
}
- const char* Name()
+ const char* Name() override
{
return "HTTP";
}
- RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis)
+ RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis) override
{
return new HTTPRPCTimer(base, func, millis);
}
@@ -92,35 +92,32 @@ static bool multiUserAuthorized(std::string strUserPass)
std::string strUser = strUserPass.substr(0, strUserPass.find(":"));
std::string strPass = strUserPass.substr(strUserPass.find(":") + 1);
- if (gArgs.IsArgSet("-rpcauth")) {
+ for (const std::string& strRPCAuth : gArgs.GetArgs("-rpcauth")) {
//Search for multi-user login/pass "rpcauth" from config
- for (std::string strRPCAuth : gArgs.GetArgs("-rpcauth"))
- {
- std::vector<std::string> vFields;
- boost::split(vFields, strRPCAuth, boost::is_any_of(":$"));
- if (vFields.size() != 3) {
- //Incorrect formatting in config file
- continue;
- }
-
- std::string strName = vFields[0];
- if (!TimingResistantEqual(strName, strUser)) {
- continue;
- }
-
- std::string strSalt = vFields[1];
- std::string strHash = vFields[2];
-
- static const unsigned int KEY_SIZE = 32;
- unsigned char out[KEY_SIZE];
-
- CHMAC_SHA256(reinterpret_cast<const unsigned char*>(strSalt.c_str()), strSalt.size()).Write(reinterpret_cast<const unsigned char*>(strPass.c_str()), strPass.size()).Finalize(out);
- std::vector<unsigned char> hexvec(out, out+KEY_SIZE);
- std::string strHashFromPass = HexStr(hexvec);
-
- if (TimingResistantEqual(strHashFromPass, strHash)) {
- return true;
- }
+ std::vector<std::string> vFields;
+ boost::split(vFields, strRPCAuth, boost::is_any_of(":$"));
+ if (vFields.size() != 3) {
+ //Incorrect formatting in config file
+ continue;
+ }
+
+ std::string strName = vFields[0];
+ if (!TimingResistantEqual(strName, strUser)) {
+ continue;
+ }
+
+ std::string strSalt = vFields[1];
+ std::string strHash = vFields[2];
+
+ static const unsigned int KEY_SIZE = 32;
+ unsigned char out[KEY_SIZE];
+
+ CHMAC_SHA256(reinterpret_cast<const unsigned char*>(strSalt.c_str()), strSalt.size()).Write(reinterpret_cast<const unsigned char*>(strPass.c_str()), strPass.size()).Finalize(out);
+ std::vector<unsigned char> hexvec(out, out+KEY_SIZE);
+ std::string strHashFromPass = HexStr(hexvec);
+
+ if (TimingResistantEqual(strHashFromPass, strHash)) {
+ return true;
}
}
return false;
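
For reference, each -rpcauth entry checked above has the form user:salt$hash, where hash is the hex-encoded HMAC-SHA256 of the password keyed with the salt. A condensed sketch of the comparison for a single entry, with hypothetical values standing in for vFields[1], vFields[2] and the supplied password:

    std::string salt = "cb2f9a0d";       // vFields[1] (hypothetical value)
    std::string hash_hex = "deadbeef";   // vFields[2] (hypothetical; a real hash is 64 hex chars)
    std::string password = "secret";     // password supplied via HTTP basic auth
    unsigned char out[32];
    CHMAC_SHA256(reinterpret_cast<const unsigned char*>(salt.data()), salt.size())
        .Write(reinterpret_cast<const unsigned char*>(password.data()), password.size())
        .Finalize(out);
    std::string computed = HexStr(std::vector<unsigned char>(out, out + 32));
    bool authorized = TimingResistantEqual(computed, hash_hex);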
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 1557cf98f8..1c53d8d49d 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -46,7 +46,7 @@ public:
req(std::move(_req)), path(_path), func(_func)
{
}
- void operator()()
+ void operator()() override
{
func(req.get(), path);
}
@@ -196,18 +196,16 @@ static bool InitHTTPAllowList()
LookupHost("::1", localv6, false);
rpc_allow_subnets.push_back(CSubNet(localv4, 8)); // always allow IPv4 local subnet
rpc_allow_subnets.push_back(CSubNet(localv6)); // always allow IPv6 localhost
- if (gArgs.IsArgSet("-rpcallowip")) {
- for (const std::string& strAllow : gArgs.GetArgs("-rpcallowip")) {
- CSubNet subnet;
- LookupSubNet(strAllow.c_str(), subnet);
- if (!subnet.IsValid()) {
- uiInterface.ThreadSafeMessageBox(
- strprintf("Invalid -rpcallowip subnet specification: %s. Valid are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24).", strAllow),
- "", CClientUIInterface::MSG_ERROR);
- return false;
- }
- rpc_allow_subnets.push_back(subnet);
+ for (const std::string& strAllow : gArgs.GetArgs("-rpcallowip")) {
+ CSubNet subnet;
+ LookupSubNet(strAllow.c_str(), subnet);
+ if (!subnet.IsValid()) {
+ uiInterface.ThreadSafeMessageBox(
+ strprintf("Invalid -rpcallowip subnet specification: %s. Valid are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24).", strAllow),
+ "", CClientUIInterface::MSG_ERROR);
+ return false;
}
+ rpc_allow_subnets.push_back(subnet);
}
std::string strAllowed;
for (const CSubNet& subnet : rpc_allow_subnets)
diff --git a/src/init.cpp b/src/init.cpp
index 88084cbeec..d59713258c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -336,6 +336,9 @@ std::string HelpMessage(HelpMessageMode mode)
#endif
}
strUsage += HelpMessageOpt("-datadir=<dir>", _("Specify data directory"));
+ if (showDebug) {
+ strUsage += HelpMessageOpt("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize));
+ }
strUsage += HelpMessageOpt("-dbcache=<n>", strprintf(_("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache));
if (showDebug)
strUsage += HelpMessageOpt("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER));
@@ -880,9 +883,7 @@ bool AppInitParameterInteraction()
}
// -bind and -whitebind can't be set when not listening
- size_t nUserBind =
- (gArgs.IsArgSet("-bind") ? gArgs.GetArgs("-bind").size() : 0) +
- (gArgs.IsArgSet("-whitebind") ? gArgs.GetArgs("-whitebind").size() : 0);
+ size_t nUserBind = gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size();
if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
return InitError("Cannot set -bind or -whitebind together with -listen=0");
}
@@ -920,15 +921,13 @@ bool AppInitParameterInteraction()
}
// Now remove the logging categories which were explicitly excluded
- if (gArgs.IsArgSet("-debugexclude")) {
- for (const std::string& cat : gArgs.GetArgs("-debugexclude")) {
- uint32_t flag = 0;
- if (!GetLogCategory(&flag, &cat)) {
- InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat));
- continue;
- }
- logCategories &= ~flag;
+ for (const std::string& cat : gArgs.GetArgs("-debugexclude")) {
+ uint32_t flag = 0;
+ if (!GetLogCategory(&flag, &cat)) {
+ InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat));
+ continue;
}
+ logCategories &= ~flag;
}
// Check for -debugnet
@@ -1238,13 +1237,10 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<std::string> uacomments;
- if (gArgs.IsArgSet("-uacomment")) {
- for (std::string cmt : gArgs.GetArgs("-uacomment"))
- {
- if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
- return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
- uacomments.push_back(cmt);
- }
+ for (const std::string& cmt : gArgs.GetArgs("-uacomment")) {
+ if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
+ return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
+ uacomments.push_back(cmt);
}
strSubVersion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments);
if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
@@ -1317,14 +1313,12 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
fDiscover = GetBoolArg("-discover", true);
fRelayTxes = !GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
- if (gArgs.IsArgSet("-externalip")) {
- for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
- CService addrLocal;
- if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
- AddLocal(addrLocal, LOCAL_MANUAL);
- else
- return InitError(ResolveErrMsg("externalip", strAddr));
- }
+ for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
+ CService addrLocal;
+ if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
+ AddLocal(addrLocal, LOCAL_MANUAL);
+ else
+ return InitError(ResolveErrMsg("externalip", strAddr));
}
#if ENABLE_ZMQ
@@ -1382,7 +1376,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReindex);
pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReindex || fReindexChainState);
pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview);
- pcoinsTip = new CCoinsViewCache(pcoinscatcher);
if (fReindex) {
pblocktree->WriteReindexing(true);
@@ -1426,6 +1419,13 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
break;
}
+ if (!ReplayBlocks(chainparams, pcoinsdbview)) {
+ strLoadError = _("Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.");
+ break;
+ }
+ pcoinsTip = new CCoinsViewCache(pcoinscatcher);
+ LoadChainTip(chainparams);
+
if (!fReindex && chainActive.Tip() != NULL) {
uiInterface.InitMessage(_("Rewinding blocks..."));
if (!RewindBlockIndex(chainparams)) {
@@ -1553,10 +1553,8 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
std::vector<fs::path> vImportFiles;
- if (gArgs.IsArgSet("-loadblock"))
- {
- for (const std::string& strFile : gArgs.GetArgs("-loadblock"))
- vImportFiles.push_back(strFile);
+ for (const std::string& strFile : gArgs.GetArgs("-loadblock")) {
+ vImportFiles.push_back(strFile);
}
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
@@ -1598,36 +1596,30 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
- if (gArgs.IsArgSet("-bind")) {
- for (const std::string& strBind : gArgs.GetArgs("-bind")) {
- CService addrBind;
- if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
- return InitError(ResolveErrMsg("bind", strBind));
- }
- connOptions.vBinds.push_back(addrBind);
+ for (const std::string& strBind : gArgs.GetArgs("-bind")) {
+ CService addrBind;
+ if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
+ return InitError(ResolveErrMsg("bind", strBind));
}
+ connOptions.vBinds.push_back(addrBind);
}
- if (gArgs.IsArgSet("-whitebind")) {
- for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
- CService addrBind;
- if (!Lookup(strBind.c_str(), addrBind, 0, false)) {
- return InitError(ResolveErrMsg("whitebind", strBind));
- }
- if (addrBind.GetPort() == 0) {
- return InitError(strprintf(_("Need to specify a port with -whitebind: '%s'"), strBind));
- }
- connOptions.vWhiteBinds.push_back(addrBind);
+ for (const std::string& strBind : gArgs.GetArgs("-whitebind")) {
+ CService addrBind;
+ if (!Lookup(strBind.c_str(), addrBind, 0, false)) {
+ return InitError(ResolveErrMsg("whitebind", strBind));
}
+ if (addrBind.GetPort() == 0) {
+ return InitError(strprintf(_("Need to specify a port with -whitebind: '%s'"), strBind));
+ }
+ connOptions.vWhiteBinds.push_back(addrBind);
}
- if (gArgs.IsArgSet("-whitelist")) {
- for (const auto& net : gArgs.GetArgs("-whitelist")) {
- CSubNet subnet;
- LookupSubNet(net.c_str(), subnet);
- if (!subnet.IsValid())
- return InitError(strprintf(_("Invalid netmask specified in -whitelist: '%s'"), net));
- connOptions.vWhitelistedRange.push_back(subnet);
- }
+ for (const auto& net : gArgs.GetArgs("-whitelist")) {
+ CSubNet subnet;
+ LookupSubNet(net.c_str(), subnet);
+ if (!subnet.IsValid())
+ return InitError(strprintf(_("Invalid netmask specified in -whitelist: '%s'"), net));
+ connOptions.vWhitelistedRange.push_back(subnet);
}
if (gArgs.IsArgSet("-seednode")) {
diff --git a/src/keystore.h b/src/keystore.h
index a2621f2de4..965ae0c79a 100644
--- a/src/keystore.h
+++ b/src/keystore.h
@@ -60,9 +60,9 @@ protected:
WatchOnlySet setWatchOnly;
public:
- bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey);
- bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const;
- bool HaveKey(const CKeyID &address) const
+ bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
+ bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
+ bool HaveKey(const CKeyID &address) const override
{
bool result;
{
@@ -71,7 +71,7 @@ public:
}
return result;
}
- void GetKeys(std::set<CKeyID> &setAddress) const
+ void GetKeys(std::set<CKeyID> &setAddress) const override
{
setAddress.clear();
{
@@ -84,7 +84,7 @@ public:
}
}
}
- bool GetKey(const CKeyID &address, CKey &keyOut) const
+ bool GetKey(const CKeyID &address, CKey &keyOut) const override
{
{
LOCK(cs_KeyStore);
@@ -97,14 +97,14 @@ public:
}
return false;
}
- virtual bool AddCScript(const CScript& redeemScript);
- virtual bool HaveCScript(const CScriptID &hash) const;
- virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const;
+ virtual bool AddCScript(const CScript& redeemScript) override;
+ virtual bool HaveCScript(const CScriptID &hash) const override;
+ virtual bool GetCScript(const CScriptID &hash, CScript& redeemScriptOut) const override;
- virtual bool AddWatchOnly(const CScript &dest);
- virtual bool RemoveWatchOnly(const CScript &dest);
- virtual bool HaveWatchOnly(const CScript &dest) const;
- virtual bool HaveWatchOnly() const;
+ virtual bool AddWatchOnly(const CScript &dest) override;
+ virtual bool RemoveWatchOnly(const CScript &dest) override;
+ virtual bool HaveWatchOnly(const CScript &dest) const override;
+ virtual bool HaveWatchOnly() const override;
};
typedef std::vector<unsigned char, secure_allocator<unsigned char> > CKeyingMaterial;
diff --git a/src/net.cpp b/src/net.cpp
index 91a62626a2..301cf58b87 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1677,7 +1677,7 @@ void CConnman::ProcessOneShot()
void CConnman::ThreadOpenConnections()
{
// Connect to specific addresses
- if (gArgs.IsArgSet("-connect") && gArgs.GetArgs("-connect").size() > 0)
+ if (gArgs.IsArgSet("-connect"))
{
for (int64_t nLoop = 0;; nLoop++)
{
@@ -1903,8 +1903,7 @@ void CConnman::ThreadOpenAddedConnections()
{
{
LOCK(cs_vAddedNodes);
- if (gArgs.IsArgSet("-addnode"))
- vAddedNodes = gArgs.GetArgs("-addnode");
+ vAddedNodes = gArgs.GetArgs("-addnode");
}
while (true)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 8fc6f6f95e..4d832f3711 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -452,25 +452,6 @@ bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
return false;
}
-/** Find the last common ancestor two blocks have.
- * Both pa and pb must be non-NULL. */
-const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb) {
- if (pa->nHeight > pb->nHeight) {
- pa = pa->GetAncestor(pb->nHeight);
- } else if (pb->nHeight > pa->nHeight) {
- pb = pb->GetAncestor(pa->nHeight);
- }
-
- while (pa != pb && pa && pb) {
- pa = pa->pprev;
- pb = pb->pprev;
- }
-
- // Eventually all chain branches meet at the genesis block.
- assert(pa == pb);
- return pa;
-}
-
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index 3dfb51ccfa..33f4535ee2 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -26,7 +26,6 @@
class CBlockIndex;
-static const int64_t nClientStartupTime = GetTime();
static int64_t nLastHeaderTipUpdateNotification = 0;
static int64_t nLastBlockTipUpdateNotification = 0;
@@ -238,7 +237,7 @@ bool ClientModel::isReleaseVersion() const
QString ClientModel::formatClientStartupTime() const
{
- return QDateTime::fromTime_t(nClientStartupTime).toString();
+ return QDateTime::fromTime_t(GetStartupTime()).toString();
}
QString ClientModel::dataDir() const
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 8f7f76841d..c17ca2fa3a 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -991,7 +991,7 @@ UniValue gettxout(const JSONRPCRequest& request)
if (fMempool) {
LOCK(mempool.cs);
CCoinsViewMemPool view(pcoinsTip, mempool);
- if (!view.GetCoin(out, coin) || mempool.isSpent(out)) { // TODO: filtering spent coins should be done by the CCoinsViewMemPool
+ if (!view.GetCoin(out, coin) || mempool.isSpent(out)) {
return NullUniValue;
}
} else {
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 1a04ce2b47..c320d20453 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -258,6 +258,22 @@ UniValue stop(const JSONRPCRequest& jsonRequest)
return "Bitcoin server stopping";
}
+UniValue uptime(const JSONRPCRequest& jsonRequest)
+{
+ if (jsonRequest.fHelp || jsonRequest.params.size() > 1)
+ throw std::runtime_error(
+ "uptime\n"
+ "\nReturns the total uptime of the server.\n"
+ "\nResult:\n"
+ "ttt (numeric) The number of seconds that the server has been running\n"
+ "\nExamples:\n"
+ + HelpExampleCli("uptime", "")
+ + HelpExampleRpc("uptime", "")
+ );
+
+ return GetTime() - GetStartupTime();
+}
+
/**
* Call Table
*/
@@ -267,6 +283,7 @@ static const CRPCCommand vRPCCommands[] =
/* Overall control/query calls */
{ "control", "help", &help, true, {"command"} },
{ "control", "stop", &stop, true, {} },
+ { "control", "uptime", &uptime, true, {} },
};
CRPCTable::CRPCTable()
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index 60f6f711e6..ab1dc4e681 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -160,9 +160,9 @@ protected:
public:
TransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn) : txTo(txToIn), nIn(nInIn), amount(amountIn), txdata(NULL) {}
TransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData& txdataIn) : txTo(txToIn), nIn(nInIn), amount(amountIn), txdata(&txdataIn) {}
- bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const;
- bool CheckLockTime(const CScriptNum& nLockTime) const;
- bool CheckSequence(const CScriptNum& nSequence) const;
+ bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override;
+ bool CheckLockTime(const CScriptNum& nLockTime) const override;
+ bool CheckSequence(const CScriptNum& nSequence) const override;
};
class MutableTransactionSignatureChecker : public TransactionSignatureChecker
diff --git a/src/script/sigcache.h b/src/script/sigcache.h
index 55cec4cc8d..5832b264b3 100644
--- a/src/script/sigcache.h
+++ b/src/script/sigcache.h
@@ -48,7 +48,7 @@ private:
public:
CachingTransactionSignatureChecker(const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, bool storeIn, PrecomputedTransactionData& txdataIn) : TransactionSignatureChecker(txToIn, nInIn, amountIn, txdataIn), store(storeIn) {}
- bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const;
+ bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const override;
};
void InitSignatureCache();
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 4b01a6de94..ec93c5451b 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -393,7 +393,7 @@ class DummySignatureChecker : public BaseSignatureChecker
public:
DummySignatureChecker() {}
- bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const
+ bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override
{
return true;
}
diff --git a/src/script/sign.h b/src/script/sign.h
index f3c0be4139..bd45862892 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -40,8 +40,8 @@ class TransactionSignatureCreator : public BaseSignatureCreator {
public:
TransactionSignatureCreator(const CKeyStore* keystoreIn, const CTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn=SIGHASH_ALL);
- const BaseSignatureChecker& Checker() const { return checker; }
- bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const;
+ const BaseSignatureChecker& Checker() const override { return checker; }
+ bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
};
class MutableTransactionSignatureCreator : public TransactionSignatureCreator {
@@ -55,8 +55,8 @@ public:
class DummySignatureCreator : public BaseSignatureCreator {
public:
DummySignatureCreator(const CKeyStore* keystoreIn) : BaseSignatureCreator(keystoreIn) {}
- const BaseSignatureChecker& Checker() const;
- bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const;
+ const BaseSignatureChecker& Checker() const override;
+ bool CreateSig(std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override;
};
struct SignatureData {
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 98c1581093..2df6b84a59 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -148,9 +148,9 @@ class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
Win32LockedPageAllocator();
- void* AllocateLocked(size_t len, bool *lockingSuccess);
- void FreeLocked(void* addr, size_t len);
- size_t GetLimit();
+ void* AllocateLocked(size_t len, bool *lockingSuccess) override;
+ void FreeLocked(void* addr, size_t len) override;
+ size_t GetLimit() override;
private:
size_t page_size;
};
@@ -200,9 +200,9 @@ class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
PosixLockedPageAllocator();
- void* AllocateLocked(size_t len, bool *lockingSuccess);
- void FreeLocked(void* addr, size_t len);
- size_t GetLimit();
+ void* AllocateLocked(size_t len, bool *lockingSuccess) override;
+ void FreeLocked(void* addr, size_t len) override;
+ size_t GetLimit() override;
private:
size_t page_size;
};
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index dc5372a070..bc6aef2c11 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -27,7 +27,7 @@ public:
insecure_rand = FastRandomContext(true);
}
- int RandomInt(int nMax)
+ int RandomInt(int nMax) override
{
state = (CHashWriter(SER_GETHASH, 0) << state).GetHash().GetCheapHash();
return (unsigned int)(state % nMax);
diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp
index 3f15a0dec1..4a533b5bf2 100644
--- a/src/test/allocator_tests.cpp
+++ b/src/test/allocator_tests.cpp
@@ -131,7 +131,7 @@ class TestLockedPageAllocator: public LockedPageAllocator
{
public:
TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
- void* AllocateLocked(size_t len, bool *lockingSuccess)
+ void* AllocateLocked(size_t len, bool *lockingSuccess) override
{
*lockingSuccess = false;
if (count > 0) {
@@ -146,10 +146,10 @@ public:
}
return 0;
}
- void FreeLocked(void* addr, size_t len)
+ void FreeLocked(void* addr, size_t len) override
{
}
- size_t GetLimit()
+ size_t GetLimit() override
{
return std::numeric_limits<size_t>::max();
}
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 622b157621..e24431528a 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -50,12 +50,6 @@ public:
return true;
}
- bool HaveCoin(const COutPoint& outpoint) const override
- {
- Coin coin;
- return GetCoin(outpoint, coin);
- }
-
uint256 GetBestBlock() const override { return hashBestBlock_; }
bool BatchWrite(CCoinsMap& mapCoins, const uint256& hashBlock) override
@@ -147,8 +141,22 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
{
uint256 txid = txids[InsecureRandRange(txids.size())]; // txid we're going to modify in this iteration.
Coin& coin = result[COutPoint(txid, 0)];
+
+ // Determine whether to test HaveCoin before or after Access* (or both). As these functions
+ // can influence each other's behaviour by pulling things into the cache, all combinations
+ // are tested.
+ bool test_havecoin_before = InsecureRandBits(2) == 0;
+ bool test_havecoin_after = InsecureRandBits(2) == 0;
+
+ bool result_havecoin = test_havecoin_before ? stack.back()->HaveCoin(COutPoint(txid, 0)) : false;
const Coin& entry = (InsecureRandRange(500) == 0) ? AccessByTxid(*stack.back(), txid) : stack.back()->AccessCoin(COutPoint(txid, 0));
BOOST_CHECK(coin == entry);
+ BOOST_CHECK(!test_havecoin_before || result_havecoin == !entry.IsSpent());
+
+ if (test_havecoin_after) {
+ bool ret = stack.back()->HaveCoin(COutPoint(txid, 0));
+ BOOST_CHECK(ret == !entry.IsSpent());
+ }
if (InsecureRandRange(5) == 0 || coin.IsSpent()) {
Coin newcoin;
@@ -628,7 +636,7 @@ BOOST_AUTO_TEST_CASE(ccoins_access)
CheckAccessCoin(ABSENT, VALUE2, VALUE2, FRESH , FRESH );
CheckAccessCoin(ABSENT, VALUE2, VALUE2, DIRTY , DIRTY );
CheckAccessCoin(ABSENT, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH);
- CheckAccessCoin(PRUNED, ABSENT, PRUNED, NO_ENTRY , FRESH );
+ CheckAccessCoin(PRUNED, ABSENT, ABSENT, NO_ENTRY , NO_ENTRY );
CheckAccessCoin(PRUNED, PRUNED, PRUNED, 0 , 0 );
CheckAccessCoin(PRUNED, PRUNED, PRUNED, FRESH , FRESH );
CheckAccessCoin(PRUNED, PRUNED, PRUNED, DIRTY , DIRTY );
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index 66354699b2..095d86834c 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -29,7 +29,7 @@ public:
class CAddrManUncorrupted : public CAddrManSerializationMock
{
public:
- void Serialize(CDataStream& s) const
+ void Serialize(CDataStream& s) const override
{
CAddrMan::Serialize(s);
}
@@ -38,7 +38,7 @@ public:
class CAddrManCorrupted : public CAddrManSerializationMock
{
public:
- void Serialize(CDataStream& s) const
+ void Serialize(CDataStream& s) const override
{
// Produces corrupt output that claims addrman has 20 addrs when it only has one addr.
unsigned char nVersion = 1;
diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp
index faa2383d14..722f6ae059 100644
--- a/src/test/versionbits_tests.cpp
+++ b/src/test/versionbits_tests.cpp
@@ -22,11 +22,11 @@ private:
mutable ThresholdConditionCache cache;
public:
- int64_t BeginTime(const Consensus::Params& params) const { return TestTime(10000); }
- int64_t EndTime(const Consensus::Params& params) const { return TestTime(20000); }
- int Period(const Consensus::Params& params) const { return 1000; }
- int Threshold(const Consensus::Params& params) const { return 900; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const { return (pindex->nVersion & 0x100); }
+ int64_t BeginTime(const Consensus::Params& params) const override { return TestTime(10000); }
+ int64_t EndTime(const Consensus::Params& params) const override { return TestTime(20000); }
+ int Period(const Consensus::Params& params) const override { return 1000; }
+ int Threshold(const Consensus::Params& params) const override { return 900; }
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { return (pindex->nVersion & 0x100); }
ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, paramsDummy, cache); }
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, paramsDummy, cache); }
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 97e916fd22..d24162ba2d 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -7,8 +7,10 @@
#include "chainparams.h"
#include "hash.h"
+#include "random.h"
#include "pow.h"
#include "uint256.h"
+#include "util.h"
#include <stdint.h>
@@ -21,6 +23,7 @@ static const char DB_TXINDEX = 't';
static const char DB_BLOCK_INDEX = 'b';
static const char DB_BEST_BLOCK = 'B';
+static const char DB_HEAD_BLOCKS = 'H';
static const char DB_FLAG = 'F';
static const char DB_REINDEX_FLAG = 'R';
static const char DB_LAST_BLOCK = 'l';
@@ -68,10 +71,39 @@ uint256 CCoinsViewDB::GetBestBlock() const {
return hashBestChain;
}
+std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
+ std::vector<uint256> vhashHeadBlocks;
+ if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
+ return std::vector<uint256>();
+ }
+ return vhashHeadBlocks;
+}
+
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
CDBBatch batch(db);
size_t count = 0;
size_t changed = 0;
+ size_t batch_size = (size_t)GetArg("-dbbatchsize", nDefaultDbBatchSize);
+ int crash_simulate = GetArg("-dbcrashratio", 0);
+ assert(!hashBlock.IsNull());
+
+ uint256 old_tip = GetBestBlock();
+ if (old_tip.IsNull()) {
+ // We may be in the middle of replaying.
+ std::vector<uint256> old_heads = GetHeadBlocks();
+ if (old_heads.size() == 2) {
+ assert(old_heads[0] == hashBlock);
+ old_tip = old_heads[1];
+ }
+ }
+
+ // In the first batch, mark the database as being in the middle of a
+ // transition from old_tip to hashBlock.
+ // A vector is used for future extensibility, as we may want to support
+ // interrupting after partial writes from multiple independent reorgs.
+ batch.Erase(DB_BEST_BLOCK);
+ batch.Write(DB_HEAD_BLOCKS, std::vector<uint256>{hashBlock, old_tip});
+
for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) {
CoinEntry entry(&it->first);
@@ -84,10 +116,25 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
count++;
CCoinsMap::iterator itOld = it++;
mapCoins.erase(itOld);
+ if (batch.SizeEstimate() > batch_size) {
+ LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
+ db.WriteBatch(batch);
+ batch.Clear();
+ if (crash_simulate) {
+ static FastRandomContext rng;
+ if (rng.randrange(crash_simulate) == 0) {
+ LogPrintf("Simulating a crash. Goodbye.\n");
+ _Exit(0);
+ }
+ }
+ }
}
- if (!hashBlock.IsNull())
- batch.Write(DB_BEST_BLOCK, hashBlock);
+ // In the last batch, mark the database as consistent with hashBlock again.
+ batch.Erase(DB_HEAD_BLOCKS);
+ batch.Write(DB_BEST_BLOCK, hashBlock);
+
+ LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
bool ret = db.WriteBatch(batch);
LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
return ret;
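
The write ordering above is what keeps the flush crash-safe even though it is no longer atomic. A condensed sketch of the protocol, using the DB_BEST_BLOCK/DB_HEAD_BLOCKS keys and CDBBatch as introduced in this file (the numbered steps are illustrative, not separate functions):

    CDBBatch batch(db);
    // 1. Mark the database as being in transition from old_tip to hashBlock.
    batch.Erase(DB_BEST_BLOCK);
    batch.Write(DB_HEAD_BLOCKS, std::vector<uint256>{hashBlock, old_tip});
    // 2. Stream the dirty coins into the batch, committing and clearing it
    //    whenever the estimated size exceeds -dbbatchsize. A crash anywhere in
    //    this phase leaves DB_HEAD_BLOCKS = {hashBlock, old_tip} on disk.
    // 3. Mark the database as consistent with hashBlock again.
    batch.Erase(DB_HEAD_BLOCKS);
    batch.Write(DB_BEST_BLOCK, hashBlock);
    db.WriteBatch(batch);
    // On the next startup, ReplayBlocks() sees the two-element DB_HEAD_BLOCKS
    // entry and re-applies (or first unwinds) blocks until the chainstate is
    // consistent with hashBlock.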
diff --git a/src/txdb.h b/src/txdb.h
index 2a3e4eb696..adcbc73380 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -19,12 +19,12 @@ class CBlockIndex;
class CCoinsViewDBCursor;
class uint256;
-//! Compensate for extra memory peak (x1.5-x1.9) at flush time.
-static constexpr int DB_PEAK_USAGE_FACTOR = 2;
//! No need to periodic flush if at least this much space still available.
-static constexpr int MAX_BLOCK_COINSDB_USAGE = 10 * DB_PEAK_USAGE_FACTOR;
+static constexpr int MAX_BLOCK_COINSDB_USAGE = 10;
//! -dbcache default (MiB)
static const int64_t nDefaultDbCache = 450;
+//! -dbbatchsize default (bytes)
+static const int64_t nDefaultDbBatchSize = 16 << 20;
//! max. -dbcache (MiB)
static const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
//! min. -dbcache (MiB)
@@ -74,6 +74,7 @@ public:
bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
bool HaveCoin(const COutPoint &outpoint) const override;
uint256 GetBestBlock() const override;
+ std::vector<uint256> GetHeadBlocks() const override;
bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override;
CCoinsViewCursor *Cursor() const override;
@@ -88,12 +89,12 @@ class CCoinsViewDBCursor: public CCoinsViewCursor
public:
~CCoinsViewDBCursor() {}
- bool GetKey(COutPoint &key) const;
- bool GetValue(Coin &coin) const;
- unsigned int GetValueSize() const;
+ bool GetKey(COutPoint &key) const override;
+ bool GetValue(Coin &coin) const override;
+ unsigned int GetValueSize() const override;
- bool Valid() const;
- void Next();
+ bool Valid() const override;
+ void Next() override;
private:
CCoinsViewDBCursor(CDBIterator* pcursorIn, const uint256 &hashBlockIn):
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 8deb703d2e..dcfc5ffde0 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -903,11 +903,7 @@ bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const {
return false;
}
}
- return (base->GetCoin(outpoint, coin) && !coin.IsSpent());
-}
-
-bool CCoinsViewMemPool::HaveCoin(const COutPoint &outpoint) const {
- return mempool.exists(outpoint) || base->HaveCoin(outpoint);
+ return base->GetCoin(outpoint, coin);
}
size_t CTxMemPool::DynamicMemoryUsage() const {
diff --git a/src/txmempool.h b/src/txmempool.h
index 7ca3b18a1e..d272114a7c 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -618,13 +618,6 @@ public:
return (mapTx.count(hash) != 0);
}
- bool exists(const COutPoint& outpoint) const
- {
- LOCK(cs);
- auto it = mapTx.find(outpoint.hash);
- return (it != mapTx.end() && outpoint.n < it->GetTx().vout.size());
- }
-
CTransactionRef get(const uint256& hash) const;
TxMempoolInfo info(const uint256& hash) const;
std::vector<TxMempoolInfo> infoAll() const;
@@ -676,6 +669,13 @@ private:
/**
* CCoinsView that brings transactions from a memorypool into view.
* It does not check for spendings by memory pool transactions.
+ * Instead, it provides access to all Coins which are either unspent in the
+ * base CCoinsView, or are outputs from any mempool transaction!
+ * This allows transaction replacement to work as expected, as you want to
+ * have all inputs "available" to check signatures, and any cycles in the
+ * dependency graph are checked directly in AcceptToMemoryPool.
+ * It also allows you to sign a double-spend directly in signrawtransaction,
+ * as long as the conflicting transaction is not yet confirmed.
*/
class CCoinsViewMemPool : public CCoinsViewBacked
{
@@ -684,8 +684,7 @@ protected:
public:
CCoinsViewMemPool(CCoinsView* baseIn, const CTxMemPool& mempoolIn);
- bool GetCoin(const COutPoint &outpoint, Coin &coin) const;
- bool HaveCoin(const COutPoint &outpoint) const;
+ bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
};
/**
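
A behavioural sketch of the semantics spelled out in the expanded CCoinsViewMemPool comment above; viewMemPool is assumed to be a CCoinsViewMemPool layered over the chainstate view, and out some COutPoint of interest:

    Coin coin;
    if (viewMemPool.GetCoin(out, coin)) {
        // out is either unspent in the base view, or an output created by a
        // transaction currently in the mempool. Spends by other mempool
        // transactions are deliberately not filtered here; callers such as
        // gettxout (mempool.isSpent) and AcceptToMemoryPool do that themselves.
    }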
diff --git a/src/util.cpp b/src/util.cpp
index 20a8082017..b76c173f90 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -84,6 +84,8 @@
#include <openssl/rand.h>
#include <openssl/conf.h>
+// Application startup time (used for uptime calculation)
+const int64_t nStartupTime = GetTime();
const char * const BITCOIN_CONF_FILENAME = "bitcoin.conf";
const char * const BITCOIN_PID_FILENAME = "bitcoind.pid";
@@ -420,7 +422,9 @@ void ArgsManager::ParseParameters(int argc, const char* const argv[])
std::vector<std::string> ArgsManager::GetArgs(const std::string& strArg)
{
LOCK(cs_args);
- return mapMultiArgs.at(strArg);
+ if (IsArgSet(strArg))
+ return mapMultiArgs.at(strArg);
+ return {};
}
bool ArgsManager::IsArgSet(const std::string& strArg)
@@ -889,3 +893,9 @@ std::string CopyrightHolders(const std::string& strPrefix)
}
return strCopyrightHolders;
}
+
+// Obtain the application startup time (used for uptime calculation)
+int64_t GetStartupTime()
+{
+ return nStartupTime;
+}
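
The GetArgs() change above is what permits dropping the IsArgSet() guards throughout init.cpp, httprpc.cpp and httpserver.cpp in this patch: iterating over an option that was never given now simply iterates zero times. A minimal sketch, using a hypothetical option name:

    for (const std::string& val : gArgs.GetArgs("-hypotheticalopt")) {
        // Never entered when -hypotheticalopt was not supplied, so no
        // separate gArgs.IsArgSet("-hypotheticalopt") check is needed.
        LogPrintf("-hypotheticalopt value: %s\n", val);
    }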
diff --git a/src/util.h b/src/util.h
index a4d7aa4db8..824ad51ac4 100644
--- a/src/util.h
+++ b/src/util.h
@@ -5,7 +5,7 @@
/**
* Server/client environment: argument handling, config file parsing,
- * logging, thread wrappers
+ * logging, thread wrappers, startup time
*/
#ifndef BITCOIN_UTIL_H
#define BITCOIN_UTIL_H
@@ -29,6 +29,9 @@
#include <boost/signals2/signal.hpp>
+// Application startup time (used for uptime calculation)
+int64_t GetStartupTime();
+
static const bool DEFAULT_LOGTIMEMICROS = false;
static const bool DEFAULT_LOGIPS = false;
static const bool DEFAULT_LOGTIMESTAMPS = true;
diff --git a/src/validation.cpp b/src/validation.cpp
index eb6ea42b63..0fe7f775af 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -96,7 +96,7 @@ namespace {
struct CBlockIndexWorkComparator
{
- bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
+ bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
// First sort by most total work, ...
if (pa->nChainWork > pb->nChainWork) return false;
if (pa->nChainWork < pb->nChainWork) return true;
@@ -1331,17 +1331,19 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
return DISCONNECT_FAILED; // adding output for transaction without known metadata
}
}
- view.AddCoin(out, std::move(undo), undo.fCoinBase);
+ // The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
+ // sure that the coin did not already exist in the cache. As we have queried for that above
+ // using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
+ // it is an overwrite.
+ view.AddCoin(out, std::move(undo), !fClean);
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
- * When UNCLEAN or FAILED is returned, view is left in an indeterminate state. */
+ * When FAILED is returned, view is left in an indeterminate state. */
static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
- assert(pindex->GetBlockHash() == view.GetBestBlock());
-
bool fClean = true;
CBlockUndo blockUndo;
@@ -1364,6 +1366,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex*
for (int i = block.vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = *(block.vtx[i]);
uint256 hash = tx.GetHash();
+ bool is_coinbase = tx.IsCoinBase();
// Check that all outputs are available and match the outputs in the block itself
// exactly.
@@ -1372,7 +1375,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex*
COutPoint out(hash, o);
Coin coin;
bool is_spent = view.SpendCoin(out, &coin);
- if (!is_spent || tx.vout[o] != coin.out) {
+ if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
fClean = false; // transaction output mismatch
}
}
@@ -1462,12 +1465,12 @@ private:
public:
WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
- int64_t BeginTime(const Consensus::Params& params) const { return 0; }
- int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
- int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
- int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }
+ int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
+ int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
+ int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
+ int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
{
return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
((pindex->nVersion >> bit) & 1) != 0 &&
@@ -1779,7 +1782,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
nLastSetChain = nNow;
}
int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t cacheSize = pcoinsTip->DynamicMemoryUsage() * DB_PEAK_USAGE_FACTOR;
+ int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
// The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
@@ -1946,6 +1949,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
+ assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
bool flushed = view.Flush();
@@ -3417,20 +3421,25 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
pblocktree->ReadFlag("txindex", fTxIndex);
LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
+ return true;
+}
+
+void LoadChainTip(const CChainParams& chainparams)
+{
+ if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return;
+
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end())
- return true;
+ return;
chainActive.SetTip(it->second);
PruneBlockIndexCandidates();
- LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
+ LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
-
- return true;
}
CVerifyDB::CVerifyDB()
@@ -3499,6 +3508,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
+ assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res = DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
@@ -3538,6 +3548,92 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
return true;
}
+/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
+static bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
+{
+ // TODO: merge with ConnectBlock
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
+ return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ }
+
+ for (const CTransactionRef& tx : block.vtx) {
+ if (!tx->IsCoinBase()) {
+ for (const CTxIn &txin : tx->vin) {
+ inputs.SpendCoin(txin.prevout);
+ }
+ }
+ // Pass check = true as every addition may be an overwrite.
+ AddCoins(inputs, *tx, pindex->nHeight, true);
+ }
+ return true;
+}
+
+bool ReplayBlocks(const CChainParams& params, CCoinsView* view)
+{
+ LOCK(cs_main);
+
+ CCoinsViewCache cache(view);
+
+ std::vector<uint256> hashHeads = view->GetHeadBlocks();
+ if (hashHeads.empty()) return true; // We're already in a consistent state.
+ if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
+
+ uiInterface.ShowProgress(_("Replaying blocks..."), 0);
+ LogPrintf("Replaying blocks\n");
+
+ const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
+ const CBlockIndex* pindexNew; // New tip during the interrupted flush.
+ const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
+
+ if (mapBlockIndex.count(hashHeads[0]) == 0) {
+ return error("ReplayBlocks(): reorganization to unknown block requested");
+ }
+ pindexNew = mapBlockIndex[hashHeads[0]];
+
+ if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
+ if (mapBlockIndex.count(hashHeads[1]) == 0) {
+ return error("ReplayBlocks(): reorganization from unknown block requested");
+ }
+ pindexOld = mapBlockIndex[hashHeads[1]];
+ pindexFork = LastCommonAncestor(pindexOld, pindexNew);
+ assert(pindexFork != nullptr);
+ }
+
+ // Rollback along the old branch.
+ while (pindexOld != pindexFork) {
+ if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
+ return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ }
+ LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
+ DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
+ if (res == DISCONNECT_FAILED) {
+ return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ }
+ // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
+ // overwritten. It corresponds to cases where the block-to-be-disconnected never had all its operations
+ // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
+ // the result is still a version of the UTXO set with the effects of that block undone.
+ }
+ pindexOld = pindexOld->pprev;
+ }
+
+ // Roll forward from the forking point to the new tip.
+ int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
+ for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
+ const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
+ LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
+ if (!RollforwardBlock(pindex, cache, params)) return false;
+ }
+
+ cache.SetBestBlock(pindexNew->GetBlockHash());
+ cache.Flush();
+ uiInterface.ShowProgress("", 100);
+ return true;
+}
+
bool RewindBlockIndex(const CChainParams& params)
{
LOCK(cs_main);
@@ -3687,8 +3783,6 @@ bool InitBlockIndex(const CChainParams& chainparams)
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
return error("LoadBlockIndex(): genesis block not accepted");
- // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
- return FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS);
} catch (const std::runtime_error& e) {
return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
}
diff --git a/src/validation.h b/src/validation.h
index 82df4cb170..8a721dd7a2 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -260,6 +260,8 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
bool InitBlockIndex(const CChainParams& chainparams);
/** Load the block tree and coins database from disk */
bool LoadBlockIndex(const CChainParams& chainparams);
+/** Update the chain tip based on database information. */
+void LoadChainTip(const CChainParams& chainparams);
/** Unload database information */
void UnloadBlockIndex();
/** Run an instance of the script checking thread */
@@ -424,6 +426,9 @@ public:
bool VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth);
};
+/** Replay blocks that aren't fully applied to the database. */
+bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
+
/** Find the last common block between the parameter chain and a locator. */
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator);
diff --git a/src/versionbits.cpp b/src/versionbits.cpp
index 92c90b7efb..8047e17aa8 100644
--- a/src/versionbits.cpp
+++ b/src/versionbits.cpp
@@ -174,12 +174,12 @@ private:
const Consensus::DeploymentPos id;
protected:
- int64_t BeginTime(const Consensus::Params& params) const { return params.vDeployments[id].nStartTime; }
- int64_t EndTime(const Consensus::Params& params) const { return params.vDeployments[id].nTimeout; }
- int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
- int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }
+ int64_t BeginTime(const Consensus::Params& params) const override { return params.vDeployments[id].nStartTime; }
+ int64_t EndTime(const Consensus::Params& params) const override { return params.vDeployments[id].nTimeout; }
+ int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
+ int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
{
return (((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) && (pindex->nVersion & Mask(params)) != 0);
}
diff --git a/src/wallet/crypter.h b/src/wallet/crypter.h
index f1c4f57428..1dc44e424f 100644
--- a/src/wallet/crypter.h
+++ b/src/wallet/crypter.h
@@ -157,8 +157,8 @@ public:
bool Lock();
virtual bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
- bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey);
- bool HaveKey(const CKeyID &address) const
+ bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
+ bool HaveKey(const CKeyID &address) const override
{
{
LOCK(cs_KeyStore);
@@ -168,9 +168,9 @@ public:
}
return false;
}
- bool GetKey(const CKeyID &address, CKey& keyOut) const;
- bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const;
- void GetKeys(std::set<CKeyID> &setAddress) const
+ bool GetKey(const CKeyID &address, CKey& keyOut) const override;
+ bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
+ void GetKeys(std::set<CKeyID> &setAddress) const override
{
if (!IsCrypted())
{
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 6ed955cf58..a3fd7408a0 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -1153,7 +1153,7 @@ public:
void ReturnKey();
bool GetReservedKey(CPubKey &pubkey, bool internal = false);
void KeepKey();
- void KeepScript() { KeepKey(); }
+ void KeepScript() override { KeepKey(); }
};
diff --git a/src/zmq/zmqpublishnotifier.h b/src/zmq/zmqpublishnotifier.h
index bcbecf1bde..1790fe5698 100644
--- a/src/zmq/zmqpublishnotifier.h
+++ b/src/zmq/zmqpublishnotifier.h
@@ -24,32 +24,32 @@ public:
*/
bool SendMessage(const char *command, const void* data, size_t size);
- bool Initialize(void *pcontext);
- void Shutdown();
+ bool Initialize(void *pcontext) override;
+ void Shutdown() override;
};
class CZMQPublishHashBlockNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyBlock(const CBlockIndex *pindex);
+ bool NotifyBlock(const CBlockIndex *pindex) override;
};
class CZMQPublishHashTransactionNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyTransaction(const CTransaction &transaction);
+ bool NotifyTransaction(const CTransaction &transaction) override;
};
class CZMQPublishRawBlockNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyBlock(const CBlockIndex *pindex);
+ bool NotifyBlock(const CBlockIndex *pindex) override;
};
class CZMQPublishRawTransactionNotifier : public CZMQAbstractPublishNotifier
{
public:
- bool NotifyTransaction(const CTransaction &transaction);
+ bool NotifyTransaction(const CTransaction &transaction) override;
};
#endif // BITCOIN_ZMQ_ZMQPUBLISHNOTIFIER_H
diff --git a/test/README.md b/test/README.md
index 4dd512638d..15f6df790f 100644
--- a/test/README.md
+++ b/test/README.md
@@ -15,84 +15,152 @@ The util tests are run as part of `make check` target. The functional
tests are run by the travis continuous build process whenever a pull
request is opened. Both sets of tests can also be run locally.
-Functional Test dependencies
-============================
+# Running tests locally
+
+Build for your system first. Be sure to enable wallet, utils and daemon when you configure. Tests will not run otherwise.
+
+### Functional tests
+
+#### Dependencies
+
The ZMQ functional test requires a python ZMQ library. To install it:
- on Unix, run `sudo apt-get install python3-zmq`
- on mac OS, run `pip3 install pyzmq`
-Running tests locally
-=====================
+#### Running the tests
-Build for your system first. Be sure to enable wallet, utils and daemon when you configure. Tests will not run otherwise.
+Individual tests can be run by directly calling the test script, eg:
-Functional tests
-----------------
+```
+test/functional/replace-by-fee.py
+```
-You can run any single test by calling
+or can be run through the test_runner harness, eg:
- test/functional/test_runner.py <testname>
+```
+test/functional/test_runner.py replace-by-fee.py
+```
-Or you can run any combination (incl. duplicates) of tests by calling
+You can run any combination (incl. duplicates) of tests by calling:
- test/functional/test_runner.py <testname1> <testname2> <testname3> ...
+```
+test/functional/test_runner.py <testname1> <testname2> <testname3> ...
+```
-Run the regression test suite with
+Run the regression test suite with:
- test/functional/test_runner.py
+```
+test/functional/test_runner.py
+```
Run all possible tests with
- test/functional/test_runner.py --extended
+```
+test/functional/test_runner.py --extended
+```
+
+By default, up to 4 tests will be run in parallel by test_runner. To specify
+how many jobs to run, append `--jobs=n`.
-By default, tests will be run in parallel. To specify how many jobs to run,
-append `--jobs=n` (default n=4).
+The individual tests and the test_runner harness have many command-line
+options. Run `test_runner.py -h` to see them all.
-If you want to create a basic coverage report for the RPC test suite, append `--coverage`.
+#### Troubleshooting and debugging test failures
-Possible options, which apply to each individual test run:
+##### Resource contention
+The P2P and RPC ports used by the bitcoind nodes-under-test are chosen to make
+conflicts with other processes unlikely. However, if there is another bitcoind
+process running on the system (perhaps from a previous test which hasn't successfully
+killed all its bitcoind nodes), then there may be a port conflict which will
+cause the test to fail. It is recommended that you run the tests on a system
+where no other bitcoind processes are running.
+
+On Linux, the test_framework will warn if there is another
+bitcoind process running when the tests are started.
+
+If there are zombie bitcoind processes after test failure, you can kill them
+by running the following commands. **Note that these commands will kill all
+bitcoind processes running on the system, so should not be used if any non-test
+bitcoind processes are being run.**
+
+```bash
+killall bitcoind
```
- -h, --help show this help message and exit
- --nocleanup Leave bitcoinds and test.* datadir on exit or error
- --noshutdown Don't stop bitcoinds after the test execution
- --srcdir=SRCDIR Source directory containing bitcoind/bitcoin-cli
- (default: ../../src)
- --tmpdir=TMPDIR Root directory for datadirs
- --tracerpc Print out all RPC calls as they are made
- --coveragedir=COVERAGEDIR
- Write tested RPC commands into this directory
-```
-If you set the environment variable `PYTHON_DEBUG=1` you will get some debug
-output (example: `PYTHON_DEBUG=1 test/functional/test_runner.py wallet`).
+or
+
+```bash
+pkill -9 bitcoind
+```
-A 200-block -regtest blockchain and wallets for four nodes
-is created the first time a regression test is run and
-is stored in the cache/ directory. Each node has 25 mature
-blocks (25*50=1250 BTC) in its wallet.
-After the first run, the cache/ blockchain and wallets are
-copied into a temporary directory and used as the initial
-test state.
+##### Data directory cache
-If you get into a bad state, you should be able
-to recover with:
+A pre-mined blockchain with 200 blocks is generated the first time a
+functional test is run and is stored in test/cache. This speeds up
+test startup times since new blockchains don't need to be generated for
+each test. However, the cache may get into a bad state, in which case
+tests will fail. If this happens, remove the cache directory (and make
+sure bitcoind processes are stopped as above):
```bash
rm -rf cache
killall bitcoind
```
-Util tests
-----------
+##### Test logging
+
+The tests contain logging at different levels (debug, info, warning, etc). By
+default:
+
+- when run through the test_runner harness, *all* logs are written to
+ `test_framework.log` and no logs are output to the console.
+- when run directly, *all* logs are written to `test_framework.log` and INFO
+ level and above are output to the console.
+- when run on Travis, no logs are output to the console. However, if a test
+ fails, the `test_framework.log` and bitcoind `debug.log`s will all be dumped
+ to the console to help troubleshooting.
+
+To change the level of logs output to the console, use the `-l` command line
+argument.
+
+`test_framework.log` and bitcoind `debug.log`s can be combined into a single
+aggregate log by running the `combine_logs.py` script. The output can be plain
+text, colorized text or html. For example:
+
+```
+combine_logs.py -c <test data directory> | less -r
+```
+
+will pipe the colorized logs from the test into less.
+
+Use `--tracerpc` to trace out all the RPC calls and responses to the console. For
+some tests (eg any that use `submitblock` to submit a full block over RPC),
+this can result in a lot of screen output.
+
+By default, the test data directory will be deleted after a successful run.
+Use `--nocleanup` to leave the test data directory intact. The test data
+directory is never deleted after a failed test.
+
+##### Attaching a debugger
+
+A python debugger can be attached to tests at any point. Just add the line:
+
+```py
+import pdb; pdb.set_trace()
+```
+
+anywhere in the test. You will then be able to inspect variables, as well as
+call methods that interact with the bitcoind nodes-under-test.
+
+### Util tests
Util tests can be run locally by running `test/util/bitcoin-util-test.py`.
Use the `-v` option for verbose output.
-Writing functional tests
-========================
+# Writing functional tests
You are encouraged to write functional tests for new or existing features.
Further information about the functional test framework and individual
diff --git a/test/functional/README.md b/test/functional/README.md
index e6c4849702..96fe0becce 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -1,108 +1,154 @@
-Regression tests
-================
+# Functional tests
-### [test_framework/authproxy.py](test_framework/authproxy.py)
-Taken from the [python-bitcoinrpc repository](https://github.com/jgarzik/python-bitcoinrpc).
+### Writing Functional Tests
-### [test_framework/test_framework.py](test_framework/test_framework.py)
-Base class for new regression tests.
+#### Example test
-### [test_framework/util.py](test_framework/util.py)
-Generally useful functions.
+[example_test.py](example_test.py) is a heavily commented example of a test case that uses both
+the RPC and P2P interfaces. If you are writing your first test, copy that file
+and modify it to fit your needs.
-### [test_framework/mininode.py](test_framework/mininode.py)
-Basic code to support p2p connectivity to a bitcoind.
+#### Coverage
-### [test_framework/comptool.py](test_framework/comptool.py)
-Framework for comparison-tool style, p2p tests.
+Running `test_runner.py` with the `--coverage` argument tracks which RPCs are
+called by the tests and prints a report of uncovered RPCs in the summary. This
+can be used (along with the `--extended` argument) to find out which RPCs we
+don't have test cases for.
-### [test_framework/script.py](test_framework/script.py)
-Utilities for manipulating transaction scripts (originally from python-bitcoinlib)
+#### Style guidelines
-### [test_framework/blockstore.py](test_framework/blockstore.py)
-Implements disk-backed block and tx storage.
+- Where possible, try to adhere to [PEP-8 guidelines](https://www.python.org/dev/peps/pep-0008/)
+- Use a python linter like flake8 before submitting PRs to catch common style
+ nits (eg trailing whitespace, unused imports, etc)
+- Avoid wildcard imports where possible
+- Use a module-level docstring to describe what the test is testing, and how it
+ is testing it.
+- When subclassing the BitcoinTestFramework, place overrides for the
+  `__init__()` and `setup_xxxx()` methods at the top of the subclass, then
+  locally-defined helper methods, then `run_test()` (see the sketch after this list).
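
A minimal skeleton following that ordering might look like the sketch below. It is illustrative only; `SkeletonTest` is not a test added by this change:

```python
#!/usr/bin/env python3
"""Test X behaviour.

Describe here what the test checks and how it checks it.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class SkeletonTest(BitcoinTestFramework):
    # Overrides for __init__() and setup_xxxx() go first...
    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        self.setup_clean_chain = True

    # ...then locally-defined helper methods...
    def get_height(self):
        return self.nodes[0].getblockcount()

    # ...and run_test() last.
    def run_test(self):
        assert_equal(self.get_height(), 0)


if __name__ == '__main__':
    SkeletonTest().main()
```
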
-### [test_framework/key.py](test_framework/key.py)
-Wrapper around OpenSSL EC_Key (originally from python-bitcoinlib)
+#### General test-writing advice
-### [test_framework/bignum.py](test_framework/bignum.py)
-Helpers for script.py
+- Set `self.num_nodes` to the minimum number of nodes necessary for the test.
+ Having additional unrequired nodes adds to the execution time of the test as
+ well as memory/CPU/disk requirements (which is important when running tests in
+ parallel or on Travis).
+- Avoid stop-starting the nodes multiple times during the test if possible. A
+ stop-start takes several seconds, so doing it several times blows up the
+ runtime of the test.
+- Set the `self.setup_clean_chain` variable in `__init__()` to control whether
+ or not to use the cached data directories. The cached data directories
+ contain a 200-block pre-mined blockchain and wallets for four nodes. Each node
+ has 25 mature blocks (25x50=1250 BTC) in its wallet.
+- When calling RPCs with lots of arguments, consider using named keyword
+ arguments instead of positional arguments to make the intent of the call
+ clear to readers.
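
As an illustration of the last bullet above, the two calls below hit the same `sendtoaddress` RPC, but the keyword form documents itself. Here `node` stands for one of the framework's node objects, and the keyword names are the RPC's named-argument parameters:

```python
addr = node.getnewaddress()

# Positional: a reader has to remember which argument is which.
node.sendtoaddress(addr, 0.1, "", "", True)

# Named: the intent of each argument is explicit.
node.sendtoaddress(address=addr, amount=0.1, subtractfeefromamount=True)
```
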
-### [test_framework/blocktools.py](test_framework/blocktools.py)
-Helper functions for creating blocks and transactions.
+#### RPC and P2P definitions
-P2P test design notes
----------------------
+Test writers may find it helpful to refer to the definitions for the RPC and
+P2P messages. These can be found in the following source files:
-## Mininode
+- `/src/rpc/*` for RPCs
+- `/src/wallet/rpc*` for wallet RPCs
+- `ProcessMessage()` in `/src/net_processing.cpp` for parsing P2P messages
-* ```mininode.py``` contains all the definitions for objects that pass
-over the network (```CBlock```, ```CTransaction```, etc, along with the network-level
-wrappers for them, ```msg_block```, ```msg_tx```, etc).
+#### Using the P2P interface
-* P2P tests have two threads. One thread handles all network communication
+- `mininode.py` contains all the definitions for objects that pass
+over the network (`CBlock`, `CTransaction`, etc, along with the network-level
+wrappers for them, `msg_block`, `msg_tx`, etc).
+
+- P2P tests have two threads. One thread handles all network communication
with the bitcoind(s) being tested (using python's asyncore package); the other
implements the test logic.
-* ```NodeConn``` is the class used to connect to a bitcoind. If you implement
-a callback class that derives from ```NodeConnCB``` and pass that to the
-```NodeConn``` object, your code will receive the appropriate callbacks when
+- `NodeConn` is the class used to connect to a bitcoind. If you implement
+a callback class that derives from `NodeConnCB` and pass that to the
+`NodeConn` object, your code will receive the appropriate callbacks when
events of interest arrive.
-* You can pass the same handler to multiple ```NodeConn```'s if you like, or pass
-different ones to each -- whatever makes the most sense for your test.
-
-* Call ```NetworkThread.start()``` after all ```NodeConn``` objects are created to
+- Call `NetworkThread.start()` after all `NodeConn` objects are created to
start the networking thread. (Continue with the test logic in your existing
thread.)
-* RPC calls are available in p2p tests.
+- The P2P interface can be used to write tests where specific P2P protocol behavior is tested.
+Example tests are `p2p-accept-block.py` and `p2p-compactblocks.py`.
-* Can be used to write free-form tests, where specific p2p-protocol behavior
-is tested. Examples: ```p2p-accept-block.py```, ```p2p-compactblocks.py```.
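
These pieces fit together roughly as in the sketch below, which condenses the flow that `example_test.py` (added later in this diff) walks through in full. The `NodeConn`/`NodeConnCB`/`NetworkThread` usage mirrors that file; the `on_inv` callback and its `message.inv` field are assumed from `mininode.py`:

```python
from test_framework.mininode import NetworkThread, NodeConn, NodeConnCB
from test_framework.util import p2p_port

class MyP2PNode(NodeConnCB):
    """Subclass NodeConnCB and override on_*() callbacks for custom behaviour."""
    def on_inv(self, conn, message):
        self.last_inv_items = message.inv   # stash state for the test thread to inspect

# Inside a test's run_test(), with self.nodes[0] already running:
#   handler = MyP2PNode()
#   conn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], handler)
#   handler.add_connection(conn)
#   NetworkThread().start()       # start the networking thread after all NodeConns exist
#   handler.wait_for_verack()     # connection is fully up; continue the test logic here
```
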
+#### Comptool
-## Comptool
+- Comptool is a testing framework for writing tests that compare the block/tx acceptance
+behavior of a bitcoind against 1 or more other bitcoind instances. It should not be used
+to write static tests with known outcomes, since that type of test is easier to write and
+maintain using the standard BitcoinTestFramework.
-* Testing framework for writing tests that compare the block/tx acceptance
-behavior of a bitcoind against 1 or more other bitcoind instances, or against
-known outcomes, or both.
-
-* Set the ```num_nodes``` variable (defined in ```ComparisonTestFramework```) to start up
-1 or more nodes. If using 1 node, then ```--testbinary``` can be used as a command line
+- Set the `num_nodes` variable (defined in `ComparisonTestFramework`) to start up
+1 or more nodes. If using 1 node, then `--testbinary` can be used as a command line
option to change the bitcoind binary used by the test. If using 2 or more nodes,
-then ```--refbinary``` can be optionally used to change the bitcoind that will be used
+then `--refbinary` can be optionally used to change the bitcoind that will be used
on nodes 2 and up.
-* Implement a (generator) function called ```get_tests()``` which yields ```TestInstance```s.
-Each ```TestInstance``` consists of:
- - a list of ```[object, outcome, hash]``` entries
- * ```object``` is a ```CBlock```, ```CTransaction```, or
- ```CBlockHeader```. ```CBlock```'s and ```CTransaction```'s are tested for
- acceptance. ```CBlockHeader```s can be used so that the test runner can deliver
+- Implement a (generator) function called `get_tests()` which yields `TestInstance`s.
+Each `TestInstance` consists of:
+ - a list of `[object, outcome, hash]` entries
+ * `object` is a `CBlock`, `CTransaction`, or
+ `CBlockHeader`. `CBlock`'s and `CTransaction`'s are tested for
+ acceptance. `CBlockHeader`s can be used so that the test runner can deliver
complete headers-chains when requested from the bitcoind, to allow writing
tests where blocks can be delivered out of order but still processed by
headers-first bitcoind's.
- * ```outcome``` is ```True```, ```False```, or ```None```. If ```True```
- or ```False```, the tip is compared with the expected tip -- either the
+ * `outcome` is `True`, `False`, or `None`. If `True`
+ or `False`, the tip is compared with the expected tip -- either the
block passed in, or the hash specified as the optional 3rd entry. If
- ```None``` is specified, then the test will compare all the bitcoind's
+ `None` is specified, then the test will compare all the bitcoind's
being tested to see if they all agree on what the best tip is.
- * ```hash``` is the block hash of the tip to compare against. Optional to
+ * `hash` is the block hash of the tip to compare against. Optional to
specify; if left out then the hash of the block passed in will be used as
the expected tip. This allows for specifying an expected tip while testing
the handling of either invalid blocks or blocks delivered out of order,
which complete a longer chain.
- - ```sync_every_block```: ```True/False```. If ```False```, then all blocks
+ - `sync_every_block`: `True/False`. If `False`, then all blocks
are inv'ed together, and the test runner waits until the node receives the
last one, and tests only the last block for tip acceptance using the
- outcome and specified tip. If ```True```, then each block is tested in
+ outcome and specified tip. If `True`, then each block is tested in
sequence and synced (this is slower when processing many blocks).
- - ```sync_every_transaction```: ```True/False```. Analogous to
- ```sync_every_block```, except if the outcome on the last tx is "None",
+ - `sync_every_transaction`: `True/False`. Analogous to
+ `sync_every_block`, except if the outcome on the last tx is "None",
then the contents of the entire mempool are compared across all bitcoind
- connections. If ```True``` or ```False```, then only the last tx's
+ connections. If `True` or `False`, then only the last tx's
acceptance is tested against the given outcome.
-* For examples of tests written in this framework, see
- ```invalidblockrequest.py``` and ```p2p-fullblocktest.py```.
+- For examples of tests written in this framework, see
+ `invalidblockrequest.py` and `p2p-fullblocktest.py`.
+
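
A bare-bones comparison test built on these pieces might look like the following sketch. It assumes the `TestManager`/`TestInstance` API from `test_framework/comptool.py` and the block-building helpers used in `example_test.py`; the test class itself is hypothetical:

```python
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import NetworkThread

class ExampleComparisonTest(ComparisonTestFramework):
    def __init__(self):
        super().__init__()
        self.num_nodes = 1

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()
        test.run()                 # drives get_tests() below

    def get_tests(self):
        # Build one valid block on the current tip and expect it to be accepted.
        tip = int(self.nodes[0].getbestblockhash(), 16)
        height = self.nodes[0].getblockcount() + 1
        block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        yield TestInstance([[block, True]])

if __name__ == '__main__':
    ExampleComparisonTest().main()
```
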
+### test-framework modules
+
+#### [test_framework/authproxy.py](test_framework/authproxy.py)
+Taken from the [python-bitcoinrpc repository](https://github.com/jgarzik/python-bitcoinrpc).
+
+#### [test_framework/test_framework.py](test_framework/test_framework.py)
+Base class for functional tests.
+
+#### [test_framework/util.py](test_framework/util.py)
+Generally useful functions.
+
+#### [test_framework/mininode.py](test_framework/mininode.py)
+Basic code to support P2P connectivity to a bitcoind.
+
+#### [test_framework/comptool.py](test_framework/comptool.py)
+Framework for comparison-tool style P2P tests.
+
+#### [test_framework/script.py](test_framework/script.py)
+Utilities for manipulating transaction scripts (originally from python-bitcoinlib)
+
+#### [test_framework/blockstore.py](test_framework/blockstore.py)
+Implements disk-backed block and tx storage.
+
+#### [test_framework/key.py](test_framework/key.py)
+Wrapper around OpenSSL EC_Key (originally from python-bitcoinlib)
+
+#### [test_framework/bignum.py](test_framework/bignum.py)
+Helpers for script.py
+
+#### [test_framework/blocktools.py](test_framework/blocktools.py)
+Helper functions for creating blocks and transactions.
diff --git a/test/functional/blockchain.py b/test/functional/blockchain.py
index e205c6400c..eeef05efd2 100755
--- a/test/functional/blockchain.py
+++ b/test/functional/blockchain.py
@@ -18,6 +18,7 @@ Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
+import http.client
import subprocess
from test_framework.test_framework import BitcoinTestFramework
@@ -28,6 +29,7 @@ from test_framework.util import (
assert_is_hex_string,
assert_is_hash_string,
bitcoind_processes,
+ BITCOIND_PROC_WAIT_TIMEOUT,
)
@@ -140,9 +142,12 @@ class BlockchainTest(BitcoinTestFramework):
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: bitcoind_processes[0].wait(timeout=3))
- self.nodes[0].generate(1)
+ try:
+ self.nodes[0].generate(1)
+ except (ConnectionError, http.client.BadStatusLine):
+ pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
- bitcoind_processes[0].wait(timeout=3)
+ bitcoind_processes[0].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
self.nodes[0] = self.start_node(0, self.options.tmpdir)
assert_equal(self.nodes[0].getblockcount(), 207)
diff --git a/test/functional/dbcrash.py b/test/functional/dbcrash.py
new file mode 100755
index 0000000000..4a10743f04
--- /dev/null
+++ b/test/functional/dbcrash.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test recovery from a crash during chainstate writing."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import *
+from test_framework.mininode import *
+import random
+try:
+ import http.client as httplib
+except ImportError:
+ import httplib
+import errno
+
+'''
+Test structure:
+
+- 4 nodes
+ * node0, node1, and node2 will have different dbcrash ratios, and different
+ dbcache sizes
+ * node3 will be a regular node, with no crashing.
+ * The nodes will not connect to each other.
+
+- use default test framework starting chain. initialize starting_tip_height to
+ tip height.
+
+- Main loop:
+ * generate lots of transactions on node3, enough to fill up a block.
+ * uniformly randomly pick a tip height from starting_tip_height to
+ tip_height; with probability 1/(height_difference+4), invalidate this block.
+ * mine enough blocks to overtake tip_height at start of loop.
+ * for each node in [node0,node1,node2]:
+ - for each mined block:
+ * submit block to node
+ * if node crashed on/after submitting:
+ - restart until recovery succeeds
+ - check that utxo matches node3 using gettxoutsetinfo
+'''
+
+class ChainstateWriteCrashTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
+ # Set -rpcservertimeout=900 to reduce socket disconnects in this
+ # long-running test
+ self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900"]
+
+ # Set different crash ratios and cache sizes. Note that not all of
+ # -dbcache goes to pcoinsTip.
+ self.node0_args = ["-dbcrashratio=8", "-dbcache=4", "-dbbatchsize=200000"] + self.base_args
+ self.node1_args = ["-dbcrashratio=16", "-dbcache=8", "-dbbatchsize=200000"] + self.base_args
+ self.node2_args = ["-dbcrashratio=24", "-dbcache=16", "-dbbatchsize=200000"] + self.base_args
+
+        # Node3 is a normal node with default args, except it will mine full blocks
+ self.node3_args = ["-blockmaxweight=4000000"]
+ self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
+
+ # We'll track some test coverage statistics
+ self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
+ self.crashed_on_restart = 0 # Track count of crashes during recovery
+
+ def setup_network(self):
+ self.setup_nodes()
+ # Leave them unconnected, we'll use submitblock directly in this test
+
+ # Starts up a given node id, waits for the tip to reach the given block
+ # hash, and calculates the utxo hash. Exceptions on startup should
+ # indicate node crash (due to -dbcrashratio), in which case we try again.
+ # Give up after 60 seconds.
+ # Returns the utxo hash of the given node.
+ def restart_node(self, node_index, expected_tip):
+ time_start = time.time()
+ while time.time() - time_start < 60:
+ try:
+ # Any of these RPC calls could throw due to node crash
+ self.nodes[node_index] = self.start_node(node_index, self.options.tmpdir, self.extra_args[node_index])
+ self.nodes[node_index].waitforblock(expected_tip)
+ utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
+ return utxo_hash
+ except:
+ # An exception here should mean the node is about to crash.
+ # If bitcoind exits, then try again. wait_for_node_exit()
+ # should raise an exception if bitcoind doesn't exit.
+ wait_for_node_exit(node_index, timeout=10)
+ self.crashed_on_restart += 1
+ time.sleep(1)
+
+ # If we got here, bitcoind isn't coming back up on restart. Could be a
+ # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio --
+ # perhaps we generated a test case that blew up our cache?
+ # TODO: If this happens a lot, we should try to restart without -dbcrashratio
+ # and make sure that recovery happens.
+        raise AssertionError("Unable to successfully restart node %d in allotted time" % node_index)
+
+ # Try submitting a block to the given node.
+ # Catch any exceptions that indicate the node has crashed.
+ # Returns true if the block was submitted successfully; false otherwise.
+ def submit_block_catch_error(self, node_index, block):
+ try:
+ self.nodes[node_index].submitblock(block)
+ return True
+ except (httplib.CannotSendRequest, httplib.RemoteDisconnected) as e:
+ self.log.debug("node %d submitblock raised exception: %s", node_index, e)
+ return False
+ except OSError as e:
+ self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
+ if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
+ # The node has likely crashed
+ return False
+ else:
+ # Unexpected exception, raise
+ raise
+
+ # Use submitblock to sync node3's chain with the other nodes
+ # If submitblock fails, restart the node and get the new utxo hash.
+ def sync_node3blocks(self, block_hashes):
+ # If any nodes crash while updating, we'll compare utxo hashes to
+ # ensure recovery was successful.
+ node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
+
+ # Retrieve all the blocks from node3
+ blocks = []
+ for block_hash in block_hashes:
+ blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
+
+ # Deliver each block to each other node
+ for i in range(3):
+ nodei_utxo_hash = None
+ self.log.debug("Syncing blocks to node %d", i)
+ for (block_hash, block) in blocks:
+ # Get the block from node3, and submit to node_i
+ self.log.debug("submitting block %s", block_hash)
+ if not self.submit_block_catch_error(i, block):
+ # TODO: more carefully check that the crash is due to -dbcrashratio
+ # (change the exit code perhaps, and check that here?)
+ wait_for_node_exit(i, timeout=30)
+ self.log.debug("Restarting node %d after block hash %s", i, block_hash)
+ nodei_utxo_hash = self.restart_node(i, block_hash)
+ assert nodei_utxo_hash is not None
+ self.restart_counts[i] += 1
+ else:
+ # Clear it out after successful submitblock calls -- the cached
+ # utxo hash will no longer be correct
+ nodei_utxo_hash = None
+
+ # Check that the utxo hash matches node3's utxo set
+ # NOTE: we only check the utxo set if we had to restart the node
+ # after the last block submitted:
+ # - checking the utxo hash causes a cache flush, which we don't
+ # want to do every time; so
+ # - we only update the utxo cache after a node restart, since flushing
+ # the cache is a no-op at that point
+ if nodei_utxo_hash is not None:
+ self.log.debug("Checking txoutsetinfo matches for node %d", i)
+ assert_equal(nodei_utxo_hash, node3_utxo_hash)
+
+ # Verify that the utxo hash of each node matches node3.
+ # Restart any nodes that crash while querying.
+ def verify_utxo_hash(self):
+ node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
+ self.log.info("Verifying utxo hash matches for all nodes")
+
+ for i in range(3):
+ try:
+ nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
+ except OSError:
+ # probably a crash on db flushing
+ nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
+ assert_equal(nodei_utxo_hash, node3_utxo_hash)
+
+
+ def generate_small_transactions(self, node, count, utxo_list):
+ FEE = 1000 # TODO: replace this with node relay fee based calculation
+ num_transactions = 0
+ random.shuffle(utxo_list)
+ while len(utxo_list) >= 2 and num_transactions < count:
+ tx = CTransaction()
+ input_amount = 0
+ for i in range(2):
+ utxo = utxo_list.pop()
+ tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
+ input_amount += int(utxo['amount']*COIN)
+ output_amount = (input_amount - FEE)//3
+
+ if output_amount <= 0:
+ # Sanity check -- if we chose inputs that are too small, skip
+ continue
+
+ for i in range(3):
+ tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
+
+ # Sign and send the transaction to get into the mempool
+ tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
+ node.sendrawtransaction(tx_signed_hex)
+ num_transactions += 1
+
+ def run_test(self):
+
+ # Start by creating a lot of utxos on node3
+ initial_height = self.nodes[3].getblockcount()
+ utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
+ self.log.info("Prepped %d utxo entries", len(utxo_list))
+
+ # Sync these blocks with the other nodes
+ block_hashes_to_sync = []
+ for height in range(initial_height+1, self.nodes[3].getblockcount()+1):
+ block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
+
+ self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
+ # Syncing the blocks could cause nodes to crash, so the test begins here.
+ self.sync_node3blocks(block_hashes_to_sync)
+
+ starting_tip_height = self.nodes[3].getblockcount()
+
+ # Main test loop:
+ # each time through the loop, generate a bunch of transactions,
+ # and then either mine a single new block on the tip, or some-sized reorg.
+ for i in range(40):
+ self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
+ # Generate a bunch of small-ish transactions
+ self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
+ # Pick a random block between current tip, and starting tip
+ current_height = self.nodes[3].getblockcount()
+ random_height = random.randint(starting_tip_height, current_height)
+ self.log.debug("At height %d, considering height %d", current_height, random_height)
+ if random_height > starting_tip_height:
+ # Randomly reorg from this point with some probability (1/4 for
+ # tip, 1/5 for tip-1, ...)
+ if random.random() < 1.0/(current_height + 4 - random_height):
+ self.log.debug("Invalidating block at height %d", random_height)
+ self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
+
+ # Now generate new blocks until we pass the old tip height
+ self.log.debug("Mining longer tip")
+ block_hashes = self.nodes[3].generate(current_height+1-self.nodes[3].getblockcount())
+ self.log.debug("Syncing %d new blocks...", len(block_hashes))
+ self.sync_node3blocks(block_hashes)
+ utxo_list = self.nodes[3].listunspent()
+ self.log.debug("Node3 utxo count: %d", len(utxo_list))
+
+ # Check that the utxo hashes agree with node3
+ # Useful side effect: each utxo cache gets flushed here, so that we
+ # won't get crashes on shutdown at the end of the test.
+ self.verify_utxo_hash()
+
+ # Check the test coverage
+ self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
+
+ # If no nodes were restarted, we didn't test anything.
+ assert self.restart_counts != [0, 0, 0]
+
+ # Make sure we tested the case of crash-during-recovery.
+ assert self.crashed_on_restart > 0
+
+ # Warn if any of the nodes escaped restart.
+ for i in range(3):
+ if self.restart_counts[i] == 0:
+ self.log.warn("Node %d never crashed during utxo flush!", i)
+
+if __name__ == "__main__":
+ ChainstateWriteCrashTest().main()
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
new file mode 100755
index 0000000000..1ba5f756cd
--- /dev/null
+++ b/test/functional/example_test.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""An example functional test
+
+The module-level docstring should include a high-level description of
+what the test is doing. It's the first thing people see when they open
+the file and should give the reader information about *what* the test
+is testing and *how* it's being tested
+"""
+# Imports should be in PEP8 ordering (std library first, then third party
+# libraries then local imports).
+from collections import defaultdict
+
+# Avoid wildcard * imports if possible
+from test_framework.blocktools import (create_block, create_coinbase)
+from test_framework.mininode import (
+ CInv,
+ NetworkThread,
+ NodeConn,
+ NodeConnCB,
+ mininode_lock,
+ msg_block,
+ msg_getdata,
+ wait_until,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ connect_nodes,
+ p2p_port,
+)
+
+# NodeConnCB is a class containing callbacks to be executed when a P2P
+# message is received from the node-under-test. Subclass NodeConnCB and
+# override the on_*() methods if you need custom behaviour.
+class BaseNode(NodeConnCB):
+ def __init__(self):
+ """Initialize the NodeConnCB
+
+        Used to initialize custom properties for the Node that aren't
+ included by default in the base class. Be aware that the NodeConnCB
+ base class already stores a counter for each P2P message type and the
+ last received message of each type, which should be sufficient for the
+ needs of most tests.
+
+ Call super().__init__() first for standard initialization and then
+ initialize custom properties."""
+ super().__init__()
+ # Stores a dictionary of all blocks received
+ self.block_receive_map = defaultdict(int)
+
+ def on_block(self, conn, message):
+ """Override the standard on_block callback
+
+ Store the hash of a received block in the dictionary."""
+ message.block.calc_sha256()
+ self.block_receive_map[message.block.sha256] += 1
+
+def custom_function():
+ """Do some custom behaviour
+
+ If this function is more generally useful for other tests, consider
+ moving it to a module in test_framework."""
+ # self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework
+ pass
+
+class ExampleTest(BitcoinTestFramework):
+ # Each functional test is a subclass of the BitcoinTestFramework class.
+
+ # Override the __init__(), add_options(), setup_chain(), setup_network()
+ # and setup_nodes() methods to customize the test setup as required.
+
+ def __init__(self):
+ """Initialize the test
+
+ Call super().__init__() first, and then override any test parameters
+ for your individual test."""
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+ # Use self.extra_args to change command-line arguments for the nodes
+ self.extra_args = [[], ["-logips"], []]
+
+ # self.log.info("I've finished __init__") # Oops! Can't run self.log before run_test()
+
+ # Use add_options() to add specific command-line options for your test.
+ # In practice this is not used very much, since the tests are mostly written
+ # to be run in automated environments without command-line options.
+ # def add_options()
+ # pass
+
+ # Use setup_chain() to customize the node data directories. In practice
+ # this is not used very much since the default behaviour is almost always
+ # fine
+ # def setup_chain():
+ # pass
+
+ def setup_network(self):
+ """Setup the test network topology
+
+ Often you won't need to override this, since the standard network topology
+ (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
+
+ If you do override this method, remember to start the nodes, assign
+ them to self.nodes, connect them and then sync."""
+
+ self.setup_nodes()
+
+ # In this test, we're not connecting node2 to node0 or node1. Calls to
+ # sync_all() should not include node2, since we're not expecting it to
+ # sync.
+ connect_nodes(self.nodes[0], 1)
+        self.sync_all([self.nodes[0:2]])
+
+ # Use setup_nodes() to customize the node start behaviour (for example if
+ # you don't want to start all nodes at the start of the test).
+ # def setup_nodes():
+ # pass
+
+ def custom_method(self):
+ """Do some custom behaviour for this test
+
+ Define it in a method here because you're going to use it repeatedly.
+ If you think it's useful in general, consider moving it to the base
+ BitcoinTestFramework class so other tests can use it."""
+
+ self.log.info("Running custom_method")
+
+ def run_test(self):
+ """Main test logic"""
+
+ # Create a P2P connection to one of the nodes
+ node0 = BaseNode()
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
+ node0.add_connection(connections[0])
+
+ # Start up network handling in another thread. This needs to be called
+ # after the P2P connections have been created.
+ NetworkThread().start()
+ # wait_for_verack ensures that the P2P connection is fully up.
+ node0.wait_for_verack()
+
+ # Generating a block on one of the nodes will get us out of IBD
+ blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
+        self.sync_all([self.nodes[0:2]])
+
+ # Notice above how we called an RPC by calling a method with the same
+ # name on the node object. Notice also how we used a keyword argument
+ # to specify a named RPC argument. Neither of those are defined on the
+ # node object. Instead there's some __getattr__() magic going on under
+ # the covers to dispatch unrecognised attribute calls to the RPC
+ # interface.
+
+ # Logs are nice. Do plenty of them. They can be used in place of comments for
+ # breaking the test into sub-sections.
+ self.log.info("Starting test!")
+
+ self.log.info("Calling a custom function")
+ custom_function()
+
+ self.log.info("Calling a custom method")
+ self.custom_method()
+
+ self.log.info("Create some blocks")
+ self.tip = int(self.nodes[0].getbestblockhash(), 16)
+ self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
+
+ height = 1
+
+ for i in range(10):
+ # Use the mininode and blocktools functionality to manually build a block
+ # Calling the generate() rpc is easier, but this allows us to exactly
+ # control the blocks and transactions.
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ block.solve()
+ block_message = msg_block(block)
+            # send_message() is used to send a P2P message to the node over our NodeConn connection
+ node0.send_message(block_message)
+ self.tip = block.sha256
+ blocks.append(self.tip)
+ self.block_time += 1
+ height += 1
+
+ self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
+ self.nodes[1].waitforblockheight(11)
+
+ self.log.info("Connect node2 and node1")
+ connect_nodes(self.nodes[1], 2)
+
+ self.log.info("Add P2P connection to node2")
+ node2 = BaseNode()
+ connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
+ node2.add_connection(connections[1])
+ node2.wait_for_verack()
+
+        self.log.info("Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us")
+
+ for block in blocks:
+ getdata_request = msg_getdata()
+ getdata_request.inv.append(CInv(2, block))
+ node2.send_message(getdata_request)
+
+ # wait_until() will loop until a predicate condition is met. Use it to test properties of the
+ # NodeConnCB objects.
+ assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)
+
+ self.log.info("Check that each block was received only once")
+ # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
+ # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
+ # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
+ with mininode_lock:
+ for block in node2.block_receive_map.values():
+ assert_equal(block, 1)
+
+if __name__ == '__main__':
+ ExampleTest().main()
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index c7fd44b81c..ac0fbe61f8 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -381,8 +381,10 @@ class ComparisonTestFramework(BitcoinTestFramework):
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
+ extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
+ if hasattr(self, "extra_args"):
+ extra_args = self.extra_args
self.nodes = self.start_nodes(
- self.num_nodes, self.options.tmpdir,
- extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes,
+ self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
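
With this change, a comparison test can override the per-node arguments by defining `self.extra_args` in its `__init__()`. A hypothetical subclass might look like this:

```python
from test_framework.test_framework import ComparisonTestFramework

class MyComparisonTest(ComparisonTestFramework):
    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        # Replaces the default ['-whitelist=127.0.0.1'] arguments for every node.
        self.extra_args = [['-whitelist=127.0.0.1', '-blockmaxweight=4000000']] * self.num_nodes
```
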
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index fa6388bf96..2a4f3104aa 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -249,6 +249,8 @@ def wait_for_bitcoind_start(process, datadir, i, rpchost=None):
raise
time.sleep(0.25)
+def wait_for_node_exit(node_index, timeout):
+ bitcoind_processes[node_index].wait(timeout)
def _start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""Start a bitcoind and return RPC connection to it
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 0dca318af8..54f625514b 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -113,6 +113,7 @@ BASE_SCRIPTS= [
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
+ 'uptime.py',
]
EXTENDED_SCRIPTS = [
@@ -124,6 +125,7 @@ EXTENDED_SCRIPTS = [
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
+ 'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
@@ -138,6 +140,7 @@ EXTENDED_SCRIPTS = [
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
+ 'example_test.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
diff --git a/test/functional/uptime.py b/test/functional/uptime.py
new file mode 100755
index 0000000000..b20d6f5cb6
--- /dev/null
+++ b/test/functional/uptime.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the RPC call related to the uptime command.
+
+Test corresponds to code in rpc/server.cpp.
+"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+
+
+class UptimeTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+
+ self.num_nodes = 1
+ self.setup_clean_chain = True
+
+ def run_test(self):
+ self._test_uptime()
+
+ def _test_uptime(self):
+ wait_time = 10
+ self.nodes[0].setmocktime(int(time.time() + wait_time))
+ assert(self.nodes[0].uptime() >= wait_time)
+
+
+if __name__ == '__main__':
+ UptimeTest().main()