-rw-r--r--  doc/build-openbsd.md                   |  72
-rw-r--r--  src/net_processing.cpp                 | 368
-rw-r--r--  src/scheduler.cpp                      |   5
-rw-r--r--  src/scheduler.h                        |   2
-rw-r--r--  src/test/miner_tests.cpp               |  38
-rw-r--r--  src/test/test_bitcoin.cpp              |   6
-rw-r--r--  src/test/txvalidationcache_tests.cpp   |  21
-rw-r--r--  src/validation.cpp                     |  12
-rw-r--r--  src/validationinterface.cpp            |  17
-rw-r--r--  src/validationinterface.h              |  12
-rw-r--r--  src/wallet/test/wallet_tests.cpp       |  17
-rw-r--r--  src/wallet/wallet.cpp                  |   7
-rwxr-xr-x  test/util/bitcoin-util-test.py         |  12
13 files changed, 333 insertions(+), 256 deletions(-)
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index cd1d217b47..0817821221 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -23,47 +23,31 @@ git clone https://github.com/bitcoin/bitcoin.git
See [dependencies.md](dependencies.md) for a complete overview.
-GCC
--------
-
-The default C++ compiler that comes with OpenBSD 6.2 is g++ 4.2.1. This version is old (from 2007), and is not able to compile the current version of Bitcoin Core because it has no C++11 support. We'll install a newer version of GCC:
-
-```bash
- pkg_add g++
- ```
-
- This compiler will not overwrite the system compiler, it will be installed as `egcc` and `eg++` in `/usr/local/bin`.
+**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is
+part of the base image, and while building it is necessary to make sure that this
+compiler is used and not the ancient g++ 4.2.1. This is done by appending
+`CC=cc CXX=c++` to configuration commands. Mixing different compilers
+within the same executable will result in linker errors.
### Building BerkeleyDB
-BerkeleyDB is only necessary for the wallet functionality. To skip this, pass `--disable-wallet` to `./configure`.
+BerkeleyDB is only necessary for the wallet functionality. To skip this, pass
+`--disable-wallet` to `./configure` and skip to the next section.
It is recommended to use Berkeley DB 4.8. You cannot use the BerkeleyDB library
from ports, for the same reason as boost above (g++/libstd++ incompatibility).
If you have to build it yourself, you can use [the installation script included
-in contrib/](contrib/install_db4.sh) like so
+in contrib/](/contrib/install_db4.sh) like so
```shell
-./contrib/install_db4.sh `pwd` CC=egcc CXX=eg++ CPP=ecpp
+./contrib/install_db4.sh `pwd` CC=cc CXX=c++
```
-from the root of the repository.
-
-### Resource limits
-
-The standard ulimit restrictions in OpenBSD are very strict:
-
- data(kbytes) 1572864
+from the root of the repository. Then set `BDB_PREFIX` for the next section:
-This, unfortunately, may no longer be enough to compile some `.cpp` files in the project,
-at least with GCC 4.9.4 (see issue [#6658](https://github.com/bitcoin/bitcoin/issues/6658)).
-If your user is in the `staff` group the limit can be raised with:
-
- ulimit -d 3000000
-
-The change will only affect the current shell and processes spawned by it. To
-make the change system-wide, change `datasize-cur` and `datasize-max` in
-`/etc/login.conf`, and reboot.
+```shell
+export BDB_PREFIX="$PWD/db4"
+```
### Building Bitcoin Core
@@ -79,13 +63,13 @@ Make sure `BDB_PREFIX` is set to the appropriate path from the above steps.
To configure with wallet:
```bash
-./configure --with-gui=no CC=egcc CXX=eg++ CPP=ecpp \
+./configure --with-gui=no CC=cc CXX=c++ \
BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include"
```
To configure without wallet:
```bash
-./configure --disable-wallet --with-gui=no CC=egcc CXX=eg++ CPP=ecpp
+./configure --disable-wallet --with-gui=no CC=cc CXX=c++
```
Build and run the tests:
@@ -94,13 +78,23 @@ gmake # use -jX here for parallelism
gmake check
```
-Clang
-------------------------------
+Resource limits
+-------------------
-```bash
-pkg_add llvm
+If the build runs into out-of-memory errors, the instructions in this section
+might help.
+
+The standard ulimit restrictions in OpenBSD are very strict:
+
+ data(kbytes) 1572864
+
+This, unfortunately, is in some cases not enough to compile some `.cpp` files in the project
+(see issue [#6658](https://github.com/bitcoin/bitcoin/issues/6658)).
+If your user is in the `staff` group the limit can be raised with:
+
+ ulimit -d 3000000
+
+The change will only affect the current shell and processes spawned by it. To
+make the change system-wide, change `datasize-cur` and `datasize-max` in
+`/etc/login.conf`, and reboot.
-./configure --disable-wallet --with-gui=no CC=clang CXX=clang++
-gmake # use -jX here for parallelism
-gmake check
-```
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 20426eaceb..76e82a5ef6 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -51,12 +51,13 @@ struct COrphanTx {
NodeId fromPeer;
int64_t nTimeExpire;
};
-std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
-std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
-void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+static CCriticalSection g_cs_orphans;
+std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
+std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
+void EraseOrphansFor(NodeId peer);
-static size_t vExtraTxnForCompactIt = 0;
-static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
+static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
+static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
@@ -127,7 +128,7 @@ namespace {
int g_outbound_peers_with_protect_from_disconnect = 0;
/** When our tip was last updated. */
- int64_t g_last_tip_update = 0;
+ std::atomic<int64_t> g_last_tip_update(0);
/** Relay map, protected by cs_main. */
typedef std::map<uint256, CTransactionRef> MapRelay;
@@ -631,7 +632,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
// mapOrphanTransactions
//
-void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
if (max_extra_txn <= 0)
@@ -642,7 +643,7 @@ void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_RE
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}
-bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
const uint256& hash = tx->GetHash();
if (mapOrphanTransactions.count(hash))
@@ -675,7 +676,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
return true;
}
-int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
@@ -695,6 +696,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void EraseOrphansFor(NodeId peer)
{
+ LOCK(g_cs_orphans);
int nErased = 0;
std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
@@ -709,8 +711,10 @@ void EraseOrphansFor(NodeId peer)
}
-unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
+ LOCK(g_cs_orphans);
+
unsigned int nEvicted = 0;
static int64_t nNextSweep;
int64_t nNow = GetTime();
@@ -804,7 +808,7 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &schedu
}
void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
- LOCK(cs_main);
+ LOCK(g_cs_orphans);
std::vector<uint256> vOrphanErase;
@@ -971,9 +975,13 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
recentRejects->reset();
}
+ {
+ LOCK(g_cs_orphans);
+ if (mapOrphanTransactions.count(inv.hash)) return true;
+ }
+
return recentRejects->contains(inv.hash) ||
mempool.exists(inv.hash) ||
- mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
}
@@ -1030,180 +1038,198 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma
connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
-void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
+void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensusParams, const CInv& inv, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
- std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
- std::vector<CInv> vNotFound;
- const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
- LOCK(cs_main);
-
- while (it != pfrom->vRecvGetData.end()) {
- // Don't bother if send buffer is too full to respond anyway
- if (pfrom->fPauseSend)
- break;
+ bool send = false;
+ std::shared_ptr<const CBlock> a_recent_block;
+ std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
+ bool fWitnessesPresentInARecentCompactBlock;
+ {
+ LOCK(cs_most_recent_block);
+ a_recent_block = most_recent_block;
+ a_recent_compact_block = most_recent_compact_block;
+ fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
+ }
- const CInv &inv = *it;
+ bool need_activate_chain = false;
+ {
+ LOCK(cs_main);
+ BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+ if (mi != mapBlockIndex.end())
{
- if (interruptMsgProc)
- return;
+ if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
+ mi->second->IsValid(BLOCK_VALID_TREE)) {
+ // If we have the block and all of its parents, but have not yet validated it,
+ // we might be in the middle of connecting it (ie in the unlock of cs_main
+ // before ActivateBestChain but after AcceptBlock).
+ // In this case, we need to run ActivateBestChain prior to checking the relay
+ // conditions below.
+ need_activate_chain = true;
+ }
+ }
+ } // release cs_main before calling ActivateBestChain
+ if (need_activate_chain) {
+ CValidationState dummy;
+ ActivateBestChain(dummy, Params(), a_recent_block);
+ }
- it++;
+ LOCK(cs_main);
+ BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
+ if (mi != mapBlockIndex.end()) {
+ send = BlockRequestAllowed(mi->second, consensusParams);
+ if (!send) {
+ LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
+ }
+ }
+ const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ // disconnect node in case we have reached the outbound limit for serving historical blocks
+ // never disconnect whitelisted nodes
+ if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
+ {
+ LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+
+ //disconnect node
+ pfrom->fDisconnect = true;
+ send = false;
+ }
+ // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
+ if (send && !pfrom->fWhitelisted && (
+ (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
+ )) {
+ LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
+ //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
+ pfrom->fDisconnect = true;
+ send = false;
+ }
+ // Pruned nodes may have deleted the block, so check whether
+ // it's available before trying to send.
+ if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
+ {
+ std::shared_ptr<const CBlock> pblock;
+ if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
+ pblock = a_recent_block;
+ } else {
+ // Send block from disk
+ std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
+ if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
+ assert(!"cannot load block from disk");
+ pblock = pblockRead;
+ }
+ if (inv.type == MSG_BLOCK)
+ connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
+ else if (inv.type == MSG_WITNESS_BLOCK)
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
+ else if (inv.type == MSG_FILTERED_BLOCK)
+ {
+ bool sendMerkleBlock = false;
+ CMerkleBlock merkleBlock;
{
- bool send = false;
- BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
- std::shared_ptr<const CBlock> a_recent_block;
- std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
- bool fWitnessesPresentInARecentCompactBlock;
- {
- LOCK(cs_most_recent_block);
- a_recent_block = most_recent_block;
- a_recent_compact_block = most_recent_compact_block;
- fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
+ LOCK(pfrom->cs_filter);
+ if (pfrom->pfilter) {
+ sendMerkleBlock = true;
+ merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
}
- if (mi != mapBlockIndex.end())
- {
- if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
- mi->second->IsValid(BLOCK_VALID_TREE)) {
- // If we have the block and all of its parents, but have not yet validated it,
- // we might be in the middle of connecting it (ie in the unlock of cs_main
- // before ActivateBestChain but after AcceptBlock).
- // In this case, we need to run ActivateBestChain prior to checking the relay
- // conditions below.
- CValidationState dummy;
- ActivateBestChain(dummy, Params(), a_recent_block);
- }
- send = BlockRequestAllowed(mi->second, consensusParams);
- if (!send) {
- LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
- }
+ }
+ if (sendMerkleBlock) {
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
+ // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
+ // This avoids hurting performance by pointlessly requiring a round-trip
+ // Note that there is currently no way for a node to request any single transactions we didn't send here -
+ // they must either disconnect and retry or request the full block.
+ // Thus, the protocol spec specified allows for us to provide duplicate txn here,
+ // however we MUST always provide at least what the remote peer needs
+ typedef std::pair<unsigned int, uint256> PairType;
+ for (PairType& pair : merkleBlock.vMatchedTxn)
+ connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
+ }
+ // else
+ // no response
+ }
+ else if (inv.type == MSG_CMPCT_BLOCK)
+ {
+ // If a peer is asking for old blocks, we're almost guaranteed
+ // they won't have a useful mempool to match against a compact block,
+ // and we don't feel like constructing the object for them, so
+ // instead we respond with the full, non-compact block.
+ bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
+ int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
+ if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
+ if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
+ } else {
+ CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
- // disconnect node in case we have reached the outbound limit for serving historical blocks
- // never disconnect whitelisted nodes
- if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
- {
- LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+ } else {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
+ }
+ }
- //disconnect node
- pfrom->fDisconnect = true;
- send = false;
- }
- // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
- if (send && !pfrom->fWhitelisted && (
- (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
- )) {
- LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
+ // Trigger the peer node to send a getblocks request for the next batch of inventory
+ if (inv.hash == pfrom->hashContinue)
+ {
+ // Bypass PushInventory, this must send even if redundant,
+ // and we want it right after the last block so they don't
+ // wait for other stuff first.
+ std::vector<CInv> vInv;
+ vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
+ connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
+ pfrom->hashContinue.SetNull();
+ }
+ }
+}
- //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
- pfrom->fDisconnect = true;
- send = false;
- }
- // Pruned nodes may have deleted the block, so check whether
- // it's available before trying to send.
- if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
- {
- std::shared_ptr<const CBlock> pblock;
- if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
- pblock = a_recent_block;
- } else {
- // Send block from disk
- std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
- if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
- assert(!"cannot load block from disk");
- pblock = pblockRead;
- }
- if (inv.type == MSG_BLOCK)
- connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
- else if (inv.type == MSG_WITNESS_BLOCK)
- connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
- else if (inv.type == MSG_FILTERED_BLOCK)
- {
- bool sendMerkleBlock = false;
- CMerkleBlock merkleBlock;
- {
- LOCK(pfrom->cs_filter);
- if (pfrom->pfilter) {
- sendMerkleBlock = true;
- merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
- }
- }
- if (sendMerkleBlock) {
- connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
- // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
- // This avoids hurting performance by pointlessly requiring a round-trip
- // Note that there is currently no way for a node to request any single transactions we didn't send here -
- // they must either disconnect and retry or request the full block.
- // Thus, the protocol spec specified allows for us to provide duplicate txn here,
- // however we MUST always provide at least what the remote peer needs
- typedef std::pair<unsigned int, uint256> PairType;
- for (PairType& pair : merkleBlock.vMatchedTxn)
- connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
- }
- // else
- // no response
- }
- else if (inv.type == MSG_CMPCT_BLOCK)
- {
- // If a peer is asking for old blocks, we're almost guaranteed
- // they won't have a useful mempool to match against a compact block,
- // and we don't feel like constructing the object for them, so
- // instead we respond with the full, non-compact block.
- bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
- int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
- if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
- if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
- } else {
- CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
- }
- } else {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
- }
- }
+void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
+{
+ AssertLockNotHeld(cs_main);
- // Trigger the peer node to send a getblocks request for the next batch of inventory
- if (inv.hash == pfrom->hashContinue)
- {
- // Bypass PushInventory, this must send even if redundant,
- // and we want it right after the last block so they don't
- // wait for other stuff first.
- std::vector<CInv> vInv;
- vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
- connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
- pfrom->hashContinue.SetNull();
- }
- }
- }
- else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
- {
- // Send stream from relay memory
- bool push = false;
- auto mi = mapRelay.find(inv.hash);
- int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
- if (mi != mapRelay.end()) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+ std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
+ std::vector<CInv> vNotFound;
+ const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ {
+ LOCK(cs_main);
+
+ while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
+ if (interruptMsgProc)
+ return;
+ // Don't bother if send buffer is too full to respond anyway
+ if (pfrom->fPauseSend)
+ break;
+
+ const CInv &inv = *it;
+ it++;
+
+ // Send stream from relay memory
+ bool push = false;
+ auto mi = mapRelay.find(inv.hash);
+ int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
+ if (mi != mapRelay.end()) {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+ push = true;
+ } else if (pfrom->timeLastMempoolReq) {
+ auto txinfo = mempool.info(inv.hash);
+ // To protect privacy, do not answer getdata using the mempool when
+ // that TX couldn't have been INVed in reply to a MEMPOOL request.
+ if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
+ connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
push = true;
- } else if (pfrom->timeLastMempoolReq) {
- auto txinfo = mempool.info(inv.hash);
- // To protect privacy, do not answer getdata using the mempool when
- // that TX couldn't have been INVed in reply to a MEMPOOL request.
- if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
- connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
- push = true;
- }
- }
- if (!push) {
- vNotFound.push_back(inv);
}
}
+ if (!push) {
+ vNotFound.push_back(inv);
+ }
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
+ }
+ } // release cs_main
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
- break;
+ if (it != pfrom->vRecvGetData.end()) {
+ const CInv &inv = *it;
+ it++;
+ if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
+ ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc);
}
}
@@ -2008,7 +2034,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
inv.hash = req.blockhash;
pfrom->vRecvGetData.push_back(inv);
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
+ // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
return true;
}
@@ -2101,7 +2127,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
- LOCK(cs_main);
+ LOCK2(cs_main, g_cs_orphans);
bool fMissingInputs = false;
CValidationState state;
@@ -2324,7 +2350,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
bool fBlockReconstructed = false;
{
- LOCK(cs_main);
+ LOCK2(cs_main, g_cs_orphans);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
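Note on the net_processing.cpp change above: the orphan-transaction maps move from `cs_main` to a dedicated `g_cs_orphans` lock, with some functions taking the lock themselves and others annotated so that callers must already hold it. Below is a minimal standalone sketch of that pattern, using `std::mutex` and simplified types rather than Core's `CCriticalSection`/`LOCK` macros; all names in it are illustrative, not Core's API.

```cpp
// Illustrative analogue of the g_cs_orphans split: the orphan map gets its
// own mutex, independent of the main chain lock.
#include <cstdint>
#include <map>
#include <mutex>

static std::mutex g_orphan_mutex;          // stands in for g_cs_orphans
static std::map<uint64_t, int> g_orphans;  // txid -> originating peer (simplified)

// Caller must already hold g_orphan_mutex; Core expresses the same contract
// with EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) so clang's thread-safety
// analysis can check it at compile time.
static bool AddOrphan(uint64_t txid, int peer)
{
    return g_orphans.emplace(txid, peer).second;
}

// Takes the lock itself, so callers (e.g. the peer-disconnect path) no longer
// need cs_main just to clean up orphan state.
void EraseOrphansFor(int peer)
{
    std::lock_guard<std::mutex> lock(g_orphan_mutex);
    for (auto it = g_orphans.begin(); it != g_orphans.end();) {
        if (it->second == peer) {
            it = g_orphans.erase(it);
        } else {
            ++it;
        }
    }
}
```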
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 260f6fa60e..7d5ec14610 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -206,3 +206,8 @@ void SingleThreadedSchedulerClient::EmptyQueue() {
should_continue = !m_callbacks_pending.empty();
}
}
+
+size_t SingleThreadedSchedulerClient::CallbacksPending() {
+ LOCK(m_cs_callbacks_pending);
+ return m_callbacks_pending.size();
+}
diff --git a/src/scheduler.h b/src/scheduler.h
index b99f165343..39ecb849b9 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -108,6 +108,8 @@ public:
// Processes all remaining queue members on the calling thread, blocking until queue is empty
// Must be called after the CScheduler has no remaining processing threads!
void EmptyQueue();
+
+ size_t CallbacksPending();
};
#endif
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index 97c548dbb0..6be717b43c 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -216,7 +216,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
entry.nFee = 11;
entry.nHeight = 11;
- LOCK(cs_main);
fCheckpointsEnabled = false;
// Simple block creation, nothing special yet:
@@ -229,27 +228,32 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i)
{
CBlock *pblock = &pblocktemplate->block; // pointer for convenience
- pblock->nVersion = 1;
- pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
- CMutableTransaction txCoinbase(*pblock->vtx[0]);
- txCoinbase.nVersion = 1;
- txCoinbase.vin[0].scriptSig = CScript();
- txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
- txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
- txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
- txCoinbase.vout[0].scriptPubKey = CScript();
- pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
- if (txFirst.size() == 0)
- baseheight = chainActive.Height();
- if (txFirst.size() < 4)
- txFirst.push_back(pblock->vtx[0]);
- pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
- pblock->nNonce = blockinfo[i].nonce;
+ {
+ LOCK(cs_main);
+ pblock->nVersion = 1;
+ pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
+ CMutableTransaction txCoinbase(*pblock->vtx[0]);
+ txCoinbase.nVersion = 1;
+ txCoinbase.vin[0].scriptSig = CScript();
+ txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
+ txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
+ txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
+ txCoinbase.vout[0].scriptPubKey = CScript();
+ pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
+ if (txFirst.size() == 0)
+ baseheight = chainActive.Height();
+ if (txFirst.size() < 4)
+ txFirst.push_back(pblock->vtx[0]);
+ pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
+ pblock->nNonce = blockinfo[i].nonce;
+ }
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, nullptr));
pblock->hashPrevBlock = pblock->GetHash();
}
+ LOCK(cs_main);
+
// Just to make sure we can still make simple blocks
BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey));
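Note on the miner_tests.cpp change above: the test now holds `cs_main` only while mutating the block template and releases it before `ProcessNewBlock`, which (per the validation.cpp hunk below) asserts the lock is not held. A standalone sketch of that lock-narrowing pattern follows; `std::mutex` stands in for `cs_main` and the helpers are placeholders, not Core functions.

```cpp
// Illustrative lock-narrowing pattern: hold the lock only for shared-state
// mutation, release it before the expensive call that must run lock-free.
#include <mutex>

std::mutex cs_demo;             // stands in for cs_main

void MutateTemplateLocked() {}  // placeholder for the coinbase/merkle-root edits
void SubmitBlockLockFree() {}   // placeholder for ProcessNewBlock, which must
                                // not be called with the lock held

void BuildAndSubmitBlocks(int count)
{
    for (int i = 0; i < count; ++i) {
        {
            std::lock_guard<std::mutex> lock(cs_demo);
            MutateTemplateLocked();
        } // lock released before submission
        SubmitBlockLockFree();
    }
}
```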
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index f52c8ccc21..6bd228a63a 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -69,9 +69,9 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
fs::create_directories(pathTemp);
gArgs.ForceSetArg("-datadir", pathTemp.string());
- // Note that because we don't bother running a scheduler thread here,
- // callbacks via CValidationInterface are unreliable, but that's OK,
- // our unit tests aren't testing multiple parts of the code at once.
+ // We have to run a scheduler thread to prevent ActivateBestChain
+ // from blocking due to queue overrun.
+ threadGroup.create_thread(boost::bind(&CScheduler::serviceQueue, &scheduler));
GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
mempool.setSanityCheck(1.0);
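Note on the test_bitcoin.cpp change above: the test setup now runs a real scheduler service thread so queued validation callbacks actually drain and `ActivateBestChain` cannot block on a full queue. Below is a rough standalone analogue of such a callback-servicing thread, built on `std::thread` and a condition variable rather than Core's `CScheduler`; it is illustrative only.

```cpp
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// Illustrative analogue of a background thread servicing queued callbacks,
// so producers never back up behind an unserviced queue.
class CallbackQueue {
public:
    CallbackQueue() : m_worker([this] { ServiceQueue(); }) {}
    ~CallbackQueue()
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_stop = true;
        }
        m_cv.notify_all();
        m_worker.join();
    }
    // Enqueue a callback for the service thread to run.
    void Add(std::function<void()> callback)
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_queue.push(std::move(callback));
        }
        m_cv.notify_one();
    }
    // Number of callbacks still waiting to run.
    std::size_t CallbacksPending()
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        return m_queue.size();
    }

private:
    void ServiceQueue()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        while (true) {
            m_cv.wait(lock, [this] { return m_stop || !m_queue.empty(); });
            if (m_stop && m_queue.empty()) return;
            std::function<void()> callback = std::move(m_queue.front());
            m_queue.pop();
            lock.unlock();
            callback(); // run outside the queue lock
            lock.lock();
        }
    }

    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::queue<std::function<void()>> m_queue;
    bool m_stop = false;
    std::thread m_worker; // last member: started only after the state above exists
};
```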
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index fe8cb6126e..10acabe0be 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -66,7 +66,6 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
// Test 1: block with both of those transactions should be rejected.
block = CreateAndProcessBlock(spends, scriptPubKey);
- LOCK(cs_main);
BOOST_CHECK(chainActive.Tip()->GetBlockHash() != block.GetHash());
// Test 2: ... and should be rejected if spend1 is in the memory pool
@@ -190,12 +189,12 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
spend_tx.vin[0].scriptSig << vchSig;
}
- LOCK(cs_main);
-
// Test that invalidity under a set of flags doesn't preclude validity
// under other (eg consensus) flags.
// spend_tx is invalid according to DERSIG
{
+ LOCK(cs_main);
+
CValidationState state;
PrecomputedTransactionData ptd_spend_tx(spend_tx);
@@ -213,15 +212,17 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
// test later that block validation works fine in the absence of cached
// successes.
ValidateCheckInputsForAllFlags(spend_tx, SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false);
+ }
- // And if we produce a block with this tx, it should be valid (DERSIG not
- // enabled yet), even though there's no cache entry.
- CBlock block;
+ // And if we produce a block with this tx, it should be valid (DERSIG not
+ // enabled yet), even though there's no cache entry.
+ CBlock block;
- block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
- BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
- BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
- }
+ block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
+ BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
+ BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
+
+ LOCK(cs_main);
// Test P2SH: construct a transaction that is valid without P2SH, and
// then test validity with P2SH.
diff --git a/src/validation.cpp b/src/validation.cpp
index 75c40b22fc..1c024ce2ae 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -40,6 +40,7 @@
#include <validationinterface.h>
#include <warnings.h>
+#include <future>
#include <sstream>
#include <boost/algorithm/string/replace.hpp>
@@ -2559,12 +2560,21 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
// far from a guarantee. Things in the P2P/RPC will often end up calling
// us in the middle of ProcessNewBlock - do not assume pblock is set
// sanely for performance or correctness!
+ AssertLockNotHeld(cs_main);
CBlockIndex *pindexMostWork = nullptr;
CBlockIndex *pindexNewTip = nullptr;
int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
do {
boost::this_thread::interruption_point();
+
+ if (GetMainSignals().CallbacksPending() > 10) {
+ // Block until the validation queue drains. This should largely
+ // never happen in normal operation, however may happen during
+ // reindex, causing memory blowup if we run too far ahead.
+ SyncWithValidationInterfaceQueue();
+ }
+
if (ShutdownRequested())
break;
@@ -3383,6 +3393,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
+ AssertLockNotHeld(cs_main);
+
{
CBlockIndex *pindex = nullptr;
if (fNewBlock) *fNewBlock = false;
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index abbd8cc4d2..1e13465bf7 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -11,9 +11,11 @@
#include <sync.h>
#include <txmempool.h>
#include <util.h>
+#include <validation.h>
#include <list>
#include <atomic>
+#include <future>
#include <boost/signals2/signal.hpp>
@@ -54,6 +56,11 @@ void CMainSignals::FlushBackgroundCallbacks() {
}
}
+size_t CMainSignals::CallbacksPending() {
+ if (!m_internals) return 0;
+ return m_internals->m_schedulerClient.CallbacksPending();
+}
+
void CMainSignals::RegisterWithMempoolSignals(CTxMemPool& pool) {
pool.NotifyEntryRemoved.connect(boost::bind(&CMainSignals::MempoolEntryRemoved, this, _1, _2));
}
@@ -113,6 +120,16 @@ void CallFunctionInValidationInterfaceQueue(std::function<void ()> func) {
g_signals.m_internals->m_schedulerClient.AddToProcessQueue(std::move(func));
}
+void SyncWithValidationInterfaceQueue() {
+ AssertLockNotHeld(cs_main);
+ // Block until the validation queue drains
+ std::promise<void> promise;
+ CallFunctionInValidationInterfaceQueue([&promise] {
+ promise.set_value();
+ });
+ promise.get_future().wait();
+}
+
void CMainSignals::MempoolEntryRemoved(CTransactionRef ptx, MemPoolRemovalReason reason) {
if (reason != MemPoolRemovalReason::BLOCK && reason != MemPoolRemovalReason::CONFLICT) {
m_internals->m_schedulerClient.AddToProcessQueue([ptx, this] {
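Note on the validationinterface.cpp change above: `SyncWithValidationInterfaceQueue` drains the validation queue by posting a promise-fulfilling callback onto it and waiting on the future, and together with `CallbacksPending()` this lets `ActivateBestChain` throttle itself during reindex. The short sketch below shows the same idea in standalone form; `post` and `pending` are assumed stand-ins for "enqueue a callback" and "report queue depth" (cf. the `CallbackQueue` sketch after the test_bitcoin.cpp hunk), not Core's API.

```cpp
#include <cstddef>
#include <functional>
#include <future>

// post(f) must enqueue f onto the background callback queue; callbacks run
// in order, so once the posted callback fires, everything queued before the
// call has already executed.
void SyncWithQueue(const std::function<void(std::function<void()>)>& post)
{
    std::promise<void> promise;
    post([&promise] { promise.set_value(); });
    promise.get_future().wait();
}

// Analogue of the ActivateBestChain throttle: during reindex the queue can
// grow without bound, so block until it drains once it gets too deep.
void MaybeThrottle(const std::function<void(std::function<void()>)>& post,
                   const std::function<std::size_t()>& pending)
{
    if (pending() > 10) {
        SyncWithQueue(post);
    }
}
```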
diff --git a/src/validationinterface.h b/src/validationinterface.h
index 7b5d138414..f416f6678f 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -42,6 +42,16 @@ void UnregisterAllValidationInterfaces();
* will result in a deadlock (that DEBUG_LOCKORDER will miss).
*/
void CallFunctionInValidationInterfaceQueue(std::function<void ()> func);
+/**
+ * This is a synonym for the following, which asserts certain locks are not
+ * held:
+ * std::promise<void> promise;
+ * CallFunctionInValidationInterfaceQueue([&promise] {
+ * promise.set_value();
+ * });
+ * promise.get_future().wait();
+ */
+void SyncWithValidationInterfaceQueue();
class CValidationInterface {
protected:
@@ -131,6 +141,8 @@ public:
/** Call any remaining callbacks on the calling thread */
void FlushBackgroundCallbacks();
+ size_t CallbacksPending();
+
/** Register with mempool to call TransactionRemovedFromMempool callbacks */
void RegisterWithMempoolSignals(CTxMemPool& pool);
/** Unregister with mempool */
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 80e31a1ce0..8e9362c649 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -370,8 +370,6 @@ static void AddKey(CWallet& wallet, const CKey& key)
BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
{
- LOCK(cs_main);
-
// Cap last block file size, and mine new block in a new block file.
CBlockIndex* const nullBlock = nullptr;
CBlockIndex* oldTip = chainActive.Tip();
@@ -379,6 +377,8 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
CBlockIndex* newTip = chainActive.Tip();
+ LOCK(cs_main);
+
// Verify ScanForWalletTransactions picks up transactions in both the old
// and new block files.
{
@@ -447,8 +447,6 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
// than or equal to key birthday.
BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
{
- LOCK(cs_main);
-
// Create two blocks with same timestamp to verify that importwallet rescan
// will pick up both blocks, not just the first.
const int64_t BLOCK_TIME = chainActive.Tip()->GetBlockTimeMax() + 5;
@@ -462,6 +460,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
SetMockTime(KEY_TIME);
coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
+ LOCK(cs_main);
+
// Import key into wallet and call dumpwallet to create backup file.
{
CWallet wallet;
@@ -627,10 +627,15 @@ public:
BOOST_CHECK(wallet->CreateTransaction({recipient}, wtx, reservekey, fee, changePos, error, dummy));
CValidationState state;
BOOST_CHECK(wallet->CommitTransaction(wtx, reservekey, nullptr, state));
+ CMutableTransaction blocktx;
+ {
+ LOCK(wallet->cs_wallet);
+ blocktx = CMutableTransaction(*wallet->mapWallet.at(wtx.GetHash()).tx);
+ }
+ CreateAndProcessBlock({CMutableTransaction(blocktx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
LOCK(wallet->cs_wallet);
auto it = wallet->mapWallet.find(wtx.GetHash());
BOOST_CHECK(it != wallet->mapWallet.end());
- CreateAndProcessBlock({CMutableTransaction(*it->second.tx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
it->second.SetMerkleBranch(chainActive.Tip(), 1);
return it->second;
}
@@ -641,7 +646,6 @@ public:
BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
{
std::string coinbaseAddress = coinbaseKey.GetPubKey().GetID().ToString();
- LOCK2(cs_main, wallet->cs_wallet);
// Confirm ListCoins initially returns 1 coin grouped under coinbaseKey
// address.
@@ -669,6 +673,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
BOOST_CHECK_EQUAL(available.size(), 2);
for (const auto& group : list) {
for (const auto& coin : group.second) {
+ LOCK(wallet->cs_wallet);
wallet->LockCoin(COutPoint(coin.tx->GetHash(), coin.i));
}
}
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index dafd708d09..cf9bb2909f 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1292,12 +1292,7 @@ void CWallet::BlockUntilSyncedToCurrentChain() {
// ...otherwise put a callback in the validation interface queue and wait
// for the queue to drain enough to execute it (indicating we are caught up
// at least with the time we entered this function).
-
- std::promise<void> promise;
- CallFunctionInValidationInterfaceQueue([&promise] {
- promise.set_value();
- });
- promise.get_future().wait();
+ SyncWithValidationInterfaceQueue();
}
diff --git a/test/util/bitcoin-util-test.py b/test/util/bitcoin-util-test.py
index ef34955d90..64e826ad0b 100755
--- a/test/util/bitcoin-util-test.py
+++ b/test/util/bitcoin-util-test.py
@@ -48,7 +48,7 @@ def main():
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
- input_filename = testDir + "/" + input_basename
+ input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename).read()
input_data = json.loads(raw_data)
@@ -77,7 +77,7 @@ def bctest(testDir, testObj, buildenv):
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
- execprog = buildenv["BUILDDIR"] + "/src/" + testObj['exec'] + buildenv["EXEEXT"]
+ execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
@@ -85,24 +85,28 @@ def bctest(testDir, testObj, buildenv):
stdinCfg = None
inputData = None
if "input" in testObj:
- filename = testDir + "/" + testObj['input']
+ filename = os.path.join(testDir, testObj["input"])
inputData = open(filename).read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
+ outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
- outputData = open(testDir + "/" + outputFn).read()
+ outputData = open(os.path.join(testDir, outputFn)).read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
+ if not outputType:
+ logging.error("Output file %s does not have a file extension" % outputFn)
+ raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)