34 files changed, 392 insertions, 141 deletions
diff --git a/contrib/debian/changelog b/contrib/debian/changelog index 7ce3babc1b..bd7ab3524c 100644 --- a/contrib/debian/changelog +++ b/contrib/debian/changelog @@ -149,7 +149,7 @@ bitcoin (0.5.3-natty0) natty; urgency=low bitcoin (0.5.2-natty1) natty; urgency=low * Remove mentions on anonymity in package descriptions and manpage. - These should never have been there, bitcoin isnt anonymous without + These should never have been there, bitcoin isn't anonymous without a ton of work that virtually no users will ever be willing and capable of doing @@ -190,7 +190,7 @@ bitcoin (0.5.0~rc1-natty1) natty; urgency=low * Add test_bitcoin to build test * Fix clean - * Remove uneccessary build-dependancies + * Remove unnecessary build-dependancies -- Matt Corallo <matt@bluematt.me> Wed, 26 Oct 2011 14:37:18 -0400 @@ -350,7 +350,7 @@ bitcoin (0.3.20.01~dfsg-1) unstable; urgency=low bitcoin (0.3.19~dfsg-6) unstable; urgency=low - * Fix override agressive optimizations. + * Fix override aggressive optimizations. * Fix tighten build-dependencies to really fit backporting to Lenny: + Add fallback build-dependency on libdb4.6++-dev. + Tighten unversioned Boost build-dependencies to recent versions, diff --git a/depends/config.guess b/depends/config.guess index f7eb141e75..f357ec6c8f 100755 --- a/depends/config.guess +++ b/depends/config.guess @@ -1117,7 +1117,7 @@ EOF # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; diff --git a/doc/REST-interface.md b/doc/REST-interface.md index ac7cd45f70..bf669235e3 100644 --- a/doc/REST-interface.md +++ b/doc/REST-interface.md @@ -77,6 +77,20 @@ $ curl localhost:18332/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76 } ``` +####Memory pool +`GET /rest/mempool/info.json` + +Returns various information about the TX mempool. +Only supports JSON as output format. +* size : (numeric) the number of transactions in the TX mempool +* bytes : (numeric) size of the TX mempool in bytes +* usage : (numeric) total TX mempool memory usage + +`GET /rest/mempool/contents.json` + +Returns transactions in the TX mempool. +Only supports JSON as output format. + Risks ------------- Running a web browser on the same node with a REST enabled bitcoind can be a risk. Accessing prepared XSS websites could read out tx/block data of your node by placing links like `<script src="http://127.0.0.1:8332/rest/tx/1234567890.json">` which might break the nodes privacy. 
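A minimal usage sketch of the two new REST mempool endpoints documented above, assuming a local node running with REST enabled on testnet port 18332 (as in the curl examples in REST-interface.md). The helper below is illustrative only, written in the Python 2 style of the qa tests rather than taken from the tree:

```python
import httplib
import json

# assumed: a bitcoind with -rest enabled, listening on the testnet port
HOST, PORT = '127.0.0.1', 18332

def rest_get(path):
    # plain HTTP GET against the REST interface, decoded as JSON
    conn = httplib.HTTPConnection(HOST, PORT)
    conn.request('GET', path)
    resp = conn.getresponse()
    assert resp.status == 200
    return json.loads(resp.read())

# /rest/mempool/info.json returns size, bytes and usage of the TX mempool
info = rest_get('/rest/mempool/info.json')
print 'mempool: %d txs, %d bytes, %d bytes of memory used' % (
    info['size'], info['bytes'], info['usage'])

# /rest/mempool/contents.json returns an object keyed by txid
contents = rest_get('/rest/mempool/contents.json')
for txid in contents:
    print 'mempool tx:', txid
```

This mirrors what qa/rpc-tests/rest.py checks after this change: the info object's `size` matches the number of unconfirmed transactions, and every submitted txid appears as a key of the contents object.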
diff --git a/doc/files.md b/doc/files.md index 80195535bb..c083bcb038 100644 --- a/doc/files.md +++ b/doc/files.md @@ -1,12 +1,17 @@ -Used in 0.8.0 ---------------------- -* wallet.dat: personal wallet (BDB) with keys and transactions -* peers.dat: peer IP address database (custom format); since 0.7.0 + +* banlist.dat: stores the IPs/Subnets of banned nodes +* bitcoin.conf: contains configuration settings for bitcoind or bitcoin-qt +* bitcoind.pid: stores the process id of bitcoind while running * blocks/blk000??.dat: block data (custom, 128 MiB per file); since 0.8.0 * blocks/rev000??.dat; block undo data (custom); since 0.8.0 (format changed since pre-0.8) * blocks/index/*; block index (LevelDB); since 0.8.0 * chainstate/*; block chain state database (LevelDB); since 0.8.0 * database/*: BDB database environment; only used for wallet since 0.8.0 +* db.log: wallet database log file +* debug.log: contains debug information and general logging generated by bitcoind or bitcoin-qt +* fee_estimates.dat: stores statistics used to estimate minimum transaction fees and priorities required for confirmation; since 0.10.0 +* peers.dat: peer IP address database (custom format); since 0.7.0 +* wallet.dat: personal wallet (BDB) with keys and transactions Only used in pre-0.8.0 --------------------- diff --git a/qa/rpc-tests/keypool.py b/qa/rpc-tests/keypool.py index aee29a596a..5a67220021 100755 --- a/qa/rpc-tests/keypool.py +++ b/qa/rpc-tests/keypool.py @@ -73,6 +73,21 @@ def run_test(nodes, tmpdir): except JSONRPCException,e: assert(e.error['code']==-12) + # refill keypool with three new addresses + nodes[0].walletpassphrase('test', 12000) + nodes[0].keypoolrefill(3) + nodes[0].walletlock() + + # drain them by mining + nodes[0].generate(1) + nodes[0].generate(1) + nodes[0].generate(1) + nodes[0].generate(1) + try: + nodes[0].generate(1) + raise AssertionError('Keypool should be exhausted after three addesses') + except JSONRPCException,e: + assert(e.error['code']==-12) def main(): import optparse diff --git a/qa/rpc-tests/rest.py b/qa/rpc-tests/rest.py index b0cde7268e..3a035f996c 100755 --- a/qa/rpc-tests/rest.py +++ b/qa/rpc-tests/rest.py @@ -200,7 +200,7 @@ class RESTTest (BitcoinTestFramework): response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True) assert_equal(response.status, 200) #must be a 500 because we exceeding the limits - self.nodes[0].generate(1) #generate block to not affect upcomming tests + self.nodes[0].generate(1) #generate block to not affect upcoming tests self.sync_all() ################ @@ -292,6 +292,19 @@ class RESTTest (BitcoinTestFramework): txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) self.sync_all() + # check that there are exactly 3 transactions in the TX memory pool before generating the block + json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + assert_equal(json_obj['size'], 3) + # the size of the memory pool should be greater than 3x ~100 bytes + assert_greater_than(json_obj['bytes'], 300) + + # check that there are our submitted transactions in the TX memory pool + json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json') + json_obj = json.loads(json_string) + for tx in txs: + assert_equal(tx in json_obj, True) + # now mine the transactions newblockhash = self.nodes[1].generate(1) self.sync_all() diff --git 
a/qa/rpc-tests/test_framework/comptool.py b/qa/rpc-tests/test_framework/comptool.py index 7fb31d4a06..b945f1bf29 100755 --- a/qa/rpc-tests/test_framework/comptool.py +++ b/qa/rpc-tests/test_framework/comptool.py @@ -27,6 +27,20 @@ generator that returns TestInstance objects. See below for definition. global mininode_lock +def wait_until(predicate, attempts=float('inf'), timeout=float('inf')): + attempt = 0 + elapsed = 0 + + while attempt < attempts and elapsed < timeout: + with mininode_lock: + if predicate(): + return True + attempt += 1 + elapsed += 0.05 + time.sleep(0.05) + + return False + class TestNode(NodeConnCB): def __init__(self, block_store, tx_store): @@ -43,6 +57,10 @@ class TestNode(NodeConnCB): # a response self.pingMap = {} self.lastInv = [] + self.closed = False + + def on_close(self, conn): + self.closed = True def add_connection(self, conn): self.conn = conn @@ -116,7 +134,7 @@ class TestNode(NodeConnCB): # is reached) and then sent out in one inv message. Then the final block # will be synced across all connections, and the outcome of the final # block will be tested. -# sync_every_tx: analagous to behavior for sync_every_block, except if outcome +# sync_every_tx: analogous to behavior for sync_every_block, except if outcome # on the final tx is None, then contents of entire mempool are compared # across all connections. (If outcome of final tx is specified as true # or false, then only the last tx is tested against outcome.) @@ -132,6 +150,7 @@ class TestManager(object): def __init__(self, testgen, datadir): self.test_generator = testgen self.connections = [] + self.test_nodes = [] self.block_store = BlockStore(datadir) self.tx_store = TxStore(datadir) self.ping_counter = 1 @@ -139,54 +158,40 @@ class TestManager(object): def add_all_connections(self, nodes): for i in range(len(nodes)): # Create a p2p connection to each node - self.connections.append(NodeConn('127.0.0.1', p2p_port(i), - nodes[i], TestNode(self.block_store, self.tx_store))) + test_node = TestNode(self.block_store, self.tx_store) + self.test_nodes.append(test_node) + self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node)) # Make sure the TestNode (callback class) has a reference to its # associated NodeConn - self.connections[-1].cb.add_connection(self.connections[-1]) + test_node.add_connection(self.connections[-1]) + + def wait_for_disconnections(self): + def disconnected(): + return all(node.closed for node in self.test_nodes) + return wait_until(disconnected, timeout=10) def wait_for_verack(self): - sleep_time = 0.05 - max_tries = 10 / sleep_time # Wait at most 10 seconds - while max_tries > 0: - done = True - with mininode_lock: - for c in self.connections: - if c.cb.verack_received is False: - done = False - break - if done: - break - time.sleep(sleep_time) + def veracked(): + return all(node.verack_received for node in self.test_nodes) + return wait_until(veracked, timeout=10) def wait_for_pings(self, counter): - received_pongs = False - while received_pongs is not True: - time.sleep(0.05) - received_pongs = True - with mininode_lock: - for c in self.connections: - if c.cb.received_ping_response(counter) is not True: - received_pongs = False - break + def received_pongs(): + return all(node.received_ping_response(counter) for node in self.test_nodes) + return wait_until(received_pongs) # sync_blocks: Wait for all connections to request the blockhash given # then send get_headers to find out the tip of each node, and synchronize # the response by using a ping (and waiting 
for pong with same nonce). def sync_blocks(self, blockhash, num_blocks): - # Wait for nodes to request block (50ms sleep * 20 tries * num_blocks) - max_tries = 20*num_blocks - while max_tries > 0: - with mininode_lock: - results = [ blockhash in c.cb.block_request_map and - c.cb.block_request_map[blockhash] for c in self.connections ] - if False not in results: - break - time.sleep(0.05) - max_tries -= 1 + def blocks_requested(): + return all( + blockhash in node.block_request_map and node.block_request_map[blockhash] + for node in self.test_nodes + ) # --> error if not requested - if max_tries == 0: + if not wait_until(blocks_requested, attempts=20*num_blocks): # print [ c.cb.block_request_map for c in self.connections ] raise AssertionError("Not all nodes requested block") # --> Answer request (we did this inline!) @@ -202,18 +207,14 @@ class TestManager(object): # Analogous to sync_block (see above) def sync_transaction(self, txhash, num_events): # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events) - max_tries = 20*num_events - while max_tries > 0: - with mininode_lock: - results = [ txhash in c.cb.tx_request_map and - c.cb.tx_request_map[txhash] for c in self.connections ] - if False not in results: - break - time.sleep(0.05) - max_tries -= 1 + def transaction_requested(): + return all( + txhash in node.tx_request_map and node.tx_request_map[txhash] + for node in self.test_nodes + ) # --> error if not requested - if max_tries == 0: + if not wait_until(transaction_requested, attempts=20*num_events): # print [ c.cb.tx_request_map for c in self.connections ] raise AssertionError("Not all nodes requested transaction") # --> Answer request (we did this inline!) @@ -336,6 +337,7 @@ class TestManager(object): print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ] test_number += 1 + [ c.disconnect_node() for c in self.connections ] + self.wait_for_disconnections() self.block_store.close() self.tx_store.close() - [ c.disconnect_node() for c in self.connections ] diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 8289198959..fc4e047c35 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -52,6 +52,7 @@ BITCOIN_TESTS =\ test/getarg_tests.cpp \ test/hash_tests.cpp \ test/key_tests.cpp \ + test/limitedmap_tests.cpp \ test/main_tests.cpp \ test/mempool_tests.cpp \ test/miner_tests.cpp \ diff --git a/src/addrman.h b/src/addrman.h index 2623d89809..384b6cfdb9 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -265,7 +265,7 @@ public: * Notice that vvTried, mapAddr and vVector are never encoded explicitly; * they are instead reconstructed from the other information. * - * vvNew is serialized, but only used if ADDRMAN_UNKOWN_BUCKET_COUNT didn't change, + * vvNew is serialized, but only used if ADDRMAN_UNKNOWN_BUCKET_COUNT didn't change, * otherwise it is reconstructed as well. 
* * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports diff --git a/src/ecwrapper.cpp b/src/ecwrapper.cpp index 5e3aec25ba..f94bc954fd 100644 --- a/src/ecwrapper.cpp +++ b/src/ecwrapper.cpp @@ -13,6 +13,29 @@ namespace { +class ecgroup_order +{ +public: + static const EC_GROUP* get() + { + static const ecgroup_order wrapper; + return wrapper.pgroup; + } + +private: + ecgroup_order() + : pgroup(EC_GROUP_new_by_curve_name(NID_secp256k1)) + { + } + + ~ecgroup_order() + { + EC_GROUP_free(pgroup); + } + + EC_GROUP* pgroup; +}; + /** * Perform ECDSA key recovery (see SEC1 4.1.6) for curves over (mod p)-fields * recid selects which key is recovered @@ -92,8 +115,10 @@ err: } // anon namespace CECKey::CECKey() { - pkey = EC_KEY_new_by_curve_name(NID_secp256k1); + pkey = EC_KEY_new(); assert(pkey != NULL); + int result = EC_KEY_set_group(pkey, ecgroup_order::get()); + assert(result); } CECKey::~CECKey() { @@ -185,11 +210,9 @@ bool CECKey::TweakPublic(const unsigned char vchTweak[32]) { bool CECKey::SanityCheck() { - EC_KEY *pkey = EC_KEY_new_by_curve_name(NID_secp256k1); - if(pkey == NULL) + const EC_GROUP *pgroup = ecgroup_order::get(); + if(pgroup == NULL) return false; - EC_KEY_free(pkey); - // TODO Is there more EC functionality that could be missing? return true; } diff --git a/src/leveldbwrapper.cpp b/src/leveldbwrapper.cpp index c353dfa6d9..26cacf95ae 100644 --- a/src/leveldbwrapper.cpp +++ b/src/leveldbwrapper.cpp @@ -58,7 +58,8 @@ CLevelDBWrapper::CLevelDBWrapper(const boost::filesystem::path& path, size_t nCa } else { if (fWipe) { LogPrintf("Wiping LevelDB in %s\n", path.string()); - leveldb::DestroyDB(path.string(), options); + leveldb::Status result = leveldb::DestroyDB(path.string(), options); + HandleError(result); } TryCreateDirectory(path); LogPrintf("Opening LevelDB in %s\n", path.string()); diff --git a/src/limitedmap.h b/src/limitedmap.h index e8ea549653..5456dfc7c4 100644 --- a/src/limitedmap.h +++ b/src/limitedmap.h @@ -27,7 +27,11 @@ protected: size_type nMaxSize; public: - limitedmap(size_type nMaxSizeIn = 0) { nMaxSize = nMaxSizeIn; } + limitedmap(size_type nMaxSizeIn) + { + assert(nMaxSizeIn > 0); + nMaxSize = nMaxSizeIn; + } const_iterator begin() const { return map.begin(); } const_iterator end() const { return map.end(); } size_type size() const { return map.size(); } @@ -38,13 +42,12 @@ public: { std::pair<iterator, bool> ret = map.insert(x); if (ret.second) { - if (nMaxSize && map.size() == nMaxSize) { + if (map.size() > nMaxSize) { map.erase(rmap.begin()->second); rmap.erase(rmap.begin()); } rmap.insert(make_pair(x.second, ret.first)); } - return; } void erase(const key_type& k) { @@ -81,11 +84,11 @@ public: size_type max_size() const { return nMaxSize; } size_type max_size(size_type s) { - if (s) - while (map.size() > s) { - map.erase(rmap.begin()->second); - rmap.erase(rmap.begin()); - } + assert(s > 0); + while (map.size() > s) { + map.erase(rmap.begin()->second); + rmap.erase(rmap.begin()); + } nMaxSize = s; return nMaxSize; } diff --git a/src/main.cpp b/src/main.cpp index 9f42819a0a..33b57a5285 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1457,7 +1457,7 @@ bool AbortNode(const std::string& strMessage, const std::string& userMessage="") strMiscWarning = strMessage; LogPrintf("*** %s\n", strMessage); uiInterface.ThreadSafeMessageBox( - userMessage.empty() ? _("Error: A fatal internal error occured, see debug.log for details") : userMessage, + userMessage.empty() ? 
_("Error: A fatal internal error occurred, see debug.log for details") : userMessage, "", CClientUIInterface::MSG_ERROR); StartShutdown(); return false; diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp index 4d90fd8cd7..f8e877df25 100644 --- a/src/merkleblock.cpp +++ b/src/merkleblock.cpp @@ -168,7 +168,7 @@ uint256 CPartialMerkleTree::ExtractMatches(std::vector<uint256> &vMatch) { // traverse the partial tree unsigned int nBitsUsed = 0, nHashUsed = 0; uint256 hashMerkleRoot = TraverseAndExtract(nHeight, 0, nBitsUsed, nHashUsed, vMatch); - // verify that no problems occured during the tree traversal + // verify that no problems occurred during the tree traversal if (fBad) return uint256(); // verify that all bits were consumed (except for the padding caused by serializing it as a byte sequence) diff --git a/src/miner.cpp b/src/miner.cpp index 4172266067..9dd1d459b5 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -444,8 +444,10 @@ void static BitcoinMiner(const CChainParams& chainparams) GetMainSignals().ScriptForMining(coinbaseScript); try { - //throw an error if no script was provided - if (!coinbaseScript->reserveScript.size()) + // Throw an error if no script was provided. This can happen + // due to some internal error but also if the keypool is empty. + // In the latter case, already the pointer is NULL. + if (!coinbaseScript || coinbaseScript->reserveScript.empty()) throw std::runtime_error("No coinbase script available (mining requires a wallet)"); while (true) { diff --git a/src/net.cpp b/src/net.cpp index e4ead3c92e..fb5726a2b9 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2319,7 +2319,7 @@ void DumpBanlist() { int64_t nStart = GetTimeMillis(); - CNode::SweepBanned(); //clean unused entires (if bantime has expired) + CNode::SweepBanned(); //clean unused entries (if bantime has expired) CBanDB bandb; banmap_t banmap; @@ -695,7 +695,7 @@ public: static bool BannedSetIsDirty(); //!set the "dirty" flag for the banlist static void SetBannedSetDirty(bool dirty=true); - //!clean unused entires (if bantime has expired) + //!clean unused entries (if bantime has expired) static void SweepBanned(); void copyStats(CNodeStats &stats); diff --git a/src/netbase.cpp b/src/netbase.cpp index 259e5c14a3..7a87d125c2 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -349,7 +349,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials } if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) { CloseSocket(hSocket); - return error("Proxy authentication unsuccesful"); + return error("Proxy authentication unsuccessful"); } } else if (pchRet1[1] == 0x00) { // Perform no authentication diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index cdee541d2f..ffe31d1942 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -261,7 +261,7 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe blocksAgo = 0; if (blocksAgo < 0) { LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n"); - return; //This can't happen becasue we call this with our best seen height, no entries can have higher + return; //This can't happen because we call this with our best seen height, no entries can have higher } if (blocksAgo >= (int)unconfTxs.size()) { diff --git a/src/policy/fees.h b/src/policy/fees.h index ce4d782566..15577d128a 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -118,7 +118,7 @@ public: /** * Initialize the data structures. This is called by BlockPolicyEstimator's * constructor with default values. 
- * @param defaultBuckets contains the upper limits for the bucket boundries + * @param defaultBuckets contains the upper limits for the bucket boundaries * @param maxConfirms max number of confirms to track * @param decay how much to decay the historical moving average per block * @param dataTypeString for logging purposes diff --git a/src/qt/paymentserver.cpp b/src/qt/paymentserver.cpp index 0827d99125..5cc4d00dbf 100644 --- a/src/qt/paymentserver.cpp +++ b/src/qt/paymentserver.cpp @@ -762,7 +762,7 @@ void PaymentServer::setOptionsModel(OptionsModel *optionsModel) void PaymentServer::handlePaymentACK(const QString& paymentACKMsg) { - // currently we don't futher process or store the paymentACK message + // currently we don't further process or store the paymentACK message Q_EMIT message(tr("Payment acknowledged"), paymentACKMsg, CClientUIInterface::ICON_INFORMATION | CClientUIInterface::MODAL); } diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 35729bbb8b..d9d4f1d0ed 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -570,7 +570,7 @@ void RPCConsole::peerLayoutChanged() if (detailNodeRow < 0) { - // detail node dissapeared from table (node disconnected) + // detail node disappeared from table (node disconnected) fUnselect = true; } else diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp index 8430e017c1..c15b64c327 100644 --- a/src/qt/splashscreen.cpp +++ b/src/qt/splashscreen.cpp @@ -57,7 +57,7 @@ SplashScreen::SplashScreen(Qt::WindowFlags f, const NetworkStyle *networkStyle) QPainter pixPaint(&pixmap); pixPaint.setPen(QColor(100,100,100)); - // draw a slighly radial gradient + // draw a slightly radial gradient QRadialGradient gradient(QPoint(0,0), splashSize.width()/devicePixelRatio); gradient.setColorAt(0, Qt::white); gradient.setColorAt(1, QColor(247,247,247)); diff --git a/src/rest.cpp b/src/rest.cpp index 0dd238b683..74d27e73bb 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -65,6 +65,8 @@ public: extern void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry); extern UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false); +extern UniValue mempoolInfoToJSON(); +extern UniValue mempoolToJSON(bool fVerbose = false); extern void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex); extern UniValue blockheaderToJSON(const CBlockIndex* blockindex); @@ -293,6 +295,58 @@ static bool rest_chaininfo(AcceptedConnection* conn, return true; // continue to process further HTTP reqs on this cxn } +static bool rest_mempool_info(AcceptedConnection* conn, + const std::string& strURIPart, + const std::string& strRequest, + const std::map<std::string, std::string>& mapHeaders, + bool fRun) +{ + vector<string> params; + const RetFormat rf = ParseDataFormat(params, strURIPart); + + switch (rf) { + case RF_JSON: { + UniValue mempoolInfoObject = mempoolInfoToJSON(); + + string strJSON = mempoolInfoObject.write() + "\n"; + conn->stream() << HTTPReply(HTTP_OK, strJSON, fRun) << std::flush; + return true; + } + default: { + throw RESTERR(HTTP_NOT_FOUND, "output format not found (available: json)"); + } + } + + // not reached + return true; // continue to process further HTTP reqs on this cxn +} + +static bool rest_mempool_contents(AcceptedConnection* conn, + const std::string& strURIPart, + const std::string& strRequest, + const std::map<std::string, std::string>& mapHeaders, + bool fRun) +{ + vector<string> params; + const RetFormat rf = ParseDataFormat(params, 
strURIPart); + + switch (rf) { + case RF_JSON: { + UniValue mempoolObject = mempoolToJSON(true); + + string strJSON = mempoolObject.write() + "\n"; + conn->stream() << HTTPReply(HTTP_OK, strJSON, fRun) << std::flush; + return true; + } + default: { + throw RESTERR(HTTP_NOT_FOUND, "output format not found (available: json)"); + } + } + + // not reached + return true; // continue to process further HTTP reqs on this cxn +} + static bool rest_tx(AcceptedConnection* conn, const std::string& strURIPart, const std::string& strRequest, @@ -553,6 +607,8 @@ static const struct { {"/rest/block/notxdetails/", rest_block_notxdetails}, {"/rest/block/", rest_block_extended}, {"/rest/chaininfo", rest_chaininfo}, + {"/rest/mempool/info", rest_mempool_info}, + {"/rest/mempool/contents", rest_mempool_contents}, {"/rest/headers/", rest_headers}, {"/rest/getutxos", rest_getutxos}, }; diff --git a/src/rpcblockchain.cpp b/src/rpcblockchain.cpp index 80d49490d2..e6751de96b 100644 --- a/src/rpcblockchain.cpp +++ b/src/rpcblockchain.cpp @@ -175,45 +175,8 @@ UniValue getdifficulty(const UniValue& params, bool fHelp) return GetDifficulty(); } - -UniValue getrawmempool(const UniValue& params, bool fHelp) +UniValue mempoolToJSON(bool fVerbose = false) { - if (fHelp || params.size() > 1) - throw runtime_error( - "getrawmempool ( verbose )\n" - "\nReturns all transaction ids in memory pool as a json array of string transaction ids.\n" - "\nArguments:\n" - "1. verbose (boolean, optional, default=false) true for a json object, false for array of transaction ids\n" - "\nResult: (for verbose = false):\n" - "[ (json array of string)\n" - " \"transactionid\" (string) The transaction id\n" - " ,...\n" - "]\n" - "\nResult: (for verbose = true):\n" - "{ (json object)\n" - " \"transactionid\" : { (json object)\n" - " \"size\" : n, (numeric) transaction size in bytes\n" - " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n" - " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n" - " \"height\" : n, (numeric) block height when transaction entered pool\n" - " \"startingpriority\" : n, (numeric) priority when transaction entered pool\n" - " \"currentpriority\" : n, (numeric) transaction priority now\n" - " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n" - " \"transactionid\", (string) parent transaction id\n" - " ... ]\n" - " }, ...\n" - "}\n" - "\nExamples\n" - + HelpExampleCli("getrawmempool", "true") - + HelpExampleRpc("getrawmempool", "true") - ); - - LOCK(cs_main); - - bool fVerbose = false; - if (params.size() > 0) - fVerbose = params[0].get_bool(); - if (fVerbose) { LOCK(mempool.cs); @@ -261,6 +224,47 @@ UniValue getrawmempool(const UniValue& params, bool fHelp) } } +UniValue getrawmempool(const UniValue& params, bool fHelp) +{ + if (fHelp || params.size() > 1) + throw runtime_error( + "getrawmempool ( verbose )\n" + "\nReturns all transaction ids in memory pool as a json array of string transaction ids.\n" + "\nArguments:\n" + "1. 
verbose (boolean, optional, default=false) true for a json object, false for array of transaction ids\n" + "\nResult: (for verbose = false):\n" + "[ (json array of string)\n" + " \"transactionid\" (string) The transaction id\n" + " ,...\n" + "]\n" + "\nResult: (for verbose = true):\n" + "{ (json object)\n" + " \"transactionid\" : { (json object)\n" + " \"size\" : n, (numeric) transaction size in bytes\n" + " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n" + " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n" + " \"height\" : n, (numeric) block height when transaction entered pool\n" + " \"startingpriority\" : n, (numeric) priority when transaction entered pool\n" + " \"currentpriority\" : n, (numeric) transaction priority now\n" + " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n" + " \"transactionid\", (string) parent transaction id\n" + " ... ]\n" + " }, ...\n" + "}\n" + "\nExamples\n" + + HelpExampleCli("getrawmempool", "true") + + HelpExampleRpc("getrawmempool", "true") + ); + + LOCK(cs_main); + + bool fVerbose = false; + if (params.size() > 0) + fVerbose = params[0].get_bool(); + + return mempoolToJSON(fVerbose); +} + UniValue getblockhash(const UniValue& params, bool fHelp) { if (fHelp || params.size() != 1) @@ -757,6 +761,16 @@ UniValue getchaintips(const UniValue& params, bool fHelp) return res; } +UniValue mempoolInfoToJSON() +{ + UniValue ret(UniValue::VOBJ); + ret.push_back(Pair("size", (int64_t) mempool.size())); + ret.push_back(Pair("bytes", (int64_t) mempool.GetTotalTxSize())); + ret.push_back(Pair("usage", (int64_t) mempool.DynamicMemoryUsage())); + + return ret; +} + UniValue getmempoolinfo(const UniValue& params, bool fHelp) { if (fHelp || params.size() != 0) @@ -774,12 +788,7 @@ UniValue getmempoolinfo(const UniValue& params, bool fHelp) + HelpExampleRpc("getmempoolinfo", "") ); - UniValue ret(UniValue::VOBJ); - ret.push_back(Pair("size", (int64_t) mempool.size())); - ret.push_back(Pair("bytes", (int64_t) mempool.GetTotalTxSize())); - ret.push_back(Pair("usage", (int64_t) mempool.DynamicMemoryUsage())); - - return ret; + return mempoolInfoToJSON(); } UniValue invalidateblock(const UniValue& params, bool fHelp) diff --git a/src/rpcmining.cpp b/src/rpcmining.cpp index b7d4ff58fc..620a46be15 100644 --- a/src/rpcmining.cpp +++ b/src/rpcmining.cpp @@ -138,8 +138,12 @@ UniValue generate(const UniValue& params, bool fHelp) boost::shared_ptr<CReserveScript> coinbaseScript; GetMainSignals().ScriptForMining(coinbaseScript); + // If the keypool is exhausted, no script is returned at all. Catch this. 
+ if (!coinbaseScript) + throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, "Error: Keypool ran out, please call keypoolrefill first"); + //throw an error if no script was provided - if (!coinbaseScript->reserveScript.size()) + if (coinbaseScript->reserveScript.empty()) throw JSONRPCError(RPC_INTERNAL_ERROR, "No coinbase script available (mining requires a wallet)"); { // Don't keep cs_main locked diff --git a/src/rpcserver.cpp b/src/rpcserver.cpp index 4088f374f8..9362401b1e 100644 --- a/src/rpcserver.cpp +++ b/src/rpcserver.cpp @@ -607,7 +607,7 @@ void StartRPCThreads() LogPrintf("No rpcpassword set - using random cookie authentication\n"); if (!GenerateAuthCookie(&strRPCUserColonPass)) { uiInterface.ThreadSafeMessageBox( - _("Error: A fatal internal error occured, see debug.log for details"), // Same message as AbortNode + _("Error: A fatal internal error occurred, see debug.log for details"), // Same message as AbortNode "", CClientUIInterface::MSG_ERROR); StartShutdown(); return; @@ -671,7 +671,7 @@ void StartRPCThreads() vEndpoints.push_back(ip::tcp::endpoint(boost::asio::ip::address_v6::any(), defaultPort)); vEndpoints.push_back(ip::tcp::endpoint(boost::asio::ip::address_v4::any(), defaultPort)); // Prefer making the socket dual IPv6/IPv4 instead of binding - // to both addresses seperately. + // to both addresses separately. bBindAny = true; } diff --git a/src/scheduler.cpp b/src/scheduler.cpp index d5bb588b71..06115f5619 100644 --- a/src/scheduler.cpp +++ b/src/scheduler.cpp @@ -6,6 +6,7 @@ #include <assert.h> #include <boost/bind.hpp> +#include <boost/thread/reverse_lock.hpp> #include <utility> CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false) @@ -65,11 +66,12 @@ void CScheduler::serviceQueue() Function f = taskQueue.begin()->second; taskQueue.erase(taskQueue.begin()); - // Unlock before calling f, so it can reschedule itself or another task - // without deadlocking: - lock.unlock(); - f(); - lock.lock(); + { + // Unlock before calling f, so it can reschedule itself or another task + // without deadlocking: + boost::reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock); + f(); + } } catch (...) { --nThreadsServicingQueue; throw; diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 0b78fdf5a8..bd5e54b33f 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1128,7 +1128,7 @@ bool TransactionSignatureChecker::CheckSig(const vector<unsigned char>& vchSigIn bool TransactionSignatureChecker::CheckLockTime(const CScriptNum& nLockTime) const { - // There are two times of nLockTime: lock-by-blockheight + // There are two kinds of nLockTime: lock-by-blockheight // and lock-by-blocktime, distinguished by whether // nLockTime < LOCKTIME_THRESHOLD. 
// diff --git a/src/sync.h b/src/sync.h index 705647e4a5..68a9443084 100644 --- a/src/sync.h +++ b/src/sync.h @@ -16,7 +16,7 @@ //////////////////////////////////////////////// // // -// THE SIMPLE DEFINITON, EXCLUDING DEBUG CODE // +// THE SIMPLE DEFINITION, EXCLUDING DEBUG CODE // // // //////////////////////////////////////////////// diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json index 20bdbd08a5..5cad5af7c3 100644 --- a/src/test/data/tx_invalid.json +++ b/src/test/data/tx_invalid.json @@ -128,7 +128,7 @@ [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "499999999 NOP2 1"]], "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000fe64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], -["By-time locks, with argument just beyond tx nLockTime (but within numerical boundries)"], +["By-time locks, with argument just beyond tx nLockTime (but within numerical boundaries)"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "500000001 NOP2 1"]], "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4294967295 NOP2 1"]], @@ -181,7 +181,7 @@ [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "2147483648 NOP2 1"]], "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffff7f", "P2SH,CHECKLOCKTIMEVERIFY"], -["6 byte non-minimally-encoded arguments are invalid even in their contents are valid"], +["6 byte non-minimally-encoded arguments are invalid even if their contents are valid"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x06 0x000000000000 NOP2 1"]], "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", "P2SH,CHECKLOCKTIMEVERIFY"], diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json index 24fff575c1..9744a3c848 100644 --- a/src/test/data/tx_valid.json +++ b/src/test/data/tx_valid.json @@ -197,7 +197,7 @@ [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0 NOP2 1"]], "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], -["By-time locks, with argument just beyond tx nLockTime (but within numerical boundries)"], +["By-time locks, with argument just beyond tx nLockTime (but within numerical boundaries)"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "500000000 NOP2 1"]], "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", "P2SH,CHECKLOCKTIMEVERIFY"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4294967295 NOP2 1"]], diff --git a/src/test/limitedmap_tests.cpp b/src/test/limitedmap_tests.cpp new file mode 100644 index 0000000000..faaddffad8 --- /dev/null +++ b/src/test/limitedmap_tests.cpp @@ -0,0 +1,101 @@ +// Copyright (c) 2012-2015 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "limitedmap.h" + +#include "test/test_bitcoin.h" + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(limitedmap_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(limitedmap_test) +{ + // create a limitedmap capped at 10 items + limitedmap<int, int> map(10); + + // check that the max size is 10 + BOOST_CHECK(map.max_size() == 10); + + // check that it's empty + BOOST_CHECK(map.size() == 0); + + // insert (-1, -1) + map.insert(std::pair<int, int>(-1, -1)); + + // make sure that the size is updated + BOOST_CHECK(map.size() == 1); + + // make sure that the new items is in the map + BOOST_CHECK(map.count(-1) == 1); + + // insert 10 new items + for (int i = 0; i < 10; i++) { + map.insert(std::pair<int, int>(i, i + 1)); + } + + // make sure that the map now contains 10 items... + BOOST_CHECK(map.size() == 10); + + // ...and that the first item has been discarded + BOOST_CHECK(map.count(-1) == 0); + + // iterate over the map, both with an index and an iterator + limitedmap<int, int>::const_iterator it = map.begin(); + for (int i = 0; i < 10; i++) { + // make sure the item is present + BOOST_CHECK(map.count(i) == 1); + + // use the iterator to check for the expected key adn value + BOOST_CHECK(it->first == i); + BOOST_CHECK(it->second == i + 1); + + // use find to check for the value + BOOST_CHECK(map.find(i)->second == i + 1); + + // update and recheck + map.update(it, i + 2); + BOOST_CHECK(map.find(i)->second == i + 2); + + it++; + } + + // check that we've exhausted the iterator + BOOST_CHECK(it == map.end()); + + // resize the map to 5 items + map.max_size(5); + + // check that the max size and size are now 5 + BOOST_CHECK(map.max_size() == 5); + BOOST_CHECK(map.size() == 5); + + // check that items less than 5 have been discarded + // and items greater than 5 are retained + for (int i = 0; i < 10; i++) { + if (i < 5) { + BOOST_CHECK(map.count(i) == 0); + } else { + BOOST_CHECK(map.count(i) == 1); + } + } + + // erase some items not in the map + for (int i = 100; i < 1000; i += 100) { + map.erase(i); + } + + // check that the size is unaffected + BOOST_CHECK(map.size() == 5); + + // erase the remaining elements + for (int i = 5; i < 10; i++) { + map.erase(i); + } + + // check that the map is now empty + BOOST_CHECK(map.empty()); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index bd16da7614..5d182f3d42 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -797,7 +797,7 @@ UniValue movecmd(const UniValue& params, bool fHelp) "4. minconf (numeric, optional, default=1) Only use funds with at least this many confirmations.\n" "5. \"comment\" (string, optional) An optional comment, stored in the wallet only.\n" "\nResult:\n" - "true|false (boolean) true if successfull.\n" + "true|false (boolean) true if successful.\n" "\nExamples:\n" "\nMove 0.01 " + CURRENCY_UNIT + " from the default account to the account named tabby\n" + HelpExampleCli("move", "\"\" \"tabby\" 0.01") + |