29 files changed, 269 insertions, 170 deletions
diff --git a/.travis.yml b/.travis.yml
index 4087a854b4..ccd2490925 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -51,6 +51,9 @@ before_script:
     - if [ -n "$OSX_SDK" -a -f depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then tar -C depends/SDKs -xf depends/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz; fi
     - make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
 script:
+    - if [ "$RUN_TESTS" = "true" -a "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_PULL_REQUEST" = "false" ]; then while read LINE; do travis_retry gpg --keyserver hkp://pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys; fi
+    - if [ "$RUN_TESTS" = "true" -a "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_PULL_REQUEST" = "false" ]; then git fetch --unshallow; fi
+    - if [ "$RUN_TESTS" = "true" -a "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_PULL_REQUEST" = "false" ]; then contrib/verify-commits/verify-commits.sh; fi
     - export TRAVIS_COMMIT_LOG=`git log --format=fuller -1`
     - if [ -n "$USE_SHELL" ]; then export CONFIG_SHELL="$USE_SHELL"; fi
     - OUTDIR=$BASE_OUTDIR/$TRAVIS_PULL_REQUEST/$TRAVIS_JOB_NUMBER-$HOST
diff --git a/contrib/gitian-build.sh b/contrib/gitian-build.sh
index 53c24e3a87..6ee5df4703 100755
--- a/contrib/gitian-build.sh
+++ b/contrib/gitian-build.sh
@@ -41,7 +41,7 @@ Options:
 -c|--commit	Indicate that the version argument is for a commit or branch
 -u|--url	Specify the URL of the repository. Default is https://github.com/bitcoin/bitcoin
 -v|--verify	Verify the gitian build
--b|--build	Do a gitiain build
+-b|--build	Do a gitian build
 -s|--sign	Make signed binaries for Windows and Mac OSX
 -B|--buildsign	Build both signed and unsigned binaries
 -o|--os		Specify which Operating Systems the build is for. Default is lwx. l for linux, w for windows, x for osx
diff --git a/contrib/gitian-keys/jtimon-key.pgp b/contrib/gitian-keys/jtimon-key.pgp
new file mode 100644
index 0000000000..88d0de1503
--- /dev/null
+++ b/contrib/gitian-keys/jtimon-key.pgp
Binary files differ
diff --git a/contrib/linearize/README.md b/contrib/linearize/README.md
index adc9a559cc..0971e7816b 100644
--- a/contrib/linearize/README.md
+++ b/contrib/linearize/README.md
@@ -32,8 +32,11 @@ Required configuration file settings:
 * `output`: Output directory for linearized `blocks/blkNNNNN.dat` output.
 
 Optional config file setting for linearize-data:
-* `file_timestamp`: Set each file's last-modified time to that of the most
-recent block in that file.
+* `debug_output`: Some printouts may not always be desired. If true, such output
+will be printed.
+* `file_timestamp`: Set each file's last-accessed and last-modified times,
+respectively, to the current time and to the timestamp of the most recent block
+written to the script's blockchain.
 * `genesis`: The hash of the genesis block in the blockchain.
 * `input`: bitcoind blocks/ directory containing blkNNNNN.dat
 * `hashlist`: text file containing list of block hashes created by
@@ -41,6 +44,9 @@ linearize-hashes.py.
 * `max_out_sz`: Maximum size for files created by the `output_file` option.
 (Default: `1000*1000*1000 bytes`)
 * `netmagic`: Network magic number.
+* `out_of_order_cache_sz`: If out-of-order blocks are being read, the block can
+be written to a cache so that the blockchain doesn't have to be seeked again.
+This option specifies the cache size. (Default: `100*1000*1000 bytes`)
 * `rev_hash_bytes`: If true, the block hash list written by linearize-hashes.py
 will be byte-reversed when read by linearize-data.py. See the linearize-hashes
 entry for more information.
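The new `out_of_order_cache_sz` option documented above is easier to follow with a toy example. The sketch below is not the actual linearize-data.py logic (the helper name `linearize()` and its inputs are invented for illustration); it only shows the idea of holding early-arriving blocks in a bounded in-memory cache so the input files do not have to be seeked back over:

    # Minimal sketch of the out-of-order caching idea behind `out_of_order_cache_sz`.
    # Not the real linearize-data.py code; "blocks" here are (height, bytes) pairs.
    def linearize(blocks, cache_limit=100 * 1000 * 1000):
        cache = {}          # height -> raw block bytes, for blocks seen too early
        cache_bytes = 0
        next_height = 0
        output = []
        for height, raw in blocks:
            if height == next_height:
                output.append(raw)
                next_height += 1
                # Flush any cached blocks that are now in sequence.
                while next_height in cache:
                    raw = cache.pop(next_height)
                    cache_bytes -= len(raw)
                    output.append(raw)
                    next_height += 1
            elif cache_bytes + len(raw) <= cache_limit:
                # Out-of-order block: keep it in memory so the input file
                # does not have to be re-read (seeked) later.
                cache[height] = raw
                cache_bytes += len(raw)
            # else: cache full; a real implementation would have to re-read
            # this block from disk later.
        return output

    print(linearize([(1, b"b1"), (0, b"b0"), (2, b"b2")]))  # [b'b0', b'b1', b'b2']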
diff --git a/contrib/linearize/example-linearize.cfg b/contrib/linearize/example-linearize.cfg
index 69f0e9247a..2cc910edfe 100644
--- a/contrib/linearize/example-linearize.cfg
+++ b/contrib/linearize/example-linearize.cfg
@@ -1,4 +1,3 @@
-
 # bitcoind RPC settings (linearize-hashes)
 rpcuser=someuser
 rpcpassword=somepassword
@@ -21,6 +20,9 @@ input=/home/example/.bitcoin/blocks
 #genesis=000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943
 #input=/home/example/.bitcoin/testnet3/blocks
 
+# "output" option causes blockchain files to be written to the given location,
+# with "output_file" ignored. If not used, "output_file" is used instead.
+# output=/home/example/blockchain_directory
 output_file=/home/example/Downloads/bootstrap.dat
 hashlist=hashlist.txt
 
@@ -29,3 +31,12 @@ out_of_order_cache_sz = 100000000
 
 # Do we want the reverse the hash bytes coming from getblockhash?
 rev_hash_bytes = False
+
+# On a new month, do we want to set the access and modify times of the new
+# blockchain file?
+file_timestamp = 0
+# Do we want to split the blockchain files given a new month or specific height?
+split_timestamp = 0
+
+# Do we want debug printouts?
+debug_output = False
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py
index 3fdec134b8..afcec2b60a 100755
--- a/contrib/linearize/linearize-data.py
+++ b/contrib/linearize/linearize-data.py
@@ -134,7 +134,7 @@ class BlockDataCopier:
 		if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
 			self.outF.close()
 			if self.setFileTime:
-				os.utime(outFname, (int(time.time()), highTS))
+				os.utime(self.outFname, (int(time.time()), self.highTS))
 			self.outF = None
 			self.outFname = None
 			self.outFn = self.outFn + 1
@@ -142,12 +142,12 @@ class BlockDataCopier:
 		(blkDate, blkTS) = get_blk_dt(blk_hdr)
 		if self.timestampSplit and (blkDate > self.lastDate):
-			print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
-			lastDate = blkDate
-			if outF:
-				outF.close()
-				if setFileTime:
-					os.utime(outFname, (int(time.time()), highTS))
+			print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
+			self.lastDate = blkDate
+			if self.outF:
+				self.outF.close()
+				if self.setFileTime:
+					os.utime(self.outFname, (int(time.time()), self.highTS))
 			self.outF = None
 			self.outFname = None
 			self.outFn = self.outFn + 1
@@ -155,11 +155,11 @@ class BlockDataCopier:
 		if not self.outF:
 			if self.fileOutput:
-				outFname = self.settings['output_file']
+				self.outFname = self.settings['output_file']
 			else:
-				outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
-			print("Output file " + outFname)
-			self.outF = open(outFname, "wb")
+				self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
+			print("Output file " + self.outFname)
+			self.outF = open(self.outFname, "wb")
 
 		self.outF.write(inhdr)
 		self.outF.write(blk_hdr)
@@ -223,13 +223,16 @@ class BlockDataCopier:
 			blk_hdr = self.inF.read(80)
 			inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
 
-			hash_str = calc_hash_str(blk_hdr)
-			if not hash_str in blkmap:
-				print("Skipping unknown block " + hash_str)
+			self.hash_str = calc_hash_str(blk_hdr)
+			if not self.hash_str in blkmap:
+				# Because blocks can be written to files out-of-order as of 0.10, the script
+				# may encounter blocks it doesn't know about. Treat as debug output.
+				if settings['debug_output'] == 'true':
+					print("Skipping unknown block " + self.hash_str)
 				self.inF.seek(inLen, os.SEEK_CUR)
 				continue
 
-			blkHeight = self.blkmap[hash_str]
+			blkHeight = self.blkmap[self.hash_str]
 			self.blkCountIn += 1
 
 			if self.blkCountOut == blkHeight:
@@ -295,12 +298,15 @@ if __name__ == '__main__':
 		settings['max_out_sz'] = 1000 * 1000 * 1000
 	if 'out_of_order_cache_sz' not in settings:
 		settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
+	if 'debug_output' not in settings:
+		settings['debug_output'] = 'false'
 
 	settings['max_out_sz'] = int(settings['max_out_sz'])
 	settings['split_timestamp'] = int(settings['split_timestamp'])
 	settings['file_timestamp'] = int(settings['file_timestamp'])
 	settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
 	settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
+	settings['debug_output'] = settings['debug_output'].lower()
 
 	if 'output_file' not in settings and 'output' not in settings:
 		print("Missing output file / directory")
diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus
index 73d4f159d8..5995f9f438 100755
--- a/contrib/macdeploy/macdeployqtplus
+++ b/contrib/macdeploy/macdeployqtplus
@@ -340,7 +340,7 @@ def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploym
             # install_name_tool the new id into the binary
             changeInstallName(framework.installName, framework.deployedInstallName, binaryPath, verbose)
 
-        # Copy farmework to app bundle.
+        # Copy framework to app bundle.
         deployedBinaryPath = copyFramework(framework, bundlePath, verbose)
         # Skip the rest if already was deployed.
         if deployedBinaryPath is None:
@@ -492,7 +492,7 @@ ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, h
 ap.add_argument("-sign", dest="sign", action="store_true", default=False, help="sign .app bundle with codesign tool")
 ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used")
 ap.add_argument("-fancy", nargs=1, metavar="plist", default=[], help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work")
-ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's ressources; the language list must be separated with commas, not with whitespace")
+ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's resources; the language list must be separated with commas, not with whitespace")
 ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translation files")
 ap.add_argument("-add-resources", nargs="+", metavar="path", default=[], help="list of additional files or folders to be copied into the bundle's resources; must be the last argument")
 ap.add_argument("-volname", nargs=1, metavar="volname", default=[], help="custom volume name for dmg")
diff --git a/contrib/rpm/README.md b/contrib/rpm/README.md
index aecb3ba84f..e1fd0b317b 100644
--- a/contrib/rpm/README.md
+++ b/contrib/rpm/README.md
@@ -31,7 +31,7 @@ through `Source23` are used.
 Sources 30-39 should be reserved for SELinux related files. Currently only
 `Source30` through `Source32` are used. Until those files are in a tagged
 release, the full URL specified in the RPM spec file will not work. You can get
-them from the git ropository where you retrieved this file.
+them from the git repository where you retrieved this file.
 
 Sources 100+ are for files that are not source tarballs and are not maintained
 in the bitcoin git repository. At present only an SVG version of the Bitcoin
diff --git a/contrib/verify-commits/verify-commits.sh b/contrib/verify-commits/verify-commits.sh
index cfe4f11a0b..b2cebdf1a0 100755
--- a/contrib/verify-commits/verify-commits.sh
+++ b/contrib/verify-commits/verify-commits.sh
@@ -28,9 +28,10 @@ IS_SIGNED () {
 	local PARENTS
 	PARENTS=$(git show -s --format=format:%P $1)
 	for PARENT in $PARENTS; do
-		if IS_SIGNED $PARENT > /dev/null; then
+		if IS_SIGNED $PARENT; then
 			return 0;
 		fi
+		break
 	done
 	if ! "$HAVE_FAILED"; then
 		echo "No parent of $1 was signed with a trusted key!" > /dev/stderr
diff --git a/qa/rpc-tests/bumpfee.py b/qa/rpc-tests/bumpfee.py
index 0ebd79f7f3..ac282796c1 100755
--- a/qa/rpc-tests/bumpfee.py
+++ b/qa/rpc-tests/bumpfee.py
@@ -69,6 +69,7 @@ class BumpFeeTest(BitcoinTestFramework):
         test_rebumping(rbf_node, dest_address)
         test_rebumping_not_replaceable(rbf_node, dest_address)
         test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
+        test_bumpfee_metadata(rbf_node, dest_address)
         test_locked_wallet_fails(rbf_node, dest_address)
         print("Success")
@@ -257,6 +258,14 @@ def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
                     if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
 
 
+def test_bumpfee_metadata(rbf_node, dest_address):
+    rbfid = rbf_node.sendtoaddress(dest_address, 0.00090000, "comment value", "to value")
+    bumped_tx = rbf_node.bumpfee(rbfid)
+    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
+    assert_equal(bumped_wtx["comment"], "comment value")
+    assert_equal(bumped_wtx["to"], "to value")
+
+
 def test_locked_wallet_fails(rbf_node, dest_address):
     rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
     rbf_node.walletlock()
diff --git a/share/certs/PrivateKeyNotes.md b/share/certs/PrivateKeyNotes.md
index da299d168f..8d50144c21 100644
--- a/share/certs/PrivateKeyNotes.md
+++ b/share/certs/PrivateKeyNotes.md
@@ -2,7 +2,7 @@ Code-signing private key notes
 ==
 
 The private keys for these certificates were generated on Gavin's main work machine,
-following the certificate authoritys' recommendations for generating certificate
+following the certificate authority's recommendations for generating certificate
 signing requests.
 
 For OSX, the private key was generated by Keychain.app on Gavin's main work machine.
diff --git a/src/hash.cpp b/src/hash.cpp
index a399ca9252..a14a2386a2 100644
--- a/src/hash.cpp
+++ b/src/hash.cpp
@@ -57,7 +57,7 @@ unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char
         k1 = ROTL32(k1, 15);
         k1 *= c2;
         h1 ^= k1;
-    };
+    }
 }
 
 //----------
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 1692b43f28..e1763c6ad2 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -118,7 +118,7 @@ public:
     void Run()
     {
         ThreadCounter count(*this);
-        while (running) {
+        while (true) {
            std::unique_ptr<WorkItem> i;
            {
                std::unique_lock<std::mutex> lock(cs);
diff --git a/src/init.cpp b/src/init.cpp
index d7d60b0fbc..7c108ac4a6 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1146,8 +1146,11 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
 #ifndef WIN32
     CreatePidFile(GetPidFile(), getpid());
 #endif
-    if (GetBoolArg("-shrinkdebugfile", !fDebug))
+    if (GetBoolArg("-shrinkdebugfile", !fDebug)) {
+        // Do this first since it both loads a bunch of debug.log into memory,
+        // and because this needs to happen before any other debug.log printing
         ShrinkDebugFile();
+    }
 
     if (fPrintToDebugLog)
         OpenDebugLog();
diff --git a/src/net.cpp b/src/net.cpp
index 5a5d94cd11..4d0d781d6d 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -689,6 +689,33 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete
     return true;
 }
 
+void CNode::SetSendVersion(int nVersionIn)
+{
+    // Send version may only be changed in the version message, and
+    // only one version message is allowed per session. We can therefore
+    // treat this value as const and even atomic as long as it's only used
+    // once a version message has been successfully processed. Any attempt to
+    // set this twice is an error.
+    if (nSendVersion != 0) {
+        error("Send version already set for node: %i. Refusing to change from %i to %i", id, nSendVersion, nVersionIn);
+    } else {
+        nSendVersion = nVersionIn;
+    }
+}
+
+int CNode::GetSendVersion() const
+{
+    // The send version should always be explicitly set to
+    // INIT_PROTO_VERSION rather than using this value until SetSendVersion
+    // has been called.
+    if (nSendVersion == 0) {
+        error("Requesting unset send version for node: %i. Using %i", id, INIT_PROTO_VERSION);
+        return INIT_PROTO_VERSION;
+    }
+    return nSendVersion;
+}
+
+
 int CNetMessage::readHeader(const char *pch, unsigned int nBytes)
 {
     // copy data to temporary parsing buffer
@@ -1833,11 +1860,11 @@ bool CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
     if (fAddnode)
         pnode->fAddnode = true;
 
+    GetNodeSignals().InitializeNode(pnode, *this);
     {
         LOCK(cs_vNodes);
         vNodes.push_back(pnode);
     }
-    GetNodeSignals().InitializeNode(pnode, *this);
 
     return true;
 }
@@ -2630,6 +2657,11 @@ void CNode::AskFor(const CInv& inv)
     mapAskFor.insert(std::make_pair(nRequestTime, inv));
 }
 
+bool CConnman::NodeFullyConnected(const CNode* pnode)
+{
+    return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect;
+}
+
 void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
 {
     size_t nMessageSize = msg.data.size();
@@ -2680,7 +2712,7 @@ bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
             break;
         }
     }
-    return found != nullptr && func(found);
+    return found != nullptr && NodeFullyConnected(found) && func(found);
 }
 
 int64_t PoissonNextSend(int64_t nNow, int average_interval_seconds) {
diff --git a/src/net.h b/src/net.h
@@ -162,75 +162,33 @@ public:
     void PushMessage(CNode* pnode, CSerializedNetMsg&& msg);
 
     template<typename Callable>
-    bool ForEachNodeContinueIf(Callable&& func)
-    {
-        LOCK(cs_vNodes);
-        for (auto&& node : vNodes)
-            if(!func(node))
-                return false;
-        return true;
-    };
-
-    template<typename Callable>
-    bool ForEachNodeContinueIf(Callable&& func) const
-    {
-        LOCK(cs_vNodes);
-        for (const auto& node : vNodes)
-            if(!func(node))
-                return false;
-        return true;
-    };
-
-    template<typename Callable, typename CallableAfter>
-    bool ForEachNodeContinueIfThen(Callable&& pre, CallableAfter&& post)
-    {
-        bool ret = true;
-        LOCK(cs_vNodes);
-        for (auto&& node : vNodes)
-            if(!pre(node)) {
-                ret = false;
-                break;
-            }
-        post();
-        return ret;
-    };
-
-    template<typename Callable, typename CallableAfter>
-    bool ForEachNodeContinueIfThen(Callable&& pre, CallableAfter&& post) const
-    {
-        bool ret = true;
-        LOCK(cs_vNodes);
-        for (const auto& node : vNodes)
-            if(!pre(node)) {
-                ret = false;
-                break;
-            }
-        post();
-        return ret;
-    };
-
-    template<typename Callable>
     void ForEachNode(Callable&& func)
     {
         LOCK(cs_vNodes);
-        for (auto&& node : vNodes)
-            func(node);
+        for (auto&& node : vNodes) {
+            if (NodeFullyConnected(node))
+                func(node);
+        }
     };
 
     template<typename Callable>
     void ForEachNode(Callable&& func) const
     {
         LOCK(cs_vNodes);
-        for (const auto& node : vNodes)
-            func(node);
+        for (auto&& node : vNodes) {
+            if (NodeFullyConnected(node))
+                func(node);
+        }
     };
 
     template<typename Callable, typename CallableAfter>
     void ForEachNodeThen(Callable&& pre, CallableAfter&& post)
     {
         LOCK(cs_vNodes);
-        for (auto&& node : vNodes)
-            pre(node);
+        for (auto&& node : vNodes) {
+            if (NodeFullyConnected(node))
+                pre(node);
+        }
         post();
     };
 
@@ -238,8 +196,10 @@ public:
     void ForEachNodeThen(Callable&& pre, CallableAfter&& post) const
     {
         LOCK(cs_vNodes);
-        for (const auto& node : vNodes)
-            pre(node);
+        for (auto&& node : vNodes) {
+            if (NodeFullyConnected(node))
+                pre(node);
+        }
         post();
     };
 
@@ -372,6 +332,9 @@ private:
     void RecordBytesRecv(uint64_t bytes);
     void RecordBytesSent(uint64_t bytes);
 
+    // Whether the node should be passed out in ForEach* callbacks
+    static bool NodeFullyConnected(const CNode* pnode);
+
     // Network usage totals
     CCriticalSection cs_totalBytesRecv;
     CCriticalSection cs_totalBytesSent;
@@ -627,7 +590,7 @@ public:
     const CAddress addr;
     std::string addrName;
     CService addrLocal;
-    int nVersion;
+    std::atomic<int> nVersion;
     // strSubVer is whatever byte array we read from the wire. However, this field is intended
     // to be printed out, displayed to humans in various forms and so on. So we sanitize it and
     // store the sanitized version in cleanSubVer. The original should be used when dealing with
@@ -639,7 +602,7 @@ public:
     bool fAddnode;
     bool fClient;
     const bool fInbound;
-    bool fSuccessfullyConnected;
+    std::atomic_bool fSuccessfullyConnected;
     std::atomic_bool fDisconnect;
     // We use fRelayTxes for two purposes -
     // a) it allows us to not relay tx invs before receiving the peer's version message
@@ -760,25 +723,8 @@ public:
     {
         return nRecvVersion;
     }
-    void SetSendVersion(int nVersionIn)
-    {
-        // Send version may only be changed in the version message, and
-        // only one version message is allowed per session. We can therefore
-        // treat this value as const and even atomic as long as it's only used
-        // once the handshake is complete. Any attempt to set this twice is an
-        // error.
-        assert(nSendVersion == 0);
-        nSendVersion = nVersionIn;
-    }
-
-    int GetSendVersion() const
-    {
-        // The send version should always be explicitly set to
-        // INIT_PROTO_VERSION rather than using this value until the handshake
-        // is complete.
-        assert(nSendVersion != 0);
-        return nSendVersion;
-    }
+    void SetSendVersion(int nVersionIn);
+    int GetSendVersion() const;
 
     CNode* AddRef()
     {
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 1cc86a6626..3a89c7ac42 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1199,50 +1199,51 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         CAddress addrFrom;
         uint64_t nNonce = 1;
         uint64_t nServiceInt;
-        vRecv >> pfrom->nVersion >> nServiceInt >> nTime >> addrMe;
-        pfrom->nServices = ServiceFlags(nServiceInt);
+        ServiceFlags nServices;
+        int nVersion;
+        int nSendVersion;
+        std::string strSubVer;
+        int nStartingHeight = -1;
+        bool fRelay = true;
+
+        vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
+        nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
+        nServices = ServiceFlags(nServiceInt);
         if (!pfrom->fInbound)
         {
-            connman.SetServices(pfrom->addr, pfrom->nServices);
+            connman.SetServices(pfrom->addr, nServices);
         }
-        if (pfrom->nServicesExpected & ~pfrom->nServices)
+        if (pfrom->nServicesExpected & ~nServices)
         {
-            LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, pfrom->nServices, pfrom->nServicesExpected);
+            LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
             connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD, strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
             pfrom->fDisconnect = true;
             return false;
         }
 
-        if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
+        if (nVersion < MIN_PEER_PROTO_VERSION)
         {
             // disconnect from peers older than this proto version
-            LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
+            LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, nVersion);
             connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE, strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
             pfrom->fDisconnect = true;
             return false;
         }
 
-        if (pfrom->nVersion == 10300)
-            pfrom->nVersion = 300;
+        if (nVersion == 10300)
+            nVersion = 300;
         if (!vRecv.empty())
             vRecv >> addrFrom >> nNonce;
         if (!vRecv.empty()) {
-            vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
-            pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
+            vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
         }
         if (!vRecv.empty()) {
-            vRecv >> pfrom->nStartingHeight;
+            vRecv >> nStartingHeight;
         }
-        {
-            LOCK(pfrom->cs_filter);
-            if (!vRecv.empty())
-                vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
-            else
-                pfrom->fRelayTxes = true;
-        }
-
+        if (!vRecv.empty())
+            vRecv >> fRelay;
         // Disconnect if we connected to ourself
         if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce))
         {
@@ -1251,7 +1252,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             return true;
         }
 
-        pfrom->addrLocal = addrMe;
         if (pfrom->fInbound && addrMe.IsRoutable())
         {
             SeenLocal(addrMe);
@@ -1261,9 +1261,24 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (pfrom->fInbound)
             PushNodeVersion(pfrom, connman, GetAdjustedTime());
 
-        pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
+        connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
 
-        if((pfrom->nServices & NODE_WITNESS))
+        pfrom->nServices = nServices;
+        pfrom->addrLocal = addrMe;
+        pfrom->strSubVer = strSubVer;
+        pfrom->cleanSubVer = SanitizeString(strSubVer);
+        pfrom->nStartingHeight = nStartingHeight;
+        pfrom->fClient = !(nServices & NODE_NETWORK);
+        {
+            LOCK(pfrom->cs_filter);
+            pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message
+        }
+
+        // Change version
+        pfrom->SetSendVersion(nSendVersion);
+        pfrom->nVersion = nVersion;
+
+        if((nServices & NODE_WITNESS))
         {
             LOCK(cs_main);
             State(pfrom->GetId())->fHaveWitness = true;
@@ -1275,11 +1290,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
         }
 
-        // Change version
-        connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
-        int nSendVersion = std::min(pfrom->nVersion, PROTOCOL_VERSION);
-        pfrom->SetSendVersion(nSendVersion);
-
         if (!pfrom->fInbound)
         {
             // Advertise our address
@@ -1307,8 +1317,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             connman.MarkAddressGood(pfrom->addr);
         }
 
-        pfrom->fSuccessfullyConnected = true;
-
         std::string remoteAddr;
         if (fLogIPs)
             remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
@@ -1350,7 +1358,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
 
     if (strCommand == NetMsgType::VERACK)
     {
-        pfrom->SetRecvVersion(std::min(pfrom->nVersion, PROTOCOL_VERSION));
+        pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
 
         if (!pfrom->fInbound) {
             // Mark this node as currently connected, so we update its timestamp later.
@@ -1378,6 +1386,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             nCMPCTBLOCKVersion = 1;
             connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
         }
+        pfrom->fSuccessfullyConnected = true;
     }
 
@@ -2716,8 +2725,8 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
 {
     const Consensus::Params& consensusParams = Params().GetConsensus();
     {
-        // Don't send anything until we get its version message
-        if (pto->nVersion == 0 || pto->fDisconnect)
+        // Don't send anything until the version handshake is complete
+        if (!pto->fSuccessfullyConnected || pto->fDisconnect)
             return true;
 
         // If we get here, the outgoing message serialization version is set and can't change.
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index f86b09644b..1c1acb6b10 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -770,7 +770,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer
     if (!clientModel)
         return;
 
-    // Prevent orphan statusbar messages (e.g. hover Quit in main menu, wait until chain-sync starts -> garbelled text)
+    // Prevent orphan statusbar messages (e.g. hover Quit in main menu, wait until chain-sync starts -> garbled text)
     statusBar()->clearMessage();
 
     // Acquire current block source
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index ea80a1f549..5d6c0e2e31 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -536,7 +536,7 @@ int TableViewLastColumnResizingFixer::getAvailableWidthForColumn(int column)
     return nResult;
 }
 
-// Make sure we don't make the columns wider than the tables viewport width.
+// Make sure we don't make the columns wider than the table's viewport width.
 void TableViewLastColumnResizingFixer::adjustTableColumnsWidth()
 {
     disconnectViewHeadersSignals();
@@ -570,7 +570,7 @@ void TableViewLastColumnResizingFixer::on_sectionResized(int logicalIndex, int o
     }
 }
 
-// When the tabless geometry is ready, we manually perform the stretch of the "Message" column,
+// When the table's geometry is ready, we manually perform the stretch of the "Message" column,
 // as the "Stretch" resize mode does not allow for interactive resizing.
 void TableViewLastColumnResizingFixer::on_geometriesChanged()
 {
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index 5765b2ec4a..913aa5e24b 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -140,7 +140,7 @@ namespace GUIUtil
      * Also makes sure the column widths are never larger than the table's viewport.
     * In Qt, all columns are resizable from the right, but it's not intuitive resizing the last column from the right.
     * Usually our second to last columns behave as if stretched, and when on strech mode, columns aren't resizable
-     * interactively or programatically.
+     * interactively or programmatically.
     *
     * This helper object takes care of this issue.
     *
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index e46f55a8aa..c594daca0d 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -697,7 +697,7 @@ public:
     bool found;
     CValidationState state;
 
-    submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {};
+    submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {}
 
 protected:
     virtual void BlockChecked(const CBlock& block, const CValidationState& stateIn) {
@@ -705,7 +705,7 @@ protected:
             return;
         found = true;
         state = stateIn;
-    };
+    }
 };
 
 UniValue submitblock(const JSONRPCRequest& request)
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 9345a44fb0..a8f09ba6ae 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -55,6 +55,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
     dummyNode1.SetSendVersion(PROTOCOL_VERSION);
     GetNodeSignals().InitializeNode(&dummyNode1, *connman);
     dummyNode1.nVersion = 1;
+    dummyNode1.fSuccessfullyConnected = true;
     Misbehaving(dummyNode1.GetId(), 100); // Should get banned
     SendMessages(&dummyNode1, *connman, interruptDummy);
     BOOST_CHECK(connman->IsBanned(addr1));
@@ -65,6 +66,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
     dummyNode2.SetSendVersion(PROTOCOL_VERSION);
     GetNodeSignals().InitializeNode(&dummyNode2, *connman);
     dummyNode2.nVersion = 1;
+    dummyNode2.fSuccessfullyConnected = true;
     Misbehaving(dummyNode2.GetId(), 50);
     SendMessages(&dummyNode2, *connman, interruptDummy);
     BOOST_CHECK(!connman->IsBanned(addr2)); // 2 not banned yet...
@@ -85,6 +87,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
     dummyNode1.SetSendVersion(PROTOCOL_VERSION);
     GetNodeSignals().InitializeNode(&dummyNode1, *connman);
     dummyNode1.nVersion = 1;
+    dummyNode1.fSuccessfullyConnected = true;
     Misbehaving(dummyNode1.GetId(), 100);
     SendMessages(&dummyNode1, *connman, interruptDummy);
     BOOST_CHECK(!connman->IsBanned(addr1));
@@ -110,6 +113,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
     dummyNode.SetSendVersion(PROTOCOL_VERSION);
     GetNodeSignals().InitializeNode(&dummyNode, *connman);
     dummyNode.nVersion = 1;
+    dummyNode.fSuccessfullyConnected = true;
     Misbehaving(dummyNode.GetId(), 100);
     SendMessages(&dummyNode, *connman, interruptDummy);
diff --git a/src/test/test_bitcoin_fuzzy.cpp b/src/test/test_bitcoin_fuzzy.cpp
index 376d8e428a..c4983f6f5c 100644
--- a/src/test/test_bitcoin_fuzzy.cpp
+++ b/src/test/test_bitcoin_fuzzy.cpp
@@ -18,6 +18,7 @@
 #include "streams.h"
 #include "undo.h"
 #include "version.h"
+#include "pubkey.h"
 
 #include <stdint.h>
 #include <unistd.h>
@@ -60,6 +61,7 @@ bool read_stdin(std::vector<char> &data) {
 
 int main(int argc, char **argv)
 {
+    ECCVerifyHandle globalVerifyHandle;
     std::vector<char> buffer;
     if (!read_stdin(buffer)) return 0;
diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp
index bae0eff7e5..e2b5573abd 100644
--- a/src/test/versionbits_tests.cpp
+++ b/src/test/versionbits_tests.cpp
@@ -292,7 +292,7 @@ BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
         blocksToMine--;
         nTime += 600;
         nHeight += 1;
-    };
+    }
 
     nTime = nTimeout;
 
     // FAILED is only triggered at the end of a period, so CBV should be setting
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index a1a37dac75..236527a2ba 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -171,6 +171,8 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
 bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const
 {
+    LOCK(cs);
+
     setEntries parentHashes;
     const CTransaction &tx = entry.GetTx();
diff --git a/src/util.cpp b/src/util.cpp
index 08ee6b8b87..ba157625d8 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -723,13 +723,17 @@ void AllocateFileRange(FILE *file, unsigned int offset, unsigned int length) {
 
 void ShrinkDebugFile()
 {
+    // Amount of debug.log to save at end when shrinking (must fit in memory)
+    constexpr size_t RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000;
     // Scroll debug.log if it's getting too big
     boost::filesystem::path pathLog = GetDataDir() / "debug.log";
     FILE* file = fopen(pathLog.string().c_str(), "r");
-    if (file && boost::filesystem::file_size(pathLog) > 10 * 1000000)
+    // If debug.log file is more than 10% bigger the RECENT_DEBUG_HISTORY_SIZE
+    // trim it down by saving only the last RECENT_DEBUG_HISTORY_SIZE bytes
+    if (file && boost::filesystem::file_size(pathLog) > 11 * (RECENT_DEBUG_HISTORY_SIZE / 10))
     {
         // Restart the file with some of the end
-        std::vector <char> vch(200000,0);
+        std::vector<char> vch(RECENT_DEBUG_HISTORY_SIZE, 0);
         fseek(file, -((long)vch.size()), SEEK_END);
         int nBytes = fread(vch.data(), 1, vch.size(), file);
         fclose(file);
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index bfc738afad..45b572aa2e 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2416,7 +2416,7 @@ UniValue listunspent(const JSONRPCRequest& request)
             "    \"address\" : \"address\",    (string) the bitcoin address\n"
             "    \"account\" : \"account\",    (string) DEPRECATED. The associated account, or \"\" for the default account\n"
             "    \"scriptPubKey\" : \"key\",   (string) the script key\n"
-            "    \"amount\" : x.xxx,           (numeric) the transaction amount in " + CURRENCY_UNIT + "\n"
+            "    \"amount\" : x.xxx,           (numeric) the transaction output amount in " + CURRENCY_UNIT + "\n"
             "    \"confirmations\" : n,        (numeric) The number of confirmations\n"
             "    \"redeemScript\" : n          (string) The redeemScript if scriptPubKey is P2SH\n"
             "    \"spendable\" : xxx,          (bool) Whether we have the private keys to spend this output\n"
@@ -2664,6 +2664,33 @@ UniValue fundrawtransaction(const JSONRPCRequest& request)
     return result;
 }
 
+// Calculate the size of the transaction assuming all signatures are max size
+// Use DummySignatureCreator, which inserts 72 byte signatures everywhere.
+// TODO: re-use this in CWallet::CreateTransaction (right now
+// CreateTransaction uses the constructed dummy-signed tx to do a priority
+// calculation, but we should be able to refactor after priority is removed).
+// NOTE: this requires that all inputs must be in mapWallet (eg the tx should
+// be IsAllFromMe).
+int64_t CalculateMaximumSignedTxSize(const CTransaction &tx)
+{
+    CMutableTransaction txNew(tx);
+    std::vector<pair<CWalletTx *, unsigned int>> vCoins;
+    // Look up the inputs. We should have already checked that this transaction
+    // IsAllFromMe(ISMINE_SPENDABLE), so every input should already be in our
+    // wallet, with a valid index into the vout array.
+    for (auto& input : tx.vin) {
+        const auto mi = pwalletMain->mapWallet.find(input.prevout.hash);
+        assert(mi != pwalletMain->mapWallet.end() && input.prevout.n < mi->second.tx->vout.size());
+        vCoins.emplace_back(make_pair(&(mi->second), input.prevout.n));
+    }
+    if (!pwalletMain->DummySignTx(txNew, vCoins)) {
+        // This should never happen, because IsAllFromMe(ISMINE_SPENDABLE)
+        // implies that we can sign for every input.
+        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction contains inputs that cannot be signed");
+    }
+    return GetVirtualTransactionSize(txNew);
+}
+
 UniValue bumpfee(const JSONRPCRequest& request)
 {
     if (!EnsureWalletIsAvailable(request.fHelp)) {
@@ -2706,6 +2733,7 @@ UniValue bumpfee(const JSONRPCRequest& request)
             "  \"txid\": \"value\",    (string) The id of the new transaction\n"
             "  \"origfee\": n,         (numeric) Fee of the replaced transaction\n"
             "  \"fee\": n,             (numeric) Fee of the new transaction\n"
+            "  \"errors\": [ str... ]  (json array of strings) Errors encountered during processing (may be empty)\n"
             "}\n"
             "\nExamples:\n"
            "\nBump the fee, get the new transaction\'s txid\n" +
@@ -2769,9 +2797,9 @@ UniValue bumpfee(const JSONRPCRequest& request)
         throw JSONRPCError(RPC_MISC_ERROR, "Transaction does not have a change output");
     }
 
-    // signature sizes can vary by a byte, so add 1 for each input when calculating the new fee
+    // Calculate the expected size of the new transaction.
     int64_t txSize = GetVirtualTransactionSize(*(wtx.tx));
-    const int64_t maxNewTxSize = txSize + wtx.tx->vin.size();
+    const int64_t maxNewTxSize = CalculateMaximumSignedTxSize(*wtx.tx);
 
     // optional parameters
     bool specifiedConfirmTarget = false;
@@ -2845,8 +2873,12 @@ UniValue bumpfee(const JSONRPCRequest& request)
         nNewFeeRate = CFeeRate(nNewFee, maxNewTxSize);
 
         // New fee rate must be at least old rate + minimum incremental relay rate
-        if (nNewFeeRate.GetFeePerK() < nOldFeeRate.GetFeePerK() + walletIncrementalRelayFee.GetFeePerK()) {
-            nNewFeeRate = CFeeRate(nOldFeeRate.GetFeePerK() + walletIncrementalRelayFee.GetFeePerK());
+        // walletIncrementalRelayFee.GetFeePerK() should be exact, because it's initialized
+        // in that unit (fee per kb).
+        // However, nOldFeeRate is a calculated value from the tx fee/size, so
+        // add 1 satoshi to the result, because it may have been rounded down.
+        if (nNewFeeRate.GetFeePerK() < nOldFeeRate.GetFeePerK() + 1 + walletIncrementalRelayFee.GetFeePerK()) {
+            nNewFeeRate = CFeeRate(nOldFeeRate.GetFeePerK() + 1 + walletIncrementalRelayFee.GetFeePerK());
             nNewFee = nNewFeeRate.GetFee(maxNewTxSize);
         }
     }
@@ -2912,25 +2944,39 @@ UniValue bumpfee(const JSONRPCRequest& request)
     // commit/broadcast the tx
     CReserveKey reservekey(pwalletMain);
     CWalletTx wtxBumped(pwalletMain, MakeTransactionRef(std::move(tx)));
+    wtxBumped.mapValue = wtx.mapValue;
     wtxBumped.mapValue["replaces_txid"] = hash.ToString();
+    wtxBumped.vOrderForm = wtx.vOrderForm;
+    wtxBumped.strFromAccount = wtx.strFromAccount;
+    wtxBumped.fTimeReceivedIsTxTime = true;
+    wtxBumped.fFromMe = true;
     CValidationState state;
-    if (!pwalletMain->CommitTransaction(wtxBumped, reservekey, g_connman.get(), state) || !state.IsValid()) {
+    if (!pwalletMain->CommitTransaction(wtxBumped, reservekey, g_connman.get(), state)) {
+        // NOTE: CommitTransaction never returns false, so this should never happen.
         throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Error: The transaction was rejected! Reason given: %s", state.GetRejectReason()));
     }
 
+    UniValue vErrors(UniValue::VARR);
+    if (state.IsInvalid()) {
+        // This can happen if the mempool rejected the transaction. Report
+        // what happened in the "errors" response.
+        vErrors.push_back(strprintf("Error: The transaction was rejected: %s", FormatStateMessage(state)));
+    }
+
     // mark the original tx as bumped
     if (!pwalletMain->MarkReplaced(wtx.GetHash(), wtxBumped.GetHash())) {
         // TODO: see if JSON-RPC has a standard way of returning a response
         // along with an exception. It would be good to return information about
         // wtxBumped to the caller even if marking the original transaction
         // replaced does not succeed for some reason.
-        throw JSONRPCError(RPC_WALLET_ERROR, "Error: Created new bumpfee transaction but could not mark the original transaction as replaced.");
+        vErrors.push_back("Error: Created new bumpfee transaction but could not mark the original transaction as replaced.");
     }
 
     UniValue result(UniValue::VOBJ);
     result.push_back(Pair("txid", wtxBumped.GetHash().GetHex()));
     result.push_back(Pair("origfee", ValueFromAmount(nOldFee)));
     result.push_back(Pair("fee", ValueFromAmount(nNewFee)));
+    result.push_back(Pair("errors", vErrors));
 
     return result;
 }
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 9a5f35b6e3..a7b8022bd9 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2583,21 +2583,9 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
                              std::numeric_limits<unsigned int>::max() - (fWalletRbf ? 2 : 1)));
 
                 // Fill in dummy signatures for fee calculation.
-                int nIn = 0;
-                for (const auto& coin : setCoins)
-                {
-                    const CScript& scriptPubKey = coin.first->tx->vout[coin.second].scriptPubKey;
-                    SignatureData sigdata;
-
-                    if (!ProduceSignature(DummySignatureCreator(this), scriptPubKey, sigdata))
-                    {
-                        strFailReason = _("Signing transaction failed");
-                        return false;
-                    } else {
-                        UpdateTransaction(txNew, nIn, sigdata);
-                    }
-
-                    nIn++;
+                if (!DummySignTx(txNew, setCoins)) {
+                    strFailReason = _("Signing transaction failed");
+                    return false;
                 }
 
                 unsigned int nBytes = GetVirtualTransactionSize(txNew);
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 200ec0cba2..1de04ae16a 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -13,6 +13,7 @@
 #include "utilstrencodings.h"
 #include "validationinterface.h"
 #include "script/ismine.h"
+#include "script/sign.h"
 #include "wallet/crypter.h"
 #include "wallet/walletdb.h"
 #include "wallet/rpcwallet.h"
@@ -796,6 +797,8 @@ public:
     void ListAccountCreditDebit(const std::string& strAccount, std::list<CAccountingEntry>& entries);
     bool AddAccountingEntry(const CAccountingEntry&);
     bool AddAccountingEntry(const CAccountingEntry&, CWalletDB *pwalletdb);
+    template <typename ContainerType>
+    bool DummySignTx(CMutableTransaction &txNew, const ContainerType &coins);
 
     static CFeeRate minTxFee;
     static CFeeRate fallbackFee;
@@ -1028,4 +1031,28 @@ public:
     }
 };
 
+// Helper for producing a bunch of max-sized low-S signatures (eg 72 bytes)
+// ContainerType is meant to hold pair<CWalletTx *, int>, and be iterable
+// so that each entry corresponds to each vIn, in order.
+template <typename ContainerType>
+bool CWallet::DummySignTx(CMutableTransaction &txNew, const ContainerType &coins)
+{
+    // Fill in dummy signatures for fee calculation.
+    int nIn = 0;
+    for (const auto& coin : coins)
+    {
+        const CScript& scriptPubKey = coin.first->tx->vout[coin.second].scriptPubKey;
+        SignatureData sigdata;
+
+        if (!ProduceSignature(DummySignatureCreator(this), scriptPubKey, sigdata))
+        {
+            return false;
+        } else {
+            UpdateTransaction(txNew, nIn, sigdata);
+        }
+
+        nIn++;
+    }
+    return true;
+}
+
 #endif // BITCOIN_WALLET_WALLET_H
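A note on the bumpfee fee-rate floor changed above: the replacement's fee rate must now be at least the old rate, plus one satoshi to compensate for the old rate having been rounded down, plus the incremental relay fee, and that rate is applied to the maximum signed size rather than to the old size plus one byte per input. A rough, self-contained illustration with made-up numbers follows (this is not Bitcoin Core code, the helper name `min_bumped_fee` is invented, and CFeeRate rounding is simplified):

    # Rough illustration of the bumped-fee floor (units: satoshis and vbytes).
    def min_bumped_fee(old_fee, old_vsize, max_new_vsize, incremental_relay_per_kb):
        # Old fee rate in sat/kB, rounded down - hence the extra +1 below.
        old_rate_per_kb = old_fee * 1000 // old_vsize
        min_new_rate_per_kb = old_rate_per_kb + 1 + incremental_relay_per_kb
        # Fee required at that rate for the worst-case (fully signed) size.
        return min_new_rate_per_kb * max_new_vsize // 1000

    # e.g. a 10000 sat fee on a 226-vbyte tx, bumped with a 1000 sat/kB
    # incremental relay fee and a 250-vbyte worst-case signed size:
    print(min_bumped_fee(10000, 226, 250, 1000))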
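Likewise, the ShrinkDebugFile() change above replaces the hard-coded 200000-byte tail with RECENT_DEBUG_HISTORY_SIZE and only trims once the log is roughly 10% over that size. A minimal sketch of the same rule, not the real C++ implementation (the path argument and helper name are assumptions):

    # Sketch of the log-shrinking rule: once the file is ~10% larger than the
    # history we want to keep, rewrite it with only the tail.
    import os

    RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000  # bytes of debug.log to keep

    def shrink_debug_file(path):
        if not os.path.exists(path):
            return
        if os.path.getsize(path) <= 11 * (RECENT_DEBUG_HISTORY_SIZE // 10):
            return  # not yet 10% over the limit
        with open(path, "rb") as f:
            f.seek(-RECENT_DEBUG_HISTORY_SIZE, os.SEEK_END)
            tail = f.read()
        with open(path, "wb") as f:
            f.write(tail)  # restart the file with some of the end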