47 files changed, 1072 insertions, 207 deletions
diff --git a/depends/packages/native_cdrkit.mk b/depends/packages/native_cdrkit.mk index 8243458ec8..14c37a0fc7 100644 --- a/depends/packages/native_cdrkit.mk +++ b/depends/packages/native_cdrkit.mk @@ -9,8 +9,10 @@ define $(package)_preprocess_cmds patch -p1 < $($(package)_patch_dir)/cdrkit-deterministic.patch endef +# Starting with 10.1, GCC defaults to -fno-common, resulting in linking errors. +# Pass -fcommon to retain the legacy behaviour. define $(package)_config_cmds - cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) + cmake -DCMAKE_INSTALL_PREFIX=$(build_prefix) -DCMAKE_C_FLAGS="-fcommon" endef define $(package)_build_cmds diff --git a/doc/files.md b/doc/files.md index cd23d547bb..5475826329 100644 --- a/doc/files.md +++ b/doc/files.md @@ -50,7 +50,7 @@ Subdirectory | File(s) | Description `indexes/blockfilter/basic/` | `fltrNNNNN.dat`<sup>[\[2\]](#note2)</sup> | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic` `wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, a wallet resides in the data directory `./` | `banlist.dat` | Stores the IPs/subnets of banned nodes -`./` | `bitcoin.conf` | Contains [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`; can be specified by `-conf` option +`./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. Path can be specified by `-conf` option `./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option `./` | `debug.log` | Contains debug information and general logging generated by `bitcoind` or `bitcoin-qt`; can be specified by `-debuglogfile` option `./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation @@ -58,6 +58,7 @@ Subdirectory | File(s) | Description `./` | `mempool.dat` | Dump of the mempool's transactions `./` | `onion_private_key` | Cached Tor hidden service private key for `-listenonion` option `./` | `peers.dat` | Peer IP address database (custom format) +`./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [bitcoin.conf](bitcoin-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. 
Path can be specified with `-settings` option `./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option `./` | `.lock` | Data directory lock file diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index 3dcce92ab5..b04cc12059 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -101,6 +101,11 @@ static bool AppInit(int argc, char* argv[]) } } + if (!gArgs.InitSettings(error)) { + InitError(Untranslated(error)); + return false; + } + // -server defaults to true for bitcoind but not for the GUI so do this here gArgs.SoftSetBoolArg("-server", true); // Set this early so that parameter interactions go to console diff --git a/src/consensus/validation.h b/src/consensus/validation.h index a79e7b9d12..8de7a8f2d8 100644 --- a/src/consensus/validation.h +++ b/src/consensus/validation.h @@ -30,12 +30,16 @@ enum class TxValidationResult { TX_MISSING_INPUTS, //!< transaction was missing some of its inputs TX_PREMATURE_SPEND, //!< transaction spends a coinbase too early, or violates locktime/sequence locks /** - * Transaction might be missing a witness, have a witness prior to SegWit + * Transaction might have a witness prior to SegWit * activation, or witness may have been malleated (which includes * non-standard witnesses). */ TX_WITNESS_MUTATED, /** + * Transaction is missing a witness. + */ + TX_WITNESS_STRIPPED, + /** * Tx already in mempool or conflicts with a tx in the chain * (if it conflicts with another tx in mempool, we use MEMPOOL_POLICY as it failed to reach the RBF threshold) * Currently this is only used if the transaction already exists in the mempool or on chain. @@ -75,37 +79,39 @@ enum class BlockValidationResult { * by TxValidationState and BlockValidationState for validation information on transactions * and blocks respectively. 
*/ template <typename Result> -class ValidationState { +class ValidationState +{ private: - enum mode_state { - MODE_VALID, //!< everything ok - MODE_INVALID, //!< network rule violation (DoS value may be set) - MODE_ERROR, //!< run-time error - } m_mode{MODE_VALID}; + enum class ModeState { + M_VALID, //!< everything ok + M_INVALID, //!< network rule violation (DoS value may be set) + M_ERROR, //!< run-time error + } m_mode{ModeState::M_VALID}; Result m_result{}; std::string m_reject_reason; std::string m_debug_message; + public: bool Invalid(Result result, - const std::string &reject_reason="", - const std::string &debug_message="") + const std::string& reject_reason = "", + const std::string& debug_message = "") { m_result = result; m_reject_reason = reject_reason; m_debug_message = debug_message; - if (m_mode != MODE_ERROR) m_mode = MODE_INVALID; + if (m_mode != ModeState::M_ERROR) m_mode = ModeState::M_INVALID; return false; } bool Error(const std::string& reject_reason) { - if (m_mode == MODE_VALID) + if (m_mode == ModeState::M_VALID) m_reject_reason = reject_reason; - m_mode = MODE_ERROR; + m_mode = ModeState::M_ERROR; return false; } - bool IsValid() const { return m_mode == MODE_VALID; } - bool IsInvalid() const { return m_mode == MODE_INVALID; } - bool IsError() const { return m_mode == MODE_ERROR; } + bool IsValid() const { return m_mode == ModeState::M_VALID; } + bool IsInvalid() const { return m_mode == ModeState::M_INVALID; } + bool IsError() const { return m_mode == ModeState::M_ERROR; } Result GetResult() const { return m_result; } std::string GetRejectReason() const { return m_reject_reason; } std::string GetDebugMessage() const { return m_debug_message; } diff --git a/src/init.cpp b/src/init.cpp index 2b62d5d306..acf9f8bd91 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -397,7 +397,7 @@ void SetupServerArgs(NodeContext& node) #endif gArgs.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); gArgs.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); - gArgs.AddArg("-conf=<file>", strprintf("Specify configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + gArgs.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); gArgs.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); gArgs.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); gArgs.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). 
In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); @@ -418,6 +418,7 @@ void SetupServerArgs(NodeContext& node) "(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)", MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); gArgs.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); gArgs.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + gArgs.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #ifndef WIN32 gArgs.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #else @@ -455,6 +456,7 @@ void SetupServerArgs(NodeContext& node) gArgs.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); gArgs.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); gArgs.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + gArgs.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); gArgs.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. 
(minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -1372,7 +1374,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) assert(!node.banman); node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", &uiInterface, gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); assert(!node.connman); - node.connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()))); + node.connman = MakeUnique<CConnman>(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()), gArgs.GetBoolArg("-networkactive", true)); // Make mempool generally available in the node context. For example the connection manager, wallet, or RPC threads, // which are all started after this, may use it from the node context. assert(!node.mempool); diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp index 834a16ecf5..33f0dac263 100644 --- a/src/interfaces/node.cpp +++ b/src/interfaces/node.cpp @@ -66,6 +66,7 @@ public: bool softSetArg(const std::string& arg, const std::string& value) override { return gArgs.SoftSetArg(arg, value); } bool softSetBoolArg(const std::string& arg, bool value) override { return gArgs.SoftSetBoolArg(arg, value); } void selectParams(const std::string& network) override { SelectParams(network); } + bool initSettings(std::string& error) override { return gArgs.InitSettings(error); } uint64_t getAssumedBlockchainSize() override { return Params().AssumedBlockchainSize(); } uint64_t getAssumedChainStateSize() override { return Params().AssumedChainStateSize(); } std::string getNetwork() override { return Params().NetworkIDString(); } diff --git a/src/interfaces/node.h b/src/interfaces/node.h index b88b5bc14e..a9680c42b5 100644 --- a/src/interfaces/node.h +++ b/src/interfaces/node.h @@ -66,6 +66,11 @@ public: //! Choose network parameters. virtual void selectParams(const std::string& network) = 0; + //! Read and update <datadir>/settings.json file with saved settings. This + //! needs to be called after selectParams() because the settings file + //! location is network-specific. + virtual bool initSettings(std::string& error) = 0; + //! Get the (assumed) blockchain size. 
virtual uint64_t getAssumedBlockchainSize() = 0; diff --git a/src/net.cpp b/src/net.cpp index cf5757d6c0..0c56cddbdc 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2253,7 +2253,7 @@ void Discover() void CConnman::SetNetworkActive(bool active) { - LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active); + LogPrintf("%s: %s\n", __func__, active); if (fNetworkActive == active) { return; @@ -2264,12 +2264,14 @@ void CConnman::SetNetworkActive(bool active) uiInterface.NotifyNetworkActiveChanged(fNetworkActive); } -CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In) : nSeed0(nSeed0In), nSeed1(nSeed1In) +CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, bool network_active) + : nSeed0(nSeed0In), nSeed1(nSeed1In) { SetTryNewOutboundPeer(false); Options connOptions; Init(connOptions); + SetNetworkActive(network_active); } NodeId CConnman::GetNewNodeId() @@ -181,7 +181,7 @@ public: } } - CConnman(uint64_t seed0, uint64_t seed1); + CConnman(uint64_t seed0, uint64_t seed1, bool network_active = true); ~CConnman(); bool Start(CScheduler& scheduler, const Options& options); @@ -965,11 +965,11 @@ public: } - void AddInventoryKnown(const CInv& inv) + void AddKnownTx(const uint256& hash) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); - m_tx_relay->filterInventoryKnown.insert(inv.hash); + m_tx_relay->filterInventoryKnown.insert(hash); } } diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 7a58de35d7..5f1e7318f3 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -75,6 +75,8 @@ static const unsigned int MAX_INV_SZ = 50000; static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100; /** Maximum number of announced transactions from a peer */ static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ; +/** How many microseconds to delay requesting transactions via txids, if we have wtxid-relaying peers */ +static constexpr std::chrono::microseconds TXID_RELAY_DELAY{std::chrono::seconds{2}}; /** How many microseconds to delay requesting transactions from inbound peers */ static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}}; /** How long to wait (in microseconds) before downloading a transaction from an additional peer */ @@ -151,6 +153,7 @@ struct COrphanTx { }; RecursiveMutex g_cs_orphans; std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans); +std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUARDED_BY(g_cs_orphans); void EraseOrphansFor(NodeId peer); @@ -187,6 +190,15 @@ namespace { * million to make it highly unlikely for users to have issues with this * filter. * + * We only need to add wtxids to this filter. For non-segwit + * transactions, the txid == wtxid, so this only prevents us from + * re-downloading non-segwit transactions when communicating with + * non-wtxidrelay peers -- which is important for avoiding malleation + * attacks that could otherwise interfere with transaction relay from + * non-wtxidrelay peers. For communicating with wtxidrelay peers, having + * the reject filter store wtxids is exactly what we want to avoid + * redownload of a rejected transaction. + * * Memory used: 1.3 MB */ std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main); @@ -218,6 +230,9 @@ namespace { /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + /** Number of peers with wtxid relay. 
*/ + int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + /** Number of outbound peers with m_chain_sync.m_protect. */ int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; @@ -409,6 +424,9 @@ struct CNodeState { //! A rolling bloom filter of all announced tx CInvs to this peer. CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001}; + //! Whether this peer relays txs via wtxid + bool m_wtxid_relay{false}; + CNodeState(CAddress addrIn, std::string addrNameIn, bool is_inbound, bool is_manual) : address(addrIn), name(std::move(addrNameIn)), m_is_inbound(is_inbound), m_is_manual_connection (is_manual) @@ -760,7 +778,7 @@ void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_ } } -std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::chrono::microseconds process_time; const auto last_request_time = GetTxRequestTime(txid); @@ -776,6 +794,9 @@ std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chron // We delay processing announcements from inbound peers if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY; + // We delay processing announcements from peers that use txid-relay (instead of wtxid) + if (use_txid_delay) process_time += TXID_RELAY_DELAY; + return process_time; } @@ -793,7 +814,7 @@ void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds // Calculate the time to try requesting this transaction. Use // fPreferredDownload as a proxy for outbound peers. 
- const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload); + const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0); peer_download_state.m_tx_process_time.emplace(process_time, txid); } @@ -830,14 +851,15 @@ void PeerLogicValidation::InitializeNode(CNode *pnode) { void PeerLogicValidation::ReattemptInitialBroadcast(CScheduler& scheduler) const { - std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); + std::map<uint256, uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); - for (const uint256& txid : unbroadcast_txids) { + for (const auto& elem : unbroadcast_txids) { // Sanity check: all unbroadcast txns should exist in the mempool - if (m_mempool.exists(txid)) { - RelayTransaction(txid, *connman); + if (m_mempool.exists(elem.first)) { + LOCK(cs_main); + RelayTransaction(elem.first, elem.second, *connman); } else { - m_mempool.RemoveUnbroadcastTx(txid, true); + m_mempool.RemoveUnbroadcastTx(elem.first, true); } } @@ -869,6 +891,8 @@ void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTim assert(nPeersWithValidatedDownloads >= 0); g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(g_outbound_peers_with_protect_from_disconnect >= 0); + g_wtxid_relay_peers -= state->m_wtxid_relay; + assert(g_wtxid_relay_peers >= 0); mapNodeState.erase(nodeid); @@ -878,6 +902,7 @@ void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTim assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); assert(g_outbound_peers_with_protect_from_disconnect == 0); + assert(g_wtxid_relay_peers == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } @@ -936,6 +961,8 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()}); assert(ret.second); g_orphan_list.push_back(ret.first); + // Allow for lookups in the orphan pool by wtxid, as well as txid + g_orphans_by_wtxid.emplace(tx->GetWitnessHash(), ret.first); for (const CTxIn& txin : tx->vin) { mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); } @@ -972,6 +999,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) it_last->second.list_pos = old_pos; } g_orphan_list.pop_back(); + g_orphans_by_wtxid.erase(it->second.tx->GetWitnessHash()); mapOrphanTransactions.erase(it); return 1; @@ -1141,6 +1169,7 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, case TxValidationResult::TX_MISSING_INPUTS: case TxValidationResult::TX_PREMATURE_SPEND: case TxValidationResult::TX_WITNESS_MUTATED: + case TxValidationResult::TX_WITNESS_STRIPPED: case TxValidationResult::TX_CONFLICT: case TxValidationResult::TX_MEMPOOL_POLICY: break; @@ -1181,14 +1210,15 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); // Blocks don't typically have more than 4000 transactions, so this should - // be at least six blocks (~1 hr) worth of transactions that we can store. + // be at least six blocks (~1 hr) worth of transactions that we can store, + // inserting both a txid and wtxid for every observed transaction. 
// If the number of transactions appearing in a block goes up, or if we are // seeing getdata requests more than an hour after initial announcement, we // can increase this number. // The false positive rate of 1/1M should come out to less than 1 // transaction per day that would be inadvertently ignored (which is the // same probability that we have in the reject filter). - g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001)); + g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); const Consensus::Params& consensusParams = Params().GetConsensus(); // Stale tip checking and peer eviction are on two different timers, but we @@ -1244,6 +1274,9 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb LOCK(g_cs_recent_confirmed_transactions); for (const auto& ptx : pblock->vtx) { g_recent_confirmed_transactions->insert(ptx->GetHash()); + if (ptx->GetHash() != ptx->GetWitnessHash()) { + g_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); + } } } } @@ -1397,6 +1430,7 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO { case MSG_TX: case MSG_WITNESS_TX: + case MSG_WTX: { assert(recentRejects); if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) @@ -1411,7 +1445,11 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO { LOCK(g_cs_orphans); - if (mapOrphanTransactions.count(inv.hash)) return true; + if (inv.type != MSG_WTX && mapOrphanTransactions.count(inv.hash)) { + return true; + } else if (inv.type == MSG_WTX && g_orphans_by_wtxid.count(inv.hash)) { + return true; + } } { @@ -1419,8 +1457,8 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO if (g_recent_confirmed_transactions->contains(inv.hash)) return true; } - return recentRejects->contains(inv.hash) || - mempool.exists(inv.hash); + const bool by_wtxid = (inv.type == MSG_WTX); + return recentRejects->contains(inv.hash) || mempool.exists(inv.hash, by_wtxid); } case MSG_BLOCK: case MSG_WITNESS_BLOCK: @@ -1430,11 +1468,17 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO return true; } -void RelayTransaction(const uint256& txid, const CConnman& connman) +void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) { - connman.ForEachNode([&txid](CNode* pnode) + connman.ForEachNode([&txid, &wtxid](CNode* pnode) { - pnode->PushTxInventory(txid); + AssertLockHeld(cs_main); + CNodeState &state = *State(pnode->GetId()); + if (state.m_wtxid_relay) { + pnode->PushTxInventory(wtxid); + } else { + pnode->PushTxInventory(txid); + } }); } @@ -1632,9 +1676,9 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c } //! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). 
-CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) +CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_or_wtxid, bool use_wtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) { - auto txinfo = mempool.info(txid); + auto txinfo = mempool.info(txid_or_wtxid, use_wtxid); if (txinfo.tx) { // If a TX could have been INVed in reply to a MEMPOOL request, // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request @@ -1646,13 +1690,12 @@ CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid, { LOCK(cs_main); - // Otherwise, the transaction must have been announced recently. - if (State(peer.GetId())->m_recently_announced_invs.contains(txid)) { + if (State(peer.GetId())->m_recently_announced_invs.contains(txid_or_wtxid)) { // If it was, it can be relayed from either the mempool... if (txinfo.tx) return std::move(txinfo.tx); // ... or the relay pool. - auto mi = mapRelay.find(txid); + auto mi = mapRelay.find(txid_or_wtxid); if (mi != mapRelay.end()) return mi->second; } } @@ -1676,7 +1719,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm // Process as many TX items from the front of the getdata queue as // possible, since they're common and it's efficient to batch process // them. - while (it != pfrom.vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) { + while (it != pfrom.vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX || it->type == MSG_WTX)) { if (interruptMsgProc) return; // The send buffer provides backpressure. If there's no space in // the buffer, pause processing until the next call. @@ -1689,11 +1732,12 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm continue; } - CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, mempool_req, now); + CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, inv.type == MSG_WTX, mempool_req, now); if (tx) { + // WTX and WITNESS_TX imply we serialize with witness int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); - mempool.RemoveUnbroadcastTx(inv.hash); + mempool.RemoveUnbroadcastTx(tx->GetHash()); // As we're going to send tx, make sure its unconfirmed parents are made requestable. 
for (const auto& txin : tx->vin) { auto txinfo = mempool.info(txin.prevout.hash); @@ -1972,7 +2016,7 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin if (setMisbehaving.count(fromPeer)) continue; if (AcceptToMemoryPool(mempool, orphan_state, porphanTx, &removed_txn, false /* bypass_limits */, 0 /* nAbsurdFee */)) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); - RelayTransaction(orphanHash, connman); + RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), connman); for (unsigned int i = 0; i < orphanTx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { @@ -1997,12 +2041,22 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString()); - if (!orphanTx.HasWitness() && orphan_state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) { - // Do not use rejection cache for witness transactions or - // witness-stripped transactions, as they can have been malleated. - // See https://github.com/bitcoin/bitcoin/issues/8279 for details. + if (orphan_state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { + // We can add the wtxid of this transaction to our reject filter. + // Do not add txids of witness transactions or witness-stripped + // transactions to the filter, as they can have been malleated; + // adding such txids to the reject filter would potentially + // interfere with relay of valid transactions from peers that + // do not support wtxid-based relay. See + // https://github.com/bitcoin/bitcoin/issues/8279 for details. + // We can remove this restriction (and always add wtxids to + // the filter even for witness stripped transactions) once + // wtxid-based relay is broadly deployed. + // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 + // for concerns around weakening security of unupgraded nodes + // if we start doing this too early. assert(recentRejects); - recentRejects->insert(orphanHash); + recentRejects->insert(orphanTx.GetWitnessHash()); } EraseOrphanTx(orphanHash); done = true; @@ -2319,6 +2373,10 @@ void ProcessMessage( if (pfrom.fInbound) PushNodeVersion(pfrom, connman, GetAdjustedTime()); + if (nVersion >= WTXID_RELAY_VERSION) { + connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::WTXIDRELAY)); + } + connman.PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); pfrom.nServices = nServices; @@ -2455,6 +2513,25 @@ void ProcessMessage( return; } + // Feature negotiation of wtxidrelay should happen between VERSION and + // VERACK, to avoid relay problems from switching after a connection is up + if (msg_type == NetMsgType::WTXIDRELAY) { + if (pfrom.fSuccessfullyConnected) { + // Disconnect peers that send wtxidrelay message after VERACK; this + // must be negotiated between VERSION and VERACK. 
+ pfrom.fDisconnect = true; + return; + } + if (pfrom.nVersion >= WTXID_RELAY_VERSION) { + LOCK(cs_main); + if (!State(pfrom.GetId())->m_wtxid_relay) { + State(pfrom.GetId())->m_wtxid_relay = true; + g_wtxid_relay_peers++; + } + } + return; + } + if (!pfrom.fSuccessfullyConnected) { // Must have a verack message before anything else LOCK(cs_main); @@ -2575,6 +2652,13 @@ void ProcessMessage( if (interruptMsgProc) return; + // ignore INVs that don't match wtxidrelay setting + if (State(pfrom.GetId())->m_wtxid_relay) { + if (inv.type == MSG_TX) continue; + } else { + if (inv.type == MSG_WTX) continue; + } + bool fAlreadyHave = AlreadyHave(inv, mempool); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); @@ -2593,7 +2677,7 @@ void ProcessMessage( best_block = &inv.hash; } } else { - pfrom.AddInventoryKnown(inv); + pfrom.AddKnownTx(inv.hash); if (fBlocksOnly) { LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId()); pfrom.fDisconnect = true; @@ -2832,26 +2916,50 @@ void ProcessMessage( vRecv >> ptx; const CTransaction& tx = *ptx; - CInv inv(MSG_TX, tx.GetHash()); - pfrom.AddInventoryKnown(inv); + const uint256& txid = ptx->GetHash(); + const uint256& wtxid = ptx->GetWitnessHash(); LOCK2(cs_main, g_cs_orphans); + CNodeState* nodestate = State(pfrom.GetId()); + + const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid; + pfrom.AddKnownTx(hash); + if (nodestate->m_wtxid_relay && txid != wtxid) { + // Insert txid into filterInventoryKnown, even for + // wtxidrelay peers. This prevents re-adding of + // unconfirmed parents to the recently_announced + // filter, when a child tx is requested. See + // ProcessGetData(). + pfrom.AddKnownTx(txid); + } + TxValidationState state; - CNodeState* nodestate = State(pfrom.GetId()); - nodestate->m_tx_download.m_tx_announced.erase(inv.hash); - nodestate->m_tx_download.m_tx_in_flight.erase(inv.hash); - EraseTxRequest(inv.hash); + nodestate->m_tx_download.m_tx_announced.erase(hash); + nodestate->m_tx_download.m_tx_in_flight.erase(hash); + EraseTxRequest(hash); std::list<CTransactionRef> lRemovedTxn; - if (!AlreadyHave(inv, mempool) && + // We do the AlreadyHave() check using wtxid, rather than txid - in the + // absence of witness malleation, this is strictly better, because the + // recent rejects filter may contain the wtxid but will never contain + // the txid of a segwit transaction that has been rejected. + // In the presence of witness malleation, it's possible that by only + // doing the check with wtxid, we could overlook a transaction which + // was confirmed with a different witness, or exists in our mempool + // with a different witness, but this has limited downside: + // mempool validation does its own lookup of whether we have the txid + // already; and an adversary can already relay us old transactions + // (older than our recency filter) if trying to DoS us, without any need + // for witness malleation. 
+ if (!AlreadyHave(CInv(MSG_WTX, wtxid), mempool) && AcceptToMemoryPool(mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) { mempool.check(&::ChainstateActive().CoinsTip()); - RelayTransaction(tx.GetHash(), connman); + RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), connman); for (unsigned int i = 0; i < tx.vout.size(); i++) { - auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(inv.hash, i)); + auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto& elem : it_by_prev->second) { pfrom.orphan_work_set.insert(elem->first); @@ -2882,10 +2990,17 @@ void ProcessMessage( uint32_t nFetchFlags = GetFetchFlags(pfrom); const auto current_time = GetTime<std::chrono::microseconds>(); - for (const CTxIn& txin : tx.vin) { - CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); - pfrom.AddInventoryKnown(_inv); - if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time); + if (!State(pfrom.GetId())->m_wtxid_relay) { + for (const CTxIn& txin : tx.vin) { + // Here, we only have the txid (and not wtxid) of the + // inputs, so we only request parents from + // non-wtxid-relay peers. + // Eventually we should replace this with an improved + // protocol for getting all unconfirmed parents. + CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); + pfrom.AddKnownTx(txin.prevout.hash); + if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time); + } } AddOrphanTx(ptx, pfrom.GetId()); @@ -2899,15 +3014,30 @@ void ProcessMessage( LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); // We will continue to reject this tx since it has rejected // parents so avoid re-requesting it from other peers. + // Here we add both the txid and the wtxid, as we know that + // regardless of what witness is provided, we will not accept + // this, so we don't need to allow for redownload of this txid + // from any of our non-wtxidrelay peers. recentRejects->insert(tx.GetHash()); + recentRejects->insert(tx.GetWitnessHash()); } } else { - if (!tx.HasWitness() && state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) { - // Do not use rejection cache for witness transactions or - // witness-stripped transactions, as they can have been malleated. - // See https://github.com/bitcoin/bitcoin/issues/8279 for details. + if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { + // We can add the wtxid of this transaction to our reject filter. + // Do not add txids of witness transactions or witness-stripped + // transactions to the filter, as they can have been malleated; + // adding such txids to the reject filter would potentially + // interfere with relay of valid transactions from peers that + // do not support wtxid-based relay. See + // https://github.com/bitcoin/bitcoin/issues/8279 for details. + // We can remove this restriction (and always add wtxids to + // the filter even for witness stripped transactions) once + // wtxid-based relay is broadly deployed. + // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 + // for concerns around weakening security of unupgraded nodes + // if we start doing this too early. 
assert(recentRejects); - recentRejects->insert(tx.GetHash()); + recentRejects->insert(tx.GetWitnessHash()); if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } @@ -2924,7 +3054,7 @@ void ProcessMessage( LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); } else { LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); - RelayTransaction(tx.GetHash(), connman); + RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), connman); } } } @@ -3564,7 +3694,7 @@ void ProcessMessage( vRecv >> vInv; if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { for (CInv &inv : vInv) { - if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) { + if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX || inv.type == MSG_WTX) { // If we receive a NOTFOUND message for a txid we requested, erase // it from our data structures for this peer. auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash); @@ -3849,17 +3979,19 @@ namespace { class CompareInvMempoolOrder { CTxMemPool *mp; + bool m_wtxid_relay; public: - explicit CompareInvMempoolOrder(CTxMemPool *_mempool) + explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid) { mp = _mempool; + m_wtxid_relay = use_wtxid; } bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b) { /* As std::make_heap produces a max-heap, we want the entries with the * fewest ancestors/highest fee to sort later. */ - return mp->CompareDepthAndScore(*b, *a); + return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay); } }; } @@ -4166,8 +4298,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto) LOCK(pto->m_tx_relay->cs_filter); for (const auto& txinfo : vtxinfo) { - const uint256& hash = txinfo.tx->GetHash(); - CInv inv(MSG_TX, hash); + const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash(); + CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash); pto->m_tx_relay->setInventoryTxToSend.erase(hash); // Don't send transactions that peers will not put into their mempool if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { @@ -4202,7 +4334,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) } // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. // A heap is used so that not all items need sorting if only a few are being sent. - CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool); + CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much shorter delays. @@ -4221,10 +4353,12 @@ bool PeerLogicValidation::SendMessages(CNode* pto) continue; } // Not in the mempool anymore? don't bother sending it. - auto txinfo = m_mempool.info(hash); + auto txinfo = m_mempool.info(hash, state.m_wtxid_relay); if (!txinfo.tx) { continue; } + auto txid = txinfo.tx->GetHash(); + auto wtxid = txinfo.tx->GetWitnessHash(); // Peer told you to not send transactions at that feerate? Don't bother sending it. 
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; @@ -4232,7 +4366,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; // Send State(pto->GetId())->m_recently_announced_invs.insert(hash); - vInv.push_back(CInv(MSG_TX, hash)); + vInv.push_back(CInv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash)); nRelayedTransactions++; { // Expire old relay messages @@ -4242,9 +4376,14 @@ bool PeerLogicValidation::SendMessages(CNode* pto) vRelayExpiration.pop_front(); } - auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx))); + auto ret = mapRelay.emplace(txid, std::move(txinfo.tx)); if (ret.second) { - vRelayExpiration.push_back(std::make_pair(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first)); + vRelayExpiration.emplace_back(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first); + } + // Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid + auto ret2 = mapRelay.emplace(wtxid, ret.first->second); + if (ret2.second) { + vRelayExpiration.emplace_back(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret2.first); } } if (vInv.size() == MAX_INV_SZ) { @@ -4252,6 +4391,14 @@ bool PeerLogicValidation::SendMessages(CNode* pto) vInv.clear(); } pto->m_tx_relay->filterInventoryKnown.insert(hash); + if (hash != txid) { + // Insert txid into filterInventoryKnown, even for + // wtxidrelay peers. This prevents re-adding of + // unconfirmed parents to the recently_announced + // filter, when a child tx is requested. See + // ProcessGetData(). + pto->m_tx_relay->filterInventoryKnown.insert(txid); + } } } } @@ -4376,7 +4523,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // Erase this entry from tx_process_time (it may be added back for // processing at a later time, see below) tx_process_time.erase(tx_process_time.begin()); - CInv inv(MSG_TX | GetFetchFlags(*pto), txid); + CInv inv(state.m_wtxid_relay ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), txid); if (!AlreadyHave(inv, m_mempool)) { // If this transaction was last requested more than 1 minute ago, // then request. @@ -4395,7 +4542,15 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // up processing to happen after the download times out // (with a slight delay for inbound peers, to prefer // requests to outbound peers). - const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload); + // Don't apply the txid-delay to re-requests of a + // transaction; the heuristic of delaying requests to + // txid-relay peers is to save bandwidth on initial + // announcement of a transaction, and doesn't make sense + // for a followup request if our first peer times out (and + // would open us up to an attacker using inbound + // wtxid-relay to prevent us from requesting transactions + // from outbound txid-relay peers). 
+ const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload, false); tx_process_time.emplace(next_process_time, txid); } } else { @@ -4459,6 +4614,7 @@ public: // orphan transactions mapOrphanTransactions.clear(); mapOrphanTransactionsByPrev.clear(); + g_orphans_by_wtxid.clear(); } }; static CNetProcessingCleanup instance_of_cnetprocessingcleanup; diff --git a/src/net_processing.h b/src/net_processing.h index fa1555fbe6..0534828761 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -100,6 +100,6 @@ struct CNodeStateStats { bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats); /** Relay transaction to every node */ -void RelayTransaction(const uint256&, const CConnman& connman); +void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main); #endif // BITCOIN_NET_PROCESSING_H diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index 3841d8687d..5633abe817 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -80,9 +80,10 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t if (relay) { // the mempool tracks locally submitted transactions to make a // best-effort of initial broadcast - node.mempool->AddUnbroadcastTx(hashTx); + node.mempool->AddUnbroadcastTx(hashTx, tx->GetWitnessHash()); - RelayTransaction(hashTx, *node.connman); + LOCK(cs_main); + RelayTransaction(hashTx, tx->GetWitnessHash(), *node.connman); } return TransactionError::OK; diff --git a/src/protocol.cpp b/src/protocol.cpp index 2dfe4bee74..ee77ca3b94 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -46,6 +46,7 @@ const char *GETCFHEADERS="getcfheaders"; const char *CFHEADERS="cfheaders"; const char *GETCFCHECKPT="getcfcheckpt"; const char *CFCHECKPT="cfcheckpt"; +const char *WTXIDRELAY="wtxidrelay"; } // namespace NetMsgType /** All known message types. Keep this in the same order as the list of @@ -83,6 +84,7 @@ const static std::string allNetMessageTypes[] = { NetMsgType::CFHEADERS, NetMsgType::GETCFCHECKPT, NetMsgType::CFCHECKPT, + NetMsgType::WTXIDRELAY, }; const static std::vector<std::string> allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes+ARRAYLEN(allNetMessageTypes)); @@ -177,6 +179,8 @@ std::string CInv::GetCommand() const switch (masked) { case MSG_TX: return cmd.append(NetMsgType::TX); + // WTX is not a message type, just an inv type + case MSG_WTX: return cmd.append("wtx"); case MSG_BLOCK: return cmd.append(NetMsgType::BLOCK); case MSG_FILTERED_BLOCK: return cmd.append(NetMsgType::MERKLEBLOCK); case MSG_CMPCT_BLOCK: return cmd.append(NetMsgType::CMPCTBLOCK); diff --git a/src/protocol.h b/src/protocol.h index 9ab63a30fb..d83da2034a 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -261,6 +261,12 @@ extern const char* GETCFCHECKPT; * evenly spaced filter headers for blocks on the requested chain. */ extern const char* CFCHECKPT; +/** + * Indicates that a node prefers to relay transactions via wtxid, rather than + * txid. + * @since protocol version 70016 as described by BIP 339. + */ +extern const char *WTXIDRELAY; }; // namespace NetMsgType /* Get a vector of all valid message types (see above) */ @@ -397,11 +403,12 @@ const uint32_t MSG_TYPE_MASK = 0xffffffff >> 2; * These numbers are defined by the protocol. When adding a new value, be sure * to mention it in the respective BIP. 
*/ -enum GetDataMsg { +enum GetDataMsg : uint32_t { UNDEFINED = 0, MSG_TX = 1, MSG_BLOCK = 2, - // The following can only occur in getdata. Invs always use TX or BLOCK. + MSG_WTX = 5, //!< Defined in BIP 339 + // The following can only occur in getdata. Invs always use TX/WTX or BLOCK. MSG_FILTERED_BLOCK = 3, //!< Defined in BIP37 MSG_CMPCT_BLOCK = 4, //!< Defined in BIP152 MSG_WITNESS_BLOCK = MSG_BLOCK | MSG_WITNESS_FLAG, //!< Defined in BIP144 diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index a304c23a2c..ecb753a306 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -528,6 +528,11 @@ int GuiMain(int argc, char* argv[]) // Parse URIs on command line -- this can affect Params() PaymentServer::ipcParseCommandLine(*node, argc, argv); #endif + if (!node->initSettings(error)) { + node->initError(Untranslated(error)); + QMessageBox::critical(nullptr, PACKAGE_NAME, QObject::tr("Error initializing settings: %1").arg(QString::fromStdString(error))); + return EXIT_FAILURE; + } QScopedPointer<const NetworkStyle> networkStyle(NetworkStyle::instantiate(Params().NetworkIDString())); assert(!networkStyle.isNull()); diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 29fd720244..821a337a62 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -24,7 +24,6 @@ #include <univalue.h> #ifdef ENABLE_WALLET -#include <wallet/bdb.h> #include <wallet/db.h> #include <wallet/wallet.h> #endif diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp index fcd831ccda..1a2d775f49 100644 --- a/src/test/settings_tests.cpp +++ b/src/test/settings_tests.cpp @@ -12,10 +12,90 @@ #include <univalue.h> #include <util/strencodings.h> #include <util/string.h> +#include <util/system.h> #include <vector> +inline bool operator==(const util::SettingsValue& a, const util::SettingsValue& b) +{ + return a.write() == b.write(); +} + +inline std::ostream& operator<<(std::ostream& os, const util::SettingsValue& value) +{ + os << value.write(); + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const std::pair<std::string, util::SettingsValue>& kv) +{ + util::SettingsValue out(util::SettingsValue::VOBJ); + out.__pushKV(kv.first, kv.second); + os << out.write(); + return os; +} + +inline void WriteText(const fs::path& path, const std::string& text) +{ + fsbridge::ofstream file; + file.open(path); + file << text; +} + BOOST_FIXTURE_TEST_SUITE(settings_tests, BasicTestingSetup) +BOOST_AUTO_TEST_CASE(ReadWrite) +{ + fs::path path = GetDataDir() / "settings.json"; + + WriteText(path, R"({ + "string": "string", + "num": 5, + "bool": true, + "null": null + })"); + + std::map<std::string, util::SettingsValue> expected{ + {"string", "string"}, + {"num", 5}, + {"bool", true}, + {"null", {}}, + }; + + // Check file read. + std::map<std::string, util::SettingsValue> values; + std::vector<std::string> errors; + BOOST_CHECK(util::ReadSettings(path, values, errors)); + BOOST_CHECK_EQUAL_COLLECTIONS(values.begin(), values.end(), expected.begin(), expected.end()); + BOOST_CHECK(errors.empty()); + + // Check no errors if file doesn't exist. 
+ fs::remove(path); + BOOST_CHECK(util::ReadSettings(path, values, errors)); + BOOST_CHECK(values.empty()); + BOOST_CHECK(errors.empty()); + + // Check duplicate keys not allowed + WriteText(path, R"({ + "dupe": "string", + "dupe": "dupe" + })"); + BOOST_CHECK(!util::ReadSettings(path, values, errors)); + std::vector<std::string> dup_keys = {strprintf("Found duplicate key dupe in settings file %s", path.string())}; + BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), dup_keys.begin(), dup_keys.end()); + + // Check non-kv json files not allowed + WriteText(path, R"("non-kv")"); + BOOST_CHECK(!util::ReadSettings(path, values, errors)); + std::vector<std::string> non_kv = {strprintf("Found non-object value \"non-kv\" in settings file %s", path.string())}; + BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), non_kv.begin(), non_kv.end()); + + // Check invalid json not allowed + WriteText(path, R"(invalid json)"); + BOOST_CHECK(!util::ReadSettings(path, values, errors)); + std::vector<std::string> fail_parse = {strprintf("Unable to parse settings file %s", path.string())}; + BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), fail_parse.begin(), fail_parse.end()); +} + //! Check settings struct contents against expected json strings. static void CheckValues(const util::Settings& settings, const std::string& single_val, const std::string& list_val) { diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index e247c09a97..a30e366028 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -9,6 +9,7 @@ #include <key.h> // For CKey #include <optional.h> #include <sync.h> +#include <test/util/logging.h> #include <test/util/setup_common.h> #include <test/util/str.h> #include <uint256.h> @@ -1138,6 +1139,28 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup) BOOST_CHECK_EQUAL(out_sha_hex, "f0b3a3c29869edc765d579c928f7f1690a71fbb673b49ccf39cbc4de18156a0d"); } +BOOST_AUTO_TEST_CASE(util_ReadWriteSettings) +{ + // Test writing setting. + TestArgsManager args1; + args1.LockSettings([&](util::Settings& settings) { settings.rw_settings["name"] = "value"; }); + args1.WriteSettingsFile(); + + // Test reading setting. + TestArgsManager args2; + args2.ReadSettingsFile(); + args2.LockSettings([&](util::Settings& settings) { BOOST_CHECK_EQUAL(settings.rw_settings["name"].get_str(), "value"); }); + + // Test error logging, and remove previously written setting. + { + ASSERT_DEBUG_LOG("Failed renaming settings file"); + fs::remove(GetDataDir() / "settings.json"); + fs::create_directory(GetDataDir() / "settings.json"); + args2.WriteSettingsFile(); + fs::remove(GetDataDir() / "settings.json"); + } +} + BOOST_AUTO_TEST_CASE(util_FormatMoney) { BOOST_CHECK_EQUAL(FormatMoney(0), "0.00"); diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 7d8eb8a323..1d9f6a4a46 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -726,12 +726,12 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const assert(innerUsage == cachedInnerUsage); } -bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb) +bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb, bool wtxid) { LOCK(cs); - indexed_transaction_set::const_iterator i = mapTx.find(hasha); + indexed_transaction_set::const_iterator i = wtxid ? get_iter_from_wtxid(hasha) : mapTx.find(hasha); if (i == mapTx.end()) return false; - indexed_transaction_set::const_iterator j = mapTx.find(hashb); + indexed_transaction_set::const_iterator j = wtxid ? 
get_iter_from_wtxid(hashb) : mapTx.find(hashb); if (j == mapTx.end()) return true; uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); @@ -811,10 +811,10 @@ CTransactionRef CTxMemPool::get(const uint256& hash) const return i->GetSharedTx(); } -TxMempoolInfo CTxMemPool::info(const uint256& hash) const +TxMempoolInfo CTxMemPool::info(const uint256& hash, bool wtxid) const { LOCK(cs); - indexed_transaction_set::const_iterator i = mapTx.find(hash); + indexed_transaction_set::const_iterator i = (wtxid ? get_iter_from_wtxid(hash) : mapTx.find(hash)); if (i == mapTx.end()) return TxMempoolInfo(); return GetInfo(i); @@ -917,8 +917,8 @@ bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); - // Estimate the overhead of mapTx to be 12 pointers + an allocation, as no exact formula for boost::multi_index_contained is implemented. - return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 12 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; + // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no exact formula for boost::multi_index_contained is implemented. + return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(mapLinks) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveUnbroadcastTx(const uint256& txid, const bool unchecked) { diff --git a/src/txmempool.h b/src/txmempool.h index 583f7614b7..d4e9845942 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -198,6 +198,22 @@ struct mempoolentry_txid } }; +// extracts a transaction witness-hash from CTxMemPoolEntry or CTransactionRef +struct mempoolentry_wtxid +{ + typedef uint256 result_type; + result_type operator() (const CTxMemPoolEntry &entry) const + { + return entry.GetTx().GetWitnessHash(); + } + + result_type operator() (const CTransactionRef& tx) const + { + return tx->GetWitnessHash(); + } +}; + + /** \class CompareTxMemPoolEntryByDescendantScore * * Sort an entry by max(score/size of entry's tx, score/size with all descendants). 
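The new index_by_wtxid tag and mempoolentry_wtxid extractor above add a second hashed index over the mempool, keyed by witness hash. For readers less familiar with boost::multi_index, the standalone sketch below (not Bitcoin Core code; Entry and its std::string keys are stand-ins for CTxMemPoolEntry and uint256, and by_txid/by_wtxid are hypothetical tags) shows the pattern being relied on: one container searchable by either key, with iterators from the secondary index projected back to the primary one, which is what the new get_iter_from_wtxid() helper does via mapTx.project<0>().

// Standalone sketch of a container indexed by both txid and wtxid (assumed names).
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/tag.hpp>
#include <boost/functional/hash.hpp>
#include <cassert>
#include <string>

namespace bmi = boost::multi_index;

struct Entry {
    std::string txid;   // stand-in for uint256 GetHash()
    std::string wtxid;  // stand-in for uint256 GetWitnessHash()
};

struct by_txid {};
struct by_wtxid {};

using EntrySet = boost::multi_index_container<
    Entry,
    bmi::indexed_by<
        bmi::hashed_unique<bmi::tag<by_txid>,
                           bmi::member<Entry, std::string, &Entry::txid>>,
        bmi::hashed_unique<bmi::tag<by_wtxid>,
                           bmi::member<Entry, std::string, &Entry::wtxid>>>>;

int main()
{
    EntrySet entries;
    entries.insert({"txid-1", "wtxid-1"});

    // Find the entry through the secondary (wtxid) index...
    auto wit = entries.get<by_wtxid>().find("wtxid-1");
    assert(wit != entries.get<by_wtxid>().end());

    // ...and project the iterator back to the primary (txid) index, analogous
    // to mapTx.project<0>(mapTx.get<index_by_wtxid>().find(wtxid)) above.
    auto tit = entries.project<by_txid>(wit);
    assert(tit->txid == "txid-1" && tit->wtxid == "wtxid-1");
    return 0;
}
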
@@ -318,6 +334,7 @@ public: struct descendant_score {}; struct entry_time {}; struct ancestor_score {}; +struct index_by_wtxid {}; class CBlockPolicyEstimator; @@ -383,8 +400,9 @@ public: * * CTxMemPool::mapTx, and CTxMemPoolEntry bookkeeping: * - * mapTx is a boost::multi_index that sorts the mempool on 4 criteria: - * - transaction hash + * mapTx is a boost::multi_index that sorts the mempool on 5 criteria: + * - transaction hash (txid) + * - witness-transaction hash (wtxid) * - descendant feerate [we use max(feerate of tx, feerate of tx with all descendants)] * - time in mempool * - ancestor feerate [we use min(feerate of tx, feerate of tx with all unconfirmed ancestors)] @@ -469,6 +487,12 @@ public: boost::multi_index::indexed_by< // sorted by txid boost::multi_index::hashed_unique<mempoolentry_txid, SaltedTxidHasher>, + // sorted by wtxid + boost::multi_index::hashed_unique< + boost::multi_index::tag<index_by_wtxid>, + mempoolentry_wtxid, + SaltedTxidHasher + >, // sorted by fee rate boost::multi_index::ordered_non_unique< boost::multi_index::tag<descendant_score>, @@ -549,8 +573,11 @@ private: std::vector<indexed_transaction_set::const_iterator> GetSortedDepthAndScore() const EXCLUSIVE_LOCKS_REQUIRED(cs); - /** track locally submitted transactions to periodically retry initial broadcast */ - std::set<uint256> m_unbroadcast_txids GUARDED_BY(cs); + /** + * track locally submitted transactions to periodically retry initial broadcast + * map of txid -> wtxid + */ + std::map<uint256, uint256> m_unbroadcast_txids GUARDED_BY(cs); public: indirectmap<COutPoint, const CTransaction*> mapNextTx GUARDED_BY(cs); @@ -586,7 +613,7 @@ public: void clear(); void _clear() EXCLUSIVE_LOCKS_REQUIRED(cs); //lock free - bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); + bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb, bool wtxid=false); void queryHashes(std::vector<uint256>& vtxid) const; bool isSpent(const COutPoint& outpoint) const; unsigned int GetTransactionsUpdated() const; @@ -689,24 +716,32 @@ public: return totalTxSize; } - bool exists(const uint256& hash) const + bool exists(const uint256& hash, bool wtxid=false) const { LOCK(cs); + if (wtxid) { + return (mapTx.get<index_by_wtxid>().count(hash) != 0); + } return (mapTx.count(hash) != 0); } CTransactionRef get(const uint256& hash) const; - TxMempoolInfo info(const uint256& hash) const; + txiter get_iter_from_wtxid(const uint256& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(cs) + { + AssertLockHeld(cs); + return mapTx.project<0>(mapTx.get<index_by_wtxid>().find(wtxid)); + } + TxMempoolInfo info(const uint256& hash, bool wtxid=false) const; std::vector<TxMempoolInfo> infoAll() const; size_t DynamicMemoryUsage() const; /** Adds a transaction to the unbroadcast set */ - void AddUnbroadcastTx(const uint256& txid) { + void AddUnbroadcastTx(const uint256& txid, const uint256& wtxid) { LOCK(cs); // Sanity Check: the transaction should also be in the mempool if (exists(txid)) { - m_unbroadcast_txids.insert(txid); + m_unbroadcast_txids[txid] = wtxid; } } @@ -714,7 +749,7 @@ public: void RemoveUnbroadcastTx(const uint256& txid, const bool unchecked = false); /** Returns transactions in unbroadcast set */ - std::set<uint256> GetUnbroadcastTxs() const { + std::map<uint256, uint256> GetUnbroadcastTxs() const { LOCK(cs); return m_unbroadcast_txids; } diff --git a/src/util/settings.cpp b/src/util/settings.cpp index e4979df755..b92b1d30c3 100644 --- a/src/util/settings.cpp +++ b/src/util/settings.cpp @@ -4,6 +4,7 @@ #include 
<util/settings.h> +#include <tinyformat.h> #include <univalue.h> namespace util { @@ -12,12 +13,13 @@ namespace { enum class Source { FORCED, COMMAND_LINE, + RW_SETTINGS, CONFIG_FILE_NETWORK_SECTION, CONFIG_FILE_DEFAULT_SECTION }; //! Merge settings from multiple sources in precedence order: -//! Forced config > command line > config file network-specific section > config file default section +//! Forced config > command line > read-write settings file > config file network-specific section > config file default section //! //! This function is provided with a callback function fn that contains //! specific logic for how to merge the sources. @@ -32,6 +34,10 @@ static void MergeSettings(const Settings& settings, const std::string& section, if (auto* values = FindKey(settings.command_line_options, name)) { fn(SettingsSpan(*values), Source::COMMAND_LINE); } + // Merge in the read-write settings + if (const SettingsValue* value = FindKey(settings.rw_settings, name)) { + fn(SettingsSpan(*value), Source::RW_SETTINGS); + } // Merge in the network-specific section of the config file if (!section.empty()) { if (auto* map = FindKey(settings.ro_config, section)) { @@ -49,6 +55,62 @@ static void MergeSettings(const Settings& settings, const std::string& section, } } // namespace +bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& values, std::vector<std::string>& errors) +{ + values.clear(); + errors.clear(); + + fsbridge::ifstream file; + file.open(path); + if (!file.is_open()) return true; // Ok for file not to exist. + + SettingsValue in; + if (!in.read(std::string{std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()})) { + errors.emplace_back(strprintf("Unable to parse settings file %s", path.string())); + return false; + } + + if (file.fail()) { + errors.emplace_back(strprintf("Failed reading settings file %s", path.string())); + return false; + } + file.close(); // Done with file descriptor. Release while copying data. 
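The ReadSettings helper above, together with the WriteSettings counterpart that follows, gives the node a persistent key/value store whose values sit between command-line options and the config file in the precedence order documented in MergeSettings. A hypothetical round-trip through the two helpers, assuming the signatures declared in util/settings.h in this patch; the wrapper function and the "dbcache" key are illustrative only:

    #include <fs.h>
    #include <util/settings.h>

    #include <map>
    #include <string>
    #include <vector>

    // Illustrative helper, not part of the change: load settings.json, set one
    // read-write value, and write the whole object back out.
    bool WriteExampleSetting(const fs::path& settings_path)
    {
        std::map<std::string, util::SettingsValue> values;
        std::vector<std::string> errors;
        if (!util::ReadSettings(settings_path, values, errors)) return false; // parse errors collected in `errors`
        values["dbcache"] = util::SettingsValue(450);               // add or replace one setting
        return util::WriteSettings(settings_path, values, errors);  // serializes the map as a single JSON object
    }

As the ArgsManager::WriteSettingsFile change further down in system.cpp shows, the real write goes to a temporary file first and is then renamed over settings.json.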
+ + if (!in.isObject()) { + errors.emplace_back(strprintf("Found non-object value %s in settings file %s", in.write(), path.string())); + return false; + } + + const std::vector<std::string>& in_keys = in.getKeys(); + const std::vector<SettingsValue>& in_values = in.getValues(); + for (size_t i = 0; i < in_keys.size(); ++i) { + auto inserted = values.emplace(in_keys[i], in_values[i]); + if (!inserted.second) { + errors.emplace_back(strprintf("Found duplicate key %s in settings file %s", in_keys[i], path.string())); + } + } + return errors.empty(); +} + +bool WriteSettings(const fs::path& path, + const std::map<std::string, SettingsValue>& values, + std::vector<std::string>& errors) +{ + SettingsValue out(SettingsValue::VOBJ); + for (const auto& value : values) { + out.__pushKV(value.first, value.second); + } + fsbridge::ofstream file; + file.open(path); + if (file.fail()) { + errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", path.string())); + return false; + } + file << out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4) << std::endl; + file.close(); + return true; +} + SettingsValue GetSetting(const Settings& settings, const std::string& section, const std::string& name, diff --git a/src/util/settings.h b/src/util/settings.h index 1d03639fa2..ed36349232 100644 --- a/src/util/settings.h +++ b/src/util/settings.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_UTIL_SETTINGS_H #define BITCOIN_UTIL_SETTINGS_H +#include <fs.h> + #include <map> #include <string> #include <vector> @@ -24,19 +26,31 @@ namespace util { //! https://github.com/bitcoin/bitcoin/pull/15934/files#r337691812) using SettingsValue = UniValue; -//! Stored bitcoin settings. This struct combines settings from the command line -//! and a read-only configuration file. +//! Stored settings. This struct combines settings from the command line, a +//! read-only configuration file, and a read-write runtime settings file. struct Settings { //! Map of setting name to forced setting value. std::map<std::string, SettingsValue> forced_settings; //! Map of setting name to list of command line values. std::map<std::string, std::vector<SettingsValue>> command_line_options; + //! Map of setting name to read-write file setting value. + std::map<std::string, SettingsValue> rw_settings; //! Map of config section name and setting name to list of config file values. std::map<std::string, std::map<std::string, std::vector<SettingsValue>>> ro_config; }; +//! Read settings file. +bool ReadSettings(const fs::path& path, + std::map<std::string, SettingsValue>& values, + std::vector<std::string>& errors); + +//! Write settings file. +bool WriteSettings(const fs::path& path, + const std::map<std::string, SettingsValue>& values, + std::vector<std::string>& errors); + //! Get settings value from combined sources: forced settings, command line -//! arguments and the read-only config file. +//! arguments, runtime read-write settings, and the read-only config file. //! //! @param ignore_default_section_config - ignore values in the default section //! 
of the config file (part before any diff --git a/src/util/system.cpp b/src/util/system.cpp index 7e7ba840cd..8164e884b1 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -73,6 +73,7 @@ const int64_t nStartupTime = GetTime(); const char * const BITCOIN_CONF_FILENAME = "bitcoin.conf"; +const char * const BITCOIN_SETTINGS_FILENAME = "settings.json"; ArgsManager gArgs; @@ -372,6 +373,84 @@ bool ArgsManager::IsArgSet(const std::string& strArg) const return !GetSetting(strArg).isNull(); } +bool ArgsManager::InitSettings(std::string& error) +{ + if (!GetSettingsPath()) { + return true; // Do nothing if settings file disabled. + } + + std::vector<std::string> errors; + if (!ReadSettingsFile(&errors)) { + error = strprintf("Failed loading settings file:\n- %s\n", Join(errors, "\n- ")); + return false; + } + if (!WriteSettingsFile(&errors)) { + error = strprintf("Failed saving settings file:\n- %s\n", Join(errors, "\n- ")); + return false; + } + return true; +} + +bool ArgsManager::GetSettingsPath(fs::path* filepath, bool temp) const +{ + if (IsArgNegated("-settings")) { + return false; + } + if (filepath) { + std::string settings = GetArg("-settings", BITCOIN_SETTINGS_FILENAME); + *filepath = fs::absolute(temp ? settings + ".tmp" : settings, GetDataDir(/* net_specific= */ true)); + } + return true; +} + +static void SaveErrors(const std::vector<std::string> errors, std::vector<std::string>* error_out) +{ + for (const auto& error : errors) { + if (error_out) { + error_out->emplace_back(error); + } else { + LogPrintf("%s\n", error); + } + } +} + +bool ArgsManager::ReadSettingsFile(std::vector<std::string>* errors) +{ + fs::path path; + if (!GetSettingsPath(&path, /* temp= */ false)) { + return true; // Do nothing if settings file disabled. + } + + LOCK(cs_args); + m_settings.rw_settings.clear(); + std::vector<std::string> read_errors; + if (!util::ReadSettings(path, m_settings.rw_settings, read_errors)) { + SaveErrors(read_errors, errors); + return false; + } + return true; +} + +bool ArgsManager::WriteSettingsFile(std::vector<std::string>* errors) const +{ + fs::path path, path_tmp; + if (!GetSettingsPath(&path, /* temp= */ false) || !GetSettingsPath(&path_tmp, /* temp= */ true)) { + throw std::logic_error("Attempt to write settings file when dynamic settings are disabled."); + } + + LOCK(cs_args); + std::vector<std::string> write_errors; + if (!util::WriteSettings(path_tmp, m_settings.rw_settings, write_errors)) { + SaveErrors(write_errors, errors); + return false; + } + if (!RenameOver(path_tmp, path)) { + SaveErrors({strprintf("Failed renaming settings file %s to %s\n", path_tmp.string(), path.string())}, errors); + return false; + } + return true; +} + bool ArgsManager::IsArgNegated(const std::string& strArg) const { return GetSetting(strArg).isFalse(); @@ -893,6 +972,9 @@ void ArgsManager::LogArgs() const for (const auto& section : m_settings.ro_config) { logArgsPrefix("Config file arg:", section.first, section.second); } + for (const auto& setting : m_settings.rw_settings) { + LogPrintf("Setting file arg: %s = %s\n", setting.first, setting.second.write()); + } logArgsPrefix("Command-line arg:", "", m_settings.command_line_options); } diff --git a/src/util/system.h b/src/util/system.h index a5eea5dfab..0bd14cc9ea 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -41,6 +41,7 @@ int64_t GetStartupTime(); extern const char * const BITCOIN_CONF_FILENAME; +extern const char * const BITCOIN_SETTINGS_FILENAME; void SetupEnvironment(); bool SetupNetworking(); @@ -334,6 +335,39 @@ 
public: Optional<unsigned int> GetArgFlags(const std::string& name) const; /** + * Read and update settings file with saved settings. This needs to be + * called after SelectParams() because the settings file location is + * network-specific. + */ + bool InitSettings(std::string& error); + + /** + * Get settings file path, or return false if read-write settings were + * disabled with -nosettings. + */ + bool GetSettingsPath(fs::path* filepath = nullptr, bool temp = false) const; + + /** + * Read settings file. Push errors to vector, or log them if null. + */ + bool ReadSettingsFile(std::vector<std::string>* errors = nullptr); + + /** + * Write settings file. Push errors to vector, or log them if null. + */ + bool WriteSettingsFile(std::vector<std::string>* errors = nullptr) const; + + /** + * Access settings with lock held. + */ + template <typename Fn> + void LockSettings(Fn&& fn) + { + LOCK(cs_args); + fn(m_settings); + } + + /** * Log the config file options and the command line arguments, * useful for troubleshooting. */ diff --git a/src/validation.cpp b/src/validation.cpp index 4fe02ed244..5aa3d315d5 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -939,7 +939,7 @@ bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, Precompute if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) && !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) { // Only the witness is missing, so the transaction itself may be fine. - state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, + state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, state.GetRejectReason(), state.GetDebugMessage()); } return false; // state filled in by CheckInputScripts @@ -3641,8 +3641,10 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS return true; } - if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) - return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString()); + if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) { + LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString()); + return false; + } // Get prev block index CBlockIndex* pindexPrev = nullptr; @@ -5082,19 +5084,22 @@ bool LoadMempool(CTxMemPool& pool) } // TODO: remove this try except in v0.22 + std::map<uint256, uint256> unbroadcast_txids; try { - std::set<uint256> unbroadcast_txids; file >> unbroadcast_txids; unbroadcast = unbroadcast_txids.size(); - - for (const auto& txid : unbroadcast_txids) { - pool.AddUnbroadcastTx(txid); - } } catch (const std::exception&) { // mempool.dat files created prior to v0.21 will not have an // unbroadcast set. No need to log a failure if parsing fails here. } - + for (const auto& elem : unbroadcast_txids) { + // Don't add unbroadcast transactions that didn't get back into the + // mempool. + const CTransactionRef& added_tx = pool.get(elem.first); + if (added_tx != nullptr) { + pool.AddUnbroadcastTx(elem.first, added_tx->GetWitnessHash()); + } + } } catch (const std::exception& e) { LogPrintf("Failed to deserialize mempool data on disk: %s. 
Continuing anyway.\n", e.what()); return false; @@ -5110,7 +5115,7 @@ bool DumpMempool(const CTxMemPool& pool) std::map<uint256, CAmount> mapDeltas; std::vector<TxMempoolInfo> vinfo; - std::set<uint256> unbroadcast_txids; + std::map<uint256, uint256> unbroadcast_txids; static Mutex dump_mutex; LOCK(dump_mutex); diff --git a/src/version.h b/src/version.h index e5d1f5a7f9..b5f379e1b8 100644 --- a/src/version.h +++ b/src/version.h @@ -9,7 +9,7 @@ * network protocol versioning */ -static const int PROTOCOL_VERSION = 70015; +static const int PROTOCOL_VERSION = 70016; //! initial proto version, to be increased after version/verack negotiation static const int INIT_PROTO_VERSION = 209; @@ -35,4 +35,7 @@ static const int SHORT_IDS_BLOCKS_VERSION = 70014; //! not banning for invalid compact blocks starts with this version static const int INVALID_CB_NO_BAN_VERSION = 70015; +//! "wtxidrelay" command for wtxid-based relay starts with this version +static const int WTXID_RELAY_VERSION = 70016; + #endif // BITCOIN_VERSION_H diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index b8e03e3ec1..1953be2d54 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -312,8 +312,17 @@ void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile) dbenv->lsn_reset(strFile.c_str(), 0); } +BerkeleyDatabase::~BerkeleyDatabase() +{ + if (env) { + LOCK(cs_db); + size_t erased = env->m_databases.erase(strFile); + assert(erased == 1); + env->m_fileids.erase(strFile); + } +} -BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr) +BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database) { fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w')); fFlushOnClose = fFlushOnCloseIn; @@ -388,11 +397,16 @@ BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode, bo fReadOnly = fTmp; } } - ++env->mapFileUseCount[strFilename]; + database.AddRef(); strFile = strFilename; } } +void BerkeleyDatabase::Open(const char* mode) +{ + throw std::logic_error("BerkeleyDatabase does not implement Open. This function should not be called."); +} + void BerkeleyBatch::Flush() { if (activeTxn) @@ -426,11 +440,7 @@ void BerkeleyBatch::Close() if (fFlushOnClose) Flush(); - { - LOCK(cs_db); - --env->mapFileUseCount[strFile]; - } - env->m_db_in_use.notify_all(); + m_database.RemoveRef(); } void BerkeleyEnvironment::CloseDb(const std::string& strFile) @@ -675,22 +685,17 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const } } -void BerkeleyDatabase::Flush(bool shutdown) +void BerkeleyDatabase::Flush() { if (!IsDummy()) { - env->Flush(shutdown); - if (shutdown) { - LOCK(cs_db); - g_dbenvs.erase(env->Directory().string()); - env = nullptr; - } else { - // TODO: To avoid g_dbenvs.erase erasing the environment prematurely after the - // first database shutdown when multiple databases are open in the same - // environment, should replace raw database `env` pointers with shared or weak - // pointers, or else separate the database and environment shutdowns so - // environments can be shut down after databases. 
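The LoadMempool change above keeps the new on-disk format (a txid-to-wtxid map) but deliberately ignores the stored wtxid: it looks each transaction up again in the pool and registers the witness hash of whatever was actually re-accepted, skipping entries that did not make it back into the mempool. Condensed into a standalone helper (same logic, minus the deserialization and the backwards-compatibility try block):

    #include <txmempool.h>

    #include <map>

    // Re-register unbroadcast transactions after the mempool has been reloaded.
    // Entries whose transactions were not re-accepted are silently dropped.
    void ReaddUnbroadcast(CTxMemPool& pool, const std::map<uint256, uint256>& unbroadcast_txids)
    {
        for (const auto& elem : unbroadcast_txids) {
            const CTransactionRef tx = pool.get(elem.first);
            if (tx != nullptr) {
                pool.AddUnbroadcastTx(elem.first, tx->GetWitnessHash());
            }
        }
    }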
- env->m_fileids.erase(strFile); - } + env->Flush(false); + } +} + +void BerkeleyDatabase::Close() +{ + if (!IsDummy()) { + env->Flush(true); } } @@ -832,7 +837,22 @@ bool BerkeleyBatch::HasKey(CDataStream&& key) return ret == 0; } -std::unique_ptr<BerkeleyBatch> BerkeleyDatabase::MakeBatch(const char* mode, bool flush_on_close) +void BerkeleyDatabase::AddRef() +{ + LOCK(cs_db); + ++env->mapFileUseCount[strFile]; +} + +void BerkeleyDatabase::RemoveRef() +{ + { + LOCK(cs_db); + --env->mapFileUseCount[strFile]; + } + env->m_db_in_use.notify_all(); +} + +std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(const char* mode, bool flush_on_close) { return MakeUnique<BerkeleyBatch>(*this, mode, flush_on_close); } diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index 1b9b5de9f9..ef3b81d4d6 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -98,56 +98,59 @@ class BerkeleyBatch; /** An instance of this class represents one database. * For BerkeleyDB this is just a (env, strFile) tuple. **/ -class BerkeleyDatabase +class BerkeleyDatabase : public WalletDatabase { friend class BerkeleyBatch; public: /** Create dummy DB handle */ - BerkeleyDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(nullptr) + BerkeleyDatabase() : WalletDatabase(), env(nullptr) { } /** Create DB handle to real database */ BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, std::string filename) : - nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(std::move(env)), strFile(std::move(filename)) + WalletDatabase(), env(std::move(env)), strFile(std::move(filename)) { auto inserted = this->env->m_databases.emplace(strFile, std::ref(*this)); assert(inserted.second); } - ~BerkeleyDatabase() { - if (env) { - size_t erased = env->m_databases.erase(strFile); - assert(erased == 1); - } - } + ~BerkeleyDatabase() override; + + /** Open the database if it is not already opened. + * Dummy function, doesn't do anything right now, but is needed for class abstraction */ + void Open(const char* mode) override; /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero */ - bool Rewrite(const char* pszSkip=nullptr); + bool Rewrite(const char* pszSkip=nullptr) override; + + /** Indicate the a new database user has began using the database. */ + void AddRef() override; + /** Indicate that database user has stopped using the database and that it could be flushed or closed. */ + void RemoveRef() override; /** Back up the entire database to a file. */ - bool Backup(const std::string& strDest) const; + bool Backup(const std::string& strDest) const override; - /** Make sure all changes are flushed to disk. + /** Make sure all changes are flushed to database file. + */ + void Flush() override; + /** Flush to the database file and close the database. + * Also close the environment if no other databases are open in it. */ - void Flush(bool shutdown); + void Close() override; /* flush the wallet passively (TRY_LOCK) ideal to be called periodically */ - bool PeriodicFlush(); + bool PeriodicFlush() override; - void IncrementUpdateCounter(); - - void ReloadDbEnv(); + void IncrementUpdateCounter() override; - std::atomic<unsigned int> nUpdateCounter; - unsigned int nLastSeen; - unsigned int nLastFlushed; - int64_t nLastWalletUpdate; + void ReloadDbEnv() override; /** Verifies the environment and database file */ - bool Verify(bilingual_str& error); + bool Verify(bilingual_str& error) override; /** * Pointer to shared database environment. 
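BerkeleyBatch no longer touches env->mapFileUseCount directly: it keeps a reference to its BerkeleyDatabase and brackets its lifetime with AddRef() in the constructor and RemoveRef() in Close(), which lets the database decide when it is safe to flush or close. A stripped-down sketch of that ownership pattern (the types are stand-ins, not the wallet classes, and the real batch releases its reference in Close() rather than in a destructor):

    #include <atomic>
    #include <cassert>

    struct Database {
        std::atomic<int> m_use_count{0};                 // counts active batches/users
        void AddRef() { ++m_use_count; }
        void RemoveRef() { const int prev = m_use_count.fetch_sub(1); assert(prev > 0); }
        bool CanClose() const { return m_use_count == 0; }
    };

    // A batch pins its database for as long as it is open, so the database
    // cannot be flushed and closed out from under it.
    class Batch {
        Database& m_database;
    public:
        explicit Batch(Database& db) : m_database(db) { m_database.AddRef(); }
        ~Batch() { m_database.RemoveRef(); }
    };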
@@ -164,7 +167,7 @@ public: std::unique_ptr<Db> m_db; /** Make a BerkeleyBatch connected to this database */ - std::unique_ptr<BerkeleyBatch> MakeBatch(const char* mode, bool flush_on_close); + std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) override; private: std::string strFile; @@ -213,6 +216,7 @@ protected: bool fReadOnly; bool fFlushOnClose; BerkeleyEnvironment *env; + BerkeleyDatabase& m_database; public: explicit BerkeleyBatch(BerkeleyDatabase& database, const char* pszMode = "r+", bool fFlushOnCloseIn=true); diff --git a/src/wallet/db.h b/src/wallet/db.h index 76668f8dc2..12dc1cc96b 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -10,8 +10,12 @@ #include <fs.h> #include <streams.h> +#include <atomic> +#include <memory> #include <string> +struct bilingual_str; + /** Given a wallet directory path or legacy file path, return path to main data file in the wallet database. */ fs::path WalletDataFilePath(const fs::path& wallet_path); void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename); @@ -94,4 +98,60 @@ public: virtual bool TxnAbort() = 0; }; +/** An instance of this class represents one database. + **/ +class WalletDatabase +{ +public: + /** Create dummy DB handle */ + WalletDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0) {} + virtual ~WalletDatabase() {}; + + /** Open the database if it is not already opened. */ + virtual void Open(const char* mode) = 0; + + //! Counts the number of active database users to be sure that the database is not closed while someone is using it + std::atomic<int> m_refcount{0}; + /** Indicate the a new database user has began using the database. Increments m_refcount */ + virtual void AddRef() = 0; + /** Indicate that database user has stopped using the database and that it could be flushed or closed. Decrement m_refcount */ + virtual void RemoveRef() = 0; + + /** Rewrite the entire database on disk, with the exception of key pszSkip if non-zero + */ + virtual bool Rewrite(const char* pszSkip=nullptr) = 0; + + /** Back up the entire database to a file. + */ + virtual bool Backup(const std::string& strDest) const = 0; + + /** Make sure all changes are flushed to database file. + */ + virtual void Flush() = 0; + /** Flush to the database file and close the database. + * Also close the environment if no other databases are open in it. 
+ */ + virtual void Close() = 0; + /* flush the wallet passively (TRY_LOCK) + ideal to be called periodically */ + virtual bool PeriodicFlush() = 0; + + virtual void IncrementUpdateCounter() = 0; + + virtual void ReloadDbEnv() = 0; + + std::atomic<unsigned int> nUpdateCounter; + unsigned int nLastSeen; + unsigned int nLastFlushed; + int64_t nLastWalletUpdate; + + /** Verifies the environment and database file */ + virtual bool Verify(bilingual_str& error) = 0; + + std::string m_file_path; + + /** Make a DatabaseBatch connected to this database */ + virtual std::unique_ptr<DatabaseBatch> MakeBatch(const char* mode = "r+", bool flush_on_close = true) = 0; +}; + #endif // BITCOIN_WALLET_DB_H diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp index c2818a41e7..2a81d30133 100644 --- a/src/wallet/load.cpp +++ b/src/wallet/load.cpp @@ -99,14 +99,14 @@ void StartWallets(CScheduler& scheduler, const ArgsManager& args) void FlushWallets() { for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) { - pwallet->Flush(false); + pwallet->Flush(); } } void StopWallets() { for (const std::shared_ptr<CWallet>& pwallet : GetWallets()) { - pwallet->Flush(true); + pwallet->Close(); } } diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 9cc847b2d0..d2770a46f7 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -28,6 +28,11 @@ extern UniValue importmulti(const JSONRPCRequest& request); extern UniValue dumpwallet(const JSONRPCRequest& request); extern UniValue importwallet(const JSONRPCRequest& request); +// Ensure that fee levels defined in the wallet are at least as high +// as the default levels for node policy. +static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet minimum fee is smaller than default relay fee"); +static_assert(WALLET_INCREMENTAL_RELAY_FEE >= DEFAULT_INCREMENTAL_RELAY_FEE, "wallet incremental fee is smaller than default incremental relay fee"); + BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup) static std::shared_ptr<CWallet> TestLoadWallet(interfaces::Chain& chain) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 8eec00993f..cee2f2214c 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -439,9 +439,14 @@ bool CWallet::HasWalletSpend(const uint256& txid) const return (iter != mapTxSpends.end() && iter->first.hash == txid); } -void CWallet::Flush(bool shutdown) +void CWallet::Flush() { - database->Flush(shutdown); + database->Flush(); +} + +void CWallet::Close() +{ + database->Close(); } void CWallet::SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator> range) diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 8cb2a64484..a761caf38c 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -1087,7 +1087,10 @@ public: bool HasWalletSpend(const uint256& txid) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); //! Flush wallet (bitdb flush) - void Flush(bool shutdown=false); + void Flush(); + + //! Close wallet database + void Close(); /** Wallet is about to be unloaded */ boost::signals2::signal<void ()> NotifyUnload; diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 1478687bf9..8c409b40cd 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -1012,20 +1012,20 @@ bool IsWalletLoaded(const fs::path& wallet_path) } /** Return object for accessing database at specified path. 
*/ -std::unique_ptr<BerkeleyDatabase> CreateWalletDatabase(const fs::path& path) +std::unique_ptr<WalletDatabase> CreateWalletDatabase(const fs::path& path) { std::string filename; return MakeUnique<BerkeleyDatabase>(GetWalletEnv(path, filename), std::move(filename)); } /** Return object for accessing dummy database with no read/write capabilities. */ -std::unique_ptr<BerkeleyDatabase> CreateDummyWalletDatabase() +std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase() { return MakeUnique<BerkeleyDatabase>(); } /** Return object for accessing temporary in-memory database. */ -std::unique_ptr<BerkeleyDatabase> CreateMockWalletDatabase() +std::unique_ptr<WalletDatabase> CreateMockWalletDatabase() { return MakeUnique<BerkeleyDatabase>(std::make_shared<BerkeleyEnvironment>(), ""); } diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 6b55361c07..7c5bf7652b 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -40,9 +40,6 @@ class CWalletTx; class uint160; class uint256; -/** Backend-agnostic database type. */ -using WalletDatabase = BerkeleyDatabase; - /** Error statuses for the wallet database */ enum class DBErrors { @@ -280,7 +277,7 @@ public: //! Abort current transaction bool TxnAbort(); private: - std::unique_ptr<BerkeleyBatch> m_batch; + std::unique_ptr<DatabaseBatch> m_batch; WalletDatabase& m_database; }; @@ -294,12 +291,12 @@ bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, st bool IsWalletLoaded(const fs::path& wallet_path); /** Return object for accessing database at specified path. */ -std::unique_ptr<BerkeleyDatabase> CreateWalletDatabase(const fs::path& path); +std::unique_ptr<WalletDatabase> CreateWalletDatabase(const fs::path& path); /** Return object for accessing dummy database with no read/write capabilities. */ -std::unique_ptr<BerkeleyDatabase> CreateDummyWalletDatabase(); +std::unique_ptr<WalletDatabase> CreateDummyWalletDatabase(); /** Return object for accessing temporary in-memory database. 
*/ -std::unique_ptr<BerkeleyDatabase> CreateMockWalletDatabase(); +std::unique_ptr<WalletDatabase> CreateMockWalletDatabase(); #endif // BITCOIN_WALLET_WALLETDB_H diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index 8a45d81456..9f25b1ae7d 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -17,7 +17,7 @@ namespace WalletTool { static void WalletToolReleaseWallet(CWallet* wallet) { wallet->WalletLogPrintf("Releasing wallet\n"); - wallet->Flush(true); + wallet->Close(); delete wallet; } @@ -133,7 +133,7 @@ bool ExecuteWalletToolFunc(const std::string& command, const std::string& name) std::shared_ptr<CWallet> wallet_instance = CreateWallet(name, path); if (wallet_instance) { WalletShowInfo(wallet_instance.get()); - wallet_instance->Flush(true); + wallet_instance->Close(); } } else if (command == "info" || command == "salvage") { if (!fs::exists(path)) { @@ -145,7 +145,7 @@ bool ExecuteWalletToolFunc(const std::string& command, const std::string& name) std::shared_ptr<CWallet> wallet_instance = LoadWallet(name, path); if (!wallet_instance) return false; WalletShowInfo(wallet_instance.get()); - wallet_instance->Flush(true); + wallet_instance->Close(); } else if (command == "salvage") { return SalvageWallet(path); } diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index a4dc455d57..34e856c1ba 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -112,11 +112,38 @@ class ConfArgsTest(BitcoinTestFramework): ]) self.stop_node(0) + def test_networkactive(self): + self.log.info('Test -networkactive option') + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.start_node(0) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.start_node(0, extra_args=['-networkactive']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.start_node(0, extra_args=['-networkactive=1']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.start_node(0, extra_args=['-networkactive=0']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.start_node(0, extra_args=['-nonetworkactive']) + self.stop_node(0) + + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.start_node(0, extra_args=['-nonetworkactive=1']) + self.stop_node(0) + def run_test(self): self.stop_node(0) self.test_log_buffer() self.test_args_log() + self.test_networkactive() self.test_config_file_parser() diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py new file mode 100755 index 0000000000..c565854bb0 --- /dev/null +++ b/test/functional/feature_settings.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test various command line arguments and configuration file parameters.""" + +import json + +from pathlib import Path + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.test_node import ErrorMatch +from test_framework.util import assert_equal + + +class SettingsTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + node, = self.nodes + settings = Path(node.datadir, self.chain, "settings.json") + conf = Path(node.datadir, "bitcoin.conf") + + # Assert empty settings file was created + self.stop_node(0) + with settings.open() as fp: + assert_equal(json.load(fp), {}) + + # Assert settings are parsed and logged + with settings.open("w") as fp: + json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}, fp) + with node.assert_debug_log(expected_msgs=[ + 'Setting file arg: string = "string"', + 'Setting file arg: num = 5', + 'Setting file arg: bool = true', + 'Setting file arg: null = null', + 'Setting file arg: list = [6,7]']): + self.start_node(0) + self.stop_node(0) + + # Assert settings are unchanged after shutdown + with settings.open() as fp: + assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}) + + # Test invalid json + with settings.open("w") as fp: + fp.write("invalid json") + node.assert_start_raises_init_error(expected_msg='Unable to parse settings file', match=ErrorMatch.PARTIAL_REGEX) + + # Test invalid json object + with settings.open("w") as fp: + fp.write('"string"') + node.assert_start_raises_init_error(expected_msg='Found non-object value "string" in settings file', match=ErrorMatch.PARTIAL_REGEX) + + # Test invalid settings file containing duplicate keys + with settings.open("w") as fp: + fp.write('{"key": 1, "key": 2}') + node.assert_start_raises_init_error(expected_msg='Found duplicate key key in settings file', match=ErrorMatch.PARTIAL_REGEX) + + # Test invalid settings file is ignored with command line -nosettings + with node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']): + self.start_node(0, extra_args=["-nosettings"]) + self.stop_node(0) + + # Test invalid settings file is ignored with config file -nosettings + with conf.open('a') as conf: + conf.write('nosettings=1\n') + with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']): + self.start_node(0) + self.stop_node(0) + + # Test alternate settings path + altsettings = Path(node.datadir, "altsettings.json") + with altsettings.open("w") as fp: + fp.write('{"key": "value"}') + with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']): + self.start_node(0, extra_args=["-settings={}".format(altsettings)]) + self.stop_node(0) + + +if __name__ == '__main__': + SettingsTest().main() diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 5b7216b253..542d24f4be 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -69,14 +69,19 @@ class MempoolPackagesTest(BitcoinTestFramework): fee = Decimal("0.0001") # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] + witness_chain = [] for i in range(MAX_ANCESTORS): (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1) value = sent_value chain.append(txid) + # We need the wtxids to check P2P announcements + fulltx = self.nodes[0].getrawtransaction(txid) + witnesstx = 
self.nodes[0].decoderawtransaction(fulltx, True) + witness_chain.append(witnesstx['hash']) # Wait until mempool transactions have passed initial broadcast (sent inv and received getdata) # Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between - self.nodes[0].p2p.wait_for_broadcast(chain) + self.nodes[0].p2p.wait_for_broadcast(witness_chain) # Check mempool has MAX_ANCESTORS transactions in it, and descendant and ancestor # count and fees should look correct diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py index f42a343042..27e6b669f6 100755 --- a/test/functional/p2p_blocksonly.py +++ b/test/functional/p2p_blocksonly.py @@ -52,7 +52,7 @@ class P2PBlocksOnly(BitcoinTestFramework): self.log.info('Check that txs from rpc are not rejected and relayed to other peers') assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True) txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid'] - with self.nodes[0].assert_debug_log(['received getdata for: tx {} peer=1'.format(txid)]): + with self.nodes[0].assert_debug_log(['received getdata for: wtx {} peer=1'.format(txid)]): self.nodes[0].sendrawtransaction(sigtx) self.nodes[0].p2p.wait_for_tx(txid) assert_equal(self.nodes[0].getmempoolinfo()['size'], 1) diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index f939ea965c..73afe9adc4 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -7,7 +7,7 @@ from decimal import Decimal import time -from test_framework.messages import MSG_TX, msg_feefilter +from test_framework.messages import MSG_TX, MSG_WTX, msg_feefilter from test_framework.mininode import mininode_lock, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal @@ -45,7 +45,7 @@ class TestP2PConn(P2PInterface): def on_inv(self, message): for i in message.inv: - if (i.type == MSG_TX): + if (i.type == MSG_TX) or (i.type == MSG_WTX): self.txinvs.append(hashToHex(i.hash)) def clear_invs(self): diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 25dd765442..9915b844d1 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -25,6 +25,7 @@ from test_framework.messages import ( MSG_BLOCK, MSG_TX, MSG_WITNESS_FLAG, + MSG_WTX, NODE_NETWORK, NODE_WITNESS, msg_no_witness_block, @@ -34,6 +35,7 @@ from test_framework.messages import ( msg_tx, msg_block, msg_no_witness_tx, + msg_verack, ser_uint256, ser_vector, sha256, @@ -81,6 +83,7 @@ from test_framework.util import ( softfork_active, hex_str_to_bytes, assert_raises_rpc_error, + wait_until, ) # The versionbit bit used to signal activation of SegWit @@ -143,25 +146,47 @@ def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=Non class TestP2PConn(P2PInterface): - def __init__(self): + def __init__(self, wtxidrelay=False): super().__init__() self.getdataset = set() + self.last_wtxidrelay = [] + self.lastgetdata = [] + self.wtxidrelay = wtxidrelay # Avoid sending out msg_getdata in the mininode thread as a reply to invs. 
# They are not needed and would only lead to races because we send msg_getdata out in the test thread def on_inv(self, message): pass + def on_version(self, message): + if self.wtxidrelay: + super().on_version(message) + else: + self.send_message(msg_verack()) + self.nServices = message.nServices + def on_getdata(self, message): + self.lastgetdata = message.inv for inv in message.inv: self.getdataset.add(inv.hash) - def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True): + def on_wtxidrelay(self, message): + self.last_wtxidrelay.append(message) + + def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True, use_wtxid=False): with mininode_lock: self.last_message.pop("getdata", None) - self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)])) + if use_wtxid: + wtxid = tx.calc_sha256(True) + self.send_message(msg_inv(inv=[CInv(MSG_WTX, wtxid)])) + else: + self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)])) + if success: - self.wait_for_getdata([tx.sha256], timeout) + if use_wtxid: + self.wait_for_getdata([wtxid], timeout) + else: + self.wait_for_getdata([tx.sha256], timeout) else: time.sleep(timeout) assert not self.last_message.get("getdata") @@ -277,6 +302,7 @@ class SegWitTest(BitcoinTestFramework): self.test_upgrade_after_activation() self.test_witness_sigops() self.test_superfluous_witness() + self.test_wtxid_relay() # Individual tests @@ -1270,7 +1296,6 @@ class SegWitTest(BitcoinTestFramework): test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False) # Verify that removing the witness succeeds. - self.test_node.announce_tx_and_wait_for_getdata(tx) test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) # Now try to add extra witness data to a valid witness tx. 
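The two test peers above differ only in how they answer the version message: the wtxidrelay-capable peer defers to the default handler (which, per the test-framework change further down, sends a wtxidrelay message before verack when the advertised protocol version is at least 70016), while the legacy peer replies with a bare verack. From the node's side the negotiation amounts to a version check plus an ordering constraint; a rough sketch of that gate (the function and its parameters are illustrative, not the actual net_processing code):

    #include <functional>
    #include <string>

    static const int WTXID_RELAY_VERSION = 70016; // matches src/version.h in this patch

    // Illustrative only: a wtxidrelay message is meaningful only if it is sent
    // after version and before verack, and only to peers announcing a protocol
    // version of at least WTXID_RELAY_VERSION.
    void ReplyToVersion(int peer_version, bool we_support_wtxid_relay,
                        const std::function<void(const std::string&)>& send_message)
    {
        if (we_support_wtxid_relay && peer_version >= WTXID_RELAY_VERSION) {
            send_message("wtxidrelay"); // must precede verack
        }
        send_message("verack");
    }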
@@ -1297,8 +1322,6 @@ class SegWitTest(BitcoinTestFramework): # Node will not be blinded to the transaction self.std_node.announce_tx_and_wait_for_getdata(tx3) test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') - self.std_node.announce_tx_and_wait_for_getdata(tx3) - test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) @@ -2016,6 +2039,11 @@ class SegWitTest(BitcoinTestFramework): # TODO: test p2sh sigop counting + # Cleanup and prep for next test + self.utxo.pop(0) + self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + + @subtest # type: ignore def test_superfluous_witness(self): # Serialization of tx that puts witness flag to 3 always def serialize_with_bogus_witness(tx): @@ -2059,6 +2087,67 @@ class SegWitTest(BitcoinTestFramework): with self.nodes[0].assert_debug_log(['Unknown transaction optional data']): self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx)) + @subtest # type: ignore + def test_wtxid_relay(self): + # Use brand new nodes to avoid contamination from earlier tests + self.wtx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS) + self.tx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=False), services=NODE_NETWORK | NODE_WITNESS) + + # Check wtxidrelay feature negotiation message through connecting a new peer + def received_wtxidrelay(): + return (len(self.wtx_node.last_wtxidrelay) > 0) + wait_until(received_wtxidrelay, timeout=60, lock=mininode_lock) + + # Create a Segwit output from the latest UTXO + # and announce it to the network + witness_program = CScript([OP_TRUE]) + witness_hash = sha256(witness_program) + script_pubkey = CScript([OP_0, witness_hash]) + + tx = CTransaction() + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) + tx.rehash() + + # Create a Segwit transaction + tx2 = CTransaction() + tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) + tx2.wit.vtxinwit.append(CTxInWitness()) + tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.rehash() + + # Announce Segwit transaction with wtxid + # and wait for getdata + self.wtx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=True) + with mininode_lock: + lgd = self.wtx_node.lastgetdata[:] + assert_equal(lgd, [CInv(MSG_WTX, tx2.calc_sha256(True))]) + + # Announce Segwit transaction from non wtxidrelay peer + # and wait for getdata + self.tx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=False) + with mininode_lock: + lgd = self.tx_node.lastgetdata[:] + assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx2.sha256)]) + + # Send tx2 through; it's an orphan so won't be accepted + with mininode_lock: + self.tx_node.last_message.pop("getdata", None) + test_transaction_acceptance(self.nodes[0], self.tx_node, tx2, with_witness=True, accepted=False) + + # Expect a request for parent (tx) due to use of non-WTX peer + self.tx_node.wait_for_getdata([tx.sha256], 60) + with mininode_lock: + lgd = self.tx_node.lastgetdata[:] + assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx.sha256)]) + + # Send tx through + test_transaction_acceptance(self.nodes[0], self.tx_node, tx, with_witness=False, accepted=True) + + # Check tx2 is there now + 
assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True) + if __name__ == '__main__': SegWitTest().main() diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 10f5eea0e5..2527edc135 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -12,6 +12,7 @@ from test_framework.messages import ( FromHex, MSG_TX, MSG_TYPE_MASK, + MSG_WTX, msg_inv, msg_notfound, ) @@ -36,7 +37,7 @@ class TestP2PConn(P2PInterface): def on_getdata(self, message): for i in message.inv: - if i.type & MSG_TYPE_MASK == MSG_TX: + if i.type & MSG_TYPE_MASK == MSG_TX or i.type & MSG_TYPE_MASK == MSG_WTX: self.tx_getdata_count += 1 @@ -44,12 +45,13 @@ class TestP2PConn(P2PInterface): GETDATA_TX_INTERVAL = 60 # seconds MAX_GETDATA_RANDOM_DELAY = 2 # seconds INBOUND_PEER_TX_DELAY = 2 # seconds +TXID_RELAY_DELAY = 2 # seconds MAX_GETDATA_IN_FLIGHT = 100 TX_EXPIRY_INTERVAL = GETDATA_TX_INTERVAL * 10 # Python test constants NUM_INBOUND = 10 -MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY +MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + MAX_GETDATA_RANDOM_DELAY + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY class TxDownloadTest(BitcoinTestFramework): @@ -63,7 +65,7 @@ class TxDownloadTest(BitcoinTestFramework): txid = 0xdeadbeef self.log.info("Announce the txid from each incoming peer to node 0") - msg = msg_inv([CInv(t=MSG_TX, h=txid)]) + msg = msg_inv([CInv(t=MSG_WTX, h=txid)]) for p in self.nodes[0].p2ps: p.send_and_ping(msg) @@ -135,13 +137,13 @@ class TxDownloadTest(BitcoinTestFramework): with mininode_lock: p.tx_getdata_count = 0 - p.send_message(msg_inv([CInv(t=MSG_TX, h=i) for i in txids])) + p.send_message(msg_inv([CInv(t=MSG_WTX, h=i) for i in txids])) wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT, lock=mininode_lock) with mininode_lock: assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT) self.log.info("Now check that if we send a NOTFOUND for a transaction, we'll get one more request") - p.send_message(msg_notfound(vec=[CInv(t=MSG_TX, h=txids[0])])) + p.send_message(msg_notfound(vec=[CInv(t=MSG_WTX, h=txids[0])])) wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT + 1, timeout=10, lock=mininode_lock) with mininode_lock: assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT + 1) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index ca26152e7e..3336246c8b 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -102,12 +102,14 @@ class NetTest(BitcoinTestFramework): assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True) assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2) - self.nodes[0].setnetworkactive(state=False) + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']): + self.nodes[0].setnetworkactive(state=False) assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False) # Wait a bit for all sockets to close wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3) - self.nodes[0].setnetworkactive(state=True) + with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']): + self.nodes[0].setnetworkactive(state=True) self.log.info('Connect nodes both way') connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 12302cdbc3..2462a9a6db 100755 --- a/test/functional/test_framework/messages.py +++ 
b/test/functional/test_framework/messages.py @@ -31,7 +31,7 @@ from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, assert_equal MIN_VERSION_SUPPORTED = 60001 -MY_VERSION = 70014 # past bip-31 for ping/pong +MY_VERSION = 70016 # past wtxid relay MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) @@ -59,6 +59,7 @@ MSG_TX = 1 MSG_BLOCK = 2 MSG_FILTERED_BLOCK = 3 MSG_CMPCT_BLOCK = 4 +MSG_WTX = 5 MSG_WITNESS_FLAG = 1 << 30 MSG_TYPE_MASK = 0xffffffff >> 2 @@ -242,7 +243,8 @@ class CInv: MSG_TX | MSG_WITNESS_FLAG: "WitnessTx", MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock", MSG_FILTERED_BLOCK: "filtered Block", - 4: "CompactBlock" + 4: "CompactBlock", + 5: "WTX", } def __init__(self, t=0, h=0): @@ -263,6 +265,9 @@ class CInv: return "CInv(type=%s hash=%064x)" \ % (self.typemap[self.type], self.hash) + def __eq__(self, other): + return isinstance(other, CInv) and self.hash == other.hash and self.type == other.type + class CBlockLocator: __slots__ = ("nVersion", "vHave") @@ -1124,6 +1129,22 @@ class msg_tx: def __repr__(self): return "msg_tx(tx=%s)" % (repr(self.tx)) +class msg_wtxidrelay: + __slots__ = () + msgtype = b"wtxidrelay" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return b"" + + def __repr__(self): + return "msg_wtxidrelay()" + class msg_no_witness_tx(msg_tx): __slots__ = () diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index e6da33763d..f68c1a9ddd 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -59,6 +59,8 @@ from test_framework.messages import ( MSG_TYPE_MASK, msg_verack, msg_version, + MSG_WTX, + msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, sha256, @@ -96,6 +98,7 @@ MESSAGEMAP = { b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, + b"wtxidrelay": msg_wtxidrelay, } MAGIC_BYTES = { @@ -356,6 +359,7 @@ class P2PInterface(P2PConnection): def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass + def on_wtxidrelay(self, message): pass def on_inv(self, message): want = msg_getdata() @@ -373,6 +377,8 @@ class P2PInterface(P2PConnection): def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED) + if message.nVersion >= 70016: + self.send_message(msg_wtxidrelay()) self.send_message(msg_verack()) self.nServices = message.nServices @@ -654,7 +660,7 @@ class P2PTxInvStore(P2PInterface): super().on_inv(message) # Send getdata in response. # Store how many times invs have been received for each tx. for i in message.inv: - if i.type == MSG_TX: + if (i.type == MSG_TX) or (i.type == MSG_WTX): # save txid self.tx_invs_received[i.hash] += 1 diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 2a360bd38a..95c2b7c5ec 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -243,6 +243,7 @@ BASE_SCRIPTS = [ 'p2p_permissions.py', 'feature_blocksdir.py', 'feature_config_args.py', + 'feature_settings.py', 'rpc_getdescriptorinfo.py', 'rpc_getpeerinfo_banscore_deprecation.py', 'rpc_help.py', |