-rwxr-xr-x  contrib/testgen/gen_key_io_test_vectors.py             9
-rw-r--r--  doc/bips.md                                             1
-rw-r--r--  doc/files.md                                            1
-rw-r--r--  doc/release-notes-19988.md                              9
-rw-r--r--  doc/release-notes.md                                    8
-rw-r--r--  src/addrdb.cpp                                         20
-rw-r--r--  src/addrdb.h                                           20
-rw-r--r--  src/chainparams.cpp                                    18
-rw-r--r--  src/consensus/params.h                                  2
-rw-r--r--  src/net.cpp                                            71
-rw-r--r--  src/net.h                                              31
-rw-r--r--  src/net_processing.cpp                                125
-rw-r--r--  src/qt/sendcoinsdialog.cpp                             10
-rw-r--r--  src/qt/sendcoinsdialog.h                                2
-rw-r--r--  src/randomenv.cpp                                       5
-rw-r--r--  src/rpc/mining.cpp                                      2
-rw-r--r--  src/rpc/net.cpp                                        13
-rw-r--r--  src/rpc/rawtransaction.cpp                             16
-rw-r--r--  src/script/sigcache.cpp                                 4
-rw-r--r--  src/test/script_tests.cpp                              60
-rw-r--r--  src/util/system.cpp                                     8
-rw-r--r--  src/wallet/rpcdump.cpp                                  5
-rw-r--r--  src/wallet/rpcwallet.cpp                                4
-rw-r--r--  src/wallet/scriptpubkeyman.cpp                          2
-rwxr-xr-x  test/functional/feature_bip68_sequence.py              17
-rwxr-xr-x  test/functional/feature_nulldummy.py                   15
-rwxr-xr-x  test/functional/feature_segwit.py                       2
-rwxr-xr-x  test/functional/feature_settings.py                    12
-rwxr-xr-x  test/functional/mining_getblocktemplate_longpoll.py    33
-rwxr-xr-x  test/functional/p2p_compactblocks.py                   11
-rw-r--r--  test/functional/test_framework/blocktools.py           32
-rw-r--r--  test/functional/test_framework/key.py                   6
-rw-r--r--  test/functional/test_framework/util.py                 57
-rwxr-xr-x  test/functional/wallet_import_rescan.py                 1
-rwxr-xr-x  test/functional/wallet_send.py                          1
35 files changed, 378 insertions, 255 deletions
diff --git a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py
index a00acb1f41..49320d92e6 100755
--- a/contrib/testgen/gen_key_io_test_vectors.py
+++ b/contrib/testgen/gen_key_io_test_vectors.py
@@ -15,8 +15,7 @@ import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
-from binascii import b2a_hex
-from segwit_addr import bech32_encode, decode, convertbits, CHARSET
+from segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET
# key types
PUBKEY_ADDRESS = 0
@@ -109,7 +108,7 @@ def is_valid(v):
def is_valid_bech32(v):
'''Check vector v for bech32 validity'''
for hrp in ['bc', 'tb', 'bcrt']:
- if decode(hrp, v) != (None, None):
+ if decode_segwit_address(hrp, v) != (None, None):
return True
return False
@@ -141,9 +140,7 @@ def gen_valid_vectors():
rv, payload = valid_vector_generator(template)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
- hexrepr = b2a_hex(payload)
- if isinstance(hexrepr, bytes):
- hexrepr = hexrepr.decode('utf8')
+ hexrepr = payload.hex()
yield (rv, hexrepr, metadata)
def gen_invalid_base58_vector(template):
diff --git a/doc/bips.md b/doc/bips.md
index ad6f7a0767..8c20533c9b 100644
--- a/doc/bips.md
+++ b/doc/bips.md
@@ -45,3 +45,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**):
* [`BIP 176`](https://github.com/bitcoin/bips/blob/master/bip-0176.mediawiki): Bits Denomination [QT only] is supported as of **v0.16.0** ([PR 12035](https://github.com/bitcoin/bitcoin/pull/12035)).
* [`BIP 325`](https://github.com/bitcoin/bips/blob/master/bip-0325.mediawiki): Signet test network is supported as of **v0.21.0** ([PR 18267](https://github.com/bitcoin/bitcoin/pull/18267)).
* [`BIP 339`](https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki): Relay of transactions by wtxid is supported as of **v0.21.0** ([PR 18044](https://github.com/bitcoin/bitcoin/pull/18044)).
+* [`BIP 340`](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) [`341`](https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki) [`342`](https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki): Validation rules for Taproot (including Schnorr signatures and Tapscript leaves) are implemented as of **v0.21.0** ([PR 19953](https://github.com/bitcoin/bitcoin/pull/19953)), without mainnet activation.
diff --git a/doc/files.md b/doc/files.md
index e3f195de43..65f98e6986 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -50,6 +50,7 @@ Subdirectory | File(s) | Description
`indexes/blockfilter/basic/db/` | LevelDB database | Blockfilter index LevelDB database for the basic filtertype; *optional*, used if `-blockfilterindex=basic`
`indexes/blockfilter/basic/` | `fltrNNNNN.dat`<sup>[\[2\]](#note2)</sup> | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic`
`wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, wallets reside in the [data directory](#data-directory-location)
+`./` | `anchors.dat` | Anchor IP address database, created on shutdown and deleted at startup. Anchors are last known outgoing block-relay-only peers that are tried to re-connect to on startup
`./` | `banlist.dat` | Stores the IPs/subnets of banned nodes
`./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. Path can be specified by `-conf` option
`./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option
diff --git a/doc/release-notes-19988.md b/doc/release-notes-19988.md
deleted file mode 100644
index ef26eb3032..0000000000
--- a/doc/release-notes-19988.md
+++ /dev/null
@@ -1,9 +0,0 @@
-P2P changes
------------
-
-The size of the set of transactions that peers have announced and we consider
-for requests has been reduced from 100000 to 5000 (per peer), and further
-announcements will be ignored when that limit is reached. If you need to
-dump (very) large batches of transactions, exceptions can be made for trusted
-peers using the "relay" network permission. For localhost for example it can
-be enabled using the command line option `-whitelist=relay@127.0.0.1`.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 65726f3d5d..4c69c61390 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -82,6 +82,14 @@ P2P and network changes
node using P2P relay. This version reduces the initial broadcast guarantees
for wallet transactions submitted via P2P to a node running the wallet. (#18038)
+- The size of the set of transactions that peers have announced and we consider
+ for requests has been reduced from 100000 to 5000 (per peer), and further
+ announcements will be ignored when that limit is reached. If you need to dump
+ (very) large batches of transactions, exceptions can be made for trusted
+ peers using the "relay" network permission. For localhost for example it can
+ be enabled using the command line option `-whitelist=relay@127.0.0.1`.
+ (#19988)
+
- The Tor onion service that is automatically created by setting the
`-listenonion` configuration parameter will now be created as a Tor v3 service
instead of Tor v2. The private key that was used for Tor v2 (if any) will be
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index f3e8a19de2..27f22826a9 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -10,6 +10,7 @@
#include <clientversion.h>
#include <cstdint>
#include <hash.h>
+#include <logging/timer.h>
#include <random.h>
#include <streams.h>
#include <tinyformat.h>
@@ -156,3 +157,22 @@ bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers)
}
return ret;
}
+
+void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors)
+{
+ LOG_TIME_SECONDS(strprintf("Flush %d outbound block-relay-only peer addresses to anchors.dat", anchors.size()));
+ SerializeFileDB("anchors", anchors_db_path, anchors);
+}
+
+std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path)
+{
+ std::vector<CAddress> anchors;
+ if (DeserializeFileDB(anchors_db_path, anchors)) {
+ LogPrintf("Loaded %i addresses from %s\n", anchors.size(), anchors_db_path.filename());
+ } else {
+ anchors.clear();
+ }
+
+ fs::remove(anchors_db_path);
+ return anchors;
+}
diff --git a/src/addrdb.h b/src/addrdb.h
index 8410c3776c..4ac0e3e1b5 100644
--- a/src/addrdb.h
+++ b/src/addrdb.h
@@ -11,9 +11,9 @@
#include <serialize.h>
#include <string>
-#include <map>
+#include <vector>
-class CSubNet;
+class CAddress;
class CAddrMan;
class CDataStream;
@@ -73,4 +73,20 @@ public:
bool Read(banmap_t& banSet);
};
+/**
+ * Dump the anchor IP address database (anchors.dat)
+ *
+ * Anchors are last known outgoing block-relay-only peers that are
+ * tried to re-connect to on startup.
+ */
+void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors);
+
+/**
+ * Read the anchor IP address database (anchors.dat)
+ *
+ * Deleting anchors.dat is intentional as it avoids renewed peering to anchors after
+ * an unclean shutdown and thus potential exploitation of the anchor peer policy.
+ */
+std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path);
+
#endif // BITCOIN_ADDRDB_H
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index a34bf350fc..7998357bc7 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -91,10 +91,7 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = 1199145601; // January 1, 2008
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1230767999; // December 31, 2008
- // The best chain should have at least this much work.
consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000e1ab5ec9348e9f4b8eb8154");
-
- // By default assume that the signatures in ancestors of this block are valid.
consensus.defaultAssumeValid = uint256S("0x0000000000000000000f2adce67e49b0b6bdeb9de8b7c3d7e93b21e7fc1e819d"); // 623950
/**
@@ -207,10 +204,7 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = 1199145601; // January 1, 2008
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1230767999; // December 31, 2008
- // The best chain should have at least this much work.
consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000001495c1d5a01e2af8a23");
-
- // By default assume that the signatures in ancestors of this block are valid.
consensus.defaultAssumeValid = uint256S("0x000000000000056c49030c174179b52a928c870e6e8a822c75973b7970cfbd01"); // 1692000
pchMessageStart[0] = 0x0b;
@@ -297,6 +291,8 @@ public:
}
bin = ParseHex(signet_challenge[0]);
+ consensus.nMinimumChainWork = uint256{};
+ consensus.defaultAssumeValid = uint256{};
m_assumed_blockchain_size = 0;
m_assumed_chain_state_size = 0;
chainTxData = ChainTxData{
@@ -315,7 +311,9 @@ public:
consensus.signet_blocks = true;
consensus.signet_challenge.assign(bin.begin(), bin.end());
consensus.nSubsidyHalvingInterval = 210000;
+ consensus.BIP16Exception = uint256{};
consensus.BIP34Height = 1;
+ consensus.BIP34Hash = uint256{};
consensus.BIP65Height = 1;
consensus.BIP66Height = 1;
consensus.CSVHeight = 1;
@@ -326,6 +324,7 @@ public:
consensus.fPowNoRetargeting = false;
consensus.nRuleChangeActivationThreshold = 1916;
consensus.nMinerConfirmationWindow = 2016;
+ consensus.MinBIP9WarningHeight = 0;
consensus.powLimit = uint256S("00000377ae000000000000000000000000000000000000000000000000000000");
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28;
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008
@@ -394,11 +393,8 @@ public:
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nStartTime = Consensus::BIP9Deployment::ALWAYS_ACTIVE;
consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT;
- // The best chain should have at least this much work.
- consensus.nMinimumChainWork = uint256S("0x00");
-
- // By default assume that the signatures in ancestors of this block are valid.
- consensus.defaultAssumeValid = uint256S("0x00");
+ consensus.nMinimumChainWork = uint256{};
+ consensus.defaultAssumeValid = uint256{};
pchMessageStart[0] = 0xfa;
pchMessageStart[1] = 0xbf;
diff --git a/src/consensus/params.h b/src/consensus/params.h
index 932f0d2c60..0983595c6a 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -79,7 +79,9 @@ struct Params {
int64_t nPowTargetSpacing;
int64_t nPowTargetTimespan;
int64_t DifficultyAdjustmentInterval() const { return nPowTargetTimespan / nPowTargetSpacing; }
+ /** The best chain should have at least this much work */
uint256 nMinimumChainWork;
+ /** By default assume that the signatures in ancestors of this block are valid */
uint256 defaultAssumeValid;
/**
diff --git a/src/net.cpp b/src/net.cpp
index 5db7fc9e73..e8a27c3530 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -47,6 +47,12 @@ static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed"
#include <math.h>
+/** Maximum number of block-relay-only anchor connections */
+static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2;
+static_assert (MAX_BLOCK_RELAY_ONLY_ANCHORS <= static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS), "MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed MAX_BLOCK_RELAY_ONLY_CONNECTIONS.");
+/** Anchor IP address database file name */
+const char* const ANCHORS_DATABASE_FILENAME = "anchors.dat";
+
// How often to dump addresses to peers.dat
static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15};
@@ -1933,10 +1939,12 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
int64_t nTime = GetTimeMicros();
+ bool anchor = false;
bool fFeeler = false;
// Determine what type of connection to open. Opening
- // OUTBOUND_FULL_RELAY connections gets the highest priority until we
+ // BLOCK_RELAY connections to addresses from anchors.dat gets the highest
+ // priority. Then we open OUTBOUND_FULL_RELAY priority until we
// meet our full-relay capacity. Then we open BLOCK_RELAY connection
// until we hit our block-relay-only peer limit.
// GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
@@ -1944,7 +1952,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// these conditions are met, check the nNextFeeler timer to decide if
// we should open a FEELER.
- if (nOutboundFullRelay < m_max_outbound_full_relay) {
+ if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) {
+ conn_type = ConnectionType::BLOCK_RELAY;
+ anchor = true;
+ } else if (nOutboundFullRelay < m_max_outbound_full_relay) {
// OUTBOUND_FULL_RELAY
} else if (nOutboundBlockRelay < m_max_outbound_block_relay) {
conn_type = ConnectionType::BLOCK_RELAY;
@@ -1965,6 +1976,24 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
int nTries = 0;
while (!interruptNet)
{
+ if (anchor && !m_anchors.empty()) {
+ const CAddress addr = m_anchors.back();
+ m_anchors.pop_back();
+ if (!addr.IsValid() || IsLocal(addr) || !IsReachable(addr) ||
+ !HasAllDesirableServiceFlags(addr.nServices) ||
+ setConnected.count(addr.GetGroup(addrman.m_asmap))) continue;
+ addrConnect = addr;
+ LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToString());
+ break;
+ }
+
+ // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
+ // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
+ // already-connected network ranges, ...) before trying new addrman addresses.
+ nTries++;
+ if (nTries > 100)
+ break;
+
CAddrInfo addr = addrman.SelectTriedCollision();
// SelectTriedCollision returns an invalid address if it is empty.
@@ -1982,13 +2011,6 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
break;
}
- // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
- // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
- // already-connected network ranges, ...) before trying new addrman addresses.
- nTries++;
- if (nTries > 100)
- break;
-
if (!IsReachable(addr))
continue;
@@ -2028,6 +2050,19 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
}
}
+std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const
+{
+ std::vector<CAddress> ret;
+ LOCK(cs_vNodes);
+ for (const CNode* pnode : vNodes) {
+ if (pnode->IsBlockOnlyConn()) {
+ ret.push_back(pnode->addr);
+ }
+ }
+
+ return ret;
+}
+
std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo()
{
std::vector<AddedNodeInfo> ret;
@@ -2427,6 +2462,15 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
}
}
+ if (m_use_addrman_outgoing) {
+ // Load addresses from anchors.dat
+ m_anchors = ReadAnchors(GetDataDir() / ANCHORS_DATABASE_FILENAME);
+ if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
+ m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
+ }
+ LogPrintf("%i block-relay-only anchors will be tried for connections.\n", m_anchors.size());
+ }
+
uiInterface.InitMessage(_("Starting network threads...").translated);
fAddressesInitialized = true;
@@ -2542,6 +2586,15 @@ void CConnman::StopNodes()
if (fAddressesInitialized) {
DumpAddresses();
fAddressesInitialized = false;
+
+ if (m_use_addrman_outgoing) {
+ // Anchor connections are only dumped during clean shutdown.
+ std::vector<CAddress> anchors_to_dump = GetCurrentBlockRelayOnlyConns();
+ if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
+ anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
+ }
+ DumpAnchors(GetDataDir() / ANCHORS_DATABASE_FILENAME, anchors_to_dump);
+ }
}
// Close sockets
diff --git a/src/net.h b/src/net.h
index fb5c2bd328..2652d82ea0 100644
--- a/src/net.h
+++ b/src/net.h
@@ -115,17 +115,12 @@ struct CSerializedNetMsg
std::string m_type;
};
-const std::vector<std::string> CONNECTION_TYPE_DOC{
- "outbound-full-relay (default automatic connections)",
- "block-relay-only (does not relay transactions or addresses)",
- "inbound (initiated by the peer)",
- "manual (added via addnode RPC or -addnode/-connect configuration options)",
- "addr-fetch (short-lived automatic connection for soliciting addresses)",
- "feeler (short-lived automatic connection for testing addresses)"};
-
/** Different types of connections to a peer. This enum encapsulates the
* information we have available at the time of opening or accepting the
- * connection. Aside from INBOUND, all types are initiated by us. */
+ * connection. Aside from INBOUND, all types are initiated by us.
+ *
+ * If adding or removing types, please update CONNECTION_TYPE_DOC in
+ * src/rpc/net.cpp. */
enum class ConnectionType {
/**
* Inbound connections are those initiated by a peer. This is the only
@@ -173,7 +168,9 @@ enum class ConnectionType {
* attacks. By not relaying transactions or addresses, these connections
* are harder to detect by a third party, thus helping obfuscate the
* network topology. We automatically attempt to open
- * MAX_BLOCK_RELAY_ONLY_CONNECTIONS using addresses from our AddrMan.
+ * MAX_BLOCK_RELAY_ONLY_ANCHORS using addresses from our anchors.dat. Then
+ * addresses from our AddrMan if MAX_BLOCK_RELAY_ONLY_CONNECTIONS
+ * isn't reached yet.
*/
BLOCK_RELAY,
@@ -460,6 +457,11 @@ private:
void RecordBytesRecv(uint64_t bytes);
void RecordBytesSent(uint64_t bytes);
+ /**
+ * Return vector of current BLOCK_RELAY peers.
+ */
+ std::vector<CAddress> GetCurrentBlockRelayOnlyConns() const;
+
// Whether the node should be passed out in ForEach* callbacks
static bool NodeFullyConnected(const CNode* pnode);
@@ -561,6 +563,12 @@ private:
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
BanMan* m_banman;
+ /**
+ * Addresses that were saved during the previous clean shutdown. We'll
+ * attempt to make block-relay-only connections to them.
+ */
+ std::vector<CAddress> m_anchors;
+
/** SipHasher seeds for deterministic randomness */
const uint64_t nSeed0, nSeed1;
@@ -849,7 +857,6 @@ public:
RecursiveMutex cs_sendProcessing;
- std::deque<CInv> vRecvGetData;
uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0};
std::atomic<int64_t> nLastSend{0};
@@ -1043,8 +1050,6 @@ public:
// Whether a ping is requested.
std::atomic<bool> fPingQueued{false};
- std::set<uint256> orphan_work_set;
-
CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in, bool inbound_onion = false);
~CNode();
CNode(const CNode&) = delete;
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index f14db379fb..94d4052fa1 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -446,6 +446,14 @@ struct Peer {
/** Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). */
bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
+ /** Set of txids to reconsider once their parent transactions have been accepted **/
+ std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
+
+ /** Protects m_getdata_requests **/
+ Mutex m_getdata_requests_mutex;
+ /** Work queue of items requested by this peer **/
+ std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
+
Peer(NodeId id) : m_id(id) {}
};
@@ -1654,11 +1662,11 @@ static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode&
return {};
}
-void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
+void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex)
{
AssertLockNotHeld(cs_main);
- std::deque<CInv>::iterator it = pfrom.vRecvGetData.begin();
+ std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
std::vector<CInv> vNotFound;
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
@@ -1670,7 +1678,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
// Process as many TX items from the front of the getdata queue as
// possible, since they're common and it's efficient to batch process
// them.
- while (it != pfrom.vRecvGetData.end() && it->IsGenTxMsg()) {
+ while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
if (interruptMsgProc) return;
// The send buffer provides backpressure. If there's no space in
// the buffer, pause processing until the next call.
@@ -1718,7 +1726,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
// Only process one BLOCK item per call, since they're uncommon and can be
// expensive to process.
- if (it != pfrom.vRecvGetData.end() && !pfrom.fPauseSend) {
+ if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
const CInv &inv = *it++;
if (inv.IsGenBlkMsg()) {
ProcessGetBlockData(pfrom, chainparams, inv, connman);
@@ -1727,7 +1735,7 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
// and continue processing the queue on the next call.
}
- pfrom.vRecvGetData.erase(pfrom.vRecvGetData.begin(), it);
+ peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it doesn't
@@ -2270,6 +2278,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
return;
}
+ PeerRef peer = GetPeerRef(pfrom.GetId());
+ if (peer == nullptr) return;
if (msg_type == NetMsgType::VERSION) {
// Each connection can only send one version message
@@ -2708,8 +2718,12 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
}
- pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(), vInv.end());
- ProcessGetData(pfrom, m_chainparams, m_connman, m_mempool, interruptMsgProc);
+ {
+ LOCK(peer->m_getdata_requests_mutex);
+ peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
+ ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
+ }
+
return;
}
@@ -2797,36 +2811,38 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
return;
}
- LOCK(cs_main);
+ {
+ LOCK(cs_main);
- const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
- if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
- LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
- return;
- }
+ const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
+ if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
+ LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
+ return;
+ }
- if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
- // If an older block is requested (should never happen in practice,
- // but can happen in tests) send a block response instead of a
- // blocktxn response. Sending a full block response instead of a
- // small blocktxn response is preferable in the case where a peer
- // might maliciously send lots of getblocktxn requests to trigger
- // expensive disk reads, because it will require the peer to
- // actually receive all the data read from disk over the network.
- LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
- CInv inv;
- inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
- inv.hash = req.blockhash;
- pfrom.vRecvGetData.push_back(inv);
- // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
- return;
- }
+ if (pindex->nHeight >= ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
+ CBlock block;
+ bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
+ assert(ret);
- CBlock block;
- bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
- assert(ret);
+ SendBlockTransactions(pfrom, block, req);
+ return;
+ }
+ }
- SendBlockTransactions(pfrom, block, req);
+ // If an older block is requested (should never happen in practice,
+ // but can happen in tests) send a block response instead of a
+ // blocktxn response. Sending a full block response instead of a
+ // small blocktxn response is preferable in the case where a peer
+ // might maliciously send lots of getblocktxn requests to trigger
+ // expensive disk reads, because it will require the peer to
+ // actually receive all the data read from disk over the network.
+ LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
+ CInv inv;
+ WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK);
+ inv.hash = req.blockhash;
+ WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
+ // The message processing loop will go around again (without pausing) and we'll respond then
return;
}
@@ -2961,7 +2977,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
for (const auto& elem : it_by_prev->second) {
- pfrom.orphan_work_set.insert(elem->first);
+ peer->m_orphan_work_set.insert(elem->first);
}
}
}
@@ -2978,7 +2994,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
}
// Recursively process any orphan transactions that depended on this one
- ProcessOrphanTx(pfrom.orphan_work_set);
+ ProcessOrphanTx(peer->m_orphan_work_set);
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
@@ -3773,21 +3789,37 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP
{
bool fMoreWork = false;
- if (!pfrom->vRecvGetData.empty())
- ProcessGetData(*pfrom, m_chainparams, m_connman, m_mempool, interruptMsgProc);
+ PeerRef peer = GetPeerRef(pfrom->GetId());
+ if (peer == nullptr) return false;
- if (!pfrom->orphan_work_set.empty()) {
+ {
+ LOCK(peer->m_getdata_requests_mutex);
+ if (!peer->m_getdata_requests.empty()) {
+ ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
+ }
+ }
+
+ {
LOCK2(cs_main, g_cs_orphans);
- ProcessOrphanTx(pfrom->orphan_work_set);
+ if (!peer->m_orphan_work_set.empty()) {
+ ProcessOrphanTx(peer->m_orphan_work_set);
+ }
}
if (pfrom->fDisconnect)
return false;
// this maintains the order of responses
- // and prevents vRecvGetData to grow unbounded
- if (!pfrom->vRecvGetData.empty()) return true;
- if (!pfrom->orphan_work_set.empty()) return true;
+ // and prevents m_getdata_requests to grow unbounded
+ {
+ LOCK(peer->m_getdata_requests_mutex);
+ if (!peer->m_getdata_requests.empty()) return true;
+ }
+
+ {
+ LOCK(g_cs_orphans);
+ if (!peer->m_orphan_work_set.empty()) return true;
+ }
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
@@ -3814,10 +3846,11 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP
try {
ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
- if (interruptMsgProc)
- return false;
- if (!pfrom->vRecvGetData.empty())
- fMoreWork = true;
+ if (interruptMsgProc) return false;
+ {
+ LOCK(peer->m_getdata_requests_mutex);
+ if (!peer->m_getdata_requests.empty()) fMoreWork = true;
+ }
} catch (const std::exception& e) {
LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
} catch (...) {
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 97fb88d71c..50a1ea6936 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -28,6 +28,8 @@
#include <wallet/fees.h>
#include <wallet/wallet.h>
+#include <validation.h>
+
#include <QFontMetrics>
#include <QScrollBar>
#include <QSettings>
@@ -134,7 +136,7 @@ void SendCoinsDialog::setClientModel(ClientModel *_clientModel)
this->clientModel = _clientModel;
if (_clientModel) {
- connect(_clientModel, &ClientModel::numBlocksChanged, this, &SendCoinsDialog::updateSmartFeeLabel);
+ connect(_clientModel, &ClientModel::numBlocksChanged, this, &SendCoinsDialog::updateNumberOfBlocks);
}
}
@@ -744,6 +746,12 @@ void SendCoinsDialog::updateCoinControlState(CCoinControl& ctrl)
ctrl.fAllowWatchOnly = model->wallet().privateKeysDisabled();
}
+void SendCoinsDialog::updateNumberOfBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers, SynchronizationState sync_state) {
+ if (sync_state == SynchronizationState::POST_INIT) {
+ updateSmartFeeLabel();
+ }
+}
+
void SendCoinsDialog::updateSmartFeeLabel()
{
if(!model || !model->getOptionsModel())
diff --git a/src/qt/sendcoinsdialog.h b/src/qt/sendcoinsdialog.h
index 6961aa7821..8519f1f65b 100644
--- a/src/qt/sendcoinsdialog.h
+++ b/src/qt/sendcoinsdialog.h
@@ -17,6 +17,7 @@ class ClientModel;
class PlatformStyle;
class SendCoinsEntry;
class SendCoinsRecipient;
+enum class SynchronizationState;
namespace Ui {
class SendCoinsDialog;
@@ -98,6 +99,7 @@ private Q_SLOTS:
void coinControlClipboardLowOutput();
void coinControlClipboardChange();
void updateFeeSectionControls();
+ void updateNumberOfBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers, SynchronizationState sync_state);
void updateSmartFeeLabel();
Q_SIGNALS:
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index 073d82b491..07122b7f6d 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -67,7 +67,8 @@ void RandAddSeedPerfmon(CSHA512& hasher)
#ifdef WIN32
// Seed with the entire set of perfmon data
- // This can take up to 2 seconds, so only do it every 10 minutes
+ // This can take up to 2 seconds, so only do it every 10 minutes.
+ // Initialize last_perfmon to 0 seconds, we don't skip the first call.
static std::atomic<std::chrono::seconds> last_perfmon{std::chrono::seconds{0}};
auto last_time = last_perfmon.load();
auto current_time = GetTime<std::chrono::seconds>();
@@ -83,7 +84,7 @@ void RandAddSeedPerfmon(CSHA512& hasher)
ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", nullptr, nullptr, vData.data(), &nSize);
if (ret != ERROR_MORE_DATA || vData.size() >= nMaxSize)
break;
- vData.resize(std::max((vData.size() * 3) / 2, nMaxSize)); // Grow size of buffer exponentially
+ vData.resize(std::min((vData.size() * 3) / 2, nMaxSize)); // Grow size of buffer exponentially
}
RegCloseKey(HKEY_PERFORMANCE_DATA);
if (ret == ERROR_SUCCESS) {
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index b04e106b2d..a561b7e93c 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -352,7 +352,7 @@ static RPCHelpMan generateblock()
txs.push_back(MakeTransactionRef(std::move(mtx)));
} else {
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("Transaction decode failed for %s", str));
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("Transaction decode failed for %s. Make sure the tx has at least one input.", str));
}
}
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 43c525b6a0..b81e6414a5 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -29,6 +29,15 @@
#include <univalue.h>
+const std::vector<std::string> CONNECTION_TYPE_DOC{
+ "outbound-full-relay (default automatic connections)",
+ "block-relay-only (does not relay transactions or addresses)",
+ "inbound (initiated by the peer)",
+ "manual (added via addnode RPC or -addnode/-connect configuration options)",
+ "addr-fetch (short-lived automatic connection for soliciting addresses)",
+ "feeler (short-lived automatic connection for testing addresses)"
+};
+
static RPCHelpMan getconnectioncount()
{
return RPCHelpMan{"getconnectioncount",
@@ -119,7 +128,9 @@ static RPCHelpMan getpeerinfo()
{RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"},
{RPCResult::Type::BOOL, "addnode", "Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n"
"(DEPRECATED, returned only if the config option -deprecatedrpc=getpeerinfo_addnode is passed)"},
- {RPCResult::Type::STR, "connection_type", "Type of connection: \n" + Join(CONNECTION_TYPE_DOC, ",\n") + "."},
+ {RPCResult::Type::STR, "connection_type", "Type of connection: \n" + Join(CONNECTION_TYPE_DOC, ",\n") + ".\n"
+ "Please note this output is unlikely to be stable in upcoming releases as we iterate to\n"
+ "best capture connection behaviors."},
{RPCResult::Type::NUM, "startingheight", "The starting height (block) of the peer"},
{RPCResult::Type::NUM, "banscore", "The ban score (DEPRECATED, returned only if config option -deprecatedrpc=banscore is passed)"},
{RPCResult::Type::NUM, "synced_headers", "The last header we have in common with this peer"},
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 7a6b605ec3..c6d7fea443 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -656,8 +656,8 @@ static RPCHelpMan combinerawtransaction()
std::vector<CMutableTransaction> txVariants(txs.size());
for (unsigned int idx = 0; idx < txs.size(); idx++) {
- if (!DecodeHexTx(txVariants[idx], txs[idx].get_str(), true)) {
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("TX decode failed for tx %d", idx));
+ if (!DecodeHexTx(txVariants[idx], txs[idx].get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, strprintf("TX decode failed for tx %d. Make sure the tx has at least one input.", idx));
}
}
@@ -780,8 +780,8 @@ static RPCHelpMan signrawtransactionwithkey()
RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VARR, UniValue::VARR, UniValue::VSTR}, true);
CMutableTransaction mtx;
- if (!DecodeHexTx(mtx, request.params[0].get_str(), true)) {
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
+ if (!DecodeHexTx(mtx, request.params[0].get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input.");
}
FillableSigningProvider keystore;
@@ -847,10 +847,10 @@ static RPCHelpMan sendrawtransaction()
UniValueType(), // VNUM or VSTR, checked inside AmountFromValue()
});
- // parse hex string from parameter
CMutableTransaction mtx;
- if (!DecodeHexTx(mtx, request.params[0].get_str()))
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
+ if (!DecodeHexTx(mtx, request.params[0].get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input.");
+ }
CTransactionRef tx(MakeTransactionRef(std::move(mtx)));
const CFeeRate max_raw_tx_fee_rate = request.params[1].isNull() ?
@@ -928,7 +928,7 @@ static RPCHelpMan testmempoolaccept()
CMutableTransaction mtx;
if (!DecodeHexTx(mtx, request.params[0].get_array()[0].get_str())) {
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input.");
}
CTransactionRef tx(MakeTransactionRef(std::move(mtx)));
const uint256& tx_hash = tx->GetHash();
diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp
index 4a6e04f2eb..c1786140de 100644
--- a/src/script/sigcache.cpp
+++ b/src/script/sigcache.cpp
@@ -46,14 +46,14 @@ public:
}
void
- ComputeEntryECDSA(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey)
+ ComputeEntryECDSA(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) const
{
CSHA256 hasher = m_salted_hasher_ecdsa;
hasher.Write(hash.begin(), 32).Write(&pubkey[0], pubkey.size()).Write(&vchSig[0], vchSig.size()).Finalize(entry.begin());
}
void
- ComputeEntrySchnorr(uint256& entry, const uint256 &hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey)
+ ComputeEntrySchnorr(uint256& entry, const uint256 &hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey) const
{
CSHA256 hasher = m_salted_hasher_schnorr;
hasher.Write(hash.begin(), 32).Write(&pubkey[0], pubkey.size()).Write(sig.data(), sig.size()).Finalize(entry.begin());
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index a2efd8ac07..7c53bd0002 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -1347,36 +1347,6 @@ static CScript ScriptFromHex(const std::string& str)
return CScript(data.begin(), data.end());
}
-static CMutableTransaction TxFromHex(const std::string& str)
-{
- CMutableTransaction tx;
- VectorReader(SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str), 0) >> tx;
- return tx;
-}
-
-static std::vector<CTxOut> TxOutsFromJSON(const UniValue& univalue)
-{
- assert(univalue.isArray());
- std::vector<CTxOut> prevouts;
- for (size_t i = 0; i < univalue.size(); ++i) {
- CTxOut txout;
- VectorReader(SER_DISK, 0, ParseHex(univalue[i].get_str()), 0) >> txout;
- prevouts.push_back(std::move(txout));
- }
- return prevouts;
-}
-
-static CScriptWitness ScriptWitnessFromJSON(const UniValue& univalue)
-{
- assert(univalue.isArray());
- CScriptWitness scriptwitness;
- for (size_t i = 0; i < univalue.size(); ++i) {
- auto bytes = ParseHex(univalue[i].get_str());
- scriptwitness.stack.push_back(std::move(bytes));
- }
- return scriptwitness;
-}
-
BOOST_AUTO_TEST_CASE(script_FindAndDelete)
{
// Exercise the FindAndDelete functionality
@@ -1502,6 +1472,36 @@ BOOST_AUTO_TEST_CASE(script_HasValidOps)
#if defined(HAVE_CONSENSUS_LIB)
+static CMutableTransaction TxFromHex(const std::string& str)
+{
+ CMutableTransaction tx;
+ VectorReader(SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str), 0) >> tx;
+ return tx;
+}
+
+static std::vector<CTxOut> TxOutsFromJSON(const UniValue& univalue)
+{
+ assert(univalue.isArray());
+ std::vector<CTxOut> prevouts;
+ for (size_t i = 0; i < univalue.size(); ++i) {
+ CTxOut txout;
+ VectorReader(SER_DISK, 0, ParseHex(univalue[i].get_str()), 0) >> txout;
+ prevouts.push_back(std::move(txout));
+ }
+ return prevouts;
+}
+
+static CScriptWitness ScriptWitnessFromJSON(const UniValue& univalue)
+{
+ assert(univalue.isArray());
+ CScriptWitness scriptwitness;
+ for (size_t i = 0; i < univalue.size(); ++i) {
+ auto bytes = ParseHex(univalue[i].get_str());
+ scriptwitness.stack.push_back(std::move(bytes));
+ }
+ return scriptwitness;
+}
+
/* Test simple (successful) usage of bitcoinconsensus_verify_script */
BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_returns_true)
{
diff --git a/src/util/system.cpp b/src/util/system.cpp
index a411b73a16..9f8035948b 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -427,6 +427,14 @@ bool ArgsManager::ReadSettingsFile(std::vector<std::string>* errors)
SaveErrors(read_errors, errors);
return false;
}
+ for (const auto& setting : m_settings.rw_settings) {
+ std::string section;
+ std::string key = setting.first;
+ (void)InterpretOption(section, key, /* value */ {}); // Split setting key into section and argname
+ if (!GetArgFlags('-' + key)) {
+ LogPrintf("Ignoring unknown rw_settings value %s\n", setting.first);
+ }
+ }
return true;
}
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 7dcab46ad3..884ab58497 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -340,8 +340,9 @@ RPCHelpMan importprunedfunds()
CWallet* const pwallet = wallet.get();
CMutableTransaction tx;
- if (!DecodeHexTx(tx, request.params[0].get_str()))
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
+ if (!DecodeHexTx(tx, request.params[0].get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input.");
+ }
uint256 hashTx = tx.GetHash();
CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION);
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 23291e3a48..2295fb0ef1 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -3331,8 +3331,8 @@ RPCHelpMan signrawtransactionwithwallet()
RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VARR, UniValue::VSTR}, true);
CMutableTransaction mtx;
- if (!DecodeHexTx(mtx, request.params[0].get_str(), true)) {
- throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
+ if (!DecodeHexTx(mtx, request.params[0].get_str())) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input.");
}
// Sign the transaction
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index b7c70dac3a..188289b010 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -453,7 +453,7 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, bilingual_str& error)
hd_upgrade = true;
}
// Upgrade to HD chain split if necessary
- if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT) && CHDChain::VERSION_HD_CHAIN_SPLIT) {
+ if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) {
WalletLogPrintf("Upgrading wallet to use HD chain split\n");
m_storage.SetMinVersion(FEATURE_PRE_SPLIT_KEYPOOL);
split_upgrade = FEATURE_HD_SPLIT > prev_version;
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index 1253c45418..60492350ee 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -6,7 +6,7 @@
import time
-from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
+from test_framework.blocktools import create_block, NORMAL_GBT_REQUEST_PARAMS, add_witness_commitment
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -275,6 +275,8 @@ class BIP68Test(BitcoinTestFramework):
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
+ # Save block template now to use for the reorg later
+ tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
self.nodes[0].generate(1)
assert tx2.hash not in self.nodes[0].getrawmempool()
@@ -318,16 +320,15 @@ class BIP68Test(BitcoinTestFramework):
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
- tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
- height = self.nodes[0].getblockcount()
for i in range(2):
- block = create_block(tip, create_coinbase(height), cur_time)
- block.nVersion = 3
+ block = create_block(tmpl=tmpl, ntime=cur_time)
block.rehash()
block.solve()
tip = block.sha256
- height += 1
assert_equal(None if i == 1 else 'inconclusive', self.nodes[0].submitblock(ToHex(block)))
+ tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
+ tmpl['previousblockhash'] = '%x' % tip
+ tmpl['transactions'] = []
cur_time += 1
mempool = self.nodes[0].getrawmempool()
@@ -375,9 +376,7 @@ class BIP68Test(BitcoinTestFramework):
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
- tip = int(self.nodes[0].getbestblockhash(), 16)
- block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
- block.nVersion = 3
+ block = create_block(tmpl=self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS))
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py
index ff55cb76d9..d1196a4bbd 100755
--- a/test/functional/feature_nulldummy.py
+++ b/test/functional/feature_nulldummy.py
@@ -14,7 +14,7 @@ Generate 427 more blocks.
"""
import time
-from test_framework.blocktools import create_coinbase, create_block, create_transaction, add_witness_commitment
+from test_framework.blocktools import NORMAL_GBT_REQUEST_PARAMS, create_block, create_transaction, add_witness_commitment
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
@@ -37,14 +37,15 @@ def trueDummy(tx):
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
- self.num_nodes = 1
+ # Need two nodes only so GBT doesn't complain that it's not connected
+ self.num_nodes = 2
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [[
'-segwitheight=432',
'-addresstype=legacy',
- ]]
+ ]] * 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -61,7 +62,6 @@ class NULLDUMMYTest(BitcoinTestFramework):
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
- self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
@@ -102,8 +102,10 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.block_submit(self.nodes[0], test6txs, True, True)
def block_submit(self, node, txs, witness=False, accept=False):
- block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
- block.nVersion = 4
+ tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
+ assert_equal(tmpl['previousblockhash'], self.lastblockhash)
+ assert_equal(tmpl['height'], self.lastblockheight + 1)
+ block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
for tx in txs:
tx.rehash()
block.vtx.append(tx)
@@ -114,7 +116,6 @@ class NULLDUMMYTest(BitcoinTestFramework):
assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex()))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
- self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 0842972779..120e4c2001 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -61,14 +61,12 @@ class SegWitTest(BitcoinTestFramework):
],
[
"-acceptnonstdtxn=1",
- "-blockversion=4",
"-rpcserialversion=1",
"-segwitheight=432",
"-addresstype=legacy",
],
[
"-acceptnonstdtxn=1",
- "-blockversion=536870915",
"-segwitheight=432",
"-addresstype=legacy",
],
diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py
index 3705caf4ff..5a0236401d 100755
--- a/test/functional/feature_settings.py
+++ b/test/functional/feature_settings.py
@@ -31,19 +31,25 @@ class SettingsTest(BitcoinTestFramework):
# Assert settings are parsed and logged
with settings.open("w") as fp:
- json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]}, fp)
+ json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}, fp)
with node.assert_debug_log(expected_msgs=[
+ 'Ignoring unknown rw_settings value bool',
+ 'Ignoring unknown rw_settings value list',
+ 'Ignoring unknown rw_settings value null',
+ 'Ignoring unknown rw_settings value num',
+ 'Ignoring unknown rw_settings value string',
'Setting file arg: string = "string"',
'Setting file arg: num = 5',
'Setting file arg: bool = true',
'Setting file arg: null = null',
- 'Setting file arg: list = [6,7]']):
+ 'Setting file arg: list = [6,7]',
+ ]):
self.start_node(0)
self.stop_node(0)
# Assert settings are unchanged after shutdown
with settings.open() as fp:
- assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6,7]})
+ assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]})
# Test invalid json
with settings.open("w") as fp:
diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py
index 6d0b241e57..2adafb1fdb 100755
--- a/test/functional/mining_getblocktemplate_longpoll.py
+++ b/test/functional/mining_getblocktemplate_longpoll.py
@@ -5,11 +5,13 @@
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
+import random
+import threading
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import get_rpc_proxy, random_transaction
+from test_framework.util import get_rpc_proxy
+from test_framework.wallet import MiniWallet
-import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
@@ -29,45 +31,48 @@ class GetBlockTemplateLPTest(BitcoinTestFramework):
self.num_nodes = 2
self.supports_cli = False
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
+ self.log.info("Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens")
self.nodes[0].generate(10)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
longpollid = template['longpollid']
- # longpollid should not change between successive invocations if nothing else happens
template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template2['longpollid'] == longpollid
- # Test 1: test that the longpolling wait if we do nothing
+ self.log.info("Test that longpoll waits if we do nothing")
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert thr.is_alive()
- # Test 2: test that longpoll will terminate if another node generates a block
- self.nodes[1].generate(1) # generate a block on another node
+ miniwallets = [ MiniWallet(node) for node in self.nodes ]
+ self.log.info("Test that longpoll will terminate if another node generates a block")
+ miniwallets[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
- # Test 3: test that longpoll will terminate if we generate a block ourselves
+ self.log.info("Test that longpoll will terminate if we generate a block ourselves")
thr = LongpollThread(self.nodes[0])
thr.start()
- self.nodes[0].generate(1) # generate a block on another node
+ miniwallets[0].generate(1) # generate a block on own node
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
- # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
+ # Add enough mature utxos to the wallets, so that all txs spend confirmed coins
+ self.nodes[0].generate(100)
+ self.sync_blocks()
+
+ self.log.info("Test that introducing a new transaction into the mempool will terminate the longpoll")
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
- # min_relay_fee is fee per 1000 bytes, which should be more than enough.
- (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
+ fee_rate = min_relay_fee + Decimal('0.00000010') * random.randint(0,20)
+ miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes),
+ fee_rate=fee_rate)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert not thr.is_alive()
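
The longpoll test above now drives its transactions through the test framework's MiniWallet instead of the removed random_transaction() helper. A minimal sketch of that pattern (illustrative only, not part of this diff), assuming `node` is a running regtest TestNode inside a functional test's run_test():

    from test_framework.wallet import MiniWallet

    wallet = MiniWallet(node)
    wallet.generate(1)    # mine one block paying the MiniWallet's script
    node.generate(100)    # mature that coinbase so its output is spendable
    wallet.send_self_transfer(from_node=node)  # build, sign and submit a tx spending that output
    assert len(node.getrawmempool()) == 1

Because MiniWallet spends its own outputs directly, the test no longer needs a built-in wallet, which is why skip_if_no_wallet() is dropped above.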
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 506b480039..611bffb25f 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -9,7 +9,7 @@ Version 2 compact blocks are post-segwit (wtxids)
"""
import random
-from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
+from test_framework.blocktools import create_block, NORMAL_GBT_REQUEST_PARAMS, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_BLOCK, MSG_CMPCT_BLOCK, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
@@ -104,11 +104,7 @@ class CompactBlocksTest(BitcoinTestFramework):
self.skip_if_no_wallet()
def build_block_on_tip(self, node, segwit=False):
- height = node.getblockcount()
- tip = node.getbestblockhash()
- mtp = node.getblockheader(tip)['mediantime']
- block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
- block.nVersion = 4
+ block = create_block(tmpl=node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS))
if segwit:
add_witness_commitment(block)
block.solve()
@@ -769,6 +765,9 @@ class CompactBlocksTest(BitcoinTestFramework):
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
+ # Get the nodes out of IBD
+ self.nodes[0].generate(1)
+
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK)
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index 4be8b7d80b..0859380d06 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -4,6 +4,10 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
+from binascii import a2b_hex
+import io
+import struct
+import time
import unittest
from .address import (
@@ -53,19 +57,31 @@ TIME_GENESIS_BLOCK = 1296688602
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
+NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
-def create_block(hashprev, coinbase, ntime=None, *, version=1):
+
+def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
"""Create a block (with regtest difficulty)."""
block = CBlock()
- block.nVersion = version
- if ntime is None:
- import time
- block.nTime = int(time.time() + 600)
+ if tmpl is None:
+ tmpl = {}
+ block.nVersion = version or tmpl.get('version') or 1
+ block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
+ block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
+ if tmpl and not tmpl.get('bits') is None:
+ block.nBits = struct.unpack('>I', a2b_hex(tmpl['bits']))[0]
else:
- block.nTime = ntime
- block.hashPrevBlock = hashprev
- block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
+ block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
+ if coinbase is None:
+ coinbase = create_coinbase(height=tmpl['height'])
block.vtx.append(coinbase)
+ if txlist:
+ for tx in txlist:
+ if not hasattr(tx, 'calc_sha256'):
+ txo = CTransaction()
+ txo.deserialize(io.BytesIO(tx))
+ tx = txo
+ block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
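
The reworked create_block() above can be fed a getblocktemplate result directly, which is how the updated tests (feature_bip68_sequence, feature_nulldummy, p2p_compactblocks) build blocks on the current tip. A minimal sketch (illustrative only, not part of this diff), assuming `node` is a regtest TestNode that is out of IBD and has a peer connection, since getblocktemplate refuses to serve a template otherwise:

    from test_framework.blocktools import NORMAL_GBT_REQUEST_PARAMS, create_block

    tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)  # {'rules': ['segwit']}
    block = create_block(tmpl=tmpl)  # version, time, bits, prev hash and coinbase height come from the template
    block.solve()                    # grind the nonce against regtest difficulty
    assert node.submitblock(block.serialize().hex()) is None  # None means the block was accepted

Explicit hashprev, coinbase, ntime and version arguments still take precedence over the template, and individual template fields can simply be rewritten before the dict is passed in, as the bip68 test does with tmpl['previousblockhash'] for its reorg.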
diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py
index 17b869e542..a6bc187985 100644
--- a/test/functional/test_framework/key.py
+++ b/test/functional/test_framework/key.py
@@ -240,8 +240,8 @@ class ECPubKey():
x = int.from_bytes(data[1:33], 'big')
if SECP256K1.is_x_coord(x):
p = SECP256K1.lift_x(x)
- # if the oddness of the y co-ord isn't correct, find the other
- # valid y
+ # Make the Y coordinate odd if required (lift_x always produces
+ # a point with an even Y coordinate).
if data[0] & 1:
p = SECP256K1.negate(p)
self.p = p
@@ -542,7 +542,7 @@ class TestFrameworkKey(unittest.TestCase):
sig_actual = sign_schnorr(seckey, msg, aux_rand)
self.assertEqual(sig.hex(), sig_actual.hex(), "BIP340 test vector %i (%s): sig mismatch" % (i, comment))
except RuntimeError as e:
- self.assertFalse("BIP340 test vector %i (%s): signing raised exception %s" % (i, comment, e))
+ self.fail("BIP340 test vector %i (%s): signing raised exception %s" % (i, comment, e))
result_actual = verify_schnorr(pubkey, sig, msg)
if result:
self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification failed" % (i, comment))
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index af7f0b62f4..7688febae7 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -12,7 +12,6 @@ import inspect
import json
import logging
import os
-import random
import re
import time
import unittest
@@ -469,62 +468,6 @@ def find_output(node, txid, amount, *, blockhash=None):
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
-def gather_inputs(from_node, amount_needed, confirmations_required=1):
- """
- Return a random set of unspent txouts that are enough to pay amount_needed
- """
- assert confirmations_required >= 0
- utxo = from_node.listunspent(confirmations_required)
- random.shuffle(utxo)
- inputs = []
- total_in = Decimal("0.00000000")
- while total_in < amount_needed and len(utxo) > 0:
- t = utxo.pop()
- total_in += t["amount"]
- inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
- if total_in < amount_needed:
- raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
- return (total_in, inputs)
-
-
-def make_change(from_node, amount_in, amount_out, fee):
- """
- Create change output(s), return them
- """
- outputs = {}
- amount = amount_out + fee
- change = amount_in - amount
- if change > amount * 2:
- # Create an extra change output to break up big inputs
- change_address = from_node.getnewaddress()
- # Split change in two, being careful of rounding:
- outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
- change = amount_in - amount - outputs[change_address]
- if change > 0:
- outputs[from_node.getnewaddress()] = change
- return outputs
-
-
-def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
- """
- Create a random transaction.
- Returns (txid, hex-encoded-transaction-data, fee)
- """
- from_node = random.choice(nodes)
- to_node = random.choice(nodes)
- fee = min_fee + fee_increment * random.randint(0, fee_variants)
-
- (total_in, inputs) = gather_inputs(from_node, amount + fee)
- outputs = make_change(from_node, total_in, amount, fee)
- outputs[to_node.getnewaddress()] = float(amount)
-
- rawtx = from_node.createrawtransaction(inputs, outputs)
- signresult = from_node.signrawtransactionwithwallet(rawtx)
- txid = from_node.sendrawtransaction(signresult["hex"], 0)
-
- return (txid, signresult["hex"], fee)
-
-
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py
index 9d532742ee..f491a69d99 100755
--- a/test/functional/wallet_import_rescan.py
+++ b/test/functional/wallet_import_rescan.py
@@ -182,6 +182,7 @@ class ImportRescanTest(BitcoinTestFramework):
self.nodes[0].generate(1) # Generate one block for each send
variant.confirmation_height = self.nodes[0].getblockcount()
variant.timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
+ self.sync_all() # Conclude sync before calling setmocktime to avoid timeouts
# Generate a block further in the future (past the rescan window).
assert_equal(self.nodes[0].getrawmempool(), [])
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 876eb7f29e..60ab2d457e 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -316,6 +316,7 @@ class WalletSendTest(BitcoinTestFramework):
res = self.nodes[0].sendrawtransaction(hex)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 1)
+ self.sync_all()
self.log.info("Lock unspents...")
utxo1 = w0.listunspent()[0]