Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.am                        |   5
-rw-r--r--  src/Makefile.qt.include                |   3
-rw-r--r--  src/addrman.cpp                        |   2
-rw-r--r--  src/addrman.h                          |  12
-rw-r--r--  src/bitcoin-tx.cpp                     |   6
-rw-r--r--  src/blockencodings.cpp                 |   9
-rw-r--r--  src/core_write.cpp                     |   4
-rw-r--r--  src/dbwrapper.cpp                      |   3
-rw-r--r--  src/httprpc.cpp                        |   6
-rw-r--r--  src/httpserver.cpp                     |  37
-rw-r--r--  src/init.cpp                           |  78
-rw-r--r--  src/miner.cpp                          |  53
-rw-r--r--  src/miner.h                            |  11
-rw-r--r--  src/net.cpp                            |  46
-rw-r--r--  src/net_processing.cpp                 | 140
-rw-r--r--  src/netbase.cpp                        |   8
-rw-r--r--  src/policy/fees.cpp                    |  26
-rw-r--r--  src/qt/bitcoin.cpp                     |  14
-rw-r--r--  src/qt/callback.h                      |  30
-rw-r--r--  src/qt/rpcconsole.cpp                  |   7
-rw-r--r--  src/qt/test/wallettests.cpp            |   9
-rw-r--r--  src/qt/transactiondesc.cpp             |   2
-rw-r--r--  src/random.cpp                         |   2
-rw-r--r--  src/rest.cpp                           |  10
-rw-r--r--  src/rpc/blockchain.cpp                 |  15
-rw-r--r--  src/rpc/blockchain.h                   |  40
-rw-r--r--  src/rpc/client.cpp                     |   2
-rw-r--r--  src/rpc/mining.cpp                     |   1
-rw-r--r--  src/rpc/misc.cpp                       |   1
-rw-r--r--  src/rpc/rawtransaction.cpp             |   6
-rw-r--r--  src/rpc/server.cpp                     |  14
-rw-r--r--  src/rpc/server.h                       |   4
-rw-r--r--  src/scheduler.cpp                      |   4
-rw-r--r--  src/test/cuckoocache_tests.cpp         |  13
-rw-r--r--  src/timedata.cpp                       |  17
-rw-r--r--  src/torcontrol.cpp                     |  37
-rw-r--r--  src/txdb.cpp                           |   2
-rw-r--r--  src/txmempool.cpp                      |   9
-rw-r--r--  src/txmempool.h                        |   8
-rw-r--r--  src/util.cpp                           | 106
-rw-r--r--  src/util.h                             |  42
-rw-r--r--  src/validation.cpp                     |  43
-rw-r--r--  src/validation.h                       |   2
-rw-r--r--  src/validationinterface.h              |   7
-rw-r--r--  src/wallet/db.cpp                      |  18
-rw-r--r--  src/wallet/rpcwallet.cpp               |  36
-rw-r--r--  src/wallet/wallet.cpp                  | 198
-rw-r--r--  src/wallet/wallet.h                    |  38
-rw-r--r--  src/wallet/walletdb.cpp                |   2
-rw-r--r--  src/wallet/walletdb.h                  |   8
-rw-r--r--  src/zmq/zmqnotificationinterface.cpp   |  12
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp         |  15
52 files changed, 786 insertions(+), 437 deletions(-)
diff --git a/src/Makefile.am b/src/Makefile.am
index e8d22313dc..30d027315a 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -122,6 +122,7 @@ BITCOIN_CORE_H = \
protocol.h \
random.h \
reverselock.h \
+ rpc/blockchain.h \
rpc/client.h \
rpc/protocol.h \
rpc/server.h \
@@ -167,8 +168,8 @@ BITCOIN_CORE_H = \
obj/build.h: FORCE
@$(MKDIR_P) $(builddir)/obj
- @$(top_srcdir)/share/genbuild.sh $(abs_top_builddir)/src/obj/build.h \
- $(abs_top_srcdir)
+ @$(top_srcdir)/share/genbuild.sh "$(abs_top_builddir)/src/obj/build.h" \
+ "$(abs_top_srcdir)"
libbitcoin_util_a-clientversion.$(OBJEXT): obj/build.h
# server: shared between bitcoind and bitcoin-qt
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index edc3c4b292..48411f29ec 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -122,6 +122,7 @@ QT_MOC_CPP = \
qt/moc_bitcoinamountfield.cpp \
qt/moc_bitcoingui.cpp \
qt/moc_bitcoinunits.cpp \
+ qt/moc_callback.cpp \
qt/moc_clientmodel.cpp \
qt/moc_coincontroldialog.cpp \
qt/moc_coincontroltreewidget.cpp \
@@ -167,6 +168,7 @@ BITCOIN_MM = \
QT_MOC = \
qt/bitcoin.moc \
qt/bitcoinamountfield.moc \
+ qt/callback.moc \
qt/intro.moc \
qt/overviewpage.moc \
qt/rpcconsole.moc
@@ -189,6 +191,7 @@ BITCOIN_QT_H = \
qt/bitcoinamountfield.h \
qt/bitcoingui.h \
qt/bitcoinunits.h \
+ qt/callback.h \
qt/clientmodel.h \
qt/coincontroldialog.h \
qt/coincontroltreewidget.h \
diff --git a/src/addrman.cpp b/src/addrman.cpp
index b6ab4c6305..ed9c128eb2 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -233,7 +233,7 @@ void CAddrMan::Good_(const CService& addr, int64_t nTime)
if (nUBucket == -1)
return;
- LogPrint("addrman", "Moving %s to tried\n", addr.ToString());
+ LogPrint(BCLog::ADDRMAN, "Moving %s to tried\n", addr.ToString());
// move nId to the tried tables
MakeTried(info, nId);
diff --git a/src/addrman.h b/src/addrman.h
index 6e5f946bf2..f123b20b1b 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -442,7 +442,7 @@ public:
}
}
if (nLost + nLostUnk > 0) {
- LogPrint("addrman", "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost);
+ LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost);
}
Check();
@@ -507,8 +507,9 @@ public:
Check();
fRet |= Add_(addr, source, nTimePenalty);
Check();
- if (fRet)
- LogPrint("addrman", "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew);
+ if (fRet) {
+ LogPrint(BCLog::ADDRMAN, "Added %s from %s: %i tried, %i new\n", addr.ToStringIPPort(), source.ToString(), nTried, nNew);
+ }
return fRet;
}
@@ -521,8 +522,9 @@ public:
for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++)
nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0;
Check();
- if (nAdd)
- LogPrint("addrman", "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
+ if (nAdd) {
+ LogPrint(BCLog::ADDRMAN, "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
+ }
return nAdd > 0;
}
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 61e0eb74e6..83b855cbcf 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -242,6 +242,9 @@ static void MutateTxAddOutAddr(CMutableTransaction& tx, const std::string& strIn
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
+ if (vStrInputParts.size() != 2)
+ throw std::runtime_error("TX output missing or too many separators");
+
// Extract and validate VALUE
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
@@ -264,6 +267,9 @@ static void MutateTxAddOutPubKey(CMutableTransaction& tx, const std::string& str
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
+ if (vStrInputParts.size() < 2 || vStrInputParts.size() > 3)
+ throw std::runtime_error("TX output missing or too many separators");
+
// Extract and validate VALUE
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
index 90120b82eb..ee2c654980 100644
--- a/src/blockencodings.cpp
+++ b/src/blockencodings.cpp
@@ -164,7 +164,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
break;
}
- LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));
+ LogPrint(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));
return READ_STATUS_OK;
}
@@ -209,10 +209,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
return READ_STATUS_CHECKBLOCK_FAILED;
}
- LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
+ LogPrint(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
if (vtx_missing.size() < 5) {
- for (const auto& tx : vtx_missing)
- LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
+ for (const auto& tx : vtx_missing) {
+ LogPrint(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
+ }
}
return READ_STATUS_OK;
diff --git a/src/core_write.cpp b/src/core_write.cpp
index b0993a131f..a3ca87c8b5 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -114,9 +114,9 @@ std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDeco
return str;
}
-std::string EncodeHexTx(const CTransaction& tx, const int serialFlags)
+std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags)
{
- CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | serialFlags);
+ CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | serializeFlags);
ssTx << tx;
return HexStr(ssTx.begin(), ssTx.end());
}
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index cd441add4b..01fcd07420 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -21,8 +21,9 @@ public:
// This code is adapted from posix_logger.h, which is why it is using vsprintf.
// Please do not do this in normal code
virtual void Logv(const char * format, va_list ap) override {
- if (!LogAcceptCategory("leveldb"))
+ if (!LogAcceptCategory(BCLog::LEVELDB)) {
return;
+ }
char buffer[500];
for (int iter = 0; iter < 2; iter++) {
char* base;
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 8ac6925acd..21c64c5c83 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -233,7 +233,7 @@ static bool InitRPCAuthentication()
bool StartHTTPRPC()
{
- LogPrint("rpc", "Starting HTTP RPC server\n");
+ LogPrint(BCLog::RPC, "Starting HTTP RPC server\n");
if (!InitRPCAuthentication())
return false;
@@ -247,12 +247,12 @@ bool StartHTTPRPC()
void InterruptHTTPRPC()
{
- LogPrint("rpc", "Interrupting HTTP RPC server\n");
+ LogPrint(BCLog::RPC, "Interrupting HTTP RPC server\n");
}
void StopHTTPRPC()
{
- LogPrint("rpc", "Stopping HTTP RPC server\n");
+ LogPrint(BCLog::RPC, "Stopping HTTP RPC server\n");
UnregisterHTTPHandler("/", true);
if (httpRPCTimerInterface) {
RPCUnsetTimerInterface(httpRPCTimerInterface);
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 799de1c2cb..347433eb11 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -213,7 +213,7 @@ static bool InitHTTPAllowList()
std::string strAllowed;
for (const CSubNet& subnet : rpc_allow_subnets)
strAllowed += subnet.ToString() + " ";
- LogPrint("http", "Allowing HTTP connections from: %s\n", strAllowed);
+ LogPrint(BCLog::HTTP, "Allowing HTTP connections from: %s\n", strAllowed);
return true;
}
@@ -243,7 +243,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
{
std::unique_ptr<HTTPRequest> hreq(new HTTPRequest(req));
- LogPrint("http", "Received a %s request for %s from %s\n",
+ LogPrint(BCLog::HTTP, "Received a %s request for %s from %s\n",
RequestMethodString(hreq->GetRequestMethod()), hreq->GetURI(), hreq->GetPeer().ToString());
// Early address-based allow check
@@ -293,7 +293,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
/** Callback to reject HTTP requests after shutdown. */
static void http_reject_request_cb(struct evhttp_request* req, void*)
{
- LogPrint("http", "Rejecting request while shutting down\n");
+ LogPrint(BCLog::HTTP, "Rejecting request while shutting down\n");
evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
}
@@ -301,10 +301,10 @@ static void http_reject_request_cb(struct evhttp_request* req, void*)
static bool ThreadHTTP(struct event_base* base, struct evhttp* http)
{
RenameThread("bitcoin-http");
- LogPrint("http", "Entering http event loop\n");
+ LogPrint(BCLog::HTTP, "Entering http event loop\n");
event_base_dispatch(base);
// Event loop will be interrupted by InterruptHTTPServer()
- LogPrint("http", "Exited http event loop\n");
+ LogPrint(BCLog::HTTP, "Exited http event loop\n");
return event_base_got_break(base) == 0;
}
@@ -336,7 +336,7 @@ static bool HTTPBindAddresses(struct evhttp* http)
// Bind addresses
for (std::vector<std::pair<std::string, uint16_t> >::iterator i = endpoints.begin(); i != endpoints.end(); ++i) {
- LogPrint("http", "Binding RPC on address %s port %i\n", i->first, i->second);
+ LogPrint(BCLog::HTTP, "Binding RPC on address %s port %i\n", i->first, i->second);
evhttp_bound_socket *bind_handle = evhttp_bind_socket_with_handle(http, i->first.empty() ? NULL : i->first.c_str(), i->second);
if (bind_handle) {
boundSockets.push_back(bind_handle);
@@ -364,7 +364,7 @@ static void libevent_log_cb(int severity, const char *msg)
if (severity >= EVENT_LOG_WARN) // Log warn messages and higher without debug category
LogPrintf("libevent: %s\n", msg);
else
- LogPrint("libevent", "libevent: %s\n", msg);
+ LogPrint(BCLog::LIBEVENT, "libevent: %s\n", msg);
}
bool InitHTTPServer()
@@ -387,10 +387,11 @@ bool InitHTTPServer()
#if LIBEVENT_VERSION_NUMBER >= 0x02010100
// If -debug=libevent, set full libevent debugging.
// Otherwise, disable all libevent debugging.
- if (LogAcceptCategory("libevent"))
+ if (LogAcceptCategory(BCLog::LIBEVENT)) {
event_enable_debug_logging(EVENT_DBG_ALL);
- else
+ } else {
event_enable_debug_logging(EVENT_DBG_NONE);
+ }
#endif
#ifdef WIN32
evthread_use_windows_threads();
@@ -424,7 +425,7 @@ bool InitHTTPServer()
return false;
}
- LogPrint("http", "Initialized HTTP server\n");
+ LogPrint(BCLog::HTTP, "Initialized HTTP server\n");
int workQueueDepth = std::max((long)GetArg("-rpcworkqueue", DEFAULT_HTTP_WORKQUEUE), 1L);
LogPrintf("HTTP: creating work queue of depth %d\n", workQueueDepth);
@@ -439,7 +440,7 @@ std::future<bool> threadResult;
bool StartHTTPServer()
{
- LogPrint("http", "Starting HTTP server\n");
+ LogPrint(BCLog::HTTP, "Starting HTTP server\n");
int rpcThreads = std::max((long)GetArg("-rpcthreads", DEFAULT_HTTP_THREADS), 1L);
LogPrintf("HTTP: starting %d worker threads\n", rpcThreads);
std::packaged_task<bool(event_base*, evhttp*)> task(ThreadHTTP);
@@ -455,7 +456,7 @@ bool StartHTTPServer()
void InterruptHTTPServer()
{
- LogPrint("http", "Interrupting HTTP server\n");
+ LogPrint(BCLog::HTTP, "Interrupting HTTP server\n");
if (eventHTTP) {
// Unlisten sockets
for (evhttp_bound_socket *socket : boundSockets) {
@@ -470,15 +471,15 @@ void InterruptHTTPServer()
void StopHTTPServer()
{
- LogPrint("http", "Stopping HTTP server\n");
+ LogPrint(BCLog::HTTP, "Stopping HTTP server\n");
if (workQueue) {
- LogPrint("http", "Waiting for HTTP worker threads to exit\n");
+ LogPrint(BCLog::HTTP, "Waiting for HTTP worker threads to exit\n");
workQueue->WaitExit();
delete workQueue;
workQueue = nullptr;
}
if (eventBase) {
- LogPrint("http", "Waiting for HTTP event thread to exit\n");
+ LogPrint(BCLog::HTTP, "Waiting for HTTP event thread to exit\n");
// Give event loop a few seconds to exit (to send back last RPC responses), then break it
// Before this was solved with event_base_loopexit, but that didn't work as expected in
// at least libevent 2.0.21 and always introduced a delay. In libevent
@@ -499,7 +500,7 @@ void StopHTTPServer()
event_base_free(eventBase);
eventBase = 0;
}
- LogPrint("http", "Stopped HTTP server\n");
+ LogPrint(BCLog::HTTP, "Stopped HTTP server\n");
}
struct event_base* EventBase()
@@ -646,7 +647,7 @@ HTTPRequest::RequestMethod HTTPRequest::GetRequestMethod()
void RegisterHTTPHandler(const std::string &prefix, bool exactMatch, const HTTPRequestHandler &handler)
{
- LogPrint("http", "Registering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
+ LogPrint(BCLog::HTTP, "Registering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
pathHandlers.push_back(HTTPPathHandler(prefix, exactMatch, handler));
}
@@ -659,7 +660,7 @@ void UnregisterHTTPHandler(const std::string &prefix, bool exactMatch)
break;
if (i != iend)
{
- LogPrint("http", "Unregistering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
+ LogPrint(BCLog::HTTP, "Unregistering HTTP handler for %s (exactmatch %d)\n", prefix, exactMatch);
pathHandlers.erase(i);
}
}
diff --git a/src/init.cpp b/src/init.cpp
index 246d5f340a..23a15b4fd8 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -27,6 +27,7 @@
#include "policy/policy.h"
#include "rpc/server.h"
#include "rpc/register.h"
+#include "rpc/blockchain.h"
#include "script/standard.h"
#include "script/sigcache.h"
#include "scheduler.h"
@@ -265,18 +266,31 @@ void Shutdown()
}
/**
- * Signal handlers are very limited in what they are allowed to do, so:
+ * Signal handlers are very limited in what they are allowed to do.
+ * The execution context the handler is invoked in is not guaranteed,
+ * so we restrict handler operations to just touching variables:
*/
-void HandleSIGTERM(int)
+static void HandleSIGTERM(int)
{
fRequestShutdown = true;
}
-void HandleSIGHUP(int)
+static void HandleSIGHUP(int)
{
fReopenDebugLog = true;
}
+#ifndef WIN32
+static void registerSignalHandler(int signal, void(*handler)(int))
+{
+ struct sigaction sa;
+ sa.sa_handler = handler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = 0;
+ sigaction(signal, &sa, NULL);
+}
+#endif
+
bool static Bind(CConnman& connman, const CService &addr, unsigned int flags) {
if (!(flags & BF_EXPLICIT) && IsLimited(addr))
return false;
@@ -298,7 +312,7 @@ void OnRPCStopped()
uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange);
RPCNotifyBlockChange(false, nullptr);
cvBlockChange.notify_all();
- LogPrint("rpc", "RPC stopped.\n");
+ LogPrint(BCLog::RPC, "RPC stopped.\n");
}
void OnRPCPreCommand(const CRPCCommand& cmd)
@@ -360,13 +374,13 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-banscore=<n>", strprintf(_("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD));
strUsage += HelpMessageOpt("-bantime=<n>", strprintf(_("Number of seconds to keep misbehaving peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME));
strUsage += HelpMessageOpt("-bind=<addr>", _("Bind to given address and always listen on it. Use [host]:port notation for IPv6"));
- strUsage += HelpMessageOpt("-connect=<ip>", _("Connect only to the specified node(s); -noconnect or -connect=0 alone to disable automatic connections"));
+ strUsage += HelpMessageOpt("-connect=<ip>", _("Connect only to the specified node(s); -connect=0 disables automatic connections"));
strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)"));
strUsage += HelpMessageOpt("-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %u)"), DEFAULT_NAME_LOOKUP));
- strUsage += HelpMessageOpt("-dnsseed", _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect/-noconnect)"));
+ strUsage += HelpMessageOpt("-dnsseed", _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)"));
strUsage += HelpMessageOpt("-externalip=<ip>", _("Specify your own public address"));
strUsage += HelpMessageOpt("-forcednsseed", strprintf(_("Always query for peer addresses via DNS lookup (default: %u)"), DEFAULT_FORCEDNSSEED));
- strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: 1 if no -proxy or -connect/-noconnect)"));
+ strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: 1 if no -proxy or -connect)"));
strUsage += HelpMessageOpt("-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION));
strUsage += HelpMessageOpt("-maxconnections=<n>", strprintf(_("Maintain at most <n> connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS));
strUsage += HelpMessageOpt("-maxreceivebuffer=<n>", strprintf(_("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER));
@@ -379,7 +393,6 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-port=<port>", strprintf(_("Listen for connections on <port> (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort()));
strUsage += HelpMessageOpt("-proxy=<ip:port>", _("Connect through SOCKS5 proxy"));
strUsage += HelpMessageOpt("-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)"), DEFAULT_PROXYRANDOMIZE));
- strUsage += HelpMessageOpt("-rpcserialversion", strprintf(_("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)"), DEFAULT_RPC_SERIALIZE_VERSION));
strUsage += HelpMessageOpt("-seednode=<ip>", _("Connect to a node to retrieve peer addresses, and disconnect"));
strUsage += HelpMessageOpt("-timeout=<n>", strprintf(_("Specify connection timeout in milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT));
strUsage += HelpMessageOpt("-torcontrol=<ip>:<port>", strprintf(_("Tor control port to use if onion listening enabled (default: %s)"), DEFAULT_TOR_CONTROL));
@@ -394,8 +407,6 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-whitebind=<addr>", _("Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6"));
strUsage += HelpMessageOpt("-whitelist=<IP address or network>", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times.") +
" " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway"));
- strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY));
- strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
strUsage += HelpMessageOpt("-maxuploadtarget=<n>", strprintf(_("Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET));
#ifdef ENABLE_WALLET
@@ -430,11 +441,8 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified BIP9 deployment (regtest-only)");
}
- std::string debugCategories = "addrman, alert, bench, cmpctblock, coindb, db, http, leveldb, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq"; // Don't translate these and qt below
- if (mode == HMM_BITCOIN_QT)
- debugCategories += ", qt";
strUsage += HelpMessageOpt("-debug=<category>", strprintf(_("Output debugging information (default: %u, supplying <category> is optional)"), 0) + ". " +
- _("If <category> is not supplied or if <category> = 1, output all debugging information.") + _("<category> can be:") + " " + debugCategories + ".");
+ _("If <category> is not supplied or if <category> = 1, output all debugging information.") + " " + _("<category> can be:") + " " + ListLogCategories() + ".");
if (showDebug)
strUsage += HelpMessageOpt("-nodebug", "Turn off debugging messages, same as -debug=0");
strUsage += HelpMessageOpt("-help-debug", _("Show all debugging options (usage: --help -help-debug)"));
@@ -447,8 +455,6 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-maxsigcachesize=<n>", strprintf("Limit size of signature cache to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE));
strUsage += HelpMessageOpt("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE));
}
- strUsage += HelpMessageOpt("-minrelaytxfee=<amt>", strprintf(_("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)"),
- CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)));
strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees (in %s) to use in a single wallet transaction or raw transaction; setting this too low may abort large transactions (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)));
strUsage += HelpMessageOpt("-printtoconsole", _("Send trace/debug info to console instead of debug.log file"));
@@ -470,6 +476,10 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %u)"), DEFAULT_ACCEPT_DATACARRIER));
strUsage += HelpMessageOpt("-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we relay and mine (default: %u)"), MAX_OP_RETURN_RELAY));
strUsage += HelpMessageOpt("-mempoolreplacement", strprintf(_("Enable transaction replacement in the memory pool (default: %u)"), DEFAULT_ENABLE_REPLACEMENT));
+ strUsage += HelpMessageOpt("-minrelaytxfee=<amt>", strprintf(_("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)"),
+ CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)));
+ strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY));
+ strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
strUsage += HelpMessageGroup(_("Block creation options:"));
strUsage += HelpMessageOpt("-blockmaxweight=<n>", strprintf(_("Set maximum BIP141 block weight (default: %d)"), DEFAULT_BLOCK_MAX_WEIGHT));
@@ -488,6 +498,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-rpcauth=<userpw>", _("Username and hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcuser. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times"));
strUsage += HelpMessageOpt("-rpcport=<port>", strprintf(_("Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)"), BaseParams(CBaseChainParams::MAIN).RPCPort(), BaseParams(CBaseChainParams::TESTNET).RPCPort()));
strUsage += HelpMessageOpt("-rpcallowip=<ip>", _("Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times"));
+ strUsage += HelpMessageOpt("-rpcserialversion", strprintf(_("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)"), DEFAULT_RPC_SERIALIZE_VERSION));
strUsage += HelpMessageOpt("-rpcthreads=<n>", strprintf(_("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS));
if (showDebug) {
strUsage += HelpMessageOpt("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE));
@@ -848,19 +859,11 @@ bool AppInitBasicSetup()
}
// Clean shutdown on SIGTERM
- struct sigaction sa;
- sa.sa_handler = HandleSIGTERM;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = 0;
- sigaction(SIGTERM, &sa, NULL);
- sigaction(SIGINT, &sa, NULL);
+ registerSignalHandler(SIGTERM, HandleSIGTERM);
+ registerSignalHandler(SIGINT, HandleSIGTERM);
// Reopen debug.log on SIGHUP
- struct sigaction sa_hup;
- sa_hup.sa_handler = HandleSIGHUP;
- sigemptyset(&sa_hup.sa_mask);
- sa_hup.sa_flags = 0;
- sigaction(SIGHUP, &sa_hup, NULL);
+ registerSignalHandler(SIGHUP, HandleSIGHUP);
// Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly
signal(SIGPIPE, SIG_IGN);
@@ -903,12 +906,19 @@ bool AppInitParameterInteraction()
// ********************************************************* Step 3: parameter-to-internal-flags
- fDebug = mapMultiArgs.count("-debug");
- // Special-case: if -debug=0/-nodebug is set, turn off debugging messages
- if (fDebug) {
+ if (mapMultiArgs.count("-debug") > 0) {
+ // Special-case: if -debug=0/-nodebug is set, turn off debugging messages
const std::vector<std::string>& categories = mapMultiArgs.at("-debug");
- if (GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), std::string("0")) != categories.end())
- fDebug = false;
+
+ if (!(GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), std::string("0")) != categories.end())) {
+ for (const auto& cat : categories) {
+ uint32_t flag;
+ if (!GetLogCategory(&flag, &cat)) {
+ InitWarning(strprintf(_("Unsupported logging category %s.\n"), cat));
+ }
+ logCategories |= flag;
+ }
+ }
}
// Check for -debugnet
@@ -1162,7 +1172,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
#ifndef WIN32
CreatePidFile(GetPidFile(), getpid());
#endif
- if (GetBoolArg("-shrinkdebugfile", !fDebug)) {
+ if (GetBoolArg("-shrinkdebugfile", logCategories != BCLog::NONE)) {
// Do this first since it both loads a bunch of debug.log into memory,
// and because this needs to happen before any other debug.log printing
ShrinkDebugFile();
@@ -1486,7 +1496,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
break;
}
} catch (const std::exception& e) {
- if (fDebug) LogPrintf("%s\n", e.what());
+ LogPrintf("%s\n", e.what());
strLoadError = _("Error opening block database");
break;
}
diff --git a/src/miner.cpp b/src/miner.cpp
index ff28a5680e..386d75c4be 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -26,8 +26,6 @@
#include "validationinterface.h"
#include <algorithm>
-#include <boost/thread.hpp>
-#include <boost/tuple/tuple.hpp>
#include <queue>
#include <utility>
@@ -46,17 +44,6 @@ uint64_t nLastBlockTx = 0;
uint64_t nLastBlockSize = 0;
uint64_t nLastBlockWeight = 0;
-class ScoreCompare
-{
-public:
- ScoreCompare() {}
-
- bool operator()(const CTxMemPool::txiter a, const CTxMemPool::txiter b)
- {
- return CompareTxMemPoolEntryByScore()(*b,*a); // Convert to less than
- }
-};
-
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
int64_t nOldTime = pblock->nTime;
@@ -139,6 +126,8 @@ void BlockAssembler::resetBlock()
std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn, bool fMineWitnessTx)
{
+ int64_t nTimeStart = GetTimeMicros();
+
resetBlock();
pblocktemplate.reset(new CBlockTemplate());
@@ -177,7 +166,11 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
// transaction (which in most cases can be a no-op).
fIncludeWitness = IsWitnessEnabled(pindexPrev, chainparams.GetConsensus()) && fMineWitnessTx;
- addPackageTxs();
+ int nPackagesSelected = 0;
+ int nDescendantsUpdated = 0;
+ addPackageTxs(nPackagesSelected, nDescendantsUpdated);
+
+ int64_t nTime1 = GetTimeMicros();
nLastBlockTx = nBlockTx;
nLastBlockSize = nBlockSize;
@@ -209,6 +202,9 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
}
+ int64_t nTime2 = GetTimeMicros();
+
+ LogPrint(BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n", 0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated, 0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart));
return std::move(pblocktemplate);
}
@@ -282,9 +278,10 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
}
}
-void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
+int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
indexed_modified_transaction_set &mapModifiedTx)
{
+ int nDescendantsUpdated = 0;
BOOST_FOREACH(const CTxMemPool::txiter it, alreadyAdded) {
CTxMemPool::setEntries descendants;
mempool.CalculateDescendants(it, descendants);
@@ -292,6 +289,7 @@ void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alread
BOOST_FOREACH(CTxMemPool::txiter desc, descendants) {
if (alreadyAdded.count(desc))
continue;
+ ++nDescendantsUpdated;
modtxiter mit = mapModifiedTx.find(desc);
if (mit == mapModifiedTx.end()) {
CTxMemPoolModifiedEntry modEntry(desc);
@@ -304,6 +302,7 @@ void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alread
}
}
}
+ return nDescendantsUpdated;
}
// Skip entries in mapTx that are already in a block or are present
@@ -344,7 +343,7 @@ void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, CTxMemP
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
-void BlockAssembler::addPackageTxs()
+void BlockAssembler::addPackageTxs(int &nPackagesSelected, int &nDescendantsUpdated)
{
// mapModifiedTx will store sorted packages after they are modified
// because some of their txs are already in the block
@@ -358,6 +357,13 @@ void BlockAssembler::addPackageTxs()
CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
CTxMemPool::txiter iter;
+
+ // Limit the number of attempts to add transactions to the block when it is
+ // close to full; this is just a simple heuristic to finish quickly if the
+ // mempool has a lot of entries.
+ const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
+ int64_t nConsecutiveFailed = 0;
+
while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty())
{
// First try to find a new transaction in mapTx to evaluate.
@@ -419,6 +425,14 @@ void BlockAssembler::addPackageTxs()
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter);
}
+
+ ++nConsecutiveFailed;
+
+ if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
+ nBlockMaxWeight - 4000) {
+ // Give up if we're close to full and haven't succeeded in a while
+ break;
+ }
continue;
}
@@ -439,6 +453,9 @@ void BlockAssembler::addPackageTxs()
continue;
}
+ // This transaction will make it in; reset the failed counter.
+ nConsecutiveFailed = 0;
+
// Package can be added. Sort the entries in a valid order.
std::vector<CTxMemPool::txiter> sortedEntries;
SortForBlock(ancestors, iter, sortedEntries);
@@ -449,8 +466,10 @@ void BlockAssembler::addPackageTxs()
mapModifiedTx.erase(sortedEntries[i]);
}
+ ++nPackagesSelected;
+
// Update transactions that depend on each of these
- UpdatePackagesForAdded(ancestors, mapModifiedTx);
+ nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx);
}
}
diff --git a/src/miner.h b/src/miner.h
index a159b05534..1f3c9d652f 100644
--- a/src/miner.h
+++ b/src/miner.h
@@ -180,8 +180,10 @@ private:
void AddToBlock(CTxMemPool::txiter iter);
// Methods for how to add transactions to a block.
- /** Add transactions based on feerate including unconfirmed ancestors */
- void addPackageTxs();
+ /** Add transactions based on feerate including unconfirmed ancestors
+ * Increments nPackagesSelected / nDescendantsUpdated with corresponding
+ * statistics from the package selection (for logging statistics). */
+ void addPackageTxs(int &nPackagesSelected, int &nDescendantsUpdated);
// helper functions for addPackageTxs()
/** Remove confirmed (inBlock) entries from given set */
@@ -199,8 +201,9 @@ private:
/** Sort the package in an order that is valid to appear in a block */
void SortForBlock(const CTxMemPool::setEntries& package, CTxMemPool::txiter entry, std::vector<CTxMemPool::txiter>& sortedEntries);
/** Add descendants of given transactions to mapModifiedTx with ancestor
- * state updated assuming given transactions are inBlock. */
- void UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, indexed_modified_transaction_set &mapModifiedTx);
+ * state updated assuming given transactions are inBlock. Returns number
+ * of updated descendants. */
+ int UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded, indexed_modified_transaction_set &mapModifiedTx);
};
/** Modify the extranonce in a block */
diff --git a/src/net.cpp b/src/net.cpp
index 4c5b04b785..cf94faf854 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -190,7 +190,7 @@ void AdvertiseLocal(CNode *pnode)
}
if (addrLocal.IsRoutable())
{
- LogPrint("net", "AdvertiseLocal: advertising address %s\n", addrLocal.ToString());
+ LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString());
FastRandomContext insecure_rand;
pnode->PushAddress(addrLocal, insecure_rand);
}
@@ -356,7 +356,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
}
/// debug print
- LogPrint("net", "trying connection %s lastseen=%.1fhrs\n",
+ LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n",
pszDest ? pszDest : addrConnect.ToString(),
pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime)/3600.0);
@@ -423,7 +423,7 @@ void CConnman::DumpBanlist()
if (!bandb.Write(banmap))
SetBannedSetDirty(true);
- LogPrint("net", "Flushed %d banned node ips/subnets to banlist.dat %dms\n",
+ LogPrint(BCLog::NET, "Flushed %d banned node ips/subnets to banlist.dat %dms\n",
banmap.size(), GetTimeMillis() - nStart);
}
@@ -433,7 +433,7 @@ void CNode::CloseSocketDisconnect()
LOCK(cs_hSocket);
if (hSocket != INVALID_SOCKET)
{
- LogPrint("net", "disconnecting peer=%d\n", id);
+ LogPrint(BCLog::NET, "disconnecting peer=%d\n", id);
CloseSocket(hSocket);
}
}
@@ -565,7 +565,7 @@ void CConnman::SweepBanned()
{
setBanned.erase(it++);
setBannedIsDirty = true;
- LogPrint("net", "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, subNet.ToString());
+ LogPrint(BCLog::NET, "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, subNet.ToString());
}
else
++it;
@@ -711,7 +711,7 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete
return false;
if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) {
- LogPrint("net", "Oversized message from peer=%i, disconnecting\n", GetId());
+ LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting\n", GetId());
return false;
}
@@ -1087,7 +1087,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
{
if (!AttemptToEvictConnection()) {
// No connection to evict, disconnect the new connection
- LogPrint("net", "failed to find an eviction candidate - connection dropped (full)\n");
+ LogPrint(BCLog::NET, "failed to find an eviction candidate - connection dropped (full)\n");
CloseSocket(hSocket);
return;
}
@@ -1101,7 +1101,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
pnode->fWhitelisted = whitelisted;
GetNodeSignals().InitializeNode(pnode, *this);
- LogPrint("net", "connection from %s accepted\n", addr.ToString());
+ LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString());
{
LOCK(cs_vNodes);
@@ -1336,8 +1336,9 @@ void CConnman::ThreadSocketHandler()
else if (nBytes == 0)
{
// socket closed gracefully
- if (!pnode->fDisconnect)
- LogPrint("net", "socket closed\n");
+ if (!pnode->fDisconnect) {
+ LogPrint(BCLog::NET, "socket closed\n");
+ }
pnode->CloseSocketDisconnect();
}
else if (nBytes < 0)
@@ -1375,7 +1376,7 @@ void CConnman::ThreadSocketHandler()
{
if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
{
- LogPrint("net", "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
+ LogPrint(BCLog::NET, "socket no message in first 60 seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id);
pnode->fDisconnect = true;
}
else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL)
@@ -1634,7 +1635,7 @@ void CConnman::DumpAddresses()
CAddrDB adb;
adb.Write(addrman);
- LogPrint("net", "Flushed %d addresses to peers.dat %dms\n",
+ LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n",
addrman.size(), GetTimeMillis() - nStart);
}
@@ -1807,7 +1808,7 @@ void CConnman::ThreadOpenConnections()
int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000);
if (!interruptNet.sleep_for(std::chrono::milliseconds(randsleep)))
return;
- LogPrint("net", "Making feeler connection to %s\n", addrConnect.ToString());
+ LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString());
}
OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, NULL, false, fFeeler);
@@ -2150,9 +2151,7 @@ void Discover(boost::thread_group& threadGroup)
void CConnman::SetNetworkActive(bool active)
{
- if (fDebug) {
- LogPrint("net", "SetNetworkActive: %s\n", active);
- }
+ LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active);
if (!active) {
fNetworkActive = false;
@@ -2241,7 +2240,7 @@ bool CConnman::Start(CScheduler& scheduler, std::string& strNodeError, Options c
SetBannedSetDirty(false); // no need to write down, just read data
SweepBanned(); // sweep out unused entries
- LogPrint("net", "Loaded %d banned node ips/subnets from banlist.dat %dms\n",
+ LogPrint(BCLog::NET, "Loaded %d banned node ips/subnets from banlist.dat %dms\n",
banmap.size(), GetTimeMillis() - nStart);
} else {
LogPrintf("Invalid or missing banlist.dat; recreating\n");
@@ -2683,10 +2682,11 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
mapRecvBytesPerMsgCmd[msg] = 0;
mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0;
- if (fLogIPs)
- LogPrint("net", "Added connection to %s peer=%d\n", addrName, id);
- else
- LogPrint("net", "Added connection peer=%d\n", id);
+ if (fLogIPs) {
+ LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id);
+ } else {
+ LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
+ }
}
CNode::~CNode()
@@ -2713,7 +2713,7 @@ void CNode::AskFor(const CInv& inv)
nRequestTime = it->second;
else
nRequestTime = 0;
- LogPrint("net", "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime/1000000), id);
+ LogPrint(BCLog::NET, "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime/1000000), id);
// Make sure not to reuse time indexes to keep things in the same order
int64_t nNow = GetTimeMicros() - 1000000;
@@ -2740,7 +2740,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
size_t nMessageSize = msg.data.size();
size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE;
- LogPrint("net", "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id);
+ LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id);
std::vector<unsigned char> serializedHeader;
serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 71a0a1de22..17653f542d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -256,10 +256,11 @@ void PushNodeVersion(CNode *pnode, CConnman& connman, int64_t nTime)
connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));
- if (fLogIPs)
- LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
- else
- LogPrint("net", "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+ if (fLogIPs) {
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
+ } else {
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+ }
}
void InitializeNode(CNode *pnode, CConnman& connman) {
@@ -619,7 +620,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
unsigned int sz = GetTransactionWeight(*tx);
if (sz >= MAX_STANDARD_TX_WEIGHT)
{
- LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
+ LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}
@@ -631,7 +632,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
AddToCompactExtraTransactions(tx);
- LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
+ LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
return true;
}
@@ -666,7 +667,7 @@ void EraseOrphansFor(NodeId peer)
nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
}
}
- if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer=%d\n", nErased, peer);
+ if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}
@@ -691,7 +692,7 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRE
}
// Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
- if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
+ if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
}
while (mapOrphanTransactions.size() > nMaxOrphans)
{
@@ -767,7 +768,7 @@ void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIn
BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
nErased += EraseOrphanTx(orphanHash);
}
- LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
+ LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
}
}
@@ -808,7 +809,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
!PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
- LogPrint("net", "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
+ LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
hashBlock.ToString(), pnode->id);
connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
state.pindexBestHeaderSent = pindex;
@@ -1024,7 +1025,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
if (send && connman.OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
- LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
+ LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
//disconnect node
pfrom->fDisconnect = true;
@@ -1168,7 +1169,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman, const std::atomic<bool>& interruptMsgProc)
{
- LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
+ LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
if (IsArgSet("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 0)) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
@@ -1192,7 +1193,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (strCommand == NetMsgType::REJECT)
{
- if (fDebug) {
+ if (LogAcceptCategory(BCLog::NET)) {
try {
std::string strMsg; unsigned char ccode; std::string strReason;
vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
@@ -1206,10 +1207,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
vRecv >> hash;
ss << ": hash " << hash.ToString();
}
- LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
+ LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
} catch (const std::ios_base::failure&) {
// Avoid feedback loops by preventing reject messages from triggering a new reject message.
- LogPrint("net", "Unparseable reject message received\n");
+ LogPrint(BCLog::NET, "Unparseable reject message received\n");
}
}
}
@@ -1247,7 +1248,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
if (pfrom->nServicesExpected & ~nServices)
{
- LogPrint("net", "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
+ LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected);
connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
pfrom->fDisconnect = true;
@@ -1335,11 +1336,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
FastRandomContext insecure_rand;
if (addr.IsRoutable())
{
- LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
+ LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr, insecure_rand);
} else if (IsPeerAddrLocalGood(pfrom)) {
addr.SetIP(addrMe);
- LogPrint("net", "ProcessMessages: advertising address %s\n", addr.ToString());
+ LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr, insecure_rand);
}
}
@@ -1541,7 +1542,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return true;
bool fAlreadyHave = AlreadyHave(inv);
- LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
+ LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
if (inv.type == MSG_TX) {
inv.type |= nFetchFlags;
@@ -1556,16 +1557,17 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// we now only provide a getheaders response here. When we receive the headers, we will
// then ask for the blocks we need.
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
- LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
}
}
else
{
pfrom->AddInventoryKnown(inv);
- if (fBlocksOnly)
- LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
- else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())
+ if (fBlocksOnly) {
+ LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
+ } else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) {
pfrom->AskFor(inv);
+ }
}
// Track requests for our stuff
@@ -1588,11 +1590,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return error("message getdata size() = %u", vInv.size());
}
- if (fDebug || (vInv.size() != 1))
- LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
+ LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
- if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
- LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
+ if (vInv.size() > 0) {
+ LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
+ }
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
@@ -1631,12 +1633,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
- LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
+ LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
- LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
// If pruning, don't inv blocks unless we have on disk and are likely to still have
@@ -1644,7 +1646,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
{
- LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
@@ -1652,7 +1654,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
- LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
@@ -1693,7 +1695,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// might maliciously send lots of getblocktxn requests to trigger
// expensive disk reads, because it will require the peer to
// actually receive all the data read from disk over the network.
- LogPrint("net", "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
+ LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH);
CInv inv;
inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
inv.hash = req.blockhash;
@@ -1718,7 +1720,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(cs_main);
if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
- LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
+ LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
return true;
}
@@ -1743,7 +1745,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
std::vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
- LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
@@ -1773,7 +1775,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
if (!fRelayTxes && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
{
- LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
+ LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->id);
return true;
}
@@ -1805,7 +1807,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
pfrom->nLastTXTime = GetTime();
- LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
+ LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->id,
tx.GetHash().ToString(),
mempool.size(), mempool.DynamicMemoryUsage() / 1000);
@@ -1835,7 +1837,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (setMisbehaving.count(fromPeer))
continue;
if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, true, &fMissingInputs2, &lRemovedTxn)) {
- LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
+ LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx, connman);
for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
vWorkQueue.emplace_back(orphanHash, i);
@@ -1850,11 +1852,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
setMisbehaving.insert(fromPeer);
- LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
+ LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee
- LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
+ LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
if (!orphanTx.HasWitness() && !stateDummy.CorruptionPossible()) {
// Do not use rejection cache for witness transactions or
@@ -1892,10 +1894,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
- if (nEvicted > 0)
- LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
+ if (nEvicted > 0) {
+ LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
+ }
} else {
- LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
+ LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
// We will continue to reject this tx since it has rejected
// parents so avoid re-requesting it from other peers.
recentRejects->insert(tx.GetHash());
@@ -1939,7 +1942,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
int nDoS = 0;
if (state.IsInvalid(nDoS))
{
- LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
+ LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->id,
FormatStateMessage(state));
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
@@ -2046,7 +2049,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
(*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
else {
// The block was already in flight using compact blocks from the same peer
- LogPrint("net", "Peer sent us compact block we were already syncing!\n");
+ LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
return true;
}
}
@@ -2161,7 +2164,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
it->second.first != pfrom->GetId()) {
- LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
+ LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
return true;
}
@@ -2254,7 +2257,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
nodestate->nUnconnectingHeaders++;
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
- LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+ LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
pindexBestHeader->nHeight,
@@ -2296,7 +2299,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(cs_main);
CNodeState *nodestate = State(pfrom->GetId());
if (nodestate->nUnconnectingHeaders > 0) {
- LogPrint("net", "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
+ LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders);
}
nodestate->nUnconnectingHeaders = 0;
@@ -2307,7 +2310,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
- LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
+ LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
}
@@ -2332,7 +2335,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// the main chain -- this shouldn't really happen. Bail out on the
// direct fetch and rely on parallel download instead.
if (!chainActive.Contains(pindexWalk)) {
- LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
+ LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
pindexLast->GetBlockHash().ToString(),
pindexLast->nHeight);
} else {
@@ -2346,11 +2349,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
- LogPrint("net", "Requesting block %s from peer=%d\n",
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom->id);
}
if (vGetData.size() > 1) {
- LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
+ LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
}
if (vGetData.size() > 0) {
@@ -2370,7 +2373,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
vRecv >> *pblock;
- LogPrint("net", "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id);
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network.
@@ -2402,14 +2405,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
if (!pfrom->fInbound) {
- LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
+ LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
return true;
}
// Only send one GetAddr response per connection to reduce resource waste
// and discourage addr stamping of INV announcements.
if (pfrom->fSentAddr) {
- LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
+ LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
return true;
}
pfrom->fSentAddr = true;
@@ -2426,14 +2429,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
{
if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
{
- LogPrint("net", "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
+ LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
return true;
}
if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted)
{
- LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
+ LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
return true;
}
@@ -2509,7 +2512,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
if (!(sProblem.empty())) {
- LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
+ LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
pfrom->id,
sProblem,
pfrom->nPingNonceSent,
@@ -2587,7 +2590,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
LOCK(pfrom->cs_feeFilter);
pfrom->minFeeFilter = newFeeFilter;
}
- LogPrint("net", "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
+ LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id);
}
}
@@ -2598,7 +2601,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
else {
// Ignore unknown commands for extensibility
- LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
+ LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
}
@@ -2876,7 +2879,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
- LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
+ LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
}
}
@@ -2960,7 +2963,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
// We only send up to 1 block as header-and-ids, as otherwise
// probably means we're doing an initial-ish-sync or they're slow
- LogPrint("net", "%s sending header-and-ids %s to peer=%d\n", __func__,
+ LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->id);
int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
@@ -2988,12 +2991,12 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
if (vHeaders.size() > 1) {
- LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
+ LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
vHeaders.size(),
vHeaders.front().GetHash().ToString(),
vHeaders.back().GetHash().ToString(), pto->id);
} else {
- LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
+ LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->id);
}
connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
@@ -3015,14 +3018,14 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
// This should be very rare and could be optimized out.
// Just log for now.
if (chainActive[pindex->nHeight] != pindex) {
- LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
+ LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
}
// If the peer's chain has this block, don't inv it back.
if (!PeerHasHeader(&state, pindex)) {
pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
- LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
+ LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
pto->id, hashToAnnounce.ToString());
}
}
@@ -3201,13 +3204,13 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
- LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
+ LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->id);
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
State(staller)->nStallingSince = nNow;
- LogPrint("net", "Stall started peer=%d\n", staller);
+ LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
}
}
}
@@ -3220,8 +3223,7 @@ bool SendMessages(CNode* pto, CConnman& connman, const std::atomic<bool>& interr
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(inv))
{
- if (fDebug)
- LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
+ LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->id);
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
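The hunks above (and throughout net_processing.cpp) swap the old string category argument, LogPrint("net", ...), for a compile-time checked flag, LogPrint(BCLog::NET, ...). A minimal standalone sketch of the gating these call sites rely on; the BCLog, logCategories and LogAcceptCategory names come from the util.h hunk later in this patch, while the printf-style LogPrint macro here is a simplification of the real variadic helper:

    // Sketch only: flag-gated logging, compilable on its own.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    namespace BCLog { enum LogFlags : uint32_t { NONE = 0, NET = (1 << 0), MEMPOOL = (1 << 2) }; }

    // Bitfield of enabled categories (kept in util.cpp in the real tree).
    static std::atomic<uint32_t> logCategories(0);

    static inline bool LogAcceptCategory(uint32_t category)
    {
        return (logCategories.load(std::memory_order_relaxed) & category) != 0;
    }

    // Format and emit the message only when its category is enabled.
    #define LogPrint(category, ...)            \
        do {                                   \
            if (LogAcceptCategory(category)) { \
                std::printf(__VA_ARGS__);      \
            }                                  \
        } while (0)

    int main()
    {
        logCategories |= BCLog::NET;                     // e.g. started with -debug=net
        LogPrint(BCLog::NET, "peer=%d connected\n", 7);  // printed
        LogPrint(BCLog::MEMPOOL, "not shown\n");         // suppressed: category disabled
    }

A misspelled flag is now a compile error, whereas a misspelled string category used to fail silently.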
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 0f02e93e46..bdc725359a 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -281,7 +281,7 @@ std::string Socks5ErrorString(int err)
static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, SOCKET& hSocket)
{
IntrRecvError recvr;
- LogPrint("net", "SOCKS5 connecting %s\n", strDest);
+ LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest);
if (strDest.size() > 255) {
CloseSocket(hSocket);
return error("Hostname too long");
@@ -327,7 +327,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
CloseSocket(hSocket);
return error("Error sending authentication to proxy");
}
- LogPrint("proxy", "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password);
+ LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password);
char pchRetA[2];
if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) {
CloseSocket(hSocket);
@@ -409,7 +409,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
CloseSocket(hSocket);
return error("Error reading from proxy");
}
- LogPrint("net", "SOCKS5 connected %s\n", strDest);
+ LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest);
return true;
}
@@ -458,7 +458,7 @@ bool static ConnectSocketDirectly(const CService &addrConnect, SOCKET& hSocketRe
int nRet = select(hSocket + 1, NULL, &fdset, NULL, &timeout);
if (nRet == 0)
{
- LogPrint("net", "connection to %s timeout\n", addrConnect.ToString());
+ LogPrint(BCLog::NET, "connection to %s timeout\n", addrConnect.ToString());
CloseSocket(hSocket);
return false;
}
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index da33e4100f..38e07dc345 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -165,7 +165,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
}
}
- LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
+ LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
confTarget, requireGreater ? ">" : "<", successBreakPoint,
requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
@@ -241,7 +241,7 @@ void TxConfirmStats::Read(CAutoFile& filein)
for (unsigned int i = 0; i < buckets.size(); i++)
bucketMap[buckets[i]] = i;
- LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
+ LogPrint(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n",
numBuckets, maxConfirms);
}
@@ -260,24 +260,26 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
if (nBestSeenHeight == 0) // the BlockPolicyEstimator hasn't seen any blocks yet
blocksAgo = 0;
if (blocksAgo < 0) {
- LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n");
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, blocks ago is negative for mempool tx\n");
return; //This can't happen because we call this with our best seen height, no entries can have higher
}
if (blocksAgo >= (int)unconfTxs.size()) {
- if (oldUnconfTxs[bucketindex] > 0)
+ if (oldUnconfTxs[bucketindex] > 0) {
oldUnconfTxs[bucketindex]--;
- else
- LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
+ } else {
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
bucketindex);
+ }
}
else {
unsigned int blockIndex = entryHeight % unconfTxs.size();
- if (unconfTxs[blockIndex][bucketindex] > 0)
+ if (unconfTxs[blockIndex][bucketindex] > 0) {
unconfTxs[blockIndex][bucketindex]--;
- else
- LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
+ } else {
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
blockIndex, bucketindex);
+ }
}
}
@@ -316,7 +318,7 @@ void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, boo
unsigned int txHeight = entry.GetHeight();
uint256 hash = entry.GetTx().GetHash();
if (mapMemPoolTxs.count(hash)) {
- LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error mempool tx %s already being tracked\n",
hash.ToString().c_str());
return;
}
@@ -358,7 +360,7 @@ bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxM
if (blocksToConfirm <= 0) {
// This can't happen because we don't process transactions from a block with a height
// lower than our greatest seen height
- LogPrint("estimatefee", "Blockpolicy error Transaction had negative blocksToConfirm\n");
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error Transaction had negative blocksToConfirm\n");
return false;
}
@@ -399,7 +401,7 @@ void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
// Update all exponential averages with the current block state
feeStats.UpdateMovingAverages();
- LogPrint("estimatefee", "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
+ LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size());
trackedTxs = 0;
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 662e8037ca..05a3bd71fd 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -152,15 +152,21 @@ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTrans
#if QT_VERSION < 0x050000
void DebugMessageHandler(QtMsgType type, const char *msg)
{
- const char *category = (type == QtDebugMsg) ? "qt" : NULL;
- LogPrint(category, "GUI: %s\n", msg);
+ if (type == QtDebugMsg) {
+ LogPrint(BCLog::QT, "GUI: %s\n", msg);
+ } else {
+ LogPrintf("GUI: %s\n", msg);
+ }
}
#else
void DebugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString &msg)
{
Q_UNUSED(context);
- const char *category = (type == QtDebugMsg) ? "qt" : NULL;
- LogPrint(category, "GUI: %s\n", msg.toStdString());
+ if (type == QtDebugMsg) {
+ LogPrint(BCLog::QT, "GUI: %s\n", msg.toStdString());
+ } else {
+ LogPrintf("GUI: %s\n", msg.toStdString());
+ }
}
#endif
diff --git a/src/qt/callback.h b/src/qt/callback.h
new file mode 100644
index 0000000000..a8b593a652
--- /dev/null
+++ b/src/qt/callback.h
@@ -0,0 +1,30 @@
+#ifndef BITCOIN_QT_CALLBACK_H
+#define BITCOIN_QT_CALLBACK_H
+
+#include <QObject>
+
+class Callback : public QObject
+{
+ Q_OBJECT
+public Q_SLOTS:
+ virtual void call() = 0;
+};
+
+template <typename F>
+class FunctionCallback : public Callback
+{
+ F f;
+
+public:
+ FunctionCallback(F f_) : f(std::move(f_)) {}
+ ~FunctionCallback() override {}
+ void call() override { f(this); }
+};
+
+template <typename F>
+FunctionCallback<F>* makeCallback(F f)
+{
+ return new FunctionCallback<F>(std::move(f));
+}
+
+#endif // BITCOIN_QT_CALLBACK_H
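callback.h wraps an arbitrary functor in a QObject with a call() slot, which is what lets test code hand a lambda to the string-based QTimer::singleShot overload. A usage sketch, mirroring the wallettests.cpp change further down (needs a Qt build, since Q_OBJECT requires moc):

    #include <QTimer>
    #include "qt/callback.h"

    void deferOnEventLoop()
    {
        // makeCallback heap-allocates a QObject whose call() slot runs the lambda;
        // the lambda receives the callback pointer so it can delete itself when done.
        QTimer::singleShot(0, makeCallback([](Callback* callback) {
            // ... work to run once the event loop spins ...
            delete callback;
        }), SLOT(call()));
    }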
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 60406c2059..5167232d6a 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -175,6 +175,10 @@ bool RPCConsole::RPCParseCommandLine(std::string &strResult, const std::string &
nDepthInsideSensitive = 1;
filter_begin_pos = chpos;
}
+ // Make sure stack is not empty before adding something
+ if (stack.empty()) {
+ stack.push_back(std::vector<std::string>());
+ }
stack.back().push_back(strArg);
};
@@ -626,9 +630,12 @@ void RPCConsole::setClientModel(ClientModel *model)
for (size_t i = 0; i < commandList.size(); ++i)
{
wordList << commandList[i].c_str();
+ wordList << ("help " + commandList[i]).c_str();
}
+ wordList.sort();
autoCompleter = new QCompleter(wordList, this);
+ autoCompleter->setModelSorting(QCompleter::CaseSensitivelySortedModel);
ui->lineEdit->setCompleter(autoCompleter);
autoCompleter->popup()->installEventFilter(this);
// Start thread to execute RPC commands.
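Two things happen in this rpcconsole hunk: "help <command>" variants are added to the completion word list, and the list is sorted up front so the completer can be told the model is already ordered and binary-search it instead of scanning. The same idiom in isolation (illustrative sketch; the line edit and command source are placeholders, not the console's actual members):

    #include <QCompleter>
    #include <QLineEdit>
    #include <QStringList>
    #include <string>
    #include <vector>

    void installCompleter(QLineEdit* lineEdit, const std::vector<std::string>& commands)
    {
        QStringList wordList;
        for (const std::string& cmd : commands) {
            wordList << cmd.c_str();
            wordList << ("help " + cmd).c_str();  // let "help ge<Tab>" complete too
        }
        wordList.sort();

        QCompleter* completer = new QCompleter(wordList, lineEdit);
        // The word list is pre-sorted, so the completer may binary-search it.
        completer->setModelSorting(QCompleter::CaseSensitivelySortedModel);
        lineEdit->setCompleter(completer);
    }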
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 36d93361dd..b7c1e4c4d5 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -1,6 +1,7 @@
#include "wallettests.h"
#include "qt/bitcoinamountfield.h"
+#include "qt/callback.h"
#include "qt/optionsmodel.h"
#include "qt/platformstyle.h"
#include "qt/qvalidatedlineedit.h"
@@ -22,9 +23,7 @@ namespace
//! Press "Yes" button in modal send confirmation dialog.
void ConfirmSend()
{
- QTimer* timer = new QTimer;
- timer->setSingleShot(true);
- QObject::connect(timer, &QTimer::timeout, []() {
+ QTimer::singleShot(0, makeCallback([](Callback* callback) {
for (QWidget* widget : QApplication::topLevelWidgets()) {
if (widget->inherits("SendConfirmationDialog")) {
SendConfirmationDialog* dialog = qobject_cast<SendConfirmationDialog*>(widget);
@@ -33,8 +32,8 @@ void ConfirmSend()
button->click();
}
}
- });
- timer->start(0);
+ delete callback;
+ }), SLOT(call()));
}
//! Send coins to address and return txid.
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index 7e16cc9dd4..d81188895b 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -273,7 +273,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco
//
// Debug view
//
- if (fDebug)
+ if (logCategories != BCLog::NONE)
{
strHTML += "<hr><br>" + tr("Debug information") + "<br><br>";
BOOST_FOREACH(const CTxIn& txin, wtx.tx->vin)
diff --git a/src/random.cpp b/src/random.cpp
index 8284f457c9..6bcd0a70ba 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -91,7 +91,7 @@ static void RandAddSeedPerfmon()
if (ret == ERROR_SUCCESS) {
RAND_add(vData.data(), nSize, nSize / 100.0);
memory_cleanse(vData.data(), nSize);
- LogPrint("rand", "%s: %lu bytes\n", __func__, nSize);
+ LogPrint(BCLog::RAND, "%s: %lu bytes\n", __func__, nSize);
} else {
static bool warned = false; // Warn only once
if (!warned) {
diff --git a/src/rest.cpp b/src/rest.cpp
index 54eefcafe3..9dcaf269d6 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -9,6 +9,7 @@
#include "primitives/transaction.h"
#include "validation.h"
#include "httpserver.h"
+#include "rpc/blockchain.h"
#include "rpc/server.h"
#include "streams.h"
#include "sync.h"
@@ -55,12 +56,9 @@ struct CCoin {
}
};
-extern void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
-extern UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false);
-extern UniValue mempoolInfoToJSON();
-extern UniValue mempoolToJSON(bool fVerbose = false);
-extern void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
-extern UniValue blockheaderToJSON(const CBlockIndex* blockindex);
+/* Defined in rawtransaction.cpp */
+void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
+void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
static bool RESTERR(HTTPRequest* req, enum HTTPStatusCode status, std::string message)
{
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 96254a8cb9..01066d0eb2 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -3,6 +3,8 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include "rpc/blockchain.h"
+
#include "amount.h"
#include "chain.h"
#include "chainparams.h"
@@ -42,13 +44,6 @@ static CUpdatedBlock latestblock;
extern void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry);
void ScriptPubKeyToJSON(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
-/**
- * Get the difficulty of the net wrt to the given block index, or the chain tip if
- * not provided.
- *
- * @return A floating point number that is a multiple of the main net minimum
- * difficulty (4295032833 hashes).
- */
double GetDifficulty(const CBlockIndex* blockindex)
{
if (blockindex == NULL)
@@ -106,7 +101,7 @@ UniValue blockheaderToJSON(const CBlockIndex* blockindex)
return result;
}
-UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false)
+UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails)
{
UniValue result(UniValue::VOBJ);
result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex()));
@@ -383,7 +378,7 @@ void entryToJSON(UniValue &info, const CTxMemPoolEntry &e)
info.push_back(Pair("depends", depends));
}
-UniValue mempoolToJSON(bool fVerbose = false)
+UniValue mempoolToJSON(bool fVerbose)
{
if (fVerbose)
{
@@ -860,7 +855,7 @@ UniValue pruneblockchain(const JSONRPCRequest& request)
else if (height > chainHeight)
throw JSONRPCError(RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height.");
else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) {
- LogPrint("rpc", "Attempt to prune blocks close to the tip. Retaining the minimum number of blocks.");
+ LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. Retaining the minimum number of blocks.");
height = chainHeight - MIN_BLOCKS_TO_KEEP;
}
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
new file mode 100644
index 0000000000..c021441b0a
--- /dev/null
+++ b/src/rpc/blockchain.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2017 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_RPC_BLOCKCHAIN_H
+#define BITCOIN_RPC_BLOCKCHAIN_H
+
+class CBlock;
+class CBlockIndex;
+class CScript;
+class CTransaction;
+class uint256;
+class UniValue;
+
+/**
+ * Get the difficulty of the net wrt to the given block index, or the chain tip if
+ * not provided.
+ *
+ * @return A floating point number that is a multiple of the main net minimum
+ * difficulty (4295032833 hashes).
+ */
+double GetDifficulty(const CBlockIndex* blockindex = nullptr);
+
+/** Callback for when block tip changed. */
+void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);
+
+/** Block description to JSON */
+UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool txDetails = false);
+
+/** Mempool information to JSON */
+UniValue mempoolInfoToJSON();
+
+/** Mempool to JSON */
+UniValue mempoolToJSON(bool fVerbose = false);
+
+/** Block header to JSON */
+UniValue blockheaderToJSON(const CBlockIndex* blockindex);
+
+#endif
+
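With these prototypes in a real header, callers such as rest.cpp (above) and rpc/mining.cpp and rpc/misc.cpp (below) can drop their hand-rolled extern declarations. A typical call site then looks like this sketch (block and pblockindex are assumed to be in scope):

    #include "rpc/blockchain.h"

    UniValue block_json   = blockToJSON(block, pblockindex, /* txDetails = */ true);
    UniValue header_json  = blockheaderToJSON(pblockindex);
    UniValue mempool_json = mempoolInfoToJSON();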
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 2cb250a198..35bc5d6a82 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -81,7 +81,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getblockheader", 1, "verbose" },
{ "gettransaction", 1, "include_watchonly" },
{ "getrawtransaction", 1, "verbose" },
- { "createrawtransaction", 0, "transactions" },
+ { "createrawtransaction", 0, "inputs" },
{ "createrawtransaction", 1, "outputs" },
{ "createrawtransaction", 2, "locktime" },
{ "signrawtransaction", 1, "prevtxs" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 4db8ffaa7d..b823c159d3 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -16,6 +16,7 @@
#include "miner.h"
#include "net.h"
#include "pow.h"
+#include "rpc/blockchain.h"
#include "rpc/server.h"
#include "txmempool.h"
#include "util.h"
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index de1bbe62e5..24c5eeffe9 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -9,6 +9,7 @@
#include "validation.h"
#include "net.h"
#include "netbase.h"
+#include "rpc/blockchain.h"
#include "rpc/server.h"
#include "timedata.h"
#include "util.h"
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 696f1bd781..717e9d75f3 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -360,7 +360,7 @@ UniValue createrawtransaction(const JSONRPCRequest& request)
"it is not stored in the wallet or transmitted to the network.\n"
"\nArguments:\n"
- "1. \"inputs\" (string, required) A json array of json objects\n"
+ "1. \"inputs\" (array, required) A json array of json objects\n"
" [\n"
" {\n"
" \"txid\":\"id\", (string, required) The transaction id\n"
@@ -369,7 +369,7 @@ UniValue createrawtransaction(const JSONRPCRequest& request)
" } \n"
" ,...\n"
" ]\n"
- "2. \"outputs\" (string, required) a json object with outputs\n"
+ "2. \"outputs\" (object, required) a json object with outputs\n"
" {\n"
" \"address\": x.xxx, (numeric or string, required) The key is the bitcoin address, the numeric value (can be string) is the " + CURRENCY_UNIT + " amount\n"
" \"data\": \"hex\" (string, required) The key is \"data\", the value is hex encoded data\n"
@@ -936,7 +936,7 @@ static const CRPCCommand commands[] =
{ // category name actor (function) okSafeMode
// --------------------- ------------------------ ----------------------- ----------
{ "rawtransactions", "getrawtransaction", &getrawtransaction, true, {"txid","verbose"} },
- { "rawtransactions", "createrawtransaction", &createrawtransaction, true, {"transactions","outputs","locktime"} },
+ { "rawtransactions", "createrawtransaction", &createrawtransaction, true, {"inputs","outputs","locktime"} },
{ "rawtransactions", "decoderawtransaction", &decoderawtransaction, true, {"hexstring"} },
{ "rawtransactions", "decodescript", &decodescript, true, {"hexstring"} },
{ "rawtransactions", "sendrawtransaction", &sendrawtransaction, false, {"hexstring","allowhighfees"} },
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 9b0699afcc..141062b3c0 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -305,7 +305,7 @@ bool CRPCTable::appendCommand(const std::string& name, const CRPCCommand* pcmd)
bool StartRPC()
{
- LogPrint("rpc", "Starting RPC\n");
+ LogPrint(BCLog::RPC, "Starting RPC\n");
fRPCRunning = true;
g_rpcSignals.Started();
return true;
@@ -313,15 +313,16 @@ bool StartRPC()
void InterruptRPC()
{
- LogPrint("rpc", "Interrupting RPC\n");
+ LogPrint(BCLog::RPC, "Interrupting RPC\n");
// Interrupt e.g. running longpolls
fRPCRunning = false;
}
void StopRPC()
{
- LogPrint("rpc", "Stopping RPC\n");
+ LogPrint(BCLog::RPC, "Stopping RPC\n");
deadlineTimers.clear();
+ DeleteAuthCookie();
g_rpcSignals.Stopped();
}
@@ -368,8 +369,9 @@ void JSONRPCRequest::parse(const UniValue& valRequest)
if (!valMethod.isStr())
throw JSONRPCError(RPC_INVALID_REQUEST, "Method must be a string");
strMethod = valMethod.get_str();
- if (strMethod != "getblocktemplate")
- LogPrint("rpc", "ThreadRPCServer method=%s\n", SanitizeString(strMethod));
+ if (strMethod != "getblocktemplate") {
+ LogPrint(BCLog::RPC, "ThreadRPCServer method=%s\n", SanitizeString(strMethod));
+ }
// Parse params
UniValue valParams = find_value(request, "params");
@@ -531,7 +533,7 @@ void RPCRunLater(const std::string& name, boost::function<void(void)> func, int6
if (!timerInterface)
throw JSONRPCError(RPC_INTERNAL_ERROR, "No timer handler registered for RPC");
deadlineTimers.erase(name);
- LogPrint("rpc", "queue run of timer %s in %i seconds (using %s)\n", name, nSeconds, timerInterface->Name());
+ LogPrint(BCLog::RPC, "queue run of timer %s in %i seconds (using %s)\n", name, nSeconds, timerInterface->Name());
deadlineTimers.emplace(name, std::unique_ptr<RPCTimerBase>(timerInterface->NewTimer(func, nSeconds*1000)));
}
diff --git a/src/rpc/server.h b/src/rpc/server.h
index ad064e5690..34a9d3c24c 100644
--- a/src/rpc/server.h
+++ b/src/rpc/server.h
@@ -68,7 +68,7 @@ void SetRPCWarmupStatus(const std::string& newStatus);
void SetRPCWarmupFinished();
/* returns the current warmup state. */
-bool RPCIsInWarmup(std::string *statusOut);
+bool RPCIsInWarmup(std::string *outStatus);
/**
* Type-check arguments; throws JSONRPCError if wrong type given. Does not check that
@@ -191,7 +191,6 @@ extern std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKe
extern CAmount AmountFromValue(const UniValue& value);
extern UniValue ValueFromAmount(const CAmount& amount);
-extern double GetDifficulty(const CBlockIndex* blockindex = NULL);
extern std::string HelpExampleCli(const std::string& methodname, const std::string& args);
extern std::string HelpExampleRpc(const std::string& methodname, const std::string& args);
@@ -199,7 +198,6 @@ bool StartRPC();
void InterruptRPC();
void StopRPC();
std::string JSONRPCExecBatch(const UniValue& vReq);
-void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);
// Retrieves any serialization flags requested in command line argument
int RPCSerializationFlags();
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 861c1a0220..0c1cfa2718 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -23,7 +23,9 @@ CScheduler::~CScheduler()
#if BOOST_VERSION < 105000
static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
{
- return boost::posix_time::from_time_t(boost::chrono::system_clock::to_time_t(t));
+ // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
+ // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
+ return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
}
#endif
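The comment added above explains the fix: on Boost older than 1.50 the fallback conversion round-tripped through time_t, which truncates to whole seconds and skews short timers. A standalone illustration of the two conversions (needs Boost.Chrono and Boost.Date_Time; written for this note, not taken from the tree):

    #include <boost/chrono/chrono.hpp>
    #include <boost/date_time/posix_time/posix_time.hpp>
    #include <iostream>

    int main()
    {
        const boost::chrono::system_clock::time_point t = boost::chrono::system_clock::now();

        // Old conversion: round-trips through time_t and silently drops the sub-second part.
        const boost::posix_time::ptime truncated =
            boost::posix_time::from_time_t(boost::chrono::system_clock::to_time_t(t));

        // New conversion: start at the epoch and add the elapsed milliseconds directly.
        const boost::posix_time::ptime precise =
            boost::posix_time::from_time_t(0) +
            boost::posix_time::milliseconds(
                boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());

        std::cout << "truncated: " << boost::posix_time::to_simple_string(truncated) << "\n"
                  << "precise:   " << boost::posix_time::to_simple_string(precise) << std::endl;
    }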
diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp
index 4d6d6d6210..ccd7155627 100644
--- a/src/test/cuckoocache_tests.cpp
+++ b/src/test/cuckoocache_tests.cpp
@@ -60,7 +60,8 @@ BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
{
insecure_rand = FastRandomContext(true);
CuckooCache::cache<uint256, uint256Hasher> cc{};
- cc.setup_bytes(32 << 20);
+ size_t megabytes = 4;
+ cc.setup_bytes(megabytes << 20);
uint256 v;
for (int x = 0; x < 100000; ++x) {
insecure_GetRandHash(v);
@@ -135,7 +136,7 @@ BOOST_AUTO_TEST_CASE(cuckoocache_hit_rate_ok)
* as a lower bound on performance.
*/
double HitRateThresh = 0.98;
- size_t megabytes = 32;
+ size_t megabytes = 4;
for (double load = 0.1; load < 2; load *= 2) {
double hits = test_cache<CuckooCache::cache<uint256, uint256Hasher>>(megabytes, load);
BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh);
@@ -204,7 +205,7 @@ void test_cache_erase(size_t megabytes)
BOOST_AUTO_TEST_CASE(cuckoocache_erase_ok)
{
- size_t megabytes = 32;
+ size_t megabytes = 4;
test_cache_erase<CuckooCache::cache<uint256, uint256Hasher>>(megabytes);
}
@@ -291,7 +292,7 @@ void test_cache_erase_parallel(size_t megabytes)
}
BOOST_AUTO_TEST_CASE(cuckoocache_erase_parallel_ok)
{
- size_t megabytes = 32;
+ size_t megabytes = 4;
test_cache_erase_parallel<CuckooCache::cache<uint256, uint256Hasher>>(megabytes);
}
@@ -342,13 +343,13 @@ void test_cache_generations()
}
};
- const uint32_t BLOCK_SIZE = 10000;
+ const uint32_t BLOCK_SIZE = 1000;
// We expect window size 60 to perform reasonably given that each epoch
// stores 45% of the cache size (~472k).
const uint32_t WINDOW_SIZE = 60;
const uint32_t POP_AMOUNT = (BLOCK_SIZE / WINDOW_SIZE) / 2;
const double load = 10;
- const size_t megabytes = 32;
+ const size_t megabytes = 4;
const size_t bytes = megabytes * (1 << 20);
const uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
diff --git a/src/timedata.cpp b/src/timedata.cpp
index 2ff6437c73..ec74912703 100644
--- a/src/timedata.cpp
+++ b/src/timedata.cpp
@@ -58,7 +58,7 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
// Add data
static CMedianFilter<int64_t> vTimeOffsets(BITCOIN_TIMEDATA_MAX_SAMPLES, 0);
vTimeOffsets.input(nOffsetSample);
- LogPrint("net","added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);
+ LogPrint(BCLog::NET,"added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);
// There is a known issue here (see issue #4521):
//
@@ -108,11 +108,14 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
}
}
}
-
- BOOST_FOREACH(int64_t n, vSorted)
- LogPrint("net", "%+d ", n);
- LogPrint("net", "| ");
-
- LogPrint("net", "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset/60);
+
+ if (LogAcceptCategory(BCLog::NET)) {
+ BOOST_FOREACH(int64_t n, vSorted) {
+ LogPrint(BCLog::NET, "%+d ", n);
+ }
+ LogPrint(BCLog::NET, "| ");
+
+ LogPrint(BCLog::NET, "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset/60);
+ }
}
}
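Each LogPrint already checks its category, but wrapping the whole per-sample dump in one LogAcceptCategory(BCLog::NET) test skips the loop and the string formatting entirely when net logging is off, and avoids emitting a stray separator with nothing around it. The guard pattern on its own (sketched with a range-for rather than the BOOST_FOREACH used in the tree):

    // Check the category once, then do the multi-call dump.
    if (LogAcceptCategory(BCLog::NET)) {
        for (int64_t n : vSorted) {
            LogPrint(BCLog::NET, "%+d ", n);
        }
        LogPrint(BCLog::NET, "| ");
        LogPrint(BCLog::NET, "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset / 60);
    }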
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index d24020e51f..4c88ebe1d5 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -163,7 +163,7 @@ void TorControlConnection::readcb(struct bufferevent *bev, void *ctx)
self->reply_handlers.front()(*self, self->message);
self->reply_handlers.pop_front();
} else {
- LogPrint("tor", "tor: Received unexpected sync reply %i\n", self->message.code);
+ LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code);
}
}
self->message.Clear();
@@ -182,13 +182,14 @@ void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ct
{
TorControlConnection *self = (TorControlConnection*)ctx;
if (what & BEV_EVENT_CONNECTED) {
- LogPrint("tor", "tor: Successfully connected!\n");
+ LogPrint(BCLog::TOR, "tor: Successfully connected!\n");
self->connected(*self);
} else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
- if (what & BEV_EVENT_ERROR)
- LogPrint("tor", "tor: Error connecting to Tor control socket\n");
- else
- LogPrint("tor", "tor: End of stream\n");
+ if (what & BEV_EVENT_ERROR) {
+ LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n");
+ } else {
+ LogPrint(BCLog::TOR, "tor: End of stream\n");
+ }
self->Disconnect();
self->disconnected(*self);
}
@@ -410,7 +411,7 @@ TorController::TorController(struct event_base* _base, const std::string& _targe
// Read service private key if cached
std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile());
if (pkf.first) {
- LogPrint("tor", "tor: Reading cached private key from %s\n", GetPrivateKeyFile());
+ LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile());
private_key = pkf.second;
}
}
@@ -429,7 +430,7 @@ TorController::~TorController()
void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
- LogPrint("tor", "tor: ADD_ONION successful\n");
+ LogPrint(BCLog::TOR, "tor: ADD_ONION successful\n");
BOOST_FOREACH(const std::string &s, reply.lines) {
std::map<std::string,std::string> m = ParseTorReplyMapping(s);
std::map<std::string,std::string>::iterator i;
@@ -441,7 +442,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
service = LookupNumeric(std::string(service_id+".onion").c_str(), GetListenPort());
LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
- LogPrint("tor", "tor: Cached service private key to %s\n", GetPrivateKeyFile());
+ LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile());
} else {
LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile());
}
@@ -457,7 +458,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
- LogPrint("tor", "tor: Authentication successful\n");
+ LogPrint(BCLog::TOR, "tor: Authentication successful\n");
// Now that we know Tor is running setup the proxy for onion addresses
// if -onion isn't set to something else.
@@ -511,13 +512,13 @@ static std::vector<uint8_t> ComputeResponse(const std::string &key, const std::v
void TorController::authchallenge_cb(TorControlConnection& _conn, const TorControlReply& reply)
{
if (reply.code == 250) {
- LogPrint("tor", "tor: SAFECOOKIE authentication challenge successful\n");
+ LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n");
std::pair<std::string,std::string> l = SplitTorReplyLine(reply.lines[0]);
if (l.first == "AUTHCHALLENGE") {
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
std::vector<uint8_t> serverHash = ParseHex(m["SERVERHASH"]);
std::vector<uint8_t> serverNonce = ParseHex(m["SERVERNONCE"]);
- LogPrint("tor", "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
+ LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
if (serverNonce.size() != 32) {
LogPrintf("tor: ServerNonce is not 32 bytes, as required by spec\n");
return;
@@ -562,12 +563,12 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
std::map<std::string,std::string>::iterator i;
if ((i = m.find("Tor")) != m.end()) {
- LogPrint("tor", "tor: Connected to Tor version %s\n", i->second);
+ LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second);
}
}
}
BOOST_FOREACH(const std::string &s, methods) {
- LogPrint("tor", "tor: Supported authentication method: %s\n", s);
+ LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s);
}
// Prefer NULL, otherwise SAFECOOKIE. If a password is provided, use HASHEDPASSWORD
/* Authentication:
@@ -577,18 +578,18 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
std::string torpassword = GetArg("-torpassword", "");
if (!torpassword.empty()) {
if (methods.count("HASHEDPASSWORD")) {
- LogPrint("tor", "tor: Using HASHEDPASSWORD authentication\n");
+ LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n");
boost::replace_all(torpassword, "\"", "\\\"");
_conn.Command("AUTHENTICATE \"" + torpassword + "\"", boost::bind(&TorController::auth_cb, this, _1, _2));
} else {
LogPrintf("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available\n");
}
} else if (methods.count("NULL")) {
- LogPrint("tor", "tor: Using NULL authentication\n");
+ LogPrint(BCLog::TOR, "tor: Using NULL authentication\n");
_conn.Command("AUTHENTICATE", boost::bind(&TorController::auth_cb, this, _1, _2));
} else if (methods.count("SAFECOOKIE")) {
// Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie
- LogPrint("tor", "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
+ LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
std::pair<bool,std::string> status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE);
if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) {
// _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), boost::bind(&TorController::auth_cb, this, _1, _2));
@@ -630,7 +631,7 @@ void TorController::disconnected_cb(TorControlConnection& _conn)
if (!reconnect)
return;
- LogPrint("tor", "tor: Not connected to Tor control port %s, trying to reconnect\n", target);
+ LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", target);
// Single-shot timer for reconnect. Use exponential backoff.
struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0));
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 1a30bb58ad..a3889fdf79 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -63,7 +63,7 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
if (!hashBlock.IsNull())
batch.Write(DB_BEST_BLOCK, hashBlock);
- LogPrint("coindb", "Committing %u changed transactions (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
+ LogPrint(BCLog::COINDB, "Committing %u changed transactions (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
return db.WriteBatch(batch);
}
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index fb58208774..36a046ed2a 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -108,7 +108,7 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendan
// vHashesToUpdate is the set of transaction hashes from a disconnected block
// which has been re-added to the mempool.
-// for each entry, look for descendants that are outside hashesToUpdate, and
+// for each entry, look for descendants that are outside vHashesToUpdate, and
// add fee/size information for such descendants to the parent.
// for each such descendant, also update the ancestor state to include the parent.
void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate)
@@ -634,7 +634,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
if (GetRand(std::numeric_limits<uint32_t>::max()) >= nCheckFrequency)
return;
- LogPrint("mempool", "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
+ LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
uint64_t checkTotal = 0;
uint64_t innerUsage = 0;
@@ -1108,8 +1108,9 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<uint256>* pvNoSpendsRe
}
}
- if (maxFeeRateRemoved > CFeeRate(0))
- LogPrint("mempool", "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString());
+ if (maxFeeRateRemoved > CFeeRate(0)) {
+ LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString());
+ }
}
bool CTxMemPool::TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const {
diff --git a/src/txmempool.h b/src/txmempool.h
index f9a9d088d0..4222789510 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -552,12 +552,12 @@ public:
* new mempool entries may have children in the mempool (which is generally
* not the case when otherwise adding transactions).
* UpdateTransactionsFromBlock() will find child transactions and update the
- * descendant state for each transaction in hashesToUpdate (excluding any
- * child transactions present in hashesToUpdate, which are already accounted
- * for). Note: hashesToUpdate should be the set of transactions from the
+ * descendant state for each transaction in vHashesToUpdate (excluding any
+ * child transactions present in vHashesToUpdate, which are already accounted
+ * for). Note: vHashesToUpdate should be the set of transactions from the
* disconnected block that have been accepted back into the mempool.
*/
- void UpdateTransactionsFromBlock(const std::vector<uint256> &hashesToUpdate);
+ void UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate);
/** Try to calculate all in-mempool ancestors of entry.
* (these are all calculated including the tx itself)
diff --git a/src/util.cpp b/src/util.cpp
index 694fb184c1..db45ad1626 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -72,6 +72,10 @@
#include <sys/prctl.h>
#endif
+#ifdef HAVE_MALLOPT_ARENA_MAX
+#include <malloc.h>
+#endif
+
#include <boost/algorithm/string/case_conv.hpp> // for to_lower()
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/predicate.hpp> // for startswith() and endswith()
@@ -106,7 +110,6 @@ CCriticalSection cs_args;
std::map<std::string, std::string> mapArgs;
static std::map<std::string, std::vector<std::string> > _mapMultiArgs;
const std::map<std::string, std::vector<std::string> >& mapMultiArgs = _mapMultiArgs;
-bool fDebug = false;
bool fPrintToConsole = false;
bool fPrintToDebugLog = true;
@@ -116,6 +119,9 @@ bool fLogIPs = DEFAULT_LOGIPS;
std::atomic<bool> fReopenDebugLog(false);
CTranslationInterface translationInterface;
+/** Log categories bitfield. Leveldb/libevent need special handling if their flags are changed at runtime. */
+std::atomic<uint32_t> logCategories(0);
+
/** Init OpenSSL library multithreading support */
static std::unique_ptr<CCriticalSection[]> ppmutexOpenSSL;
void locking_callback(int mode, int i, const char* file, int line) NO_THREAD_SAFETY_ANALYSIS
@@ -224,36 +230,70 @@ void OpenDebugLog()
vMsgsBeforeOpenLog = NULL;
}
-bool LogAcceptCategory(const char* category)
-{
- if (category != NULL)
- {
- if (!fDebug)
- return false;
-
- // Give each thread quick access to -debug settings.
- // This helps prevent issues debugging global destructors,
- // where mapMultiArgs might be deleted before another
- // global destructor calls LogPrint()
- static boost::thread_specific_ptr<std::set<std::string> > ptrCategory;
- if (ptrCategory.get() == NULL)
- {
- if (mapMultiArgs.count("-debug")) {
- const std::vector<std::string>& categories = mapMultiArgs.at("-debug");
- ptrCategory.reset(new std::set<std::string>(categories.begin(), categories.end()));
- // thread_specific_ptr automatically deletes the set when the thread ends.
- } else
- ptrCategory.reset(new std::set<std::string>());
+struct CLogCategoryDesc
+{
+ uint32_t flag;
+ std::string category;
+};
+
+const CLogCategoryDesc LogCategories[] =
+{
+ {BCLog::NONE, "0"},
+ {BCLog::NET, "net"},
+ {BCLog::TOR, "tor"},
+ {BCLog::MEMPOOL, "mempool"},
+ {BCLog::HTTP, "http"},
+ {BCLog::BENCH, "bench"},
+ {BCLog::ZMQ, "zmq"},
+ {BCLog::DB, "db"},
+ {BCLog::RPC, "rpc"},
+ {BCLog::ESTIMATEFEE, "estimatefee"},
+ {BCLog::ADDRMAN, "addrman"},
+ {BCLog::SELECTCOINS, "selectcoins"},
+ {BCLog::REINDEX, "reindex"},
+ {BCLog::CMPCTBLOCK, "cmpctblock"},
+ {BCLog::RAND, "rand"},
+ {BCLog::PRUNE, "prune"},
+ {BCLog::PROXY, "proxy"},
+ {BCLog::MEMPOOLREJ, "mempoolrej"},
+ {BCLog::LIBEVENT, "libevent"},
+ {BCLog::COINDB, "coindb"},
+ {BCLog::QT, "qt"},
+ {BCLog::LEVELDB, "leveldb"},
+ {BCLog::ALL, "1"},
+ {BCLog::ALL, "all"},
+};
+
+bool GetLogCategory(uint32_t *f, const std::string *str)
+{
+ if (f && str) {
+ if (*str == "") {
+ *f = BCLog::ALL;
+ return true;
+ }
+ for (unsigned int i = 0; i < ARRAYLEN(LogCategories); i++) {
+ if (LogCategories[i].category == *str) {
+ *f = LogCategories[i].flag;
+ return true;
+ }
}
- const std::set<std::string>& setCategories = *ptrCategory;
+ }
+ return false;
+}
- // if not debugging everything and not debugging specific category, LogPrint does nothing.
- if (setCategories.count(std::string("")) == 0 &&
- setCategories.count(std::string("1")) == 0 &&
- setCategories.count(std::string(category)) == 0)
- return false;
+std::string ListLogCategories()
+{
+ std::string ret;
+ int outcount = 0;
+ for (unsigned int i = 0; i < ARRAYLEN(LogCategories); i++) {
+ // Omit the special cases.
+ if (LogCategories[i].flag != BCLog::NONE && LogCategories[i].flag != BCLog::ALL) {
+ if (outcount != 0) ret += ", ";
+ ret += LogCategories[i].category;
+ outcount++;
+ }
}
- return true;
+ return ret;
}
/**
@@ -789,6 +829,16 @@ void RenameThread(const char* name)
void SetupEnvironment()
{
+#ifdef HAVE_MALLOPT_ARENA_MAX
+ // glibc-specific: On 32-bit systems set the number of arenas to 1.
+ // By default, since glibc 2.10, the C library will create up to two heap
+ // arenas per core. This is known to cause excessive virtual address space
+ // usage in our usage. Work around it by setting the maximum number of
+ // arenas to 1.
+ if (sizeof(void*) == 4) {
+ mallopt(M_ARENA_MAX, 1);
+ }
+#endif
// On most POSIX systems (e.g. Linux, but not BSD) the environment's locale
// may be invalid, in which case the "C" locale is used as fallback.
#if !defined(WIN32) && !defined(MAC_OSX) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
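GetLogCategory and ListLogCategories replace the old per-thread std::set of -debug strings: each requested category now maps to one bit, and startup code can OR the bits into logCategories. A standalone sketch of that mapping; the flag values copy the util.h hunk below, the table is shortened, and the helper is renamed GetLogCategoryFlag and takes a reference (the real GetLogCategory takes a pointer), so this is not the exact init-time code:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    namespace BCLog { enum LogFlags : uint32_t {
        NONE = 0, NET = (1 << 0), TOR = (1 << 1), MEMPOOL = (1 << 2), ALL = ~(uint32_t)0 }; }

    static std::atomic<uint32_t> logCategories(0);

    // Map a -debug=<category> value to its flag; "", "1" and "all" enable everything.
    static bool GetLogCategoryFlag(uint32_t* f, const std::string& str)
    {
        struct { uint32_t flag; const char* name; } const table[] = {
            {BCLog::NET, "net"}, {BCLog::TOR, "tor"}, {BCLog::MEMPOOL, "mempool"},
            {BCLog::ALL, "1"}, {BCLog::ALL, "all"},
        };
        if (str.empty()) { *f = BCLog::ALL; return true; }
        for (const auto& entry : table) {
            if (str == entry.name) { *f = entry.flag; return true; }
        }
        return false;
    }

    int main()
    {
        const std::vector<std::string> debugArgs = {"net", "mempool"};  // e.g. -debug=net -debug=mempool
        for (const std::string& cat : debugArgs) {
            uint32_t flag = BCLog::NONE;
            if (GetLogCategoryFlag(&flag, cat)) {
                logCategories |= flag;
            } else {
                std::fprintf(stderr, "Unsupported logging category: %s\n", cat.c_str());
            }
        }
        std::printf("logCategories = 0x%08x\n", (unsigned)logCategories.load());
    }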
diff --git a/src/util.h b/src/util.h
index 61dc0d366c..09481bc01c 100644
--- a/src/util.h
+++ b/src/util.h
@@ -42,7 +42,6 @@ public:
};
extern const std::map<std::string, std::vector<std::string> >& mapMultiArgs;
-extern bool fDebug;
extern bool fPrintToConsole;
extern bool fPrintToDebugLog;
@@ -55,6 +54,8 @@ extern CTranslationInterface translationInterface;
extern const char * const BITCOIN_CONF_FILENAME;
extern const char * const BITCOIN_PID_FILENAME;
+extern std::atomic<uint32_t> logCategories;
+
/**
* Translation function: Call Translate signal on UI interface, which returns a boost::optional result.
* If no translation slot is registered, nothing is returned, and simply return the input.
@@ -68,8 +69,45 @@ inline std::string _(const char* psz)
void SetupEnvironment();
bool SetupNetworking();
+namespace BCLog {
+ enum LogFlags : uint32_t {
+ NONE = 0,
+ NET = (1 << 0),
+ TOR = (1 << 1),
+ MEMPOOL = (1 << 2),
+ HTTP = (1 << 3),
+ BENCH = (1 << 4),
+ ZMQ = (1 << 5),
+ DB = (1 << 6),
+ RPC = (1 << 7),
+ ESTIMATEFEE = (1 << 8),
+ ADDRMAN = (1 << 9),
+ SELECTCOINS = (1 << 10),
+ REINDEX = (1 << 11),
+ CMPCTBLOCK = (1 << 12),
+ RAND = (1 << 13),
+ PRUNE = (1 << 14),
+ PROXY = (1 << 15),
+ MEMPOOLREJ = (1 << 16),
+ LIBEVENT = (1 << 17),
+ COINDB = (1 << 18),
+ QT = (1 << 19),
+ LEVELDB = (1 << 20),
+ ALL = ~(uint32_t)0,
+ };
+}
/** Return true if log accepts specified category */
-bool LogAcceptCategory(const char* category);
+static inline bool LogAcceptCategory(uint32_t category)
+{
+ return (logCategories.load(std::memory_order_relaxed) & category) != 0;
+}
+
+/** Returns a string with the supported log categories */
+std::string ListLogCategories();
+
+/** Return true if str parses as a log category and set the flags in f */
+bool GetLogCategory(uint32_t *f, const std::string *str);
+
/** Send a string to the log output */
int LogPrintStr(const std::string &str);
diff --git a/src/validation.cpp b/src/validation.cpp
index 037b9c0abc..6ade633988 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -540,8 +540,9 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fChe
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
int expired = pool.Expire(GetTime() - age);
- if (expired != 0)
- LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);
+ if (expired != 0) {
+ LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
+ }
std::vector<uint256> vNoSpendsRemaining;
pool.TrimToSize(limit, &vNoSpendsRemaining);
@@ -955,7 +956,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
// Remove conflicting transactions from the mempool
BOOST_FOREACH(const CTxMemPool::txiter it, allConflicting)
{
- LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
+ LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
it->GetTx().GetHash().ToString(),
hash.ToString(),
FormatMoney(nModifiedFees - nConflictingFees),
@@ -1763,7 +1764,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
}
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
- LogPrint("bench", " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
+ LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
@@ -1830,7 +1831,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
}
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
- LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
+ LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
CBlockUndo blockundo;
@@ -1904,7 +1905,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
- LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
+ LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward)
@@ -1916,7 +1917,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
if (!control.Wait())
return state.DoS(100, false);
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
- LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
+ LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
if (fJustCheck)
return true;
@@ -1948,7 +1949,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
- LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
+ LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
// Watch for changes to the previous coinbase transaction.
static uint256 hashPrevBestCoinBase;
@@ -1957,7 +1958,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
- LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
+ LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
return true;
}
@@ -2005,7 +2006,7 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode, int n
nLastSetChain = nNow;
}
int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
+ int64_t cacheSize = pcoinsTip->DynamicMemoryUsage() * 2; // Compensate for extra memory peak (x1.5-x1.9) at flush time.
int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
// The cache is large and we're within 10% and 100 MiB of the limit, but we have time now (not in the middle of a block processing).
bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - 100 * 1024 * 1024);
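Note: the doubled cacheSize feeds straight into the fCacheLarge test on the line above. A small worked example, using hypothetical numbers, of when a periodic flush now counts as "large":

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Hypothetical numbers, purely for illustration.
        int64_t nCoinCacheUsage  = 300 * 1024 * 1024; // coins-cache budget (-dbcache)
        int64_t nMempoolHeadroom = 0;                 // unused -maxmempool space
        int64_t usage            = 140 * 1024 * 1024; // pcoinsTip->DynamicMemoryUsage()

        int64_t cacheSize   = usage * 2; // compensate for the flush-time memory peak, as in the patch
        int64_t nTotalSpace = nCoinCacheUsage + nMempoolHeadroom;
        int64_t threshold   = std::max((9 * nTotalSpace) / 10, nTotalSpace - 100 * 1024 * 1024);

        // With these numbers: threshold = max(270 MiB, 200 MiB) = 270 MiB,
        // and cacheSize = 280 MiB, so the cache counts as "large" and is flushed.
        std::printf("flush as large cache: %s\n", cacheSize > threshold ? "yes" : "no");
        return 0;
    }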
@@ -2164,7 +2165,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
bool flushed = view.Flush();
assert(flushed);
}
- LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
+ LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
@@ -2239,7 +2240,7 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
- LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
+ LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
{
CCoinsViewCache view(pcoinsTip);
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
@@ -2250,25 +2251,25 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
}
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
- LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
+ LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
bool flushed = view.Flush();
assert(flushed);
}
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
- LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
+ LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
- LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
+ LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
// Remove conflicting transactions from the mempool.;
mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
// Update chainActive & related variables.
UpdateTip(pindexNew, chainparams);
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
- LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
- LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
+ LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
+ LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
return true;
}
@@ -3391,7 +3392,7 @@ void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight
}
}
- LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+ LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
nLastBlockWeCanPrune, count);
@@ -3885,7 +3886,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
// detect out of order blocks, and store them for later
uint256 hash = block.GetHash();
if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
- LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
+ LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
block.hashPrevBlock.ToString());
if (dbp)
mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
@@ -3901,7 +3902,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
if (state.IsError())
break;
} else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
- LogPrint("reindex", "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
+ LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
}
// Activate the genesis block so normal node progress can continue
@@ -3926,7 +3927,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
{
- LogPrint("reindex", "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
+ LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
CValidationState dummy;
diff --git a/src/validation.h b/src/validation.h
index 43f0dbae34..5f8e9a689c 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -314,7 +314,7 @@ void FlushStateToDisk();
/** Prune block files and flush state to disk. */
void PruneAndFlush();
/** Prune block files up to a given height */
-void PruneBlockFilesManual(int nPruneUpToHeight);
+void PruneBlockFilesManual(int nManualPruneHeight);
/** (try to) add transaction to memory pool
* plTxnReplaced will be appended to with all transactions replaced from mempool **/
diff --git a/src/validationinterface.h b/src/validationinterface.h
index a494eb6990..7f13a29d23 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -69,7 +69,12 @@ struct CMainSignals {
boost::signals2::signal<void (const uint256 &)> Inventory;
/** Tells listeners to broadcast their data. */
boost::signals2::signal<void (int64_t nBestBlockTime, CConnman* connman)> Broadcast;
- /** Notifies listeners of a block validation result */
+ /**
+ * Notifies listeners of a block validation result.
+ * If the provided CValidationState IsValid, the provided block
+ * is guaranteed to be the current best block at the time the
+ * callback was generated (not necessarily now)
+ */
boost::signals2::signal<void (const CBlock&, const CValidationState&)> BlockChecked;
/** Notifies listeners that a key for mining is required (coinbase) */
boost::signals2::signal<void (boost::shared_ptr<CReserveScript>&)> ScriptForMining;
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index 0801bd7300..d3333bf1ab 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -118,7 +118,7 @@ void CDBEnv::MakeMock()
boost::this_thread::interruption_point();
- LogPrint("db", "CDBEnv::MakeMock\n");
+ LogPrint(BCLog::DB, "CDBEnv::MakeMock\n");
dbenv->set_cachesize(1, 0, 1);
dbenv->set_lg_bsize(10485760 * 4);
@@ -560,7 +560,7 @@ void CDBEnv::Flush(bool fShutdown)
{
int64_t nStart = GetTimeMillis();
// Flush log data to the actual data file on all files that are not in use
- LogPrint("db", "CDBEnv::Flush: Flush(%s)%s\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
+ LogPrint(BCLog::DB, "CDBEnv::Flush: Flush(%s)%s\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
if (!fDbEnvInit)
return;
{
@@ -569,21 +569,21 @@ void CDBEnv::Flush(bool fShutdown)
while (mi != mapFileUseCount.end()) {
std::string strFile = (*mi).first;
int nRefCount = (*mi).second;
- LogPrint("db", "CDBEnv::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
+ LogPrint(BCLog::DB, "CDBEnv::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
if (nRefCount == 0) {
// Move log data to the dat file
CloseDb(strFile);
- LogPrint("db", "CDBEnv::Flush: %s checkpoint\n", strFile);
+ LogPrint(BCLog::DB, "CDBEnv::Flush: %s checkpoint\n", strFile);
dbenv->txn_checkpoint(0, 0, 0);
- LogPrint("db", "CDBEnv::Flush: %s detach\n", strFile);
+ LogPrint(BCLog::DB, "CDBEnv::Flush: %s detach\n", strFile);
if (!fMockDb)
dbenv->lsn_reset(strFile.c_str(), 0);
- LogPrint("db", "CDBEnv::Flush: %s closed\n", strFile);
+ LogPrint(BCLog::DB, "CDBEnv::Flush: %s closed\n", strFile);
mapFileUseCount.erase(mi++);
} else
mi++;
}
- LogPrint("db", "CDBEnv::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
+ LogPrint(BCLog::DB, "CDBEnv::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
if (fShutdown) {
char** listp;
if (mapFileUseCount.empty()) {
@@ -617,7 +617,7 @@ bool CDB::PeriodicFlush(std::string strFile)
std::map<std::string, int>::iterator mi = bitdb.mapFileUseCount.find(strFile);
if (mi != bitdb.mapFileUseCount.end())
{
- LogPrint("db", "Flushing %s\n", strFile);
+ LogPrint(BCLog::DB, "Flushing %s\n", strFile);
int64_t nStart = GetTimeMillis();
// Flush wallet file so it's self contained
@@ -625,7 +625,7 @@ bool CDB::PeriodicFlush(std::string strFile)
bitdb.CheckpointLSN(strFile);
bitdb.mapFileUseCount.erase(mi++);
- LogPrint("db", "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
+ LogPrint(BCLog::DB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
ret = true;
}
}
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 84e7eb60d7..ccb744c6b2 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -221,7 +221,7 @@ UniValue getrawchangeaddress(const JSONRPCRequest& request)
CReserveKey reservekey(pwallet);
CPubKey vchPubKey;
- if (!reservekey.GetReservedKey(vchPubKey))
+ if (!reservekey.GetReservedKey(vchPubKey, true))
throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, "Error: Keypool ran out, please call keypoolrefill first");
reservekey.KeepKey();
@@ -1852,7 +1852,7 @@ UniValue gettransaction(const JSONRPCRequest& request)
" \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
" 'send' category of transactions.\n"
" \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
- " 'send' category of transactions.\n"
+ " 'send' category of transactions.\n"
" }\n"
" ,...\n"
" ],\n"
@@ -2418,16 +2418,17 @@ UniValue getwalletinfo(const JSONRPCRequest& request)
"Returns an object containing various wallet state info.\n"
"\nResult:\n"
"{\n"
- " \"walletversion\": xxxxx, (numeric) the wallet version\n"
- " \"balance\": xxxxxxx, (numeric) the total confirmed balance of the wallet in " + CURRENCY_UNIT + "\n"
- " \"unconfirmed_balance\": xxx, (numeric) the total unconfirmed balance of the wallet in " + CURRENCY_UNIT + "\n"
- " \"immature_balance\": xxxxxx, (numeric) the total immature balance of the wallet in " + CURRENCY_UNIT + "\n"
- " \"txcount\": xxxxxxx, (numeric) the total number of transactions in the wallet\n"
- " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since Unix epoch) of the oldest pre-generated key in the key pool\n"
- " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated\n"
- " \"unlocked_until\": ttt, (numeric) the timestamp in seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is unlocked for transfers, or 0 if the wallet is locked\n"
- " \"paytxfee\": x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n"
- " \"hdmasterkeyid\": \"<hash160>\" (string) the Hash160 of the HD master pubkey\n"
+ " \"walletversion\": xxxxx, (numeric) the wallet version\n"
+ " \"balance\": xxxxxxx, (numeric) the total confirmed balance of the wallet in " + CURRENCY_UNIT + "\n"
+ " \"unconfirmed_balance\": xxx, (numeric) the total unconfirmed balance of the wallet in " + CURRENCY_UNIT + "\n"
+ " \"immature_balance\": xxxxxx, (numeric) the total immature balance of the wallet in " + CURRENCY_UNIT + "\n"
+ " \"txcount\": xxxxxxx, (numeric) the total number of transactions in the wallet\n"
+ " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds since Unix epoch) of the oldest pre-generated key in the key pool\n"
+ " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated (only counts external keys)\n"
+ " \"keypoolsize_hd_internal\": xxxx, (numeric) how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)\n"
+ " \"unlocked_until\": ttt, (numeric) the timestamp in seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is unlocked for transfers, or 0 if the wallet is locked\n"
+ " \"paytxfee\": x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n"
+ " \"hdmasterkeyid\": \"<hash160>\" (string) the Hash160 of the HD master pubkey\n"
"}\n"
"\nExamples:\n"
+ HelpExampleCli("getwalletinfo", "")
@@ -2437,18 +2438,23 @@ UniValue getwalletinfo(const JSONRPCRequest& request)
LOCK2(cs_main, pwallet->cs_wallet);
UniValue obj(UniValue::VOBJ);
+
+ size_t kpExternalSize = pwallet->KeypoolCountExternalKeys();
obj.push_back(Pair("walletversion", pwallet->GetVersion()));
obj.push_back(Pair("balance", ValueFromAmount(pwallet->GetBalance())));
obj.push_back(Pair("unconfirmed_balance", ValueFromAmount(pwallet->GetUnconfirmedBalance())));
obj.push_back(Pair("immature_balance", ValueFromAmount(pwallet->GetImmatureBalance())));
obj.push_back(Pair("txcount", (int)pwallet->mapWallet.size()));
obj.push_back(Pair("keypoololdest", pwallet->GetOldestKeyPoolTime()));
- obj.push_back(Pair("keypoolsize", (int)pwallet->GetKeyPoolSize()));
+ obj.push_back(Pair("keypoolsize", (int64_t)kpExternalSize));
+ CKeyID masterKeyID = pwallet->GetHDChain().masterKeyID;
+ if (!masterKeyID.IsNull() && pwallet->CanSupportFeature(FEATURE_HD_SPLIT)) {
+ obj.push_back(Pair("keypoolsize_hd_internal", (int64_t)(pwallet->GetKeyPoolSize() - kpExternalSize)));
+ }
if (pwallet->IsCrypted()) {
obj.push_back(Pair("unlocked_until", pwallet->nRelockTime));
}
obj.push_back(Pair("paytxfee", ValueFromAmount(payTxFee.GetFeePerK())));
- CKeyID masterKeyID = pwallet->GetHDChain().masterKeyID;
if (!masterKeyID.IsNull())
obj.push_back(Pair("hdmasterkeyid", masterKeyID.GetHex()));
return obj;
@@ -3022,7 +3028,7 @@ UniValue bumpfee(const JSONRPCRequest& request)
// If the output would become dust, discard it (converting the dust to fee)
poutput->nValue -= nDelta;
if (poutput->nValue <= poutput->GetDustThreshold(::dustRelayFee)) {
- LogPrint("rpc", "Bumping fee and discarding dust output\n");
+ LogPrint(BCLog::RPC, "Bumping fee and discarding dust output\n");
nNewFee += poutput->nValue;
tx.vout.erase(tx.vout.begin() + nOutput);
}
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 445e40b043..68d4bc35ee 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -85,7 +85,7 @@ const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const
return &(it->second);
}
-CPubKey CWallet::GenerateNewKey()
+CPubKey CWallet::GenerateNewKey(bool internal)
{
AssertLockHeld(cs_wallet); // mapKeyMetadata
bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets
@@ -98,7 +98,7 @@ CPubKey CWallet::GenerateNewKey()
// use HD key derivation if HD was enabled during wallet creation
if (IsHDEnabled()) {
- DeriveNewChildKey(metadata, secret);
+ DeriveNewChildKey(metadata, secret, (CanSupportFeature(FEATURE_HD_SPLIT) ? internal : false));
} else {
secret.MakeNewKey(fCompressed);
}
@@ -118,13 +118,13 @@ CPubKey CWallet::GenerateNewKey()
return pubkey;
}
-void CWallet::DeriveNewChildKey(CKeyMetadata& metadata, CKey& secret)
+void CWallet::DeriveNewChildKey(CKeyMetadata& metadata, CKey& secret, bool internal)
{
// for now we use a fixed keypath scheme of m/0'/0'/k
CKey key; //master key seed (256bit)
CExtKey masterKey; //hd master key
CExtKey accountKey; //key at m/0'
- CExtKey externalChainChildKey; //key at m/0'/0'
+ CExtKey chainChildKey; //key at m/0'/0' (external) or m/0'/1' (internal)
CExtKey childKey; //key at m/0'/0'/<n>'
// try to get the master key
@@ -137,22 +137,28 @@ void CWallet::DeriveNewChildKey(CKeyMetadata& metadata, CKey& secret)
// use hardened derivation (child keys >= 0x80000000 are hardened after bip32)
masterKey.Derive(accountKey, BIP32_HARDENED_KEY_LIMIT);
- // derive m/0'/0'
- accountKey.Derive(externalChainChildKey, BIP32_HARDENED_KEY_LIMIT);
+ // derive m/0'/0' (external chain) OR m/0'/1' (internal chain)
+ assert(internal ? CanSupportFeature(FEATURE_HD_SPLIT) : true);
+ accountKey.Derive(chainChildKey, BIP32_HARDENED_KEY_LIMIT+(internal ? 1 : 0));
// derive child key at next index, skip keys already known to the wallet
do {
// always derive hardened keys
// childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened child-index-range
// example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
- externalChainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- metadata.hdKeypath = "m/0'/0'/" + std::to_string(hdChain.nExternalChainCounter) + "'";
- metadata.hdMasterKeyID = hdChain.masterKeyID;
- // increment childkey index
- hdChain.nExternalChainCounter++;
+ if (internal) {
+ chainChildKey.Derive(childKey, hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ metadata.hdKeypath = "m/0'/1'/" + std::to_string(hdChain.nInternalChainCounter) + "'";
+ hdChain.nInternalChainCounter++;
+ }
+ else {
+ chainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
+ metadata.hdKeypath = "m/0'/0'/" + std::to_string(hdChain.nExternalChainCounter) + "'";
+ hdChain.nExternalChainCounter++;
+ }
} while (HaveKey(childKey.key.GetPubKey().GetID()));
secret = childKey.key;
-
+ metadata.hdMasterKeyID = hdChain.masterKeyID;
// update the chain model in the database
if (!CWalletDB(strWalletFile).WriteHDChain(hdChain))
throw std::runtime_error(std::string(__func__) + ": Writing HD chain model failed");
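Note: with the chain split, change keys are derived on a separate internal chain: external keys stay at m/0'/0'/k' while internal keys use m/0'/1'/k'. A reduced sketch of just the keypath bookkeeping (the counters and the path format mirror the code above; the rest is illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Illustrative stand-in for the two chain counters kept in CHDChain.
    struct ChainCounters {
        uint32_t nExternalChainCounter = 0;
        uint32_t nInternalChainCounter = 0;
    };

    // Build the hardened keypath used in the patch and bump the matching counter.
    std::string NextKeypath(ChainCounters& c, bool internal)
    {
        if (internal)
            return "m/0'/1'/" + std::to_string(c.nInternalChainCounter++) + "'";
        return "m/0'/0'/" + std::to_string(c.nExternalChainCounter++) + "'";
    }

    int main()
    {
        ChainCounters c;
        std::printf("%s\n", NextKeypath(c, false).c_str()); // m/0'/0'/0'  (receive)
        std::printf("%s\n", NextKeypath(c, true).c_str());  // m/0'/1'/0'  (change)
        std::printf("%s\n", NextKeypath(c, true).c_str());  // m/0'/1'/1'
        return 0;
    }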
@@ -633,7 +639,9 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
if (IsHDEnabled()) {
CKey key;
CPubKey masterPubKey = GenerateNewHDMasterKey();
- if (!SetHDMasterKey(masterPubKey))
+ // preserve the old chain's version to not break backward compatibility
+ CHDChain oldChain = GetHDChain();
+ if (!SetHDMasterKey(masterPubKey, &oldChain))
return false;
}
@@ -799,7 +807,7 @@ bool CWallet::GetAccountPubkey(CPubKey &pubKey, std::string strAccount, bool bFo
// Generate a new key
if (bForceNew) {
- if (!GetKeyFromPool(account.vchPubKey))
+ if (!GetKeyFromPool(account.vchPubKey, false))
return false;
SetAddressBook(account.vchPubKey.GetID(), strAccount, "receive");
@@ -1300,17 +1308,17 @@ CPubKey CWallet::GenerateNewHDMasterKey()
return pubkey;
}
-bool CWallet::SetHDMasterKey(const CPubKey& pubkey)
+bool CWallet::SetHDMasterKey(const CPubKey& pubkey, CHDChain *possibleOldChain)
{
LOCK(cs_wallet);
-
- // ensure this wallet.dat can only be opened by clients supporting HD
- SetMinVersion(FEATURE_HD);
-
// store the keyid (hash160) together with
// the child index counter in the database
// as a hdchain object
CHDChain newHdChain;
+ if (possibleOldChain) {
+ // preserve the old chain's version
+ newHdChain.nVersion = possibleOldChain->nVersion;
+ }
newHdChain.masterKeyID = pubkey.GetID();
SetHDChain(newHdChain, false);
@@ -2023,7 +2031,7 @@ void CWallet::AvailableCoins(std::vector<COutput>& vCoins, bool fOnlySafe, const
}
}
-static void ApproximateBestSubset(std::vector<std::pair<CAmount, std::pair<const CWalletTx*,unsigned int> > >vValue, const CAmount& nTotalLower, const CAmount& nTargetValue,
+static void ApproximateBestSubset(const std::vector<std::pair<CAmount, std::pair<const CWalletTx*,unsigned int> > >& vValue, const CAmount& nTotalLower, const CAmount& nTargetValue,
std::vector<char>& vfBest, CAmount& nBest, int iterations = 1000)
{
std::vector<char> vfIncluded;
@@ -2164,11 +2172,15 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const int nConfMin
nValueRet += vValue[i].first;
}
- LogPrint("selectcoins", "SelectCoins() best subset: ");
- for (unsigned int i = 0; i < vValue.size(); i++)
- if (vfBest[i])
- LogPrint("selectcoins", "%s ", FormatMoney(vValue[i].first));
- LogPrint("selectcoins", "total %s\n", FormatMoney(nBest));
+ if (LogAcceptCategory(BCLog::SELECTCOINS)) {
+ LogPrint(BCLog::SELECTCOINS, "SelectCoins() best subset: ");
+ for (unsigned int i = 0; i < vValue.size(); i++) {
+ if (vfBest[i]) {
+ LogPrint(BCLog::SELECTCOINS, "%s ", FormatMoney(vValue[i].first));
+ }
+ }
+ LogPrint(BCLog::SELECTCOINS, "total %s\n", FormatMoney(nBest));
+ }
}
return true;
@@ -2445,7 +2457,7 @@ bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CWalletT
// Reserve a new key pair from key pool
CPubKey vchPubKey;
bool ret;
- ret = reservekey.GetReservedKey(vchPubKey);
+ ret = reservekey.GetReservedKey(vchPubKey, true);
if (!ret)
{
strFailReason = _("Keypool ran out, please call keypoolrefill first");
@@ -2885,7 +2897,7 @@ bool CWallet::SetDefaultKey(const CPubKey &vchPubKey)
/**
* Mark old keypool keys as used,
- * and generate all new keys
+ * and generate all new keys
*/
bool CWallet::NewKeyPool()
{
@@ -2896,21 +2908,37 @@ bool CWallet::NewKeyPool()
walletdb.ErasePool(nIndex);
setKeyPool.clear();
- if (IsLocked())
+ if (!TopUpKeyPool()) {
return false;
-
- int64_t nKeys = std::max(GetArg("-keypool", DEFAULT_KEYPOOL_SIZE), (int64_t)0);
- for (int i = 0; i < nKeys; i++)
- {
- int64_t nIndex = i+1;
- walletdb.WritePool(nIndex, CKeyPool(GenerateNewKey()));
- setKeyPool.insert(nIndex);
}
- LogPrintf("CWallet::NewKeyPool wrote %d new keys\n", nKeys);
+ LogPrintf("CWallet::NewKeyPool rewrote keypool\n");
}
return true;
}
+size_t CWallet::KeypoolCountExternalKeys()
+{
+ AssertLockHeld(cs_wallet); // setKeyPool
+
+ // immediately return setKeyPool's size if HD or HD_SPLIT is disabled or not supported
+ if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT))
+ return setKeyPool.size();
+
+ CWalletDB walletdb(strWalletFile);
+
+ // count the number of external keys
+ size_t amountE = 0;
+ for(const int64_t& id : setKeyPool)
+ {
+ CKeyPool tmpKeypool;
+ if (!walletdb.ReadPool(id, tmpKeypool))
+ throw std::runtime_error(std::string(__func__) + ": read failed");
+ amountE += !tmpKeypool.fInternal;
+ }
+
+ return amountE;
+}
+
bool CWallet::TopUpKeyPool(unsigned int kpSize)
{
{
@@ -2919,8 +2947,6 @@ bool CWallet::TopUpKeyPool(unsigned int kpSize)
if (IsLocked())
return false;
- CWalletDB walletdb(strWalletFile);
-
// Top up key pool
unsigned int nTargetSize;
if (kpSize > 0)
@@ -2928,21 +2954,37 @@ bool CWallet::TopUpKeyPool(unsigned int kpSize)
else
nTargetSize = std::max(GetArg("-keypool", DEFAULT_KEYPOOL_SIZE), (int64_t) 0);
- while (setKeyPool.size() < (nTargetSize + 1))
+ // count the number of available keys (internal, external)
+ // make sure the keypool of external and internal keys fits the user-selected target (-keypool)
+ int64_t amountExternal = KeypoolCountExternalKeys();
+ int64_t amountInternal = setKeyPool.size() - amountExternal;
+ int64_t missingExternal = std::max(std::max((int64_t) nTargetSize, (int64_t) 1) - amountExternal, (int64_t) 0);
+ int64_t missingInternal = std::max(std::max((int64_t) nTargetSize, (int64_t) 1) - amountInternal, (int64_t) 0);
+
+ if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT))
+ {
+ // don't create extra internal keys
+ missingInternal = 0;
+ }
+ bool internal = false;
+ CWalletDB walletdb(strWalletFile);
+ for (int64_t i = missingInternal + missingExternal; i--;)
{
int64_t nEnd = 1;
+ if (i < missingInternal)
+ internal = true;
if (!setKeyPool.empty())
nEnd = *(--setKeyPool.end()) + 1;
- if (!walletdb.WritePool(nEnd, CKeyPool(GenerateNewKey())))
+ if (!walletdb.WritePool(nEnd, CKeyPool(GenerateNewKey(internal), internal)))
throw std::runtime_error(std::string(__func__) + ": writing generated key failed");
setKeyPool.insert(nEnd);
- LogPrintf("keypool added key %d, size=%u\n", nEnd, setKeyPool.size());
+ LogPrintf("keypool added key %d, size=%u, internal=%d\n", nEnd, setKeyPool.size(), internal);
}
}
return true;
}
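Note: the loop above tops up the external and internal pools independently toward the same -keypool target; it walks i downward and only switches to internal keys once i drops below missingInternal, so external keys are generated first. A worked example with hypothetical counts:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Hypothetical state, for illustration only.
        int64_t nTargetSize    = 100; // -keypool
        int64_t amountExternal = 40;  // external keys already in the pool
        int64_t amountInternal = 10;  // internal (change) keys already in the pool

        int64_t missingExternal = std::max(std::max(nTargetSize, (int64_t)1) - amountExternal, (int64_t)0);
        int64_t missingInternal = std::max(std::max(nTargetSize, (int64_t)1) - amountInternal, (int64_t)0);

        // 60 external and 90 internal keys would be generated in this example.
        std::printf("missing external=%lld internal=%lld\n",
                    (long long)missingExternal, (long long)missingInternal);
        return 0;
    }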
-void CWallet::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool)
+void CWallet::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool internal)
{
nIndex = -1;
keypool.vchPubKey = CPubKey();
@@ -2958,14 +3000,24 @@ void CWallet::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool)
CWalletDB walletdb(strWalletFile);
- nIndex = *(setKeyPool.begin());
- setKeyPool.erase(setKeyPool.begin());
- if (!walletdb.ReadPool(nIndex, keypool))
- throw std::runtime_error(std::string(__func__) + ": read failed");
- if (!HaveKey(keypool.vchPubKey.GetID()))
- throw std::runtime_error(std::string(__func__) + ": unknown key in key pool");
- assert(keypool.vchPubKey.IsValid());
- LogPrintf("keypool reserve %d\n", nIndex);
+ // try to find a key that matches the internal/external filter
+ for(const int64_t& id : setKeyPool)
+ {
+ CKeyPool tmpKeypool;
+ if (!walletdb.ReadPool(id, tmpKeypool))
+ throw std::runtime_error(std::string(__func__) + ": read failed");
+ if (!HaveKey(tmpKeypool.vchPubKey.GetID()))
+ throw std::runtime_error(std::string(__func__) + ": unknown key in key pool");
+ if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT) || tmpKeypool.fInternal == internal)
+ {
+ nIndex = id;
+ keypool = tmpKeypool;
+ setKeyPool.erase(id);
+ assert(keypool.vchPubKey.IsValid());
+ LogPrintf("keypool reserve %d\n", nIndex);
+ return;
+ }
+ }
}
}
@@ -2990,17 +3042,17 @@ void CWallet::ReturnKey(int64_t nIndex)
LogPrintf("keypool return %d\n", nIndex);
}
-bool CWallet::GetKeyFromPool(CPubKey& result)
+bool CWallet::GetKeyFromPool(CPubKey& result, bool internal)
{
int64_t nIndex = 0;
CKeyPool keypool;
{
LOCK(cs_wallet);
- ReserveKeyFromKeyPool(nIndex, keypool);
+ ReserveKeyFromKeyPool(nIndex, keypool, internal);
if (nIndex == -1)
{
if (IsLocked()) return false;
- result = GenerateNewKey();
+ result = GenerateNewKey(internal);
return true;
}
KeepKey(nIndex);
@@ -3017,9 +3069,33 @@ int64_t CWallet::GetOldestKeyPoolTime()
if (setKeyPool.empty())
return GetTime();
- // load oldest key from keypool, get time and return
CKeyPool keypool;
CWalletDB walletdb(strWalletFile);
+
+ if (IsHDEnabled() && CanSupportFeature(FEATURE_HD_SPLIT))
+ {
+ // if HD & HD Chain Split is enabled, return max(oldest-internal-key, oldest-external-key)
+ int64_t now = GetTime();
+ int64_t oldest_external = now, oldest_internal = now;
+
+ for(const int64_t& id : setKeyPool)
+ {
+ if (!walletdb.ReadPool(id, keypool)) {
+ throw std::runtime_error(std::string(__func__) + ": read failed");
+ }
+ if (keypool.fInternal && keypool.nTime < oldest_internal) {
+ oldest_internal = keypool.nTime;
+ }
+ else if (!keypool.fInternal && keypool.nTime < oldest_external) {
+ oldest_external = keypool.nTime;
+ }
+ if (oldest_internal != now && oldest_external != now) {
+ break;
+ }
+ }
+ return std::max(oldest_internal, oldest_external);
+ }
+ // load oldest key from keypool, get time and return
int64_t nIndex = *(setKeyPool.begin());
if (!walletdb.ReadPool(nIndex, keypool))
throw std::runtime_error(std::string(__func__) + ": read oldest key in keypool failed");
@@ -3205,12 +3281,12 @@ std::set<CTxDestination> CWallet::GetAccountAddresses(const std::string& strAcco
return result;
}
-bool CReserveKey::GetReservedKey(CPubKey& pubkey)
+bool CReserveKey::GetReservedKey(CPubKey& pubkey, bool internal)
{
if (nIndex == -1)
{
CKeyPool keypool;
- pwallet->ReserveKeyFromKeyPool(nIndex, keypool);
+ pwallet->ReserveKeyFromKeyPool(nIndex, keypool, internal);
if (nIndex != -1)
vchPubKey = keypool.vchPubKey;
else {
@@ -3623,13 +3699,17 @@ CWallet* CWallet::CreateWalletFromFile(const std::string walletFile)
{
// Create new keyUser and set as default key
if (GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET) && !walletInstance->IsHDEnabled()) {
+
+ // ensure this wallet.dat can only be opened by clients supporting HD with chain split
+ walletInstance->SetMinVersion(FEATURE_HD_SPLIT);
+
// generate a new master key
CPubKey masterPubKey = walletInstance->GenerateNewHDMasterKey();
if (!walletInstance->SetHDMasterKey(masterPubKey))
throw std::runtime_error(std::string(__func__) + ": Storing master key failed");
}
CPubKey newDefaultKey;
- if (walletInstance->GetKeyFromPool(newDefaultKey)) {
+ if (walletInstance->GetKeyFromPool(newDefaultKey, false)) {
walletInstance->SetDefaultKey(newDefaultKey);
if (!walletInstance->SetAddressBook(walletInstance->vchDefaultKey.GetID(), "", "receive")) {
InitError(_("Cannot write default address") += "\n");
@@ -3888,12 +3968,14 @@ bool CWallet::BackupWallet(const std::string& strDest)
CKeyPool::CKeyPool()
{
nTime = GetTime();
+ fInternal = false;
}
-CKeyPool::CKeyPool(const CPubKey& vchPubKeyIn)
+CKeyPool::CKeyPool(const CPubKey& vchPubKeyIn, bool internalIn)
{
nTime = GetTime();
vchPubKey = vchPubKeyIn;
+ fInternal = internalIn;
}
CWalletKey::CWalletKey(int64_t nExpires)
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index ae4321eef8..ccede60097 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -86,6 +86,9 @@ enum WalletFeature
FEATURE_COMPRPUBKEY = 60000, // compressed public keys
FEATURE_HD = 130000, // Hierarchical key derivation after BIP32 (HD Wallet)
+
+ FEATURE_HD_SPLIT = 139900, // Wallet with HD chain split (change outputs will use m/0'/1'/k)
+
FEATURE_LATEST = FEATURE_COMPRPUBKEY // HD is optional, use FEATURE_COMPRPUBKEY as latest version
};
@@ -96,9 +99,10 @@ class CKeyPool
public:
int64_t nTime;
CPubKey vchPubKey;
+ bool fInternal; // for change outputs
CKeyPool();
- CKeyPool(const CPubKey& vchPubKeyIn);
+ CKeyPool(const CPubKey& vchPubKeyIn, bool internalIn);
ADD_SERIALIZE_METHODS;
@@ -109,6 +113,19 @@ public:
READWRITE(nVersion);
READWRITE(nTime);
READWRITE(vchPubKey);
+ if (ser_action.ForRead()) {
+ try {
+ READWRITE(fInternal);
+ }
+ catch (std::ios_base::failure&) {
+ /* flag as external address if we can't read the internal boolean
+ (this will be the case for any wallet before the HD chain split version) */
+ fInternal = false;
+ }
+ }
+ else {
+ READWRITE(fInternal);
+ }
}
};
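Note: the try/catch around READWRITE(fInternal) is a backward-compatibility shim: records written before the chain split simply end after vchPubKey, so reading the extra boolean throws and the key is treated as external. The same idea reduced to a plain stream read (the names and stream type are illustrative, not the wallet serialization framework):

    #include <cstdio>
    #include <sstream>
    #include <string>

    // Read an optional trailing flag; default to 'false' (external) when the record is short.
    bool ReadOptionalInternalFlag(std::istream& s)
    {
        char flag = 0;
        if (!s.read(&flag, 1)) {
            // Old-format record: the field is missing, fall back to the pre-split default.
            return false;
        }
        return flag != 0;
    }

    int main()
    {
        std::istringstream oldRecord("");                     // nothing after the public key
        std::istringstream newRecord(std::string("\1", 1));   // trailing internal flag set
        std::printf("old record internal=%d\n", (int)ReadOptionalInternalFlag(oldRecord));
        std::printf("new record internal=%d\n", (int)ReadOptionalInternalFlag(newRecord));
        return 0;
    }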
@@ -647,6 +664,9 @@ private:
/* the HD chain data model (external chain counters) */
CHDChain hdChain;
+ /* HD derive new child key (on internal or external chain) */
+ void DeriveNewChildKey(CKeyMetadata& metadata, CKey& secret, bool internal = false);
+
bool fFileBacked;
std::set<int64_t> setKeyPool;
@@ -774,8 +794,7 @@ public:
* keystore implementation
* Generate a new key
*/
- CPubKey GenerateNewKey();
- void DeriveNewChildKey(CKeyMetadata& metadata, CKey& secret);
+ CPubKey GenerateNewKey(bool internal = false);
//! Adds a key to the store, and saves it to disk.
bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
//! Adds a key to the store, without saving it to disk (used by LoadWallet)
@@ -883,11 +902,12 @@ public:
static CAmount GetRequiredFee(unsigned int nTxBytes);
bool NewKeyPool();
+ size_t KeypoolCountExternalKeys();
bool TopUpKeyPool(unsigned int kpSize = 0);
- void ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool);
+ void ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool internal);
void KeepKey(int64_t nIndex);
void ReturnKey(int64_t nIndex);
- bool GetKeyFromPool(CPubKey &key);
+ bool GetKeyFromPool(CPubKey &key, bool internal = false);
int64_t GetOldestKeyPoolTime();
void GetAllReserveKeys(std::set<CKeyID>& setAddress) const;
@@ -1035,8 +1055,10 @@ public:
/* Generates a new HD master key (will not be activated) */
CPubKey GenerateNewHDMasterKey();
- /* Set the current HD master key (will reset the chain child index counters) */
- bool SetHDMasterKey(const CPubKey& key);
+ /* Set the current HD master key (will reset the chain child index counters)
+ If possibleOldChain is provided, the parameters from the old chain (version)
+ will be preserved. */
+ bool SetHDMasterKey(const CPubKey& key, CHDChain *possibleOldChain = nullptr);
};
/** A key allocated from the key pool. */
@@ -1063,7 +1085,7 @@ public:
}
void ReturnKey();
- bool GetReservedKey(CPubKey &pubkey);
+ bool GetReservedKey(CPubKey &pubkey, bool internal = false);
void KeepKey();
void KeepScript() { KeepKey(); }
};
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index d017965385..73d79eb452 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -745,7 +745,7 @@ DBErrors CWalletDB::ZapSelectTx(std::vector<uint256>& vTxHashIn, std::vector<uin
}
else if ((*it) == hash) {
if(!EraseTx(hash)) {
- LogPrint("db", "Transaction was found for deletion but returned database error: %s\n", hash.GetHex());
+ LogPrint(BCLog::DB, "Transaction was found for deletion but returned database error: %s\n", hash.GetHex());
delerror = true;
}
vTxHashOut.push_back(hash);
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 4d7dfb727e..271c1d66c9 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -46,9 +46,12 @@ class CHDChain
{
public:
uint32_t nExternalChainCounter;
+ uint32_t nInternalChainCounter;
CKeyID masterKeyID; //!< master key hash160
- static const int CURRENT_VERSION = 1;
+ static const int VERSION_HD_BASE = 1;
+ static const int VERSION_HD_CHAIN_SPLIT = 2;
+ static const int CURRENT_VERSION = VERSION_HD_CHAIN_SPLIT;
int nVersion;
CHDChain() { SetNull(); }
@@ -59,12 +62,15 @@ public:
READWRITE(this->nVersion);
READWRITE(nExternalChainCounter);
READWRITE(masterKeyID);
+ if (this->nVersion >= VERSION_HD_CHAIN_SPLIT)
+ READWRITE(nInternalChainCounter);
}
void SetNull()
{
nVersion = CHDChain::CURRENT_VERSION;
nExternalChainCounter = 0;
+ nInternalChainCounter = 0;
masterKeyID.SetNull();
}
};
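Note: CHDChain handles the same upgrade with an explicit version check instead of a catch: nInternalChainCounter is only (de)serialized for VERSION_HD_CHAIN_SPLIT records, and SetNull() leaves it at 0 for older ones. A reduced, illustrative round trip of that version gating (plain integer vectors stand in for the real streams):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct MiniHDChain {
        static constexpr int VERSION_HD_BASE = 1;
        static constexpr int VERSION_HD_CHAIN_SPLIT = 2;
        int nVersion = VERSION_HD_CHAIN_SPLIT;
        uint32_t nExternalChainCounter = 0;
        uint32_t nInternalChainCounter = 0;
    };

    // Write only the fields the record's version knows about.
    std::vector<uint32_t> Write(const MiniHDChain& c)
    {
        std::vector<uint32_t> out{(uint32_t)c.nVersion, c.nExternalChainCounter};
        if (c.nVersion >= MiniHDChain::VERSION_HD_CHAIN_SPLIT)
            out.push_back(c.nInternalChainCounter);
        return out;
    }

    MiniHDChain Read(const std::vector<uint32_t>& in)
    {
        MiniHDChain c;
        c.nVersion = (int)in[0];
        c.nExternalChainCounter = in[1];
        if (c.nVersion >= MiniHDChain::VERSION_HD_CHAIN_SPLIT)
            c.nInternalChainCounter = in[2];
        // else: the counter keeps its default of 0, matching SetNull() above.
        return c;
    }

    int main()
    {
        MiniHDChain oldChain;
        oldChain.nVersion = MiniHDChain::VERSION_HD_BASE;
        oldChain.nExternalChainCounter = 7;
        MiniHDChain back = Read(Write(oldChain));
        std::printf("version=%d external=%u internal=%u\n",
                    back.nVersion, back.nExternalChainCounter, back.nInternalChainCounter);
        return 0;
    }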
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index 431d8c9ac9..fac2a3c57a 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -12,7 +12,7 @@
void zmqError(const char *str)
{
- LogPrint("zmq", "zmq: Error: %s, errno=%s\n", str, zmq_strerror(errno));
+ LogPrint(BCLog::ZMQ, "zmq: Error: %s, errno=%s\n", str, zmq_strerror(errno));
}
CZMQNotificationInterface::CZMQNotificationInterface() : pcontext(NULL)
@@ -72,7 +72,7 @@ CZMQNotificationInterface* CZMQNotificationInterface::Create()
// Called at startup to conditionally set up ZMQ socket(s)
bool CZMQNotificationInterface::Initialize()
{
- LogPrint("zmq", "zmq: Initialize notification interface\n");
+ LogPrint(BCLog::ZMQ, "zmq: Initialize notification interface\n");
assert(!pcontext);
pcontext = zmq_init(1);
@@ -89,11 +89,11 @@ bool CZMQNotificationInterface::Initialize()
CZMQAbstractNotifier *notifier = *i;
if (notifier->Initialize(pcontext))
{
- LogPrint("zmq", " Notifier %s ready (address = %s)\n", notifier->GetType(), notifier->GetAddress());
+ LogPrint(BCLog::ZMQ, " Notifier %s ready (address = %s)\n", notifier->GetType(), notifier->GetAddress());
}
else
{
- LogPrint("zmq", " Notifier %s failed (address = %s)\n", notifier->GetType(), notifier->GetAddress());
+ LogPrint(BCLog::ZMQ, " Notifier %s failed (address = %s)\n", notifier->GetType(), notifier->GetAddress());
break;
}
}
@@ -109,13 +109,13 @@ bool CZMQNotificationInterface::Initialize()
// Called during shutdown sequence
void CZMQNotificationInterface::Shutdown()
{
- LogPrint("zmq", "zmq: Shutdown notification interface\n");
+ LogPrint(BCLog::ZMQ, "zmq: Shutdown notification interface\n");
if (pcontext)
{
for (std::list<CZMQAbstractNotifier*>::iterator i=notifiers.begin(); i!=notifiers.end(); ++i)
{
CZMQAbstractNotifier *notifier = *i;
- LogPrint("zmq", " Shutdown notifier %s at %s\n", notifier->GetType(), notifier->GetAddress());
+ LogPrint(BCLog::ZMQ, " Shutdown notifier %s at %s\n", notifier->GetType(), notifier->GetAddress());
notifier->Shutdown();
}
zmq_ctx_destroy(pcontext);
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index caca1248a1..b2963e9bde 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -30,6 +30,7 @@ static int zmq_send_multipart(void *sock, const void* data, size_t size, ...)
if (rc != 0)
{
zmqError("Unable to initialize ZMQ msg");
+ va_end(args);
return -1;
}
@@ -43,6 +44,7 @@ static int zmq_send_multipart(void *sock, const void* data, size_t size, ...)
{
zmqError("Unable to send ZMQ msg");
zmq_msg_close(&msg);
+ va_end(args);
return -1;
}
@@ -53,6 +55,7 @@ static int zmq_send_multipart(void *sock, const void* data, size_t size, ...)
size = va_arg(args, size_t);
}
+ va_end(args);
return 0;
}
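Note: the three va_end(args) calls added above close a leak on the early-return paths: C requires every va_start to be paired with a va_end before the function returns. A minimal stand-alone illustration of the rule, unrelated to ZMQ:

    #include <cstdarg>
    #include <cstdio>

    // Sum 'count' ints passed as varargs; note va_end on both exit paths.
    static int sum_ints(int count, ...)
    {
        va_list args;
        va_start(args, count);
        if (count < 0) {
            va_end(args); // the early return still needs va_end, just like the patch above
            return 0;
        }
        int total = 0;
        for (int i = 0; i < count; ++i)
            total += va_arg(args, int);
        va_end(args);
        return total;
    }

    int main()
    {
        std::printf("%d\n", sum_ints(3, 1, 2, 3)); // prints 6
        return 0;
    }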
@@ -86,7 +89,7 @@ bool CZMQAbstractPublishNotifier::Initialize(void *pcontext)
}
else
{
- LogPrint("zmq", "zmq: Reusing socket for address %s\n", address);
+ LogPrint(BCLog::ZMQ, "zmq: Reusing socket for address %s\n", address);
psocket = i->second->psocket;
mapPublishNotifiers.insert(std::make_pair(address, this));
@@ -116,7 +119,7 @@ void CZMQAbstractPublishNotifier::Shutdown()
if (count == 1)
{
- LogPrint("zmq", "Close socket at address %s\n", address);
+ LogPrint(BCLog::ZMQ, "Close socket at address %s\n", address);
int linger = 0;
zmq_setsockopt(psocket, ZMQ_LINGER, &linger, sizeof(linger));
zmq_close(psocket);
@@ -145,7 +148,7 @@ bool CZMQAbstractPublishNotifier::SendMessage(const char *command, const void* d
bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
{
uint256 hash = pindex->GetBlockHash();
- LogPrint("zmq", "zmq: Publish hashblock %s\n", hash.GetHex());
+ LogPrint(BCLog::ZMQ, "zmq: Publish hashblock %s\n", hash.GetHex());
char data[32];
for (unsigned int i = 0; i < 32; i++)
data[31 - i] = hash.begin()[i];
@@ -155,7 +158,7 @@ bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &transaction)
{
uint256 hash = transaction.GetHash();
- LogPrint("zmq", "zmq: Publish hashtx %s\n", hash.GetHex());
+ LogPrint(BCLog::ZMQ, "zmq: Publish hashtx %s\n", hash.GetHex());
char data[32];
for (unsigned int i = 0; i < 32; i++)
data[31 - i] = hash.begin()[i];
@@ -164,7 +167,7 @@ bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &t
bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
{
- LogPrint("zmq", "zmq: Publish rawblock %s\n", pindex->GetBlockHash().GetHex());
+ LogPrint(BCLog::ZMQ, "zmq: Publish rawblock %s\n", pindex->GetBlockHash().GetHex());
const Consensus::Params& consensusParams = Params().GetConsensus();
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
@@ -186,7 +189,7 @@ bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
bool CZMQPublishRawTransactionNotifier::NotifyTransaction(const CTransaction &transaction)
{
uint256 hash = transaction.GetHash();
- LogPrint("zmq", "zmq: Publish rawtx %s\n", hash.GetHex());
+ LogPrint(BCLog::ZMQ, "zmq: Publish rawtx %s\n", hash.GetHex());
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
ss << transaction;
return SendMessage(MSG_RAWTX, &(*ss.begin()), ss.size());