-rw-r--r--  .travis.yml                              |  1
-rw-r--r--  contrib/linearize/README.md              |  3
-rw-r--r--  contrib/linearize/example-linearize.cfg  |  1
-rwxr-xr-x  contrib/linearize/linearize-hashes.py    | 23
-rw-r--r--  src/rpc/mining.cpp                       | 25
-rw-r--r--  src/txdb.h                               |  8
-rw-r--r--  src/validation.cpp                       | 15
7 files changed, 58 insertions, 18 deletions
diff --git a/.travis.yml b/.travis.yml
index d8395255bc..a479e46f44 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -39,7 +39,6 @@ env:
before_install:
- export PATH=$(echo $PATH | tr ':' "\n" | sed '/\/opt\/python/d' | tr "\n" ":" | sed "s|::|:|g")
install:
- - if [ -n "$PPA" ]; then travis_retry sudo add-apt-repository "$PPA" -y; fi
- if [ -n "$DPKG_ADD_ARCH" ]; then sudo dpkg --add-architecture "$DPKG_ADD_ARCH" ; fi
- if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get update; fi
- if [ -n "$PACKAGES" ]; then travis_retry sudo apt-get install --no-install-recommends --no-upgrade -qq $PACKAGES; fi
diff --git a/contrib/linearize/README.md b/contrib/linearize/README.md
index 0971e7816b..f2a2ab2768 100644
--- a/contrib/linearize/README.md
+++ b/contrib/linearize/README.md
@@ -7,7 +7,8 @@ run using Python 3 but are compatible with Python 2.
$ ./linearize-hashes.py linearize.cfg > hashlist.txt
Required configuration file settings for linearize-hashes:
-* RPC: `rpcuser`, `rpcpassword`
+* RPC: `datadir` (Required if `rpcuser` and `rpcpassword` are not specified)
+* RPC: `rpcuser`, `rpcpassword` (Required if `datadir` is not specified)
Optional config file setting for linearize-hashes:
* RPC: `host` (Default: `127.0.0.1`)
diff --git a/contrib/linearize/example-linearize.cfg b/contrib/linearize/example-linearize.cfg
index 2cc910edfe..d019b06b6c 100644
--- a/contrib/linearize/example-linearize.cfg
+++ b/contrib/linearize/example-linearize.cfg
@@ -1,6 +1,7 @@
# bitcoind RPC settings (linearize-hashes)
rpcuser=someuser
rpcpassword=somepassword
+#datadir=~/.bitcoin
host=127.0.0.1
port=8332
#port=18332
diff --git a/contrib/linearize/linearize-hashes.py b/contrib/linearize/linearize-hashes.py
index 00a54d0820..db8eb7021e 100755
--- a/contrib/linearize/linearize-hashes.py
+++ b/contrib/linearize/linearize-hashes.py
@@ -16,6 +16,8 @@ import json
import re
import base64
import sys
+import os
+import os.path
settings = {}
@@ -93,6 +95,14 @@ def get_block_hashes(settings, max_blocks_per_call=10000):
height += num_blocks
+def get_rpc_cookie():
+ # Open the cookie file
+ with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f:
+ combined = f.readline()
+ combined_split = combined.split(":")
+ settings['rpcuser'] = combined_split[0]
+ settings['rpcpassword'] = combined_split[1]
+
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
@@ -122,8 +132,15 @@ if __name__ == '__main__':
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
+
+ use_userpass = True
+ use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
- print("Missing username and/or password in cfg file", file=stderr)
+ use_userpass = False
+ if 'datadir' in settings and not use_userpass:
+ use_datadir = True
+ if not use_userpass and not use_datadir:
+ print("Missing datadir or username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
@@ -133,4 +150,8 @@ if __name__ == '__main__':
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
+ # Get the rpc user and pass from the cookie if the datadir is set
+ if use_datadir:
+ get_rpc_cookie()
+
get_block_hashes(settings)
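
With this change, setting datadir makes the script read its RPC credentials from the .cookie file that bitcoind writes into the data directory at startup (a single user:password line). A minimal standalone sketch of that flow, assuming a local node on the default mainnet RPC port; the helper names here are illustrative and not part of the patch:

import base64
import http.client
import json
import os.path

def read_cookie(datadir):
    # The cookie file holds one "user:password" line written by bitcoind.
    with open(os.path.join(os.path.expanduser(datadir), '.cookie'), 'r') as f:
        user, password = f.readline().strip().split(':', 1)
    return user, password

def rpc_call(method, params, datadir='~/.bitcoin', host='127.0.0.1', port=8332):
    user, password = read_cookie(datadir)
    auth = base64.b64encode(('%s:%s' % (user, password)).encode()).decode()
    headers = {'Authorization': 'Basic ' + auth, 'Content-Type': 'application/json'}
    body = json.dumps({'id': 1, 'method': method, 'params': params})
    conn = http.client.HTTPConnection(host, port)
    conn.request('POST', '/', body, headers)
    reply = json.loads(conn.getresponse().read())
    conn.close()
    return reply['result']

# Example: fetch the current block count over cookie-authenticated RPC.
# print(rpc_call('getblockcount', []))
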
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index b823c159d3..7e5f0d608e 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -720,7 +720,7 @@ protected:
UniValue submitblock(const JSONRPCRequest& request)
{
- if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
+ if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) {
throw std::runtime_error(
"submitblock \"hexdata\" ( \"jsonparametersobject\" )\n"
"\nAttempts to submit new block to network.\n"
@@ -738,11 +738,17 @@ UniValue submitblock(const JSONRPCRequest& request)
+ HelpExampleCli("submitblock", "\"mydata\"")
+ HelpExampleRpc("submitblock", "\"mydata\"")
);
+ }
std::shared_ptr<CBlock> blockptr = std::make_shared<CBlock>();
CBlock& block = *blockptr;
- if (!DecodeHexBlk(block, request.params[0].get_str()))
+ if (!DecodeHexBlk(block, request.params[0].get_str())) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed");
+ }
+
+ if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) {
+ throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block does not start with a coinbase");
+ }
uint256 hash = block.GetHash();
bool fBlockPresent = false;
@@ -751,10 +757,12 @@ UniValue submitblock(const JSONRPCRequest& request)
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end()) {
CBlockIndex *pindex = mi->second;
- if (pindex->IsValid(BLOCK_VALID_SCRIPTS))
+ if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
return "duplicate";
- if (pindex->nStatus & BLOCK_FAILED_MASK)
+ }
+ if (pindex->nStatus & BLOCK_FAILED_MASK) {
return "duplicate-invalid";
+ }
// Otherwise, we might only have the header - process the block before returning
fBlockPresent = true;
}
@@ -772,14 +780,15 @@ UniValue submitblock(const JSONRPCRequest& request)
RegisterValidationInterface(&sc);
bool fAccepted = ProcessNewBlock(Params(), blockptr, true, NULL);
UnregisterValidationInterface(&sc);
- if (fBlockPresent)
- {
- if (fAccepted && !sc.found)
+ if (fBlockPresent) {
+ if (fAccepted && !sc.found) {
return "duplicate-inconclusive";
+ }
return "duplicate";
}
- if (!sc.found)
+ if (!sc.found) {
return "inconclusive";
+ }
return BIP22ValidationResult(sc.state);
}
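
The added check rejects a submitted block whose first transaction is not a coinbase before any further processing. In Bitcoin Core, IsCoinBase() means the transaction has exactly one input whose prevout is null (all-zero txid, index 0xffffffff). A rough Python sketch of the same guard over a hypothetical decoded-block structure, purely for illustration:

# Hypothetical plain-dict block representation; not Bitcoin Core's API.
NULL_TXID = '00' * 32
NULL_INDEX = 0xffffffff

def is_coinbase(tx):
    if len(tx['vin']) != 1:
        return False
    vin0 = tx['vin'][0]
    return vin0['txid'] == NULL_TXID and vin0['vout'] == NULL_INDEX

def check_starts_with_coinbase(block):
    # Mirrors the new submitblock guard: an empty vtx or a non-coinbase first tx is rejected.
    if not block['vtx'] or not is_coinbase(block['vtx'][0]):
        raise ValueError('Block does not start with a coinbase')
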
diff --git a/src/txdb.h b/src/txdb.h
index 7f5cf2b583..d9214ba618 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -21,8 +21,14 @@ class CBlockIndex;
class CCoinsViewDBCursor;
class uint256;
+//! Compensate for extra memory peak (x1.5-x1.9) at flush time.
+static constexpr int DB_PEAK_USAGE_FACTOR = 2;
+//! No need to flush periodically if at least this much space is still available.
+static constexpr int MAX_BLOCK_COINSDB_USAGE = 200 * DB_PEAK_USAGE_FACTOR;
+//! Always flush periodically if less than this much space is still available.
+static constexpr int MIN_BLOCK_COINSDB_USAGE = 50 * DB_PEAK_USAGE_FACTOR;
//! -dbcache default (MiB)
-static const int64_t nDefaultDbCache = 300;
+static const int64_t nDefaultDbCache = 450;
//! max. -dbcache (MiB)
static const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
//! min. -dbcache (MiB)
diff --git a/src/validation.cpp b/src/validation.cpp
index 6ade633988..1f5b317441 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -2006,10 +2006,11 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode, int n
nLastSetChain = nNow;
}
int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t cacheSize = pcoinsTip->DynamicMemoryUsage() * 2; // Compensate for extra memory peak (x1.5-x1.9) at flush time.
+ int64_t cacheSize = pcoinsTip->DynamicMemoryUsage() * DB_PEAK_USAGE_FACTOR;
int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
- // The cache is large and we're within 10% and 100 MiB of the limit, but we have time now (not in the middle of a block processing).
- bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - 100 * 1024 * 1024);
+// The cache is large and we're within 10% and 200 MiB or 50% and 50 MiB of the limit, but we have time now (not in the middle of a block processing).
+ bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::min(std::max(nTotalSpace / 2, nTotalSpace - MIN_BLOCK_COINSDB_USAGE * 1024 * 1024),
+ std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024));
// The cache is over the limit, we have to write now.
bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
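
To make the new periodic-flush condition concrete: with DB_PEAK_USAGE_FACTOR = 2, the txdb.h constants work out to 400 MiB and 100 MiB of headroom. A small arithmetic sketch, assuming (purely for illustration) a combined budget nTotalSpace of 700 MiB:

# Illustration of the fCacheLarge threshold, not code from the patch.
MiB = 1024 * 1024
DB_PEAK_USAGE_FACTOR = 2
MAX_BLOCK_COINSDB_USAGE = 200 * DB_PEAK_USAGE_FACTOR   # 400 MiB of headroom
MIN_BLOCK_COINSDB_USAGE = 50 * DB_PEAK_USAGE_FACTOR    # 100 MiB of headroom

total_space = 700 * MiB                                 # assumed nTotalSpace
threshold = min(max(total_space // 2, total_space - MIN_BLOCK_COINSDB_USAGE * MiB),
                max(9 * total_space // 10, total_space - MAX_BLOCK_COINSDB_USAGE * MiB))
print(threshold // MiB)        # 600: periodic flush once cacheSize exceeds 600 MiB
print(threshold // (2 * MiB))  # 300: i.e. ~300 MiB of actual cache usage, since
                               # cacheSize is DynamicMemoryUsage() * DB_PEAK_USAGE_FACTOR
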
@@ -2901,9 +2902,11 @@ bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& pa
static int GetWitnessCommitmentIndex(const CBlock& block)
{
int commitpos = -1;
- for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
- if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
- commitpos = o;
+ if (!block.vtx.empty()) {
+ for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
+ if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
+ commitpos = o;
+ }
}
}
return commitpos;
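
The change here only adds an empty-vtx guard; the scan itself is unchanged and looks for the BIP141 witness commitment among the coinbase outputs, i.e. a scriptPubKey of at least 38 bytes starting with OP_RETURN (0x6a), a 0x24 push, and the 0xaa21a9ed marker. A brief Python sketch of the same scan over raw scriptPubKey bytes, for illustration only:

# Not Bitcoin Core code: scan hypothetical coinbase output scripts (bytes objects)
# for the witness commitment header, keeping the last match like the C++ loop.
WITNESS_COMMITMENT_HEADER = bytes.fromhex('6a24aa21a9ed')

def get_witness_commitment_index(coinbase_output_scripts):
    commitpos = -1
    for i, spk in enumerate(coinbase_output_scripts):
        if len(spk) >= 38 and spk[:6] == WITNESS_COMMITMENT_HEADER:
            commitpos = i
    return commitpos
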