-rwxr-xr-x  .travis/lint_06_script.sh                    2
-rwxr-xr-x  contrib/verify-commits/verify-commits.py    10
-rw-r--r--  doc/dependencies.md                           4
-rw-r--r--  src/crypto/aes.cpp                           62
-rw-r--r--  src/crypto/aes.h                             51
-rw-r--r--  src/rpc/rawtransaction.cpp                   47
-rw-r--r--  src/test/crypto_tests.cpp                    87
-rwxr-xr-x  test/functional/feature_pruning.py          186
8 files changed, 140 insertions, 309 deletions
diff --git a/.travis/lint_06_script.sh b/.travis/lint_06_script.sh
index 701e6d8005..1a1693a8ce 100755
--- a/.travis/lint_06_script.sh
+++ b/.travis/lint_06_script.sh
@@ -21,5 +21,5 @@ test/lint/lint-all.sh
if [ "$TRAVIS_REPO_SLUG" = "bitcoin/bitcoin" -a "$TRAVIS_EVENT_TYPE" = "cron" ]; then
git log --merges --before="2 days ago" -1 --format='%H' > ./contrib/verify-commits/trusted-sha512-root-commit
while read -r LINE; do travis_retry gpg --keyserver hkp://subset.pool.sks-keyservers.net --recv-keys $LINE; done < contrib/verify-commits/trusted-keys &&
- travis_wait 50 contrib/verify-commits/verify-commits.py --clean-merge=2;
+ ./contrib/verify-commits/verify-commits.py --clean-merge=2;
fi
diff --git a/contrib/verify-commits/verify-commits.py b/contrib/verify-commits/verify-commits.py
index 6bbed01073..255ce75092 100755
--- a/contrib/verify-commits/verify-commits.py
+++ b/contrib/verify-commits/verify-commits.py
@@ -5,6 +5,7 @@
"""Verify commits against a trusted keys list."""
import argparse
import hashlib
+import logging
import os
import subprocess
import sys
@@ -66,6 +67,11 @@ def tree_sha512sum(commit='HEAD'):
return overall.hexdigest()
def main():
+
+ # Enable debug logging if running in CI
+ if 'CI' in os.environ and os.environ['CI'].lower() == "true":
+ logging.getLogger().setLevel(logging.DEBUG)
+
# Parse arguments
parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]')
parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check')
@@ -95,6 +101,10 @@ def main():
# Iterate through commits
while True:
+
+ # Log a message to prevent Travis from timing out
+ logging.debug("verify-commits: [in-progress] processing commit {}".format(current_commit[:8]))
+
if current_commit == verified_root:
print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root))
sys.exit(0)
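
The two hunks above enable DEBUG-level logging whenever the CI environment variable is set to "true" (as Travis does) and emit one short progress line per commit, so the CI log keeps moving and the job is no longer killed for silence; that is also why the lint script above can drop `travis_wait`. A minimal standalone sketch of the same pattern, with illustrative names that are not part of the patch:

```python
import logging
import os

def process_items(items):
    # Mirror of the pattern in the patch: only turn on debug output when the
    # CI environment variable is "true" (Travis exports CI=true).
    if os.environ.get('CI', '').lower() == 'true':
        logging.getLogger().setLevel(logging.DEBUG)

    for item in items:
        # One short line per unit of work is enough to stop a CI log watcher
        # from treating the job as hung.
        logging.debug("worker: [in-progress] processing %s", item)
        # ... expensive verification work would go here ...

if __name__ == '__main__':
    logging.basicConfig(format='%(message)s')
    process_items(['commit-a', 'commit-b'])
```
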
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 0fb36184c2..c445e2e23f 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -17,7 +17,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| libevent | [2.1.8-stable](https://github.com/libevent/libevent/releases) | 2.0.22 | No | | |
| libjpeg | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk#L65) |
| libpng | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk#L64) |
-| libsrvg | | | | | |
+| librsvg | | | | | |
| MiniUPnPc | [2.0.20180203](http://miniupnp.free.fr/files) | | No | | |
| OpenSSL | [1.0.1k](https://www.openssl.org/source) | | Yes | | |
| PCRE | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk#L66) |
@@ -43,4 +43,4 @@ Some dependencies are not needed in all configurations. The following are some f
* ZeroMQ is needed only with the `--with-zmq` option.
#### Other
-* librsvg is only needed if you need to run `make deploy` on (cross-compliation to) macOS. \ No newline at end of file
+* librsvg is only needed if you need to run `make deploy` on (cross-compliation to) macOS.
diff --git a/src/crypto/aes.cpp b/src/crypto/aes.cpp
index 919ea593b7..2dc2133434 100644
--- a/src/crypto/aes.cpp
+++ b/src/crypto/aes.cpp
@@ -12,36 +12,6 @@ extern "C" {
#include <crypto/ctaes/ctaes.c>
}
-AES128Encrypt::AES128Encrypt(const unsigned char key[16])
-{
- AES128_init(&ctx, key);
-}
-
-AES128Encrypt::~AES128Encrypt()
-{
- memset(&ctx, 0, sizeof(ctx));
-}
-
-void AES128Encrypt::Encrypt(unsigned char ciphertext[16], const unsigned char plaintext[16]) const
-{
- AES128_encrypt(&ctx, 1, ciphertext, plaintext);
-}
-
-AES128Decrypt::AES128Decrypt(const unsigned char key[16])
-{
- AES128_init(&ctx, key);
-}
-
-AES128Decrypt::~AES128Decrypt()
-{
- memset(&ctx, 0, sizeof(ctx));
-}
-
-void AES128Decrypt::Decrypt(unsigned char plaintext[16], const unsigned char ciphertext[16]) const
-{
- AES128_decrypt(&ctx, 1, plaintext, ciphertext);
-}
-
AES256Encrypt::AES256Encrypt(const unsigned char key[32])
{
AES256_init(&ctx, key);
@@ -182,35 +152,3 @@ AES256CBCDecrypt::~AES256CBCDecrypt()
{
memset(iv, 0, sizeof(iv));
}
-
-AES128CBCEncrypt::AES128CBCEncrypt(const unsigned char key[AES128_KEYSIZE], const unsigned char ivIn[AES_BLOCKSIZE], bool padIn)
- : enc(key), pad(padIn)
-{
- memcpy(iv, ivIn, AES_BLOCKSIZE);
-}
-
-AES128CBCEncrypt::~AES128CBCEncrypt()
-{
- memset(iv, 0, AES_BLOCKSIZE);
-}
-
-int AES128CBCEncrypt::Encrypt(const unsigned char* data, int size, unsigned char* out) const
-{
- return CBCEncrypt(enc, iv, data, size, pad, out);
-}
-
-AES128CBCDecrypt::AES128CBCDecrypt(const unsigned char key[AES128_KEYSIZE], const unsigned char ivIn[AES_BLOCKSIZE], bool padIn)
- : dec(key), pad(padIn)
-{
- memcpy(iv, ivIn, AES_BLOCKSIZE);
-}
-
-AES128CBCDecrypt::~AES128CBCDecrypt()
-{
- memset(iv, 0, AES_BLOCKSIZE);
-}
-
-int AES128CBCDecrypt::Decrypt(const unsigned char* data, int size, unsigned char* out) const
-{
- return CBCDecrypt(dec, iv, data, size, pad, out);
-}
diff --git a/src/crypto/aes.h b/src/crypto/aes.h
index fdad70c593..e06c8de272 100644
--- a/src/crypto/aes.h
+++ b/src/crypto/aes.h
@@ -12,33 +12,8 @@ extern "C" {
}
static const int AES_BLOCKSIZE = 16;
-static const int AES128_KEYSIZE = 16;
static const int AES256_KEYSIZE = 32;
-/** An encryption class for AES-128. */
-class AES128Encrypt
-{
-private:
- AES128_ctx ctx;
-
-public:
- explicit AES128Encrypt(const unsigned char key[16]);
- ~AES128Encrypt();
- void Encrypt(unsigned char ciphertext[16], const unsigned char plaintext[16]) const;
-};
-
-/** A decryption class for AES-128. */
-class AES128Decrypt
-{
-private:
- AES128_ctx ctx;
-
-public:
- explicit AES128Decrypt(const unsigned char key[16]);
- ~AES128Decrypt();
- void Decrypt(unsigned char plaintext[16], const unsigned char ciphertext[16]) const;
-};
-
/** An encryption class for AES-256. */
class AES256Encrypt
{
@@ -89,30 +64,4 @@ private:
unsigned char iv[AES_BLOCKSIZE];
};
-class AES128CBCEncrypt
-{
-public:
- AES128CBCEncrypt(const unsigned char key[AES128_KEYSIZE], const unsigned char ivIn[AES_BLOCKSIZE], bool padIn);
- ~AES128CBCEncrypt();
- int Encrypt(const unsigned char* data, int size, unsigned char* out) const;
-
-private:
- const AES128Encrypt enc;
- const bool pad;
- unsigned char iv[AES_BLOCKSIZE];
-};
-
-class AES128CBCDecrypt
-{
-public:
- AES128CBCDecrypt(const unsigned char key[AES128_KEYSIZE], const unsigned char ivIn[AES_BLOCKSIZE], bool padIn);
- ~AES128CBCDecrypt();
- int Decrypt(const unsigned char* data, int size, unsigned char* out) const;
-
-private:
- const AES128Decrypt dec;
- const bool pad;
- unsigned char iv[AES_BLOCKSIZE];
-};
-
#endif // BITCOIN_CRYPTO_AES_H
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 299a594585..5c404e0251 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -615,33 +615,54 @@ static UniValue decoderawtransaction(const JSONRPCRequest& request)
return result;
}
+static std::string GetAllOutputTypes()
+{
+ std::string ret;
+ for (int i = TX_NONSTANDARD; i <= TX_WITNESS_UNKNOWN; ++i) {
+ if (i != TX_NONSTANDARD) ret += ", ";
+ ret += GetTxnOutputType(static_cast<txnouttype>(i));
+ }
+ return ret;
+}
+
static UniValue decodescript(const JSONRPCRequest& request)
{
- if (request.fHelp || request.params.size() != 1)
- throw std::runtime_error(
- RPCHelpMan{"decodescript",
+ const RPCHelpMan help{"decodescript",
"\nDecode a hex-encoded script.\n",
{
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hex-encoded script"},
},
RPCResult{
"{\n"
- " \"asm\":\"asm\", (string) Script public key\n"
- " \"hex\":\"hex\", (string) hex-encoded public key\n"
- " \"type\":\"type\", (string) The output type\n"
- " \"reqSigs\": n, (numeric) The required signatures\n"
- " \"addresses\": [ (json array of string)\n"
- " \"address\" (string) bitcoin address\n"
+ " \"asm\":\"asm\", (string) Script public key\n"
+ " \"type\":\"type\", (string) The output type (e.g. "+GetAllOutputTypes()+")\n"
+ " \"reqSigs\": n, (numeric) The required signatures\n"
+ " \"addresses\": [ (json array of string)\n"
+ " \"address\" (string) bitcoin address\n"
" ,...\n"
" ],\n"
- " \"p2sh\",\"address\" (string) address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH).\n"
+ " \"p2sh\":\"str\" (string) address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH).\n"
+ " \"segwit\": { (json object) Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness).\n"
+ " \"asm\":\"str\", (string) String representation of the script public key\n"
+ " \"hex\":\"hexstr\", (string) Hex string of the script public key\n"
+ " \"type\":\"str\", (string) The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)\n"
+ " \"reqSigs\": n, (numeric) The required signatures (always 1)\n"
+ " \"addresses\": [ (json array of string) (always length 1)\n"
+ " \"address\" (string) segwit address\n"
+ " ,...\n"
+ " ],\n"
+ " \"p2sh-segwit\":\"str\" (string) address of the P2SH script wrapping this witness redeem script.\n"
"}\n"
},
RPCExamples{
HelpExampleCli("decodescript", "\"hexstring\"")
+ HelpExampleRpc("decodescript", "\"hexstring\"")
},
- }.ToString());
+ };
+
+ if (request.fHelp || !help.IsValidNumArgs(request.params.size())) {
+ throw std::runtime_error(help.ToString());
+ }
RPCTypeCheck(request.params, {UniValue::VSTR});
@@ -653,7 +674,7 @@ static UniValue decodescript(const JSONRPCRequest& request)
} else {
// Empty scripts are valid
}
- ScriptPubKeyToUniv(script, r, false);
+ ScriptPubKeyToUniv(script, r, /* fIncludeHex */ false);
UniValue type;
type = find_value(r, "type");
@@ -687,7 +708,7 @@ static UniValue decodescript(const JSONRPCRequest& request)
// Newer segwit program versions should be considered when then become available.
segwitScr = GetScriptForDestination(WitnessV0ScriptHash(script));
}
- ScriptPubKeyToUniv(segwitScr, sr, true);
+ ScriptPubKeyToUniv(segwitScr, sr, /* fIncludeHex */ true);
sr.pushKV("p2sh-segwit", EncodeDestination(CScriptID(segwitScr)));
r.pushKV("segwit", sr);
}
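
With the help text reworked, `decodescript` now documents the nested `segwit` object and derives the list of output types from the new `GetAllOutputTypes()` helper. A hedged sketch of exercising the RPC over JSON-RPC and reading the documented fields; the endpoint, credentials, and the throwaway P2PKH script below are assumptions for illustration only, not part of the patch:

```python
import json
import requests  # assumption: third-party 'requests' package is available

# Assumptions: a regtest bitcoind with -rpcuser=user -rpcpassword=pass on the
# default regtest RPC port; the script hex is an arbitrary P2PKH example.
RPC_URL = "http://user:pass@127.0.0.1:18443"
P2PKH_HEX = "76a914" + "00" * 20 + "88ac"

def rpc(method, *params):
    payload = {"jsonrpc": "1.0", "id": "doc", "method": method, "params": list(params)}
    reply = requests.post(RPC_URL, data=json.dumps(payload)).json()
    if reply.get("error"):
        raise RuntimeError(reply["error"])
    return reply["result"]

decoded = rpc("decodescript", P2PKH_HEX)
# Fields documented by the updated help text.
print(decoded["asm"], decoded["type"])
print(decoded.get("p2sh"))                           # absent if the script is already P2SH
print(decoded.get("segwit", {}).get("p2sh-segwit"))  # nested segwit object added to the docs
```
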
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index 352cff37d1..2cc38a0679 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -67,26 +67,6 @@ static void TestHMACSHA512(const std::string &hexkey, const std::string &hexin,
TestVector(CHMAC_SHA512(key.data(), key.size()), ParseHex(hexin), ParseHex(hexout));
}
-static void TestAES128(const std::string &hexkey, const std::string &hexin, const std::string &hexout)
-{
- std::vector<unsigned char> key = ParseHex(hexkey);
- std::vector<unsigned char> in = ParseHex(hexin);
- std::vector<unsigned char> correctout = ParseHex(hexout);
- std::vector<unsigned char> buf, buf2;
-
- assert(key.size() == 16);
- assert(in.size() == 16);
- assert(correctout.size() == 16);
- AES128Encrypt enc(key.data());
- buf.resize(correctout.size());
- buf2.resize(correctout.size());
- enc.Encrypt(buf.data(), in.data());
- BOOST_CHECK_EQUAL(HexStr(buf), HexStr(correctout));
- AES128Decrypt dec(key.data());
- dec.Decrypt(buf2.data(), buf.data());
- BOOST_CHECK_EQUAL(HexStr(buf2), HexStr(in));
-}
-
static void TestAES256(const std::string &hexkey, const std::string &hexin, const std::string &hexout)
{
std::vector<unsigned char> key = ParseHex(hexkey);
@@ -106,47 +86,6 @@ static void TestAES256(const std::string &hexkey, const std::string &hexin, cons
BOOST_CHECK(buf == in);
}
-static void TestAES128CBC(const std::string &hexkey, const std::string &hexiv, bool pad, const std::string &hexin, const std::string &hexout)
-{
- std::vector<unsigned char> key = ParseHex(hexkey);
- std::vector<unsigned char> iv = ParseHex(hexiv);
- std::vector<unsigned char> in = ParseHex(hexin);
- std::vector<unsigned char> correctout = ParseHex(hexout);
- std::vector<unsigned char> realout(in.size() + AES_BLOCKSIZE);
-
- // Encrypt the plaintext and verify that it equals the cipher
- AES128CBCEncrypt enc(key.data(), iv.data(), pad);
- int size = enc.Encrypt(in.data(), in.size(), realout.data());
- realout.resize(size);
- BOOST_CHECK(realout.size() == correctout.size());
- BOOST_CHECK_MESSAGE(realout == correctout, HexStr(realout) + std::string(" != ") + hexout);
-
- // Decrypt the cipher and verify that it equals the plaintext
- std::vector<unsigned char> decrypted(correctout.size());
- AES128CBCDecrypt dec(key.data(), iv.data(), pad);
- size = dec.Decrypt(correctout.data(), correctout.size(), decrypted.data());
- decrypted.resize(size);
- BOOST_CHECK(decrypted.size() == in.size());
- BOOST_CHECK_MESSAGE(decrypted == in, HexStr(decrypted) + std::string(" != ") + hexin);
-
- // Encrypt and re-decrypt substrings of the plaintext and verify that they equal each-other
- for(std::vector<unsigned char>::iterator i(in.begin()); i != in.end(); ++i)
- {
- std::vector<unsigned char> sub(i, in.end());
- std::vector<unsigned char> subout(sub.size() + AES_BLOCKSIZE);
- int _size = enc.Encrypt(sub.data(), sub.size(), subout.data());
- if (_size != 0)
- {
- subout.resize(_size);
- std::vector<unsigned char> subdecrypted(subout.size());
- _size = dec.Decrypt(subout.data(), subout.size(), subdecrypted.data());
- subdecrypted.resize(_size);
- BOOST_CHECK(decrypted.size() == in.size());
- BOOST_CHECK_MESSAGE(subdecrypted == sub, HexStr(subdecrypted) + std::string(" != ") + HexStr(sub));
- }
- }
-}
-
static void TestAES256CBC(const std::string &hexkey, const std::string &hexiv, bool pad, const std::string &hexin, const std::string &hexout)
{
std::vector<unsigned char> key = ParseHex(hexkey);
@@ -440,14 +379,9 @@ BOOST_AUTO_TEST_CASE(hmac_sha512_testvectors) {
BOOST_AUTO_TEST_CASE(aes_testvectors) {
// AES test vectors from FIPS 197.
- TestAES128("000102030405060708090a0b0c0d0e0f", "00112233445566778899aabbccddeeff", "69c4e0d86a7b0430d8cdb78070b4c55a");
TestAES256("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", "00112233445566778899aabbccddeeff", "8ea2b7ca516745bfeafc49904b496089");
// AES-ECB test vectors from NIST sp800-38a.
- TestAES128("2b7e151628aed2a6abf7158809cf4f3c", "6bc1bee22e409f96e93d7e117393172a", "3ad77bb40d7a3660a89ecaf32466ef97");
- TestAES128("2b7e151628aed2a6abf7158809cf4f3c", "ae2d8a571e03ac9c9eb76fac45af8e51", "f5d3d58503b9699de785895a96fdbaaf");
- TestAES128("2b7e151628aed2a6abf7158809cf4f3c", "30c81c46a35ce411e5fbc1191a0a52ef", "43b1cd7f598ece23881b00e3ed030688");
- TestAES128("2b7e151628aed2a6abf7158809cf4f3c", "f69f2445df4f9b17ad2b417be66c3710", "7b0c785e27e8ad3f8223207104725dd4");
TestAES256("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4", "6bc1bee22e409f96e93d7e117393172a", "f3eed1bdb5d2a03c064b5a7e3db181f8");
TestAES256("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4", "ae2d8a571e03ac9c9eb76fac45af8e51", "591ccb10d410ed26dc5ba74a31362870");
TestAES256("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4", "30c81c46a35ce411e5fbc1191a0a52ef", "b6ed21b99ca6f4f9f153e7b1beafed1d");
@@ -455,27 +389,6 @@ BOOST_AUTO_TEST_CASE(aes_testvectors) {
}
BOOST_AUTO_TEST_CASE(aes_cbc_testvectors) {
-
- // NIST AES CBC 128-bit encryption test-vectors
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "000102030405060708090A0B0C0D0E0F", false, \
- "6bc1bee22e409f96e93d7e117393172a", "7649abac8119b246cee98e9b12e9197d");
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "7649ABAC8119B246CEE98E9B12E9197D", false, \
- "ae2d8a571e03ac9c9eb76fac45af8e51", "5086cb9b507219ee95db113a917678b2");
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "5086cb9b507219ee95db113a917678b2", false, \
- "30c81c46a35ce411e5fbc1191a0a52ef", "73bed6b8e3c1743b7116e69e22229516");
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "73bed6b8e3c1743b7116e69e22229516", false, \
- "f69f2445df4f9b17ad2b417be66c3710", "3ff1caa1681fac09120eca307586e1a7");
-
- // The same vectors with padding enabled
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "000102030405060708090A0B0C0D0E0F", true, \
- "6bc1bee22e409f96e93d7e117393172a", "7649abac8119b246cee98e9b12e9197d8964e0b149c10b7b682e6e39aaeb731c");
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "7649ABAC8119B246CEE98E9B12E9197D", true, \
- "ae2d8a571e03ac9c9eb76fac45af8e51", "5086cb9b507219ee95db113a917678b255e21d7100b988ffec32feeafaf23538");
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "5086cb9b507219ee95db113a917678b2", true, \
- "30c81c46a35ce411e5fbc1191a0a52ef", "73bed6b8e3c1743b7116e69e22229516f6eccda327bf8e5ec43718b0039adceb");
- TestAES128CBC("2b7e151628aed2a6abf7158809cf4f3c", "73bed6b8e3c1743b7116e69e22229516", true, \
- "f69f2445df4f9b17ad2b417be66c3710", "3ff1caa1681fac09120eca307586e1a78cb82807230e1321d3fae00d18cc2012");
-
// NIST AES CBC 256-bit encryption test-vectors
TestAES256CBC("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4", \
"000102030405060708090A0B0C0D0E0F", false, "6bc1bee22e409f96e93d7e117393172a", \
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 12da2655ee..49951e24f3 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -8,11 +8,13 @@ WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
+import os
+from test_framework.blocktools import create_coinbase
+from test_framework.messages import CBlock, ToHex
+from test_framework.script import CScript, OP_RETURN, OP_NOP
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, mine_large_block, sync_blocks, wait_until
-
-import os
+from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, sync_blocks, wait_until
MIN_BLOCKS_TO_KEEP = 288
@@ -21,19 +23,59 @@ MIN_BLOCKS_TO_KEEP = 288
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
+def mine_large_blocks(node, n):
+ # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
+ # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
+ # transaction but is consensus valid.
+
+ # Get the block parameters for the first block
+ big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
+ best_block = node.getblock(node.getbestblockhash())
+ height = int(best_block["height"]) + 1
+ try:
+ # Static variable ensures that time is monotonically increasing and is therefore
+ # different for each block created => blockhash is unique.
+ mine_large_blocks.nTime = min(mine_large_blocks.nTime, int(best_block["time"])) + 1
+ except AttributeError:
+ mine_large_blocks.nTime = int(best_block["time"]) + 1
+ previousblockhash = int(best_block["hash"], 16)
+
+ for _ in range(n):
+ # Build the coinbase transaction (with large scriptPubKey)
+ coinbase_tx = create_coinbase(height)
+ coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
+ coinbase_tx.vout[0].scriptPubKey = big_script
+ coinbase_tx.rehash()
+
+ # Build the block
+ block = CBlock()
+ block.nVersion = best_block["version"]
+ block.hashPrevBlock = previousblockhash
+ block.nTime = mine_large_blocks.nTime
+ block.nBits = int('207fffff', 16)
+ block.nNonce = 0
+ block.vtx = [coinbase_tx]
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.solve()
+
+ # Submit to the node
+ node.submitblock(ToHex(block))
+
+ previousblockhash = block.sha256
+ height += 1
+ mine_large_blocks.nTime += 1
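
The new `mine_large_blocks` helper stores its last block time in a function attribute (`mine_large_blocks.nTime`), Python's nearest analogue to a C static local, so successive calls keep handing out increasing timestamps and therefore distinct block hashes. A toy illustration of just that idiom (the arithmetic is simplified and the name below is made up for the example):

```python
def next_timestamp(chain_tip_time):
    # Function attributes persist between calls, like a C 'static' local:
    # the first call seeds the counter, later calls keep incrementing it,
    # so every caller gets a strictly larger timestamp.
    try:
        next_timestamp.t = max(next_timestamp.t, chain_tip_time) + 1
    except AttributeError:
        next_timestamp.t = chain_tip_time + 1
    return next_timestamp.t

assert next_timestamp(1000) == 1001
assert next_timestamp(1000) == 1002
assert next_timestamp(500) == 1003   # never goes backwards
```
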
def calc_usage(blockdir):
- return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+ return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
- self.rpc_timeout = 900
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
- self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
+ self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [
@@ -55,7 +97,7 @@ class PruneTest(BitcoinTestFramework):
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
- connect_nodes(self.nodes[2], 0)
+ connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
@@ -71,21 +113,19 @@ class PruneTest(BitcoinTestFramework):
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
+
# Then mine enough full blocks to create more than 550MiB of data
- for i in range(645):
- mine_large_block(self.nodes[0], self.utxo_cache_0)
+ mine_large_blocks(self.nodes[0], 645)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
- if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
- raise AssertionError("blk00000.dat is missing, pruning too early")
+ assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
- for i in range(25):
- mine_large_block(self.nodes[0], self.utxo_cache_0)
+ mine_large_blocks(self.nodes[0], 25)
# Wait for blk00000.dat to be pruned
wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
@@ -93,8 +133,7 @@ class PruneTest(BitcoinTestFramework):
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
- if (usage > 550):
- raise AssertionError("Pruning target not being met")
+ assert_greater_than(550, usage)
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
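
The hand-rolled `raise AssertionError` checks above become `assert_greater_than(550, usage)`: the first argument must be strictly greater, i.e. the 550 MiB prune target has to exceed the measured block-directory usage. A minimal sketch of what that framework helper amounts to (behaviour assumed from `test_framework.util`, not copied from this patch):

```python
def assert_greater_than(thing1, thing2):
    # Passes only when thing1 > thing2; the call in the test therefore reads
    # "the 550 MiB target must be above the measured usage".
    if thing1 <= thing2:
        raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))

assert_greater_than(550, 412.7)    # ok: usage below the pruning target
# assert_greater_than(550, 731.0)  # would raise: pruning target not being met
```
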
@@ -103,26 +142,17 @@ class PruneTest(BitcoinTestFramework):
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
- # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
- self.stop_node(0)
- self.start_node(0, extra_args=self.full_node_default_args)
+ disconnect_nodes(self.nodes[0], 1)
+ disconnect_nodes(self.nodes[0], 2)
# Mine 24 blocks in node 1
- for i in range(24):
- if j == 0:
- mine_large_block(self.nodes[1], self.utxo_cache_1)
- else:
- # Add node1's wallet transactions back to the mempool, to
- # avoid the mined blocks from being too small.
- self.nodes[1].resendwallettransactions()
- self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
+ mine_large_blocks(self.nodes[1], 24)
# Reorg back with 25 block chain from node 0
- for i in range(25):
- mine_large_block(self.nodes[0], self.utxo_cache_0)
+ mine_large_blocks(self.nodes[0], 25)
# Create connections in the order so both nodes can see the reorg at the same time
- connect_nodes(self.nodes[1], 0)
- connect_nodes(self.nodes[2], 0)
+ connect_nodes(self.nodes[0], 1)
+ connect_nodes(self.nodes[0], 2)
sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
@@ -130,63 +160,48 @@ class PruneTest(BitcoinTestFramework):
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
- # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
- # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
- self.stop_node(1)
- self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
- invalidheight = height-287
- badhash = self.nodes[1].getblockhash(invalidheight)
- self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
- self.nodes[1].invalidateblock(badhash)
+ self.forkheight = height - 287
+ self.forkhash = self.nodes[1].getblockhash(self.forkheight)
+ self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+ self.nodes[1].invalidateblock(self.forkhash)
# We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
- mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
- curhash = self.nodes[1].getblockhash(invalidheight - 1)
+ mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
+ curhash = self.nodes[1].getblockhash(self.forkheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
- curhash = self.nodes[1].getblockhash(invalidheight - 1)
+ curhash = self.nodes[1].getblockhash(self.forkheight - 1)
- assert self.nodes[1].getblockcount() == invalidheight - 1
+ assert self.nodes[1].getblockcount() == self.forkheight - 1
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
- # Reboot node1 to clear those giant tx's from mempool
- self.stop_node(1)
- self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5"])
+ # Disconnect node1 and generate the new chain
+ disconnect_nodes(self.nodes[0], 1)
+ disconnect_nodes(self.nodes[1], 2)
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
- connect_nodes(self.nodes[2], 1)
+ connect_nodes(self.nodes[1], 2)
sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
- self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
- self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+ self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
- # Get node0's wallet transactions back in its mempool, to avoid the
- # mined blocks from being too small.
- self.nodes[0].resendwallettransactions()
+ self.log.info("Mine 220 more large blocks so we have requisite history")
- for i in range(22):
- # This can be slow, so do this in multiple RPC calls to avoid
- # RPC timeouts.
- self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
- sync_blocks(self.nodes[0:3], timeout=300)
+ mine_large_blocks(self.nodes[0], 220)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
- if (usage > 550):
- raise AssertionError("Pruning target not being met")
-
- return invalidheight,badhash
+ assert_greater_than(550, usage)
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
@@ -219,17 +234,17 @@ class PruneTest(BitcoinTestFramework):
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
- assert self.nodes[0].getblockcount() == self.mainchainheight
- assert self.nodes[0].getbestblockhash() == self.mainchainhash2
+ assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
+ assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
# Wait for Node 2 to reorg to proper height
wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
- assert self.nodes[2].getbestblockhash() == goalbesthash
+ assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
# Verify we can now have the data for a block previously pruned
- assert self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight
+ assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
@@ -287,38 +302,30 @@ class PruneTest(BitcoinTestFramework):
# height=100 too low to prune first block file so this is a no-op
prune(100)
- if not has_block(0):
- raise AssertionError("blk00000.dat is missing when should still be there")
+ assert has_block(0), "blk00000.dat is missing when should still be there"
# Does nothing
node.pruneblockchain(height(0))
- if not has_block(0):
- raise AssertionError("blk00000.dat is missing when should still be there")
+ assert has_block(0), "blk00000.dat is missing when should still be there"
# height=500 should prune first file
prune(500)
- if has_block(0):
- raise AssertionError("blk00000.dat is still there, should be pruned by now")
- if not has_block(1):
- raise AssertionError("blk00001.dat is missing when should still be there")
+ assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
+ assert has_block(1), "blk00001.dat is missing when should still be there"
# height=650 should prune second file
prune(650)
- if has_block(1):
- raise AssertionError("blk00001.dat is still there, should be pruned by now")
+ assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
- if not has_block(2):
- raise AssertionError("blk00002.dat is still there, should be pruned by now")
+ assert has_block(2), "blk00002.dat is still there, should be pruned by now"
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
- if has_block(2):
- raise AssertionError("blk00002.dat is still there, should be pruned by now")
- if has_block(3):
- raise AssertionError("blk00003.dat is still there, should be pruned by now")
+ assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
+ assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
# stop node, start back up with auto-prune at 550 MiB, make sure still runs
self.stop_node(node_number)
@@ -339,21 +346,14 @@ class PruneTest(BitcoinTestFramework):
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
- self.stop_node(5) #stop and start to trigger rescan
+ self.stop_node(5) # stop and start to trigger rescan
self.start_node(5, extra_args=["-prune=550"])
self.log.info("Success")
def run_test(self):
- self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
- self.log.info("Mining a big blockchain of 995 blocks")
-
- # Determine default relay fee
- self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
-
- # Cache for utxos, as the listunspent may take a long time later in the test
- self.utxo_cache_0 = []
- self.utxo_cache_1 = []
+ self.log.info("Warning! This test requires 4GB of disk space")
+ self.log.info("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
@@ -394,11 +394,11 @@ class PruneTest(BitcoinTestFramework):
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
- self.mainchainheight = self.nodes[2].getblockcount() #1320
+ self.mainchainheight = self.nodes[2].getblockcount() # 1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
- (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
+ self.reorg_test() # (1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain