Diffstat (limited to 'src/test')
-rw-r--r--  src/test/coins_tests.cpp            |  27
-rw-r--r--  src/test/data/script_valid.json     |   3
-rw-r--r--  src/test/data/tx_invalid.json       |   6
-rw-r--r--  src/test/policyestimator_tests.cpp  | 186
-rw-r--r--  src/test/scheduler_tests.cpp        | 110
-rw-r--r--  src/test/test_bitcoin.cpp           |   3
6 files changed, 332 insertions, 3 deletions
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 2e2cc2214b..34b311b804 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -59,6 +59,24 @@ public:
bool GetStats(CCoinsStats& stats) const { return false; }
};
+
+class CCoinsViewCacheTest : public CCoinsViewCache
+{
+public:
+ CCoinsViewCacheTest(CCoinsView* base) : CCoinsViewCache(base) {}
+
+ void SelfTest() const
+ {
+ // Manually recompute the dynamic usage of the whole data, and compare it.
+ size_t ret = memusage::DynamicUsage(cacheCoins);
+ for (CCoinsMap::iterator it = cacheCoins.begin(); it != cacheCoins.end(); it++) {
+ ret += memusage::DynamicUsage(it->second.coins);
+ }
+ BOOST_CHECK_EQUAL(memusage::DynamicUsage(*this), ret);
+ }
+
+};
+
}
BOOST_FIXTURE_TEST_SUITE(coins_tests, BasicTestingSetup)
@@ -90,8 +108,8 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
// The cache stack.
CCoinsViewTest base; // A CCoinsViewTest at the bottom.
- std::vector<CCoinsViewCache*> stack; // A stack of CCoinsViewCaches on top.
- stack.push_back(new CCoinsViewCache(&base)); // Start with one cache.
+ std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
+ stack.push_back(new CCoinsViewCacheTest(&base)); // Start with one cache.
// Use a limited set of random transaction ids, so we do test overwriting entries.
std::vector<uint256> txids;
@@ -136,6 +154,9 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
missed_an_entry = true;
}
}
+ BOOST_FOREACH(const CCoinsViewCacheTest *test, stack) {
+ test->SelfTest();
+ }
}
if (insecure_rand() % 100 == 0) {
@@ -152,7 +173,7 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
} else {
removed_all_caches = true;
}
- stack.push_back(new CCoinsViewCache(tip));
+ stack.push_back(new CCoinsViewCacheTest(tip));
if (stack.size() == 4) {
reached_4_caches = true;
}
diff --git a/src/test/data/script_valid.json b/src/test/data/script_valid.json
index 064dde8464..a4e15faeaf 100644
--- a/src/test/data/script_valid.json
+++ b/src/test/data/script_valid.json
@@ -74,12 +74,14 @@
["1 1", "VERIFY", "P2SH,STRICTENC"],
["1 0x05 0x01 0x00 0x00 0x00 0x00", "VERIFY", "P2SH,STRICTENC", "values >4 bytes can be cast to boolean"],
+["1 0x01 0x80", "IF 0 ENDIF", "P2SH,STRICTENC", "negative 0 is false"],
["10 0 11 TOALTSTACK DROP FROMALTSTACK", "ADD 21 EQUAL", "P2SH,STRICTENC"],
["'gavin_was_here' TOALTSTACK 11 FROMALTSTACK", "'gavin_was_here' EQUALVERIFY 11 EQUAL", "P2SH,STRICTENC"],
["0 IFDUP", "DEPTH 1 EQUALVERIFY 0 EQUAL", "P2SH,STRICTENC"],
["1 IFDUP", "DEPTH 2 EQUALVERIFY 1 EQUALVERIFY 1 EQUAL", "P2SH,STRICTENC"],
+["0x05 0x0100000000 IFDUP", "DEPTH 2 EQUALVERIFY 0x05 0x0100000000 EQUAL", "P2SH,STRICTENC", "IFDUP dups non ints"],
["0 DROP", "DEPTH 0 EQUAL", "P2SH,STRICTENC"],
["0", "DUP 1 ADD 1 EQUALVERIFY 0 EQUAL", "P2SH,STRICTENC"],
["0 1", "NIP", "P2SH,STRICTENC"],
@@ -408,6 +410,7 @@
["0 0", "EQUAL", "P2SH,STRICTENC"],
["0 0", "EQUALVERIFY 1", "P2SH,STRICTENC"],
+["0 0 1", "EQUAL EQUAL", "P2SH,STRICTENC", "OP_0 and bools must have identical byte representations"],
["0", "1ADD", "P2SH,STRICTENC"],
["2", "1SUB", "P2SH,STRICTENC"],
diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json
index 456e0d2f7b..31d33c63fb 100644
--- a/src/test/data/tx_invalid.json
+++ b/src/test/data/tx_invalid.json
@@ -19,6 +19,12 @@
[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0x5b6462475454710f3c22f5fdf0b40704c92f25c3 EQUALVERIFY CHECKSIGVERIFY 1 0x4c 0x47 0x3044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a01"]],
"01000000010001000000000000000000000000000000000000000000000000000000000000000000006b4c473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000", "P2SH"],
+["This is the nearly-standard transaction with CHECKSIGVERIFY 1 instead of CHECKSIG from tx_valid.json"],
+["but with the signature duplicated in the scriptPubKey with a different hashtype suffix"],
+["See FindAndDelete, which will only remove if the signature, including the hash type, matches"],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "DUP HASH160 0x14 0x5b6462475454710f3c22f5fdf0b40704c92f25c3 EQUALVERIFY CHECKSIGVERIFY 1 0x47 0x3044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a81"]],
+"01000000010001000000000000000000000000000000000000000000000000000000000000000000006a473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000", "P2SH"],
+
["An invalid P2SH Transaction"],
[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "HASH160 0x14 0x7a052c840ba73af26755de42cf01cc9e0a49fef0 EQUAL"]],
"010000000100010000000000000000000000000000000000000000000000000000000000000000000009085768617420697320ffffffff010000000000000000015100000000", "P2SH"],
diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp
new file mode 100644
index 0000000000..cb64ee7c69
--- /dev/null
+++ b/src/test/policyestimator_tests.cpp
@@ -0,0 +1,186 @@
+// Copyright (c) 2011-2015 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "policy/fees.h"
+#include "txmempool.h"
+#include "uint256.h"
+#include "util.h"
+
+#include "test/test_bitcoin.h"
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, BasicTestingSetup)
+
+BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
+{
+ CTxMemPool mpool(CFeeRate(1000));
+ CAmount basefee(2000);
+ double basepri = 1e6;
+ CAmount deltaFee(100);
+ double deltaPri=5e5;
+ std::vector<CAmount> feeV[2];
+ std::vector<double> priV[2];
+
+ // Populate vectors of increasing fees or priorities
+ for (int j = 0; j < 10; j++) {
+ //V[0] is for fee transactions
+ feeV[0].push_back(basefee * (j+1));
+ priV[0].push_back(0);
+ //V[1] is for priority transactions
+ feeV[1].push_back(CAmount(0));
+ priV[1].push_back(basepri * pow(10, j+1));
+ }
+
+ // Store the hashes of transactions that have been
+ // added to the mempool by their associated fee/pri
+ // txHashes[j] is populated with transactions either of
+ // fee = basefee * (j+1) OR pri = 10^6 * 10^(j+1)
+ std::vector<uint256> txHashes[10];
+
+ // Create a transaction template
+ CScript garbage;
+ for (unsigned int i = 0; i < 128; i++)
+ garbage.push_back('X');
+ CMutableTransaction tx;
+ std::list<CTransaction> dummyConflicted;
+ tx.vin.resize(1);
+ tx.vin[0].scriptSig = garbage;
+ tx.vout.resize(1);
+ tx.vout[0].nValue=0LL;
+ CFeeRate baseRate(basefee, ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION));
+
+ // Create a fake block
+ std::vector<CTransaction> block;
+ int blocknum = 0;
+
+ // Loop through 200 blocks
+ // With a decay of .998 and 4 fee transactions per block,
+ // this keeps the tx count at about 1.33 per bucket, above the threshold of 1
+ while (blocknum < 200) {
+ for (int j = 0; j < 10; j++) { // For each fee/pri multiple
+ for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+ tx.vin[0].prevout.n = 10000*blocknum+100*j+k; // make transaction unique
+ uint256 hash = tx.GetHash();
+ mpool.addUnchecked(hash, CTxMemPoolEntry(tx, feeV[k/4][j], GetTime(), priV[k/4][j], blocknum, mpool.HasNoInputsOf(tx)));
+ txHashes[j].push_back(hash);
+ }
+ }
+ //Create blocks where higher fee/pri txs are included more often
+ for (int h = 0; h <= blocknum%10; h++) {
+ // 10/10 blocks add highest fee/pri transactions
+ // 9/10 blocks add 2nd highest and so on until ...
+ // 1/10 blocks add lowest fee/pri transactions
+ while (txHashes[9-h].size()) {
+ CTransaction btx;
+ if (mpool.lookup(txHashes[9-h].back(), btx))
+ block.push_back(btx);
+ txHashes[9-h].pop_back();
+ }
+ }
+ mpool.removeForBlock(block, ++blocknum, dummyConflicted);
+ block.clear();
+ if (blocknum == 30) {
+ // At this point we should need to combine 5 buckets to get enough data points
+ // So estimateFee(1) should fail and estimateFee(2) should return somewhere around
+ // 8*baseRate
+ BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0));
+ BOOST_CHECK(mpool.estimateFee(2).GetFeePerK() < 8*baseRate.GetFeePerK() + deltaFee);
+ BOOST_CHECK(mpool.estimateFee(2).GetFeePerK() > 8*baseRate.GetFeePerK() - deltaFee);
+ }
+ }
+
+ std::vector<CAmount> origFeeEst;
+ std::vector<double> origPriEst;
+ // Highest feerate is 10*baseRate and gets in all blocks,
+ // second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%,
+ // third highest feerate is 8*baseRate, and gets in 8/10 blocks = 80%,
+ // so estimateFee(1) should return 9*baseRate.
+ // Third highest feerate has 90% chance of being included by 2 blocks,
+ // so estimateFee(2) should return 8*baseRate etc...
+ for (int i = 1; i < 10;i++) {
+ origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK());
+ origPriEst.push_back(mpool.estimatePriority(i));
+ if (i > 1) { // Fee estimates should be monotonically decreasing
+ BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
+ BOOST_CHECK(origPriEst[i-1] <= origPriEst[i-2]);
+ }
+ BOOST_CHECK(origFeeEst[i-1] < (10-i)*baseRate.GetFeePerK() + deltaFee);
+ BOOST_CHECK(origFeeEst[i-1] > (10-i)*baseRate.GetFeePerK() - deltaFee);
+ BOOST_CHECK(origPriEst[i-1] < pow(10,10-i) * basepri + deltaPri);
+ BOOST_CHECK(origPriEst[i-1] > pow(10,10-i) * basepri - deltaPri);
+ }
+
+ // Mine 50 more blocks with no transactions happening, estimates shouldn't change
+ // We haven't decayed the moving average enough so we still have enough data points in every bucket
+ while (blocknum < 250)
+ mpool.removeForBlock(block, ++blocknum, dummyConflicted);
+
+ for (int i = 1; i < 10;i++) {
+ BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee);
+ BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
+ BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] + deltaPri);
+ BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
+ }
+
+
+ // Mine 15 more blocks with lots of transactions happening and not getting mined
+ // Estimates should go up
+ while (blocknum < 265) {
+ for (int j = 0; j < 10; j++) { // For each fee/pri multiple
+ for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+ tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
+ uint256 hash = tx.GetHash();
+ mpool.addUnchecked(hash, CTxMemPoolEntry(tx, feeV[k/4][j], GetTime(), priV[k/4][j], blocknum, mpool.HasNoInputsOf(tx)));
+ txHashes[j].push_back(hash);
+ }
+ }
+ mpool.removeForBlock(block, ++blocknum, dummyConflicted);
+ }
+
+ for (int i = 1; i < 10;i++) {
+ BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
+ BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
+ }
+
+ // Mine all those transactions
+ // Estimates should still not be below original
+ for (int j = 0; j < 10; j++) {
+ while(txHashes[j].size()) {
+ CTransaction btx;
+ if (mpool.lookup(txHashes[j].back(), btx))
+ block.push_back(btx);
+ txHashes[j].pop_back();
+ }
+ }
+ mpool.removeForBlock(block, 265, dummyConflicted);
+ block.clear();
+ for (int i = 1; i < 10;i++) {
+ BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee);
+ BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri);
+ }
+
+ // Mine 100 more blocks where everything is mined every block
+ // Estimates should be below original estimates (not possible for last estimate)
+ while (blocknum < 365) {
+ for (int j = 0; j < 10; j++) { // For each fee/pri multiple
+ for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx
+ tx.vin[0].prevout.n = 10000*blocknum+100*j+k;
+ uint256 hash = tx.GetHash();
+ mpool.addUnchecked(hash, CTxMemPoolEntry(tx, feeV[k/4][j], GetTime(), priV[k/4][j], blocknum, mpool.HasNoInputsOf(tx)));
+ CTransaction btx;
+ if (mpool.lookup(hash, btx))
+ block.push_back(btx);
+ }
+ }
+ mpool.removeForBlock(block, ++blocknum, dummyConflicted);
+ block.clear();
+ }
+ for (int i = 1; i < 9; i++) {
+ BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
+ BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] - deltaPri);
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
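The new test drives CBlockPolicyEstimator indirectly through the mempool, so its checks lean on how the estimator ages its statistics: per-feerate-bucket counts decay each block and an estimate is only produced once enough decayed data points exist (hence estimateFee(1) still failing at block 30). A heavily simplified sketch of that per-bucket decayed bookkeeping, with hypothetical names and a single confirmation target:

// Heavily simplified sketch of per-bucket decayed bookkeeping, with
// hypothetical names and a single confirmation target; the real
// CBlockPolicyEstimator tracks many targets and merges adjacent feerate
// buckets until it has enough decayed data points.
#include <cstdio>
#include <utility>
#include <vector>

struct BucketStats {
    double txCount = 0;       // decayed count of txs observed in this bucket
    double confirmedFast = 0; // decayed count that confirmed within the target
};

class TinyEstimator
{
public:
    explicit TinyEstimator(size_t buckets, double decay = 0.998)
        : stats(buckets), decayPerBlock(decay) {}

    // Called once per connected block.
    void ProcessBlock(const std::vector<std::pair<size_t, bool> >& confirmed)
    {
        for (BucketStats& b : stats) { // age out old observations
            b.txCount *= decayPerBlock;
            b.confirmedFast *= decayPerBlock;
        }
        for (const std::pair<size_t, bool>& tx : confirmed) {
            stats[tx.first].txCount += 1;
            if (tx.second)
                stats[tx.first].confirmedFast += 1;
        }
    }

    // Success rate for a bucket, or -1 while there is too little decayed data,
    // mirroring how estimateFee(1) returns CFeeRate(0) at block 30 above.
    double SuccessRate(size_t bucket, double minDataPoints = 1.0) const
    {
        const BucketStats& b = stats[bucket];
        if (b.txCount < minDataPoints)
            return -1.0;
        return b.confirmedFast / b.txCount;
    }

private:
    std::vector<BucketStats> stats;
    double decayPerBlock;
};

int main()
{
    TinyEstimator est(10);
    for (int block = 0; block < 200; block++)
        est.ProcessBlock({{9, true}, {5, block % 2 == 0}, {0, false}});
    std::printf("bucket 9: %.2f  bucket 5: %.2f  bucket 0: %.2f\n",
                est.SuccessRate(9), est.SuccessRate(5), est.SuccessRate(0));
    return 0;
}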
diff --git a/src/test/scheduler_tests.cpp b/src/test/scheduler_tests.cpp
new file mode 100644
index 0000000000..a26d0afaed
--- /dev/null
+++ b/src/test/scheduler_tests.cpp
@@ -0,0 +1,110 @@
+// Copyright (c) 2012-2013 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "random.h"
+#include "scheduler.h"
+
+#include "test/test_bitcoin.h"
+
+#include <boost/bind.hpp>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_int_distribution.hpp>
+#include <boost/thread.hpp>
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(scheduler_tests)
+
+static void microTask(CScheduler& s, boost::mutex& mutex, int& counter, int delta, boost::chrono::system_clock::time_point rescheduleTime)
+{
+ {
+ boost::unique_lock<boost::mutex> lock(mutex);
+ counter += delta;
+ }
+ boost::chrono::system_clock::time_point noTime = boost::chrono::system_clock::time_point::min();
+ if (rescheduleTime != noTime) {
+ CScheduler::Function f = boost::bind(&microTask, boost::ref(s), boost::ref(mutex), boost::ref(counter), -delta + 1, noTime);
+ s.schedule(f, rescheduleTime);
+ }
+}
+
+static void MicroSleep(uint64_t n)
+{
+#if defined(HAVE_WORKING_BOOST_SLEEP_FOR)
+ boost::this_thread::sleep_for(boost::chrono::microseconds(n));
+#elif defined(HAVE_WORKING_BOOST_SLEEP)
+ boost::this_thread::sleep(boost::posix_time::microseconds(n));
+#else
+ //should never get here
+ #error missing boost sleep implementation
+#endif
+}
+
+BOOST_AUTO_TEST_CASE(manythreads)
+{
+ // Stress test: hundreds of microsecond-scheduled tasks,
+ // serviced by 10 threads.
+ //
+ // So... ten shared counters, which if all the tasks execute
+ // properly will sum to the number of tasks done.
+ // Each task adds or subtracts from one of the counters a
+ // random amount, and then schedules another task 0-1000
+ // microseconds in the future to subtract or add from
+ // the counter -random_amount+1, so in the end the shared
+ // counters should sum to the number of initial tasks performed.
+ CScheduler microTasks;
+
+ boost::thread_group microThreads;
+ for (int i = 0; i < 5; i++)
+ microThreads.create_thread(boost::bind(&CScheduler::serviceQueue, &microTasks));
+
+ boost::mutex counterMutex[10];
+ int counter[10] = { 0 };
+ boost::random::mt19937 rng(insecure_rand());
+ boost::random::uniform_int_distribution<> zeroToNine(0, 9);
+ boost::random::uniform_int_distribution<> randomMsec(-11, 1000);
+ boost::random::uniform_int_distribution<> randomDelta(-1000, 1000);
+
+ boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();
+ boost::chrono::system_clock::time_point now = start;
+
+ for (int i = 0; i < 100; i++) {
+ boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
+ boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
+ int whichCounter = zeroToNine(rng);
+ CScheduler::Function f = boost::bind(&microTask, boost::ref(microTasks),
+ boost::ref(counterMutex[whichCounter]), boost::ref(counter[whichCounter]),
+ randomDelta(rng), tReschedule);
+ microTasks.schedule(f, t);
+ }
+
+ MicroSleep(600);
+ now = boost::chrono::system_clock::now();
+ // More threads and more tasks:
+ for (int i = 0; i < 5; i++)
+ microThreads.create_thread(boost::bind(&CScheduler::serviceQueue, &microTasks));
+ for (int i = 0; i < 100; i++) {
+ boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
+ boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
+ int whichCounter = zeroToNine(rng);
+ CScheduler::Function f = boost::bind(&microTask, boost::ref(microTasks),
+ boost::ref(counterMutex[whichCounter]), boost::ref(counter[whichCounter]),
+ randomDelta(rng), tReschedule);
+ microTasks.schedule(f, t);
+ }
+
+ // All 400 tasks should be finished within 2 milliseconds. Sleep a bit longer.
+ MicroSleep(2100);
+
+ microThreads.interrupt_all();
+ microThreads.join_all();
+
+ int counterSum = 0;
+ for (int i = 0; i < 10; i++) {
+ BOOST_CHECK(counter[i] != 0);
+ counterSum += counter[i];
+ }
+ BOOST_CHECK_EQUAL(counterSum, 200);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
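The stress test above is also the clearest documentation of the CScheduler interface it exercises: schedule() queues a callable for a time point, and serviceQueue() drains the queue on however many worker threads you give it. A minimal usage sketch assuming only that interface and the same Boost-based test environment as scheduler_tests.cpp:

// Minimal usage sketch of the interface the stress test exercises: schedule()
// queues a callable for a time point, serviceQueue() drains the queue on a
// worker thread. Assumes it is built inside the Bitcoin Core test environment
// (scheduler.h plus Boost), exactly like scheduler_tests.cpp itself.
#include "scheduler.h"

#include <boost/bind.hpp>
#include <boost/chrono.hpp>
#include <boost/thread.hpp>

#include <cstdio>

static void sayHello()
{
    std::printf("hello from the scheduler thread\n");
}

int main()
{
    CScheduler scheduler;

    // One worker drains the queue; the stress test above uses ten of these.
    boost::thread worker(boost::bind(&CScheduler::serviceQueue, &scheduler));

    // Run the task roughly one millisecond from now.
    scheduler.schedule(&sayHello,
                       boost::chrono::system_clock::now() + boost::chrono::milliseconds(1));

    // sleep_for needs a Boost built with chrono support; the test's MicroSleep
    // wrapper exists to paper over older Boost versions.
    boost::this_thread::sleep_for(boost::chrono::milliseconds(10));

    // Same shutdown pattern as the test: interrupt the service thread and join.
    worker.interrupt();
    worker.join();
    return 0;
}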
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 4057eccbed..c727303ea1 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -6,6 +6,7 @@
#include "test_bitcoin.h"
+#include "key.h"
#include "main.h"
#include "random.h"
#include "txdb.h"
@@ -28,6 +29,7 @@ extern void noui_connect();
BasicTestingSetup::BasicTestingSetup()
{
+ ECC_Start();
SetupEnvironment();
fPrintToDebugLog = false; // don't want to write to debug.log file
fCheckBlockIndex = true;
@@ -35,6 +37,7 @@ BasicTestingSetup::BasicTestingSetup()
}
BasicTestingSetup::~BasicTestingSetup()
{
+ ECC_Stop();
}
TestingSetup::TestingSetup()
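Moving ECC_Start()/ECC_Stop() into BasicTestingSetup means every test built on the fixture gets the ECC context for free. A generic sketch of that fixture pattern, with a hypothetical InitCryptoLib/ShutdownCryptoLib pair standing in for the real calls:

// Generic sketch of the fixture pattern used for ECC_Start()/ECC_Stop():
// global library state comes up in the fixture constructor and goes away in
// the destructor, so every test in the suite can rely on it.
// InitCryptoLib/ShutdownCryptoLib are hypothetical stand-ins for the real calls.
#define BOOST_TEST_MODULE fixture_sketch
#include <boost/test/included/unit_test.hpp>

static bool g_cryptoReady = false;

static void InitCryptoLib()     { g_cryptoReady = true;  } // think ECC_Start()
static void ShutdownCryptoLib() { g_cryptoReady = false; } // think ECC_Stop()

struct CryptoTestingSetup {
    CryptoTestingSetup()  { InitCryptoLib(); }
    ~CryptoTestingSetup() { ShutdownCryptoLib(); }
};

BOOST_FIXTURE_TEST_SUITE(fixture_sketch_tests, CryptoTestingSetup)

BOOST_AUTO_TEST_CASE(library_is_initialized)
{
    BOOST_CHECK(g_cryptoReady); // set up before the test body, torn down after
}

BOOST_AUTO_TEST_SUITE_END()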