Diffstat (limited to 'src')
-rw-r--r--  src/bitcoinrpc.cpp                   |   3
-rw-r--r--  src/chainparams.h                    |   2
-rw-r--r--  src/init.cpp                         |  20
-rw-r--r--  src/leveldb/Makefile                 |   6
-rw-r--r--  src/leveldb/db/autocompact_test.cc   | 118
-rw-r--r--  src/leveldb/db/corruption_test.cc    |  51
-rw-r--r--  src/leveldb/db/db_impl.cc            |  41
-rw-r--r--  src/leveldb/db/db_impl.h             |   9
-rw-r--r--  src/leveldb/db/db_iter.cc            |  41
-rw-r--r--  src/leveldb/db/db_iter.h             |   8
-rw-r--r--  src/leveldb/db/dbformat.h            |   3
-rw-r--r--  src/leveldb/db/version_set.cc        |  96
-rw-r--r--  src/leveldb/db/version_set.h         |  15
-rw-r--r--  src/leveldb/include/leveldb/db.h     |   2
-rw-r--r--  src/leveldb/util/env_posix.cc        |  33
-rw-r--r--  src/leveldb/util/random.h            |   7
-rw-r--r--  src/main.cpp                         | 191
-rw-r--r--  src/main.h                           |  83
-rw-r--r--  src/miner.cpp                        |   8
-rw-r--r--  src/net.cpp                          |   4
-rw-r--r--  src/net.h                            |   4
-rw-r--r--  src/qt/clientmodel.cpp               |   8
-rw-r--r--  src/qt/transactiondesc.cpp           |   2
-rw-r--r--  src/qt/transactionrecord.cpp         |   6
-rw-r--r--  src/qt/transactiontablemodel.cpp     |   4
-rw-r--r--  src/qt/walletmodel.cpp               |   4
-rw-r--r--  src/rpcblockchain.cpp                |  16
-rw-r--r--  src/rpcdump.cpp                      |  12
-rw-r--r--  src/rpcmining.cpp                    |  17
-rw-r--r--  src/rpcrawtransaction.cpp            |   4
-rw-r--r--  src/rpcwallet.cpp                    |  23
-rw-r--r--  src/test/miner_tests.cpp             |  12
-rw-r--r--  src/txdb.cpp                         |   4
-rw-r--r--  src/wallet.cpp                       |  10
34 files changed, 584 insertions, 283 deletions
diff --git a/src/bitcoinrpc.cpp b/src/bitcoinrpc.cpp
index 2c8db7b53a..798660dff3 100644
--- a/src/bitcoinrpc.cpp
+++ b/src/bitcoinrpc.cpp
@@ -882,7 +882,8 @@ void StopRPCThreads()
deadlineTimers.clear();
rpc_io_service->stop();
- rpc_worker_group->join_all();
+ if (rpc_worker_group != NULL)
+ rpc_worker_group->join_all();
delete rpc_worker_group; rpc_worker_group = NULL;
delete rpc_ssl_context; rpc_ssl_context = NULL;
delete rpc_io_service; rpc_io_service = NULL;
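The added NULL check matters because StopRPCThreads() can run during an early shutdown, before StartRPCThreads() ever allocated rpc_worker_group: calling join_all() through a null pointer is undefined behaviour, whereas the delete statements below were already safe on NULL. A minimal sketch of the same teardown idiom (WorkerGroup and g_workers are illustrative names, not from the tree):

    #include <cstddef>

    struct WorkerGroup { void join_all() {} };

    static WorkerGroup* g_workers = NULL;  // may never be created on early shutdown

    void StopWorkers()
    {
        if (g_workers != NULL)      // join only if the group was actually started
            g_workers->join_all();
        delete g_workers;           // delete on NULL is a no-op, so no guard needed
        g_workers = NULL;
    }

    int main() { StopWorkers(); return 0; }  // safe even though nothing was started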
diff --git a/src/chainparams.h b/src/chainparams.h
index ce3c14306d..3f99b7eb06 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -68,7 +68,7 @@ public:
virtual const vector<CAddress>& FixedSeeds() const = 0;
int RPCPort() const { return nRPCPort; }
protected:
- CChainParams() {};
+ CChainParams() {}
uint256 hashGenesisBlock;
MessageStartChars pchMessageStart;
diff --git a/src/init.cpp b/src/init.cpp
index e75e981a57..dcd65198c3 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -116,7 +116,7 @@ void Shutdown()
{
LOCK(cs_main);
if (pwalletMain)
- pwalletMain->SetBestChain(CBlockLocator(pindexBest));
+ pwalletMain->SetBestChain(CBlockLocator(chainActive.Tip()));
if (pblocktree)
pblocktree->Flush();
if (pcoinsTip)
@@ -766,7 +766,7 @@ bool AppInit2(boost::thread_group& threadGroup)
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way around).
- if (!mapBlockIndex.empty() && pindexGenesisBlock == NULL)
+ if (!mapBlockIndex.empty() && chainActive.Genesis() == NULL)
return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?"));
// Initialize the block index (no-op if non-empty database was already loaded)
@@ -912,7 +912,7 @@ bool AppInit2(boost::thread_group& threadGroup)
strErrors << _("Cannot write default address") << "\n";
}
- pwalletMain->SetBestChain(CBlockLocator(pindexBest));
+ pwalletMain->SetBestChain(CBlockLocator(chainActive.Tip()));
}
LogPrintf("%s", strErrors.str().c_str());
@@ -920,9 +920,9 @@ bool AppInit2(boost::thread_group& threadGroup)
RegisterWallet(pwalletMain);
- CBlockIndex *pindexRescan = pindexBest;
+ CBlockIndex *pindexRescan = chainActive.Tip();
if (GetBoolArg("-rescan", false))
- pindexRescan = pindexGenesisBlock;
+ pindexRescan = chainActive.Genesis();
else
{
CWalletDB walletdb(strWalletFile);
@@ -930,16 +930,16 @@ bool AppInit2(boost::thread_group& threadGroup)
if (walletdb.ReadBestBlock(locator))
pindexRescan = locator.GetBlockIndex();
else
- pindexRescan = pindexGenesisBlock;
+ pindexRescan = chainActive.Genesis();
}
- if (pindexBest && pindexBest != pindexRescan)
+ if (chainActive.Tip() && chainActive.Tip() != pindexRescan)
{
uiInterface.InitMessage(_("Rescanning..."));
- LogPrintf("Rescanning last %i blocks (from block %i)...\n", pindexBest->nHeight - pindexRescan->nHeight, pindexRescan->nHeight);
+ LogPrintf("Rescanning last %i blocks (from block %i)...\n", chainActive.Height() - pindexRescan->nHeight, pindexRescan->nHeight);
nStart = GetTimeMillis();
pwalletMain->ScanForWalletTransactions(pindexRescan, true);
LogPrintf(" rescan %15"PRI64d"ms\n", GetTimeMillis() - nStart);
- pwalletMain->SetBestChain(CBlockLocator(pindexBest));
+ pwalletMain->SetBestChain(CBlockLocator(chainActive.Tip()));
nWalletDBUpdated++;
}
@@ -985,7 +985,7 @@ bool AppInit2(boost::thread_group& threadGroup)
//// debug print
LogPrintf("mapBlockIndex.size() = %"PRIszu"\n", mapBlockIndex.size());
- LogPrintf("nBestHeight = %d\n", nBestHeight);
+ LogPrintf("nBestHeight = %d\n", chainActive.Height());
LogPrintf("setKeyPool.size() = %"PRIszu"\n", pwalletMain ? pwalletMain->setKeyPool.size() : 0);
LogPrintf("mapWallet.size() = %"PRIszu"\n", pwalletMain ? pwalletMain->mapWallet.size() : 0);
LogPrintf("mapAddressBook.size() = %"PRIszu"\n", pwalletMain ? pwalletMain->mapAddressBook.size() : 0);
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
index 96af7765be..20c9c4f287 100644
--- a/src/leveldb/Makefile
+++ b/src/leveldb/Makefile
@@ -31,6 +31,7 @@ TESTHARNESS = ./util/testharness.o $(TESTUTIL)
TESTS = \
arena_test \
+ autocompact_test \
bloom_test \
c_test \
cache_test \
@@ -70,7 +71,7 @@ SHARED = $(SHARED1)
else
# Update db.h if you change these.
SHARED_MAJOR = 1
-SHARED_MINOR = 12
+SHARED_MINOR = 13
SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1).$(SHARED_MAJOR)
SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
@@ -114,6 +115,9 @@ leveldbutil: db/leveldb_main.o $(LIBOBJECTS)
arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+autocompact_test: db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) $(LDFLAGS) util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
diff --git a/src/leveldb/db/autocompact_test.cc b/src/leveldb/db/autocompact_test.cc
new file mode 100644
index 0000000000..d20a2362c3
--- /dev/null
+++ b/src/leveldb/db/autocompact_test.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/db.h"
+#include "db/db_impl.h"
+#include "leveldb/cache.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+class AutoCompactTest {
+ public:
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
+
+ AutoCompactTest() {
+ dbname_ = test::TmpDir() + "/autocompact_test";
+ tiny_cache_ = NewLRUCache(100);
+ options_.block_cache = tiny_cache_;
+ DestroyDB(dbname_, options_);
+ options_.create_if_missing = true;
+ options_.compression = kNoCompression;
+ ASSERT_OK(DB::Open(options_, dbname_, &db_));
+ }
+
+ ~AutoCompactTest() {
+ delete db_;
+ DestroyDB(dbname_, Options());
+ delete tiny_cache_;
+ }
+
+ std::string Key(int i) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "key%06d", i);
+ return std::string(buf);
+ }
+
+ uint64_t Size(const Slice& start, const Slice& limit) {
+ Range r(start, limit);
+ uint64_t size;
+ db_->GetApproximateSizes(&r, 1, &size);
+ return size;
+ }
+
+ void DoReads(int n);
+};
+
+static const int kValueSize = 200 * 1024;
+static const int kTotalSize = 100 * 1024 * 1024;
+static const int kCount = kTotalSize / kValueSize;
+
+// Read through the first n keys repeatedly and check that they get
+// compacted (verified by checking the size of the key space).
+void AutoCompactTest::DoReads(int n) {
+ std::string value(kValueSize, 'x');
+ DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+
+ // Fill database
+ for (int i = 0; i < kCount; i++) {
+ ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
+ }
+ ASSERT_OK(dbi->TEST_CompactMemTable());
+
+ // Delete everything
+ for (int i = 0; i < kCount; i++) {
+ ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
+ }
+ ASSERT_OK(dbi->TEST_CompactMemTable());
+
+ // Get initial measurement of the space we will be reading.
+ const int64_t initial_size = Size(Key(0), Key(n));
+ const int64_t initial_other_size = Size(Key(n), Key(kCount));
+
+ // Read until size drops significantly.
+ std::string limit_key = Key(n);
+ for (int read = 0; true; read++) {
+ ASSERT_LT(read, 100) << "Taking too long to compact";
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ for (iter->SeekToFirst();
+ iter->Valid() && iter->key().ToString() < limit_key;
+ iter->Next()) {
+ // Drop data
+ }
+ delete iter;
+ // Wait a little bit to allow any triggered compactions to complete.
+ Env::Default()->SleepForMicroseconds(1000000);
+ uint64_t size = Size(Key(0), Key(n));
+ fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
+ read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
+ if (size <= initial_size/10) {
+ break;
+ }
+ }
+
+ // Verify that the size of the key space not touched by the reads
+ // is pretty much unchanged.
+ const int64_t final_other_size = Size(Key(n), Key(kCount));
+ ASSERT_LE(final_other_size, initial_other_size + 1048576);
+ ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+}
+
+TEST(AutoCompactTest, ReadAll) {
+ DoReads(kCount);
+}
+
+TEST(AutoCompactTest, ReadHalf) {
+ DoReads(kCount/2);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
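A quick sanity check on the test's constants (an illustrative calculation, not part of the commit): 200 KiB values against a 100 MiB budget give 512 keys, and the read loop then allows compaction at most 100 passes, each followed by a one-second sleep, to shrink the scanned range to a tenth of its starting size.

    #include <cstdio>

    int main()
    {
        const int kValueSize = 200 * 1024;           // bytes per value
        const int kTotalSize = 100 * 1024 * 1024;    // total data written
        const int kCount     = kTotalSize / kValueSize;
        std::printf("keys written and deleted: %d\n", kCount);   // 512
        // The read loop then gives compaction up to 100 iterations (each with a
        // one-second sleep) to shrink the scanned range to <= initial_size / 10.
        return 0;
    }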
diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc
index 31b2d5f416..b37ffdfe64 100644
--- a/src/leveldb/db/corruption_test.cc
+++ b/src/leveldb/db/corruption_test.cc
@@ -35,6 +35,7 @@ class CorruptionTest {
CorruptionTest() {
tiny_cache_ = NewLRUCache(100);
options_.env = &env_;
+ options_.block_cache = tiny_cache_;
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, options_);
@@ -50,17 +51,14 @@ class CorruptionTest {
delete tiny_cache_;
}
- Status TryReopen(Options* options = NULL) {
+ Status TryReopen() {
delete db_;
db_ = NULL;
- Options opt = (options ? *options : options_);
- opt.env = &env_;
- opt.block_cache = tiny_cache_;
- return DB::Open(opt, dbname_, &db_);
+ return DB::Open(options_, dbname_, &db_);
}
- void Reopen(Options* options = NULL) {
- ASSERT_OK(TryReopen(options));
+ void Reopen() {
+ ASSERT_OK(TryReopen());
}
void RepairDB() {
@@ -92,6 +90,10 @@ class CorruptionTest {
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
uint64_t key;
Slice in(iter->key());
+ if (in == "" || in == "~") {
+ // Ignore boundary keys.
+ continue;
+ }
if (!ConsumeDecimalNumber(&in, &key) ||
!in.empty() ||
key < next_expected) {
@@ -233,7 +235,7 @@ TEST(CorruptionTest, TableFile) {
dbi->TEST_CompactRange(1, NULL, NULL);
Corrupt(kTableFile, 100, 1);
- Check(99, 99);
+ Check(90, 99);
}
TEST(CorruptionTest, TableFileIndexData) {
@@ -299,7 +301,7 @@ TEST(CorruptionTest, CompactionInputError) {
ASSERT_EQ(1, Property("leveldb.num-files-at-level" + NumberToString(last)));
Corrupt(kTableFile, 100, 1);
- Check(9, 9);
+ Check(5, 9);
// Force compactions by writing lots of values
Build(10000);
@@ -307,32 +309,23 @@ TEST(CorruptionTest, CompactionInputError) {
}
TEST(CorruptionTest, CompactionInputErrorParanoid) {
- Options options;
- options.paranoid_checks = true;
- options.write_buffer_size = 1048576;
- Reopen(&options);
+ options_.paranoid_checks = true;
+ options_.write_buffer_size = 512 << 10;
+ Reopen();
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
- // Fill levels >= 1 so memtable compaction outputs to level 1
- for (int level = 1; level < config::kNumLevels; level++) {
- dbi->Put(WriteOptions(), "", "begin");
- dbi->Put(WriteOptions(), "~", "end");
+ // Make multiple inputs so we need to compact.
+ for (int i = 0; i < 2; i++) {
+ Build(10);
dbi->TEST_CompactMemTable();
+ Corrupt(kTableFile, 100, 1);
+ env_.SleepForMicroseconds(100000);
}
+ dbi->CompactRange(NULL, NULL);
- Build(10);
- dbi->TEST_CompactMemTable();
- ASSERT_EQ(1, Property("leveldb.num-files-at-level0"));
-
- Corrupt(kTableFile, 100, 1);
- Check(9, 9);
-
- // Write must eventually fail because of corrupted table
- Status s;
+ // Write must fail because of corrupted table
std::string tmp1, tmp2;
- for (int i = 0; i < 10000 && s.ok(); i++) {
- s = db_->Put(WriteOptions(), Key(i, &tmp1), Value(i, &tmp2));
- }
+ Status s = db_->Put(WriteOptions(), Key(5, &tmp1), Value(5, &tmp2));
ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
}
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index 395d3172ad..fa1351038b 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -113,14 +113,14 @@ Options SanitizeOptions(const std::string& dbname,
return result;
}
-DBImpl::DBImpl(const Options& options, const std::string& dbname)
- : env_(options.env),
- internal_comparator_(options.comparator),
- internal_filter_policy_(options.filter_policy),
- options_(SanitizeOptions(
- dbname, &internal_comparator_, &internal_filter_policy_, options)),
- owns_info_log_(options_.info_log != options.info_log),
- owns_cache_(options_.block_cache != options.block_cache),
+DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
+ : env_(raw_options.env),
+ internal_comparator_(raw_options.comparator),
+ internal_filter_policy_(raw_options.filter_policy),
+ options_(SanitizeOptions(dbname, &internal_comparator_,
+ &internal_filter_policy_, raw_options)),
+ owns_info_log_(options_.info_log != raw_options.info_log),
+ owns_cache_(options_.block_cache != raw_options.block_cache),
dbname_(dbname),
db_lock_(NULL),
shutting_down_(NULL),
@@ -130,6 +130,7 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
logfile_(NULL),
logfile_number_(0),
log_(NULL),
+ seed_(0),
tmp_batch_(new WriteBatch),
bg_compaction_scheduled_(false),
manual_compaction_(NULL),
@@ -138,7 +139,7 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
has_imm_.Release_Store(NULL);
// Reserve ten files or so for other uses and give the rest to TableCache.
- const int table_cache_size = options.max_open_files - kNumNonTableCacheFiles;
+ const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles;
table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
versions_ = new VersionSet(dbname_, &options_, table_cache_,
@@ -1027,7 +1028,8 @@ static void CleanupIteratorState(void* arg1, void* arg2) {
} // namespace
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
- SequenceNumber* latest_snapshot) {
+ SequenceNumber* latest_snapshot,
+ uint32_t* seed) {
IterState* cleanup = new IterState;
mutex_.Lock();
*latest_snapshot = versions_->LastSequence();
@@ -1051,13 +1053,15 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
cleanup->version = versions_->current();
internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);
+ *seed = ++seed_;
mutex_.Unlock();
return internal_iter;
}
Iterator* DBImpl::TEST_NewInternalIterator() {
SequenceNumber ignored;
- return NewInternalIterator(ReadOptions(), &ignored);
+ uint32_t ignored_seed;
+ return NewInternalIterator(ReadOptions(), &ignored, &ignored_seed);
}
int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
@@ -1114,12 +1118,21 @@ Status DBImpl::Get(const ReadOptions& options,
Iterator* DBImpl::NewIterator(const ReadOptions& options) {
SequenceNumber latest_snapshot;
- Iterator* internal_iter = NewInternalIterator(options, &latest_snapshot);
+ uint32_t seed;
+ Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
return NewDBIterator(
- &dbname_, env_, user_comparator(), internal_iter,
+ this, user_comparator(), iter,
(options.snapshot != NULL
? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
- : latest_snapshot));
+ : latest_snapshot),
+ seed);
+}
+
+void DBImpl::RecordReadSample(Slice key) {
+ MutexLock l(&mutex_);
+ if (versions_->current()->RecordReadSample(key)) {
+ MaybeScheduleCompaction();
+ }
}
const Snapshot* DBImpl::GetSnapshot() {
diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h
index 3c8d711ae0..75fd30abe9 100644
--- a/src/leveldb/db/db_impl.h
+++ b/src/leveldb/db/db_impl.h
@@ -59,13 +59,19 @@ class DBImpl : public DB {
// file at a level >= 1.
int64_t TEST_MaxNextLevelOverlappingBytes();
+ // Record a sample of bytes read at the specified internal key.
+ // Samples are taken approximately once every config::kReadBytesPeriod
+ // bytes.
+ void RecordReadSample(Slice key);
+
private:
friend class DB;
struct CompactionState;
struct Writer;
Iterator* NewInternalIterator(const ReadOptions&,
- SequenceNumber* latest_snapshot);
+ SequenceNumber* latest_snapshot,
+ uint32_t* seed);
Status NewDB();
@@ -135,6 +141,7 @@ class DBImpl : public DB {
WritableFile* logfile_;
uint64_t logfile_number_;
log::Writer* log_;
+ uint32_t seed_; // For sampling.
// Queue of writers.
std::deque<Writer*> writers_;
diff --git a/src/leveldb/db/db_iter.cc b/src/leveldb/db/db_iter.cc
index 87dca2ded4..071a54e3f4 100644
--- a/src/leveldb/db/db_iter.cc
+++ b/src/leveldb/db/db_iter.cc
@@ -5,12 +5,14 @@
#include "db/db_iter.h"
#include "db/filename.h"
+#include "db/db_impl.h"
#include "db/dbformat.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "port/port.h"
#include "util/logging.h"
#include "util/mutexlock.h"
+#include "util/random.h"
namespace leveldb {
@@ -46,15 +48,16 @@ class DBIter: public Iterator {
kReverse
};
- DBIter(const std::string* dbname, Env* env,
- const Comparator* cmp, Iterator* iter, SequenceNumber s)
- : dbname_(dbname),
- env_(env),
+ DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
+ uint32_t seed)
+ : db_(db),
user_comparator_(cmp),
iter_(iter),
sequence_(s),
direction_(kForward),
- valid_(false) {
+ valid_(false),
+ rnd_(seed),
+ bytes_counter_(RandomPeriod()) {
}
virtual ~DBIter() {
delete iter_;
@@ -100,8 +103,12 @@ class DBIter: public Iterator {
}
}
- const std::string* const dbname_;
- Env* const env_;
+ // Pick next gap with average value of config::kReadBytesPeriod.
+ ssize_t RandomPeriod() {
+ return rnd_.Uniform(2*config::kReadBytesPeriod);
+ }
+
+ DBImpl* db_;
const Comparator* const user_comparator_;
Iterator* const iter_;
SequenceNumber const sequence_;
@@ -112,13 +119,23 @@ class DBIter: public Iterator {
Direction direction_;
bool valid_;
+ Random rnd_;
+ ssize_t bytes_counter_;
+
// No copying allowed
DBIter(const DBIter&);
void operator=(const DBIter&);
};
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
- if (!ParseInternalKey(iter_->key(), ikey)) {
+ Slice k = iter_->key();
+ ssize_t n = k.size() + iter_->value().size();
+ bytes_counter_ -= n;
+ while (bytes_counter_ < 0) {
+ bytes_counter_ += RandomPeriod();
+ db_->RecordReadSample(k);
+ }
+ if (!ParseInternalKey(k, ikey)) {
status_ = Status::Corruption("corrupted internal key in DBIter");
return false;
} else {
@@ -288,12 +305,12 @@ void DBIter::SeekToLast() {
} // anonymous namespace
Iterator* NewDBIterator(
- const std::string* dbname,
- Env* env,
+ DBImpl* db,
const Comparator* user_key_comparator,
Iterator* internal_iter,
- const SequenceNumber& sequence) {
- return new DBIter(dbname, env, user_key_comparator, internal_iter, sequence);
+ SequenceNumber sequence,
+ uint32_t seed) {
+ return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
}
} // namespace leveldb
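The sampling logic added to ParseKey can be read on its own: every key/value the iterator touches is charged against a byte counter, and whenever the counter goes negative a sample is reported and the counter is refilled with a uniform draw from [0, 2*kReadBytesPeriod), so samples arrive on average once per kReadBytesPeriod bytes read. A standalone sketch of that pattern, with RecordSample and the rand()-based RandomPeriod standing in for DBImpl::RecordReadSample and util::Random (assumptions for illustration, not the library API):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    static const int64_t kReadBytesPeriod = 1048576;   // ~1 MiB average gap

    // Uniform draw in [0, 2*kReadBytesPeriod), so refills average kReadBytesPeriod.
    static int64_t RandomPeriod() {
        return std::rand() % (2 * kReadBytesPeriod);
    }

    static void RecordSample(const std::string& key) {
        // In the real code this is DBImpl::RecordReadSample(key), which may
        // schedule a compaction if two or more files overlap the sampled key.
        std::printf("sample at %s\n", key.c_str());
    }

    int main() {
        int64_t bytes_counter = RandomPeriod();        // first gap, as in DBIter's ctor
        for (int i = 0; i < 1000000; i++) {
            std::string key = "key" + std::to_string(i);
            std::string value(200, 'x');
            bytes_counter -= static_cast<int64_t>(key.size() + value.size());
            while (bytes_counter < 0) {                // charge the bytes just read
                bytes_counter += RandomPeriod();       // refill with a random gap
                RecordSample(key);                     // report this key as heavily read
            }
        }
        return 0;
    }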
diff --git a/src/leveldb/db/db_iter.h b/src/leveldb/db/db_iter.h
index d9e1b174ab..04927e937b 100644
--- a/src/leveldb/db/db_iter.h
+++ b/src/leveldb/db/db_iter.h
@@ -11,15 +11,17 @@
namespace leveldb {
+class DBImpl;
+
// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified "sequence" number
// into appropriate user keys.
extern Iterator* NewDBIterator(
- const std::string* dbname,
- Env* env,
+ DBImpl* db,
const Comparator* user_key_comparator,
Iterator* internal_iter,
- const SequenceNumber& sequence);
+ SequenceNumber sequence,
+ uint32_t seed);
} // namespace leveldb
diff --git a/src/leveldb/db/dbformat.h b/src/leveldb/db/dbformat.h
index f7f64dafb6..5d8a032bd3 100644
--- a/src/leveldb/db/dbformat.h
+++ b/src/leveldb/db/dbformat.h
@@ -38,6 +38,9 @@ static const int kL0_StopWritesTrigger = 12;
// space if the same key space is being repeatedly overwritten.
static const int kMaxMemCompactLevel = 2;
+// Approximate gap in bytes between samples of data read during iteration.
+static const int kReadBytesPeriod = 1048576;
+
} // namespace config
class InternalKey;
diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc
index 4fd1ddef21..66d73be71f 100644
--- a/src/leveldb/db/version_set.cc
+++ b/src/leveldb/db/version_set.cc
@@ -289,6 +289,51 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
return a->number > b->number;
}
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
+ void* arg,
+ bool (*func)(void*, int, FileMetaData*)) {
+ // TODO(sanjay): Change Version::Get() to use this function.
+ const Comparator* ucmp = vset_->icmp_.user_comparator();
+
+ // Search level-0 in order from newest to oldest.
+ std::vector<FileMetaData*> tmp;
+ tmp.reserve(files_[0].size());
+ for (uint32_t i = 0; i < files_[0].size(); i++) {
+ FileMetaData* f = files_[0][i];
+ if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
+ ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
+ tmp.push_back(f);
+ }
+ }
+ if (!tmp.empty()) {
+ std::sort(tmp.begin(), tmp.end(), NewestFirst);
+ for (uint32_t i = 0; i < tmp.size(); i++) {
+ if (!(*func)(arg, 0, tmp[i])) {
+ return;
+ }
+ }
+ }
+
+ // Search other levels.
+ for (int level = 1; level < config::kNumLevels; level++) {
+ size_t num_files = files_[level].size();
+ if (num_files == 0) continue;
+
+ // Binary search to find earliest index whose largest key >= internal_key.
+ uint32_t index = FindFile(vset_->icmp_, files_[level], internal_key);
+ if (index < num_files) {
+ FileMetaData* f = files_[level][index];
+ if (ucmp->Compare(user_key, f->smallest.user_key()) < 0) {
+ // All of "f" is past any data for user_key
+ } else {
+ if (!(*func)(arg, level, f)) {
+ return;
+ }
+ }
+ }
+ }
+}
+
Status Version::Get(const ReadOptions& options,
const LookupKey& k,
std::string* value,
@@ -401,6 +446,44 @@ bool Version::UpdateStats(const GetStats& stats) {
return false;
}
+bool Version::RecordReadSample(Slice internal_key) {
+ ParsedInternalKey ikey;
+ if (!ParseInternalKey(internal_key, &ikey)) {
+ return false;
+ }
+
+ struct State {
+ GetStats stats; // Holds first matching file
+ int matches;
+
+ static bool Match(void* arg, int level, FileMetaData* f) {
+ State* state = reinterpret_cast<State*>(arg);
+ state->matches++;
+ if (state->matches == 1) {
+ // Remember first match.
+ state->stats.seek_file = f;
+ state->stats.seek_file_level = level;
+ }
+ // We can stop iterating once we have a second match.
+ return state->matches < 2;
+ }
+ };
+
+ State state;
+ state.matches = 0;
+ ForEachOverlapping(ikey.user_key, internal_key, &state, &State::Match);
+
+ // Must have at least two matches since we want to merge across
+ // files. But what if we have a single file that contains many
+ // overwrites and deletions? Should we have another mechanism for
+ // finding such files?
+ if (state.matches >= 2) {
+ // 1MB cost is about 1 seek (see comment in Builder::Apply).
+ return UpdateStats(state.stats);
+ }
+ return false;
+}
+
void Version::Ref() {
++refs_;
}
@@ -435,10 +518,13 @@ int Version::PickLevelForMemTableOutput(
if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
break;
}
- GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
- const int64_t sum = TotalFileSize(overlaps);
- if (sum > kMaxGrandParentOverlapBytes) {
- break;
+ if (level + 2 < config::kNumLevels) {
+ // Check that file does not overlap too many grandparent bytes.
+ GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
+ const int64_t sum = TotalFileSize(overlaps);
+ if (sum > kMaxGrandParentOverlapBytes) {
+ break;
+ }
}
level++;
}
@@ -452,6 +538,8 @@ void Version::GetOverlappingInputs(
const InternalKey* begin,
const InternalKey* end,
std::vector<FileMetaData*>* inputs) {
+ assert(level >= 0);
+ assert(level < config::kNumLevels);
inputs->clear();
Slice user_begin, user_end;
if (begin != NULL) {
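Version::RecordReadSample only charges a seek (via UpdateStats) when at least two files overlap the sampled key, because a read that has to merge across files is exactly the case a compaction would make cheaper. The State struct with a static Match member is the void*-plus-function-pointer callback idiom that ForEachOverlapping expects; a reduced sketch of that idiom (FileMetaData and the ForEach walker here are simplified stand-ins, not the leveldb definitions):

    #include <cstdio>
    #include <vector>

    struct FileMetaData { int number; };   // simplified stand-in

    // Walk files newest-to-oldest, stopping as soon as func returns false,
    // mirroring the contract of Version::ForEachOverlapping.
    static void ForEach(const std::vector<FileMetaData*>& files,
                        void* arg, bool (*func)(void*, int, FileMetaData*)) {
        for (size_t i = 0; i < files.size(); i++)
            if (!(*func)(arg, /*level=*/0, files[i]))
                return;
    }

    struct State {
        FileMetaData* first;   // first (newest) file containing the key
        int matches;
        static bool Match(void* arg, int level, FileMetaData* f) {
            State* s = reinterpret_cast<State*>(arg);
            if (++s->matches == 1) s->first = f;   // remember the first hit
            return s->matches < 2;                 // stop once a second file matches
        }
    };

    int main() {
        FileMetaData a = {7}, b = {5}, c = {3};
        std::vector<FileMetaData*> files;
        files.push_back(&a); files.push_back(&b); files.push_back(&c);

        State state = { NULL, 0 };
        ForEach(files, &state, &State::Match);
        // With two or more overlapping files the real code calls UpdateStats()
        // on the first match, which may schedule a seek-triggered compaction.
        std::printf("matches=%d first=%d\n", state.matches, state.first->number);
        return 0;
    }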
diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h
index 9d084fdb7d..20de0e2629 100644
--- a/src/leveldb/db/version_set.h
+++ b/src/leveldb/db/version_set.h
@@ -78,6 +78,12 @@ class Version {
// REQUIRES: lock is held
bool UpdateStats(const GetStats& stats);
+ // Record a sample of bytes read at the specified internal key.
+ // Samples are taken approximately once every config::kReadBytesPeriod
+ // bytes. Returns true if a new compaction may need to be triggered.
+ // REQUIRES: lock is held
+ bool RecordReadSample(Slice key);
+
// Reference count management (so Versions do not disappear out from
// under live iterators)
void Ref();
@@ -114,6 +120,15 @@ class Version {
class LevelFileNumIterator;
Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
+ // Call func(arg, level, f) for every file that overlaps user_key in
+ // order from newest to oldest. If an invocation of func returns
+ // false, makes no more calls.
+ //
+ // REQUIRES: user portion of internal_key == user_key.
+ void ForEachOverlapping(Slice user_key, Slice internal_key,
+ void* arg,
+ bool (*func)(void*, int, FileMetaData*));
+
VersionSet* vset_; // VersionSet to which this Version belongs
Version* next_; // Next version in linked list
Version* prev_; // Previous version in linked list
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index da8b11a8c0..57c00a5da0 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -14,7 +14,7 @@ namespace leveldb {
// Update Makefile if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 12;
+static const int kMinorVersion = 13;
struct Options;
struct ReadOptions;
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index 6badfdc230..0f5dcfac5a 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -320,8 +320,39 @@ class PosixMmapFile : public WritableFile {
return Status::OK();
}
- virtual Status Sync() {
+ Status SyncDirIfManifest() {
+ const char* f = filename_.c_str();
+ const char* sep = strrchr(f, '/');
+ Slice basename;
+ std::string dir;
+ if (sep == NULL) {
+ dir = ".";
+ basename = f;
+ } else {
+ dir = std::string(f, sep - f);
+ basename = sep + 1;
+ }
Status s;
+ if (basename.starts_with("MANIFEST")) {
+ int fd = open(dir.c_str(), O_RDONLY);
+ if (fd < 0) {
+ s = IOError(dir, errno);
+ } else {
+ if (fsync(fd) < 0) {
+ s = IOError(dir, errno);
+ }
+ close(fd);
+ }
+ }
+ return s;
+ }
+
+ virtual Status Sync() {
+ // Ensure new files referred to by the manifest are in the filesystem.
+ Status s = SyncDirIfManifest();
+ if (!s.ok()) {
+ return s;
+ }
if (pending_sync_) {
// Some unmapped data was not synced
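SyncDirIfManifest covers a POSIX durability detail: fsync() on a file makes its contents durable, but a newly created file's directory entry only becomes durable once the containing directory itself is fsync()'d, and the MANIFEST is the file that names the database's current contents. A minimal standalone illustration of syncing a directory (error handling trimmed, paths purely illustrative):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <fcntl.h>
    #include <unistd.h>

    // Flush a directory so recently created entries inside it survive a crash.
    static int SyncDir(const char* dir) {
        int fd = open(dir, O_RDONLY);   // a directory can be opened read-only for fsync
        if (fd < 0) return errno;
        int rc = (fsync(fd) < 0) ? errno : 0;
        close(fd);
        return rc;
    }

    int main() {
        // After creating a new MANIFEST inside a database directory, syncing that
        // directory makes the new entry durable too; /tmp here is just a stand-in.
        int rc = SyncDir("/tmp");
        std::fprintf(stderr, "SyncDir: %s\n", rc == 0 ? "ok" : std::strerror(rc));
        return 0;
    }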
diff --git a/src/leveldb/util/random.h b/src/leveldb/util/random.h
index 07538242ea..ddd51b1c7b 100644
--- a/src/leveldb/util/random.h
+++ b/src/leveldb/util/random.h
@@ -16,7 +16,12 @@ class Random {
private:
uint32_t seed_;
public:
- explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { }
+ explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
+ // Avoid bad seeds.
+ if (seed_ == 0 || seed_ == 2147483647L) {
+ seed_ = 1;
+ }
+ }
uint32_t Next() {
static const uint32_t M = 2147483647L; // 2^31-1
static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
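The two rejected seeds are the degenerate inputs of the Park-Miller recurrence this class implements, seed' = seed * 16807 mod (2^31 - 1): a seed of 0 stays 0 forever, and 2147483647 (the modulus itself) is likewise a fixed point, so either one would make every Next() call return the same value. A quick demonstration that reproduces the existing Next() recurrence for illustration:

    #include <cstdint>
    #include <cstdio>

    // Same recurrence as leveldb::Random::Next(), reproduced here for illustration.
    static uint32_t Next(uint32_t& seed) {
        static const uint32_t M = 2147483647u;  // 2^31 - 1
        static const uint64_t A = 16807;
        uint64_t product = seed * A;
        seed = static_cast<uint32_t>((product >> 31) + (product & M));
        if (seed > M) seed -= M;
        return seed;
    }

    int main() {
        uint32_t zero = 0, modulus = 2147483647u, good = 301;
        for (int i = 0; i < 3; i++)
            std::printf("0 -> %u   M -> %u   301 -> %u\n",
                        Next(zero), Next(modulus), Next(good));
        // The first two columns never change while the third cycles normally,
        // which is why the constructor now remaps both bad seeds to 1.
        return 0;
    }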
diff --git a/src/main.cpp b/src/main.cpp
index 5ce330a9dc..732a828524 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -32,13 +32,8 @@ CTxMemPool mempool;
unsigned int nTransactionsUpdated = 0;
map<uint256, CBlockIndex*> mapBlockIndex;
-std::vector<CBlockIndex*> vBlockIndexByHeight;
-CBlockIndex* pindexGenesisBlock = NULL;
-int nBestHeight = -1;
-uint256 nBestChainWork = 0;
+CChain chainActive;
uint256 nBestInvalidWork = 0;
-uint256 hashBestChain = 0;
-CBlockIndex* pindexBest = NULL;
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexValid; // may contain all CBlockIndex*'s that have validness >=BLOCK_VALID_TRANSACTIONS, and must contain those who aren't failed
int64 nTimeBestReceived = 0;
int nScriptCheckThreads = 0;
@@ -173,14 +168,22 @@ void static ResendWalletTransactions()
// Registration of network node signals.
//
+int static GetHeight()
+{
+ LOCK(cs_main);
+ return chainActive.Height();
+}
+
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
+ nodeSignals.GetHeight.connect(&GetHeight);
nodeSignals.ProcessMessages.connect(&ProcessMessages);
nodeSignals.SendMessages.connect(&SendMessages);
}
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
+ nodeSignals.GetHeight.disconnect(&GetHeight);
nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
nodeSignals.SendMessages.disconnect(&SendMessages);
}
@@ -225,7 +228,7 @@ int CBlockLocator::GetDistanceBack()
if (mi != mapBlockIndex.end())
{
CBlockIndex* pindex = (*mi).second;
- if (pindex->IsInMainChain())
+ if (chainActive.Contains(pindex))
return nDistance;
}
nDistance += nStep;
@@ -244,11 +247,11 @@ CBlockIndex *CBlockLocator::GetBlockIndex()
if (mi != mapBlockIndex.end())
{
CBlockIndex* pindex = (*mi).second;
- if (pindex->IsInMainChain())
+ if (chainActive.Contains(pindex))
return pindex;
}
}
- return pindexGenesisBlock;
+ return chainActive.Genesis();
}
uint256 CBlockLocator::GetBlockHash()
@@ -260,7 +263,7 @@ uint256 CBlockLocator::GetBlockHash()
if (mi != mapBlockIndex.end())
{
CBlockIndex* pindex = (*mi).second;
- if (pindex->IsInMainChain())
+ if (chainActive.Contains(pindex))
return hash;
}
}
@@ -275,6 +278,19 @@ int CBlockLocator::GetHeight()
return pindex->nHeight;
}
+CBlockIndex *CChain::SetTip(CBlockIndex *pindex) {
+ if (pindex == NULL) {
+ std::vector<CBlockIndex*>().swap(vChain);
+ return NULL;
+ }
+ vChain.resize(pindex->nHeight + 1);
+ while (pindex && vChain[pindex->nHeight] != pindex) {
+ vChain[pindex->nHeight] = pindex;
+ pindex = pindex->pprev;
+ }
+ return pindex;
+}
+
//////////////////////////////////////////////////////////////////////////////
//
// CCoinsView implementations
@@ -517,7 +533,7 @@ bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64 nBlockTime)
if (tx.nLockTime == 0)
return true;
if (nBlockHeight == 0)
- nBlockHeight = nBestHeight;
+ nBlockHeight = chainActive.Height();
if (nBlockTime == 0)
nBlockTime = GetAdjustedTime();
if ((int64)tx.nLockTime < ((int64)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64)nBlockHeight : nBlockTime))
@@ -644,7 +660,7 @@ int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
if (pblock == NULL) {
CCoins coins;
if (pcoinsTip->GetCoins(GetHash(), coins)) {
- CBlockIndex *pindex = FindBlockByHeight(coins.nHeight);
+ CBlockIndex *pindex = chainActive[coins.nHeight];
if (pindex) {
if (!ReadBlockFromDisk(blockTmp, pindex))
return 0;
@@ -678,10 +694,10 @@ int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
- if (!pindex || !pindex->IsInMainChain())
+ if (!pindex || !chainActive.Contains(pindex))
return 0;
- return pindexBest->nHeight - pindex->nHeight + 1;
+ return chainActive.Height() - pindex->nHeight + 1;
}
@@ -1078,7 +1094,7 @@ int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
- if (!pindex || !pindex->IsInMainChain())
+ if (!pindex || !chainActive.Contains(pindex))
return 0;
// Make sure the merkle branch connects to this block
@@ -1090,7 +1106,7 @@ int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
}
pindexRet = pindex;
- return pindexBest->nHeight - pindex->nHeight + 1;
+ return chainActive.Height() - pindex->nHeight + 1;
}
@@ -1173,7 +1189,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock
nHeight = coins.nHeight;
}
if (nHeight > 0)
- pindexSlow = FindBlockByHeight(nHeight);
+ pindexSlow = chainActive[nHeight];
}
}
@@ -1203,14 +1219,6 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock
// CBlock and CBlockIndex
//
-static CBlockIndex* pblockindexFBBHLast;
-CBlockIndex* FindBlockByHeight(int nHeight)
-{
- if (nHeight >= (int)vBlockIndexByHeight.size())
- return NULL;
- return vBlockIndexByHeight[nHeight];
-}
-
bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
{
// Open history file to append
@@ -1404,17 +1412,17 @@ int GetNumBlocksOfPeers()
bool IsInitialBlockDownload()
{
- if (pindexBest == NULL || fImporting || fReindex || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
+ if (fImporting || fReindex || chainActive.Height() < Checkpoints::GetTotalBlocksEstimate())
return true;
static int64 nLastUpdate;
static CBlockIndex* pindexLastBest;
- if (pindexBest != pindexLastBest)
+ if (chainActive.Tip() != pindexLastBest)
{
- pindexLastBest = pindexBest;
+ pindexLastBest = chainActive.Tip();
nLastUpdate = GetTime();
}
return (GetTime() - nLastUpdate < 10 &&
- pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
+ chainActive.Tip()->GetBlockTime() < GetTime() - 24 * 60 * 60);
}
bool fLargeWorkForkFound = false;
@@ -1430,10 +1438,10 @@ void CheckForkWarningConditions()
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
- if (pindexBestForkTip && nBestHeight - pindexBestForkTip->nHeight >= 72)
+ if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
pindexBestForkTip = NULL;
- if (pindexBestForkTip || nBestInvalidWork > nBestChainWork + (pindexBest->GetBlockWork() * 6).getuint256())
+ if (pindexBestForkTip || nBestInvalidWork > chainActive.Tip()->nChainWork + (chainActive.Tip()->GetBlockWork() * 6).getuint256())
{
if (!fLargeWorkForkFound)
{
@@ -1470,7 +1478,7 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
- CBlockIndex* plonger = pindexBest;
+ CBlockIndex* plonger = chainActive.Tip();
while (pfork && pfork != plonger)
{
while (plonger && plonger->nHeight > pfork->nHeight)
@@ -1489,7 +1497,7 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (pfork->GetBlockWork() * 7).getuint256() &&
- nBestHeight - pindexNewForkTip->nHeight < 72)
+ chainActive.Height() - pindexNewForkTip->nHeight < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
@@ -1511,8 +1519,8 @@ void static InvalidChainFound(CBlockIndex* pindexNew)
log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
pindexNew->GetBlockTime()).c_str());
LogPrintf("InvalidChainFound: current best=%s height=%d log2_work=%.8g date=%s\n",
- hashBestChain.ToString().c_str(), nBestHeight, log(nBestChainWork.getdouble())/log(2.0),
- DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str());
+ chainActive.Tip()->GetBlockHash().ToString().c_str(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0),
+ DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()).c_str());
CheckForkWarningConditions();
}
@@ -1521,7 +1529,7 @@ void static InvalidBlockFound(CBlockIndex *pindex) {
pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex));
setBlockIndexValid.erase(pindex);
InvalidChainFound(pindex);
- if (pindex->GetNextInMainChain()) {
+ if (chainActive.Next(pindex)) {
CValidationState stateDummy;
ConnectBestBlock(stateDummy); // reorganise away from the failed block
}
@@ -1538,7 +1546,7 @@ bool ConnectBestBlock(CValidationState &state) {
pindexNewBest = *it;
}
- if (pindexNewBest == pindexBest || (pindexBest && pindexNewBest->nChainWork == pindexBest->nChainWork))
+ if (pindexNewBest == chainActive.Tip() || (chainActive.Tip() && pindexNewBest->nChainWork == chainActive.Tip()->nChainWork))
return true; // nothing to do
// check ancestry
@@ -1558,10 +1566,10 @@ bool ConnectBestBlock(CValidationState &state) {
break;
}
- if (pindexBest == NULL || pindexTest->nChainWork > pindexBest->nChainWork)
+ if (chainActive.Tip() == NULL || pindexTest->nChainWork > chainActive.Tip()->nChainWork)
vAttach.push_back(pindexTest);
- if (pindexTest->pprev == NULL || pindexTest->GetNextInMainChain()) {
+ if (pindexTest->pprev == NULL || chainActive.Next(pindexTest)) {
reverse(vAttach.begin(), vAttach.end());
BOOST_FOREACH(CBlockIndex *pindexSwitch, vAttach) {
boost::this_thread::interruption_point();
@@ -1881,7 +1889,6 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
// (its coinbase is unspendable)
if (block.GetHash() == Params().HashGenesisBlock()) {
view.SetBestBlock(pindex);
- pindexGenesisBlock = pindex;
return true;
}
@@ -2129,9 +2136,7 @@ bool SetBestChain(CValidationState &state, CBlockIndex* pindexNew)
// Proceed by updating the memory structures.
// Register new best chain
- vBlockIndexByHeight.resize(pindexNew->nHeight + 1);
- BOOST_FOREACH(CBlockIndex* pindex, vConnect)
- vBlockIndexByHeight[pindex->nHeight] = pindex;
+ chainActive.SetTip(pindexNew);
// Resurrect memory transactions that were in the disconnected branch
BOOST_FOREACH(CTransaction& tx, vResurrect) {
@@ -2157,23 +2162,18 @@ bool SetBestChain(CValidationState &state, CBlockIndex* pindexNew)
}
// New best block
- hashBestChain = pindexNew->GetBlockHash();
- pindexBest = pindexNew;
- pblockindexFBBHLast = NULL;
- nBestHeight = pindexBest->nHeight;
- nBestChainWork = pindexNew->nChainWork;
nTimeBestReceived = GetTime();
nTransactionsUpdated++;
LogPrintf("SetBestChain: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f\n",
- hashBestChain.ToString().c_str(), nBestHeight, log(nBestChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
- DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str(),
- Checkpoints::GuessVerificationProgress(pindexBest));
+ chainActive.Tip()->GetBlockHash().ToString().c_str(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
+ DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()).c_str(),
+ Checkpoints::GuessVerificationProgress(chainActive.Tip()));
// Check the version of the last 100 blocks to see if we need to upgrade:
if (!fIsInitialDownload)
{
int nUpgraded = 0;
- const CBlockIndex* pindex = pindexBest;
+ const CBlockIndex* pindex = chainActive.Tip();
for (int i = 0; i < 100 && pindex != NULL; i++)
{
if (pindex->nVersion > CBlock::CURRENT_VERSION)
@@ -2191,7 +2191,7 @@ bool SetBestChain(CValidationState &state, CBlockIndex* pindexNew)
if (!fIsInitialDownload && !strCmd.empty())
{
- boost::replace_all(strCmd, "%s", hashBestChain.GetHex());
+ boost::replace_all(strCmd, "%s", chainActive.Tip()->GetBlockHash().GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
@@ -2233,7 +2233,7 @@ bool AddToBlockIndex(CBlock& block, CValidationState& state, const CDiskBlockPos
if (!ConnectBestBlock(state))
return false;
- if (pindexNew == pindexBest)
+ if (pindexNew == chainActive.Tip())
{
// Clear fork warning if its no longer applicable
CheckForkWarningConditions();
@@ -2482,11 +2482,11 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CDiskBlockPos* dbp)
// Relay inventory, but don't relay old inventory during initial block download
int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
- if (hashBestChain == hash)
+ if (chainActive.Tip()->GetBlockHash() == hash)
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
- if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
+ if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
pnode->PushInventory(CInv(MSG_BLOCK, hash));
}
@@ -2505,6 +2505,18 @@ bool CBlockIndex::IsSuperMajority(int minVersion, const CBlockIndex* pstart, uns
return (nFound >= nRequired);
}
+int64 CBlockIndex::GetMedianTime() const
+{
+ const CBlockIndex* pindex = this;
+ for (int i = 0; i < nMedianTimeSpan/2; i++)
+ {
+ if (!chainActive.Next(pindex))
+ return GetBlockTime();
+ pindex = chainActive.Next(pindex);
+ }
+ return pindex->GetMedianTimePast();
+}
+
void PushGetBlocks(CNode* pnode, CBlockIndex* pindexBegin, uint256 hashEnd)
{
// Filter out duplicate requests
@@ -2530,7 +2542,7 @@ bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBl
return error("ProcessBlock() : CheckBlock FAILED");
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(mapBlockIndex);
- if (pcheckpoint && pblock->hashPrevBlock != hashBestChain)
+ if (pcheckpoint && pblock->hashPrevBlock != (chainActive.Tip() ? chainActive.Tip()->GetBlockHash() : uint256(0)))
{
// Extra checks to prevent "fill up memory by spamming with bogus blocks"
int64 deltaTime = pblock->GetBlockTime() - pcheckpoint->nTime;
@@ -2561,7 +2573,7 @@ bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBl
mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrevBlock, pblock2));
// Ask this guy to fill in what we're missing
- PushGetBlocks(pfrom, pindexBest, GetOrphanRoot(pblock2));
+ PushGetBlocks(pfrom, chainActive.Tip(), GetOrphanRoot(pblock2));
}
return true;
}
@@ -2875,48 +2887,39 @@ bool static LoadBlockIndexDB()
LogPrintf("LoadBlockIndexDB(): transaction index %s\n", fTxIndex ? "enabled" : "disabled");
// Load hashBestChain pointer to end of best chain
- pindexBest = pcoinsTip->GetBestBlock();
- if (pindexBest == NULL)
+ chainActive.SetTip(pcoinsTip->GetBestBlock());
+ if (chainActive.Tip() == NULL)
return true;
- hashBestChain = pindexBest->GetBlockHash();
- nBestHeight = pindexBest->nHeight;
- nBestChainWork = pindexBest->nChainWork;
// register best chain
- CBlockIndex *pindex = pindexBest;
- vBlockIndexByHeight.resize(pindexBest->nHeight + 1);
- while(pindex != NULL) {
- vBlockIndexByHeight[pindex->nHeight] = pindex;
- pindex = pindex->pprev;
- }
LogPrintf("LoadBlockIndexDB(): hashBestChain=%s height=%d date=%s\n",
- hashBestChain.ToString().c_str(), nBestHeight,
- DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str());
+ chainActive.Tip()->GetBlockHash().ToString().c_str(), chainActive.Height(),
+ DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()).c_str());
return true;
}
bool VerifyDB(int nCheckLevel, int nCheckDepth)
{
- if (pindexBest == NULL || pindexBest->pprev == NULL)
+ if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
return true;
// Verify blocks in the best chain
if (nCheckDepth <= 0)
nCheckDepth = 1000000000; // suffices until the year 19000
- if (nCheckDepth > nBestHeight)
- nCheckDepth = nBestHeight;
+ if (nCheckDepth > chainActive.Height())
+ nCheckDepth = chainActive.Height();
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(*pcoinsTip, true);
- CBlockIndex* pindexState = pindexBest;
+ CBlockIndex* pindexState = chainActive.Tip();
CBlockIndex* pindexFailure = NULL;
int nGoodTransactions = 0;
CValidationState state;
- for (CBlockIndex* pindex = pindexBest; pindex && pindex->pprev; pindex = pindex->pprev)
+ for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
- if (pindex->nHeight < nBestHeight-nCheckDepth)
+ if (pindex->nHeight < chainActive.Height()-nCheckDepth)
break;
CBlock block;
// check level 0: read from disk
@@ -2948,14 +2951,14 @@ bool VerifyDB(int nCheckLevel, int nCheckDepth)
}
}
if (pindexFailure)
- return error("VerifyDB() : *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", pindexBest->nHeight - pindexFailure->nHeight + 1, nGoodTransactions);
+ return error("VerifyDB() : *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
CBlockIndex *pindex = pindexState;
- while (pindex != pindexBest) {
+ while (pindex != chainActive.Tip()) {
boost::this_thread::interruption_point();
- pindex = pindex->GetNextInMainChain();
+ pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex))
return error("VerifyDB() : *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
@@ -2964,7 +2967,7 @@ bool VerifyDB(int nCheckLevel, int nCheckDepth)
}
}
- LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", pindexBest->nHeight - pindexState->nHeight, nGoodTransactions);
+ LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);
return true;
}
@@ -2973,12 +2976,8 @@ void UnloadBlockIndex()
{
mapBlockIndex.clear();
setBlockIndexValid.clear();
- pindexGenesisBlock = NULL;
- nBestHeight = 0;
- nBestChainWork = 0;
+ chainActive.SetTip(NULL);
nBestInvalidWork = 0;
- hashBestChain = 0;
- pindexBest = NULL;
}
bool LoadBlockIndex()
@@ -2992,7 +2991,7 @@ bool LoadBlockIndex()
bool InitBlockIndex() {
// Check whether we're already initialized
- if (pindexGenesisBlock != NULL)
+ if (chainActive.Genesis() != NULL)
return true;
// Use the provided setting for -txindex in the new database
@@ -3038,7 +3037,7 @@ void PrintBlockTree()
}
vector<pair<int, CBlockIndex*> > vStack;
- vStack.push_back(make_pair(0, pindexGenesisBlock));
+ vStack.push_back(make_pair(0, chainActive.Genesis()));
int nPrevCol = 0;
while (!vStack.empty())
@@ -3081,7 +3080,7 @@ void PrintBlockTree()
vector<CBlockIndex*>& vNext = mapNext[pindex];
for (unsigned int i = 0; i < vNext.size(); i++)
{
- if (vNext[i]->GetNextInMainChain())
+ if (chainActive.Next(vNext[i]))
{
swap(vNext[0], vNext[i]);
break;
@@ -3328,7 +3327,7 @@ void static ProcessGetData(CNode* pfrom)
// and we want it right after the last block so they don't
// wait for other stuff first.
vector<CInv> vInv;
- vInv.push_back(CInv(MSG_BLOCK, hashBestChain));
+ vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
pfrom->PushMessage("inv", vInv);
pfrom->hashContinue = 0;
}
@@ -3610,7 +3609,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
if (!fImporting && !fReindex)
pfrom->AskFor(inv);
} else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash)) {
- PushGetBlocks(pfrom, pindexBest, GetOrphanRoot(mapOrphanBlocks[inv.hash]));
+ PushGetBlocks(pfrom, chainActive.Tip(), GetOrphanRoot(mapOrphanBlocks[inv.hash]));
} else if (nInv == nLastBlock) {
// In case we are on a very long side-chain, it is possible that we already have
// the last block in an inv bundle sent in response to getblocks. Try to detect
@@ -3658,10 +3657,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
// Send the rest of the chain
if (pindex)
- pindex = pindex->GetNextInMainChain();
+ pindex = chainActive.Next(pindex);
int nLimit = 500;
LogPrint("net", "getblocks %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().c_str(), nLimit);
- for (; pindex; pindex = pindex->GetNextInMainChain())
+ for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
@@ -3701,14 +3700,14 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
// Find the last block the caller has in the main chain
pindex = locator.GetBlockIndex();
if (pindex)
- pindex = pindex->GetNextInMainChain();
+ pindex = chainActive.Next(pindex);
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector<CBlock> vHeaders;
int nLimit = 2000;
LogPrint("net", "getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().c_str());
- for (; pindex; pindex = pindex->GetNextInMainChain())
+ for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
@@ -4174,7 +4173,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
// Start block sync
if (pto->fStartSync && !fImporting && !fReindex) {
pto->fStartSync = false;
- PushGetBlocks(pto, pindexBest, uint256(0));
+ PushGetBlocks(pto, chainActive.Tip(), uint256(0));
}
// Resend wallet transactions that haven't gotten in a block yet
diff --git a/src/main.h b/src/main.h
index 83b0d07f63..46d629044a 100644
--- a/src/main.h
+++ b/src/main.h
@@ -74,14 +74,8 @@ extern CScript COINBASE_FLAGS;
extern CCriticalSection cs_main;
extern std::map<uint256, CBlockIndex*> mapBlockIndex;
-extern std::vector<CBlockIndex*> vBlockIndexByHeight;
extern std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexValid;
-extern CBlockIndex* pindexGenesisBlock;
-extern int nBestHeight;
-extern uint256 nBestChainWork;
extern uint256 nBestInvalidWork;
-extern uint256 hashBestChain;
-extern CBlockIndex* pindexBest;
extern unsigned int nTransactionsUpdated;
extern uint64 nLastBlockTx;
extern uint64 nLastBlockSize;
@@ -153,8 +147,6 @@ void UnloadBlockIndex();
bool VerifyDB(int nCheckLevel, int nCheckDepth);
/** Print the loaded block tree */
void PrintBlockTree();
-/** Find a block by height in the currently-connected chain */
-CBlockIndex* FindBlockByHeight(int nHeight);
/** Process protocol messages received from a given node */
bool ProcessMessages(CNode* pfrom);
/** Send queued protocol messages to be sent to a give node */
@@ -819,15 +811,6 @@ public:
return (CBigNum(1)<<256) / (bnTarget+1);
}
- bool IsInMainChain() const
- {
- return nHeight < (int)vBlockIndexByHeight.size() && vBlockIndexByHeight[nHeight] == this;
- }
-
- CBlockIndex *GetNextInMainChain() const {
- return nHeight+1 >= (int)vBlockIndexByHeight.size() ? NULL : vBlockIndexByHeight[nHeight+1];
- }
-
bool CheckIndex() const
{
return CheckProofOfWork(GetBlockHash(), nBits);
@@ -849,17 +832,7 @@ public:
return pbegin[(pend - pbegin)/2];
}
- int64 GetMedianTime() const
- {
- const CBlockIndex* pindex = this;
- for (int i = 0; i < nMedianTimeSpan/2; i++)
- {
- if (!pindex->GetNextInMainChain())
- return GetBlockTime();
- pindex = pindex->GetNextInMainChain();
- }
- return pindex->GetMedianTimePast();
- }
+ int64 GetMedianTime() const;
/**
* Returns true if there are nRequired or more blocks of minVersion or above
@@ -870,8 +843,8 @@ public:
std::string ToString() const
{
- return strprintf("CBlockIndex(pprev=%p, pnext=%p, nHeight=%d, merkle=%s, hashBlock=%s)",
- pprev, GetNextInMainChain(), nHeight,
+ return strprintf("CBlockIndex(pprev=%p, nHeight=%d, merkle=%s, hashBlock=%s)",
+ pprev, nHeight,
hashMerkleRoot.ToString().c_str(),
GetBlockHash().ToString().c_str());
}
@@ -1011,9 +984,59 @@ public:
}
};
+/** An in-memory indexed chain of blocks. */
+class CChain {
+private:
+ std::vector<CBlockIndex*> vChain;
+public:
+ /** Returns the index entry for the genesis block of this chain, or NULL if none. */
+ CBlockIndex *Genesis() const {
+ return vChain.size() > 0 ? vChain[0] : NULL;
+ }
+ /** Returns the index entry for the tip of this chain, or NULL if none. */
+ CBlockIndex *Tip() const {
+ return vChain.size() > 0 ? vChain[vChain.size() - 1] : NULL;
+ }
+
+ /** Returns the index entry at a particular height in this chain, or NULL if no such height exists. */
+ CBlockIndex *operator[](int nHeight) const {
+ if (nHeight < 0 || nHeight >= (int)vChain.size())
+ return NULL;
+ return vChain[nHeight];
+ }
+
+ /** Compare two chains efficiently. */
+ friend bool operator==(const CChain &a, const CChain &b) {
+ return a.vChain.size() == b.vChain.size() &&
+ a.vChain[a.vChain.size() - 1] == b.vChain[b.vChain.size() - 1];
+ }
+
+ /** Efficiently check whether a block is present in this chain. */
+ bool Contains(const CBlockIndex *pindex) const {
+ return (*this)[pindex->nHeight] == pindex;
+ }
+
+ /** Find the successor of a block in this chain, or NULL if the given index is not found or is the tip. */
+ CBlockIndex *Next(const CBlockIndex *pindex) const {
+ if (Contains(pindex))
+ return (*this)[pindex->nHeight + 1];
+ else
+ return NULL;
+ }
+
+ /** Return the maximal height in the chain. Is equal to chain.Tip() ? chain.Tip()->nHeight : -1. */
+ int Height() const {
+ return vChain.size() - 1;
+ }
+
+ /** Set/initialize a chain with a given tip. Returns the forking point. */
+ CBlockIndex *SetTip(CBlockIndex *pindex);
+};
+/** The currently-connected chain of blocks. */
+extern CChain chainActive;
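The call-site rewrites elsewhere in this commit all reduce to a handful of CChain accessors, so the removed globals map directly onto them: pindexBest -> chainActive.Tip(), nBestHeight -> chainActive.Height(), pindexGenesisBlock -> chainActive.Genesis(), FindBlockByHeight(n) -> chainActive[n], pindex->IsInMainChain() -> chainActive.Contains(pindex), and pindex->GetNextInMainChain() -> chainActive.Next(pindex), while SetTip() rebuilds the height-indexed vector and returns the forking point. A self-contained sketch of that interface against a simplified block index (the mini CBlockIndex and reduced CChain below are stand-ins for illustration, not the real classes):

    #include <cstdio>
    #include <vector>

    // Minimal stand-in for CBlockIndex: just the fields CChain needs.
    struct CBlockIndex {
        int nHeight;
        CBlockIndex* pprev;
    };

    class CChain {
        std::vector<CBlockIndex*> vChain;
    public:
        CBlockIndex* Genesis() const { return vChain.empty() ? NULL : vChain[0]; }
        CBlockIndex* Tip() const { return vChain.empty() ? NULL : vChain.back(); }
        CBlockIndex* operator[](int h) const {
            return (h < 0 || h >= (int)vChain.size()) ? NULL : vChain[h];
        }
        bool Contains(const CBlockIndex* p) const { return (*this)[p->nHeight] == p; }
        CBlockIndex* Next(const CBlockIndex* p) const {
            return Contains(p) ? (*this)[p->nHeight + 1] : NULL;
        }
        int Height() const { return (int)vChain.size() - 1; }
        // Walk back from the new tip until an entry already matches; everything
        // below that point is shared, so the return value is the forking point.
        CBlockIndex* SetTip(CBlockIndex* p) {
            if (p == NULL) { vChain.clear(); return NULL; }
            vChain.resize(p->nHeight + 1);
            while (p && vChain[p->nHeight] != p) {
                vChain[p->nHeight] = p;
                p = p->pprev;
            }
            return p;
        }
    };

    int main() {
        // Build a toy chain genesis -> a -> b and activate it.
        CBlockIndex genesis = {0, NULL}, a = {1, &genesis}, b = {2, &a};
        CChain chain;
        chain.SetTip(&b);
        std::printf("height=%d tip=%d genesis=%d\n",
                    chain.Height(), chain.Tip()->nHeight, chain.Genesis()->nHeight);
        std::printf("contains(a)=%d next(a)=%d\n",
                    (int)chain.Contains(&a), chain.Next(&a)->nHeight);

        // Reorg onto a competing block at height 1: SetTip returns the fork point.
        CBlockIndex a2 = {1, &genesis};
        CBlockIndex* pfork = chain.SetTip(&a2);
        std::printf("fork point height=%d new height=%d\n",
                    pfork->nHeight, chain.Height());
        return 0;
    }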
diff --git a/src/miner.cpp b/src/miner.cpp
index 30c600071f..e9c1d9aff9 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -176,7 +176,7 @@ CBlockTemplate* CreateNewBlock(const CScript& scriptPubKeyIn)
int64 nFees = 0;
{
LOCK2(cs_main, mempool.cs);
- CBlockIndex* pindexPrev = pindexBest;
+ CBlockIndex* pindexPrev = chainActive.Tip();
CCoinsViewCache view(*pcoinsTip, true);
// Priority order to process transactions
@@ -467,7 +467,7 @@ bool CheckWork(CBlock* pblock, CWallet& wallet, CReserveKey& reservekey)
// Found a solution
{
LOCK(cs_main);
- if (pblock->hashPrevBlock != hashBestChain)
+ if (pblock->hashPrevBlock != chainActive.Tip()->GetBlockHash())
return error("BitcoinMiner : generated block is stale");
// Remove key from key pool
@@ -510,7 +510,7 @@ void static BitcoinMiner(CWallet *pwallet)
// Create new block
//
unsigned int nTransactionsUpdatedLast = nTransactionsUpdated;
- CBlockIndex* pindexPrev = pindexBest;
+ CBlockIndex* pindexPrev = chainActive.Tip();
auto_ptr<CBlockTemplate> pblocktemplate(CreateNewBlockWithKey(reservekey));
if (!pblocktemplate.get())
@@ -613,7 +613,7 @@ void static BitcoinMiner(CWallet *pwallet)
break;
if (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60)
break;
- if (pindexPrev != pindexBest)
+ if (pindexPrev != chainActive.Tip())
break;
// Update nTime every few seconds
diff --git a/src/net.cpp b/src/net.cpp
index e22c56f006..99457be0f5 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -542,6 +542,8 @@ void CNode::Cleanup()
void CNode::PushVersion()
{
+ int nBestHeight = g_signals.GetHeight().get_value_or(0);
+
/// when NTP implemented, change to just nTime = GetAdjustedTime()
int64 nTime = (fInbound ? GetAdjustedTime() : GetTime());
CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0",0)));
@@ -1485,6 +1487,8 @@ void static StartSync(const vector<CNode*> &vNodes) {
CNode *pnodeNewSync = NULL;
double dBestScore = 0;
+ int nBestHeight = g_signals.GetHeight().get_value_or(0);
+
// Iterate over all nodes
BOOST_FOREACH(CNode* pnode, vNodes) {
// check preconditions for allowing a sync
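net.cpp no longer reads the removed nBestHeight global; it asks for the height through the new CNodeSignals::GetHeight signal. With boost::signals2 the default combiner for signal<int()> is optional_last_value<int>, so invoking the signal yields a boost::optional<int>, and get_value_or(0) supplies 0 whenever no GetHeight slot has been connected (for example in binaries that link the net code without main.cpp). A small standalone sketch of that pattern (requires Boost.Signals2; g_signals itself is net.cpp's internal instance):

    #include <boost/signals2/signal.hpp>
    #include <cstdio>

    struct CNodeSignals {
        boost::signals2::signal<int ()> GetHeight;
    };

    static int GetHeight() { return 123456; }  // illustrative height provider

    int main() {
        CNodeSignals sig;

        // No slot connected yet: the optional result is empty, so fall back to 0.
        int h = sig.GetHeight().get_value_or(0);
        std::printf("before connect: %d\n", h);     // 0

        sig.GetHeight.connect(&GetHeight);          // what RegisterNodeSignals does
        h = sig.GetHeight().get_value_or(0);
        std::printf("after connect: %d\n", h);      // 123456
        return 0;
    }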
diff --git a/src/net.h b/src/net.h
index ab1b69acb7..9b76d1643d 100644
--- a/src/net.h
+++ b/src/net.h
@@ -28,7 +28,6 @@ static const unsigned int MAX_INV_SZ = 50000;
class CNode;
class CBlockIndex;
-extern int nBestHeight;
@@ -52,6 +51,7 @@ void SocketSendData(CNode *pnode);
// Signals for message handling
struct CNodeSignals
{
+ boost::signals2::signal<int ()> GetHeight;
boost::signals2::signal<bool (CNode*)> ProcessMessages;
boost::signals2::signal<bool (CNode*, bool)> SendMessages;
};
@@ -320,7 +320,7 @@ public:
unsigned int GetTotalRecvSize()
{
unsigned int total = 0;
- BOOST_FOREACH(const CNetMessage &msg, vRecvMsg)
+ BOOST_FOREACH(const CNetMessage &msg, vRecvMsg)
total += msg.vRecv.size() + 24;
return total;
}
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index f0cf229b36..2bab488135 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -42,7 +42,7 @@ int ClientModel::getNumConnections() const
int ClientModel::getNumBlocks() const
{
- return nBestHeight;
+ return chainActive.Height();
}
int ClientModel::getNumBlocksAtStartup()
@@ -63,8 +63,8 @@ quint64 ClientModel::getTotalBytesSent() const
QDateTime ClientModel::getLastBlockDate() const
{
- if (pindexBest)
- return QDateTime::fromTime_t(pindexBest->GetBlockTime());
+ if (chainActive.Tip())
+ return QDateTime::fromTime_t(chainActive.Tip()->GetBlockTime());
else if(!isTestNet())
return QDateTime::fromTime_t(1231006505); // Genesis block's time
else
@@ -73,7 +73,7 @@ QDateTime ClientModel::getLastBlockDate() const
double ClientModel::getVerificationProgress() const
{
- return Checkpoints::GuessVerificationProgress(pindexBest);
+ return Checkpoints::GuessVerificationProgress(chainActive.Tip());
}
void ClientModel::updateTimer()
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index e27aa93a4a..93fc8cab22 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -17,7 +17,7 @@ QString TransactionDesc::FormatTxStatus(const CWalletTx& wtx)
if (!IsFinalTx(wtx))
{
if (wtx.nLockTime < LOCKTIME_THRESHOLD)
- return tr("Open for %n more block(s)", "", wtx.nLockTime - nBestHeight + 1);
+ return tr("Open for %n more block(s)", "", wtx.nLockTime - chainActive.Height() + 1);
else
return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.nLockTime));
}
diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp
index ea2c1f0a5c..162908a9a4 100644
--- a/src/qt/transactionrecord.cpp
+++ b/src/qt/transactionrecord.cpp
@@ -160,14 +160,14 @@ void TransactionRecord::updateStatus(const CWalletTx &wtx)
idx);
status.confirmed = wtx.IsConfirmed();
status.depth = wtx.GetDepthInMainChain();
- status.cur_num_blocks = nBestHeight;
+ status.cur_num_blocks = chainActive.Height();
if (!IsFinalTx(wtx))
{
if (wtx.nLockTime < LOCKTIME_THRESHOLD)
{
status.status = TransactionStatus::OpenUntilBlock;
- status.open_for = wtx.nLockTime - nBestHeight + 1;
+ status.open_for = wtx.nLockTime - chainActive.Height() + 1;
}
else
{
@@ -221,7 +221,7 @@ void TransactionRecord::updateStatus(const CWalletTx &wtx)
bool TransactionRecord::statusUpdateNeeded()
{
- return status.cur_num_blocks != nBestHeight;
+ return status.cur_num_blocks != chainActive.Height();
}
QString TransactionRecord::getTxID() const
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index 07f6a62150..6f7a5933ab 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -250,9 +250,9 @@ void TransactionTableModel::updateTransaction(const QString &hash, int status)
void TransactionTableModel::updateConfirmations()
{
- if(nBestHeight != cachedNumBlocks)
+ if(chainActive.Height() != cachedNumBlocks)
{
- cachedNumBlocks = nBestHeight;
+ cachedNumBlocks = chainActive.Height();
// Blocks came in since last poll.
// Invalidate status (number of confirmations) and (possibly) description
// for all rows. Qt is smart enough to only actually request the data for the
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index bda39b675f..099fbe8dc3 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -73,10 +73,10 @@ void WalletModel::updateStatus()
void WalletModel::pollBalanceChanged()
{
- if(nBestHeight != cachedNumBlocks)
+ if(chainActive.Height() != cachedNumBlocks)
{
// Balance and number of transactions might have changed
- cachedNumBlocks = nBestHeight;
+ cachedNumBlocks = chainActive.Height();
checkBalanceChanged();
}
}
diff --git a/src/rpcblockchain.cpp b/src/rpcblockchain.cpp
index 398f33605a..957241d6a0 100644
--- a/src/rpcblockchain.cpp
+++ b/src/rpcblockchain.cpp
@@ -17,10 +17,10 @@ double GetDifficulty(const CBlockIndex* blockindex)
// minimum difficulty = 1.0.
if (blockindex == NULL)
{
- if (pindexBest == NULL)
+ if (chainActive.Tip() == NULL)
return 1.0;
else
- blockindex = pindexBest;
+ blockindex = chainActive.Tip();
}
int nShift = (blockindex->nBits >> 24) & 0xff;
@@ -66,7 +66,7 @@ Object blockToJSON(const CBlock& block, const CBlockIndex* blockindex)
if (blockindex->pprev)
result.push_back(Pair("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()));
- CBlockIndex *pnext = blockindex->GetNextInMainChain();
+ CBlockIndex *pnext = chainActive.Next(blockindex);
if (pnext)
result.push_back(Pair("nextblockhash", pnext->GetBlockHash().GetHex()));
return result;
@@ -80,7 +80,7 @@ Value getblockcount(const Array& params, bool fHelp)
"getblockcount\n"
"Returns the number of blocks in the longest block chain.");
- return nBestHeight;
+ return chainActive.Height();
}
Value getbestblockhash(const Array& params, bool fHelp)
@@ -90,7 +90,7 @@ Value getbestblockhash(const Array& params, bool fHelp)
"getbestblockhash\n"
"Returns the hash of the best (tip) block in the longest block chain.");
- return hashBestChain.GetHex();
+ return chainActive.Tip()->GetBlockHash().GetHex();
}
Value getdifficulty(const Array& params, bool fHelp)
@@ -145,11 +145,11 @@ Value getblockhash(const Array& params, bool fHelp)
"Returns hash of block in best-block-chain at <index>.");
int nHeight = params[0].get_int();
- if (nHeight < 0 || nHeight > nBestHeight)
+ if (nHeight < 0 || nHeight > chainActive.Height())
throw runtime_error("Block number out of range.");
- CBlockIndex* pblockindex = FindBlockByHeight(nHeight);
- return pblockindex->phashBlock->GetHex();
+ CBlockIndex* pblockindex = chainActive[nHeight];
+ return pblockindex->GetBlockHash().GetHex();
}
Value getblock(const Array& params, bool fHelp)
diff --git a/src/rpcdump.cpp b/src/rpcdump.cpp
index 842910c7e0..61cd07d507 100644
--- a/src/rpcdump.cpp
+++ b/src/rpcdump.cpp
@@ -102,7 +102,7 @@ Value importprivkey(const Array& params, bool fHelp)
throw JSONRPCError(RPC_WALLET_ERROR, "Error adding key to wallet");
if (fRescan) {
- pwalletMain->ScanForWalletTransactions(pindexGenesisBlock, true);
+ pwalletMain->ScanForWalletTransactions(chainActive.Genesis(), true);
pwalletMain->ReacceptWalletTransactions();
}
}
@@ -124,7 +124,7 @@ Value importwallet(const Array& params, bool fHelp)
if (!file.is_open())
throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");
- int64 nTimeBegin = pindexBest->nTime;
+ int64 nTimeBegin = chainActive.Tip()->nTime;
bool fGood = true;
@@ -175,11 +175,11 @@ Value importwallet(const Array& params, bool fHelp)
}
file.close();
- CBlockIndex *pindex = pindexBest;
+ CBlockIndex *pindex = chainActive.Tip();
while (pindex && pindex->pprev && pindex->nTime > nTimeBegin - 7200)
pindex = pindex->pprev;
- LogPrintf("Rescanning last %i blocks\n", pindexBest->nHeight - pindex->nHeight + 1);
+ LogPrintf("Rescanning last %i blocks\n", chainActive.Height() - pindex->nHeight + 1);
pwalletMain->ScanForWalletTransactions(pindex);
pwalletMain->ReacceptWalletTransactions();
pwalletMain->MarkDirty();
@@ -243,8 +243,8 @@ Value dumpwallet(const Array& params, bool fHelp)
// produce output
file << strprintf("# Wallet dump created by Bitcoin %s (%s)\n", CLIENT_BUILD.c_str(), CLIENT_DATE.c_str());
file << strprintf("# * Created on %s\n", EncodeDumpTime(GetTime()).c_str());
- file << strprintf("# * Best block at time of backup was %i (%s),\n", nBestHeight, hashBestChain.ToString().c_str());
- file << strprintf("# mined on %s\n", EncodeDumpTime(pindexBest->nTime).c_str());
+ file << strprintf("# * Best block at time of backup was %i (%s),\n", chainActive.Height(), chainActive.Tip()->GetBlockHash().ToString().c_str());
+ file << strprintf("# mined on %s\n", EncodeDumpTime(chainActive.Tip()->nTime).c_str());
file << "\n";
for (std::vector<std::pair<int64, CKeyID> >::const_iterator it = vKeyBirth.begin(); it != vKeyBirth.end(); it++) {
const CKeyID &keyid = it->second;
diff --git a/src/rpcmining.cpp b/src/rpcmining.cpp
index b013b4b200..f123c3a9e0 100644
--- a/src/rpcmining.cpp
+++ b/src/rpcmining.cpp
@@ -37,10 +37,7 @@ void ShutdownRPCMining()
// or from the last difficulty change if 'lookup' is nonpositive.
// If 'height' is nonnegative, compute the estimate at the time when a given block was found.
Value GetNetworkHashPS(int lookup, int height) {
- CBlockIndex *pb = pindexBest;
-
- if (height >= 0 && height < nBestHeight)
- pb = FindBlockByHeight(height);
+ CBlockIndex *pb = chainActive[height];
if (pb == NULL || !pb->nHeight)
return 0;
@@ -148,7 +145,7 @@ Value getmininginfo(const Array& params, bool fHelp)
"Returns an object containing mining-related information.");
Object obj;
- obj.push_back(Pair("blocks", (int)nBestHeight));
+ obj.push_back(Pair("blocks", (int)chainActive.Height()));
obj.push_back(Pair("currentblocksize", (uint64_t)nLastBlockSize));
obj.push_back(Pair("currentblocktx", (uint64_t)nLastBlockTx));
obj.push_back(Pair("difficulty", (double)GetDifficulty()));
@@ -192,10 +189,10 @@ Value getwork(const Array& params, bool fHelp)
static CBlockIndex* pindexPrev;
static int64 nStart;
static CBlockTemplate* pblocktemplate;
- if (pindexPrev != pindexBest ||
+ if (pindexPrev != chainActive.Tip() ||
(nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60))
{
- if (pindexPrev != pindexBest)
+ if (pindexPrev != chainActive.Tip())
{
// Deallocate old blocks since they're obsolete now
mapNewBlock.clear();
@@ -209,7 +206,7 @@ Value getwork(const Array& params, bool fHelp)
// Store the pindexBest used before CreateNewBlock, to avoid races
nTransactionsUpdatedLast = nTransactionsUpdated;
- CBlockIndex* pindexPrevNew = pindexBest;
+ CBlockIndex* pindexPrevNew = chainActive.Tip();
nStart = GetTime();
// Create new block
@@ -328,7 +325,7 @@ Value getblocktemplate(const Array& params, bool fHelp)
static CBlockIndex* pindexPrev;
static int64 nStart;
static CBlockTemplate* pblocktemplate;
- if (pindexPrev != pindexBest ||
+ if (pindexPrev != chainActive.Tip() ||
(nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 5))
{
// Clear pindexPrev so future calls make a new block, despite any failures from here on
@@ -336,7 +333,7 @@ Value getblocktemplate(const Array& params, bool fHelp)
// Store the pindexBest used before CreateNewBlock, to avoid races
nTransactionsUpdatedLast = nTransactionsUpdated;
- CBlockIndex* pindexPrevNew = pindexBest;
+ CBlockIndex* pindexPrevNew = chainActive.Tip();
nStart = GetTime();
// Create new block
diff --git a/src/rpcrawtransaction.cpp b/src/rpcrawtransaction.cpp
index fcc5359dd6..5384b65906 100644
--- a/src/rpcrawtransaction.cpp
+++ b/src/rpcrawtransaction.cpp
@@ -87,9 +87,9 @@ void TxToJSON(const CTransaction& tx, const uint256 hashBlock, Object& entry)
if (mi != mapBlockIndex.end() && (*mi).second)
{
CBlockIndex* pindex = (*mi).second;
- if (pindex->IsInMainChain())
+ if (chainActive.Contains(pindex))
{
- entry.push_back(Pair("confirmations", 1 + nBestHeight - pindex->nHeight));
+ entry.push_back(Pair("confirmations", 1 + chainActive.Height() - pindex->nHeight));
entry.push_back(Pair("time", (boost::int64_t)pindex->nTime));
entry.push_back(Pair("blocktime", (boost::int64_t)pindex->nTime));
}
diff --git a/src/rpcwallet.cpp b/src/rpcwallet.cpp
index cafb6db9b1..433cc8b735 100644
--- a/src/rpcwallet.cpp
+++ b/src/rpcwallet.cpp
@@ -76,7 +76,7 @@ Value getinfo(const Array& params, bool fHelp)
obj.push_back(Pair("walletversion", pwalletMain->GetVersion()));
obj.push_back(Pair("balance", ValueFromAmount(pwalletMain->GetBalance())));
}
- obj.push_back(Pair("blocks", (int)nBestHeight));
+ obj.push_back(Pair("blocks", (int)chainActive.Height()));
obj.push_back(Pair("timeoffset", (boost::int64_t)GetTimeOffset()));
obj.push_back(Pair("connections", (int)vNodes.size()));
obj.push_back(Pair("proxy", (proxy.first.IsValid() ? proxy.first.ToStringIPPort() : string())));
@@ -1180,7 +1180,7 @@ Value listsinceblock(const Array& params, bool fHelp)
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter");
}
- int depth = pindex ? (1 + nBestHeight - pindex->nHeight) : -1;
+ int depth = pindex ? (1 + chainActive.Height() - pindex->nHeight) : -1;
Array transactions;
@@ -1192,23 +1192,8 @@ Value listsinceblock(const Array& params, bool fHelp)
ListTransactions(tx, "*", 0, true, transactions);
}
- uint256 lastblock;
-
- if (target_confirms == 1)
- {
- lastblock = hashBestChain;
- }
- else
- {
- int target_height = pindexBest->nHeight + 1 - target_confirms;
-
- CBlockIndex *block;
- for (block = pindexBest;
- block && block->nHeight > target_height;
- block = block->pprev) { }
-
- lastblock = block ? block->GetBlockHash() : 0;
- }
+ CBlockIndex *pblockLast = chainActive[chainActive.Height() + 1 - target_confirms];
+ uint256 lastblock = pblockLast ? pblockLast->GetBlockHash() : 0;
Object ret;
ret.push_back(Pair("transactions", transactions));
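The listsinceblock hunk above replaces the explicit walk back from the tip with a single index into the active chain: target_confirms = 1 selects the tip itself, and larger values step correspondingly further back. A small illustration with hypothetical heights (it assumes chainActive[] returns NULL for out-of-range indices, which is what the new NULL check caters for):

    // Suppose chainActive.Height() == 100:
    CBlockIndex* p1   = chainActive[100 + 1 - 1];    // target_confirms = 1   -> the tip (height 100)
    CBlockIndex* p6   = chainActive[100 + 1 - 6];    // target_confirms = 6   -> block at height 95
    CBlockIndex* pOut = chainActive[100 + 1 - 200];  // target_confirms = 200 -> NULL, lastblock becomes 0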
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index eeeacb0ad4..67165760b2 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -65,10 +65,10 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
{
CBlock *pblock = &pblocktemplate->block; // pointer for convenience
pblock->nVersion = 1;
- pblock->nTime = pindexBest->GetMedianTimePast()+1;
+ pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
pblock->vtx[0].vin[0].scriptSig = CScript();
pblock->vtx[0].vin[0].scriptSig.push_back(blockinfo[i].extranonce);
- pblock->vtx[0].vin[0].scriptSig.push_back(pindexBest->nHeight);
+ pblock->vtx[0].vin[0].scriptSig.push_back(chainActive.Height());
pblock->vtx[0].vout[0].scriptPubKey = CScript();
if (txFirst.size() < 2)
txFirst.push_back(new CTransaction(pblock->vtx[0]));
@@ -193,14 +193,14 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
mempool.clear();
// subsidy changing
- int nHeight = pindexBest->nHeight;
- pindexBest->nHeight = 209999;
+ int nHeight = chainActive.Height();
+ chainActive.Tip()->nHeight = 209999;
BOOST_CHECK(pblocktemplate = CreateNewBlockWithKey(reservekey));
delete pblocktemplate;
- pindexBest->nHeight = 210000;
+ chainActive.Tip()->nHeight = 210000;
BOOST_CHECK(pblocktemplate = CreateNewBlockWithKey(reservekey));
delete pblocktemplate;
- pindexBest->nHeight = nHeight;
+ chainActive.Tip()->nHeight = nHeight;
BOOST_FOREACH(CTransaction *tx, txFirst)
delete tx;
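The "subsidy changing" part of the test temporarily moves the tip's height across the first halving boundary and then restores it. For reference, the halving arithmetic it exercises (a sketch of the standard subsidy schedule, not code taken from this diff):

    // The block subsidy starts at 50 BTC and halves every 210000 blocks.
    int64 nSubsidy = 50 * COIN;
    nSubsidy >>= (nHeight / 210000);  // height 209999 -> 50 BTC, height 210000 -> 25 BTC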
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 0d2fdc2887..24ee8ec3e8 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -223,10 +223,6 @@ bool CBlockTreeDB::LoadBlockIndexGuts()
pindexNew->nStatus = diskindex.nStatus;
pindexNew->nTx = diskindex.nTx;
- // Watch for genesis block
- if (pindexGenesisBlock == NULL && diskindex.GetBlockHash() == Params().HashGenesisBlock())
- pindexGenesisBlock = pindexNew;
-
if (!pindexNew->CheckIndex())
return error("LoadBlockIndex() : CheckIndex failed: %s", pindexNew->ToString().c_str());
diff --git a/src/wallet.cpp b/src/wallet.cpp
index 26ffc71e19..0f0ce7e631 100644
--- a/src/wallet.cpp
+++ b/src/wallet.cpp
@@ -799,7 +799,7 @@ int CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate)
// no need to read and scan block, if block was created before
// our wallet birthday (as adjusted for block time variability)
if (nTimeFirstKey && (pindex->nTime < (nTimeFirstKey - 7200))) {
- pindex = pindex->GetNextInMainChain();
+ pindex = chainActive.Next(pindex);
continue;
}
@@ -810,7 +810,7 @@ int CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate)
if (AddToWalletIfInvolvingMe(tx.GetHash(), tx, &block, fUpdate))
ret++;
}
- pindex = pindex->GetNextInMainChain();
+ pindex = chainActive.Next(pindex);
}
}
return ret;
@@ -864,7 +864,7 @@ void CWallet::ReacceptWalletTransactions()
if (fMissing)
{
// TODO: optimize this to scan just part of the block chain?
- if (ScanForWalletTransactions(pindexGenesisBlock))
+ if (ScanForWalletTransactions(chainActive.Genesis()))
fRepeat = true; // Found missing transactions: re-do re-accept.
}
}
@@ -1933,7 +1933,7 @@ void CWallet::GetKeyBirthTimes(std::map<CKeyID, int64> &mapKeyBirth) const {
mapKeyBirth[it->first] = it->second.nCreateTime;
// map in which we'll infer heights of other keys
- CBlockIndex *pindexMax = FindBlockByHeight(std::max(0, nBestHeight - 144)); // the tip can be reorganised; use a 144-block safety margin
+ CBlockIndex *pindexMax = chainActive[std::max(0, chainActive.Height() - 144)]; // the tip can be reorganised; use a 144-block safety margin
std::map<CKeyID, CBlockIndex*> mapKeyFirstBlock;
std::set<CKeyID> setKeys;
GetKeys(setKeys);
@@ -1953,7 +1953,7 @@ void CWallet::GetKeyBirthTimes(std::map<CKeyID, int64> &mapKeyBirth) const {
// iterate over all wallet transactions...
const CWalletTx &wtx = (*it).second;
std::map<uint256, CBlockIndex*>::const_iterator blit = mapBlockIndex.find(wtx.hashBlock);
- if (blit != mapBlockIndex.end() && blit->second->IsInMainChain()) {
+ if (blit != mapBlockIndex.end() && chainActive.Contains(blit->second)) {
// ... which are already in a block
int nHeight = blit->second->nHeight;
BOOST_FOREACH(const CTxOut &txout, wtx.vout) {