Diffstat (limited to 'src/index/txindex.cpp')
-rw-r--r--  src/index/txindex.cpp  157
1 file changed, 157 insertions(+), 0 deletions(-)
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
new file mode 100644
index 0000000000..27cf844ceb
--- /dev/null
+++ b/src/index/txindex.cpp
@@ -0,0 +1,157 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <index/txindex.h>
+#include <init.h>
+#include <tinyformat.h>
+#include <ui_interface.h>
+#include <util.h>
+#include <validation.h>
+#include <warnings.h>
+
+template<typename... Args>
+static void FatalError(const char* fmt, const Args&... args)
+{
+ std::string strMessage = tfm::format(fmt, args...);
+ SetMiscWarning(strMessage);
+ LogPrintf("*** %s\n", strMessage);
+ uiInterface.ThreadSafeMessageBox(
+ "Error: A fatal internal error occurred, see debug.log for details",
+ "", CClientUIInterface::MSG_ERROR);
+ StartShutdown();
+}
+
+TxIndex::TxIndex(std::unique_ptr<TxIndexDB> db) :
+ m_db(std::move(db)), m_synced(false), m_best_block_index(nullptr)
+{}
+
+bool TxIndex::Init()
+{
+ LOCK(cs_main);
+
+ // Attempt to migrate txindex from the old database to the new one. Even if
+ // the chain tip is null, the node could be reindexing and we still want to
+ // delete txindex records in the old database.
+ if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
+ return false;
+ }
+
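+ // Recover previous sync progress: read the best block locator written by the
+ // last run and find its fork point with the active chain. If no locator has
+ // been written yet, FindForkInGlobalIndex falls back to the genesis block and
+ // the index syncs from scratch.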
+ CBlockLocator locator;
+ if (!m_db->ReadBestBlock(locator)) {
+ locator.SetNull();
+ }
+
+ m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
+ m_synced = m_best_block_index.load() == chainActive.Tip();
+ return true;
+}
+
+bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+{
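+ // Record the on-disk position of every transaction in the block. The offset
+ // stored in CDiskTxPos is relative to the end of the serialized block header,
+ // so the first transaction sits just past the compact-size count of the vtx
+ // vector.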
+ CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
+ std::vector<std::pair<uint256, CDiskTxPos>> vPos;
+ vPos.reserve(block.vtx.size());
+ for (const auto& tx : block.vtx) {
+ vPos.emplace_back(tx->GetHash(), pos);
+ pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ }
+ return m_db->WriteTxs(vPos);
+}
+
+void TxIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted)
+{
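+ // While the index is still catching up, blocks are processed by the sync
+ // thread rather than through validation notifications, so there is nothing to
+ // do here until m_synced is set.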
+ if (!m_synced) {
+ return;
+ }
+
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (!best_block_index) {
+ if (pindex->nHeight != 0) {
+ FatalError("%s: First block connected is not the genesis block (height=%d)",
+ __func__, pindex->nHeight);
+ return;
+ }
+ } else {
+ // Ensure block connects to an ancestor of the current best block. This should be the case
+ // most of the time, but may not be immediately after the sync thread catches up and sets
+ // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
+ // in the ValidationInterface queue backlog even after the sync thread has caught up to the
+ // new chain tip. In this unlikely event, log a warning and let the queue clear.
+ if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
+ LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
+ "known best chain (tip=%s); not updating txindex\n",
+ __func__, pindex->GetBlockHash().ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+ }
+
+ if (WriteBlock(*block, pindex)) {
+ m_best_block_index = pindex;
+ } else {
+ FatalError("%s: Failed to write block %s to txindex",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+}
+
+void TxIndex::SetBestChain(const CBlockLocator& locator)
+{
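+ // As in BlockConnected, ignore notifications until the sync thread has caught
+ // up and set m_synced.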
+ if (!m_synced) {
+ return;
+ }
+
+ const uint256& locator_tip_hash = locator.vHave.front();
+ const CBlockIndex* locator_tip_index;
+ {
+ LOCK(cs_main);
+ locator_tip_index = LookupBlockIndex(locator_tip_hash);
+ }
+
+ if (!locator_tip_index) {
+ FatalError("%s: First block (hash=%s) in locator was not found",
+ __func__, locator_tip_hash.ToString());
+ return;
+ }
+
+ // This checks that SetBestChain callbacks are received after BlockConnected. The check may fail
+ // immediately after the sync thread catches up and sets m_synced. Consider the case where
+ // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
+ // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
+ // event, log a warning and let the queue clear.
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
+ LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
+ "chain (tip=%s); not writing txindex locator\n",
+ __func__, locator_tip_hash.ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+
+ if (!m_db->WriteBestBlock(locator)) {
+ error("%s: Failed to write locator to disk", __func__);
+ }
+}
+
+bool TxIndex::FindTx(const uint256& txid, CDiskTxPos& pos) const
+{
+ return m_db->ReadTxPos(txid, pos);
+}
+
+void TxIndex::Start()
+{
+ // Need to register this ValidationInterface before running Init(), so that
+ // callbacks are not missed if Init sets m_synced to true.
+ RegisterValidationInterface(this);
+ if (!Init()) {
+ FatalError("%s: txindex failed to initialize", __func__);
+ return;
+ }
+}
+
+void TxIndex::Stop()
+{
+ UnregisterValidationInterface(this);
+}
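
For context, a minimal sketch of how a caller elsewhere in the tree might resolve a txid through this index and load the transaction back from the block files. It is illustrative only: LookupTx and the g_txindex global are hypothetical names, not part of this commit, while CAutoFile, OpenBlockFile and CLIENT_VERSION are existing helpers from the surrounding codebase. The fseek mirrors the offset convention used by WriteBlock above, where nTxOffset is counted from the end of the serialized block header.

// Sketch only: LookupTx and g_txindex are illustrative, not introduced by this diff.
#include <clientversion.h>
#include <cstdio>
#include <index/txindex.h>
#include <memory>
#include <streams.h>
#include <validation.h>

extern std::unique_ptr<TxIndex> g_txindex; // assumed to be constructed during init

static bool LookupTx(const uint256& txid, CTransactionRef& tx_out)
{
    CDiskTxPos pos;
    if (!g_txindex || !g_txindex->FindTx(txid, pos)) {
        return false; // txid not indexed (or the index is disabled)
    }
    CAutoFile file(OpenBlockFile(pos, /* fReadOnly */ true), SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        return false;
    }
    try {
        CBlockHeader header;
        file >> header;                             // deserialize past the block header...
        fseek(file.Get(), pos.nTxOffset, SEEK_CUR); // ...then seek to the recorded offset
        file >> tx_out;                             // read the transaction itself
    } catch (const std::exception&) {
        return false;
    }
    return tx_out->GetHash() == txid;               // guard against index corruption
}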