// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "txdb.h"
#include "pow.h"
#include "uint256.h"
#include <stdint.h>
#include <boost/thread.hpp>
using namespace std;
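// Chainstate database layout: one record per transaction with unspent outputs,
// keyed by 'c' + txid, plus the best block hash under the single key 'B'.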
void static BatchWriteCoins(CLevelDBBatch &batch, const uint256 &hash, const CCoins &coins) {
    if (coins.IsPruned())
        batch.Erase(make_pair('c', hash));
    else
        batch.Write(make_pair('c', hash), coins);
}

void static BatchWriteHashBestChain(CLevelDBBatch &batch, const uint256 &hash) {
    batch.Write('B', hash);
}

CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe) {
}

bool CCoinsViewDB::GetCoins(const uint256 &txid, CCoins &coins) const {
    return db.Read(make_pair('c', txid), coins);
}

bool CCoinsViewDB::HaveCoins(const uint256 &txid) const {
    return db.Exists(make_pair('c', txid));
}

uint256 CCoinsViewDB::GetBestBlock() const {
    uint256 hashBestChain;
    if (!db.Read('B', hashBestChain))
        return uint256(0);
    return hashBestChain;
}

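// Flush a coin cache to the database: entries flagged DIRTY are written (or
// erased when fully spent), and every entry is dropped from the in-memory map.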
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
    CLevelDBBatch batch;
    size_t count = 0;
    size_t changed = 0;
    for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
        if (it->second.flags & CCoinsCacheEntry::DIRTY) {
            BatchWriteCoins(batch, it->first, it->second.coins);
            changed++;
        }
        count++;
        CCoinsMap::iterator itOld = it++;
        mapCoins.erase(itOld);
    }
    if (hashBlock != uint256(0))
        BatchWriteHashBestChain(batch, hashBlock);

    LogPrint("coindb", "Committing %u changed transactions (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
    return db.WriteBatch(batch);
}

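// Block index database ("blocks/index") layout: block headers under 'b',
// per-file statistics under 'f', the last block file number under 'l', a
// reindexing marker under 'R', the optional transaction index under 't',
// and named boolean flags under 'F'.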
CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CLevelDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {
}

bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) {
    return Read(make_pair('f', nFile), info);
}

bool CBlockTreeDB::WriteReindexing(bool fReindexing) {
    if (fReindexing)
        return Write('R', '1');
    else
        return Erase('R');
}

bool CBlockTreeDB::ReadReindexing(bool &fReindexing) {
    fReindexing = Exists('R');
    return true;
}

bool CBlockTreeDB::ReadLastBlockFile(int &nFile) {
    return Read('l', nFile);
}

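// Scan every 'c' record in the chainstate to accumulate UTXO statistics and a
// hash of the serialized UTXO set.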
bool CCoinsViewDB::GetStats(CCoinsStats &stats) const {
    /* It seems that there are no "const iterators" for LevelDB. Since we
       only need read operations on it, use a const-cast to get around
       that restriction. */
    boost::scoped_ptr<leveldb::Iterator> pcursor(const_cast<CLevelDBWrapper*>(&db)->NewIterator());
    pcursor->SeekToFirst();

    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    stats.hashBlock = GetBestBlock();
    ss << stats.hashBlock;
    CAmount nTotalAmount = 0;
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        try {
            leveldb::Slice slKey = pcursor->key();
            CDataStream ssKey(slKey.data(), slKey.data()+slKey.size(), SER_DISK, CLIENT_VERSION);
            char chType;
            ssKey >> chType;
            if (chType == 'c') {
                leveldb::Slice slValue = pcursor->value();
                CDataStream ssValue(slValue.data(), slValue.data()+slValue.size(), SER_DISK, CLIENT_VERSION);
                CCoins coins;
                ssValue >> coins;
                uint256 txhash;
                ssKey >> txhash;
                ss << txhash;
                ss << VARINT(coins.nVersion);
                ss << (coins.fCoinBase ? 'c' : 'n');
                ss << VARINT(coins.nHeight);
                stats.nTransactions++;
                for (unsigned int i=0; i<coins.vout.size(); i++) {
                    const CTxOut &out = coins.vout[i];
                    if (!out.IsNull()) {
                        stats.nTransactionOutputs++;
                        ss << VARINT(i+1);
                        ss << out;
                        nTotalAmount += out.nValue;
                    }
                }
                stats.nSerializedSize += 32 + slValue.size();
                ss << VARINT(0);
            }
            pcursor->Next();
        } catch (const std::exception& e) {
            return error("%s : Deserialize or I/O error - %s", __func__, e.what());
        }
    }
    stats.nHeight = mapBlockIndex.find(GetBestBlock())->second->nHeight;
    stats.hashSerialized = ss.GetHash();
    stats.nTotalAmount = nTotalAmount;
    return true;
}

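// Write updated file metadata, the last file number and new block index
// entries in a single synchronous batch.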
bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo) {
    CLevelDBBatch batch;
    for (std::vector<std::pair<int, const CBlockFileInfo*> >::const_iterator it=fileInfo.begin(); it != fileInfo.end(); it++) {
        batch.Write(make_pair('f', it->first), *it->second);
    }
    batch.Write('l', nLastFile);
    for (std::vector<const CBlockIndex*>::const_iterator it=blockinfo.begin(); it != blockinfo.end(); it++) {
        batch.Write(make_pair('b', (*it)->GetBlockHash()), CDiskBlockIndex(*it));
    }
    return WriteBatch(batch, true);
}

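// Optional transaction index: maps a txid to the on-disk position of the
// transaction.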
bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
    return Read(make_pair('t', txid), pos);
}

bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
    CLevelDBBatch batch;
    for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
        batch.Write(make_pair('t', it->first), it->second);
    return WriteBatch(batch);
}

bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
    return Write(std::make_pair('F', name), fValue ? '1' : '0');
}

bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
    char ch;
    if (!Read(std::make_pair('F', name), ch))
        return false;
    fValue = ch == '1';
    return true;
}

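// Rebuild the in-memory block index from every 'b' record, verifying each
// header's proof of work as it is loaded.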
bool CBlockTreeDB::LoadBlockIndexGuts()
{
    boost::scoped_ptr<leveldb::Iterator> pcursor(NewIterator());

    CDataStream ssKeySet(SER_DISK, CLIENT_VERSION);
    ssKeySet << make_pair('b', uint256(0));
    pcursor->Seek(ssKeySet.str());

    // Load mapBlockIndex
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        try {
            leveldb::Slice slKey = pcursor->key();
            CDataStream ssKey(slKey.data(), slKey.data()+slKey.size(), SER_DISK, CLIENT_VERSION);
            char chType;
            ssKey >> chType;
            if (chType == 'b') {
                leveldb::Slice slValue = pcursor->value();
                CDataStream ssValue(slValue.data(), slValue.data()+slValue.size(), SER_DISK, CLIENT_VERSION);
                CDiskBlockIndex diskindex;
                ssValue >> diskindex;

                // Construct block index object
                CBlockIndex* pindexNew = InsertBlockIndex(diskindex.GetBlockHash());
                pindexNew->pprev          = InsertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight        = diskindex.nHeight;
                pindexNew->nFile          = diskindex.nFile;
                pindexNew->nDataPos       = diskindex.nDataPos;
                pindexNew->nUndoPos       = diskindex.nUndoPos;
                pindexNew->nVersion       = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime          = diskindex.nTime;
                pindexNew->nBits          = diskindex.nBits;
                pindexNew->nNonce         = diskindex.nNonce;
                pindexNew->nStatus        = diskindex.nStatus;
                pindexNew->nTx            = diskindex.nTx;

                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits))
                    return error("LoadBlockIndex() : CheckProofOfWork failed: %s", pindexNew->ToString());

                pcursor->Next();
            } else {
                break; // if shutdown requested or finished loading block index
            }
        } catch (const std::exception& e) {
            return error("%s : Deserialize or I/O error - %s", __func__, e.what());
        }
    }

    return true;
}