Diffstat (limited to 'src')
-rw-r--r-- src/Makefile.qt.include | 3
-rw-r--r-- src/addrman.cpp | 8
-rw-r--r-- src/addrman.h | 239
-rw-r--r-- src/chain.cpp | 5
-rw-r--r-- src/chain.h | 5
-rw-r--r-- src/chainparams.cpp | 2
-rw-r--r-- src/core.cpp | 2
-rw-r--r-- src/core_read.cpp | 2
-rw-r--r-- src/core_write.cpp | 1
-rw-r--r-- src/crypter.cpp | 1
-rw-r--r-- src/init.cpp | 4
-rw-r--r-- src/key.h | 8
-rw-r--r-- src/keystore.cpp | 3
-rw-r--r-- src/keystore.h | 1
-rw-r--r-- src/leveldb/CONTRIBUTING.md | 36
-rw-r--r-- src/leveldb/Makefile | 28
-rw-r--r-- src/leveldb/README.md | 138
-rwxr-xr-x src/leveldb/build_detect_platform | 15
-rw-r--r-- src/leveldb/db/db_bench.cc | 3
-rw-r--r-- src/leveldb/db/db_impl.cc | 4
-rw-r--r-- src/leveldb/db/db_test.cc | 2
-rw-r--r-- src/leveldb/db/dbformat.h | 6
-rw-r--r-- src/leveldb/db/dumpfile.cc | 225
-rw-r--r-- src/leveldb/db/leveldb_main.cc | 204
-rw-r--r-- src/leveldb/db/log_format.h | 4
-rw-r--r-- src/leveldb/db/log_reader.cc | 6
-rw-r--r-- src/leveldb/db/log_reader.h | 4
-rw-r--r-- src/leveldb/db/log_test.cc | 2
-rw-r--r-- src/leveldb/db/repair.cc | 2
-rw-r--r-- src/leveldb/db/skiplist.h | 5
-rw-r--r-- src/leveldb/db/write_batch_internal.h | 4
-rw-r--r-- src/leveldb/doc/bench/db_bench_tree_db.cc | 2
-rw-r--r-- src/leveldb/doc/impl.html | 4
-rw-r--r-- src/leveldb/doc/log_format.txt | 6
-rw-r--r-- src/leveldb/helpers/memenv/memenv.cc | 9
-rw-r--r-- src/leveldb/include/leveldb/cache.h | 2
-rw-r--r-- src/leveldb/include/leveldb/db.h | 2
-rw-r--r-- src/leveldb/include/leveldb/dumpfile.h | 25
-rw-r--r-- src/leveldb/include/leveldb/env.h | 2
-rw-r--r-- src/leveldb/include/leveldb/iterator.h | 2
-rw-r--r-- src/leveldb/include/leveldb/options.h | 2
-rw-r--r-- src/leveldb/port/atomic_pointer.h | 21
-rw-r--r-- src/leveldb/port/port_posix.h | 9
-rw-r--r-- src/leveldb/port/thread_annotations.h | 5
-rw-r--r-- src/leveldb/table/block.cc | 2
-rw-r--r-- src/leveldb/table/block_builder.h | 2
-rw-r--r-- src/leveldb/table/format.cc | 2
-rw-r--r-- src/leveldb/table/table.cc | 14
-rw-r--r-- src/leveldb/util/bloom.cc | 2
-rw-r--r-- src/leveldb/util/env_posix.cc | 10
-rw-r--r-- src/leveldb/util/hash.cc | 6
-rw-r--r-- src/leveldb/util/hash_test.cc | 54
-rw-r--r-- src/leveldb/util/logging.cc | 9
-rw-r--r-- src/leveldb/util/logging.h | 4
-rw-r--r-- src/main.cpp | 102
-rw-r--r-- src/main.h | 1
-rw-r--r-- src/miner.cpp | 3
-rw-r--r-- src/net.cpp | 6
-rw-r--r-- src/protocol.cpp | 1
-rw-r--r-- src/qt/bitcoinamountfield.h | 4
-rw-r--r-- src/qt/monitoreddatamapper.cpp | 39
-rw-r--r-- src/qt/monitoreddatamapper.h | 34
-rw-r--r-- src/qt/optionsdialog.cpp | 4
-rw-r--r-- src/qt/optionsdialog.h | 4
-rw-r--r-- src/qt/test/paymentservertests.cpp | 1
-rw-r--r-- src/rpcblockchain.cpp | 2
-rw-r--r-- src/rpcmisc.cpp | 2
-rw-r--r-- src/rpcrawtransaction.cpp | 2
-rw-r--r-- src/rpcwallet.cpp | 2
-rw-r--r-- src/script/compressor.cpp | 3
-rw-r--r-- src/script/compressor.h | 5
-rw-r--r-- src/script/interpreter.cpp | 1
-rw-r--r-- src/script/script.cpp | 36
-rw-r--r-- src/script/script.h | 78
-rw-r--r-- src/script/sign.cpp | 2
-rw-r--r-- src/script/standard.cpp | 9
-rw-r--r-- src/script/standard.h | 12
-rw-r--r-- src/serialize.h | 19
-rw-r--r-- src/test/base58_tests.cpp | 1
-rw-r--r-- src/test/bloom_tests.cpp | 1
-rw-r--r-- src/test/checkblock_tests.cpp | 2
-rw-r--r-- src/test/miner_tests.cpp | 2
-rw-r--r-- src/test/multisig_tests.cpp | 42
-rw-r--r-- src/test/script_P2SH_tests.cpp | 40
-rw-r--r-- src/test/script_tests.cpp | 90
-rw-r--r-- src/test/sigopcount_tests.cpp | 10
-rw-r--r-- src/test/transaction_tests.cpp | 4
-rw-r--r-- src/txmempool.cpp | 1
-rw-r--r-- src/utilmoneystr.cpp | 1
-rw-r--r-- src/wallet.cpp | 2
90 files changed, 1009 insertions, 727 deletions
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index f8f4439159..8fb4af81ac 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -110,7 +110,6 @@ QT_MOC_CPP = \
qt/moc_intro.cpp \
qt/moc_macdockiconhandler.cpp \
qt/moc_macnotificationhandler.cpp \
- qt/moc_monitoreddatamapper.cpp \
qt/moc_notificator.cpp \
qt/moc_openuridialog.cpp \
qt/moc_optionsdialog.cpp \
@@ -177,7 +176,6 @@ BITCOIN_QT_H = \
qt/intro.h \
qt/macdockiconhandler.h \
qt/macnotificationhandler.h \
- qt/monitoreddatamapper.h \
qt/networkstyle.h \
qt/notificator.h \
qt/openuridialog.h \
@@ -269,7 +267,6 @@ BITCOIN_QT_CPP = \
qt/csvmodelwriter.cpp \
qt/guiutil.cpp \
qt/intro.cpp \
- qt/monitoreddatamapper.cpp \
qt/networkstyle.cpp \
qt/notificator.cpp \
qt/optionsdialog.cpp \
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 7b674a66e7..7ff21b00ec 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2012 Pieter Wuille
-// Distributed under the MIT/X11 software license, see the accompanying
+// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "addrman.h"
@@ -39,7 +39,7 @@ int CAddrInfo::GetNewBucket(const std::vector<unsigned char>& nKey, const CNetAd
bool CAddrInfo::IsTerrible(int64_t nNow) const
{
- if (nLastTry && nLastTry >= nNow - 60) // never remove things tried the last minute
+ if (nLastTry && nLastTry >= nNow - 60) // never remove things tried in the last minute
return false;
if (nTime > nNow + 10 * 60) // came in a flying DeLorean
@@ -131,7 +131,7 @@ int CAddrMan::SelectTried(int nKBucket)
{
std::vector<int>& vTried = vvTried[nKBucket];
- // random shuffle the first few elements (using the entire list)
+ // randomly shuffle the first few elements (using the entire list)
// find the least recently tried among them
int64_t nOldest = -1;
int nOldestPos = -1;
@@ -211,7 +211,7 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId, int nOrigin)
assert(info.nRefCount == 0);
- // what tried bucket to move the entry to
+ // which tried bucket to move the entry to
int nKBucket = info.GetTriedBucket(nKey);
std::vector<int>& vTried = vvTried[nKBucket];
diff --git a/src/addrman.h b/src/addrman.h
index 5fd698f18a..914086fc76 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -1,5 +1,5 @@
// Copyright (c) 2012 Pieter Wuille
-// Distributed under the MIT/X11 software license, see the accompanying
+// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef _BITCOIN_ADDRMAN
@@ -17,29 +17,31 @@
#include <stdint.h>
#include <vector>
-/** Extended statistics about a CAddress */
+/**
+ * Extended statistics about a CAddress
+ */
class CAddrInfo : public CAddress
{
private:
- // where knowledge about this address first came from
+ //! where knowledge about this address first came from
CNetAddr source;
- // last successful connection by us
+ //! last successful connection by us
int64_t nLastSuccess;
- // last try whatsoever by us:
+ //! last try whatsoever by us:
// int64_t CAddress::nLastTry
- // connection attempts since last successful attempt
+ //! connection attempts since last successful attempt
int nAttempts;
- // reference count in new sets (memory only)
+ //! reference count in new sets (memory only)
int nRefCount;
- // in tried set? (memory only)
+ //! in tried set? (memory only)
bool fInTried;
- // position in vRandom
+ //! position in vRandom
int nRandomPos;
friend class CAddrMan;
@@ -76,200 +78,205 @@ public:
Init();
}
- // Calculate in which "tried" bucket this entry belongs
+ //! Calculate in which "tried" bucket this entry belongs
int GetTriedBucket(const std::vector<unsigned char> &nKey) const;
- // Calculate in which "new" bucket this entry belongs, given a certain source
+ //! Calculate in which "new" bucket this entry belongs, given a certain source
int GetNewBucket(const std::vector<unsigned char> &nKey, const CNetAddr& src) const;
- // Calculate in which "new" bucket this entry belongs, using its default source
+ //! Calculate in which "new" bucket this entry belongs, using its default source
int GetNewBucket(const std::vector<unsigned char> &nKey) const
{
return GetNewBucket(nKey, source);
}
- // Determine whether the statistics about this entry are bad enough so that it can just be deleted
+ //! Determine whether the statistics about this entry are bad enough so that it can just be deleted
bool IsTerrible(int64_t nNow = GetAdjustedTime()) const;
- // Calculate the relative chance this entry should be given when selecting nodes to connect to
+ //! Calculate the relative chance this entry should be given when selecting nodes to connect to
double GetChance(int64_t nNow = GetAdjustedTime()) const;
};
-// Stochastic address manager
-//
-// Design goals:
-// * Only keep a limited number of addresses around, so that addr.dat and memory requirements do not grow without bound.
-// * Keep the address tables in-memory, and asynchronously dump the entire to able in addr.dat.
-// * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
-//
-// To that end:
-// * Addresses are organized into buckets.
-// * Address that have not yet been tried go into 256 "new" buckets.
-// * Based on the address range (/16 for IPv4) of source of the information, 32 buckets are selected at random
-// * The actual bucket is chosen from one of these, based on the range the address itself is located.
-// * One single address can occur in up to 4 different buckets, to increase selection chances for addresses that
-// are seen frequently. The chance for increasing this multiplicity decreases exponentially.
-// * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
-// ones) is removed from it first.
-// * Addresses of nodes that are known to be accessible go into 64 "tried" buckets.
-// * Each address range selects at random 4 of these buckets.
-// * The actual bucket is chosen from one of these, based on the full address.
-// * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently
-// tried ones) is evicted from it, back to the "new" buckets.
-// * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not
-// be observable by adversaries.
-// * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive)
-// consistency checks for the entire data structure.
-
-// total number of buckets for tried addresses
+/** Stochastic address manager
+ *
+ * Design goals:
+ * * Keep the address tables in-memory, and asynchronously dump the entire to able in peers.dat.
+ * * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
+ *
+ * To that end:
+ * * Addresses are organized into buckets.
+ * * Address that have not yet been tried go into 256 "new" buckets.
+ * * Based on the address range (/16 for IPv4) of source of the information, 32 buckets are selected at random
+ * * The actual bucket is chosen from one of these, based on the range the address itself is located.
+ * * One single address can occur in up to 4 different buckets, to increase selection chances for addresses that
+ * are seen frequently. The chance for increasing this multiplicity decreases exponentially.
+ * * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
+ * ones) is removed from it first.
+ * * Addresses of nodes that are known to be accessible go into 64 "tried" buckets.
+ * * Each address range selects at random 4 of these buckets.
+ * * The actual bucket is chosen from one of these, based on the full address.
+ * * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently
+ * tried ones) is evicted from it, back to the "new" buckets.
+ * * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not
+ * be observable by adversaries.
+ * * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive)
+ * consistency checks for the entire data structure.
+ */
+
+//! total number of buckets for tried addresses
#define ADDRMAN_TRIED_BUCKET_COUNT 64
-// maximum allowed number of entries in buckets for tried addresses
+//! maximum allowed number of entries in buckets for tried addresses
#define ADDRMAN_TRIED_BUCKET_SIZE 64
-// total number of buckets for new addresses
+//! total number of buckets for new addresses
#define ADDRMAN_NEW_BUCKET_COUNT 256
-// maximum allowed number of entries in buckets for new addresses
+//! maximum allowed number of entries in buckets for new addresses
#define ADDRMAN_NEW_BUCKET_SIZE 64
-// over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread
+//! over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread
#define ADDRMAN_TRIED_BUCKETS_PER_GROUP 4
-// over how many buckets entries with new addresses originating from a single group are spread
+//! over how many buckets entries with new addresses originating from a single group are spread
#define ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP 32
-// in how many buckets for entries with new addresses a single address may occur
+//! in how many buckets for entries with new addresses a single address may occur
#define ADDRMAN_NEW_BUCKETS_PER_ADDRESS 4
-// how many entries in a bucket with tried addresses are inspected, when selecting one to replace
+//! how many entries in a bucket with tried addresses are inspected, when selecting one to replace
#define ADDRMAN_TRIED_ENTRIES_INSPECT_ON_EVICT 4
-// how old addresses can maximally be
+//! how old addresses can maximally be
#define ADDRMAN_HORIZON_DAYS 30
-// after how many failed attempts we give up on a new node
+//! after how many failed attempts we give up on a new node
#define ADDRMAN_RETRIES 3
-// how many successive failures are allowed ...
+//! how many successive failures are allowed ...
#define ADDRMAN_MAX_FAILURES 10
-// ... in at least this many days
+//! ... in at least this many days
#define ADDRMAN_MIN_FAIL_DAYS 7
-// the maximum percentage of nodes to return in a getaddr call
+//! the maximum percentage of nodes to return in a getaddr call
#define ADDRMAN_GETADDR_MAX_PCT 23
-// the maximum number of nodes to return in a getaddr call
+//! the maximum number of nodes to return in a getaddr call
#define ADDRMAN_GETADDR_MAX 2500
-/** Stochastical (IP) address manager */
+/**
+ * Stochastical (IP) address manager
+ */
class CAddrMan
{
private:
- // critical section to protect the inner data structures
+ //! critical section to protect the inner data structures
mutable CCriticalSection cs;
- // secret key to randomize bucket select with
+ //! secret key to randomize bucket select with
std::vector<unsigned char> nKey;
- // last used nId
+ //! last used nId
int nIdCount;
- // table with information about all nIds
+ //! table with information about all nIds
std::map<int, CAddrInfo> mapInfo;
- // find an nId based on its network address
+ //! find an nId based on its network address
std::map<CNetAddr, int> mapAddr;
- // randomly-ordered vector of all nIds
+ //! randomly-ordered vector of all nIds
std::vector<int> vRandom;
// number of "tried" entries
int nTried;
- // list of "tried" buckets
+ //! list of "tried" buckets
std::vector<std::vector<int> > vvTried;
- // number of (unique) "new" entries
+ //! number of (unique) "new" entries
int nNew;
- // list of "new" buckets
+ //! list of "new" buckets
std::vector<std::set<int> > vvNew;
protected:
- // Find an entry.
+ //! Find an entry.
CAddrInfo* Find(const CNetAddr& addr, int *pnId = NULL);
- // find an entry, creating it if necessary.
- // nTime and nServices of found node is updated, if necessary.
+ //! find an entry, creating it if necessary.
+ //! nTime and nServices of the found node are updated, if necessary.
CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = NULL);
- // Swap two elements in vRandom.
+ //! Swap two elements in vRandom.
void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2);
- // Return position in given bucket to replace.
+ //! Return position in given bucket to replace.
int SelectTried(int nKBucket);
- // Remove an element from a "new" bucket.
- // This is the only place where actual deletes occur.
- // They are never deleted while in the "tried" table, only possibly evicted back to the "new" table.
+ //! Remove an element from a "new" bucket.
+ //! This is the only place where actual deletions occur.
+ //! Elements are never deleted while in the "tried" table, only possibly evicted back to the "new" table.
int ShrinkNew(int nUBucket);
- // Move an entry from the "new" table(s) to the "tried" table
- // @pre vvUnkown[nOrigin].count(nId) != 0
+ //! Move an entry from the "new" table(s) to the "tried" table
+ //! @pre vvUnkown[nOrigin].count(nId) != 0
void MakeTried(CAddrInfo& info, int nId, int nOrigin);
- // Mark an entry "good", possibly moving it from "new" to "tried".
+ //! Mark an entry "good", possibly moving it from "new" to "tried".
void Good_(const CService &addr, int64_t nTime);
- // Add an entry to the "new" table.
+ //! Add an entry to the "new" table.
bool Add_(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty);
- // Mark an entry as attempted to connect.
+ //! Mark an entry as attempted to connect.
void Attempt_(const CService &addr, int64_t nTime);
- // Select an address to connect to.
- // nUnkBias determines how much to favor new addresses over tried ones (min=0, max=100)
+ //! Select an address to connect to.
+ //! nUnkBias determines how much to favor new addresses over tried ones (min=0, max=100)
CAddress Select_(int nUnkBias);
#ifdef DEBUG_ADDRMAN
- // Perform consistency check. Returns an error code or zero.
+ //! Perform consistency check. Returns an error code or zero.
int Check_();
#endif
- // Select several addresses at once.
+ //! Select several addresses at once.
void GetAddr_(std::vector<CAddress> &vAddr);
- // Mark an entry as currently-connected-to.
+ //! Mark an entry as currently-connected-to.
void Connected_(const CService &addr, int64_t nTime);
public:
- // serialized format:
- // * version byte (currently 0)
- // * nKey
- // * nNew
- // * nTried
- // * number of "new" buckets
- // * all nNew addrinfos in vvNew
- // * all nTried addrinfos in vvTried
- // * for each bucket:
- // * number of elements
- // * for each element: index
- //
- // Notice that vvTried, mapAddr and vVector are never encoded explicitly;
- // they are instead reconstructed from the other information.
- //
- // vvNew is serialized, but only used if ADDRMAN_UNKOWN_BUCKET_COUNT didn't change,
- // otherwise it is reconstructed as well.
- //
- // This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
- // changes to the ADDRMAN_ parameters without breaking the on-disk structure.
- //
- // We don't use ADD_SERIALIZE_METHODS since the serialization and deserialization code has
- // very little in common.
+ /**
+ * serialized format:
+ * * version byte (currently 0)
+ * * nKey
+ * * nNew
+ * * nTried
+ * * number of "new" buckets
+ * * all nNew addrinfos in vvNew
+ * * all nTried addrinfos in vvTried
+ * * for each bucket:
+ * * number of elements
+ * * for each element: index
+ *
+ * Notice that vvTried, mapAddr and vVector are never encoded explicitly;
+ * they are instead reconstructed from the other information.
+ *
+ * vvNew is serialized, but only used if ADDRMAN_UNKOWN_BUCKET_COUNT didn't change,
+ * otherwise it is reconstructed as well.
+ *
+ * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
+ * changes to the ADDRMAN_ parameters without breaking the on-disk structure.
+ *
+ * We don't use ADD_SERIALIZE_METHODS since the serialization and deserialization code has
+ * very little in common.
+ *
+ */
template<typename Stream>
void Serialize(Stream &s, int nType, int nVersionDummy) const
{
@@ -394,13 +401,13 @@ public:
nNew = 0;
}
- // Return the number of (unique) addresses in all tables.
+ //! Return the number of (unique) addresses in all tables.
int size()
{
return vRandom.size();
}
- // Consistency check
+ //! Consistency check
void Check()
{
#ifdef DEBUG_ADDRMAN
@@ -413,7 +420,7 @@ public:
#endif
}
- // Add a single address.
+ //! Add a single address.
bool Add(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty = 0)
{
bool fRet = false;
@@ -428,7 +435,7 @@ public:
return fRet;
}
- // Add multiple addresses.
+ //! Add multiple addresses.
bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0)
{
int nAdd = 0;
@@ -444,7 +451,7 @@ public:
return nAdd > 0;
}
- // Mark an entry as accessible.
+ //! Mark an entry as accessible.
void Good(const CService &addr, int64_t nTime = GetAdjustedTime())
{
{
@@ -455,7 +462,7 @@ public:
}
}
- // Mark an entry as connection attempted to.
+ //! Mark an entry as connection attempted to.
void Attempt(const CService &addr, int64_t nTime = GetAdjustedTime())
{
{
@@ -466,8 +473,10 @@ public:
}
}
- // Choose an address to connect to.
- // nUnkBias determines how much "new" entries are favored over "tried" ones (0-100).
+ /**
+ * Choose an address to connect to.
+ * nUnkBias determines how much "new" entries are favored over "tried" ones (0-100).
+ */
CAddress Select(int nUnkBias = 50)
{
CAddress addrRet;
@@ -480,7 +489,7 @@ public:
return addrRet;
}
- // Return a bunch of addresses, selected at random.
+ //! Return a bunch of addresses, selected at random.
std::vector<CAddress> GetAddr()
{
Check();
@@ -493,7 +502,7 @@ public:
return vAddr;
}
- // Mark an entry as currently-connected-to.
+ //! Mark an entry as currently-connected-to.
void Connected(const CService &addr, int64_t nTime = GetAdjustedTime())
{
{
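[Editor's note] The design comments in the addrman.h hunk above describe bucket selection as a keyed hash over a secret 256-bit nKey and the address's group. The fragment below is only an illustrative C++ sketch of that idea, not the code in addrman.cpp: KeyedHash is a hypothetical stand-in, and the real GetTriedBucket() does a two-step selection (the group picks a small set of buckets, the full address picks within it).

    // Illustrative sketch only; KeyedHash is a hypothetical stand-in for the
    // hashing actually done in addrman.cpp.
    #include <stdint.h>
    #include <vector>

    static const int TRIED_BUCKET_COUNT = 64; // mirrors ADDRMAN_TRIED_BUCKET_COUNT

    // Hypothetical keyed 64-bit hash over (secret key || data).
    uint64_t KeyedHash(const std::vector<unsigned char>& key,
                       const std::vector<unsigned char>& data);

    int TriedBucketSketch(const std::vector<unsigned char>& key,
                          const std::vector<unsigned char>& addrGroup)
    {
        // Because the bucket depends on a secret key, an attacker who cannot
        // observe the key cannot steer his addresses into chosen buckets.
        return static_cast<int>(KeyedHash(key, addrGroup) % TRIED_BUCKET_COUNT);
    }
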
diff --git a/src/chain.cpp b/src/chain.cpp
index 05427a4569..56ed22ce71 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -9,17 +9,16 @@ using namespace std;
// CChain implementation
-CBlockIndex *CChain::SetTip(CBlockIndex *pindex) {
+void CChain::SetTip(CBlockIndex *pindex) {
if (pindex == NULL) {
vChain.clear();
- return NULL;
+ return;
}
vChain.resize(pindex->nHeight + 1);
while (pindex && vChain[pindex->nHeight] != pindex) {
vChain[pindex->nHeight] = pindex;
pindex = pindex->pprev;
}
- return pindex;
}
CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const {
diff --git a/src/chain.h b/src/chain.h
index 4e6a466c6a..290150476e 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -8,6 +8,7 @@
#include "core.h"
#include "pow.h"
+#include "tinyformat.h"
#include "uint256.h"
#include <vector>
@@ -395,8 +396,8 @@ public:
return vChain.size() - 1;
}
- /** Set/initialize a chain with a given tip. Returns the forking point. */
- CBlockIndex *SetTip(CBlockIndex *pindex);
+ /** Set/initialize a chain with a given tip. */
+ void SetTip(CBlockIndex *pindex);
/** Return a CBlockLocator that refers to a block in this chain (by default the tip). */
CBlockLocator GetLocator(const CBlockIndex *pindex = NULL) const;
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index dfb4c59d87..1ab292517a 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -7,6 +7,7 @@
#include "random.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <assert.h>
@@ -265,6 +266,7 @@ public:
nDefaultPort = 18444;
assert(hashGenesisBlock == uint256("0x0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"));
+ vFixedSeeds.clear(); // Regtest mode doesn't have any fixed seeds.
vSeeds.clear(); // Regtest mode doesn't have any DNS seeds.
fRequireRPCPassword = false;
diff --git a/src/core.cpp b/src/core.cpp
index 6a7a9ff378..73e6de88e1 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -5,7 +5,9 @@
#include "core.h"
+#include "hash.h"
#include "tinyformat.h"
+#include "utilstrencodings.h"
std::string COutPoint::ToString() const
{
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 6bd3d9a4fa..8b85a03c54 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -9,6 +9,8 @@
#include "serialize.h"
#include "univalue/univalue.h"
#include "util.h"
+#include "utilstrencodings.h"
+#include "version.h"
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/predicate.hpp>
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 40d547fb33..e42e0b62a9 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -12,6 +12,7 @@
#include "univalue/univalue.h"
#include "util.h"
#include "utilmoneystr.h"
+#include "utilstrencodings.h"
#include <boost/foreach.hpp>
diff --git a/src/crypter.cpp b/src/crypter.cpp
index a872df7024..756538836d 100644
--- a/src/crypter.cpp
+++ b/src/crypter.cpp
@@ -5,6 +5,7 @@
#include "crypter.h"
#include "script/script.h"
+#include "script/standard.h"
#include "util.h"
#include <string>
diff --git a/src/init.cpp b/src/init.cpp
index 743cdd4386..70ac5190d3 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -139,7 +139,7 @@ void Shutdown()
{
boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_fileout(fopen(est_path.string().c_str(), "wb"), SER_DISK, CLIENT_VERSION);
- if (est_fileout)
+ if (!est_fileout.IsNull())
mempool.WriteFeeEstimates(est_fileout);
else
LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string());
@@ -1064,7 +1064,7 @@ bool AppInit2(boost::thread_group& threadGroup)
boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_filein(fopen(est_path.string().c_str(), "rb"), SER_DISK, CLIENT_VERSION);
// Allowed to fail as this file IS missing on first startup.
- if (est_filein)
+ if (!est_filein.IsNull())
mempool.ReadFeeEstimates(est_filein);
fFeeEstimatesInitialized = true;
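[Editor's note] The init.cpp hunks above replace CAutoFile's implicit boolean test with an explicit IsNull() check. Below is a minimal sketch of that pattern using a stand-in RAII wrapper; the names are placeholders, not CAutoFile's real interface.

    #include <cstdio>

    // Stand-in for a RAII file wrapper that exposes an explicit IsNull() test.
    class FileWrapperSketch {
    public:
        explicit FileWrapperSketch(std::FILE* f) : file(f) {}
        ~FileWrapperSketch() { if (file) std::fclose(file); }
        bool IsNull() const { return file == nullptr; }
    private:
        std::FILE* file;
    };

    void WriteFeeEstimatesSketch(const char* path)
    {
        FileWrapperSketch fileout(std::fopen(path, "wb"));
        if (!fileout.IsNull()) {
            // ... serialize fee estimates to the file ...
        } else {
            // ... log the failure, as init.cpp does ...
        }
    }
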
diff --git a/src/key.h b/src/key.h
index f6f6d35d34..48b1652536 100644
--- a/src/key.h
+++ b/src/key.h
@@ -30,14 +30,6 @@ public:
CKeyID(const uint160& in) : uint160(in) {}
};
-/** A reference to a CScript: the Hash160 of its serialization (see script.h) */
-class CScriptID : public uint160
-{
-public:
- CScriptID() : uint160(0) {}
- CScriptID(const uint160& in) : uint160(in) {}
-};
-
/** An encapsulated public key. */
class CPubKey
{
diff --git a/src/keystore.cpp b/src/keystore.cpp
index 755defa26d..039c690625 100644
--- a/src/keystore.cpp
+++ b/src/keystore.cpp
@@ -8,6 +8,7 @@
#include "crypter.h"
#include "key.h"
#include "script/script.h"
+#include "script/standard.h"
#include "util.h"
#include <boost/foreach.hpp>
@@ -38,7 +39,7 @@ bool CBasicKeyStore::AddCScript(const CScript& redeemScript)
return error("CBasicKeyStore::AddCScript() : redeemScripts > %i bytes are invalid", MAX_SCRIPT_ELEMENT_SIZE);
LOCK(cs_KeyStore);
- mapScripts[redeemScript.GetID()] = redeemScript;
+ mapScripts[CScriptID(redeemScript)] = redeemScript;
return true;
}
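[Editor's note] keystore.cpp now builds the map key with CScriptID(redeemScript) instead of redeemScript.GetID(). Per the comment removed from key.h above, a CScriptID is the Hash160 of the script's serialization; the sketch below illustrates that relationship with a hypothetical Hash160 helper, not the real constructor declared in script/standard.h.

    #include <vector>

    struct ScriptIDSketch { unsigned char bytes[20]; };

    // Hypothetical helper standing in for Bitcoin's Hash160()
    // (RIPEMD-160 of the SHA-256 of the input).
    ScriptIDSketch Hash160Sketch(const std::vector<unsigned char>& data);

    // Roughly what CScriptID(script) denotes: hash the raw serialized script
    // rather than asking the script object for its own identifier.
    ScriptIDSketch ScriptToID(const std::vector<unsigned char>& serializedScript)
    {
        return Hash160Sketch(serializedScript);
    }
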
diff --git a/src/keystore.h b/src/keystore.h
index d3478f7672..4f8189c8f5 100644
--- a/src/keystore.h
+++ b/src/keystore.h
@@ -13,6 +13,7 @@
#include <boost/variant.hpp>
class CScript;
+class CScriptID;
/** A virtual base class for key stores */
class CKeyStore
diff --git a/src/leveldb/CONTRIBUTING.md b/src/leveldb/CONTRIBUTING.md
new file mode 100644
index 0000000000..cd600ff46b
--- /dev/null
+++ b/src/leveldb/CONTRIBUTING.md
@@ -0,0 +1,36 @@
+# Contributing
+
+We'd love to accept your code patches! However, before we can take them, we
+have to jump a couple of legal hurdles.
+
+## Contributor License Agreements
+
+Please fill out either the individual or corporate Contributor License
+Agreement as appropriate.
+
+* If you are an individual writing original source code and you're sure you
+own the intellectual property, then sign an [individual CLA](https://developers.google.com/open-source/cla/individual).
+* If you work for a company that wants to allow you to contribute your work,
+then sign a [corporate CLA](https://developers.google.com/open-source/cla/corporate).
+
+Follow either of the two links above to access the appropriate CLA and
+instructions for how to sign and return it.
+
+## Submitting a Patch
+
+1. Sign the contributors license agreement above.
+2. Decide which code you want to submit. A submission should be a set of changes
+that addresses one issue in the [issue tracker](https://github.com/google/leveldb/issues).
+Please don't mix more than one logical change per submission, because it makes
+the history hard to follow. If you want to make a change
+(e.g. add a sample or feature) that doesn't have a corresponding issue in the
+issue tracker, please create one.
+3. **Submitting**: When you are ready to submit, send us a Pull Request. Be
+sure to include the issue number you fixed and the name you used to sign
+the CLA.
+
+## Writing Code ##
+
+If your contribution contains code, please make sure that it follows
+[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml).
+Otherwise we will have to ask you to make changes, and that's no fun for anyone.
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
index f8903b69e4..2bd2cadcdd 100644
--- a/src/leveldb/Makefile
+++ b/src/leveldb/Makefile
@@ -6,9 +6,12 @@
# Uncomment exactly one of the lines labelled (A), (B), and (C) below
# to switch between compilation modes.
-OPT ?= -O2 -DNDEBUG # (A) Production use (optimized mode)
-# OPT ?= -g2 # (B) Debug mode, w/ full line-level debugging symbols
-# OPT ?= -O2 -g2 -DNDEBUG # (C) Profiling mode: opt, but w/debugging symbols
+# (A) Production use (optimized mode)
+OPT ?= -O2 -DNDEBUG
+# (B) Debug mode, w/ full line-level debugging symbols
+# OPT ?= -g2
+# (C) Profiling mode: opt, but w/debugging symbols
+# OPT ?= -O2 -g2 -DNDEBUG
#-----------------------------------------------
# detect what platform we're building on
@@ -29,6 +32,11 @@ MEMENVOBJECTS = $(MEMENV_SOURCES:.cc=.o)
TESTUTIL = ./util/testutil.o
TESTHARNESS = ./util/testharness.o $(TESTUTIL)
+# Note: iOS should probably be using libtool, not ar.
+ifeq ($(PLATFORM), IOS)
+AR=xcrun ar
+endif
+
TESTS = \
arena_test \
autocompact_test \
@@ -43,6 +51,7 @@ TESTS = \
env_test \
filename_test \
filter_block_test \
+ hash_test \
issue178_test \
issue200_test \
log_test \
@@ -72,7 +81,7 @@ SHARED = $(SHARED1)
else
# Update db.h if you change these.
SHARED_MAJOR = 1
-SHARED_MINOR = 17
+SHARED_MINOR = 18
SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1).$(SHARED_MAJOR)
SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
@@ -152,6 +161,9 @@ filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
filter_block_test: table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) $(LDFLAGS) table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
issue178_test: issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) $(LDFLAGS) issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
@@ -194,17 +206,17 @@ IOSARCH=-arch armv6 -arch armv7 -arch armv7s -arch arm64
.cc.o:
mkdir -p ios-x86/$(dir $@)
- $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
+ xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
mkdir -p ios-arm/$(dir $@)
xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- lipo ios-x86/$@ ios-arm/$@ -create -output $@
+ xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
.c.o:
mkdir -p ios-x86/$(dir $@)
- $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
+ xcrun -sdk iphonesimulator $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
mkdir -p ios-arm/$(dir $@)
xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- lipo ios-x86/$@ ios-arm/$@ -create -output $@
+ xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
else
.cc.o:
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
new file mode 100644
index 0000000000..480affb5ca
--- /dev/null
+++ b/src/leveldb/README.md
@@ -0,0 +1,138 @@
+**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
+
+Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
+
+# Features
+ * Keys and values are arbitrary byte arrays.
+ * Data is stored sorted by key.
+ * Callers can provide a custom comparison function to override the sort order.
+ * The basic operations are `Put(key,value)`, `Get(key)`, `Delete(key)`.
+ * Multiple changes can be made in one atomic batch.
+ * Users can create a transient snapshot to get a consistent view of data.
+ * Forward and backward iteration is supported over the data.
+ * Data is automatically compressed using the [Snappy compression library](http://code.google.com/p/snappy).
+ * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
+ * [Detailed documentation](http://htmlpreview.github.io/?https://github.com/google/leveldb/blob/master/doc/index.html) about how to use the library is included with the source code.
+
+
+# Limitations
+ * This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
+ * Only a single process (possibly multi-threaded) can access a particular database at a time.
+ * There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
+
+# Performance
+
+Here is a performance report (with explanations) from the run of the
+included db_bench program. The results are somewhat noisy, but should
+be enough to get a ballpark performance estimate.
+
+## Setup
+
+We use a database with a million entries. Each entry has a 16 byte
+key, and a 100 byte value. Values used by the benchmark compress to
+about half their original size.
+
+ LevelDB: version 1.1
+ Date: Sun May 1 12:11:26 2011
+ CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
+ CPUCache: 4096 KB
+ Keys: 16 bytes each
+ Values: 100 bytes each (50 bytes after compression)
+ Entries: 1000000
+ Raw Size: 110.6 MB (estimated)
+ File Size: 62.9 MB (estimated)
+
+## Write performance
+
+The "fill" benchmarks create a brand new database, in either
+sequential, or random order. The "fillsync" benchmark flushes data
+from the operating system to the disk after every operation; the other
+write operations leave the data sitting in the operating system buffer
+cache for a while. The "overwrite" benchmark does random writes that
+update existing keys in the database.
+
+ fillseq : 1.765 micros/op; 62.7 MB/s
+ fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops)
+ fillrandom : 2.460 micros/op; 45.0 MB/s
+ overwrite : 2.380 micros/op; 46.5 MB/s
+
+Each "op" above corresponds to a write of a single key/value pair.
+I.e., a random write benchmark goes at approximately 400,000 writes per second.
+
+Each "fillsync" operation costs much less (0.3 millisecond)
+than a disk seek (typically 10 milliseconds). We suspect that this is
+because the hard disk itself is buffering the update in its memory and
+responding before the data has been written to the platter. This may
+or may not be safe based on whether or not the hard disk has enough
+power to save its memory in the event of a power failure.
+
+## Read performance
+
+We list the performance of reading sequentially in both the forward
+and reverse direction, and also the performance of a random lookup.
+Note that the database created by the benchmark is quite small.
+Therefore the report characterizes the performance of leveldb when the
+working set fits in memory. The cost of reading a piece of data that
+is not present in the operating system buffer cache will be dominated
+by the one or two disk seeks needed to fetch the data from disk.
+Write performance will be mostly unaffected by whether or not the
+working set fits in memory.
+
+ readrandom : 16.677 micros/op; (approximately 60,000 reads per second)
+ readseq : 0.476 micros/op; 232.3 MB/s
+ readreverse : 0.724 micros/op; 152.9 MB/s
+
+LevelDB compacts its underlying storage data in the background to
+improve read performance. The results listed above were done
+immediately after a lot of random writes. The results after
+compactions (which are usually triggered automatically) are better.
+
+ readrandom : 11.602 micros/op; (approximately 85,000 reads per second)
+ readseq : 0.423 micros/op; 261.8 MB/s
+ readreverse : 0.663 micros/op; 166.9 MB/s
+
+Some of the high cost of reads comes from repeated decompression of blocks
+read from disk. If we supply enough cache to the leveldb so it can hold the
+uncompressed blocks in memory, the read performance improves again:
+
+ readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction)
+ readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction)
+
+## Repository contents
+
+See doc/index.html for more explanation. See doc/impl.html for a brief overview of the implementation.
+
+The public interface is in include/*.h. Callers should not include or
+rely on the details of any other header files in this package. Those
+internal APIs may be changed without warning.
+
+Guide to header files:
+
+* **include/db.h**: Main interface to the DB: Start here
+
+* **include/options.h**: Control over the behavior of an entire database,
+and also control over the behavior of individual reads and writes.
+
+* **include/comparator.h**: Abstraction for user-specified comparison function.
+If you want just bytewise comparison of keys, you can use the default
+comparator, but clients can write their own comparator implementations if they
+want custom ordering (e.g. to handle different character encodings, etc.)
+
+* **include/iterator.h**: Interface for iterating over data. You can get
+an iterator from a DB object.
+
+* **include/write_batch.h**: Interface for atomically applying multiple
+updates to a database.
+
+* **include/slice.h**: A simple module for maintaining a pointer and a
+length into some other byte array.
+
+* **include/status.h**: Status is returned from many of the public interfaces
+and is used to report success and various kinds of errors.
+
+* **include/env.h**:
+Abstraction of the OS environment. A posix implementation of this interface is
+in util/env_posix.cc
+
+* **include/table.h, include/table_builder.h**: Lower-level modules that most
+clients probably won't use directly
diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform
index 85b1ce0224..a1101c1bda 100755
--- a/src/leveldb/build_detect_platform
+++ b/src/leveldb/build_detect_platform
@@ -20,7 +20,7 @@
#
# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
#
-# -DLEVELDB_CSTDATOMIC_PRESENT if <cstdatomic> is present
+# -DLEVELDB_ATOMIC_PRESENT if <atomic> is present
# -DLEVELDB_PLATFORM_POSIX for Posix-based platforms
# -DSNAPPY if the Snappy library is present
#
@@ -72,6 +72,12 @@ if [ "$CXX" = "g++" ]; then
fi
case "$TARGET_OS" in
+ CYGWIN_*)
+ PLATFORM=OS_LINUX
+ COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
+ PLATFORM_LDFLAGS="-lpthread"
+ PORT_FILE=port/port_posix.cc
+ ;;
Darwin)
PLATFORM=OS_MACOSX
COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
@@ -185,13 +191,14 @@ if [ "$CROSS_COMPILE" = "true" ]; then
else
CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"
- # If -std=c++0x works, use <cstdatomic>. Otherwise use port_posix.h.
+ # If -std=c++0x works, use <atomic> as fallback for when memory barriers
+ # are not available.
$CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
- #include <cstdatomic>
+ #include <atomic>
int main() {}
EOF
if [ "$?" = 0 ]; then
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_CSTDATOMIC_PRESENT"
+ COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
PLATFORM_CXXFLAGS="-std=c++0x"
else
COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/db/db_bench.cc
index fc46d89693..705a170aae 100644
--- a/src/leveldb/db/db_bench.cc
+++ b/src/leveldb/db/db_bench.cc
@@ -431,7 +431,7 @@ class Benchmark {
benchmarks = sep + 1;
}
- // Reset parameters that may be overriddden bwlow
+ // Reset parameters that may be overridden below
num_ = FLAGS_num;
reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
value_size_ = FLAGS_value_size;
@@ -811,7 +811,6 @@ class Benchmark {
void SeekRandom(ThreadState* thread) {
ReadOptions options;
- std::string value;
int found = 0;
for (int i = 0; i < reads_; i++) {
Iterator* iter = db_->NewIterator(options);
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index faf5e7d7ba..49b95953b4 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -392,7 +392,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
reporter.info_log = options_.info_log;
reporter.fname = fname.c_str();
reporter.status = (options_.paranoid_checks ? &status : NULL);
- // We intentially make log::Reader do checksumming even if
+ // We intentionally make log::Reader do checksumming even if
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
// large sequence numbers).
@@ -1267,7 +1267,7 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
break;
}
- // Append to *reuslt
+ // Append to *result
if (result == first->batch) {
// Switch to temporary batch instead of disturbing caller's batch
result = tmp_batch_;
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index 280b01c14b..0fed9137d5 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -626,7 +626,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
// * sstable B in level 2
// Then do enough Get() calls to arrange for an automatic compaction
// of sstable A. A bug would cause the compaction to be marked as
- // occuring at level 1 (instead of the correct level 0).
+ // occurring at level 1 (instead of the correct level 0).
// Step 1: First place sstables in levels 0 and 2
int compaction_count = 0;
diff --git a/src/leveldb/db/dbformat.h b/src/leveldb/db/dbformat.h
index 5d8a032bd3..ea897b13c0 100644
--- a/src/leveldb/db/dbformat.h
+++ b/src/leveldb/db/dbformat.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef STORAGE_LEVELDB_DB_FORMAT_H_
-#define STORAGE_LEVELDB_DB_FORMAT_H_
+#ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
+#define STORAGE_LEVELDB_DB_DBFORMAT_H_
#include <stdio.h>
#include "leveldb/comparator.h"
@@ -227,4 +227,4 @@ inline LookupKey::~LookupKey() {
} // namespace leveldb
-#endif // STORAGE_LEVELDB_DB_FORMAT_H_
+#endif // STORAGE_LEVELDB_DB_DBFORMAT_H_
diff --git a/src/leveldb/db/dumpfile.cc b/src/leveldb/db/dumpfile.cc
new file mode 100644
index 0000000000..61c47c2ff9
--- /dev/null
+++ b/src/leveldb/db/dumpfile.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <stdio.h>
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "db/log_reader.h"
+#include "db/version_edit.h"
+#include "db/write_batch_internal.h"
+#include "leveldb/env.h"
+#include "leveldb/iterator.h"
+#include "leveldb/options.h"
+#include "leveldb/status.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+
+namespace leveldb {
+
+namespace {
+
+bool GuessType(const std::string& fname, FileType* type) {
+ size_t pos = fname.rfind('/');
+ std::string basename;
+ if (pos == std::string::npos) {
+ basename = fname;
+ } else {
+ basename = std::string(fname.data() + pos + 1, fname.size() - pos - 1);
+ }
+ uint64_t ignored;
+ return ParseFileName(basename, &ignored, type);
+}
+
+// Notified when log reader encounters corruption.
+class CorruptionReporter : public log::Reader::Reporter {
+ public:
+ WritableFile* dst_;
+ virtual void Corruption(size_t bytes, const Status& status) {
+ std::string r = "corruption: ";
+ AppendNumberTo(&r, bytes);
+ r += " bytes; ";
+ r += status.ToString();
+ r.push_back('\n');
+ dst_->Append(r);
+ }
+};
+
+// Print contents of a log file. (*func)() is called on every record.
+Status PrintLogContents(Env* env, const std::string& fname,
+ void (*func)(uint64_t, Slice, WritableFile*),
+ WritableFile* dst) {
+ SequentialFile* file;
+ Status s = env->NewSequentialFile(fname, &file);
+ if (!s.ok()) {
+ return s;
+ }
+ CorruptionReporter reporter;
+ reporter.dst_ = dst;
+ log::Reader reader(file, &reporter, true, 0);
+ Slice record;
+ std::string scratch;
+ while (reader.ReadRecord(&record, &scratch)) {
+ (*func)(reader.LastRecordOffset(), record, dst);
+ }
+ delete file;
+ return Status::OK();
+}
+
+// Called on every item found in a WriteBatch.
+class WriteBatchItemPrinter : public WriteBatch::Handler {
+ public:
+ WritableFile* dst_;
+ virtual void Put(const Slice& key, const Slice& value) {
+ std::string r = " put '";
+ AppendEscapedStringTo(&r, key);
+ r += "' '";
+ AppendEscapedStringTo(&r, value);
+ r += "'\n";
+ dst_->Append(r);
+ }
+ virtual void Delete(const Slice& key) {
+ std::string r = " del '";
+ AppendEscapedStringTo(&r, key);
+ r += "'\n";
+ dst_->Append(r);
+ }
+};
+
+
+// Called on every log record (each one of which is a WriteBatch)
+// found in a kLogFile.
+static void WriteBatchPrinter(uint64_t pos, Slice record, WritableFile* dst) {
+ std::string r = "--- offset ";
+ AppendNumberTo(&r, pos);
+ r += "; ";
+ if (record.size() < 12) {
+ r += "log record length ";
+ AppendNumberTo(&r, record.size());
+ r += " is too small\n";
+ dst->Append(r);
+ return;
+ }
+ WriteBatch batch;
+ WriteBatchInternal::SetContents(&batch, record);
+ r += "sequence ";
+ AppendNumberTo(&r, WriteBatchInternal::Sequence(&batch));
+ r.push_back('\n');
+ dst->Append(r);
+ WriteBatchItemPrinter batch_item_printer;
+ batch_item_printer.dst_ = dst;
+ Status s = batch.Iterate(&batch_item_printer);
+ if (!s.ok()) {
+ dst->Append(" error: " + s.ToString() + "\n");
+ }
+}
+
+Status DumpLog(Env* env, const std::string& fname, WritableFile* dst) {
+ return PrintLogContents(env, fname, WriteBatchPrinter, dst);
+}
+
+// Called on every log record (each one of which is a WriteBatch)
+// found in a kDescriptorFile.
+static void VersionEditPrinter(uint64_t pos, Slice record, WritableFile* dst) {
+ std::string r = "--- offset ";
+ AppendNumberTo(&r, pos);
+ r += "; ";
+ VersionEdit edit;
+ Status s = edit.DecodeFrom(record);
+ if (!s.ok()) {
+ r += s.ToString();
+ r.push_back('\n');
+ } else {
+ r += edit.DebugString();
+ }
+ dst->Append(r);
+}
+
+Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) {
+ return PrintLogContents(env, fname, VersionEditPrinter, dst);
+}
+
+Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) {
+ uint64_t file_size;
+ RandomAccessFile* file = NULL;
+ Table* table = NULL;
+ Status s = env->GetFileSize(fname, &file_size);
+ if (s.ok()) {
+ s = env->NewRandomAccessFile(fname, &file);
+ }
+ if (s.ok()) {
+ // We use the default comparator, which may or may not match the
+ // comparator used in this database. However this should not cause
+ // problems since we only use Table operations that do not require
+ // any comparisons. In particular, we do not call Seek or Prev.
+ s = Table::Open(Options(), file, file_size, &table);
+ }
+ if (!s.ok()) {
+ delete table;
+ delete file;
+ return s;
+ }
+
+ ReadOptions ro;
+ ro.fill_cache = false;
+ Iterator* iter = table->NewIterator(ro);
+ std::string r;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ r.clear();
+ ParsedInternalKey key;
+ if (!ParseInternalKey(iter->key(), &key)) {
+ r = "badkey '";
+ AppendEscapedStringTo(&r, iter->key());
+ r += "' => '";
+ AppendEscapedStringTo(&r, iter->value());
+ r += "'\n";
+ dst->Append(r);
+ } else {
+ r = "'";
+ AppendEscapedStringTo(&r, key.user_key);
+ r += "' @ ";
+ AppendNumberTo(&r, key.sequence);
+ r += " : ";
+ if (key.type == kTypeDeletion) {
+ r += "del";
+ } else if (key.type == kTypeValue) {
+ r += "val";
+ } else {
+ AppendNumberTo(&r, key.type);
+ }
+ r += " => '";
+ AppendEscapedStringTo(&r, iter->value());
+ r += "'\n";
+ dst->Append(r);
+ }
+ }
+ s = iter->status();
+ if (!s.ok()) {
+ dst->Append("iterator error: " + s.ToString() + "\n");
+ }
+
+ delete iter;
+ delete table;
+ delete file;
+ return Status::OK();
+}
+
+} // namespace
+
+Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
+ FileType ftype;
+ if (!GuessType(fname, &ftype)) {
+ return Status::InvalidArgument(fname + ": unknown file type");
+ }
+ switch (ftype) {
+ case kLogFile: return DumpLog(env, fname, dst);
+ case kDescriptorFile: return DumpDescriptor(env, fname, dst);
+ case kTableFile: return DumpTable(env, fname, dst);
+ default:
+ break;
+ }
+ return Status::InvalidArgument(fname + ": not a dump-able file type");
+}
+
+} // namespace leveldb
diff --git a/src/leveldb/db/leveldb_main.cc b/src/leveldb/db/leveldb_main.cc
index 995d76107a..9f4b7dd70c 100644
--- a/src/leveldb/db/leveldb_main.cc
+++ b/src/leveldb/db/leveldb_main.cc
@@ -3,212 +3,38 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h>
-#include "db/dbformat.h"
-#include "db/filename.h"
-#include "db/log_reader.h"
-#include "db/version_edit.h"
-#include "db/write_batch_internal.h"
+#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
-#include "leveldb/iterator.h"
-#include "leveldb/options.h"
#include "leveldb/status.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
-#include "util/logging.h"
namespace leveldb {
-
namespace {
-bool GuessType(const std::string& fname, FileType* type) {
- size_t pos = fname.rfind('/');
- std::string basename;
- if (pos == std::string::npos) {
- basename = fname;
- } else {
- basename = std::string(fname.data() + pos + 1, fname.size() - pos - 1);
- }
- uint64_t ignored;
- return ParseFileName(basename, &ignored, type);
-}
-
-// Notified when log reader encounters corruption.
-class CorruptionReporter : public log::Reader::Reporter {
- public:
- virtual void Corruption(size_t bytes, const Status& status) {
- printf("corruption: %d bytes; %s\n",
- static_cast<int>(bytes),
- status.ToString().c_str());
- }
-};
-
-// Print contents of a log file. (*func)() is called on every record.
-bool PrintLogContents(Env* env, const std::string& fname,
- void (*func)(Slice)) {
- SequentialFile* file;
- Status s = env->NewSequentialFile(fname, &file);
- if (!s.ok()) {
- fprintf(stderr, "%s\n", s.ToString().c_str());
- return false;
- }
- CorruptionReporter reporter;
- log::Reader reader(file, &reporter, true, 0);
- Slice record;
- std::string scratch;
- while (reader.ReadRecord(&record, &scratch)) {
- printf("--- offset %llu; ",
- static_cast<unsigned long long>(reader.LastRecordOffset()));
- (*func)(record);
- }
- delete file;
- return true;
-}
-
-// Called on every item found in a WriteBatch.
-class WriteBatchItemPrinter : public WriteBatch::Handler {
+class StdoutPrinter : public WritableFile {
public:
- uint64_t offset_;
- uint64_t sequence_;
-
- virtual void Put(const Slice& key, const Slice& value) {
- printf(" put '%s' '%s'\n",
- EscapeString(key).c_str(),
- EscapeString(value).c_str());
- }
- virtual void Delete(const Slice& key) {
- printf(" del '%s'\n",
- EscapeString(key).c_str());
+ virtual Status Append(const Slice& data) {
+ fwrite(data.data(), 1, data.size(), stdout);
+ return Status::OK();
}
+ virtual Status Close() { return Status::OK(); }
+ virtual Status Flush() { return Status::OK(); }
+ virtual Status Sync() { return Status::OK(); }
};
-
-// Called on every log record (each one of which is a WriteBatch)
-// found in a kLogFile.
-static void WriteBatchPrinter(Slice record) {
- if (record.size() < 12) {
- printf("log record length %d is too small\n",
- static_cast<int>(record.size()));
- return;
- }
- WriteBatch batch;
- WriteBatchInternal::SetContents(&batch, record);
- printf("sequence %llu\n",
- static_cast<unsigned long long>(WriteBatchInternal::Sequence(&batch)));
- WriteBatchItemPrinter batch_item_printer;
- Status s = batch.Iterate(&batch_item_printer);
- if (!s.ok()) {
- printf(" error: %s\n", s.ToString().c_str());
- }
-}
-
-bool DumpLog(Env* env, const std::string& fname) {
- return PrintLogContents(env, fname, WriteBatchPrinter);
-}
-
-// Called on every log record (each one of which is a WriteBatch)
-// found in a kDescriptorFile.
-static void VersionEditPrinter(Slice record) {
- VersionEdit edit;
- Status s = edit.DecodeFrom(record);
- if (!s.ok()) {
- printf("%s\n", s.ToString().c_str());
- return;
- }
- printf("%s", edit.DebugString().c_str());
-}
-
-bool DumpDescriptor(Env* env, const std::string& fname) {
- return PrintLogContents(env, fname, VersionEditPrinter);
-}
-
-bool DumpTable(Env* env, const std::string& fname) {
- uint64_t file_size;
- RandomAccessFile* file = NULL;
- Table* table = NULL;
- Status s = env->GetFileSize(fname, &file_size);
- if (s.ok()) {
- s = env->NewRandomAccessFile(fname, &file);
- }
- if (s.ok()) {
- // We use the default comparator, which may or may not match the
- // comparator used in this database. However this should not cause
- // problems since we only use Table operations that do not require
- // any comparisons. In particular, we do not call Seek or Prev.
- s = Table::Open(Options(), file, file_size, &table);
- }
- if (!s.ok()) {
- fprintf(stderr, "%s\n", s.ToString().c_str());
- delete table;
- delete file;
- return false;
- }
-
- ReadOptions ro;
- ro.fill_cache = false;
- Iterator* iter = table->NewIterator(ro);
- for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
- ParsedInternalKey key;
- if (!ParseInternalKey(iter->key(), &key)) {
- printf("badkey '%s' => '%s'\n",
- EscapeString(iter->key()).c_str(),
- EscapeString(iter->value()).c_str());
- } else {
- char kbuf[20];
- const char* type;
- if (key.type == kTypeDeletion) {
- type = "del";
- } else if (key.type == kTypeValue) {
- type = "val";
- } else {
- snprintf(kbuf, sizeof(kbuf), "%d", static_cast<int>(key.type));
- type = kbuf;
- }
- printf("'%s' @ %8llu : %s => '%s'\n",
- EscapeString(key.user_key).c_str(),
- static_cast<unsigned long long>(key.sequence),
- type,
- EscapeString(iter->value()).c_str());
- }
- }
- s = iter->status();
- if (!s.ok()) {
- printf("iterator error: %s\n", s.ToString().c_str());
- }
-
- delete iter;
- delete table;
- delete file;
- return true;
-}
-
-bool DumpFile(Env* env, const std::string& fname) {
- FileType ftype;
- if (!GuessType(fname, &ftype)) {
- fprintf(stderr, "%s: unknown file type\n", fname.c_str());
- return false;
- }
- switch (ftype) {
- case kLogFile: return DumpLog(env, fname);
- case kDescriptorFile: return DumpDescriptor(env, fname);
- case kTableFile: return DumpTable(env, fname);
-
- default: {
- fprintf(stderr, "%s: not a dump-able file type\n", fname.c_str());
- break;
- }
- }
- return false;
-}
-
bool HandleDumpCommand(Env* env, char** files, int num) {
+ StdoutPrinter printer;
bool ok = true;
for (int i = 0; i < num; i++) {
- ok &= DumpFile(env, files[i]);
+ Status s = DumpFile(env, files[i], &printer);
+ if (!s.ok()) {
+ fprintf(stderr, "%s\n", s.ToString().c_str());
+ ok = false;
+ }
}
return ok;
}
-}
+} // namespace
} // namespace leveldb
static void Usage() {
diff --git a/src/leveldb/db/log_format.h b/src/leveldb/db/log_format.h
index 2690cb9789..a8c06efe18 100644
--- a/src/leveldb/db/log_format.h
+++ b/src/leveldb/db/log_format.h
@@ -26,8 +26,8 @@ static const int kMaxRecordType = kLastType;
static const int kBlockSize = 32768;
-// Header is checksum (4 bytes), type (1 byte), length (2 bytes).
-static const int kHeaderSize = 4 + 1 + 2;
+// Header is checksum (4 bytes), length (2 bytes), type (1 byte).
+static const int kHeaderSize = 4 + 2 + 1;
} // namespace log
} // namespace leveldb
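[Editor's note] The corrected comment above gives the physical record header layout: checksum (4 bytes), length (2 bytes), type (1 byte), so kHeaderSize = 7. The sketch below decodes such a header from a raw buffer; the field order and little-endian byte order follow that comment and log_reader.cc, but the struct and function names are placeholders.

    #include <stdint.h>

    struct RecordHeaderSketch {
      uint32_t checksum;  // bytes 0..3, little-endian
      uint16_t length;    // bytes 4..5, little-endian
      uint8_t  type;      // byte 6 (kZeroType, kFullType, kFirstType, ...)
    };

    RecordHeaderSketch ParseHeaderSketch(const unsigned char* p) {
      RecordHeaderSketch h;
      h.checksum = static_cast<uint32_t>(p[0]) |
                   (static_cast<uint32_t>(p[1]) << 8) |
                   (static_cast<uint32_t>(p[2]) << 16) |
                   (static_cast<uint32_t>(p[3]) << 24);
      h.length   = static_cast<uint16_t>(p[4] | (p[5] << 8));
      h.type     = p[6];
      return h;
    }
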
diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc
index 4919216d04..e44b66c85b 100644
--- a/src/leveldb/db/log_reader.cc
+++ b/src/leveldb/db/log_reader.cc
@@ -167,14 +167,14 @@ uint64_t Reader::LastRecordOffset() {
return last_record_offset_;
}
-void Reader::ReportCorruption(size_t bytes, const char* reason) {
+void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
ReportDrop(bytes, Status::Corruption(reason));
}
-void Reader::ReportDrop(size_t bytes, const Status& reason) {
+void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
if (reporter_ != NULL &&
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
- reporter_->Corruption(bytes, reason);
+ reporter_->Corruption(static_cast<size_t>(bytes), reason);
}
}
diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h
index 82d4bee68d..6aff791716 100644
--- a/src/leveldb/db/log_reader.h
+++ b/src/leveldb/db/log_reader.h
@@ -94,8 +94,8 @@ class Reader {
// Reports dropped bytes to the reporter.
// buffer_ must be updated to remove the dropped bytes prior to invocation.
- void ReportCorruption(size_t bytes, const char* reason);
- void ReportDrop(size_t bytes, const Status& reason);
+ void ReportCorruption(uint64_t bytes, const char* reason);
+ void ReportDrop(uint64_t bytes, const Status& reason);
// No copying allowed
Reader(const Reader&);
diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc
index 91d3caafc3..dcf0562652 100644
--- a/src/leveldb/db/log_test.cc
+++ b/src/leveldb/db/log_test.cc
@@ -463,7 +463,7 @@ TEST(LogTest, ErrorJoinsRecords) {
ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read());
- const int dropped = DroppedBytes();
+ const size_t dropped = DroppedBytes();
ASSERT_LE(dropped, 2*kBlockSize + 100);
ASSERT_GE(dropped, 2*kBlockSize);
}
diff --git a/src/leveldb/db/repair.cc b/src/leveldb/db/repair.cc
index 7727fafc58..4cd4bb047f 100644
--- a/src/leveldb/db/repair.cc
+++ b/src/leveldb/db/repair.cc
@@ -186,7 +186,7 @@ class Repairer {
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.lognum = log;
- // We intentially make log::Reader do checksumming so that
+ // We intentionally make log::Reader do checksumming so that
// corruptions cause entire commits to be skipped instead of
// propagating bad information (like overly large sequence
// numbers).
diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h
index af85be6d01..ed8b092203 100644
--- a/src/leveldb/db/skiplist.h
+++ b/src/leveldb/db/skiplist.h
@@ -1,3 +1,6 @@
+#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
+#define STORAGE_LEVELDB_DB_SKIPLIST_H_
+
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
@@ -377,3 +380,5 @@ bool SkipList<Key,Comparator>::Contains(const Key& key) const {
}
} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_DB_SKIPLIST_H_
diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h
index 4423a7f318..310a3c8912 100644
--- a/src/leveldb/db/write_batch_internal.h
+++ b/src/leveldb/db/write_batch_internal.h
@@ -21,10 +21,10 @@ class WriteBatchInternal {
// Set the count for the number of entries in the batch.
static void SetCount(WriteBatch* batch, int n);
- // Return the seqeunce number for the start of this batch.
+ // Return the sequence number for the start of this batch.
static SequenceNumber Sequence(const WriteBatch* batch);
- // Store the specified number as the seqeunce number for the start of
+ // Store the specified number as the sequence number for the start of
// this batch.
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
diff --git a/src/leveldb/doc/bench/db_bench_tree_db.cc b/src/leveldb/doc/bench/db_bench_tree_db.cc
index ed86f031c2..4ca381f11f 100644
--- a/src/leveldb/doc/bench/db_bench_tree_db.cc
+++ b/src/leveldb/doc/bench/db_bench_tree_db.cc
@@ -338,7 +338,7 @@ class Benchmark {
bool write_sync = false;
if (name == Slice("fillseq")) {
Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
-
+ DBSynchronize(db_);
} else if (name == Slice("fillrandom")) {
Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1);
DBSynchronize(db_);
diff --git a/src/leveldb/doc/impl.html b/src/leveldb/doc/impl.html
index 28817fe0da..6a468be095 100644
--- a/src/leveldb/doc/impl.html
+++ b/src/leveldb/doc/impl.html
@@ -111,7 +111,7 @@ A compaction merges the contents of the picked files to produce a
sequence of level-(L+1) files. We switch to producing a new
level-(L+1) file after the current output file has reached the target
file size (2MB). We also switch to a new output file when the key
-range of the current output file has grown enough to overlap more then
+range of the current output file has grown enough to overlap more than
ten level-(L+2) files. This last rule ensures that a later compaction
of a level-(L+1) file will not pick up too much data from level-(L+2).
@@ -151,7 +151,7 @@ compaction cost will be approximately 0.5 second.
If we throttle the background writing to something small, say 10% of
the full 100MB/s speed, a compaction may take up to 5 seconds. If the
user is writing at 10MB/s, we might build up lots of level-0 files
-(~50 to hold the 5*10MB). This may signficantly increase the cost of
+(~50 to hold the 5*10MB). This may significantly increase the cost of
reads due to the overhead of merging more files together on every
read.
diff --git a/src/leveldb/doc/log_format.txt b/src/leveldb/doc/log_format.txt
index 5228f624de..4cca5ef6ea 100644
--- a/src/leveldb/doc/log_format.txt
+++ b/src/leveldb/doc/log_format.txt
@@ -11,7 +11,7 @@ Each block consists of a sequence of records:
A record never starts within the last six bytes of a block (since it
won't fit). Any leftover bytes here form the trailer, which must
-consist entirely of zero bytes and must be skipped by readers.
+consist entirely of zero bytes and must be skipped by readers.
Aside: if exactly seven bytes are left in the current block, and a new
non-zero length record is added, the writer must emit a FIRST record
@@ -33,8 +33,8 @@ The FULL record contains the contents of an entire user record.
FIRST, MIDDLE, LAST are types used for user records that have been
split into multiple fragments (typically because of block boundaries).
FIRST is the type of the first fragment of a user record, LAST is the
-type of the last fragment of a user record, and MID is the type of all
-interior fragments of a user record.
+type of the last fragment of a user record, and MIDDLE is the type of
+all interior fragments of a user record.
Example: consider a sequence of user records:
A: length 1000
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 5879de1214..43ef2e0729 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -55,14 +55,15 @@ class FileState {
}
const uint64_t available = size_ - offset;
if (n > available) {
- n = available;
+ n = static_cast<size_t>(available);
}
if (n == 0) {
*result = Slice();
return Status::OK();
}
- size_t block = offset / kBlockSize;
+ assert(offset / kBlockSize <= SIZE_MAX);
+ size_t block = static_cast<size_t>(offset / kBlockSize);
size_t block_offset = offset % kBlockSize;
if (n <= kBlockSize - block_offset) {
@@ -167,7 +168,7 @@ class SequentialFileImpl : public SequentialFile {
if (pos_ > file_->Size()) {
return Status::IOError("pos_ > file_->Size()");
}
- const size_t available = file_->Size() - pos_;
+ const uint64_t available = file_->Size() - pos_;
if (n > available) {
n = available;
}
@@ -177,7 +178,7 @@ class SequentialFileImpl : public SequentialFile {
private:
FileState* file_;
- size_t pos_;
+ uint64_t pos_;
};
class RandomAccessFileImpl : public RandomAccessFile {
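
These casts matter on 32-bit builds, where size_t is narrower than the 64-bit offsets memenv tracks. A self-contained sketch of the clamp-then-cast pattern used in FileState::Read (the function name here is illustrative):

#include <cstddef>
#include <cstdint>

// Clamp a requested read length against what remains in a 64-bit file, then
// narrow: after the clamp the value is <= the original size_t n, so the cast is safe.
size_t ClampReadLength(uint64_t file_size, uint64_t offset, size_t n) {
  const uint64_t available = file_size - offset;   // caller has already checked offset <= file_size
  if (n > available) {
    n = static_cast<size_t>(available);
  }
  return n;
}
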
diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h
index 5e3b47637d..1a201e5e0a 100644
--- a/src/leveldb/include/leveldb/cache.h
+++ b/src/leveldb/include/leveldb/cache.h
@@ -96,4 +96,4 @@ class Cache {
} // namespace leveldb
-#endif // STORAGE_LEVELDB_UTIL_CACHE_H_
+#endif // STORAGE_LEVELDB_INCLUDE_CACHE_H_
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index 40851b2aa8..4c169bf22e 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -14,7 +14,7 @@ namespace leveldb {
// Update Makefile if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 17;
+static const int kMinorVersion = 18;
struct Options;
struct ReadOptions;
diff --git a/src/leveldb/include/leveldb/dumpfile.h b/src/leveldb/include/leveldb/dumpfile.h
new file mode 100644
index 0000000000..3f97fda16b
--- /dev/null
+++ b/src/leveldb/include/leveldb/dumpfile.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
+#define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
+
+#include <string>
+#include "leveldb/env.h"
+#include "leveldb/status.h"
+
+namespace leveldb {
+
+// Dump the contents of the file named by fname in text format to
+// *dst. Makes a sequence of dst->Append() calls; each call is passed
+// the newline-terminated text corresponding to a single item found
+// in the file.
+//
+// Returns a non-OK result if fname does not name a leveldb storage
+// file, or if the file cannot be read.
+Status DumpFile(Env* env, const std::string& fname, WritableFile* dst);
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
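
Because DumpFile now writes to any WritableFile, callers outside the leveldbutil binary can dump a file programmatically. A hedged usage sketch (the wrapper name and file paths are placeholders; Env::NewWritableFile is the standard Env factory):

#include "leveldb/dumpfile.h"
#include "leveldb/env.h"

// Dump one table/log/descriptor file into a text file, one line per item.
leveldb::Status DumpToTextFile(const std::string& src, const std::string& dst_name) {
  leveldb::Env* env = leveldb::Env::Default();
  leveldb::WritableFile* dst = NULL;
  leveldb::Status s = env->NewWritableFile(dst_name, &dst);
  if (!s.ok()) return s;
  s = leveldb::DumpFile(env, src, dst);
  delete dst;
  return s;
}
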
diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h
index b2072d02c1..f709514da6 100644
--- a/src/leveldb/include/leveldb/env.h
+++ b/src/leveldb/include/leveldb/env.h
@@ -142,7 +142,7 @@ class Env {
// useful for computing deltas of time.
virtual uint64_t NowMicros() = 0;
- // Sleep/delay the thread for the perscribed number of micro-seconds.
+ // Sleep/delay the thread for the prescribed number of micro-seconds.
virtual void SleepForMicroseconds(int micros) = 0;
private:
diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h
index ad543eb46c..76aced04bd 100644
--- a/src/leveldb/include/leveldb/iterator.h
+++ b/src/leveldb/include/leveldb/iterator.h
@@ -61,7 +61,7 @@ class Iterator {
// Return the value for the current entry. The underlying storage for
// the returned slice is valid only until the next modification of
// the iterator.
- // REQUIRES: !AtEnd() && !AtStart()
+ // REQUIRES: Valid()
virtual Slice value() const = 0;
// If an error has occurred, return it. Else return an ok status.
diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h
index fdda718d30..7c9b973454 100644
--- a/src/leveldb/include/leveldb/options.h
+++ b/src/leveldb/include/leveldb/options.h
@@ -153,7 +153,7 @@ struct ReadOptions {
// If "snapshot" is non-NULL, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must
- // not have been released). If "snapshot" is NULL, use an impliicit
+ // not have been released). If "snapshot" is NULL, use an implicit
// snapshot of the state at the beginning of this read operation.
// Default: NULL
const Snapshot* snapshot;
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h
index a9866b2302..9bf091f757 100644
--- a/src/leveldb/port/atomic_pointer.h
+++ b/src/leveldb/port/atomic_pointer.h
@@ -5,14 +5,13 @@
// AtomicPointer provides storage for a lock-free pointer.
// Platform-dependent implementation of AtomicPointer:
// - If the platform provides a cheap barrier, we use it with raw pointers
-// - If cstdatomic is present (on newer versions of gcc, it is), we use
-// a cstdatomic-based AtomicPointer. However we prefer the memory
+// - If <atomic> is present (on newer versions of gcc, it is), we use
+// a <atomic>-based AtomicPointer. However we prefer the memory
// barrier based version, because at least on a gcc 4.4 32-bit build
-// on linux, we have encountered a buggy <cstdatomic>
-// implementation. Also, some <cstdatomic> implementations are much
-// slower than a memory-barrier based implementation (~16ns for
-// <cstdatomic> based acquire-load vs. ~1ns for a barrier based
-// acquire-load).
+// on linux, we have encountered a buggy <atomic> implementation.
+// Also, some <atomic> implementations are much slower than a memory-barrier
+// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
+// a barrier based acquire-load).
// This code is based on atomicops-internals-* in Google's perftools:
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
@@ -20,8 +19,8 @@
#define PORT_ATOMIC_POINTER_H_
#include <stdint.h>
-#ifdef LEVELDB_CSTDATOMIC_PRESENT
-#include <cstdatomic>
+#ifdef LEVELDB_ATOMIC_PRESENT
+#include <atomic>
#endif
#ifdef OS_WIN
#include <windows.h>
@@ -126,7 +125,7 @@ class AtomicPointer {
};
// AtomicPointer based on <cstdatomic>
-#elif defined(LEVELDB_CSTDATOMIC_PRESENT)
+#elif defined(LEVELDB_ATOMIC_PRESENT)
class AtomicPointer {
private:
std::atomic<void*> rep_;
@@ -207,7 +206,7 @@ class AtomicPointer {
inline void NoBarrier_Store(void* v) { rep_ = v; }
};
-// We have neither MemoryBarrier(), nor <cstdatomic>
+// We have neither MemoryBarrier(), nor <atomic>
#else
#error Please implement AtomicPointer for this platform.
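
The branch guarded by the renamed LEVELDB_ATOMIC_PRESENT macro is, in sketch form, an acquire/release wrapper around std::atomic<void*>; the rep_ member and NoBarrier_Store shown above belong to it. A minimal version of that shape, not quoted verbatim from the file:

#include <atomic>

// <atomic>-backed AtomicPointer: acquire loads pair with release stores;
// the NoBarrier_ variants use relaxed ordering.
class AtomicPointer {
 private:
  std::atomic<void*> rep_;
 public:
  AtomicPointer() { }
  explicit AtomicPointer(void* v) : rep_(v) { }
  inline void* Acquire_Load() const { return rep_.load(std::memory_order_acquire); }
  inline void Release_Store(void* v) { rep_.store(v, std::memory_order_release); }
  inline void* NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); }
  inline void NoBarrier_Store(void* v) { rep_.store(v, std::memory_order_relaxed); }
};
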
diff --git a/src/leveldb/port/port_posix.h b/src/leveldb/port/port_posix.h
index 21c845e211..ccca9939d3 100644
--- a/src/leveldb/port/port_posix.h
+++ b/src/leveldb/port/port_posix.h
@@ -21,14 +21,11 @@
#else
#define PLATFORM_IS_LITTLE_ENDIAN false
#endif
-#elif defined(OS_FREEBSD)
+#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\
+ defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
#include <sys/types.h>
#include <sys/endian.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#elif defined(OS_OPENBSD) || defined(OS_NETBSD) ||\
- defined(OS_DRAGONFLYBSD)
- #include <sys/types.h>
- #include <sys/endian.h>
#elif defined(OS_HPUX)
#define PLATFORM_IS_LITTLE_ENDIAN false
#elif defined(OS_ANDROID)
@@ -55,7 +52,7 @@
#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
- defined(OS_ANDROID) || defined(OS_HPUX)
+ defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN)
// Use fread/fwrite/fflush on platforms without _unlocked variants
#define fread_unlocked fread
#define fwrite_unlocked fwrite
diff --git a/src/leveldb/port/thread_annotations.h b/src/leveldb/port/thread_annotations.h
index 6f9b6a7924..9470ef587c 100644
--- a/src/leveldb/port/thread_annotations.h
+++ b/src/leveldb/port/thread_annotations.h
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H
+#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
+#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
// Some environments provide custom macros to aid in static thread-safety
// analysis. Provide empty definitions of such macros unless they are already
@@ -56,4 +57,4 @@
#define NO_THREAD_SAFETY_ANALYSIS
#endif
-#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H
+#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
diff --git a/src/leveldb/table/block.cc b/src/leveldb/table/block.cc
index 79ea9d9ee5..43e402c9c0 100644
--- a/src/leveldb/table/block.cc
+++ b/src/leveldb/table/block.cc
@@ -46,7 +46,7 @@ Block::~Block() {
// Helper routine: decode the next block entry starting at "p",
// storing the number of shared key bytes, non_shared key bytes,
// and the length of the value in "*shared", "*non_shared", and
-// "*value_length", respectively. Will not derefence past "limit".
+// "*value_length", respectively. Will not dereference past "limit".
//
// If any errors are detected, returns NULL. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
diff --git a/src/leveldb/table/block_builder.h b/src/leveldb/table/block_builder.h
index 5b545bd1af..4fbcb33972 100644
--- a/src/leveldb/table/block_builder.h
+++ b/src/leveldb/table/block_builder.h
@@ -21,7 +21,7 @@ class BlockBuilder {
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();
- // REQUIRES: Finish() has not been callled since the last call to Reset().
+ // REQUIRES: Finish() has not been called since the last call to Reset().
// REQUIRES: key is larger than any previously added key
void Add(const Slice& key, const Slice& value);
diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc
index cda1decdf3..aa63144c9e 100644
--- a/src/leveldb/table/format.cc
+++ b/src/leveldb/table/format.cc
@@ -48,7 +48,7 @@ Status Footer::DecodeFrom(Slice* input) {
const uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
(static_cast<uint64_t>(magic_lo)));
if (magic != kTableMagicNumber) {
- return Status::InvalidArgument("not an sstable (bad magic number)");
+ return Status::Corruption("not an sstable (bad magic number)");
}
Status result = metaindex_handle_.DecodeFrom(input);
diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc
index 71c1756e5f..dff8a82590 100644
--- a/src/leveldb/table/table.cc
+++ b/src/leveldb/table/table.cc
@@ -41,7 +41,7 @@ Status Table::Open(const Options& options,
Table** table) {
*table = NULL;
if (size < Footer::kEncodedLength) {
- return Status::InvalidArgument("file is too short to be an sstable");
+ return Status::Corruption("file is too short to be an sstable");
}
char footer_space[Footer::kEncodedLength];
@@ -58,7 +58,11 @@ Status Table::Open(const Options& options,
BlockContents contents;
Block* index_block = NULL;
if (s.ok()) {
- s = ReadBlock(file, ReadOptions(), footer.index_handle(), &contents);
+ ReadOptions opt;
+ if (options.paranoid_checks) {
+ opt.verify_checksums = true;
+ }
+ s = ReadBlock(file, opt, footer.index_handle(), &contents);
if (s.ok()) {
index_block = new Block(contents);
}
@@ -92,6 +96,9 @@ void Table::ReadMeta(const Footer& footer) {
// TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
// it is an empty block.
ReadOptions opt;
+ if (rep_->options.paranoid_checks) {
+ opt.verify_checksums = true;
+ }
BlockContents contents;
if (!ReadBlock(rep_->file, opt, footer.metaindex_handle(), &contents).ok()) {
// Do not propagate errors since meta info is not needed for operation
@@ -120,6 +127,9 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
// We might want to unify with ReadBlock() if we start
// requiring checksum verification in Table::Open.
ReadOptions opt;
+ if (rep_->options.paranoid_checks) {
+ opt.verify_checksums = true;
+ }
BlockContents block;
if (!ReadBlock(rep_->file, opt, filter_handle, &block).ok()) {
return;
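
With these hunks, setting Options::paranoid_checks also forces checksum verification when the index, metaindex and filter blocks are read at open time. A minimal usage sketch (the database path is a placeholder):

#include "leveldb/db.h"

leveldb::Options options;
options.create_if_missing = true;
options.paranoid_checks = true;   // now implies verify_checksums for the blocks read in Table::Open
leveldb::DB* db = NULL;
leveldb::Status s = leveldb::DB::Open(options, "/tmp/exampledb", &db);
if (!s.ok()) {
  // A damaged index block now surfaces here as Status::Corruption rather than as bad reads later.
}
delete db;
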
diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc
index d7941cd21f..a27a2ace28 100644
--- a/src/leveldb/util/bloom.cc
+++ b/src/leveldb/util/bloom.cc
@@ -29,7 +29,7 @@ class BloomFilterPolicy : public FilterPolicy {
}
virtual const char* Name() const {
- return "leveldb.BuiltinBloomFilter";
+ return "leveldb.BuiltinBloomFilter2";
}
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index 93eadb1a4f..ba2667864a 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -3,8 +3,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#if !defined(LEVELDB_PLATFORM_WINDOWS)
-#include <deque>
-#include <set>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
@@ -18,9 +16,8 @@
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
-#if defined(LEVELDB_PLATFORM_ANDROID)
-#include <sys/stat.h>
-#endif
+#include <deque>
+#include <set>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "port/port.h"
@@ -296,7 +293,8 @@ class PosixEnv : public Env {
public:
PosixEnv();
virtual ~PosixEnv() {
- fprintf(stderr, "Destroying Env::Default()\n");
+ char msg[] = "Destroying Env::Default()\n";
+ fwrite(msg, 1, sizeof(msg), stderr);
abort();
}
diff --git a/src/leveldb/util/hash.cc b/src/leveldb/util/hash.cc
index 07cf022060..ed439ce7a2 100644
--- a/src/leveldb/util/hash.cc
+++ b/src/leveldb/util/hash.cc
@@ -34,13 +34,13 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
// Pick up remaining bytes
switch (limit - data) {
case 3:
- h += data[2] << 16;
+ h += static_cast<unsigned char>(data[2]) << 16;
FALLTHROUGH_INTENDED;
case 2:
- h += data[1] << 8;
+ h += static_cast<unsigned char>(data[1]) << 8;
FALLTHROUGH_INTENDED;
case 1:
- h += data[0];
+ h += static_cast<unsigned char>(data[0]);
h *= m;
h ^= (h >> r);
break;
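
The casts avoid sign extension on platforms where plain char is signed: a tail byte with the high bit set would otherwise be added as a large negative value and change the hash. A standalone illustration, using a high-bit byte like those in the new test vectors below:

#include <cstdint>
#include <cstdio>

int main() {
  const char data[1] = {'\xc3'};                    // high-bit byte
  uint32_t wrong = 0, right = 0;
  wrong += data[0];                                 // sign-extends to 0xffffffc3 when char is signed
  right += static_cast<unsigned char>(data[0]);     // always 0x000000c3, the intended contribution
  std::printf("wrong=%08x right=%08x\n", wrong, right);
  return 0;
}
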
diff --git a/src/leveldb/util/hash_test.cc b/src/leveldb/util/hash_test.cc
new file mode 100644
index 0000000000..eaa1c92c23
--- /dev/null
+++ b/src/leveldb/util/hash_test.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "util/hash.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+class HASH { };
+
+TEST(HASH, SignedUnsignedIssue) {
+ const unsigned char data1[1] = {0x62};
+ const unsigned char data2[2] = {0xc3, 0x97};
+ const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
+ const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
+ const unsigned char data5[48] = {
+ 0x01, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14,
+ 0x00, 0x00, 0x00, 0x18,
+ 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ };
+
+ ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data1), sizeof(data1), 0xbc9f1d34),
+ 0xef1345c4);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data2), sizeof(data2), 0xbc9f1d34),
+ 0x5b663814);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data3), sizeof(data3), 0xbc9f1d34),
+ 0x323c078f);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data4), sizeof(data4), 0xbc9f1d34),
+ 0xed21633a);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data5), sizeof(data5), 0x12345678),
+ 0xf333dabb);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
diff --git a/src/leveldb/util/logging.cc b/src/leveldb/util/logging.cc
index 22cf278512..ca6b324403 100644
--- a/src/leveldb/util/logging.cc
+++ b/src/leveldb/util/logging.cc
@@ -45,15 +45,6 @@ std::string EscapeString(const Slice& value) {
return r;
}
-bool ConsumeChar(Slice* in, char c) {
- if (!in->empty() && (*in)[0] == c) {
- in->remove_prefix(1);
- return true;
- } else {
- return false;
- }
-}
-
bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
uint64_t v = 0;
int digits = 0;
diff --git a/src/leveldb/util/logging.h b/src/leveldb/util/logging.h
index b0c5da813e..1b450d2480 100644
--- a/src/leveldb/util/logging.h
+++ b/src/leveldb/util/logging.h
@@ -32,10 +32,6 @@ extern std::string NumberToString(uint64_t num);
// Escapes any non-printable characters found in "value".
extern std::string EscapeString(const Slice& value);
-// If *in starts with "c", advances *in past the first character and
-// returns true. Otherwise, returns false.
-extern bool ConsumeChar(Slice* in, char c);
-
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
diff --git a/src/main.cpp b/src/main.cpp
index 630891bd3f..0cfe90beda 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -106,7 +106,7 @@ namespace {
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
- CBlockFileInfo infoLastBlockFile;
+ std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
// Every received block is assigned a unique and increasing identifier, so we
@@ -1051,7 +1051,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock
CBlockHeader header;
try {
file >> header;
- fseek(file, postx.nTxOffset, SEEK_CUR);
+ fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
file >> txOut;
} catch (std::exception &e) {
return error("%s : Deserialize or I/O error - %s", __func__, e.what());
@@ -1106,7 +1106,7 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
- if (!fileout)
+ if (fileout.IsNull())
return error("WriteBlockToDisk : OpenBlockFile failed");
// Write index header
@@ -1114,16 +1114,16 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
fileout << FLATDATA(Params().MessageStart()) << nSize;
// Write block
- long fileOutPos = ftell(fileout);
+ long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("WriteBlockToDisk : ftell failed");
pos.nPos = (unsigned int)fileOutPos;
fileout << block;
// Flush stdio buffers and commit to disk before returning
- fflush(fileout);
+ fflush(fileout.Get());
if (!IsInitialBlockDownload())
- FileCommit(fileout);
+ FileCommit(fileout.Get());
return true;
}
@@ -1134,7 +1134,7 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos)
// Open history file to read
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (!filein)
+ if (filein.IsNull())
return error("ReadBlockFromDisk : OpenBlockFile failed");
// Read block
@@ -1547,7 +1547,7 @@ void static FlushBlockFile(bool fFinalize = false)
FILE *fileOld = OpenBlockFile(posOld);
if (fileOld) {
if (fFinalize)
- TruncateFile(fileOld, infoLastBlockFile.nSize);
+ TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
FileCommit(fileOld);
fclose(fileOld);
}
@@ -1555,7 +1555,7 @@ void static FlushBlockFile(bool fFinalize = false)
fileOld = OpenUndoFile(posOld);
if (fileOld) {
if (fFinalize)
- TruncateFile(fileOld, infoLastBlockFile.nUndoSize);
+ TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
FileCommit(fileOld);
fclose(fileOld);
}
@@ -2163,32 +2163,32 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
LOCK(cs_LastBlockFile);
- if (fKnown) {
- if (nLastBlockFile != pos.nFile) {
- nLastBlockFile = pos.nFile;
- infoLastBlockFile.SetNull();
- pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile);
- fUpdatedLast = true;
- }
- } else {
- while (infoLastBlockFile.nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
- LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, infoLastBlockFile.ToString());
+ unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
+
+ if (!fKnown) {
+ while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
+ LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString());
FlushBlockFile(true);
- nLastBlockFile++;
- infoLastBlockFile.SetNull();
- pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile); // check whether data for the new file somehow already exist; can fail just fine
+ nFile++;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
fUpdatedLast = true;
}
- pos.nFile = nLastBlockFile;
- pos.nPos = infoLastBlockFile.nSize;
+ pos.nFile = nFile;
+ pos.nPos = vinfoBlockFile[nFile].nSize;
}
- infoLastBlockFile.nSize += nAddSize;
- infoLastBlockFile.AddBlock(nHeight, nTime);
+ nLastBlockFile = nFile;
+ vinfoBlockFile[nFile].nSize += nAddSize;
+ vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
if (!fKnown) {
unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
- unsigned int nNewChunks = (infoLastBlockFile.nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
+ unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
@@ -2203,7 +2203,7 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
}
}
- if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
+ if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, vinfoBlockFile[nFile]))
return state.Abort("Failed to write file info");
if (fUpdatedLast)
pblocktree->WriteLastBlockFile(nLastBlockFile);
@@ -2218,19 +2218,10 @@ bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigne
LOCK(cs_LastBlockFile);
unsigned int nNewSize;
- if (nFile == nLastBlockFile) {
- pos.nPos = infoLastBlockFile.nUndoSize;
- nNewSize = (infoLastBlockFile.nUndoSize += nAddSize);
- if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
- return state.Abort("Failed to write block info");
- } else {
- CBlockFileInfo info;
- if (!pblocktree->ReadBlockFileInfo(nFile, info))
- return state.Abort("Failed to read block info");
- pos.nPos = info.nUndoSize;
- nNewSize = (info.nUndoSize += nAddSize);
- if (!pblocktree->WriteBlockFileInfo(nFile, info))
- return state.Abort("Failed to write block info");
+ pos.nPos = vinfoBlockFile[nFile].nUndoSize;
+ nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
+ if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, vinfoBlockFile[nLastBlockFile])) {
+ return state.Abort("Failed to write block info");
}
unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
@@ -2826,9 +2817,20 @@ bool static LoadBlockIndexDB()
// Load block file info
pblocktree->ReadLastBlockFile(nLastBlockFile);
- LogPrintf("LoadBlockIndexDB(): last block file = %i\n", nLastBlockFile);
- if (pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile))
- LogPrintf("LoadBlockIndexDB(): last block file info: %s\n", infoLastBlockFile.ToString());
+ vinfoBlockFile.resize(nLastBlockFile + 1);
+ LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
+ for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
+ pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
+ }
+ LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
+ for (int nFile = nLastBlockFile + 1; true; nFile++) {
+ CBlockFileInfo info;
+ if (pblocktree->ReadBlockFileInfo(nFile, info)) {
+ vinfoBlockFile.push_back(info);
+ } else {
+ break;
+ }
+ }
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
@@ -2843,7 +2845,7 @@ bool static LoadBlockIndexDB()
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
CDiskBlockPos pos(*it, 0);
- if (!CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION)) {
+ if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
return false;
}
}
@@ -4548,7 +4550,7 @@ bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
{
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
- if (!fileout)
+ if (fileout.IsNull())
return error("CBlockUndo::WriteToDisk : OpenUndoFile failed");
// Write index header
@@ -4556,7 +4558,7 @@ bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
fileout << FLATDATA(Params().MessageStart()) << nSize;
// Write undo data
- long fileOutPos = ftell(fileout);
+ long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("CBlockUndo::WriteToDisk : ftell failed");
pos.nPos = (unsigned int)fileOutPos;
@@ -4569,9 +4571,9 @@ bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
fileout << hasher.GetHash();
// Flush stdio buffers and commit to disk before returning
- fflush(fileout);
+ fflush(fileout.Get());
if (!IsInitialBlockDownload())
- FileCommit(fileout);
+ FileCommit(fileout.Get());
return true;
}
@@ -4580,7 +4582,7 @@ bool CBlockUndo::ReadFromDisk(const CDiskBlockPos &pos, const uint256 &hashBlock
{
// Open history file to read
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (!filein)
+ if (filein.IsNull())
return error("CBlockUndo::ReadFromDisk : OpenBlockFile failed");
// Read block
diff --git a/src/main.h b/src/main.h
index c0c1fb2707..1ef51918c5 100644
--- a/src/main.h
+++ b/src/main.h
@@ -20,6 +20,7 @@
#include "script/sigcache.h"
#include "script/standard.h"
#include "sync.h"
+#include "tinyformat.h"
#include "txmempool.h"
#include "uint256.h"
diff --git a/src/miner.cpp b/src/miner.cpp
index c2762bf44e..eefccfd641 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -17,6 +17,7 @@
#endif
#include <boost/thread.hpp>
+#include <boost/tuple/tuple.hpp>
using namespace std;
@@ -398,7 +399,7 @@ CBlockTemplate* CreateNewBlockWithKey(CReserveKey& reservekey)
if (!reservekey.GetReservedKey(pubkey))
return NULL;
- CScript scriptPubKey = CScript() << pubkey << OP_CHECKSIG;
+ CScript scriptPubKey = CScript() << ToByteVector(pubkey) << OP_CHECKSIG;
return CreateNewBlock(scriptPubKey);
}
diff --git a/src/net.cpp b/src/net.cpp
index 50b435cf14..6cf64f51c3 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1929,7 +1929,7 @@ bool CAddrDB::Write(const CAddrMan& addr)
boost::filesystem::path pathTmp = GetDataDir() / tmpfn;
FILE *file = fopen(pathTmp.string().c_str(), "wb");
CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
- if (!fileout)
+ if (fileout.IsNull())
return error("%s : Failed to open file %s", __func__, pathTmp.string());
// Write and commit header, data
@@ -1939,7 +1939,7 @@ bool CAddrDB::Write(const CAddrMan& addr)
catch (std::exception &e) {
return error("%s : Serialize or I/O error - %s", __func__, e.what());
}
- FileCommit(fileout);
+ FileCommit(fileout.Get());
fileout.fclose();
// replace existing peers.dat, if any, with new peers.dat.XXXX
@@ -1954,7 +1954,7 @@ bool CAddrDB::Read(CAddrMan& addr)
// open input file, and associate with CAutoFile
FILE *file = fopen(pathAddr.string().c_str(), "rb");
CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
- if (!filein)
+ if (filein.IsNull())
return error("%s : Failed to open file %s", __func__, pathAddr.string());
// use file size to size memory buffer
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 0e28f3abbd..72fdd753a8 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -7,6 +7,7 @@
#include "chainparams.h"
#include "util.h"
+#include "utilstrencodings.h"
#ifndef WIN32
# include <arpa/inet.h>
diff --git a/src/qt/bitcoinamountfield.h b/src/qt/bitcoinamountfield.h
index e52feeb46e..040a234177 100644
--- a/src/qt/bitcoinamountfield.h
+++ b/src/qt/bitcoinamountfield.h
@@ -21,7 +21,9 @@ class BitcoinAmountField: public QWidget
{
Q_OBJECT
- Q_PROPERTY(CAmount value READ value WRITE setValue NOTIFY valueChanged USER true)
+ // ugly hack: for some unknown reason CAmount (instead of qint64) does not work here as expected
+ // discussion: https://github.com/bitcoin/bitcoin/pull/5117
+ Q_PROPERTY(qint64 value READ value WRITE setValue NOTIFY valueChanged USER true)
public:
explicit BitcoinAmountField(QWidget *parent = 0);
diff --git a/src/qt/monitoreddatamapper.cpp b/src/qt/monitoreddatamapper.cpp
deleted file mode 100644
index 5931c53872..0000000000
--- a/src/qt/monitoreddatamapper.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-2013 The Bitcoin developers
-// Distributed under the MIT/X11 software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include "monitoreddatamapper.h"
-
-#include <QMetaObject>
-#include <QMetaProperty>
-#include <QWidget>
-
-MonitoredDataMapper::MonitoredDataMapper(QObject *parent) :
- QDataWidgetMapper(parent)
-{
-}
-
-void MonitoredDataMapper::addMapping(QWidget *widget, int section)
-{
- QDataWidgetMapper::addMapping(widget, section);
- addChangeMonitor(widget);
-}
-
-void MonitoredDataMapper::addMapping(QWidget *widget, int section, const QByteArray &propertyName)
-{
- QDataWidgetMapper::addMapping(widget, section, propertyName);
- addChangeMonitor(widget);
-}
-
-void MonitoredDataMapper::addChangeMonitor(QWidget *widget)
-{
- // Watch user property of widget for changes, and connect
- // the signal to our viewModified signal.
- QMetaProperty prop = widget->metaObject()->userProperty();
- int signal = prop.notifySignalIndex();
- int method = this->metaObject()->indexOfMethod("viewModified()");
- if(signal != -1 && method != -1)
- {
- QMetaObject::connect(widget, signal, this, method);
- }
-}
diff --git a/src/qt/monitoreddatamapper.h b/src/qt/monitoreddatamapper.h
deleted file mode 100644
index b3237d3e09..0000000000
--- a/src/qt/monitoreddatamapper.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2011-2013 The Bitcoin developers
-// Distributed under the MIT/X11 software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef MONITOREDDATAMAPPER_H
-#define MONITOREDDATAMAPPER_H
-
-#include <QDataWidgetMapper>
-
-QT_BEGIN_NAMESPACE
-class QWidget;
-QT_END_NAMESPACE
-
-/** Data to Widget mapper that watches for edits and notifies listeners when a field is edited.
- This can be used, for example, to enable a commit/apply button in a configuration dialog.
- */
-class MonitoredDataMapper : public QDataWidgetMapper
-{
- Q_OBJECT
-
-public:
- explicit MonitoredDataMapper(QObject *parent=0);
-
- void addMapping(QWidget *widget, int section);
- void addMapping(QWidget *widget, int section, const QByteArray &propertyName);
-
-private:
- void addChangeMonitor(QWidget *widget);
-
-signals:
- void viewModified();
-};
-
-#endif // MONITOREDDATAMAPPER_H
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 279467129f..67be174d55 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -11,7 +11,6 @@
#include "bitcoinunits.h"
#include "guiutil.h"
-#include "monitoreddatamapper.h"
#include "optionsmodel.h"
#include "main.h" // for MAX_SCRIPTCHECK_THREADS
@@ -24,6 +23,7 @@
#include <boost/thread.hpp>
+#include <QDataWidgetMapper>
#include <QDir>
#include <QIntValidator>
#include <QLocale>
@@ -105,7 +105,7 @@ OptionsDialog::OptionsDialog(QWidget *parent) :
#endif
/* Widget-to-option mapper */
- mapper = new MonitoredDataMapper(this);
+ mapper = new QDataWidgetMapper(this);
mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit);
mapper->setOrientation(Qt::Vertical);
diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h
index 6b62069660..39c53f4391 100644
--- a/src/qt/optionsdialog.h
+++ b/src/qt/optionsdialog.h
@@ -7,7 +7,7 @@
#include <QDialog>
-class MonitoredDataMapper;
+class QDataWidgetMapper;
class OptionsModel;
class QValidatedLineEdit;
@@ -52,7 +52,7 @@ signals:
private:
Ui::OptionsDialog *ui;
OptionsModel *model;
- MonitoredDataMapper *mapper;
+ QDataWidgetMapper *mapper;
bool fProxyIpValid;
};
diff --git a/src/qt/test/paymentservertests.cpp b/src/qt/test/paymentservertests.cpp
index 5d7fe96285..84cab01c50 100644
--- a/src/qt/test/paymentservertests.cpp
+++ b/src/qt/test/paymentservertests.cpp
@@ -8,6 +8,7 @@
#include "paymentrequestdata.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>
diff --git a/src/rpcblockchain.cpp b/src/rpcblockchain.cpp
index b0da1d6a5c..78f5569895 100644
--- a/src/rpcblockchain.cpp
+++ b/src/rpcblockchain.cpp
@@ -225,7 +225,7 @@ Value getblockhash(const Array& params, bool fHelp)
int nHeight = params[0].get_int();
if (nHeight < 0 || nHeight > chainActive.Height())
- throw runtime_error("Block number out of range.");
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range");
CBlockIndex* pblockindex = chainActive[nHeight];
return pblockindex->GetBlockHash().GetHex();
diff --git a/src/rpcmisc.cpp b/src/rpcmisc.cpp
index 8be14b567c..92ed1c3e2b 100644
--- a/src/rpcmisc.cpp
+++ b/src/rpcmisc.cpp
@@ -292,7 +292,7 @@ Value createmultisig(const Array& params, bool fHelp)
// Construct using pay-to-script-hash:
CScript inner = _createmultisig_redeemScript(params);
- CScriptID innerID = inner.GetID();
+ CScriptID innerID(inner);
CBitcoinAddress address(innerID);
Object result;
diff --git a/src/rpcrawtransaction.cpp b/src/rpcrawtransaction.cpp
index 78372da685..fdfcb59eeb 100644
--- a/src/rpcrawtransaction.cpp
+++ b/src/rpcrawtransaction.cpp
@@ -480,7 +480,7 @@ Value decodescript(const Array& params, bool fHelp)
}
ScriptPubKeyToJSON(script, r, false);
- r.push_back(Pair("p2sh", CBitcoinAddress(script.GetID()).ToString()));
+ r.push_back(Pair("p2sh", CBitcoinAddress(CScriptID(script)).ToString()));
return r;
}
diff --git a/src/rpcwallet.cpp b/src/rpcwallet.cpp
index d11455e389..68bb4068b8 100644
--- a/src/rpcwallet.cpp
+++ b/src/rpcwallet.cpp
@@ -918,7 +918,7 @@ Value addmultisigaddress(const Array& params, bool fHelp)
// Construct using pay-to-script-hash:
CScript inner = _createmultisig_redeemScript(params);
- CScriptID innerID = inner.GetID();
+ CScriptID innerID(inner);
pwalletMain->AddCScript(inner);
pwalletMain->SetAddressBook(innerID, strAccount, "send");
diff --git a/src/script/compressor.cpp b/src/script/compressor.cpp
index 51a3cf6025..af1acf48db 100644
--- a/src/script/compressor.cpp
+++ b/src/script/compressor.cpp
@@ -5,6 +5,9 @@
#include "compressor.h"
+#include "key.h"
+#include "script/standard.h"
+
bool CScriptCompressor::IsToKeyID(CKeyID &hash) const
{
if (script.size() == 25 && script[0] == OP_DUP && script[1] == OP_HASH160
diff --git a/src/script/compressor.h b/src/script/compressor.h
index 53c6bf3ecc..154e0b2662 100644
--- a/src/script/compressor.h
+++ b/src/script/compressor.h
@@ -7,6 +7,11 @@
#define H_BITCOIN_SCRIPT_COMPRESSOR
#include "script/script.h"
+#include "serialize.h"
+
+class CKeyID;
+class CPubKey;
+class CScriptID;
/** Compact serializer for scripts.
*
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index ae66217b7c..cd73b88210 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -9,6 +9,7 @@
#include "crypto/ripemd160.h"
#include "crypto/sha1.h"
#include "crypto/sha2.h"
+#include "key.h"
#include "script/script.h"
#include "uint256.h"
#include "util.h"
diff --git a/src/script/script.cpp b/src/script/script.cpp
index a5126e7cc2..3e19d0c2bf 100644
--- a/src/script/script.cpp
+++ b/src/script/script.cpp
@@ -5,7 +5,18 @@
#include "script.h"
-#include <boost/foreach.hpp>
+#include "tinyformat.h"
+#include "utilstrencodings.h"
+
+namespace {
+inline std::string ValueString(const std::vector<unsigned char>& vch)
+{
+ if (vch.size() <= 4)
+ return strprintf("%d", CScriptNum(vch).getint());
+ else
+ return HexStr(vch);
+}
+} // anon namespace
using namespace std;
@@ -253,3 +264,26 @@ bool CScript::HasCanonicalPushes() const
}
return true;
}
+
+std::string CScript::ToString() const
+{
+ std::string str;
+ opcodetype opcode;
+ std::vector<unsigned char> vch;
+ const_iterator pc = begin();
+ while (pc < end())
+ {
+ if (!str.empty())
+ str += " ";
+ if (!GetOp(pc, opcode, vch))
+ {
+ str += "[error]";
+ return str;
+ }
+ if (0 <= opcode && opcode <= OP_PUSHDATA4)
+ str += ValueString(vch);
+ else
+ str += GetOpName(opcode);
+ }
+ return str;
+}
diff --git a/src/script/script.h b/src/script/script.h
index caf176476f..d450db5cad 100644
--- a/src/script/script.h
+++ b/src/script/script.h
@@ -6,16 +6,23 @@
#ifndef H_BITCOIN_SCRIPT
#define H_BITCOIN_SCRIPT
-#include "key.h"
-#include "tinyformat.h"
-#include "utilstrencodings.h"
-
+#include <assert.h>
+#include <climits>
+#include <limits>
#include <stdexcept>
-
-#include <boost/variant.hpp>
+#include <stdint.h>
+#include <string.h>
+#include <string>
+#include <vector>
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE = 520; // bytes
+template <typename T>
+std::vector<unsigned char> ToByteVector(const T& in)
+{
+ return std::vector<unsigned char>(in.begin(), in.end());
+}
+
/** Script opcodes */
enum opcodetype
{
@@ -312,13 +319,6 @@ private:
int64_t m_value;
};
-inline std::string ValueString(const std::vector<unsigned char>& vch)
-{
- if (vch.size() <= 4)
- return strprintf("%d", CScriptNum(vch).getint());
- else
- return HexStr(vch);
-}
/** Serialized script, used inside transaction inputs and outputs */
class CScript : public std::vector<unsigned char>
@@ -358,7 +358,6 @@ public:
CScript(int64_t b) { operator<<(b); }
explicit CScript(opcodetype b) { operator<<(b); }
- explicit CScript(const uint256& b) { operator<<(b); }
explicit CScript(const CScriptNum& b) { operator<<(b); }
explicit CScript(const std::vector<unsigned char>& b) { operator<<(b); }
@@ -373,28 +372,6 @@ public:
return *this;
}
- CScript& operator<<(const uint160& b)
- {
- insert(end(), sizeof(b));
- insert(end(), (unsigned char*)&b, (unsigned char*)&b + sizeof(b));
- return *this;
- }
-
- CScript& operator<<(const uint256& b)
- {
- insert(end(), sizeof(b));
- insert(end(), (unsigned char*)&b, (unsigned char*)&b + sizeof(b));
- return *this;
- }
-
- CScript& operator<<(const CPubKey& key)
- {
- assert(key.size() < OP_PUSHDATA1);
- insert(end(), (unsigned char)key.size());
- insert(end(), key.begin(), key.end());
- return *this;
- }
-
CScript& operator<<(const CScriptNum& b)
{
*this << b.getvch();
@@ -588,34 +565,7 @@ public:
return (size() > 0 && *begin() == OP_RETURN);
}
- std::string ToString() const
- {
- std::string str;
- opcodetype opcode;
- std::vector<unsigned char> vch;
- const_iterator pc = begin();
- while (pc < end())
- {
- if (!str.empty())
- str += " ";
- if (!GetOp(pc, opcode, vch))
- {
- str += "[error]";
- return str;
- }
- if (0 <= opcode && opcode <= OP_PUSHDATA4)
- str += ValueString(vch);
- else
- str += GetOpName(opcode);
- }
- return str;
- }
-
- CScriptID GetID() const
- {
- return CScriptID(Hash160(*this));
- }
-
+ std::string ToString() const;
void clear()
{
// The default std::vector::clear() does not release memory.
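
ToByteVector is what call sites now use in place of the removed CScript stream operators for CPubKey, uint160 and uint256: the caller converts the object to raw bytes, and the existing vector overload performs the push. A self-contained sketch of the template added above, exercised with a stand-in byte container:

#include <string>
#include <vector>

template <typename T>
std::vector<unsigned char> ToByteVector(const T& in)
{
    return std::vector<unsigned char>(in.begin(), in.end());
}

int main()
{
    // Any type exposing byte iterators works the way CPubKey or uint160 do in the tree.
    std::string stand_in = "abc";
    std::vector<unsigned char> bytes = ToByteVector(stand_in);
    return bytes.size() == 3 ? 0 : 1;
}
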
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index da77e7d1f1..bf98c40394 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -78,7 +78,7 @@ bool Solver(const CKeyStore& keystore, const CScript& scriptPubKey, uint256 hash
{
CPubKey vch;
keystore.GetPubKey(keyID, vch);
- scriptSigRet << vch;
+ scriptSigRet << ToByteVector(vch);
}
return true;
case TX_SCRIPTHASH:
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 53ae254d59..05938961bc 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -7,6 +7,7 @@
#include "script/script.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <boost/foreach.hpp>
@@ -14,6 +15,8 @@ using namespace std;
typedef vector<unsigned char> valtype;
+CScriptID::CScriptID(const CScript& in) : uint160(in.size() ? Hash160(in.begin(), in.end()) : 0) {}
+
const char* GetTxnOutputType(txnouttype t)
{
switch (t)
@@ -280,13 +283,13 @@ public:
bool operator()(const CKeyID &keyID) const {
script->clear();
- *script << OP_DUP << OP_HASH160 << keyID << OP_EQUALVERIFY << OP_CHECKSIG;
+ *script << OP_DUP << OP_HASH160 << ToByteVector(keyID) << OP_EQUALVERIFY << OP_CHECKSIG;
return true;
}
bool operator()(const CScriptID &scriptID) const {
script->clear();
- *script << OP_HASH160 << scriptID << OP_EQUAL;
+ *script << OP_HASH160 << ToByteVector(scriptID) << OP_EQUAL;
return true;
}
};
@@ -306,7 +309,7 @@ CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys)
script << CScript::EncodeOP_N(nRequired);
BOOST_FOREACH(const CPubKey& key, keys)
- script << key;
+ script << ToByteVector(key);
script << CScript::EncodeOP_N(keys.size()) << OP_CHECKMULTISIG;
return script;
}
diff --git a/src/script/standard.h b/src/script/standard.h
index ead79b82a2..961b214c89 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -6,13 +6,25 @@
#ifndef H_BITCOIN_SCRIPT_STANDARD
#define H_BITCOIN_SCRIPT_STANDARD
+#include "key.h"
#include "script/script.h"
#include "script/interpreter.h"
+#include <boost/variant.hpp>
+
#include <stdint.h>
class CScript;
+/** A reference to a CScript: the Hash160 of its serialization (see script.h) */
+class CScriptID : public uint160
+{
+public:
+ CScriptID() : uint160(0) {}
+ CScriptID(const CScript& in);
+ CScriptID(const uint160& in) : uint160(in) {}
+};
+
static const unsigned int MAX_OP_RETURN_RELAY = 40; // bytes
// Mandatory script verification flags that all new blocks must comply with for
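
With CScriptID now constructed directly from a CScript (and CScript::GetID() removed), the P2SH call sites in this commit share one pattern. A hedged sketch of it, assuming script/standard.h and the helpers that appear elsewhere in the patch; the wrapper function name is illustrative:

// Build a 2-of-3 redeem script, hash it, and wrap it as pay-to-script-hash.
CScript MakeP2SH(const std::vector<CPubKey>& pubkeys)
{
    CScript redeemScript = GetScriptForMultisig(2, pubkeys);   // OP_2 <k1> <k2> <k3> OP_3 OP_CHECKMULTISIG
    CScriptID innerID(redeemScript);                           // Hash160 of the serialized script
    return GetScriptForDestination(innerID);                   // OP_HASH160 <20-byte hash> OP_EQUAL
}
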
diff --git a/src/serialize.h b/src/serialize.h
index 55b6891394..b9d5f95463 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1116,14 +1116,21 @@ public:
}
}
+ /** Get wrapped FILE* with transfer of ownership.
+ * @note This will invalidate the CAutoFile object, and makes it the responsibility of the caller
+ * of this function to clean up the returned FILE*.
+ */
FILE* release() { FILE* ret = file; file = NULL; return ret; }
- operator FILE*() { return file; }
- FILE* operator->() { return file; }
- FILE& operator*() { return *file; }
- FILE** operator&() { return &file; }
- FILE* operator=(FILE* pnew) { return file = pnew; }
- bool operator!() { return (file == NULL); }
+ /** Get wrapped FILE* without transfer of ownership.
+ * @note Ownership of the FILE* will remain with this class. Use this only if the scope of the
+ * CAutoFile outlives use of the passed pointer.
+ */
+ FILE* Get() const { return file; }
+
+ /** Return true if the wrapped FILE* is NULL, false otherwise.
+ */
+ bool IsNull() const { return (file == NULL); }
//
// Stream subset
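
The narrowed CAutoFile surface makes ownership explicit at every call site: IsNull() for the error check, Get() only where a raw FILE* must be handed to stdio. A sketch of the resulting pattern, with identifiers taken from the main.cpp hunks above:

// The wrapper owns the FILE*; Get() exposes it only for fseek/fflush-style calls,
// while >> still goes through CAutoFile's serialization interface.
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
    return error("ReadBlockFromDisk : OpenBlockFile failed");
fseek(filein.Get(), nOffset, SEEK_CUR);
filein >> block;
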
diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp
index c298c805da..e495435b81 100644
--- a/src/test/base58_tests.cpp
+++ b/src/test/base58_tests.cpp
@@ -12,6 +12,7 @@
#include "script/script.h"
#include "uint256.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 2cdafa4bdd..99b21a23a0 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -14,6 +14,7 @@
#include <vector>
#include <boost/test/unit_test.hpp>
+#include <boost/tuple/tuple.hpp>
using namespace std;
using namespace boost::tuples;
diff --git a/src/test/checkblock_tests.cpp b/src/test/checkblock_tests.cpp
index 67d40a45c7..9151fdc0c8 100644
--- a/src/test/checkblock_tests.cpp
+++ b/src/test/checkblock_tests.cpp
@@ -36,7 +36,7 @@ bool read_block(const std::string& filename, CBlock& block)
fseek(fp, 8, SEEK_SET); // skip msgheader/size
CAutoFile filein(fp, SER_DISK, CLIENT_VERSION);
- if (!filein) return false;
+ if (filein.IsNull()) return false;
filein >> block;
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index bad5c13ac2..93b7fe189a 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -170,7 +170,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vin[0].scriptSig = CScript() << OP_1;
tx.vout[0].nValue = 4900000000LL;
script = CScript() << OP_0;
- tx.vout[0].scriptPubKey = GetScriptForDestination(script.GetID());
+ tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script));
hash = tx.GetHash();
mempool.addUnchecked(hash, CTxMemPoolEntry(tx, 11, GetTime(), 111.0, 11));
tx.vin[0].prevout.hash = hash;
diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp
index 5a2ec1cb31..e9fc86779a 100644
--- a/src/test/multisig_tests.cpp
+++ b/src/test/multisig_tests.cpp
@@ -51,13 +51,13 @@ BOOST_AUTO_TEST_CASE(multisig_verify)
key[i].MakeNewKey(true);
CScript a_and_b;
- a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript a_or_b;
- a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_or_b << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript escrow;
- escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ escrow << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
CMutableTransaction txFrom; // Funding transaction
txFrom.vout.resize(3);
@@ -138,28 +138,28 @@ BOOST_AUTO_TEST_CASE(multisig_IsStandard)
txnouttype whichType;
CScript a_and_b;
- a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(::IsStandard(a_and_b, whichType));
CScript a_or_b;
- a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_or_b << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(::IsStandard(a_or_b, whichType));
CScript escrow;
- escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ escrow << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
BOOST_CHECK(::IsStandard(escrow, whichType));
CScript one_of_four;
- one_of_four << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << key[3].GetPubKey() << OP_4 << OP_CHECKMULTISIG;
+ one_of_four << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << ToByteVector(key[3].GetPubKey()) << OP_4 << OP_CHECKMULTISIG;
BOOST_CHECK(!::IsStandard(one_of_four, whichType));
CScript malformed[6];
- malformed[0] << OP_3 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
- malformed[1] << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
- malformed[2] << OP_0 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
- malformed[3] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_0 << OP_CHECKMULTISIG;
- malformed[4] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_CHECKMULTISIG;
- malformed[5] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey();
+ malformed[0] << OP_3 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
+ malformed[1] << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
+ malformed[2] << OP_0 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
+ malformed[3] << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_0 << OP_CHECKMULTISIG;
+ malformed[4] << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_CHECKMULTISIG;
+ malformed[5] << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey());
for (int i = 0; i < 6; i++)
BOOST_CHECK(!::IsStandard(malformed[i], whichType));
@@ -192,7 +192,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << key[0].GetPubKey() << OP_CHECKSIG;
+ s << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK(solutions.size() == 1);
CTxDestination addr;
@@ -207,7 +207,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_DUP << OP_HASH160 << key[0].GetPubKey().GetID() << OP_EQUALVERIFY << OP_CHECKSIG;
+ s << OP_DUP << OP_HASH160 << ToByteVector(key[0].GetPubKey().GetID()) << OP_EQUALVERIFY << OP_CHECKSIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK(solutions.size() == 1);
CTxDestination addr;
@@ -222,7 +222,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ s << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK_EQUAL(solutions.size(), 4U);
CTxDestination addr;
@@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ s << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK_EQUAL(solutions.size(), 4U);
vector<CTxDestination> addrs;
@@ -256,7 +256,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ s << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK(solutions.size() == 5);
}
@@ -274,13 +274,13 @@ BOOST_AUTO_TEST_CASE(multisig_Sign)
}
CScript a_and_b;
- a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript a_or_b;
- a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_or_b << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript escrow;
- escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ escrow << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
CMutableTransaction txFrom; // Funding transaction
txFrom.vout.resize(3);
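For context on the pattern repeated throughout these test diffs: the tests stop streaming CPubKey and uint160 objects directly into CScript and instead push explicit byte vectors. A minimal sketch of the ToByteVector helper they rely on, assuming it simply copies the object's begin()/end() byte range:

// Sketch only: copy any byte-range object into a std::vector<unsigned char>
// so that CScript::operator<< only ever sees opcodes or raw data pushes.
template <typename T>
std::vector<unsigned char> ToByteVector(const T& in)
{
    return std::vector<unsigned char>(in.begin(), in.end());
}

// Usage as in the updated tests:
//   CScript s;
//   s << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;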
diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_P2SH_tests.cpp
index f8361a0dc8..fcab652783 100644
--- a/src/test/script_P2SH_tests.cpp
+++ b/src/test/script_P2SH_tests.cpp
@@ -67,15 +67,15 @@ BOOST_AUTO_TEST_CASE(sign)
// 8 Scripts: checking all combinations of
// different keys, straight/P2SH, pubkey/pubkeyhash
CScript standardScripts[4];
- standardScripts[0] << key[0].GetPubKey() << OP_CHECKSIG;
+ standardScripts[0] << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
standardScripts[1] = GetScriptForDestination(key[1].GetPubKey().GetID());
- standardScripts[2] << key[1].GetPubKey() << OP_CHECKSIG;
+ standardScripts[2] << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
standardScripts[3] = GetScriptForDestination(key[2].GetPubKey().GetID());
CScript evalScripts[4];
for (int i = 0; i < 4; i++)
{
keystore.AddCScript(standardScripts[i]);
- evalScripts[i] = GetScriptForDestination(standardScripts[i].GetID());
+ evalScripts[i] = GetScriptForDestination(CScriptID(standardScripts[i]));
}
CMutableTransaction txFrom; // Funding transaction:
@@ -129,7 +129,7 @@ BOOST_AUTO_TEST_CASE(norecurse)
CScript invalidAsScript;
invalidAsScript << OP_INVALIDOPCODE << OP_INVALIDOPCODE;
- CScript p2sh = GetScriptForDestination(invalidAsScript.GetID());
+ CScript p2sh = GetScriptForDestination(CScriptID(invalidAsScript));
CScript scriptSig;
scriptSig << Serialize(invalidAsScript);
@@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE(norecurse)
// Try to recur, and verification should succeed because
// the inner HASH160 <> EQUAL should only check the hash:
- CScript p2sh2 = GetScriptForDestination(p2sh.GetID());
+ CScript p2sh2 = GetScriptForDestination(CScriptID(p2sh));
CScript scriptSig2;
scriptSig2 << Serialize(invalidAsScript) << Serialize(p2sh);
@@ -169,7 +169,7 @@ BOOST_AUTO_TEST_CASE(set)
CScript outer[4];
for (int i = 0; i < 4; i++)
{
- outer[i] = GetScriptForDestination(inner[i].GetID());
+ outer[i] = GetScriptForDestination(CScriptID(inner[i]));
keystore.AddCScript(inner[i]);
}
@@ -206,9 +206,9 @@ BOOST_AUTO_TEST_CASE(set)
BOOST_AUTO_TEST_CASE(is)
{
// Test CScript::IsPayToScriptHash()
- uint160 dummy;
+ uint160 dummy(0);
CScript p2sh;
- p2sh << OP_HASH160 << dummy << OP_EQUAL;
+ p2sh << OP_HASH160 << ToByteVector(dummy) << OP_EQUAL;
BOOST_CHECK(p2sh.IsPayToScriptHash());
// Not considered pay-to-script-hash if using one of the OP_PUSHDATA opcodes:
@@ -224,13 +224,13 @@ BOOST_AUTO_TEST_CASE(is)
CScript not_p2sh;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
- not_p2sh.clear(); not_p2sh << OP_HASH160 << dummy << dummy << OP_EQUAL;
+ not_p2sh.clear(); not_p2sh << OP_HASH160 << ToByteVector(dummy) << ToByteVector(dummy) << OP_EQUAL;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
- not_p2sh.clear(); not_p2sh << OP_NOP << dummy << OP_EQUAL;
+ not_p2sh.clear(); not_p2sh << OP_NOP << ToByteVector(dummy) << OP_EQUAL;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
- not_p2sh.clear(); not_p2sh << OP_HASH160 << dummy << OP_CHECKSIG;
+ not_p2sh.clear(); not_p2sh << OP_HASH160 << ToByteVector(dummy) << OP_CHECKSIG;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
}
@@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE(switchover)
CScript scriptSig;
scriptSig << Serialize(notValid);
- CScript fund = GetScriptForDestination(notValid.GetID());
+ CScript fund = GetScriptForDestination(CScriptID(notValid));
// Validation should succeed under old rules (hash is correct):
@@ -275,7 +275,7 @@ BOOST_AUTO_TEST_CASE(AreInputsStandard)
keystore.AddCScript(pay1);
CScript pay1of3 = GetScriptForMultisig(1, keys);
- txFrom.vout[0].scriptPubKey = GetScriptForDestination(pay1.GetID()); // P2SH (OP_CHECKSIG)
+ txFrom.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(pay1)); // P2SH (OP_CHECKSIG)
txFrom.vout[0].nValue = 1000;
txFrom.vout[1].scriptPubKey = pay1; // ordinary OP_CHECKSIG
txFrom.vout[1].nValue = 2000;
@@ -285,31 +285,31 @@ BOOST_AUTO_TEST_CASE(AreInputsStandard)
// vout[3] is complicated 1-of-3 AND 2-of-3
// ... that is OK if wrapped in P2SH:
CScript oneAndTwo;
- oneAndTwo << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey();
+ oneAndTwo << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey());
oneAndTwo << OP_3 << OP_CHECKMULTISIGVERIFY;
- oneAndTwo << OP_2 << key[3].GetPubKey() << key[4].GetPubKey() << key[5].GetPubKey();
+ oneAndTwo << OP_2 << ToByteVector(key[3].GetPubKey()) << ToByteVector(key[4].GetPubKey()) << ToByteVector(key[5].GetPubKey());
oneAndTwo << OP_3 << OP_CHECKMULTISIG;
keystore.AddCScript(oneAndTwo);
- txFrom.vout[3].scriptPubKey = GetScriptForDestination(oneAndTwo.GetID());
+ txFrom.vout[3].scriptPubKey = GetScriptForDestination(CScriptID(oneAndTwo));
txFrom.vout[3].nValue = 4000;
// vout[4] is max sigops:
CScript fifteenSigops; fifteenSigops << OP_1;
for (unsigned i = 0; i < MAX_P2SH_SIGOPS; i++)
- fifteenSigops << key[i%3].GetPubKey();
+ fifteenSigops << ToByteVector(key[i%3].GetPubKey());
fifteenSigops << OP_15 << OP_CHECKMULTISIG;
keystore.AddCScript(fifteenSigops);
- txFrom.vout[4].scriptPubKey = GetScriptForDestination(fifteenSigops.GetID());
+ txFrom.vout[4].scriptPubKey = GetScriptForDestination(CScriptID(fifteenSigops));
txFrom.vout[4].nValue = 5000;
// vout[5/6] are non-standard because they exceed MAX_P2SH_SIGOPS
CScript sixteenSigops; sixteenSigops << OP_16 << OP_CHECKMULTISIG;
keystore.AddCScript(sixteenSigops);
- txFrom.vout[5].scriptPubKey = GetScriptForDestination(fifteenSigops.GetID());
+ txFrom.vout[5].scriptPubKey = GetScriptForDestination(CScriptID(fifteenSigops));
txFrom.vout[5].nValue = 5000;
CScript twentySigops; twentySigops << OP_CHECKMULTISIG;
keystore.AddCScript(twentySigops);
- txFrom.vout[6].scriptPubKey = GetScriptForDestination(twentySigops.GetID());
+ txFrom.vout[6].scriptPubKey = GetScriptForDestination(CScriptID(twentySigops));
txFrom.vout[6].nValue = 6000;
coins.ModifyCoins(txFrom.GetHash())->FromTx(txFrom, 0);
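The P2SH tests above also switch from redeemScript.GetID() to an explicit CScriptID(redeemScript) construction. A rough sketch of what that identifier amounts to, assuming it is a uint160 wrapper over the Hash160 of the raw script bytes:

// Sketch, not the committed implementation: a script identifier computed as
// RIPEMD160(SHA256(script)) over the serialized script.
class CScriptID : public uint160
{
public:
    CScriptID() : uint160() {}
    explicit CScriptID(const CScript& in) : uint160(Hash160(in.begin(), in.end())) {}
};

// Usage as in the updated tests:
//   CScript p2sh = GetScriptForDestination(CScriptID(redeemScript));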
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index a4b0212494..d3fc673a79 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -162,7 +162,7 @@ public:
TestBuilder(const CScript& redeemScript, const std::string& comment_, int flags_, bool P2SH = false) : scriptPubKey(redeemScript), havePush(false), comment(comment_), flags(flags_)
{
if (P2SH) {
- creditTx = BuildCreditingTransaction(CScript() << OP_HASH160 << redeemScript.GetID() << OP_EQUAL);
+ creditTx = BuildCreditingTransaction(CScript() << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL);
} else {
creditTx = BuildCreditingTransaction(redeemScript);
}
@@ -270,135 +270,135 @@ BOOST_AUTO_TEST_CASE(script_build)
std::vector<TestBuilder> good;
std::vector<TestBuilder> bad;
- good.push_back(TestBuilder(CScript() << keys.pubkey0 << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK", 0
).PushSig(keys.key0));
- bad.push_back(TestBuilder(CScript() << keys.pubkey0 << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK, bad sig", 0
).PushSig(keys.key0).DamagePush(10));
- good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey1C.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey1C.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH", 0
).PushSig(keys.key1).Push(keys.pubkey1C));
- bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey2C.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey2C.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH, bad pubkey", 0
).PushSig(keys.key2).Push(keys.pubkey2C).DamagePush(5));
- good.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK anyonecanpay", 0
).PushSig(keys.key1, SIGHASH_ALL | SIGHASH_ANYONECANPAY));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK anyonecanpay marked with normal hashtype", 0
).PushSig(keys.key1, SIGHASH_ALL | SIGHASH_ANYONECANPAY).EditPush(70, "81", "01"));
- good.push_back(TestBuilder(CScript() << keys.pubkey0C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"P2SH(P2PK)", SCRIPT_VERIFY_P2SH, true
).PushSig(keys.key0).PushRedeem());
- bad.push_back(TestBuilder(CScript() << keys.pubkey0C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"P2SH(P2PK), bad redeemscript", SCRIPT_VERIFY_P2SH, true
).PushSig(keys.key0).PushRedeem().DamagePush(10));
- good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey1.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey1.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH), bad sig but no VERIFY_P2SH", 0, true
).PushSig(keys.key0).DamagePush(10).PushRedeem());
- bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey1.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey1.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH), bad sig", SCRIPT_VERIFY_P2SH, true
).PushSig(keys.key0).DamagePush(10).PushRedeem());
- good.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ good.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3", 0
).Num(0).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2));
- bad.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ bad.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3, 2 sigs", 0
).Num(0).PushSig(keys.key0).PushSig(keys.key1).Num(0));
- good.push_back(TestBuilder(CScript() << OP_2 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ good.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"P2SH(2-of-3)", SCRIPT_VERIFY_P2SH, true
).Num(0).PushSig(keys.key1).PushSig(keys.key2).PushRedeem());
- bad.push_back(TestBuilder(CScript() << OP_2 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ bad.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"P2SH(2-of-3), 1 sig", SCRIPT_VERIFY_P2SH, true
).Num(0).PushSig(keys.key1).Num(0).PushRedeem());
- good.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much R padding but no DERSIG", 0
).PushSig(keys.key1, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key1, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- good.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much S padding but no DERSIG", 0
).PushSig(keys.key1, SIGHASH_ALL).EditPush(1, "44", "45").EditPush(37, "20", "2100"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much S padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key1, SIGHASH_ALL).EditPush(1, "44", "45").EditPush(37, "20", "2100"));
- good.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too little R padding but no DERSIG", 0
).PushSig(keys.key1, SIGHASH_ALL, 33, 32).EditPush(1, "45022100", "440220"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too little R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key1, SIGHASH_ALL, 33, 32).EditPush(1, "45022100", "440220"));
- good.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with bad sig with too much R padding but no DERSIG", 0
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000").DamagePush(10));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with bad sig with too much R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000").DamagePush(10));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with too much R padding but no DERSIG", 0
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with too much R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- good.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with high S but no LOW_S", 0
).PushSig(keys.key2, SIGHASH_ALL, 32, 33));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with high S", SCRIPT_VERIFY_LOW_S
).PushSig(keys.key2, SIGHASH_ALL, 32, 33));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"P2PK with hybrid pubkey but no STRICTENC", 0
).PushSig(keys.key0, SIGHASH_ALL));
- bad.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"P2PK with hybrid pubkey", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key0, SIGHASH_ALL));
- bad.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with hybrid pubkey but no STRICTENC", 0
).PushSig(keys.key0, SIGHASH_ALL));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with hybrid pubkey", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key0, SIGHASH_ALL));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid hybrid pubkey but no STRICTENC", 0
).PushSig(keys.key0, SIGHASH_ALL).DamagePush(10));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid hybrid pubkey", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key0, SIGHASH_ALL).DamagePush(10));
- good.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK with undefined hashtype but no STRICTENC", 0
).PushSig(keys.key1, 5));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK with undefined hashtype", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key1, 5));
- good.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid sig and undefined hashtype but no STRICTENC", 0
).PushSig(keys.key1, 5).DamagePush(10));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid sig and undefined hashtype", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key1, 5).DamagePush(10));
- good.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ good.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3 with nonzero dummy but no NULLDUMMY", 0
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2));
- bad.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ bad.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3 with nonzero dummy", SCRIPT_VERIFY_NULLDUMMY
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2));
- good.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG << OP_NOT,
"3-of-3 NOT with invalid sig and nonzero dummy but no NULLDUMMY", 0
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2).DamagePush(10));
- bad.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG << OP_NOT,
"3-of-3 NOT with invalid sig with nonzero dummy", SCRIPT_VERIFY_NULLDUMMY
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2).DamagePush(10));
@@ -582,7 +582,7 @@ BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG12)
key3.MakeNewKey(true);
CScript scriptPubKey12;
- scriptPubKey12 << OP_1 << key1.GetPubKey() << key2.GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ scriptPubKey12 << OP_1 << ToByteVector(key1.GetPubKey()) << ToByteVector(key2.GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CMutableTransaction txFrom12 = BuildCreditingTransaction(scriptPubKey12);
CMutableTransaction txTo12 = BuildSpendingTransaction(CScript(), txFrom12);
@@ -608,7 +608,7 @@ BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG23)
key4.MakeNewKey(false);
CScript scriptPubKey23;
- scriptPubKey23 << OP_2 << key1.GetPubKey() << key2.GetPubKey() << key3.GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ scriptPubKey23 << OP_2 << ToByteVector(key1.GetPubKey()) << ToByteVector(key2.GetPubKey()) << ToByteVector(key3.GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
CMutableTransaction txFrom23 = BuildCreditingTransaction(scriptPubKey23);
CMutableTransaction txTo23 = BuildSpendingTransaction(CScript(), txFrom23);
@@ -695,9 +695,9 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
BOOST_CHECK(combined == scriptSigCopy || combined == scriptSig);
// P2SH, single-signature case:
- CScript pkSingle; pkSingle << keys[0].GetPubKey() << OP_CHECKSIG;
+ CScript pkSingle; pkSingle << ToByteVector(keys[0].GetPubKey()) << OP_CHECKSIG;
keystore.AddCScript(pkSingle);
- scriptPubKey = GetScriptForDestination(pkSingle.GetID());
+ scriptPubKey = GetScriptForDestination(CScriptID(pkSingle));
SignSignature(keystore, txFrom, txTo, 0);
combined = CombineSignatures(scriptPubKey, txTo, 0, scriptSig, empty);
BOOST_CHECK(combined == scriptSig);
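In the TestBuilder change earlier in this file, the P2SH crediting output is spelled out by hand. As a reminder (a sketch, not part of this diff), a pay-to-script-hash output script is just OP_HASH160 <20-byte script hash> OP_EQUAL, and GetScriptForDestination(CScriptID(...)) should produce the same bytes:

// Two equivalent ways to build a P2SH scriptPubKey for some redeem script.
// 'redeemScript' is assumed to be any valid CScript.
CScript byHand   = CScript() << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL;
CScript byHelper = GetScriptForDestination(CScriptID(redeemScript));
assert(byHand == byHelper); // 23 bytes: 0xa9, 0x14, <20-byte hash>, 0x87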
diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp
index 62a6cd63d6..7b27703b62 100644
--- a/src/test/sigopcount_tests.cpp
+++ b/src/test/sigopcount_tests.cpp
@@ -31,14 +31,14 @@ BOOST_AUTO_TEST_CASE(GetSigOpCount)
BOOST_CHECK_EQUAL(s1.GetSigOpCount(false), 0U);
BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 0U);
- uint160 dummy;
- s1 << OP_1 << dummy << dummy << OP_2 << OP_CHECKMULTISIG;
+ uint160 dummy(0);
+ s1 << OP_1 << ToByteVector(dummy) << ToByteVector(dummy) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 2U);
s1 << OP_IF << OP_CHECKSIG << OP_ENDIF;
BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 3U);
BOOST_CHECK_EQUAL(s1.GetSigOpCount(false), 21U);
- CScript p2sh = GetScriptForDestination(s1.GetID());
+ CScript p2sh = GetScriptForDestination(CScriptID(s1));
CScript scriptSig;
scriptSig << OP_0 << Serialize(s1);
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(scriptSig), 3U);
@@ -54,11 +54,11 @@ BOOST_AUTO_TEST_CASE(GetSigOpCount)
BOOST_CHECK_EQUAL(s2.GetSigOpCount(true), 3U);
BOOST_CHECK_EQUAL(s2.GetSigOpCount(false), 20U);
- p2sh = GetScriptForDestination(s2.GetID());
+ p2sh = GetScriptForDestination(CScriptID(s2));
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(true), 0U);
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(false), 0U);
CScript scriptSig2;
- scriptSig2 << OP_1 << dummy << dummy << Serialize(s2);
+ scriptSig2 << OP_1 << ToByteVector(dummy) << ToByteVector(dummy) << Serialize(s2);
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(scriptSig2), 3U);
}
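The sigop-count assertions above depend on the distinction between the legacy "inaccurate" count and the accurate count used for P2SH. A hedged sketch of that accounting, reusing the same helpers as the test:

// Sketch of the accounting exercised above. A bare OP_CHECKMULTISIG counts as
// 20 sigops in inaccurate mode but as n (taken from the preceding OP_n) in
// accurate mode; for a P2SH output the count comes from the redeem script
// carried in the last push of the scriptSig.
CScript redeem;
redeem << OP_1 << ToByteVector(dummy) << ToByteVector(dummy) << OP_2 << OP_CHECKMULTISIG;
CScript p2sh = GetScriptForDestination(CScriptID(redeem));
CScript scriptSig;
scriptSig << OP_0 << Serialize(redeem);            // Serialize() is the test helper seen above
unsigned int n = p2sh.GetSigOpCount(scriptSig);    // 2 here: OP_2 ... OP_CHECKMULTISIG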
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 18cb8f3d1b..41ccaaac94 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -259,9 +259,9 @@ SetupDummyInputs(CBasicKeyStore& keystoreRet, CCoinsViewCache& coinsRet)
// Create some dummy input transactions
dummyTransactions[0].vout.resize(2);
dummyTransactions[0].vout[0].nValue = 11*CENT;
- dummyTransactions[0].vout[0].scriptPubKey << key[0].GetPubKey() << OP_CHECKSIG;
+ dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
dummyTransactions[0].vout[1].nValue = 50*CENT;
- dummyTransactions[0].vout[1].scriptPubKey << key[1].GetPubKey() << OP_CHECKSIG;
+ dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
coinsRet.ModifyCoins(dummyTransactions[0].GetHash())->FromTx(dummyTransactions[0], 0);
dummyTransactions[1].vout.resize(2);
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index fa1802ad31..4522c63617 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -8,6 +8,7 @@
#include "core.h"
#include "util.h"
#include "utilmoneystr.h"
+#include "version.h"
#include <boost/circular_buffer.hpp>
diff --git a/src/utilmoneystr.cpp b/src/utilmoneystr.cpp
index 1a5635bfb8..95be06aa18 100644
--- a/src/utilmoneystr.cpp
+++ b/src/utilmoneystr.cpp
@@ -7,6 +7,7 @@
#include "core.h"
#include "tinyformat.h"
+#include "utilstrencodings.h"
using namespace std;
diff --git a/src/wallet.cpp b/src/wallet.cpp
index 19e43f6ec2..65944587f8 100644
--- a/src/wallet.cpp
+++ b/src/wallet.cpp
@@ -158,7 +158,7 @@ bool CWallet::LoadCScript(const CScript& redeemScript)
* these. Do not add them to the wallet and warn. */
if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE)
{
- std::string strAddr = CBitcoinAddress(redeemScript.GetID()).ToString();
+ std::string strAddr = CBitcoinAddress(CScriptID(redeemScript)).ToString();
LogPrintf("%s: Warning: This wallet contains a redeemScript of size %i which exceeds maximum size %i thus can never be redeemed. Do not use address %s.\n",
__func__, redeemScript.size(), MAX_SCRIPT_ELEMENT_SIZE, strAddr);
return true;
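The wallet hunk keeps the existing size guard and only changes how the unusable address is rendered. A sketch of the guard's intent, with a hypothetical wrapper name and assuming MAX_SCRIPT_ELEMENT_SIZE is the 520-byte stack-element limit:

// Hypothetical sketch (LoadCScriptChecked is not a function in this diff):
// a redeem script larger than one stack element can never be supplied in a
// scriptSig, so the matching P2SH output is unspendable; warn and skip it.
bool LoadCScriptChecked(CBasicKeyStore& keystore, const CScript& redeemScript)
{
    if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) {
        std::string strAddr = CBitcoinAddress(CScriptID(redeemScript)).ToString();
        LogPrintf("Warning: redeemScript of size %u exceeds %u, address %s can never be redeemed.\n",
                  redeemScript.size(), MAX_SCRIPT_ELEMENT_SIZE, strAddr);
        return true; // mirror the wallet: warn but do not fail the load
    }
    return keystore.AddCScript(redeemScript);
}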