author    Karl-Johan Alm <kalle.alm@gmail.com>        2018-07-31 01:50:43 +0900
committer Karl-Johan Alm <karljohan-alm@garage.co.jp> 2018-08-10 09:08:11 +0900
commit    18f690ec2f7eb1b4aa51825bfed0cbfdadc93ac7 (patch)
tree      0cd81cdaa3965f017adcec94006dd14b3e7320d9 /src
parent    f66e1c793eda7a6143fd03400c98512a9b6f00c7 (diff)
wallet: shuffle coins before grouping, where warranted
Issue brought up in https://github.com/bitcoin/bitcoin/pull/12257#discussion_r204554549
Diffstat (limited to 'src')
-rw-r--r--  src/wallet/wallet.cpp | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 597d108aef..73d2b205c2 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -35,6 +35,8 @@
#include <boost/algorithm/string/replace.hpp>
+static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10;
+
static CCriticalSection cs_wallets;
static std::vector<std::shared_ptr<CWallet>> vpwallets GUARDED_BY(cs_wallets);
@@ -2525,6 +2527,12 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
// form groups from remaining coins; note that preset coins will not
// automatically have their associated (same address) coins included
+ if (coin_control.m_avoid_partial_spends && vCoins.size() > OUTPUT_GROUP_MAX_ENTRIES) {
+ // Cases where we have 11+ outputs all pointing to the same destination may result in
+ // privacy leaks as they will potentially be deterministically sorted. We solve that by
+ // explicitly shuffling the outputs before processing.
+ std::shuffle(vCoins.begin(), vCoins.end(), FastRandomContext());
+ }
std::vector<OutputGroup> groups = GroupOutputs(vCoins, !coin_control.m_avoid_partial_spends);
size_t max_ancestors = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT));
@@ -4444,7 +4452,7 @@ std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outpu
// Limit output groups to no more than 10 entries, to protect
// against inadvertently creating a too-large transaction
// when using -avoidpartialspends
- if (gmap[dst].m_outputs.size() >= 10) {
+ if (gmap[dst].m_outputs.size() >= OUTPUT_GROUP_MAX_ENTRIES) {
groups.push_back(gmap[dst]);
gmap.erase(dst);
}
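
To see the shuffle-then-group pattern outside the wallet, here is a minimal, self-contained C++ sketch. Output, Group, and GroupByDestination are hypothetical stand-ins for the wallet's COutput, OutputGroup, and CWallet::GroupOutputs; std::mt19937 replaces FastRandomContext, which in Bitcoin Core satisfies the UniformRandomBitGenerator requirements that std::shuffle needs.

#include <algorithm>
#include <map>
#include <random>
#include <string>
#include <vector>

// Hypothetical stand-ins for the wallet's COutput/OutputGroup types.
struct Output { std::string destination; int value; };
using Group = std::vector<Output>;

static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10;

// Group outputs by destination, closing a group once it reaches
// OUTPUT_GROUP_MAX_ENTRIES entries (mirroring the cap in GroupOutputs).
std::vector<Group> GroupByDestination(const std::vector<Output>& outputs)
{
    std::vector<Group> groups;
    std::map<std::string, Group> gmap;
    for (const Output& out : outputs) {
        Group& g = gmap[out.destination];
        g.push_back(out);
        if (g.size() >= OUTPUT_GROUP_MAX_ENTRIES) {
            groups.push_back(g);
            gmap.erase(out.destination);
        }
    }
    for (const auto& entry : gmap) groups.push_back(entry.second);
    return groups;
}

int main()
{
    // 25 coins paying the same address: more than one capped group,
    // so which coins share a group depends on input order.
    std::vector<Output> coins;
    for (int i = 0; i < 25; ++i) coins.push_back({"addr1", i});

    // Shuffle before grouping so the split into capped groups is not
    // deterministic. std::mt19937 stands in for Bitcoin Core's
    // FastRandomContext here, purely to keep the sketch self-contained.
    if (coins.size() > OUTPUT_GROUP_MAX_ENTRIES) {
        std::shuffle(coins.begin(), coins.end(), std::mt19937{std::random_device{}()});
    }

    std::vector<Group> groups = GroupByDestination(coins);
    // groups now holds two groups of 10 and one of 5, with membership
    // randomized rather than fixed by the wallet's coin ordering.
}

Without the shuffle, deterministically ordered coins would always be split into the same capped groups, so which UTXOs get spent together would be predictable; randomizing the order before applying the cap removes that bias, which is the privacy concern the in-diff comment describes.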