// Copyright (c) 2012-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_CHECKQUEUE_H
#define BITCOIN_CHECKQUEUE_H

#include <sync.h>
#include <tinyformat.h>
#include <util/syscall_sandbox.h>
#include <util/threadnames.h>

#include <algorithm>
#include <vector>

template <typename T>
class CCheckQueueControl;

/**
 * Queue for verifications that have to be performed.
 * The verifications are represented by a type T, which must provide an
 * operator(), returning a bool.
 *
 * One thread (the master) is assumed to push batches of verifications
 * onto the queue, where they are processed by N-1 worker threads. When
 * the master is done adding work, it temporarily joins the worker pool
 * as an N'th worker, until all jobs are done.
 */
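//
// A minimal usage sketch, for illustration only. `ExampleCheck` stands in for a
// hypothetical check type providing `bool operator()()`; it is not defined in this file,
// and the batch size and thread count below are arbitrary example values.
//
//     CCheckQueue<ExampleCheck> queue{/*nBatchSizeIn=*/128};
//     queue.StartWorkerThreads(/*threads_num=*/3);
//     std::vector<ExampleCheck> checks = ...; // gather the verifications to run
//     queue.Add(checks);          // workers start draining the queue
//     bool all_ok = queue.Wait(); // master joins the pool until every check has run
//     queue.StopWorkerThreads();
//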
template <typename T>
class CCheckQueue
{
private:
    //! Mutex to protect the inner state
    Mutex m_mutex;

    //! Worker threads block on this when out of work
    std::condition_variable m_worker_cv;

    //! Master thread blocks on this when out of work
    std::condition_variable m_master_cv;

    //! The queue of elements to be processed.
    //! As the order of booleans doesn't matter, it is used as a LIFO (stack)
    std::vector<T> queue GUARDED_BY(m_mutex);

    //! The number of workers (including the master) that are idle.
    int nIdle GUARDED_BY(m_mutex){0};

    //! The total number of workers (including the master).
    int nTotal GUARDED_BY(m_mutex){0};

    //! The temporary evaluation result.
    bool fAllOk GUARDED_BY(m_mutex){true};

    /**
     * Number of verifications that haven't completed yet.
     * This includes elements that are no longer queued, but still in the
     * worker's own batches.
     */
    unsigned int nTodo GUARDED_BY(m_mutex){0};

    //! The maximum number of elements to be processed in one batch
    const unsigned int nBatchSize;

    std::vector<std::thread> m_worker_threads;
    bool m_request_stop GUARDED_BY(m_mutex){false};

    /** Internal function that does bulk of the verification work. */
    bool Loop(bool fMaster) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        std::condition_variable& cond = fMaster ? m_master_cv : m_worker_cv;
        std::vector<T> vChecks;
        vChecks.reserve(nBatchSize);
        unsigned int nNow = 0;
        bool fOk = true;
        do {
            {
                WAIT_LOCK(m_mutex, lock);
                // first do the clean-up of the previous loop run (allowing us to do it in the same critsect)
                if (nNow) {
                    fAllOk &= fOk;
                    nTodo -= nNow;
                    if (nTodo == 0 && !fMaster)
                        // We processed the last element; inform the master it can exit and return the result
                        m_master_cv.notify_one();
                } else {
                    // first iteration
                    nTotal++;
                }
                // logically, the do loop starts here
                while (queue.empty() && !m_request_stop) {
                    if (fMaster && nTodo == 0) {
                        nTotal--;
                        bool fRet = fAllOk;
                        // reset the status for new work later
                        fAllOk = true;
                        // return the current status
                        return fRet;
                    }
                    nIdle++;
                    cond.wait(lock); // wait
                    nIdle--;
                }
                if (m_request_stop) {
                    return false;
                }

                // Decide how many work units to process now.
                // * Do not try to do everything at once, but aim for increasingly smaller batches so
                //   all workers finish approximately simultaneously.
                // * Try to account for idle workers, which will instantly start helping.
                // * Don't do batches smaller than 1 (duh), or larger than nBatchSize.
                nNow = std::max(1U, std::min(nBatchSize, (unsigned int)queue.size() / (nTotal + nIdle + 1)));
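                // For illustration (numbers chosen arbitrarily): with 32 checks queued, nTotal == 4,
                // nIdle == 1 and a batch size larger than 5, the line above yields
                // nNow = max(1, min(nBatchSize, 32 / 6)) = 5, leaving most of the queue for the
                // workers that are about to wake up and claim batches of their own.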
                vChecks.resize(nNow);
                for (unsigned int i = 0; i < nNow; i++) {
                    // We want the lock on the m_mutex to be as short as possible, so swap jobs from the global
                    // queue to the local batch vector instead of copying.
                    vChecks[i].swap(queue.back());
                    queue.pop_back();
                }
                // Check whether we need to do work at all
                fOk = fAllOk;
            }
            // execute work
            for (T& check : vChecks)
                if (fOk)
                    fOk = check();
            vChecks.clear();
        } while (true);
    }

public:
    //! Mutex to ensure only one concurrent CCheckQueueControl
    Mutex m_control_mutex;

    //! Create a new check queue
    explicit CCheckQueue(unsigned int nBatchSizeIn)
        : nBatchSize(nBatchSizeIn)
    {
    }

    //! Create a pool of new worker threads.
    void StartWorkerThreads(const int threads_num) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        {
            LOCK(m_mutex);
            nIdle = 0;
            nTotal = 0;
            fAllOk = true;
        }
        assert(m_worker_threads.empty());
        for (int n = 0; n < threads_num; ++n) {
            m_worker_threads.emplace_back([this, n]() {
                util::ThreadRename(strprintf("scriptch.%i", n));
                SetSyscallSandboxPolicy(SyscallSandboxPolicy::VALIDATION_SCRIPT_CHECK);
                Loop(false /* worker thread */);
            });
        }
    }

    //! Wait until execution finishes, and return whether all evaluations were successful.
    bool Wait() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        return Loop(true /* master thread */);
    }

    //! Add a batch of checks to the queue
    void Add(std::vector<T>& vChecks) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        if (vChecks.empty()) {
            return;
        }

        {
            LOCK(m_mutex);
            for (T& check : vChecks) {
                queue.emplace_back();
                check.swap(queue.back());
            }
            nTodo += vChecks.size();
        }

        if (vChecks.size() == 1) {
            m_worker_cv.notify_one();
        } else {
            m_worker_cv.notify_all();
        }
    }

    //! Stop all of the worker threads.
    void StopWorkerThreads() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        WITH_LOCK(m_mutex, m_request_stop = true);
        m_worker_cv.notify_all();
        for (std::thread& t : m_worker_threads) {
            t.join();
        }
        m_worker_threads.clear();
        WITH_LOCK(m_mutex, m_request_stop = false);
    }

    bool HasThreads() const { return !m_worker_threads.empty(); }

    ~CCheckQueue()
    {
        assert(m_worker_threads.empty());
    }
};

/**
 * RAII-style controller object for a CCheckQueue that guarantees the passed
 * queue is finished before continuing.
 */
template <typename T>
class CCheckQueueControl
{
private:
    CCheckQueue<T> * const pqueue;
    bool fDone;

public:
    CCheckQueueControl() = delete;
    CCheckQueueControl(const CCheckQueueControl&) = delete;
    CCheckQueueControl& operator=(const CCheckQueueControl&) = delete;
    explicit CCheckQueueControl(CCheckQueue<T> * const pqueueIn) : pqueue(pqueueIn), fDone(false)
    {
        // passed queue is supposed to be unused, or nullptr
        if (pqueue != nullptr) {
            ENTER_CRITICAL_SECTION(pqueue->m_control_mutex);
        }
    }

    bool Wait()
    {
        if (pqueue == nullptr)
            return true;
        bool fRet = pqueue->Wait();
        fDone = true;
        return fRet;
    }

    void Add(std::vector<T>& vChecks)
    {
        if (pqueue != nullptr)
            pqueue->Add(vChecks);
    }

    ~CCheckQueueControl()
    {
        if (!fDone)
            Wait();
        if (pqueue != nullptr) {
            LEAVE_CRITICAL_SECTION(pqueue->m_control_mutex);
        }
    }
};
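
// An illustrative sketch of the RAII pattern above, for exposition only. `queue` is assumed
// to be a CCheckQueue with started workers, as in the usage sketch near the top of this
// file, and `ExampleCheck` remains a hypothetical check type.
//
//     {
//         CCheckQueueControl<ExampleCheck> control(&queue);
//         control.Add(checks);
//         // ... queue further batches, do other verification work ...
//         if (!control.Wait()) {
//             // at least one check returned false
//         }
//     } // the destructor would have called Wait() here if we had not done so ourselves
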
#endif // BITCOIN_CHECKQUEUE_H