// Copyright (c) 2015-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SCHEDULER_H
#define BITCOIN_SCHEDULER_H

#include <chrono>
#include <condition_variable>
#include <functional>
#include <list>
#include <map>
#include <thread>

#include <sync.h>

/**
* Simple class for background tasks that should be run
* periodically or once "after a while"
*
* Usage:
*
* CScheduler* s = new CScheduler();
 * s->scheduleFromNow(doSomething, std::chrono::milliseconds{11}); // Assuming a function: void doSomething() { }
* s->scheduleFromNow([=] { this->func(argument); }, std::chrono::milliseconds{3});
* std::thread* t = new std::thread([&] { s->serviceQueue(); });
*
* ... then at program shutdown, make sure to call stop() to clean up the thread(s) running serviceQueue:
* s->stop();
* t->join();
* delete t;
* delete s; // Must be done after thread is interrupted/joined.
*/
class CScheduler
{
public:
CScheduler();
~CScheduler();
    /** The scheduler's service thread; joined by stop() and StopWhenDrained() when joinable. */
    std::thread m_service_thread;
typedef std::function<void()> Function;
    /** Call f at/after time t */
void schedule(Function f, std::chrono::system_clock::time_point t);
/** Call f once after the delta has passed */
void scheduleFromNow(Function f, std::chrono::milliseconds delta)
{
schedule(std::move(f), std::chrono::system_clock::now() + delta);
}
/**
     * Repeat f until the scheduler is stopped. The first run takes place after delta has passed.
*
* The timing is not exact: Every time f is finished, it is rescheduled to run again after delta. If you need more
* accurate scheduling, don't use this method.
*/
void scheduleEvery(Function f, std::chrono::milliseconds delta);
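    /**
     * Illustrative sketch (not part of the interface): run a hypothetical
     * flushCaches() roughly every five minutes until the scheduler is stopped.
     *
     * scheduler.scheduleEvery([] { flushCaches(); }, std::chrono::minutes{5});
     *
     * Because each run is rescheduled only after the previous one finishes,
     * a long-running callback stretches the effective period beyond five minutes.
     */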
/**
* Mock the scheduler to fast forward in time.
* Iterates through items on taskQueue and reschedules them
* to be delta_seconds sooner.
*/
void MockForward(std::chrono::seconds delta_seconds);
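    /**
     * Illustrative sketch (test-only usage; `scheduler` and `callback` are
     * placeholders): make a task scheduled five minutes out become due now.
     *
     * scheduler.scheduleFromNow(callback, std::chrono::minutes{5});
     * scheduler.MockForward(std::chrono::minutes{5}); // the task is now due immediately
     */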
/**
* Services the queue 'forever'. Should be run in a thread.
*/
void serviceQueue();
/** Tell any threads running serviceQueue to stop as soon as the current task is done */
void stop()
{
WITH_LOCK(newTaskMutex, stopRequested = true);
newTaskScheduled.notify_all();
if (m_service_thread.joinable()) m_service_thread.join();
}
/** Tell any threads running serviceQueue to stop when there is no work left to be done */
void StopWhenDrained()
{
WITH_LOCK(newTaskMutex, stopWhenEmpty = true);
newTaskScheduled.notify_all();
if (m_service_thread.joinable()) m_service_thread.join();
}
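    /**
     * Illustrative contrast (a sketch, not prescriptive): at shutdown, either
     *
     * scheduler.StopWhenDrained(); // run everything already queued, then join the service thread
     *
     * or
     *
     * scheduler.stop(); // abandon remaining tasks once the current one finishes, then join
     */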
    /**
     * Returns the number of tasks waiting to be serviced,
     * and the scheduled times of the first and last tasks
     */
size_t getQueueInfo(std::chrono::system_clock::time_point& first,
std::chrono::system_clock::time_point& last) const;
/** Returns true if there are threads actively running in serviceQueue() */
bool AreThreadsServicingQueue() const;
private:
mutable Mutex newTaskMutex;
std::condition_variable newTaskScheduled;
std::multimap<std::chrono::system_clock::time_point, Function> taskQueue GUARDED_BY(newTaskMutex);
int nThreadsServicingQueue GUARDED_BY(newTaskMutex){0};
bool stopRequested GUARDED_BY(newTaskMutex){false};
bool stopWhenEmpty GUARDED_BY(newTaskMutex){false};
bool shouldStop() const EXCLUSIVE_LOCKS_REQUIRED(newTaskMutex) { return stopRequested || (stopWhenEmpty && taskQueue.empty()); }
};

/**
 * Class used by CScheduler clients which may schedule multiple jobs
 * that are required to be run serially. Jobs are not guaranteed to run
 * on the same thread, but no two jobs will be executed at the same time,
 * and memory will be release-acquire consistent (the scheduler internally
 * performs an acquire before invoking a callback and a release when it
 * finishes). In practice this means that a callback B() will be able to
 * observe all of the effects of a callback A() that executed before it.
 * A usage sketch follows the class definition below.
 */
class SingleThreadedSchedulerClient
{
private:
CScheduler* m_pscheduler;
RecursiveMutex m_cs_callbacks_pending;
std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_cs_callbacks_pending);
bool m_are_callbacks_running GUARDED_BY(m_cs_callbacks_pending) = false;
void MaybeScheduleProcessQueue();
void ProcessQueue();
public:
explicit SingleThreadedSchedulerClient(CScheduler* pschedulerIn) : m_pscheduler(pschedulerIn) {}
/**
* Add a callback to be executed. Callbacks are executed serially
* and memory is release-acquire consistent between callback executions.
* Practically, this means that callbacks can behave as if they are executed
* in order by a single thread.
*/
void AddToProcessQueue(std::function<void()> func);
    /**
     * Processes all remaining queue members on the calling thread, blocking until the queue is empty.
     * Must be called after the CScheduler has no remaining processing threads!
     */
void EmptyQueue();
size_t CallbacksPending();
};
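
/**
 * Illustrative sketch (assumes a CScheduler `scheduler` whose queue is already
 * being serviced): callbacks queued through one client never overlap, and a
 * later callback observes the effects of an earlier one.
 *
 * SingleThreadedSchedulerClient client(&scheduler);
 * int counter{0};
 * client.AddToProcessQueue([&counter] { counter += 1; });         // A
 * client.AddToProcessQueue([&counter] { assert(counter == 1); }); // B runs after A
 */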

#endif // BITCOIN_SCHEDULER_H