// Copyright (c) 2016-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <stddef.h>
#include <stdint.h>
#include <list>
#include <map>
#include <mutex>
#include <memory>
#include <unordered_map>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() {}
    /** Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns 0 in case of allocation failure.
     *
     * If the memory pages cannot be locked, the memory is still returned,
     * but the lockingSuccess flag will be set to false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;

    /** Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void* addr, size_t len) = 0;

    /** Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return the maximum value of size_t if there is no limit
     * or the limit is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};
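
/* Illustrative sketch (not part of this header): a hypothetical POSIX-backed
 * implementation of the interface above could be built on mmap()/mlock().
 * The class name is an assumption for the example; the real OS-specific
 * implementations live in lockedpool.cpp.
 *
 *   #include <sys/mman.h>
 *   #include <sys/resource.h>
 *   #include <unistd.h>
 *   #include <cstring>
 *   #include <limits>
 *
 *   class ExamplePosixLockedPageAllocator : public LockedPageAllocator
 *   {
 *   public:
 *       void* AllocateLocked(size_t len, bool* lockingSuccess) override
 *       {
 *           // Round the request up to a multiple of the system page size.
 *           size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
 *           len = ((len + page_size - 1) / page_size) * page_size;
 *           void* addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *           if (addr == MAP_FAILED) return nullptr; // allocation failure -> 0
 *           // Return the memory even when locking fails; report it via the flag.
 *           *lockingSuccess = mlock(addr, len) == 0;
 *           return addr;
 *       }
 *       void FreeLocked(void* addr, size_t len) override
 *       {
 *           // Clear before unlocking; a real implementation would use a cleanse
 *           // routine that cannot be optimized away.
 *           std::memset(addr, 0, len);
 *           munlock(addr, len);
 *           munmap(addr, len);
 *       }
 *       size_t GetLimit() override
 *       {
 *           struct rlimit rlim;
 *           if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY)
 *               return rlim.rlim_cur;
 *           return std::numeric_limits<size_t>::max(); // no limit, or limit unknown
 *       }
 *   };
 */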

/** An arena manages a contiguous region of memory by dividing it into
 * chunks.
 */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    Arena(const Arena& other) = delete; // non construction-copyable
    Arena& operator=(const Arena&) = delete; // non copyable

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Allocate size bytes from this arena.
     * Returns a pointer on success, or 0 if the arena is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Throws std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;

#ifdef ARENA_DEBUG
    void walk() const;
#endif

    /** Return whether a pointer points inside this arena.
     * This checks base <= ptr < (base+size), so it should only be used for
     * chunk starting addresses (which lie inside the range), not for
     * one-past-the-end addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
    typedef std::multimap<size_t, char*> SizeToChunkSortedMap;
    /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */
    SizeToChunkSortedMap size_to_free_chunk;

    typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;
    /** Map from begin of free chunk to its node in size_to_free_chunk */
    ChunkToSizeMap chunks_free;
    /** Map from end of free chunk to its node in size_to_free_chunk */
    ChunkToSizeMap chunks_free_end;

    /** Map from begin of used chunk to its size */
    std::unordered_map<char*, size_t> chunks_used;

    /** Base address of arena */
    char* base;
    /** End address of arena */
    char* end;
    /** Minimum chunk alignment */
    size_t alignment;
};
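
/* Illustrative usage sketch (not part of this header): an Arena can manage any
 * contiguous buffer; the LockedPool below layers OS page locking on top of it.
 * The buffer and sizes here are arbitrary example values.
 *
 *   std::vector<char> buffer(4096);
 *   Arena arena(buffer.data(), buffer.size(), 16);   // 16-byte chunk alignment
 *
 *   void* p = arena.alloc(100);        // best-fit chunk, rounded up to alignment
 *   Arena::Stats st = arena.stats();   // st.chunks_used == 1 at this point
 *   arena.free(p);                     // freed chunk is coalesced with its neighbours
 */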

/** Pool for locked memory chunks.
 *
 * To prevent sensitive key data from being swapped to disk, the memory in this pool
 * is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one arena
 * but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are kept separate from the
 * managed memory. This conserves precious locked memory, and is possible because the
 * sizes and base addresses of objects are not in themselves sensitive information.
 * In some operating systems the amount of memory that can be locked is small.
 */
class LockedPool
{
public:
    /** Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. Another compromise. Setting this too high wastes
     * memory; setting it too low increases fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;

    /** Callback when allocation succeeds but locking fails.
     */
    typedef bool (*LockingFailed_Callback)();

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator,
     * so you can only instantiate this with LockedPool(std::move(...)).
     *
     * The second argument is an optional callback that is invoked when locking a newly
     * allocated arena fails. If this callback is provided and returns false, the
     * allocation fails (hard fail); if it returns true, the allocation proceeds, but
     * the callback may emit a warning.
     */
    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr);
    ~LockedPool();

    LockedPool(const LockedPool& other) = delete; // non construction-copyable
    LockedPool& operator=(const LockedPool&) = delete; // non copyable

    /** Allocate size bytes from this pool.
     * Returns a pointer on success, or 0 if the pool is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Throws std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get pool usage statistics */
    Stats stats() const;
private:
    std::unique_ptr<LockedPageAllocator> allocator;

    /** Create an arena from locked pages */
    class LockedPageArena: public Arena
    {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
        ~LockedPageArena();
    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };

    bool new_arena(size_t size, size_t align);

    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked;
    /** Mutex protects access to this pool's data structures, including arenas.
     */
    mutable std::mutex mutex;
};
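
/* Illustrative usage sketch (not part of this header): constructing a pool with an
 * OS-specific allocator and a callback that decides how to handle locking failures.
 * ExamplePosixLockedPageAllocator refers to the hypothetical sketch above; in the
 * real codebase the OS-specific allocators are defined in lockedpool.cpp.
 *
 *   static bool ExampleLockingFailedCallback()
 *   {
 *       // Returning true lets the allocation proceed with unlocked (swappable)
 *       // memory; returning false makes the corresponding alloc() call fail instead.
 *       return true;
 *   }
 *
 *   LockedPool pool(std::unique_ptr<LockedPageAllocator>(new ExamplePosixLockedPageAllocator()),
 *                   &ExampleLockingFailedCallback);
 *   void* key_buf = pool.alloc(32);   // e.g. room for a 256-bit secret key
 *   pool.free(key_buf);
 */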

/**
 * Singleton class to keep track of locked (i.e. non-swappable) memory, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (e.g. MSVC's
 * vector<T> implementation, which allocates 1 byte of memory in the allocator).
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPoolManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPoolManager also be
 * static-initialized, it is created on demand.
 */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager& Instance()
    {
        std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }

private:
    explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);

    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails; warn the user here */
    static bool LockingFailed();

    static LockedPoolManager* _instance;
    static std::once_flag init_flag;
};
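
/* Illustrative usage sketch (not part of this header): an STL-style allocator can
 * draw its storage from the singleton roughly along these lines. The function names
 * are hypothetical and only serve to show the Instance()/alloc()/free() flow.
 *
 *   template <typename T>
 *   T* example_secure_allocate(std::size_t n)
 *   {
 *       void* p = LockedPoolManager::Instance().alloc(sizeof(T) * n);
 *       if (!p) throw std::bad_alloc();
 *       return static_cast<T*>(p);
 *   }
 *
 *   template <typename T>
 *   void example_secure_deallocate(T* p, std::size_t)
 *   {
 *       LockedPoolManager::Instance().free(p);   // the pool tracks chunk sizes itself
 *   }
 */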

#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H