// ...
#include <sys/resource.h>
// ...

/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
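// ---- Illustration (not part of the original file) ----
// align_up rounds x up to the next multiple of a power-of-two alignment:
// (x + align - 1) overshoots into the next aligned block, and ~(align - 1)
// masks the low bits back off. A few sample values:
//
//   align_up(13, 16)  == 16
//   align_up(32, 16)  == 32    // already-aligned values are unchanged
//   align_up(1, 4096) == 4096
//
// Note that the mask trick is only valid when `align` is a power of two.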
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // ...
}
void* Arena::alloc(size_t size)
{
    // ...
    // Create the used chunk, taking its space from the end of the best-fit
    // free chunk; size_to_free_chunk is sorted by size, so size_ptr_it points
    // at the smallest free chunk that still fits
    const size_t size_remaining = size_ptr_it->first - size;
    char* const free_chunk = static_cast<char*>(size_ptr_it->second);
    auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first;
    // ...
    if (size_ptr_it->first == size) {
        // The free chunk is consumed exactly; no remainder to re-insert
        // ...
    }
    // ...
    return allocated->first;
}
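// ---- Illustration (not part of the original file) ----
// A minimal sketch of the best-fit lookup used above, with hypothetical
// names: because the map is keyed by size, lower_bound(size) lands on the
// smallest free chunk that can satisfy the request in O(log n).
#include <cstddef>
#include <map>
static void* best_fit_demo(std::multimap<std::size_t, void*>& free_by_size,
                           std::size_t size)
{
    auto it = free_by_size.lower_bound(size); // first chunk with key >= size
    if (it == free_by_size.end()) {
        return nullptr;                       // no free chunk is large enough
    }
    void* chunk = it->second;
    free_by_size.erase(it);                   // caller re-inserts any remainder
    return chunk;
}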
void Arena::free(void *ptr)
{
    // ...
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    auto freed = std::make_pair(static_cast<char*>(i->first), i->second);
    // ...
    // Coalesce with a free chunk that ends exactly where the freed one begins
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
        // ...
    }
    // Coalesce with a free chunk that begins exactly where the freed one ends
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
        // ...
    }
    // ...
}
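// ---- Illustration (not part of the original file) ----
// Coalescing sketch with hypothetical names: keeping one map keyed by chunk
// begin and one keyed by chunk end lets both free neighbors of a freed range
// be found in O(log n) each.
#include <cstddef>
#include <map>
struct FreeRange { char* begin; std::size_t size; };
static FreeRange coalesce_demo(std::map<char*, std::size_t>& free_by_begin,
                               std::map<char*, std::size_t>& free_by_end,
                               FreeRange freed)
{
    auto prev = free_by_end.find(freed.begin);  // free chunk ending where we begin?
    if (prev != free_by_end.end()) {
        freed.begin -= prev->second;            // grow the freed range backwards
        freed.size += prev->second;
    }
    auto next = free_by_begin.find(freed.begin + freed.size); // one beginning where we end?
    if (next != free_by_begin.end()) {
        freed.size += next->second;             // and forwards
    }
    return freed; // caller erases the merged neighbors and inserts `freed`
}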
Arena::Stats Arena::stats() const
{
    // ...
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second->first;
    r.total = r.used + r.free;
    return r;
}
// Debugging aid: dump one chunk per line
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
/** LockedPageAllocator specialized for Windows */
class Win32LockedPageAllocator : public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    // ...
    void FreeLocked(void* addr, size_t len) override;
    // ...
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is best-effort: in practice it keeps the pages out of
        // the pagefile, but it does not hard-guarantee this
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len); // wipe potentially secret data before unlocking
    VirtualUnlock(const_cast<void*>(addr), len);
}
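// ---- Illustration (not part of the original file) ----
// memory_cleanse is declared elsewhere in this codebase ("Secure overwrite a
// buffer (possibly containing secret data) with zero-bytes"). A common way to
// implement such a cleanse, sketched here for context, is a memset followed
// by a compiler barrier so the store cannot be optimized away; the barrier
// shown is the GCC/Clang idiom and is an assumption, not this file's code:
#include <cstring>
static void cleanse_demo(void* ptr, std::size_t len)
{
    std::memset(ptr, 0, len);
    // Tell the compiler the memory pointed to by ptr is observed, so the
    // memset above is not elided as a dead store (GCC/Clang only):
    __asm__ __volatile__("" : : "r"(ptr) : "memory");
}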
size_t Win32LockedPageAllocator::GetLimit()
{
    size_t min, max;
    if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) {
        return min; // minimum working set size ~= amount we can expect to lock
    }
    return std::numeric_limits<size_t>::max();
}
/** LockedPageAllocator specialized for OSes that don't try to be special snowflakes */
class PosixLockedPageAllocator : public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    // ...
    void FreeLocked(void* addr, size_t len) override;
    // ...
};
PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine the system page size
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    // ...
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    // ...
    *lockingSuccess = mlock(addr, len) == 0;
    // Keep the locked pages out of core dumps where the OS supports it
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
    return addr;
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
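// ---- Illustration (not part of the original file) ----
// A sketch of how a caller might probe the same limit directly; on Linux the
// soft limit corresponds to the shell's `ulimit -l` (kilobytes there, bytes
// here). The function name is hypothetical:
#include <cstdio>
#include <sys/resource.h>
static void print_memlock_limit_demo()
{
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur == RLIM_INFINITY) {
            std::printf("mlock limit: unlimited\n");
        } else {
            std::printf("mlock limit: %llu bytes\n",
                        (unsigned long long)rlim.rlim_cur);
        }
    }
}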
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in)
    : allocator(std::move(allocator_in)), lf_cb(lf_cb_in)
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);
    // ...
    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new arena and allocate from it
    // ...
    return arenas.back().alloc(size);
}
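// ---- Illustration (not part of the original file) ----
// Hypothetical direct use of a LockedPool (real callers normally go through
// the secure allocator / LockedPoolManager instead):
//
//   void* p = pool.alloc(32);   // locked memory up to ARENA_SIZE, else nullptr
//   if (p) {
//       // ... store secret key material in p ...
//       pool.free(p);           // returns the chunk to its arena
//   }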
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // ...
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    // ...
    // Sum the per-arena statistics into the pool-wide totals
    for (const auto &arena: arenas) {
        // ...
    }
    // ...
}
// LockedPool::new_arena: for the first arena, cap the size at the process
// locking limit so that at least the first arena can be fully locked
    // ...
        size = std::min(size, limit);
    // ...
    void *addr = allocator->AllocateLocked(size, &locked);
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
// LockedPoolManager::CreateInstance: choose the page allocator for this OS
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
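// ---- Illustration (not part of the original file) ----
// Sketch of typical consumption of the singleton; the Instance() accessor
// name is assumed from the manager's declaration, which this excerpt omits:
//
//   void* key_buf = LockedPoolManager::Instance().alloc(64);
//   if (key_buf) {
//       // ... pages backing key_buf stay out of swap if locking succeeded ...
//       LockedPoolManager::Instance().free(key_buf);
//   }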