#include <sys/resource.h>

/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
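// Worked example (illustrative, not part of the original source): align_up
// assumes `align` is a power of two; adding align-1 bumps x past the next
// boundary and the mask ~(align-1) truncates back down to it:
//   align_up(13, 16) == 16;  align_up(32, 16) == 32;  align_up(0, 16) == 0;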
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
    // Create the used-chunk, taking its space from the end of the free-chunk
    const size_t size_remaining = size_ptr_it->first - size;
    char* const free_chunk = static_cast<char*>(size_ptr_it->second);
    auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first;
    if (size_ptr_it->first == size) {
        // whole chunk is used up
        chunks_free.erase(free_chunk);
    } // else: the remaining space stays behind as a smaller free chunk
    return allocated->first;
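// Context sketch (illustrative, not part of the original source): size_ptr_it
// comes from a best-fit lookup in size_to_free_chunk, a multimap sorted by
// chunk size, so the search costs O(log n):
//   auto size_ptr_it = size_to_free_chunk.lower_bound(size);
//   if (size_ptr_it == size_to_free_chunk.end())
//       return nullptr; // no free chunk is large enough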
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    auto freed = std::make_pair(static_cast<char*>(i->first), i->second);
    // Coalesce with an adjacent free chunk that ends at freed.first
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
    }
    // Coalesce with an adjacent free chunk that begins where freed ends
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
    }
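// Picture (illustrative, not part of the original source): chunks_free_end is
// keyed by a chunk's end address and chunks_free by its begin address, so both
// neighbours of the freed chunk are found directly by address and merged:
//   before free(B):  [ A free ][ B used ][ C free ]
//   after  free(B):  [      one coalesced free chunk     ]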
    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second->first;
    r.total = r.used + r.free;
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool* lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock keeps pages out of the pagefile in practice, but this is not guaranteed
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
    size_t min, max;
    if (GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) return min;
    return std::numeric_limits<size_t>::max();
}
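// Usage sketch (illustrative, not part of the original source):
//   Win32LockedPageAllocator alloc;
//   bool locked = false;
//   void* p = alloc.AllocateLocked(4096, &locked);
//   // `locked` reports whether VirtualLock succeeded; the pool proceeds
//   // either way and surfaces the failure through a callback.
//   if (p) alloc.FreeLocked(p, 4096);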
    void FreeLocked(void* addr, size_t len) override;
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif

    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    *lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP); // keep locked pages out of core dumps
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY) {
        return rlim.rlim_cur;
    }
#endif
    return std::numeric_limits<size_t>::max();
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);
    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) return addr;
    }
    // If that fails, create a new arena and allocate from it
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
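// Context sketch (restating a guard elided from this listing): requests that
// cannot fit in a single arena are rejected up front, so an allocation never
// spans arenas:
//   if (size == 0 || size > ARENA_SIZE)
//       return nullptr;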
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // Linear search for the arena that owns this address
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena: arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used; r.free += i.free; r.total += i.total;
        r.chunks_used += i.chunks_used; r.chunks_free += i.chunks_free;
    }
    size = std::min(size, limit); // cap the first arena by the process's lock limit
    void *addr = allocator->AllocateLocked(size, &locked);
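// Illustrative note (not part of the original source): capping the first
// arena by GetLimit() makes it likely that at least that arena can be fully
// locked. For example, with a 256 KiB ARENA_SIZE and `ulimit -l` at 64 KiB,
// the first arena is created at 64 KiB instead of failing to lock.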
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
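// Usage sketch (illustrative, not part of the original source): callers reach
// this through the singleton accessor, which runs CreateInstance() once and
// then returns the cached _instance pointer:
//   LockedPoolManager &pool = LockedPoolManager::Instance();
//   void *p = pool.alloc(32);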
Member reference

Arena
    Arena(void *base, size_t size, size_t alignment)
    void* alloc(size_t size)
        Allocate size bytes from this arena.
    void free(void *ptr)
        Free a previously allocated chunk of memory.
    Stats stats() const
        Get arena usage statistics.
    void* base
        Base address of arena.
    size_t alignment
        Minimum chunk alignment.
    std::unordered_map<void*, size_t> chunks_used
        Map from begin of used chunk to its size.
    ChunkToSizeMap chunks_free
        Map from begin of free chunk to its node in size_to_free_chunk.
    ChunkToSizeMap chunks_free_end
        Map from end of free chunk to its node in size_to_free_chunk.
    SizeToChunkSortedMap size_to_free_chunk
        Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
    static size_t align_up(size_t x, size_t align)
        Align up to power of 2.

LockedPageAllocator
    OS-dependent allocation and deallocation of locked/pinned memory pages.
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0
        Allocate and lock memory pages.
    virtual void FreeLocked(void *addr, size_t len) = 0
        Unlock and free memory pages.
    virtual size_t GetLimit() = 0
        Get the total limit on the amount of memory that may be locked by this process, in bytes.

Win32LockedPageAllocator, PosixLockedPageAllocator
    PosixLockedPageAllocator is the LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
    PosixLockedPageAllocator()
    void* AllocateLocked(size_t len, bool *lockingSuccess) override
        Allocate and lock memory pages.
    void FreeLocked(void *addr, size_t len) override
        Unlock and free memory pages.
    size_t GetLimit() override
        Get the total limit on the amount of memory that may be locked by this process, in bytes.

LockedPool
    Pool for locked memory chunks.
    LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr)
        Create a new LockedPool.
    void* alloc(size_t size)
        Allocate size bytes from this pool.
    void free(void *ptr)
        Free a previously allocated chunk of memory.
    Stats stats() const
        Get pool usage statistics.
    bool new_arena(size_t size, size_t align)
    LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
    std::list<LockedPageArena> arenas
    std::unique_ptr<LockedPageAllocator> allocator
    LockingFailed_Callback lf_cb
    std::mutex mutex
        Mutex protects access to this pool's data structures, including arenas.
    size_t cumulative_bytes_locked
    static const size_t ARENA_SIZE
        Size of one arena of locked memory.
    static const size_t ARENA_ALIGN
        Chunk alignment.

LockedPoolManager
    Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates.
    LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator)
    static void CreateInstance()
        Create a new LockedPoolManager specialized to the OS.
    static bool LockingFailed()
        Called when locking fails, warn the user here.
    static LockedPoolManager* _instance

Free functions
    void memory_cleanse(void *ptr, size_t len)
        Secure overwrite a buffer (possibly containing secret data) with zero-bytes.
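Usage sketch (illustrative, not from the original page; this mirrors roughly how a secure allocator would drive the pool):

    void* p = LockedPoolManager::Instance().alloc(32); // space for 32 secret bytes
    // ... use p for key material ...
    memory_cleanse(p, 32);                             // wipe before returning the chunk
    LockedPoolManager::Instance().free(p);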