#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif
// ...
#include <sys/resource.h> // for getrlimit
// ...

/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
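A quick numeric illustration of the mask arithmetic (not part of the file; the bit-trick is only valid when align is a power of two):

// Illustration only: rounding up to a 16-byte boundary.
static_assert(((100 + 16 - 1) & ~(16 - 1)) == 112, "align_up(100, 16) == 112");
static_assert(((112 + 16 - 1) & ~(16 - 1)) == 112, "already aligned, unchanged");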
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // ...
}
void* Arena::alloc(size_t size)
{
    // ...
    // Create the used-chunk, taking its space from the end of the free-chunk
    const size_t size_remaining = size_ptr_it->first - size;
    char* const free_chunk = static_cast<char*>(size_ptr_it->second);
    auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first;
    // ...
    if (size_ptr_it->first == size) {
        // whole free chunk is used up
        // ...
    }
    // ...
    return allocated->first;
}
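The lower_bound() lookup that precedes this fragment (elided in the listing) is what makes allocation O(log(n)) best-fit: free chunks are keyed by size in a sorted multimap, and the smallest chunk that still fits is picked. A standalone sketch of that idea, with hypothetical chunk sizes:

#include <cassert>
#include <cstddef>
#include <map>

int main()
{
    // Free chunks keyed by size, mirroring size_to_free_chunk above.
    std::multimap<std::size_t, char*> free_by_size{
        {64, nullptr}, {128, nullptr}, {512, nullptr}};
    // lower_bound() returns the smallest chunk that can hold the request.
    auto it = free_by_size.lower_bound(96);
    assert(it != free_by_size.end() && it->first == 128); // best fit for 96 bytes
}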
void Arena::free(void *ptr)
{
    // ...
    // Remove chunk from used map
    auto i = chunks_used.find(ptr);
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    auto freed = std::make_pair(static_cast<char*>(i->first), i->second);
    // ...
    // Coalesce with the free chunk that ends where the freed chunk begins
    freed.first -= prev->second->first;
    freed.second += prev->second->first;
    // ...
    // Coalesce with the free chunk that begins where the freed chunk ends
    auto next = chunks_free.find(freed.first + freed.second);
    // ...
    freed.second += next->second->first;
    // ...
}
Arena::Stats Arena::stats() const
{
    // ...
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second->first;
    r.total = r.used + r.free;
    return r;
}
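Taken together, alloc(), free() and stats() make Arena usable on its own over any backing buffer. A minimal sketch, assuming the public Arena interface from support/lockedpool.h in this tree (the header path and buffer size are illustrative):

#include <support/lockedpool.h> // assumed header location in this tree

#include <cassert>
#include <stdexcept>
#include <vector>

int main()
{
    std::vector<char> backing(4096);
    Arena arena(backing.data(), backing.size(), 16); // 16-byte chunk alignment

    void* a = arena.alloc(100); // rounded up internally to 112 bytes
    void* b = arena.alloc(200); // rounded up internally to 208 bytes
    assert(a && b);
    assert(arena.stats().used == 112 + 208);

    arena.free(a);
    arena.free(b); // neighbouring free chunks are coalesced again
    assert(arena.stats().free == arena.stats().total);

    bool caught = false;
    try {
        arena.free(a); // double free is detected
    } catch (const std::runtime_error&) {
        caught = true;
    }
    assert(caught);
}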
#ifdef ARENA_DEBUG
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
#endif
#ifdef WIN32
/** LockedPageAllocator specialized for Windows. */
class Win32LockedPageAllocator : public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // Try to lock the pages into RAM so they are not swapped out
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
    size_t min, max;
    if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) {
        return min;
    }
    return std::numeric_limits<size_t>::max();
}
#endif
#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be special
 * snowflakes.
 */
class PosixLockedPageAllocator : public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine page size
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    // ...
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    *lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP); // exclude locked pages from core dumps
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
    return addr;
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif
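GetLimit() consults RLIMIT_MEMLOCK because the soft limit is often small by default (historically 64 KiB on Linux), and the first arena is capped to it so that it can actually be locked. A standalone check of the current limit, for illustration only:

#include <cstdio>
#include <sys/resource.h>

int main()
{
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0) return 1;
    if (rlim.rlim_cur == RLIM_INFINITY) {
        std::printf("no mlock limit\n");
    } else {
        std::printf("soft mlock limit: %llu bytes\n",
                    static_cast<unsigned long long>(rlim.rlim_cur));
    }
    return 0;
}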
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in)
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);
    // ...
    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new arena and allocate from it
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // ...
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    // ...
    for (const auto &arena: arenas) {
        // ... accumulate each arena's stats into the pool totals
    }
    // ...
}
bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    // Cap the size of the first arena by the process's locked-memory limit,
    // so that at least the first arena can be locked
    // ...
    size = std::min(size, limit);
    // ...
    void *addr = allocator->AllocateLocked(size, &locked);
    // ...
}
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
    // In LockedPoolManager::CreateInstance(): pick the allocator for this OS
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
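Outside of this file, callers normally go through LockedPoolManager::Instance() rather than constructing a pool themselves. For illustration, here is a toy LockedPageAllocator driving a LockedPool directly; the class name and the decision to never really lock pages are assumptions for the sketch, not part of this file:

#include <support/lockedpool.h> // assumed header location in this tree

#include <cstdlib>
#include <memory>

// Toy allocator that never actually locks pages; purely to exercise the
// LockedPool interface (the real Win32/POSIX allocators are defined above).
class ToyLockedPageAllocator final : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool *lockingSuccess) override
    {
        *lockingSuccess = false; // pretend locking always fails
        return std::calloc(1, len);
    }
    void FreeLocked(void* addr, size_t len) override { std::free(addr); }
    size_t GetLimit() override { return 0; } // no locked memory available
};

int main()
{
    // A callback returning true means "user warned, continue unlocked".
    LockedPool pool(std::make_unique<ToyLockedPageAllocator>(),
                    [] { return true; });
    void* p = pool.alloc(32);
    pool.free(p);
}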
Declarations referenced above (briefs from the class documentation):

size_t alignment
    Minimum chunk alignment.
std::mutex mutex
    Mutex protects access to this pool's data structures, including arenas.
void *AllocateLocked(size_t len, bool *lockingSuccess) override
    Allocate and lock memory pages.
std::list<LockedPageArena> arenas
static const size_t ARENA_ALIGN
    Chunk alignment.
std::unordered_map<void*, size_t> chunks_used
    Map from begin of used chunk to its size.
virtual void *AllocateLocked(size_t len, bool *lockingSuccess) = 0
    Allocate and lock memory pages.
LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr)
    Create a new LockedPool.
ChunkToSizeMap chunks_free
    Map from begin of free chunk to its node in size_to_free_chunk.
LockingFailed_Callback lf_cb
SizeToChunkSortedMap size_to_free_chunk
    Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
class LockedPageAllocator
    OS-dependent allocation and deallocation of locked/pinned memory pages.
class LockedPoolManager
    Singleton class to keep track of locked (i.e., non-swappable) memory, for use in std::allocator templates.
void *alloc(size_t size)
    Allocate size bytes from this arena.
void memory_cleanse(void *ptr, size_t len)
    Secure overwrite a buffer (possibly containing secret data) with zero-bytes.
void FreeLocked(void *addr, size_t len) override
    Unlock and free memory pages.
Stats stats() const
    Get arena usage statistics.
static LockedPoolManager *_instance
virtual void FreeLocked(void *addr, size_t len) = 0
    Unlock and free memory pages.
static size_t align_up(size_t x, size_t align)
    Align up to power of 2.
static const size_t ARENA_SIZE
    Size of one arena of locked memory.
static bool LockingFailed()
    Called when locking fails, warn the user here.
class LockedPool
    Pool for locked memory chunks.
size_t GetLimit() override
    Get the total limit on the amount of memory that may be locked by this process, in bytes.
void free(void *ptr)
    Free a previously allocated chunk of memory.
virtual size_t GetLimit() = 0
    Get the total limit on the amount of memory that may be locked by this process, in bytes.
class PosixLockedPageAllocator
    LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
PosixLockedPageAllocator()
void *base
    Base address of arena.
bool new_arena(size_t size, size_t align)
LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator)
Stats stats() const
    Get pool usage statistics.
static void CreateInstance()
    Create a new LockedPoolManager specialized to the OS.
size_t cumulative_bytes_locked
Arena(void *base, size_t size, size_t alignment)
ChunkToSizeMap chunks_free_end
    Map from end of free chunk to its node in size_to_free_chunk.
std::unique_ptr<LockedPageAllocator> allocator