
Handle locked pages more robustly (Fixes issue #1462)

Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
those functions are used naively. This commit adds a class "LockedPageManager"
that simulates stacking memory locks by keeping a counter per page.
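
To illustrate the hazard being fixed, a minimal sketch (illustrative only, not part of this
commit; it assumes the two small allocations happen to land on the same memory page, which
malloc() does not guarantee):

    #include <sys/mman.h>
    #include <stdlib.h>

    int main()
    {
        char *secret_a = (char*)malloc(16);
        char *secret_b = (char*)malloc(16);
        mlock(secret_a, 16);   // page containing secret_a is now resident and locked
        mlock(secret_b, 16);   // same page: the second lock does not stack
        munlock(secret_a, 16); // unlocks the whole page...
        // ...so secret_b may now be paged out to swap despite its own mlock() call
        free(secret_a);
        free(secret_b);
        return 0;
    }

With the per-page counter introduced below, the second lock increments the page's count to two,
and the first unlock merely decrements it, so the page stays locked until both users are done.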
Author: Wladimir J. van der Laan
Commit: e95568b78d
Changed files:
  1. src/allocators.h (165 lines changed)
  2. src/test/allocator_tests.cpp (115 lines changed)
  3. src/util.cpp (2 lines changed)

src/allocators.h

@@ -7,6 +7,8 @@
 #include <string.h>
 #include <string>
+#include <boost/thread/mutex.hpp>
+#include <map>
 #ifdef WIN32
 #ifdef _WIN32_WINNT
@@ -22,23 +24,156 @@
 // Note that VirtualLock does not provide this as a guarantee on Windows,
 // but, in practice, memory that has been VirtualLock'd almost never gets written to
 // the pagefile except in rare circumstances where memory is extremely low.
-#define mlock(p, n) VirtualLock((p), (n));
-#define munlock(p, n) VirtualUnlock((p), (n));
 #else
 #include <sys/mman.h>
-#include <limits.h>
-/* This comes from limits.h if it's not defined there set a sane default */
-#ifndef PAGESIZE
-#include <unistd.h>
-#define PAGESIZE sysconf(_SC_PAGESIZE)
+#include <limits.h> // for PAGESIZE
+#include <unistd.h> // for sysconf
 #endif
-#define mlock(a,b) \
-  mlock(((void *)(((size_t)(a)) & (~((PAGESIZE)-1)))),\
-  (((((size_t)(a)) + (b) - 1) | ((PAGESIZE) - 1)) + 1) - (((size_t)(a)) & (~((PAGESIZE) - 1))))
-#define munlock(a,b) \
-  munlock(((void *)(((size_t)(a)) & (~((PAGESIZE)-1)))),\
-  (((((size_t)(a)) + (b) - 1) | ((PAGESIZE) - 1)) + 1) - (((size_t)(a)) & (~((PAGESIZE) - 1))))
+
+/**
+ * Thread-safe class to keep track of locked (i.e., non-swappable) memory pages.
+ *
+ * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
+ * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
+ * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
+ *
+ * @note By using a map from each page base address to lock count, this class is optimized for
+ * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
+ * something like an interval tree would be the preferred data structure.
+ */
+template <class Locker> class LockedPageManagerBase
+{
+public:
+    LockedPageManagerBase(size_t page_size):
+        page_size(page_size)
+    {
+        // Determine bitmask for extracting page from address
+        assert(!(page_size & (page_size-1))); // size must be power of two
+        page_mask = ~(page_size - 1);
+    }
+
+    // For all pages in affected range, increase lock count
+    void LockRange(void *p, size_t size)
+    {
+        boost::mutex::scoped_lock lock(mutex);
+        if(!size) return;
+        const size_t base_addr = reinterpret_cast<size_t>(p);
+        const size_t start_page = base_addr & page_mask;
+        const size_t end_page = (base_addr + size - 1) & page_mask;
+        for(size_t page = start_page; page <= end_page; page += page_size)
+        {
+            Histogram::iterator it = histogram.find(page);
+            if(it == histogram.end()) // Newly locked page
+            {
+                locker.Lock(reinterpret_cast<void*>(page), page_size);
+                histogram.insert(std::make_pair(page, 1));
+            }
+            else // Page was already locked; increase counter
+            {
+                it->second += 1;
+            }
+        }
+    }
+
+    // For all pages in affected range, decrease lock count
+    void UnlockRange(void *p, size_t size)
+    {
+        boost::mutex::scoped_lock lock(mutex);
+        if(!size) return;
+        const size_t base_addr = reinterpret_cast<size_t>(p);
+        const size_t start_page = base_addr & page_mask;
+        const size_t end_page = (base_addr + size - 1) & page_mask;
+        for(size_t page = start_page; page <= end_page; page += page_size)
+        {
+            Histogram::iterator it = histogram.find(page);
+            assert(it != histogram.end()); // Cannot unlock an area that was not locked
+            // Decrease counter for page; when it is zero, the page will be unlocked
+            it->second -= 1;
+            if(it->second == 0) // Nothing on the page anymore that keeps it locked
+            {
+                // Unlock page and remove the count from histogram
+                locker.Unlock(reinterpret_cast<void*>(page), page_size);
+                histogram.erase(it);
+            }
+        }
+    }
+
+    // Get number of locked pages for diagnostics
+    int GetLockedPageCount()
+    {
+        boost::mutex::scoped_lock lock(mutex);
+        return histogram.size();
+    }
+
+private:
+    Locker locker;
+    boost::mutex mutex;
+    size_t page_size, page_mask;
+    // map of page base address to lock count
+    typedef std::map<size_t,int> Histogram;
+    Histogram histogram;
+};
+
+/** Determine system page size in bytes */
+static inline size_t GetSystemPageSize()
+{
+    size_t page_size;
+#if defined(WIN32)
+    SYSTEM_INFO sSysInfo;
+    GetSystemInfo(&sSysInfo);
+    page_size = sSysInfo.dwPageSize;
+#elif defined(PAGESIZE) // defined in limits.h
+    page_size = PAGESIZE;
+#else // assume some POSIX OS
+    page_size = sysconf(_SC_PAGESIZE);
+#endif
+    return page_size;
+}
+
+/**
+ * OS-dependent memory page locking/unlocking.
+ * Defined as policy class to make stubbing for test possible.
+ */
+class MemoryPageLocker
+{
+public:
+    /** Lock memory pages.
+     * addr and len must each be a multiple of the system page size.
+     */
+    bool Lock(const void *addr, size_t len)
+    {
+#ifdef WIN32
+        return VirtualLock(const_cast<void*>(addr), len);
+#else
+        return mlock(addr, len) == 0;
+#endif
+    }
+
+    /** Unlock memory pages.
+     * addr and len must each be a multiple of the system page size.
+     */
+    bool Unlock(const void *addr, size_t len)
+    {
+#ifdef WIN32
+        return VirtualUnlock(const_cast<void*>(addr), len);
+#else
+        return munlock(addr, len) == 0;
+#endif
+    }
+};
+
+/**
+ * Singleton class to keep track of locked (i.e., non-swappable) memory pages, for use in
+ * std::allocator templates.
+ */
+class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
+{
+public:
+    static LockedPageManager instance; // instantiated in util.cpp
+private:
+    LockedPageManager():
+        LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
+    {}
+};
 
 //
 // Allocator that locks its contents from being paged
@@ -69,7 +204,7 @@ struct secure_allocator : public std::allocator<T>
         T *p;
         p = std::allocator<T>::allocate(n, hint);
         if (p != NULL)
-            mlock(p, sizeof(T) * n);
+            LockedPageManager::instance.LockRange(p, sizeof(T) * n);
         return p;
     }
@@ -78,7 +213,7 @@ struct secure_allocator : public std::allocator<T>
         if (p != NULL)
         {
             memset(p, 0, sizeof(T) * n);
-            munlock(p, sizeof(T) * n);
+            LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
         }
         std::allocator<T>::deallocate(p, n);
     }
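
For context, the allocator above is what client code sees; a hypothetical usage sketch (the
SecureString typedef is illustrative and not part of this diff):

    #include <string>

    // A string type whose buffer is kept out of swap and zeroed on free
    typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;

    void Example()
    {
        SecureString passphrase = "correct horse battery staple";
        // allocate() locked the buffer's pages via LockedPageManager;
        // deallocate() will memset them to zero and unlock them again
    }

Because locking is now reference-counted per page, two such containers whose buffers share a
page no longer unlock each other's memory when one of them is destroyed.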

src/test/allocator_tests.cpp

@@ -0,0 +1,115 @@
+#include <boost/test/unit_test.hpp>
+
+#include "init.h"
+#include "main.h"
+#include "util.h"
+
+BOOST_AUTO_TEST_SUITE(allocator_tests)
+
+// Dummy memory page locker for platform-independent tests
+static const void *last_lock_addr, *last_unlock_addr;
+static size_t last_lock_len, last_unlock_len;
+class TestLocker
+{
+public:
+    bool Lock(const void *addr, size_t len)
+    {
+        last_lock_addr = addr;
+        last_lock_len = len;
+        return true;
+    }
+    bool Unlock(const void *addr, size_t len)
+    {
+        last_unlock_addr = addr;
+        last_unlock_len = len;
+        return true;
+    }
+};
+
+BOOST_AUTO_TEST_CASE(test_LockedPageManagerBase)
+{
+    const size_t test_page_size = 4096;
+    LockedPageManagerBase<TestLocker> lpm(test_page_size);
+    size_t addr;
+    last_lock_addr = last_unlock_addr = 0;
+    last_lock_len = last_unlock_len = 0;
+
+    /* Try large number of small objects */
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), 33);
+        addr += 33;
+    }
+    /* Try small number of page-sized objects, straddling two pages */
+    addr = test_page_size*100 + 53;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    /* Try small number of page-sized objects aligned to exactly one page */
+    addr = test_page_size*300;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    /* one very large object, straddling pages */
+    lpm.LockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
+    BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(600+500)));
+    /* one very large object, page aligned */
+    lpm.LockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
+    BOOST_CHECK(last_lock_addr == reinterpret_cast<void*>(test_page_size*(1200+500-1)));
+
+    BOOST_CHECK(lpm.GetLockedPageCount() == (
+        (1000*33+test_page_size-1)/test_page_size + // small objects
+        101 + 100 +  // page-sized objects
+        501 + 500)); // large objects
+    BOOST_CHECK((last_lock_len & (test_page_size-1)) == 0); // always lock entire pages
+    BOOST_CHECK(last_unlock_len == 0); // nothing unlocked yet
+
+    /* And unlock again */
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), 33);
+        addr += 33;
+    }
+    addr = test_page_size*100 + 53;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    addr = test_page_size*300;
+    for(int i=0; i<100; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), test_page_size);
+        addr += test_page_size;
+    }
+    lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*600+1), test_page_size*500);
+    lpm.UnlockRange(reinterpret_cast<void*>(test_page_size*1200), test_page_size*500-1);
+
+    /* Check that everything is released */
+    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+
+    /* A few locks and unlocks of size zero (should have no effect) */
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.LockRange(reinterpret_cast<void*>(addr), 0);
+        addr += 1;
+    }
+    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+    addr = 0;
+    for(int i=0; i<1000; ++i)
+    {
+        lpm.UnlockRange(reinterpret_cast<void*>(addr), 0);
+        addr += 1;
+    }
+    BOOST_CHECK(lpm.GetLockedPageCount() == 0);
+    BOOST_CHECK((last_unlock_len & (test_page_size-1)) == 0); // always unlock entire pages
+}
+
+BOOST_AUTO_TEST_SUITE_END()
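
To spell out the counting behavior the commit message describes, a hypothetical addition to the
suite above (reusing TestLocker; not part of this commit):

    LockedPageManagerBase<TestLocker> lpm2(4096);
    lpm2.LockRange(reinterpret_cast<void*>(0x1000), 16);   // page 0x1000: count 1, Lock() called
    lpm2.LockRange(reinterpret_cast<void*>(0x1008), 16);   // same page: count 2, no Lock() call
    lpm2.UnlockRange(reinterpret_cast<void*>(0x1000), 16); // count 1, page stays locked
    BOOST_CHECK(lpm2.GetLockedPageCount() == 1);
    lpm2.UnlockRange(reinterpret_cast<void*>(0x1008), 16); // count 0, Unlock() called
    BOOST_CHECK(lpm2.GetLockedPageCount() == 0);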

src/util.cpp

@@ -86,6 +86,8 @@ void locking_callback(int mode, int i, const char* file, int line)
 }
 }
 
+LockedPageManager LockedPageManager::instance;
+
 // Init
 class CInit
 {
