// allocators.h
// ECOin - Copyright (c) - 2014/2022 - GPLv3 - epsylon@riseup.net (https://03c8.net)
#ifndef ECOIN_ALLOCATORS_H
#define ECOIN_ALLOCATORS_H

#include <assert.h> // assert() in LockedPageManagerBase; previously relied on a transitive include
#include <stdint.h> // uintptr_t
#include <string.h>

#include <map>
#include <string>

#include <boost/thread/mutex.hpp>

#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#include <sys/mman.h>
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif
  23. template <class Locker> class LockedPageManagerBase
  24. {
  25. public:
  26. LockedPageManagerBase(size_t page_size):
  27. page_size(page_size)
  28. {
  29. // Determine bitmask for extracting page from address
  30. assert(!(page_size & (page_size-1))); // size must be power of two
  31. page_mask = ~(page_size - 1);
  32. }
  33. // For all pages in affected range, increase lock count
  34. void LockRange(void *p, size_t size)
  35. {
  36. boost::mutex::scoped_lock lock(mutex);
  37. if(!size) return;
  38. const size_t base_addr = reinterpret_cast<size_t>(p);
  39. const size_t start_page = base_addr & page_mask;
  40. const size_t end_page = (base_addr + size - 1) & page_mask;
  41. for(size_t page = start_page; page <= end_page; page += page_size)
  42. {
  43. Histogram::iterator it = histogram.find(page);
  44. if(it == histogram.end()) // Newly locked page
  45. {
  46. locker.Lock(reinterpret_cast<void*>(page), page_size);
  47. histogram.insert(std::make_pair(page, 1));
  48. }
  49. else // Page was already locked; increase counter
  50. {
  51. it->second += 1;
  52. }
  53. }
  54. }
  55. // For all pages in affected range, decrease lock count
  56. void UnlockRange(void *p, size_t size)
  57. {
  58. boost::mutex::scoped_lock lock(mutex);
  59. if(!size) return;
  60. const size_t base_addr = reinterpret_cast<size_t>(p);
  61. const size_t start_page = base_addr & page_mask;
  62. const size_t end_page = (base_addr + size - 1) & page_mask;
  63. for(size_t page = start_page; page <= end_page; page += page_size)
  64. {
  65. Histogram::iterator it = histogram.find(page);
  66. assert(it != histogram.end()); // Cannot unlock an area that was not locked
  67. // Decrease counter for page, when it is zero, the page will be unlocked
  68. it->second -= 1;
  69. if(it->second == 0) // Nothing on the page anymore that keeps it locked
  70. {
  71. // Unlock page and remove the count from histogram
  72. locker.Unlock(reinterpret_cast<void*>(page), page_size);
  73. histogram.erase(it);
  74. }
  75. }
  76. }
  77. // Get number of locked pages for diagnostics
  78. int GetLockedPageCount()
  79. {
  80. boost::mutex::scoped_lock lock(mutex);
  81. return histogram.size();
  82. }
  83. private:
  84. Locker locker;
  85. boost::mutex mutex;
  86. size_t page_size, page_mask;
  87. // map of page base address to lock count
  88. typedef std::map<size_t,int> Histogram;
  89. Histogram histogram;
  90. };
  91. /** Determine system page size in bytes */
  92. static inline size_t GetSystemPageSize()
  93. {
  94. size_t page_size;
  95. #if defined(WIN32)
  96. SYSTEM_INFO sSysInfo;
  97. GetSystemInfo(&sSysInfo);
  98. page_size = sSysInfo.dwPageSize;
  99. #elif defined(PAGESIZE) // defined in limits.h
  100. page_size = PAGESIZE;
  101. #else // assume some POSIX OS
  102. page_size = sysconf(_SC_PAGESIZE);
  103. #endif
  104. return page_size;
  105. }
  106. class MemoryPageLocker
  107. {
  108. public:
  109. bool Lock(const void *addr, size_t len)
  110. {
  111. #ifdef WIN32
  112. return VirtualLock(const_cast<void*>(addr), len);
  113. #else
  114. return mlock(addr, len) == 0;
  115. #endif
  116. }
  117. bool Unlock(const void *addr, size_t len)
  118. {
  119. #ifdef WIN32
  120. return VirtualUnlock(const_cast<void*>(addr), len);
  121. #else
  122. return munlock(addr, len) == 0;
  123. #endif
  124. }
  125. };
/**
 * Process-wide page locking manager: binds LockedPageManagerBase to the
 * OS page locker (MemoryPageLocker) and the system page size.
 */
class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
{
public:
    // NOTE(review): a plain static relies on static initialization order;
    // code that locks pages during static init of another translation unit
    // may run before this object is constructed — verify against callers.
    static LockedPageManager instance; // instantiated in util.cpp
private:
    // Private: this class is only usable through the singleton above.
    LockedPageManager():
        LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
    {}
};
  135. template<typename T>
  136. struct secure_allocator : public std::allocator<T>
  137. {
  138. typedef std::allocator<T> base;
  139. typedef typename base::size_type size_type;
  140. typedef typename base::difference_type difference_type;
  141. typedef typename base::pointer pointer;
  142. typedef typename base::const_pointer const_pointer;
  143. typedef typename base::reference reference;
  144. typedef typename base::const_reference const_reference;
  145. typedef typename base::value_type value_type;
  146. secure_allocator() throw() {}
  147. secure_allocator(const secure_allocator& a) throw() : base(a) {}
  148. template <typename U>
  149. secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
  150. ~secure_allocator() throw() {}
  151. template<typename _Other> struct rebind
  152. { typedef secure_allocator<_Other> other; };
  153. T* allocate(std::size_t n, const void *hint = 0)
  154. {
  155. T *p;
  156. p = std::allocator<T>::allocate(n, hint);
  157. if (p != NULL)
  158. LockedPageManager::instance.LockRange(p, sizeof(T) * n);
  159. return p;
  160. }
  161. void deallocate(T* p, std::size_t n)
  162. {
  163. if (p != NULL)
  164. {
  165. memset(p, 0, sizeof(T) * n);
  166. LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
  167. }
  168. std::allocator<T>::deallocate(p, n);
  169. }
  170. };
  171. template<typename T>
  172. struct zero_after_free_allocator : public std::allocator<T>
  173. {
  174. typedef std::allocator<T> base;
  175. typedef typename base::size_type size_type;
  176. typedef typename base::difference_type difference_type;
  177. typedef typename base::pointer pointer;
  178. typedef typename base::const_pointer const_pointer;
  179. typedef typename base::reference reference;
  180. typedef typename base::const_reference const_reference;
  181. typedef typename base::value_type value_type;
  182. zero_after_free_allocator() throw() {}
  183. zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
  184. template <typename U>
  185. zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
  186. ~zero_after_free_allocator() throw() {}
  187. template<typename _Other> struct rebind
  188. { typedef zero_after_free_allocator<_Other> other; };
  189. void deallocate(T* p, std::size_t n)
  190. {
  191. if (p != NULL)
  192. memset(p, 0, sizeof(T) * n);
  193. std::allocator<T>::deallocate(p, n);
  194. }
  195. };
// Like std::string, but backed by secure_allocator: heap storage is locked
// against swapping and zeroed before being freed.
// NOTE(review): short strings may live inside the basic_string object itself
// (small-string optimization) and thus bypass the allocator — verify for the
// target standard library if this matters.
typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
#endif