rbtree_best_fit.hpp
  1. //////////////////////////////////////////////////////////////////////////////
  2. //
  3. // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
  4. // Software License, Version 1.0. (See accompanying file
  5. // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // See http://www.boost.org/libs/interprocess for documentation.
  8. //
  9. //////////////////////////////////////////////////////////////////////////////
  10. #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  11. #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  12. #if (defined _MSC_VER) && (_MSC_VER >= 1200)
  13. # pragma once
  14. #endif
  15. #include <boost/interprocess/detail/config_begin.hpp>
  16. #include <boost/interprocess/detail/workaround.hpp>
  17. #include <boost/intrusive/pointer_traits.hpp>
  18. #include <boost/interprocess/interprocess_fwd.hpp>
  19. #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
  20. #include <boost/interprocess/containers/allocation_type.hpp>
  21. #include <boost/container/detail/multiallocation_chain.hpp>
  22. #include <boost/interprocess/offset_ptr.hpp>
  23. #include <boost/interprocess/exceptions.hpp>
  24. #include <boost/interprocess/detail/utilities.hpp>
  25. #include <boost/interprocess/detail/min_max.hpp>
  26. #include <boost/interprocess/detail/math_functions.hpp>
  27. #include <boost/interprocess/detail/type_traits.hpp>
  28. #include <boost/interprocess/sync/scoped_lock.hpp>
  29. #include <boost/type_traits/type_with_alignment.hpp>
  30. #include <boost/intrusive/pointer_traits.hpp>
  31. #include <boost/assert.hpp>
  32. #include <boost/static_assert.hpp>
  33. #include <boost/type_traits.hpp>
  34. #include <algorithm>
  35. #include <utility>
  36. #include <climits>
  37. #include <cstring>
  38. #include <iterator>
  39. #include <boost/assert.hpp>
  40. #include <new>
  41. #include <boost/intrusive/set.hpp>
  42. //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  43. //Uncomment the define above to maintain an ABI compatible with the original version.
  44. //The ABI had to be updated to fix compatibility issues when
  45. //sharing shared memory between 32 and 64 bit processes.
  46. //!\file
  47. //!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
  48. //!objects in shared memory. This class is intended as a base class for single segment
  49. //!and multi-segment implementations.
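//
//Illustrative note (an addition, not part of the original header): this class is the
//memory algorithm a managed segment is parameterized with. A minimal usage sketch,
//assuming the usual Boost.Interprocess front-ends (basic_managed_shared_memory,
//mutex_family, iset_index), could look like this:
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//   namespace ipc = boost::interprocess;
//   typedef ipc::basic_managed_shared_memory
//      < char
//      , ipc::rbtree_best_fit<ipc::mutex_family>   //free blocks kept in a red-black tree
//      , ipc::iset_index
//      > managed_shm_t;
//   //managed_shm_t shm(ipc::create_only, "MySharedMemory", 64*1024);
//   //void *raw = shm.allocate(100);   //served by rbtree_best_fit::allocate
//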
  50. namespace boost {
  51. namespace interprocess {
  52. //!This class implements an algorithm that stores the free nodes in a red-black tree
  53. //!to have logarithmic search/insert times.
  54. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  55. class rbtree_best_fit
  56. {
  57. /// @cond
  58. //Non-copyable
  59. rbtree_best_fit();
  60. rbtree_best_fit(const rbtree_best_fit &);
  61. rbtree_best_fit &operator=(const rbtree_best_fit &);
  62. private:
  63. struct block_ctrl;
  64. typedef typename boost::intrusive::
  65. pointer_traits<VoidPointer>::template
  66. rebind_pointer<block_ctrl>::type block_ctrl_ptr;
  67. typedef typename boost::intrusive::
  68. pointer_traits<VoidPointer>::template
  69. rebind_pointer<char>::type char_ptr;
  70. /// @endcond
  71. public:
  72. //!Shared mutex family used for the rest of the Interprocess framework
  73. typedef MutexFamily mutex_family;
  74. //!Pointer type to be used with the rest of the Interprocess framework
  75. typedef VoidPointer void_pointer;
  76. typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;
  77. typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
  78. typedef typename boost::make_unsigned<difference_type>::type size_type;
  79. /// @cond
  80. private:
  81. typedef typename bi::make_set_base_hook
  82. < bi::void_pointer<VoidPointer>
  83. , bi::optimize_size<true>
  84. , bi::link_mode<bi::normal_link> >::type TreeHook;
  85. struct SizeHolder
  86. {
  87. //!m_prev_size is the previous block's size and m_size is this block's size
  88. //!(both include the block_ctrl header), in Alignment units
  89. size_type m_prev_size : sizeof(size_type)*CHAR_BIT;
  90. size_type m_size : sizeof(size_type)*CHAR_BIT - 2;
  91. size_type m_prev_allocated : 1;
  92. size_type m_allocated : 1;
  93. };
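//Note (an addition, not in the original source): SizeHolder is, in effect, a pair of
//boundary-tag size fields plus two allocation flags. Since m_size gives up two bits
//for m_allocated/m_prev_allocated, the largest block it can describe is
//2^(sizeof(size_type)*CHAR_BIT - 2) - 1 Alignment units, far beyond any realistic
//segment size on 32 or 64 bit builds.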
  94. //!Block control structure
  95. struct block_ctrl
  96. : public SizeHolder, public TreeHook
  97. {
  98. block_ctrl()
  99. { this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0; }
  100. friend bool operator<(const block_ctrl &a, const block_ctrl &b)
  101. { return a.m_size < b.m_size; }
  102. friend bool operator==(const block_ctrl &a, const block_ctrl &b)
  103. { return a.m_size == b.m_size; }
  104. };
  105. struct size_block_ctrl_compare
  106. {
  107. bool operator()(size_type size, const block_ctrl &block) const
  108. { return size < block.m_size; }
  109. bool operator()(const block_ctrl &block, size_type size) const
  110. { return block.m_size < size; }
  111. };
  112. //!Shared mutex to protect memory allocate/deallocate
  113. typedef typename MutexFamily::mutex_type mutex_type;
  114. typedef typename bi::make_multiset
  115. <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;
  116. typedef typename Imultiset::iterator imultiset_iterator;
  117. //!This struct includes needed data and derives from
  118. //!mutex_type to allow EBO when using null mutex_type
  119. struct header_t : public mutex_type
  120. {
  121. Imultiset m_imultiset;
  122. //!The extra size required by the segment
  123. size_type m_extra_hdr_bytes;
  124. //!Allocated bytes for internal checking
  125. size_type m_allocated;
  126. //!The size of the memory segment
  127. size_type m_size;
  128. } m_header;
  129. friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;
  130. typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;
  131. public:
  132. /// @endcond
  133. //!Constructor. "size" is the total size of the managed memory segment,
  134. //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
  135. //!offset that the allocator should not use at all.
  136. rbtree_best_fit (size_type size, size_type extra_hdr_bytes);
  137. //!Destructor.
  138. ~rbtree_best_fit();
  139. //!Obtains the minimum size needed by the algorithm
  140. static size_type get_min_size (size_type extra_hdr_bytes);
  141. //Functions for single segment management
  142. //!Allocates bytes, returns 0 if there is no more memory
  143. void* allocate (size_type nbytes);
  144. /// @cond
  145. //Experimental. Don't use
  146. //!Multiple element allocation, same size
  147. void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
  148. {
  149. //-----------------------
  150. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  151. //-----------------------
  152. algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
  153. }
  154. //!Multiple element allocation, different size
  155. void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
  156. {
  157. //-----------------------
  158. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  159. //-----------------------
  160. algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
  161. }
  162. //!Multiple element allocation, different size
  163. void deallocate_many(multiallocation_chain &chain);
  164. /// @endcond
  165. //!Deallocates previously allocated bytes
  166. void deallocate (void *addr);
  167. //!Returns the size of the memory segment
  168. size_type get_size() const;
  169. //!Returns the number of free bytes of the segment
  170. size_type get_free_memory() const;
  171. //!Initializes to zero all the memory that's not in use.
  172. //!This function is normally used for security reasons.
  173. void zero_free_memory();
  174. //!Increases managed memory in
  175. //!extra_size bytes more
  176. void grow(size_type extra_size);
  177. //!Decreases managed memory as much as possible
  178. void shrink_to_fit();
  179. //!Returns true if all allocated memory has been deallocated
  180. bool all_memory_deallocated();
  181. //!Makes an internal sanity check
  182. //!and returns true on success
  183. bool check_sanity();
  184. template<class T>
  185. std::pair<T *, bool>
  186. allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  187. size_type preferred_size,size_type &received_size,
  188. T *reuse_ptr = 0);
  189. std::pair<void *, bool>
  190. raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
  191. size_type preferred_object,size_type &received_object,
  192. void *reuse_ptr = 0, size_type sizeof_object = 1);
  193. //!Returns the size of the buffer previously allocated, pointed to by ptr
  194. size_type size(const void *ptr) const;
  195. //!Allocates aligned bytes, returns 0 if there is no more memory.
  196. //!Alignment must be a power of 2
  197. void* allocate_aligned (size_type nbytes, size_type alignment);
  198. /// @cond
  199. private:
  200. static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);
  201. block_ctrl *priv_first_block();
  202. block_ctrl *priv_end_block();
  203. std::pair<void*, bool>
  204. priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
  205. size_type preferred_size,size_type &received_size,
  206. void *reuse_ptr, size_type sizeof_object);
  207. //!Real allocation algorithm with min allocation option
  208. std::pair<void *, bool> priv_allocate(boost::interprocess::allocation_type command
  209. ,size_type limit_size
  210. ,size_type preferred_size
  211. ,size_type &received_size
  212. ,void *reuse_ptr = 0
  213. ,size_type backwards_multiple = 1);
  214. //!Obtains the block control structure of the user buffer
  215. static block_ctrl *priv_get_block(const void *ptr);
  216. //!Obtains the pointer returned to the user from the block control
  217. static void *priv_get_user_buffer(const block_ctrl *block);
  218. //!Returns the total number of units that a user buffer
  219. //!of "userbytes" bytes really occupies (including the header)
  220. static size_type priv_get_total_units(size_type userbytes);
  221. //!Real expand function implementation
  222. bool priv_expand(void *ptr
  223. ,const size_type min_size, const size_type preferred_size
  224. ,size_type &received_size);
  225. //!Real expand to both sides implementation
  226. void* priv_expand_both_sides(boost::interprocess::allocation_type command
  227. ,size_type min_size
  228. ,size_type preferred_size
  229. ,size_type &received_size
  230. ,void *reuse_ptr
  231. ,bool only_preferred_backwards
  232. ,size_type backwards_multiple);
  233. //!Returns true if the previous block is allocated
  234. bool priv_is_prev_allocated(block_ctrl *ptr);
  235. //!Get a pointer to the "end" block from the first block of the segment
  236. static block_ctrl * priv_end_block(block_ctrl *first_segment_block);
  237. //!Get a pointer to the "first" block from the end block of the segment
  238. static block_ctrl * priv_first_block(block_ctrl *end_segment_block);
  239. //!Get a pointer to the previous block (the previous block must be free)
  240. static block_ctrl * priv_prev_block(block_ctrl *ptr);
  241. //!Get a pointer to the next block
  242. static block_ctrl * priv_next_block(block_ctrl *ptr);
  243. //!Check if this block is allocated (returns false if it is free)
  244. bool priv_is_allocated_block(block_ctrl *ptr);
  245. //!Marks the block as allocated
  246. void priv_mark_as_allocated_block(block_ctrl *ptr);
  247. //!Marks the block as allocated
  248. void priv_mark_new_allocated_block(block_ctrl *ptr)
  249. { return priv_mark_as_allocated_block(ptr); }
  250. //!Marks the block as free
  251. void priv_mark_as_free_block(block_ctrl *ptr);
  252. //!Checks if block has enough memory and splits/unlinks the block
  253. //!returning the address to the users
  254. void* priv_check_and_allocate(size_type units
  255. ,block_ctrl* block
  256. ,size_type &received_size);
  257. //!Real deallocation algorithm
  258. void priv_deallocate(void *addr);
  259. //!Makes a new memory portion available for allocation
  260. void priv_add_segment(void *addr, size_type size);
  261. public:
  262. static const size_type Alignment = !MemAlignment
  263. ? size_type(::boost::alignment_of< ::boost::detail::max_align>::value)
  264. : size_type(MemAlignment)
  265. ;
  266. private:
  267. //Due to embedded bits in size, Alignment must be at least 4
  268. BOOST_STATIC_ASSERT((Alignment >= 4));
  269. //Due to rbtree size optimizations, Alignment must have at least pointer alignment
  270. BOOST_STATIC_ASSERT((Alignment >= ::boost::alignment_of<void_pointer>::value));
  271. static const size_type AlignmentMask = (Alignment - 1);
  272. static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
  273. static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
  274. static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
  275. static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
  276. static const size_type EndCtrlBlockBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
  277. static const size_type EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
  278. static const size_type MinBlockUnits = BlockCtrlUnits;
  279. static const size_type UsableByPreviousChunk = sizeof(size_type);
  280. //Make sure the maximum alignment is power of two
  281. BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
  282. /// @endcond
  283. public:
  284. static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
  285. };
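//Illustrative sketch (an addition, not part of the original header): the algorithm
//object must be placement-constructed at the very beginning of the memory it manages,
//because the first block offset is computed from "this". A hypothetical direct use
//over a plain heap buffer, assuming null_mutex_family from
//<boost/interprocess/sync/mutex_family.hpp>, might look like:
//
//   typedef rbtree_best_fit<null_mutex_family> algo_t;
//   const std::size_t segment_size = 64*1024;
//   void *buf = std::malloc(segment_size);
//   algo_t *a = ::new(buf) algo_t(segment_size, 0);  //extra_hdr_bytes == 0
//   void  *p = a->allocate(100);                     //returns 0 when out of memory
//   a->deallocate(p);
//   std::free(buf);
//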
  286. /// @cond
  287. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  288. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  289. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  290. ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
  291. {
  292. size_type uint_this = (std::size_t)this_ptr;
  293. size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
  294. size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
  295. size_type block1_off = aligned_main_hdr_end - uint_this;
  296. algo_impl_t::assert_alignment(aligned_main_hdr_end);
  297. algo_impl_t::assert_alignment(uint_this + block1_off);
  298. return block1_off;
  299. }
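//Worked example (an addition; purely illustrative numbers): if this == 0x1000,
//sizeof(rbtree_best_fit) == 136, extra_hdr_bytes == 0 and Alignment == 16, then
//main_hdr_end == 0x1088, aligned_main_hdr_end == 0x1090 and the returned
//block1_off is 0x90, i.e. the first block starts 144 bytes after "this".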
  300. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  301. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  302. priv_add_segment(void *addr, size_type segment_size)
  303. {
  304. //Check alignment
  305. algo_impl_t::check_alignment(addr);
  306. //Check size
  307. BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
  308. //Initialize the first big block and the "end" node
  309. block_ctrl *first_big_block = new(addr)block_ctrl;
  310. first_big_block->m_size = segment_size/Alignment - EndCtrlBlockUnits;
  311. BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
  312. //The "end" node is just a node of size 0 with the "end" bit set
  313. block_ctrl *end_block = static_cast<block_ctrl*>
  314. (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);
  315. //This will overwrite the prev part of the "end" node
  316. priv_mark_as_free_block (first_big_block);
  317. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  318. first_big_block->m_prev_size = end_block->m_size =
  319. (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
  320. #else
  321. first_big_block->m_prev_size = end_block->m_size =
  322. (reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment;
  323. #endif
  324. end_block->m_allocated = 1;
  325. first_big_block->m_prev_allocated = 1;
  326. BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
  327. BOOST_ASSERT(priv_prev_block(end_block) == first_big_block);
  328. BOOST_ASSERT(priv_first_block() == first_big_block);
  329. BOOST_ASSERT(priv_end_block() == end_block);
  330. //Some check to validate the algorithm, since it makes some assumptions
  331. //to optimize the space wasted in bookkeeping:
  332. //Check that the sizes of the header are placed before the rbtree
  333. BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
  334. < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
  335. //Insert it in the intrusive containers
  336. m_header.m_imultiset.insert(*first_big_block);
  337. }
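//Segment layout produced by priv_add_segment (an addition, illustrative diagram):
//
//  this                                                       this + m_header.m_size
//  |rbtree_best_fit + extra_hdr_bytes + padding|first (free) block .....|end block|
//  |<------------- block1_off ---------------->|<-- m_size*Alignment -->|<-EndCtrlBlockBytes->|
//
//The end block is a SizeHolder marked as allocated whose m_size (and the first
//block's m_prev_size) stores the distance, in Alignment units, between the first
//block and the end block, so priv_first_block()/priv_end_block() can locate each
//other without extra bookkeeping.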
  338. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  339. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  340. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  341. ::priv_first_block()
  342. {
  343. size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  344. return reinterpret_cast<block_ctrl *>(reinterpret_cast<char*>(this) + block1_off);
  345. }
  346. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  347. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  348. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  349. ::priv_end_block()
  350. {
  351. size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  352. const size_type original_first_block_size = m_header.m_size/Alignment*Alignment - block1_off/Alignment*Alignment - EndCtrlBlockBytes;
  353. block_ctrl *end_block = reinterpret_cast<block_ctrl*>
  354. (reinterpret_cast<char*>(this) + block1_off + original_first_block_size);
  355. return end_block;
  356. }
  357. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  358. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  359. rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
  360. {
  361. //Initialize the header
  362. m_header.m_allocated = 0;
  363. m_header.m_size = segment_size;
  364. m_header.m_extra_hdr_bytes = extra_hdr_bytes;
  365. //Now calculate the offset of the first big block that will
  366. //cover the whole segment
  367. BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
  368. size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
  369. priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
  370. }
  371. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  372. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
  373. {
  374. //There is a memory leak!
  375. // BOOST_ASSERT(m_header.m_allocated == 0);
  376. // BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
  377. }
  378. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  379. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
  380. {
  381. //Get the address of the first block
  382. block_ctrl *first_block = priv_first_block();
  383. block_ctrl *old_end_block = priv_end_block();
  384. size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
  385. reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
  386. //Update managed buffer's size
  387. m_header.m_size += extra_size;
  388. //We need at least MinBlockUnits blocks to create a new block
  389. if((m_header.m_size - old_border_offset) < MinBlockUnits){
  390. return;
  391. }
  392. //Now create a new block between the old end and the new end
  393. size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
  394. block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
  395. (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
  396. //the last and the first block are special:
  397. //new_end_block->m_size & first_block->m_prev_size store the absolute
  398. //distance between them
  399. new_end_block->m_allocated = 1;
  400. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  401. new_end_block->m_size = (reinterpret_cast<char*>(first_block) -
  402. reinterpret_cast<char*>(new_end_block))/Alignment;
  403. #else
  404. new_end_block->m_size = (reinterpret_cast<char*>(new_end_block) -
  405. reinterpret_cast<char*>(first_block))/Alignment;
  406. #endif
  407. first_block->m_prev_size = new_end_block->m_size;
  408. first_block->m_prev_allocated = 1;
  409. BOOST_ASSERT(new_end_block == priv_end_block());
  410. //The old end block is the new block
  411. block_ctrl *new_block = old_end_block;
  412. new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
  413. reinterpret_cast<char*>(new_block))/Alignment;
  414. BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
  415. priv_mark_as_allocated_block(new_block);
  416. BOOST_ASSERT(priv_next_block(new_block) == new_end_block);
  417. m_header.m_allocated += (size_type)new_block->m_size*Alignment;
  418. //Now deallocate the newly created block
  419. this->priv_deallocate(priv_get_user_buffer(new_block));
  420. }
  421. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  422. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
  423. {
  424. //Get the address of the first block
  425. block_ctrl *first_block = priv_first_block();
  426. algo_impl_t::assert_alignment(first_block);
  427. //block_ctrl *old_end_block = priv_end_block(first_block);
  428. block_ctrl *old_end_block = priv_end_block();
  429. algo_impl_t::assert_alignment(old_end_block);
  430. size_type old_end_block_size = old_end_block->m_size;
  431. void *unique_buffer = 0;
  432. block_ctrl *last_block;
  433. //Check if no memory is allocated between the first and last block
  434. if(priv_next_block(first_block) == old_end_block){
  435. //If so check if we can allocate memory
  436. size_type ignore;
  437. unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, 0, ignore).first;
  438. //If not, return, we can't shrink
  439. if(!unique_buffer)
  440. return;
  441. //If we can, mark the position just after the new allocation as the new end
  442. algo_impl_t::assert_alignment(unique_buffer);
  443. block_ctrl *unique_block = priv_get_block(unique_buffer);
  444. BOOST_ASSERT(priv_is_allocated_block(unique_block));
  445. algo_impl_t::assert_alignment(unique_block);
  446. last_block = priv_next_block(unique_block);
  447. BOOST_ASSERT(!priv_is_allocated_block(last_block));
  448. algo_impl_t::assert_alignment(last_block);
  449. }
  450. else{
  451. //If memory is allocated, check if the last block is allocated
  452. if(priv_is_prev_allocated(old_end_block))
  453. return;
  454. //If not, mark last block after the free block
  455. last_block = priv_prev_block(old_end_block);
  456. }
  457. size_type last_block_size = last_block->m_size;
  458. //Erase the last free block from the tree, since it will become the new end block
  459. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));
  460. size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
  461. reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
  462. block_ctrl *new_end_block = last_block;
  463. algo_impl_t::assert_alignment(new_end_block);
  464. //Write new end block attributes
  465. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  466. new_end_block->m_size = first_block->m_prev_size =
  467. (reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment;
  468. #else
  469. new_end_block->m_size = first_block->m_prev_size =
  470. (reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment;
  471. #endif
  472. new_end_block->m_allocated = 1;
  473. (void)last_block_size;
  474. (void)old_end_block_size;
  475. BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));
  476. //Update managed buffer's size
  477. m_header.m_size = shrunk_border_offset;
  478. BOOST_ASSERT(priv_end_block() == new_end_block);
  479. if(unique_buffer)
  480. priv_deallocate(unique_buffer);
  481. }
  482. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  483. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  484. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
  485. { return m_header.m_size; }
  486. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  487. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  488. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
  489. {
  490. return m_header.m_size - m_header.m_allocated -
  491. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  492. }
  493. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  494. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  495. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  496. get_min_size (size_type extra_hdr_bytes)
  497. {
  498. return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
  499. algo_impl_t::ceil_units(extra_hdr_bytes) +
  500. MinBlockUnits + EndCtrlBlockUnits)*Alignment;
  501. }
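//Worked example (an addition; illustrative numbers consistent with the sketch after
//priv_first_block_offset_from_this): with sizeof(rbtree_best_fit) == 136,
//Alignment == 16, MinBlockUnits == 3, EndCtrlBlockUnits == 1 and no extra header
//bytes, get_min_size(0) == (9 + 0 + 3 + 1)*16 == 208 bytes: the smallest segment
//that can hold the header, one minimal block and the end marker.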
  502. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  503. inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  504. all_memory_deallocated()
  505. {
  506. //-----------------------
  507. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  508. //-----------------------
  509. size_type block1_off =
  510. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  511. return m_header.m_allocated == 0 &&
  512. m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
  513. (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
  514. && m_header.m_imultiset.begin()->m_size ==
  515. (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
  516. }
  517. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  518. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  519. check_sanity()
  520. {
  521. //-----------------------
  522. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  523. //-----------------------
  524. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  525. size_type free_memory = 0;
  526. //Iterate through all blocks obtaining their size
  527. for(; ib != ie; ++ib){
  528. free_memory += (size_type)ib->m_size*Alignment;
  529. algo_impl_t::assert_alignment(&*ib);
  530. if(!algo_impl_t::check_alignment(&*ib))
  531. return false;
  532. }
  533. //Check allocated bytes are less than size
  534. if(m_header.m_allocated > m_header.m_size){
  535. return false;
  536. }
  537. size_type block1_off =
  538. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  539. //Check free bytes are less than size
  540. if(free_memory > (m_header.m_size - block1_off)){
  541. return false;
  542. }
  543. return true;
  544. }
  545. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  546. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  547. allocate(size_type nbytes)
  548. {
  549. //-----------------------
  550. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  551. //-----------------------
  552. size_type ignore;
  553. void * ret = priv_allocate(boost::interprocess::allocate_new, nbytes, nbytes, ignore).first;
  554. return ret;
  555. }
  556. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  557. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  558. allocate_aligned(size_type nbytes, size_type alignment)
  559. {
  560. //-----------------------
  561. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  562. //-----------------------
  563. return algo_impl_t::allocate_aligned(this, nbytes, alignment);
  564. }
  565. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  566. template<class T>
  567. inline std::pair<T*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  568. allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  569. size_type preferred_size,size_type &received_size,
  570. T *reuse_ptr)
  571. {
  572. std::pair<void*, bool> ret = priv_allocation_command
  573. (command, limit_size, preferred_size, received_size, static_cast<void*>(reuse_ptr), sizeof(T));
  574. BOOST_ASSERT(0 == ((std::size_t)ret.first % ::boost::alignment_of<T>::value));
  575. return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
  576. }
  577. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  578. inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  579. raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
  580. size_type preferred_objects,size_type &received_objects,
  581. void *reuse_ptr, size_type sizeof_object)
  582. {
  583. if(!sizeof_object)
  584. return std::pair<void *, bool>(static_cast<void*>(0), false);
  585. if(command & boost::interprocess::try_shrink_in_place){
  586. bool success = algo_impl_t::try_shrink
  587. ( this, reuse_ptr, limit_objects*sizeof_object
  588. , preferred_objects*sizeof_object, received_objects);
  589. received_objects /= sizeof_object;
  590. return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
  591. }
  592. return priv_allocation_command
  593. (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);
  594. }
  595. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  596. inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  597. priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  598. size_type preferred_size,size_type &received_size,
  599. void *reuse_ptr, size_type sizeof_object)
  600. {
  601. std::pair<void*, bool> ret;
  602. size_type max_count = m_header.m_size/sizeof_object;
  603. if(limit_size > max_count || preferred_size > max_count){
  604. ret.first = 0; return ret;
  605. }
  606. size_type l_size = limit_size*sizeof_object;
  607. size_type p_size = preferred_size*sizeof_object;
  608. size_type r_size;
  609. {
  610. //-----------------------
  611. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  612. //-----------------------
  613. ret = priv_allocate(command, l_size, p_size, r_size, reuse_ptr, sizeof_object);
  614. }
  615. received_size = r_size/sizeof_object;
  616. return ret;
  617. }
  618. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  619. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  620. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  621. size(const void *ptr) const
  622. {
  623. //We need no synchronization since this block's size is not going
  624. //to be modified by anyone else
  625. //Obtain the real size of the block
  626. return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  627. }
  628. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  629. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
  630. {
  631. //-----------------------
  632. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  633. //-----------------------
  634. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  635. //Iterate through all blocks obtaining their size
  636. while(ib != ie){
  637. //Just clear the memory part reserved for the user
  638. volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
  639. size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
  640. while(s--){
  641. *ptr++ = 0;
  642. }
  643. //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
  644. //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
  645. // , 0
  646. // , ib->m_size*Alignment - BlockCtrlBytes);
  647. ++ib;
  648. }
  649. }
  650. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  651. void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  652. priv_expand_both_sides(boost::interprocess::allocation_type command
  653. ,size_type min_size
  654. ,size_type preferred_size
  655. ,size_type &received_size
  656. ,void *reuse_ptr
  657. ,bool only_preferred_backwards
  658. ,size_type backwards_multiple)
  659. {
  660. algo_impl_t::assert_alignment(reuse_ptr);
  661. if(command & boost::interprocess::expand_fwd){
  662. if(priv_expand(reuse_ptr, min_size, preferred_size, received_size))
  663. return reuse_ptr;
  664. }
  665. else{
  666. received_size = this->size(reuse_ptr);
  667. if(received_size >= preferred_size || received_size >= min_size)
  668. return reuse_ptr;
  669. }
  670. if(backwards_multiple){
  671. BOOST_ASSERT(0 == (min_size % backwards_multiple));
  672. BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
  673. }
  674. if(command & boost::interprocess::expand_bwd){
  675. //Obtain the real size of the block
  676. block_ctrl *reuse = priv_get_block(reuse_ptr);
  677. //Sanity check
  678. //BOOST_ASSERT(reuse->m_size == priv_tail_size(reuse));
  679. algo_impl_t::assert_alignment(reuse);
  680. block_ctrl *prev_block;
  681. //If the previous block is not free, there is nothing to do
  682. if(priv_is_prev_allocated(reuse)){
  683. return 0;
  684. }
  685. prev_block = priv_prev_block(reuse);
  686. BOOST_ASSERT(!priv_is_allocated_block(prev_block));
  687. //Some sanity checks
  688. BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
  689. algo_impl_t::assert_alignment(prev_block);
  690. size_type needs_backwards_aligned;
  691. size_type lcm;
  692. if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
  693. ( backwards_multiple
  694. , received_size
  695. , only_preferred_backwards ? preferred_size : min_size
  696. , lcm, needs_backwards_aligned)){
  697. return 0;
  698. }
  699. //Check if previous block has enough size
  700. if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
  701. //Now take all next space. This will succeed
  702. if(command & boost::interprocess::expand_fwd){
  703. size_type received_size2;
  704. if(!priv_expand(reuse_ptr, received_size, received_size, received_size2)){
  705. BOOST_ASSERT(0);
  706. }
  707. BOOST_ASSERT(received_size == received_size2);
  708. }
  709. //We need a minimum size to split the previous one
  710. if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
  711. block_ctrl *new_block = reinterpret_cast<block_ctrl *>
  712. (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);
  713. //Free old previous buffer
  714. new_block->m_size =
  715. AllocatedCtrlUnits + (needs_backwards_aligned + (received_size - UsableByPreviousChunk))/Alignment;
  716. BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
  717. priv_mark_as_allocated_block(new_block);
  718. prev_block->m_size = (reinterpret_cast<char*>(new_block) -
  719. reinterpret_cast<char*>(prev_block))/Alignment;
  720. BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
  721. priv_mark_as_free_block(prev_block);
  722. //Update the old previous block in the free blocks tree
  723. //If the new size fulfills tree invariants do nothing,
  724. //otherwise erase() + insert()
  725. {
  726. imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
  727. imultiset_iterator was_smaller_it(prev_block_it);
  728. if(prev_block_it != m_header.m_imultiset.begin() &&
  729. (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
  730. m_header.m_imultiset.erase(prev_block_it);
  731. m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
  732. }
  733. }
  734. received_size = needs_backwards_aligned + received_size;
  735. m_header.m_allocated += needs_backwards_aligned;
  736. //Check alignment
  737. algo_impl_t::assert_alignment(new_block);
  738. //If the backwards expansion has remaining bytes in the
  739. //first bytes, fill them with a pattern
  740. void *p = priv_get_user_buffer(new_block);
  741. void *user_ptr = reinterpret_cast<char*>(p);
  742. BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
  743. algo_impl_t::assert_alignment(user_ptr);
  744. return user_ptr;
  745. }
  746. //Check if there is no room to create a new block and
  747. //the whole new block is a multiple of the backwards expansion multiple
  748. else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
  749. 0 == ((prev_block->m_size*Alignment) % lcm)) {
  750. //Erase old previous block, since we will change it
  751. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
  752. //Just merge the whole previous block
  753. //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
  754. received_size = received_size + (size_type)prev_block->m_size*Alignment;
  755. m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
  756. //Now update sizes
  757. prev_block->m_size = prev_block->m_size + reuse->m_size;
  758. BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
  759. priv_mark_as_allocated_block(prev_block);
  760. //If the backwards expansion has remaining bytes in the
  761. //first bytes, fill them with a pattern
  762. void *user_ptr = priv_get_user_buffer(prev_block);
  763. BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
  764. algo_impl_t::assert_alignment(user_ptr);
  765. return user_ptr;
  766. }
  767. else{
  768. //Alignment issues
  769. }
  770. }
  771. }
  772. return 0;
  773. }
  774. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  775. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  776. deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
  777. {
  778. //-----------------------
  779. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  780. //-----------------------
  781. algo_impl_t::deallocate_many(this, chain);
  782. }
  783. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  784. std::pair<void *, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  785. priv_allocate(boost::interprocess::allocation_type command
  786. ,size_type limit_size
  787. ,size_type preferred_size
  788. ,size_type &received_size
  789. ,void *reuse_ptr
  790. ,size_type backwards_multiple)
  791. {
  792. //Remove me. Forbid backwards allocation
  793. //command &= (~boost::interprocess::expand_bwd);
  794. if(command & boost::interprocess::shrink_in_place){
  795. bool success =
  796. algo_impl_t::shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
  797. return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
  798. }
  799. typedef std::pair<void *, bool> return_type;
  800. received_size = 0;
  801. if(limit_size > preferred_size)
  802. return return_type(static_cast<void*>(0), false);
  803. //Number of units to request (including block_ctrl header)
  804. size_type preferred_units = priv_get_total_units(preferred_size);
  805. //Number of units to request (including block_ctrl header)
  806. size_type limit_units = priv_get_total_units(limit_size);
  807. //Expand in place
  808. if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
  809. void *ret = priv_expand_both_sides
  810. (command, limit_size, preferred_size, received_size, reuse_ptr, true, backwards_multiple);
  811. if(ret)
  812. return return_type(ret, true);
  813. }
  814. if(command & boost::interprocess::allocate_new){
  815. size_block_ctrl_compare comp;
  816. imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
  817. if(it != m_header.m_imultiset.end()){
  818. return return_type(this->priv_check_and_allocate
  819. (preferred_units, ipcdetail::to_raw_pointer(&*it), received_size), false);
  820. }
  821. if(it != m_header.m_imultiset.begin()&&
  822. (--it)->m_size >= limit_units){
  823. return return_type(this->priv_check_and_allocate
  824. (it->m_size, ipcdetail::to_raw_pointer(&*it), received_size), false);
  825. }
  826. }
  827. //Now try to expand both sides with min size
  828. if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
  829. return return_type(priv_expand_both_sides
  830. (command, limit_size, preferred_size, received_size, reuse_ptr, false, backwards_multiple), true);
  831. }
  832. return return_type(static_cast<void*>(0), false);
  833. }
  834. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  835. inline
  836. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  837. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
  838. {
  839. return const_cast<block_ctrl*>
  840. (reinterpret_cast<const block_ctrl*>
  841. (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
  842. }
  843. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  844. inline
  845. void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  846. priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  847. { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
  848. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  849. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  850. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  851. priv_get_total_units(size_type userbytes)
  852. {
  853. if(userbytes < UsableByPreviousChunk)
  854. userbytes = UsableByPreviousChunk;
  855. size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
  856. if(units < BlockCtrlUnits) units = BlockCtrlUnits;
  857. return units;
  858. }
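//Worked example (an addition; illustrative, assuming a 64 bit build where
//sizeof(size_type) == 8, Alignment == 16, AllocatedCtrlUnits == 1 and
//UsableByPreviousChunk == 8): a request of 100 user bytes becomes
//get_rounded_size(100 - 8, 16)/16 + 1 == 96/16 + 1 == 7 units, i.e. 112 bytes,
//the last 8 of which overlap the next block's SizeHolder (UsableByPreviousChunk).
//size() would later report (7 - 1)*16 + 8 == 104 usable bytes for that allocation.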
  859. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  860. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  861. priv_expand (void *ptr
  862. ,const size_type min_size
  863. ,const size_type preferred_size
  864. ,size_type &received_size)
  865. {
  866. //Obtain the real size of the block
  867. block_ctrl *block = priv_get_block(ptr);
  868. size_type old_block_units = block->m_size;
  869. //The block must be marked as allocated and the sizes must be equal
  870. BOOST_ASSERT(priv_is_allocated_block(block));
  871. //BOOST_ASSERT(old_block_units == priv_tail_size(block));
  872. //Set received_size to a safe value first
  873. received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  874. if(received_size >= preferred_size || received_size >= min_size)
  875. return true;
  876. //Now translate it to Alignment units
  877. const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
  878. const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
  879. //Some parameter checks
  880. BOOST_ASSERT(min_user_units <= preferred_user_units);
  881. block_ctrl *next_block;
  882. if(priv_is_allocated_block(next_block = priv_next_block(block))){
  883. return received_size >= min_size;
  884. }
  885. algo_impl_t::assert_alignment(next_block);
  886. //Is "block" + "next_block" big enough?
  887. const size_type merged_units = old_block_units + (size_type)next_block->m_size;
  888. //Now get the expansion size
  889. const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
  890. if(merged_user_units < min_user_units){
  891. received_size = merged_units*Alignment - UsableByPreviousChunk;
  892. return false;
  893. }
  894. //Now get the maximum size the user can allocate
  895. size_type intended_user_units = (merged_user_units < preferred_user_units) ?
  896. merged_user_units : preferred_user_units;
  897. //These are total units of the merged block (supposing the next block can be split)
  898. const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
  899. //Check if we can split the next one in two parts
  900. if((merged_units - intended_units) >= BlockCtrlUnits){
  901. //This block is bigger than needed, split it in
  902. //two blocks, the first one will be merged and
  903. //the second's size will be the remaining space
  904. BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
  905. const size_type rem_units = merged_units - intended_units;
  906. //Check if we need to update the old next block in the free blocks tree
  907. //If the new size fulfills tree invariants, we just need to replace the node
  908. //(the block start has been displaced), otherwise erase() + insert().
  909. //
  910. //This fixup must be done in two parts, because the new next block might
  911. //overwrite the tree hook of the old next block. So we first erase the
  912. //old if needed and we'll insert the new one after creating the new next
  913. imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
  914. const bool size_invariants_broken =
  915. (next_block->m_size - rem_units ) < BlockCtrlUnits ||
  916. (old_next_block_it != m_header.m_imultiset.begin() &&
  917. (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
  918. if(size_invariants_broken){
  919. m_header.m_imultiset.erase(old_next_block_it);
  920. }
  921. //This is the remaining block
  922. block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
  923. (reinterpret_cast<char*>(block) + intended_units*Alignment))block_ctrl;
  924. rem_block->m_size = rem_units;
  925. algo_impl_t::assert_alignment(rem_block);
  926. BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
  927. priv_mark_as_free_block(rem_block);
  928. //Now the second part of the fixup
  929. if(size_invariants_broken)
  930. m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
  931. else
  932. m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);
  933. //Write the new length
  934. block->m_size = intended_user_units + AllocatedCtrlUnits;
  935. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  936. m_header.m_allocated += (intended_units - old_block_units)*Alignment;
  937. }
  938. //There is no free space to create a new node: just merge both blocks
  939. else{
  940. //Now we have to update the data in the tree
  941. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
  942. //Write the new length
  943. block->m_size = merged_units;
  944. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  945. m_header.m_allocated += (merged_units - old_block_units)*Alignment;
  946. }
  947. priv_mark_as_allocated_block(block);
  948. received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  949. return true;
  950. }
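//Worked example of the split decision above (an addition; illustrative values,
//assuming BlockCtrlUnits == 3 and AllocatedCtrlUnits == 1): expanding a block of
//old_block_units == 8 into a free neighbour of 6 units gives merged_units == 14.
//If the caller prefers 10 user units, intended_units == 11 and the remaining
//14 - 11 == 3 >= BlockCtrlUnits units become a new free block; with a larger
//preferred size the whole neighbour would simply be merged.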
  951. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  952. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  953. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
  954. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  955. {
  956. BOOST_ASSERT(!ptr->m_prev_allocated);
  957. return reinterpret_cast<block_ctrl *>
  958. (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
  959. }
  960. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  961. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  962. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
  963. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
  964. {
  965. //The first block's logic is different from the rest of the blocks: it stores in
  966. //m_prev_size the absolute distance to the end block
  967. BOOST_ASSERT(first_segment_block->m_prev_allocated);
  968. block_ctrl *end_block = reinterpret_cast<block_ctrl *>
  969. (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
  970. (void)end_block;
  971. BOOST_ASSERT(end_block->m_allocated == 1);
  972. BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
  973. BOOST_ASSERT(end_block > first_segment_block);
  974. return end_block;
  975. }
  976. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  977. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  978. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
  979. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
  980. {
  981. //The first block's logic is different from the rest of the blocks: it stores in
  982. //m_prev_size the absolute distance to the end block
  983. BOOST_ASSERT(end_segment_block->m_allocated);
  984. block_ctrl *first_block = reinterpret_cast<block_ctrl *>
  985. (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
  986. (void)first_block;
  987. BOOST_ASSERT(first_block->m_prev_allocated == 1);
  988. BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
  989. BOOST_ASSERT(end_segment_block > first_block);
  990. return first_block;
  991. }
  992. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  993. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  994. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
  995. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  996. {
  997. return reinterpret_cast<block_ctrl *>
  998. (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
  999. }
  1000. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1001. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
  1002. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1003. {
  1004. bool allocated = block->m_allocated != 0;
  1005. #ifndef NDEBUG
  1006. if(block != priv_end_block()){
  1007. block_ctrl *next_block = reinterpret_cast<block_ctrl *>
  1008. (reinterpret_cast<char*>(block) + block->m_size*Alignment);
  1009. bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
  1010. (void)next_block_prev_allocated;
  1011. BOOST_ASSERT(allocated == next_block_prev_allocated);
  1012. }
  1013. else{
  1014. (void)block;   //end block of the segment: there is no next block to check
  1015. }
  1016. #endif
  1017. return allocated;
  1018. }
  1019. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1020. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
  1021. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1022. {
  1023. if(block->m_prev_allocated){
  1024. return true;
  1025. }
  1026. else{
  1027. #ifndef NDEBUG
  1028. if(block != priv_first_block()){
  1029. block_ctrl *prev = priv_prev_block(block);
  1030. (void)prev;
  1031. BOOST_ASSERT(!prev->m_allocated);
  1032. }
  1033. else{
  1034. (void)block;   //first block of the segment: there is no previous block to check
  1035. }
  1036. #endif
  1037. return false;
  1038. }
  1039. }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 1;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}
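
//Illustrative note: marking a block as allocated updates both sides of the boundary
//tag: the block's own m_allocated bit and the m_prev_allocated bit of the physically
//next block. The inline pointer arithmetic above computes the same address as
//priv_next_block(block).
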
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   block_ctrl *next_block = priv_next_block(block);
   next_block->m_prev_allocated = 0;
   next_block->m_prev_size = block->m_size;
}
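
//Illustrative note: only free blocks publish their size in the successor's
//m_prev_size field; that footer is what lets priv_prev_block() (and therefore the
//coalescing logic in priv_deallocate()) walk backwards to a free predecessor. When
//the predecessor is allocated the field is not needed, which is presumably why
//allocations credit the caller with UsableByPreviousChunk extra bytes in
//priv_check_and_allocate().
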
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks: the first's size will be "nunits" and
      //the second's size "block->m_size - nunits"
      size_type block_old_size = block->m_size;
      block->m_size = nunits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + Alignment*nunits))block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = block_old_size - nunits;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      if(it_old == m_header.m_imultiset.begin()
         || (--imultiset_iterator(it_old))->m_size < rem_block->m_size){
         //option a: slow but secure
         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
         //option b: construct an empty node and swap
         //Imultiset::init_node(*rem_block);
         //block->swap_nodes(*rem_block);
         //option c: replace the node directly
         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
      }
      else{
         //Now we have to update the data in the tree
         m_header.m_imultiset.erase(it_old);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      }
   }
   else if (block->m_size >= nunits){
      m_header.m_imultiset.erase(it_old);
   }
   else{
      BOOST_ASSERT(0);
      return 0;
   }

   //We need the block_ctrl info for deallocation, so
   //return only the memory the user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = (char*)t - (char*)block;
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block) + tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
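
//Illustrative sketch of the split decision above, assuming purely for the example
//Alignment == 8, BlockCtrlUnits == 2 and a free block with m_size == 10 units:
//
//   nunits == 5  -> 10 >= 5 + 2, so the block is split: the first 5 units are
//                   allocated and a 5-unit remainder is re-linked into the tree
//   nunits == 9  -> 10 <  9 + 2 but 10 >= 9, so the whole block is handed out,
//                   because splitting would leave a remainder too small to hold
//                   a block_ctrl header
//   nunits == 11 -> the defensive branch asserts and returns 0; a best-fit
//                   candidate should never be smaller than the request
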
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));
   //BOOST_ASSERT(block->m_size == priv_tail_size(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *next_block = priv_next_block(block);
   bool merge_with_prev = !priv_is_prev_allocated(block);
   bool merge_with_next = !priv_is_allocated_block(next_block);

   //Merge logic. First just update block sizes, then fix the free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_ctrl *prev_block = priv_prev_block(block);
         prev_block->m_size += block->m_size;
         BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
         block_to_insert = prev_block;
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size += next_block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         if(merge_with_prev)
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
      }

      bool only_merge_next = !merge_with_prev && merge_with_next;
      imultiset_iterator free_block_to_check_it
         (Imultiset::s_iterator_to(only_merge_next ? *next_block : *block_to_insert));
      imultiset_iterator was_bigger_it(free_block_to_check_it);

      //Now try to shortcut erasure + insertion (O(log(N))) with
      //an O(1) operation if merging does not alter tree positions
      if(++was_bigger_it != m_header.m_imultiset.end() &&
         block_to_insert->m_size > was_bigger_it->m_size){
         m_header.m_imultiset.erase(free_block_to_check_it);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
      }
      else if(only_merge_next){
         m_header.m_imultiset.replace_node(free_block_to_check_it, *block_to_insert);
      }
   }
   else{
      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
   }
   priv_mark_as_free_block(block_to_insert);
}
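
//Illustrative note on the merge shortcut above: when the coalesced block is still
//not bigger than its in-order successor in the size-ordered multiset, its tree
//position remains valid. If it only absorbed the physically next block, the freed
//node is re-linked in place with replace_node() (O(1)); only when the grown size
//would overtake the successor does the code fall back to a full erase + insert.
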
/// @endcond

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP