Skip to content

Commit 4b6f3bd

Browse files
committed
Add overalignment support for segment_manager allocation_command functions.
1 parent 76e3fea commit 4b6f3bd

5 files changed

Lines changed: 362 additions & 498 deletions

File tree

include/boost/interprocess/mem_algo/detail/mem_algo_common.hpp

Lines changed: 115 additions & 113 deletions
Original file line numberDiff line numberDiff line change
@@ -136,12 +136,12 @@ class memory_algorithm_common
136136
}
137137

138138
static bool calculate_lcm_and_needs_backwards_lcmed
139-
(size_type backwards_multiple, size_type received_size, size_type size_to_achieve,
139+
(const size_type backwards_multiple, const size_type alignment, const size_type received_size, const size_type size_to_achieve,
140140
size_type &lcm_out, size_type &needs_backwards_lcmed_out)
141141
{
142142
// Now calculate lcm_val
143143
size_type max = backwards_multiple;
144-
size_type min = Alignment;
144+
size_type min = alignment;
145145
size_type needs_backwards;
146146
size_type needs_backwards_lcmed;
147147
size_type lcm_val;
@@ -171,39 +171,39 @@ class memory_algorithm_common
171171
return true;
172172
}
173173
//Check if it's multiple of alignment
174-
else if((backwards_multiple & (Alignment - 1u)) == 0){
174+
else if((backwards_multiple & (alignment - 1u)) == 0){
175175
lcm_val = backwards_multiple;
176176
current_forward = get_truncated_size(received_size, backwards_multiple);
177177
//No need to round needs_backwards because backwards_multiple == lcm_val
178178
needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
179-
BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
179+
BOOST_ASSERT((needs_backwards_lcmed & (alignment - 1u)) == 0);
180180
lcm_out = lcm_val;
181181
needs_backwards_lcmed_out = needs_backwards_lcmed;
182182
return true;
183183
}
184184
//Check if it's multiple of the half of the alignment
185-
else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
185+
else if((backwards_multiple & ((alignment/2u) - 1u)) == 0){
186186
lcm_val = backwards_multiple*2u;
187187
current_forward = get_truncated_size(received_size, backwards_multiple);
188188
needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
189-
if(0 != (needs_backwards_lcmed & (Alignment-1)))
190-
//while(0 != (needs_backwards_lcmed & (Alignment-1)))
189+
if(0 != (needs_backwards_lcmed & (alignment-1)))
190+
//while(0 != (needs_backwards_lcmed & (alignment-1)))
191191
needs_backwards_lcmed += backwards_multiple;
192192
BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
193193
lcm_out = lcm_val;
194194
needs_backwards_lcmed_out = needs_backwards_lcmed;
195195
return true;
196196
}
197197
//Check if it's multiple of the quarter of the alignment
198-
else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
198+
else if((backwards_multiple & ((alignment/4u) - 1u)) == 0){
199199
size_type remainder;
200200
lcm_val = backwards_multiple*4u;
201201
current_forward = get_truncated_size(received_size, backwards_multiple);
202202
needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
203-
//while(0 != (needs_backwards_lcmed & (Alignment-1)))
203+
//while(0 != (needs_backwards_lcmed & (alignment-1)))
204204
//needs_backwards_lcmed += backwards_multiple;
205-
if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
206-
if(backwards_multiple & Alignment/2u){
205+
if(0 != (remainder = ((needs_backwards_lcmed & (alignment-1))>>(alignment/8u)))){
206+
if(backwards_multiple & alignment/2u){
207207
needs_backwards_lcmed += (remainder)*backwards_multiple;
208208
}
209209
else{
@@ -240,25 +240,118 @@ class memory_algorithm_common
240240
this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element, chain);
241241
}
242242

243-
static void* allocate_aligned
244-
(MemoryAlgorithm * const memory_algo, const size_type nbytes, const size_type alignment)
243+
static void* allocate_aligned(MemoryAlgorithm * const memory_algo, const size_type nbytes, const size_type alignment)
245244
{
246-
247245
//Ensure power of 2
248246
const bool alignment_ok = (alignment & (alignment - 1u)) == 0;
249-
if (!alignment_ok){
250-
//Alignment is not power of two
251-
BOOST_ASSERT(alignment_ok);
247+
BOOST_ASSERT(alignment_ok);
248+
if (BOOST_UNLIKELY(!alignment_ok)){
252249
return 0;
253250
}
254-
255-
if(alignment <= Alignment){
251+
else if(alignment <= Alignment){
256252
size_type real_size = nbytes;
257253
void *ignore_reuse = 0;
258-
return memory_algo->priv_allocate
259-
(boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
254+
return memory_algo->priv_allocate(boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
260255
}
256+
else {
257+
return priv_allocate_overaligned(memory_algo, nbytes, alignment);
258+
}
259+
}
260+
261+
static bool try_shrink
262+
(MemoryAlgorithm *memory_algo, void *ptr
263+
,const size_type max_size, size_type &received_size)
264+
{
265+
size_type const preferred_size = received_size;
266+
(void)memory_algo;
267+
//Obtain the real block
268+
block_ctrl *block = memory_algo->priv_get_block(ptr);
269+
size_type old_block_units = (size_type)block->m_size;
270+
271+
//The block must be marked as allocated
272+
BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));
273+
274+
//Check if alignment and block size are right
275+
assert_alignment(ptr);
276+
277+
//Put this to a safe value
278+
received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
279+
280+
//Now translate it to Alignment units
281+
const size_type max_user_units = floor_units(max_size - UsableByPreviousChunk);
282+
const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
283+
284+
//Check if rounded max and preferred are possible correct
285+
if(max_user_units < preferred_user_units)
286+
return false;
287+
288+
//Check if the block is smaller than the requested minimum
289+
size_type old_user_units = old_block_units - AllocatedCtrlUnits;
290+
291+
if(old_user_units < preferred_user_units)
292+
return false;
293+
294+
//If the block is smaller than the requested minimum
295+
if(old_user_units == preferred_user_units)
296+
return true;
297+
298+
size_type shrunk_user_units =
299+
((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
300+
? (BlockCtrlUnits - AllocatedCtrlUnits)
301+
: preferred_user_units;
302+
303+
//Some parameter checks
304+
if(max_user_units < shrunk_user_units)
305+
return false;
306+
307+
//We must be able to create at least a new empty block
308+
if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
309+
return false;
310+
}
311+
312+
//Update new size
313+
received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
314+
return true;
315+
}
316+
317+
static bool shrink
318+
(MemoryAlgorithm *memory_algo, void *ptr
319+
,const size_type max_size, size_type &received_size)
320+
{
321+
size_type const preferred_size = received_size;
322+
//Obtain the real block
323+
block_ctrl *block = memory_algo->priv_get_block(ptr);
324+
size_type old_block_units = (size_type)block->m_size;
325+
326+
if(!try_shrink(memory_algo, ptr, max_size, received_size)){
327+
return false;
328+
}
329+
330+
//Check if the old size was just the shrunk size (no splitting)
331+
if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
332+
return true;
333+
334+
//Now we can just rewrite the size of the old buffer
335+
block->m_size = ((received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits) & block_ctrl::size_mask;
336+
BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
337+
338+
//We create the new block
339+
block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
340+
(reinterpret_cast<char*>(block) + block->m_size*Alignment);
341+
//Write control data to simulate this new block was previously allocated
342+
//and deallocate it
343+
new_block->m_size = (old_block_units - block->m_size) & block_ctrl::size_mask;
344+
BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
345+
memory_algo->priv_mark_new_allocated_block(block);
346+
memory_algo->priv_mark_new_allocated_block(new_block);
347+
memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
348+
return true;
349+
}
261350

351+
private:
352+
static void* priv_allocate_overaligned
353+
(MemoryAlgorithm * const memory_algo, const size_type nbytes, const size_type alignment)
354+
{
262355
//To fulfill user's request we need at least min_user_units
263356
size_type needed_units = user_buffer_ceil_units(nbytes);
264357
//However, there is a minimum allocation unit count (BlockCtrlUnits) to be able to deallocate the buffer,
@@ -357,7 +450,7 @@ class memory_algorithm_common
357450
//new block in the end.
358451
const size_type orig_second_units = orig_first_units - final_first_units;
359452
const size_type second_min_units = max_value( size_type(BlockCtrlUnits)
360-
, user_buffer_ceil_units(nbytes) + AllocatedCtrlUnits );
453+
, user_buffer_ceil_units(nbytes) + AllocatedCtrlUnits );
361454

362455
//Check if we can create a new free block (of size BlockCtrlUnits) at the end of the segment
363456
if(orig_second_units >= (second_min_units + BlockCtrlUnits)){
@@ -390,97 +483,6 @@ class memory_algorithm_common
390483
return usr_buf;
391484
}
392485

393-
static bool try_shrink
394-
(MemoryAlgorithm *memory_algo, void *ptr
395-
,const size_type max_size, size_type &received_size)
396-
{
397-
size_type const preferred_size = received_size;
398-
(void)memory_algo;
399-
//Obtain the real block
400-
block_ctrl *block = memory_algo->priv_get_block(ptr);
401-
size_type old_block_units = (size_type)block->m_size;
402-
403-
//The block must be marked as allocated
404-
BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));
405-
406-
//Check if alignment and block size are right
407-
assert_alignment(ptr);
408-
409-
//Put this to a safe value
410-
received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
411-
412-
//Now translate it to Alignment units
413-
const size_type max_user_units = floor_units(max_size - UsableByPreviousChunk);
414-
const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
415-
416-
//Check if rounded max and preferred are possible correct
417-
if(max_user_units < preferred_user_units)
418-
return false;
419-
420-
//Check if the block is smaller than the requested minimum
421-
size_type old_user_units = old_block_units - AllocatedCtrlUnits;
422-
423-
if(old_user_units < preferred_user_units)
424-
return false;
425-
426-
//If the block is smaller than the requested minimum
427-
if(old_user_units == preferred_user_units)
428-
return true;
429-
430-
size_type shrunk_user_units =
431-
((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
432-
? (BlockCtrlUnits - AllocatedCtrlUnits)
433-
: preferred_user_units;
434-
435-
//Some parameter checks
436-
if(max_user_units < shrunk_user_units)
437-
return false;
438-
439-
//We must be able to create at least a new empty block
440-
if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
441-
return false;
442-
}
443-
444-
//Update new size
445-
received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
446-
return true;
447-
}
448-
449-
static bool shrink
450-
(MemoryAlgorithm *memory_algo, void *ptr
451-
,const size_type max_size, size_type &received_size)
452-
{
453-
size_type const preferred_size = received_size;
454-
//Obtain the real block
455-
block_ctrl *block = memory_algo->priv_get_block(ptr);
456-
size_type old_block_units = (size_type)block->m_size;
457-
458-
if(!try_shrink(memory_algo, ptr, max_size, received_size)){
459-
return false;
460-
}
461-
462-
//Check if the old size was just the shrunk size (no splitting)
463-
if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
464-
return true;
465-
466-
//Now we can just rewrite the size of the old buffer
467-
block->m_size = ((received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits) & block_ctrl::size_mask;
468-
BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
469-
470-
//We create the new block
471-
block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
472-
(reinterpret_cast<char*>(block) + block->m_size*Alignment);
473-
//Write control data to simulate this new block was previously allocated
474-
//and deallocate it
475-
new_block->m_size = (old_block_units - block->m_size) & block_ctrl::size_mask;
476-
BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
477-
memory_algo->priv_mark_new_allocated_block(block);
478-
memory_algo->priv_mark_new_allocated_block(new_block);
479-
memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
480-
return true;
481-
}
482-
483-
private:
484486
static void priv_allocate_many
485487
( MemoryAlgorithm *memory_algo
486488
, const size_type *elem_sizes

0 commit comments

Comments
 (0)