Add overalignment support for segment_manager allocation_command functions.
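Beyond the parameter renames, the commit threads a new alignof_object argument from segment_manager down to the memory algorithms, so allocation_command can serve requests whose alignment exceeds the algorithm's default Alignment. A minimal usage sketch, assuming the new segment_manager overload shown in this diff (the segment name and the sizes are made-up values, not part of the commit):

#include <boost/interprocess/managed_shared_memory.hpp>
#include <cassert>
#include <cstddef>

int main()
{
   namespace bip = boost::interprocess;
   typedef bip::managed_shared_memory::segment_manager segment_manager_t;

   bip::shared_memory_object::remove("OveralignDemo");   //hypothetical name
   bip::managed_shared_memory segment(bip::create_only, "OveralignDemo", 65536);
   segment_manager_t *sm = segment.get_segment_manager();

   //Limits are given in objects of sizeof_object bytes: request at least 4
   //and preferably 8 objects of 16 bytes, aligned to 64 bytes.
   segment_manager_t::size_type received = 8;
   void *reuse = 0;   //no previous buffer to expand
   void *p = sm->allocation_command
      ( bip::allocate_new | bip::nothrow_allocation
      , 4, received, reuse, 16 /*sizeof_object*/, 64 /*alignof_object*/);
   assert(!p || 0 == (reinterpret_cast<std::size_t>(p) & 63u));
   if(p)
      sm->deallocate(p);
   bip::shared_memory_object::remove("OveralignDemo");
   return 0;
}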
@@ -136,12 +136,12 @@ class memory_algorithm_common
    }

    static bool calculate_lcm_and_needs_backwards_lcmed
-      (size_type backwards_multiple, size_type received_size, size_type size_to_achieve,
+      (const size_type backwards_multiple, const size_type alignment, const size_type received_size, const size_type size_to_achieve,
       size_type &lcm_out, size_type &needs_backwards_lcmed_out)
    {
       // Now calculate lcm_val
       size_type max = backwards_multiple;
-      size_type min = Alignment;
+      size_type min = alignment;
       size_type needs_backwards;
       size_type needs_backwards_lcmed;
       size_type lcm_val;
@@ -171,23 +171,23 @@ class memory_algorithm_common
          return true;
       }
       //Check if it's multiple of alignment
-      else if((backwards_multiple & (Alignment - 1u)) == 0){
+      else if((backwards_multiple & (alignment - 1u)) == 0){
          lcm_val = backwards_multiple;
          current_forward = get_truncated_size(received_size, backwards_multiple);
          //No need to round needs_backwards because backwards_multiple == lcm_val
          needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
-         BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
+         BOOST_ASSERT((needs_backwards_lcmed & (alignment - 1u)) == 0);
          lcm_out = lcm_val;
          needs_backwards_lcmed_out = needs_backwards_lcmed;
          return true;
       }
       //Check if it's multiple of the half of the alignment
-      else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
+      else if((backwards_multiple & ((alignment/2u) - 1u)) == 0){
          lcm_val = backwards_multiple*2u;
          current_forward = get_truncated_size(received_size, backwards_multiple);
          needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
-         if(0 != (needs_backwards_lcmed & (Alignment-1)))
-         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
+         if(0 != (needs_backwards_lcmed & (alignment-1)))
+         //while(0 != (needs_backwards_lcmed & (alignment-1)))
             needs_backwards_lcmed += backwards_multiple;
          BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
          lcm_out = lcm_val;
@@ -195,15 +195,15 @@ class memory_algorithm_common
          return true;
       }
       //Check if it's multiple of the quarter of the alignment
-      else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
+      else if((backwards_multiple & ((alignment/4u) - 1u)) == 0){
          size_type remainder;
          lcm_val = backwards_multiple*4u;
          current_forward = get_truncated_size(received_size, backwards_multiple);
          needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
-         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
+         //while(0 != (needs_backwards_lcmed & (alignment-1)))
          //needs_backwards_lcmed += backwards_multiple;
-         if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
-            if(backwards_multiple & Alignment/2u){
+         if(0 != (remainder = ((needs_backwards_lcmed & (alignment-1))>>(alignment/8u)))){
+            if(backwards_multiple & alignment/2u){
               needs_backwards_lcmed += (remainder)*backwards_multiple;
            }
            else{
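The three branches above special-case a backwards_multiple that is a multiple of the (power-of-two) alignment, of half of it, or of a quarter of it; the least common multiple is then backwards_multiple itself, twice it, or four times it. A standalone sketch of that arithmetic, mine rather than the library's, assuming alignment is a power of two and at least 4:

#include <cassert>
#include <cstddef>

std::size_t lcm_with_pow2_alignment(std::size_t backwards_multiple, std::size_t alignment)
{
   assert(alignment >= 4u && (alignment & (alignment - 1u)) == 0);  //power of two
   if((backwards_multiple & (alignment - 1u)) == 0)          //multiple of alignment
      return backwards_multiple;
   else if((backwards_multiple & (alignment/2u - 1u)) == 0)  //multiple of alignment/2
      return backwards_multiple*2u;
   else if((backwards_multiple & (alignment/4u - 1u)) == 0)  //multiple of alignment/4
      return backwards_multiple*4u;
   return 0;   //remaining cases take the function's general path
}

For example, with alignment 64: lcm(32, 64) = 64 = 32*2 (half branch) and lcm(48, 64) = 192 = 48*4 (quarter branch).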
@@ -240,25 +240,118 @@ class memory_algorithm_common
       this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element, chain);
    }

-   static void* allocate_aligned
-      (MemoryAlgorithm * const memory_algo, const size_type nbytes, const size_type alignment)
+   static void* allocate_aligned(MemoryAlgorithm * const memory_algo, const size_type nbytes, const size_type alignment)
    {
       //Ensure power of 2
       const bool alignment_ok = (alignment & (alignment - 1u)) == 0;
-      if (!alignment_ok){
-         //Alignment is not power of two
+      //Alignment is not power of two
+      BOOST_ASSERT(alignment_ok);
+      if (BOOST_UNLIKELY(!alignment_ok)){
          return 0;
       }
-
-      if(alignment <= Alignment){
+      else if(alignment <= Alignment){
          size_type real_size = nbytes;
         void *ignore_reuse = 0;
-         return memory_algo->priv_allocate
-            (boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
+         return memory_algo->priv_allocate(boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
       }
+      else {
+         return priv_allocate_overaligned(memory_algo, nbytes, alignment);
+      }
    }

+   static bool try_shrink
+      (MemoryAlgorithm *memory_algo, void *ptr
+      ,const size_type max_size, size_type &received_size)
+   {
+      size_type const preferred_size = received_size;
+      (void)memory_algo;
+      //Obtain the real block
+      block_ctrl *block = memory_algo->priv_get_block(ptr);
+      size_type old_block_units = (size_type)block->m_size;
+
+      //The block must be marked as allocated
+      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));
+
+      //Check if alignment and block size are right
+      assert_alignment(ptr);
+
+      //Put this to a safe value
+      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
+
+      //Now translate it to Alignment units
+      const size_type max_user_units       = floor_units(max_size - UsableByPreviousChunk);
+      const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
+
+      //Check if rounded max and preferred are possibly correct
+      if(max_user_units < preferred_user_units)
+         return false;
+
+      //Check if the block is smaller than the requested minimum
+      size_type old_user_units = old_block_units - AllocatedCtrlUnits;
+
+      if(old_user_units < preferred_user_units)
+         return false;
+
+      //If the block is smaller than the requested minimum
+      if(old_user_units == preferred_user_units)
+         return true;
+
+      size_type shrunk_user_units =
+         ((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
+         ? (BlockCtrlUnits - AllocatedCtrlUnits)
+         : preferred_user_units;
+
+      //Some parameter checks
+      if(max_user_units < shrunk_user_units)
+         return false;
+
+      //We must be able to create at least a new empty block
+      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
+         return false;
+      }
+
+      //Update new size
+      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
+      return true;
+   }
+
+   static bool shrink
+      (MemoryAlgorithm *memory_algo, void *ptr
+      ,const size_type max_size, size_type &received_size)
+   {
+      size_type const preferred_size = received_size;
+      //Obtain the real block
+      block_ctrl *block = memory_algo->priv_get_block(ptr);
+      size_type old_block_units = (size_type)block->m_size;
+
+      if(!try_shrink(memory_algo, ptr, max_size, received_size)){
+         return false;
+      }
+
+      //Check if the old size was just the shrunk size (no splitting)
+      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
+         return true;
+
+      //Now we can just rewrite the size of the old buffer
+      block->m_size = ((received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits) & block_ctrl::size_mask;
+      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
+
+      //We create the new block
+      block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
+         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
+      //Write control data to simulate this new block was previously allocated
+      //and deallocate it
+      new_block->m_size = (old_block_units - block->m_size) & block_ctrl::size_mask;
+      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
+      memory_algo->priv_mark_new_allocated_block(block);
+      memory_algo->priv_mark_new_allocated_block(new_block);
+      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
+      return true;
+   }
+
+   private:
+   static void* priv_allocate_overaligned
+      (MemoryAlgorithm * const memory_algo, const size_type nbytes, const size_type alignment)
+   {
+      //To fulfill user's request we need at least min_user_units
+      size_type needed_units = user_buffer_ceil_units(nbytes);
+      //However, there is a minimum allocation unit count (BlockCtrlUnits) to be able to deallocate the buffer,
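The rewritten allocate_aligned now asserts the power-of-two precondition, keeps the fast path for alignment <= Alignment (blocks returned by the algorithms are always Alignment-aligned), and routes larger alignments to the new priv_allocate_overaligned. For reference, a sketch of the bit test it relies on (not from the commit): a value is a power of two exactly when clearing its lowest set bit leaves zero.

static_assert((64u & (64u - 1u)) == 0, "64 is a power of two");
static_assert((48u & (48u - 1u)) != 0, "48 = 32 + 16 is not");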
@@ -390,97 +483,6 @@ class memory_algorithm_common
       return usr_buf;
    }

-   static bool try_shrink
-      (MemoryAlgorithm *memory_algo, void *ptr
-      ,const size_type max_size, size_type &received_size)
-   {
-      size_type const preferred_size = received_size;
-      (void)memory_algo;
-      //Obtain the real block
-      block_ctrl *block = memory_algo->priv_get_block(ptr);
-      size_type old_block_units = (size_type)block->m_size;
-
-      //The block must be marked as allocated
-      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));
-
-      //Check if alignment and block size are right
-      assert_alignment(ptr);
-
-      //Put this to a safe value
-      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
-
-      //Now translate it to Alignment units
-      const size_type max_user_units       = floor_units(max_size - UsableByPreviousChunk);
-      const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
-
-      //Check if rounded max and preferred are possibly correct
-      if(max_user_units < preferred_user_units)
-         return false;
-
-      //Check if the block is smaller than the requested minimum
-      size_type old_user_units = old_block_units - AllocatedCtrlUnits;
-
-      if(old_user_units < preferred_user_units)
-         return false;
-
-      //If the block is smaller than the requested minimum
-      if(old_user_units == preferred_user_units)
-         return true;
-
-      size_type shrunk_user_units =
-         ((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
-         ? (BlockCtrlUnits - AllocatedCtrlUnits)
-         : preferred_user_units;
-
-      //Some parameter checks
-      if(max_user_units < shrunk_user_units)
-         return false;
-
-      //We must be able to create at least a new empty block
-      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
-         return false;
-      }
-
-      //Update new size
-      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
-      return true;
-   }
-
-   static bool shrink
-      (MemoryAlgorithm *memory_algo, void *ptr
-      ,const size_type max_size, size_type &received_size)
-   {
-      size_type const preferred_size = received_size;
-      //Obtain the real block
-      block_ctrl *block = memory_algo->priv_get_block(ptr);
-      size_type old_block_units = (size_type)block->m_size;
-
-      if(!try_shrink(memory_algo, ptr, max_size, received_size)){
-         return false;
-      }
-
-      //Check if the old size was just the shrunk size (no splitting)
-      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
-         return true;
-
-      //Now we can just rewrite the size of the old buffer
-      block->m_size = ((received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits) & block_ctrl::size_mask;
-      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
-
-      //We create the new block
-      block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
-         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
-      //Write control data to simulate this new block was previously allocated
-      //and deallocate it
-      new_block->m_size = (old_block_units - block->m_size) & block_ctrl::size_mask;
-      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
-      memory_algo->priv_mark_new_allocated_block(block);
-      memory_algo->priv_mark_new_allocated_block(new_block);
-      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
-      return true;
-   }

    private:
    static void priv_allocate_many
       ( MemoryAlgorithm *memory_algo
       , const size_type *elem_sizes
@@ -147,14 +147,13 @@ class simple_seq_fit_impl

    #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

-   template<class T>
-   BOOST_INTERPROCESS_NODISCARD
-   T *allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                          size_type &prefer_in_recvd_out_size, T *&reuse);
-
-   BOOST_INTERPROCESS_NODISCARD
-   void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                          size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object = 1);
+   void * allocation_command(boost::interprocess::allocation_type command
+      ,size_type min_size
+      ,size_type &prefer_in_recvd_out_size
+      ,void *&reuse_ptr
+      ,size_type sizeof_object
+      ,size_type alignof_object
+      );

    //!Multiple element allocation, same size
    //!Experimental. Don't use
@@ -230,13 +229,7 @@ class simple_seq_fit_impl
    //!Real allocation algorithm with min allocation option
    void * priv_allocate(boost::interprocess::allocation_type command
       ,size_type min_size
-      ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr);
-
-   void * priv_allocation_command(boost::interprocess::allocation_type command
-      ,size_type min_size
-      ,size_type &prefer_in_recvd_out_size
-      ,void *&reuse_ptr
-      ,size_type sizeof_object);
+      ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type alignof_object = Alignment);

    //!Returns the number of total units that a user buffer
    //!of "userbytes" bytes really occupies (including header)
@@ -259,15 +252,6 @@ class simple_seq_fit_impl
    //!Real expand function implementation
    bool priv_expand(void *ptr, size_type min_size, size_type &prefer_in_recvd_out_size);

-   //!Real expand to both sides implementation
-   void* priv_expand_both_sides(boost::interprocess::allocation_type command
-      ,size_type min_size, size_type &prefer_in_recvd_out_size
-      ,void *reuse_ptr
-      ,bool only_preferred_backwards);
-
-   //!Real private aligned allocation function
-   //void* priv_allocate_aligned (size_type nbytes, size_type alignment);
-
    //!Checks if block has enough memory and splits/unlinks the block
    //!returning the address to the users
    void* priv_check_and_allocate(size_type units
@@ -593,53 +577,11 @@ inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
 }

 template<class MutexFamily, class VoidPointer>
-template<class T>
-inline T* simple_seq_fit_impl<MutexFamily, VoidPointer>::
-   allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                       size_type &prefer_in_recvd_out_size, T *&reuse_ptr)
-{
-   void *raw_reuse = reuse_ptr;
-   void * const ret = priv_allocation_command
-      (command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
-   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
-   reuse_ptr = static_cast<T*>(raw_reuse);
-   return static_cast<T*>(ret);
-}
-
-template<class MutexFamily, class VoidPointer>
-inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
-   raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
-                       size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
-{
-   size_type const preferred_objects = prefer_in_recvd_out_size;
-   if(!sizeof_object){
-      return reuse_ptr = 0, static_cast<void*>(0);
-   }
-   if(command & boost::interprocess::try_shrink_in_place){
-      if(!reuse_ptr) return static_cast<void*>(0);
-      prefer_in_recvd_out_size = preferred_objects*sizeof_object;
-      bool success = algo_impl_t::try_shrink
-         ( this, reuse_ptr, limit_objects*sizeof_object, prefer_in_recvd_out_size);
-      prefer_in_recvd_out_size /= sizeof_object;
-      return success ? reuse_ptr : 0;
-   }
-   else{
-      return priv_allocation_command
-         (command, limit_objects, prefer_in_recvd_out_size, reuse_ptr, sizeof_object);
-   }
-}
-
-template<class MutexFamily, class VoidPointer>
-inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
-   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                       size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
+inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
+   allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
+                       size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object, size_type /*alignof_object*/)
 {
    size_type const preferred_size = prefer_in_recvd_out_size;
    command &= ~boost::interprocess::expand_bwd;
    if(!command){
       return reuse_ptr = 0, static_cast<void*>(0);
    }

    size_type max_count = m_header.m_size/sizeof_object;
    if(limit_size > max_count || preferred_size > max_count){
       return reuse_ptr = 0, static_cast<void*>(0);
@@ -668,85 +610,6 @@ simple_seq_fit_impl<MutexFamily, VoidPointer>::size(const void *ptr) const
    return block->get_user_bytes();
 }

-template<class MutexFamily, class VoidPointer>
-void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
-   priv_expand_both_sides(boost::interprocess::allocation_type command
-                         ,size_type min_size
-                         ,size_type &prefer_in_recvd_out_size
-                         ,void *reuse_ptr
-                         ,bool only_preferred_backwards)
-{
-   size_type const preferred_size = prefer_in_recvd_out_size;
-   typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
-   block_ctrl *reuse = priv_get_block(reuse_ptr);
-   prefer_in_recvd_out_size = 0;
-
-   if(this->size(reuse_ptr) > min_size){
-      prefer_in_recvd_out_size = this->size(reuse_ptr);
-      return reuse_ptr;
-   }
-
-   if(command & boost::interprocess::expand_fwd){
-      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
-         return reuse_ptr;
-   }
-   else{
-      prefer_in_recvd_out_size = this->size(reuse_ptr);
-   }
-   if(command & boost::interprocess::expand_bwd){
-      size_type extra_forward = !prefer_in_recvd_out_size ? 0 : prefer_in_recvd_out_size + BlockCtrlBytes;
-      prev_block_t prev_pair = priv_prev_block_if_free(reuse);
-      block_ctrl *prev = prev_pair.second;
-      if(!prev){
-         return 0;
-      }
-
-      size_type needs_backwards =
-         ipcdetail::get_rounded_size(preferred_size - extra_forward, Alignment);
-
-      if(!only_preferred_backwards){
-         needs_backwards =
-            max_value(ipcdetail::get_rounded_size(min_size - extra_forward, Alignment)
-                     ,min_value(prev->get_user_bytes(), needs_backwards));
-      }
-
-      //Check if previous block has enough size
-      if((prev->get_user_bytes()) >= needs_backwards){
-         //Now take all next space. This will succeed
-         if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, prefer_in_recvd_out_size)){
-            BOOST_ASSERT(0);
-         }
-
-         //We need a minimum size to split the previous one
-         if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
-            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
-               (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);
-
-            new_block->m_next = 0;
-            new_block->m_size =
-               BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
-            prev->m_size =
-               (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
-            prefer_in_recvd_out_size = needs_backwards + extra_forward;
-            m_header.m_allocated += needs_backwards + BlockCtrlBytes;
-            return priv_get_user_buffer(new_block);
-         }
-         else{
-            //Just merge the whole previous block
-            block_ctrl *prev_2_block = prev_pair.first;
-            //Update received size and allocation
-            prefer_in_recvd_out_size = extra_forward + prev->get_user_bytes();
-            m_header.m_allocated += prev->get_total_bytes();
-            //Now unlink it from previous block
-            prev_2_block->m_next = prev->m_next;
-            prev->m_size = reuse->m_size + prev->m_size;
-            prev->m_next = 0;
-            return priv_get_user_buffer(prev);
-         }
-      }
-   }
-   return 0;
-}

 template<class MutexFamily, class VoidPointer>
 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
    deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain &chain)
@@ -772,8 +635,17 @@ simple_seq_fit_impl<MutexFamily, VoidPointer>::
 template<class MutexFamily, class VoidPointer>
 void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
    priv_allocate(boost::interprocess::allocation_type command
-                ,size_type limit_size, size_type &prefer_in_recvd_out_size, void *&reuse_ptr)
+                ,size_type limit_size, size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type alignof_object)
 {
+   //Backwards expansion not supported
    command &= ~boost::interprocess::expand_bwd;
    if(!command){
       return reuse_ptr = 0, static_cast<void*>(0);
    }

+   if(alignof_object < Alignment)
+      alignof_object = Alignment;
+
    size_type const preferred_size = prefer_in_recvd_out_size;
    if(command & boost::interprocess::shrink_in_place){
       if(!reuse_ptr) return static_cast<void*>(0);
@@ -798,15 +670,17 @@ void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
    size_type biggest_size = 0;

    //Expand in place
-   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
-      void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, true);
-      if(ret){
-         algo_impl_t::assert_alignment(ret);
-         return ret;
-      }
+   if( reuse_ptr && (command & boost::interprocess::expand_fwd) &&
+       priv_expand(reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size) ){
+      return reuse_ptr;
    }

    if(command & boost::interprocess::allocate_new){
+      if (alignof_object > Alignment) {
+         return algo_impl_t::allocate_aligned(this, limit_size, alignof_object);
+      }
+      else {
       prefer_in_recvd_out_size = 0;
       while(block != root){
          //Update biggest block pointers
@@ -825,6 +699,7 @@ void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
          prev  = block;
          block = ipcdetail::to_raw_pointer(block->m_next);
       }
+      }

    //Bad luck finding preferred_size, now if we have any biggest_block
    //try with this block
@@ -840,12 +715,6 @@ void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
          return reuse_ptr = 0, ret;
       }
    }
-   //Now try to expand both sides with min size
-   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
-      void *ret = priv_expand_both_sides (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false);
-      algo_impl_t::assert_alignment(ret);
-      return ret;
-   }
    return reuse_ptr = 0, static_cast<void*>(0);
 }
@@ -921,6 +790,11 @@ template<class MutexFamily, class VoidPointer>
 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
    priv_expand (void *ptr, size_type min_size, size_type &received_size)
 {
+   if(this->size(ptr) > min_size){
+      received_size = this->size(ptr);
+      return ptr;
+   }
+
    size_type preferred_size = received_size;
    //Obtain the real size of the block
    block_ctrl *block = move_detail::force_ptr<block_ctrl*>(priv_get_block(ptr));
@@ -226,16 +226,6 @@ class rbtree_best_fit
    //!Experimental. Don't use
    void deallocate_many(multiallocation_chain &chain);

-   template<class T>
-   BOOST_INTERPROCESS_NODISCARD
-   T* allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
-                         size_type& prefer_in_recvd_out_size, T*& reuse);
-
-   BOOST_INTERPROCESS_NODISCARD
-   void* raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
-                         size_type& prefer_in_recvd_out_size,
-                         void*& reuse_ptr, size_type sizeof_object = 1);
-
    #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

    //!Returns the size of the memory segment
@@ -275,6 +265,12 @@ class rbtree_best_fit
    void* allocate_aligned (size_type nbytes, size_type alignment);

+   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
+
+   void* allocation_command ( boost::interprocess::allocation_type command, size_type limit_size
+                            , size_type &prefer_in_recvd_out_size, void *&reuse_ptr
+                            , size_type sizeof_object, size_type alignof_object
+                            );
+
    private:
    static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);
@@ -282,14 +278,10 @@ class rbtree_best_fit

    block_ctrl *priv_end_block();

-   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
-                         size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);
-
    //!Real allocation algorithm with min allocation option
    void * priv_allocate( boost::interprocess::allocation_type command
                        , size_type limit_size, size_type &prefer_in_recvd_out_size
-                       , void *&reuse_ptr, size_type backwards_multiple = 1);
+                       , void *&reuse_ptr, size_type sizeof_object = 1, size_type alignof_object = Alignment);

    //!Obtains the block control structure of the user buffer
    static block_ctrl *priv_get_block(const void *ptr);
@@ -310,7 +302,8 @@ class rbtree_best_fit
                          ,size_type &prefer_in_recvd_out_size
                          ,void *reuse_ptr
                          ,bool only_preferred_backwards
-                         ,size_type backwards_multiple);
+                         ,size_type sizeof_object
+                         ,size_type alignof_object);

    //!Returns true if the previous block is allocated
    bool priv_is_prev_allocated(block_ctrl *ptr);
@@ -707,47 +700,14 @@ inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    return algo_impl_t::allocate_aligned(this, nbytes, alignment);
 }

 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
-template<class T>
-inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
-   allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                       size_type &prefer_in_recvd_out_size, T *&reuse)
-{
-   void* raw_reuse = reuse;
-   void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
-   reuse = static_cast<T*>(raw_reuse);
-   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
-   return static_cast<T*>(ret);
-}
-
-template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
-inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
-   raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
-                       size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
-{
-   size_type const preferred_objects = prefer_in_recvd_out_objects;
-   if(!sizeof_object)
-      return reuse_ptr = 0, static_cast<void*>(0);
-   if(command & boost::interprocess::try_shrink_in_place){
-      if(!reuse_ptr) return static_cast<void*>(0);
-      const bool success = algo_impl_t::try_shrink
-         ( this, reuse_ptr, limit_objects*sizeof_object
-         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
-      prefer_in_recvd_out_objects /= sizeof_object;
-      return success ? reuse_ptr : 0;
-   }
-   else{
-      return priv_allocation_command
-         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
-   }
-}
-
-template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
-inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
-   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                       size_type &prefer_in_recvd_out_size,
-                       void *&reuse_ptr, size_type sizeof_object)
+inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
+   allocation_command ( boost::interprocess::allocation_type command
+                      , size_type limit_size
+                      , size_type &prefer_in_recvd_out_size
+                      , void *&reuse_ptr
+                      , size_type sizeof_object
+                      , size_type alignof_object )
 {
    void* ret;
    size_type const preferred_size = prefer_in_recvd_out_size;
@@ -758,11 +718,12 @@ inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    size_type l_size = limit_size*sizeof_object;
    size_type p_size = preferred_size*sizeof_object;
    size_type r_size;

    {
       //-----------------------
       boost::interprocess::scoped_lock<mutex_type> guard(m_header);
       //-----------------------
-      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
+      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object, alignof_object);
    }
    prefer_in_recvd_out_size = r_size/sizeof_object;
    return ret;
@@ -811,7 +772,8 @@ void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
                       ,size_type &prefer_in_recvd_out_size
                       ,void *reuse_ptr
                       ,bool only_preferred_backwards
-                      ,size_type backwards_multiple)
+                      ,size_type sizeof_object
+                      ,size_type alignof_object)
 {
    size_type const preferred_size = prefer_in_recvd_out_size;
    algo_impl_t::assert_alignment(reuse_ptr);
@@ -825,9 +787,9 @@ void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
       return reuse_ptr;
    }

-   if(backwards_multiple){
-      BOOST_ASSERT(0 == (min_size % backwards_multiple));
-      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
+   if(sizeof_object){
+      BOOST_ASSERT(0 == (min_size % sizeof_object));
+      BOOST_ASSERT(0 == (preferred_size % sizeof_object));
    }

    if(command & boost::interprocess::expand_bwd){
@@ -854,7 +816,8 @@ void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
       size_type needs_backwards_aligned;
       size_type lcm;
       if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
-         ( backwards_multiple
+         ( sizeof_object
+         , alignof_object
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
@@ -910,7 +873,7 @@ void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
          //first bytes, fill them with a pattern
          void *p = priv_get_user_buffer(new_block);
          void *user_ptr = reinterpret_cast<char*>(p);
-         BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
+         BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % sizeof_object == 0);
          algo_impl_t::assert_alignment(user_ptr);
          return user_ptr;
       }
@@ -922,7 +885,7 @@ void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
          m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

          //Just merge the whole previous block
-         //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
+         //prev_block->m_size*Alignment is multiple of lcm (and sizeof_object)
          prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

          m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
@@ -934,7 +897,7 @@ void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
          //If the backwards expansion has remaining bytes in the
          //first bytes, fill them with a pattern
          void *user_ptr = priv_get_user_buffer(prev_block);
-         BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
+         BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % sizeof_object == 0);
          algo_impl_t::assert_alignment(user_ptr);
          return user_ptr;
       }
@@ -958,12 +921,16 @@ inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::

 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
 void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
-   priv_allocate(boost::interprocess::allocation_type command
-                ,size_type limit_size
-                ,size_type &prefer_in_recvd_out_size
-                ,void *&reuse_ptr
-                ,size_type backwards_multiple)
+   priv_allocate( boost::interprocess::allocation_type command
+                , size_type limit_size
+                , size_type &prefer_in_recvd_out_size
+                , void *&reuse_ptr
+                , size_type sizeof_object
+                , size_type alignof_object )
 {
+   if(alignof_object < Alignment)
+      alignof_object = Alignment;
+
    size_type const preferred_size = prefer_in_recvd_out_size;
    if(command & boost::interprocess::shrink_in_place){
       if(!reuse_ptr) return static_cast<void*>(0);
@@ -987,12 +954,16 @@ void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    prefer_in_recvd_out_size = preferred_size;
    if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
       void *ret = priv_expand_both_sides
-         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
+         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, sizeof_object, alignof_object);
       if(ret)
          return ret;
    }

    if(command & boost::interprocess::allocate_new){
+      if (alignof_object > Alignment) {
+         return algo_impl_t::allocate_aligned(this, limit_size, alignof_object);
+      }
+      else {
       size_block_ctrl_compare comp;
       imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
@@ -1007,12 +978,18 @@ void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
             (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
       }
    }
+   }

    //Now try to expand both sides with min size
    if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
       return priv_expand_both_sides
-         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
+         ( command, limit_size
+         , prefer_in_recvd_out_size = preferred_size
+         , reuse_ptr
+         , false
+         , sizeof_object
+         , alignof_object);
    }
    return reuse_ptr = 0, static_cast<void*>(0);
 }
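Note the unit convention that survives the parameter rename: rbtree_best_fit::allocation_command takes its limits in objects while the algorithm works in bytes, multiplying by sizeof_object on entry (the l_size/p_size lines earlier in this section) and dividing the received size on exit. A trivial sketch of that convention, for illustration only (these helpers are not library code):

#include <cstddef>

//Object-count <-> byte scaling as done around priv_allocate above.
inline std::size_t objects_to_bytes(std::size_t objects, std::size_t sizeof_object)
{  return objects*sizeof_object;  }

inline std::size_t bytes_to_objects(std::size_t bytes, std::size_t sizeof_object)
{  return bytes/sizeof_object;  }

//e.g. with sizeof_object == 8, a limit of 4 objects enters as 32 bytes and a
//received 96 bytes is reported back as 12 objects.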
@@ -105,7 +105,7 @@ class segment_manager_base
    segment_manager_base(size_type sz, size_type reserved_bytes)
       :  MemoryAlgorithm(sz, reserved_bytes)
    {
-      BOOST_ASSERT((sizeof(segment_manager_base<MemoryAlgorithm>) == sizeof(MemoryAlgorithm)));
+      BOOST_INTERPROCESS_STATIC_ASSERT((sizeof(segment_manager_base<MemoryAlgorithm>) == sizeof(MemoryAlgorithm)));
    }

    //!Returns the size of the memory
@@ -128,63 +128,6 @@ class segment_manager_base
    void * allocate (size_type nbytes, const std::nothrow_t &)
    {  return MemoryAlgorithm::allocate(nbytes);  }

-   //!Returns a reference to the internal memory algorithm.
-   //!This function is useful for custom memory algorithms that
-   //!need additional configuration options after construction. Never throws.
-   //!This function should be only used by advanced users.
-   MemoryAlgorithm &get_memory_algorithm()
-   {  return static_cast<MemoryAlgorithm&>(*this);   }
-
-   //!Returns a const reference to the internal memory algorithm.
-   //!This function is useful for custom memory algorithms that
-   //!need additional configuration options after construction. Never throws.
-   //!This function should be only used by advanced users.
-   const MemoryAlgorithm &get_memory_algorithm() const
-   {  return static_cast<const MemoryAlgorithm&>(*this);   }
-
-   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
-
-   //Experimental. Dont' use.
-   //!Allocates n_elements of elem_bytes bytes.
-   //!Throws bad_alloc on failure. chain.size() is not increased on failure.
-   void allocate_many(size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
-   {
-      size_type prev_size = chain.size();
-      MemoryAlgorithm::allocate_many(elem_bytes, n_elements, chain);
-      if(!elem_bytes || chain.size() == prev_size){
-         throw bad_alloc();
-      }
-   }
-
-   //!Allocates n_elements, each one of element_lengths[i]*sizeof_element bytes.
-   //!Throws bad_alloc on failure. chain.size() is not increased on failure.
-   void allocate_many(const size_type *element_lengths, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
-   {
-      size_type prev_size = chain.size();
-      MemoryAlgorithm::allocate_many(element_lengths, n_elements, sizeof_element, chain);
-      if(!sizeof_element || chain.size() == prev_size){
-         throw bad_alloc();
-      }
-   }
-
-   //!Allocates n_elements of elem_bytes bytes.
-   //!Non-throwing version. chain.size() is not increased on failure.
-   void allocate_many(const std::nothrow_t &, size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
-   {  MemoryAlgorithm::allocate_many(elem_bytes, n_elements, chain);  }
-
-   //!Allocates n_elements, each one of
-   //!element_lengths[i]*sizeof_element bytes.
-   //!Non-throwing version. chain.size() is not increased on failure.
-   void allocate_many(const std::nothrow_t &, const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
-   {  MemoryAlgorithm::allocate_many(elem_sizes, n_elements, sizeof_element, chain);  }
-
-   //!Deallocates all elements contained in chain.
-   //!Never throws.
-   void deallocate_many(multiallocation_chain &chain)
-   {  MemoryAlgorithm::deallocate_many(chain);  }
-
-   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
-
    //!Allocates nbytes bytes. Throws boost::interprocess::bad_alloc
    //!on failure
    void * allocate(size_type nbytes)
@@ -210,33 +153,7 @@ class segment_manager_base
       return ret;
    }

-   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
-
-   template<class T>
-   T *allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
-                          size_type &prefer_in_recvd_out_size, T *&reuse)
-   {
-      T *ret = MemoryAlgorithm::allocation_command
-         (command | boost::interprocess::nothrow_allocation, limit_size, prefer_in_recvd_out_size, reuse);
-      if(!(command & boost::interprocess::nothrow_allocation) && !ret)
-         throw bad_alloc();
-      return ret;
-   }
-
-   void *raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
-                          size_type &prefer_in_recvd_out_size, void *&reuse, size_type sizeof_object = 1)
-   {
-      void *ret = MemoryAlgorithm::raw_allocation_command
-         ( command | boost::interprocess::nothrow_allocation, limit_objects,
-           prefer_in_recvd_out_size, reuse, sizeof_object);
-      if(!(command & boost::interprocess::nothrow_allocation) && !ret)
-         throw bad_alloc();
-      return ret;
-   }
-
-   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
-
-   //!Deallocates the bytes allocated with allocate/allocate_many()
+   //!Deallocates the bytes allocated with allocate/allocate_aligned()
    //!pointed by addr
    void deallocate (void *addr)
    {  MemoryAlgorithm::deallocate(addr);  }
@@ -273,6 +190,91 @@ class segment_manager_base
    //!Returns the size of the buffer previously allocated pointed by ptr
    size_type size(const void *ptr) const
    {  return MemoryAlgorithm::size(ptr);  }

+   //!Returns a reference to the internal memory algorithm.
+   //!This function is useful for custom memory algorithms that
+   //!need additional configuration options after construction. Never throws.
+   //!This function should be only used by advanced users.
+   MemoryAlgorithm &get_memory_algorithm()
+   {  return static_cast<MemoryAlgorithm&>(*this);   }
+
+   //!Returns a const reference to the internal memory algorithm.
+   //!This function is useful for custom memory algorithms that
+   //!need additional configuration options after construction. Never throws.
+   //!This function should be only used by advanced users.
+   const MemoryAlgorithm &get_memory_algorithm() const
+   {  return static_cast<const MemoryAlgorithm&>(*this);   }
+
+   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
+
+   //Experimental. Don't use.
+   //!Allocates n_elements of elem_bytes bytes.
+   //!Throws bad_alloc on failure. chain.size() is not increased on failure.
+   void allocate_many(size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
+   {
+      size_type prev_size = chain.size();
+      MemoryAlgorithm::allocate_many(elem_bytes, n_elements, chain);
+      if(!elem_bytes || chain.size() == prev_size){
+         throw bad_alloc();
+      }
+   }
+
+   //Experimental. Don't use.
+   //!Allocates n_elements, each one of element_lengths[i]*sizeof_element bytes.
+   //!Throws bad_alloc on failure. chain.size() is not increased on failure.
+   void allocate_many(const size_type *element_lengths, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
+   {
+      size_type prev_size = chain.size();
+      MemoryAlgorithm::allocate_many(element_lengths, n_elements, sizeof_element, chain);
+      if(!sizeof_element || chain.size() == prev_size){
+         throw bad_alloc();
+      }
+   }
+
+   //Experimental. Don't use.
+   //!Allocates n_elements of elem_bytes bytes.
+   //!Non-throwing version. chain.size() is not increased on failure.
+   void allocate_many(const std::nothrow_t &, size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
+   {  MemoryAlgorithm::allocate_many(elem_bytes, n_elements, chain);  }
+
+   //Experimental. Don't use.
+   //!Allocates n_elements, each one of
+   //!element_lengths[i]*sizeof_element bytes.
+   //!Non-throwing version. chain.size() is not increased on failure.
+   void allocate_many(const std::nothrow_t &, const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
+   {  MemoryAlgorithm::allocate_many(elem_sizes, n_elements, sizeof_element, chain);  }
+
+   //Experimental. Don't use.
+   //!Deallocates all elements contained in chain.
+   //!Never throws.
+   void deallocate_many(multiallocation_chain &chain)
+   {  MemoryAlgorithm::deallocate_many(chain);  }
+
+   //Experimental. Don't use.
+   template<class T>
+   T *allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
+                          size_type &prefer_in_recvd_out_size, T *&reuse)
+   {
+      void* raw_reuse = reuse;
+      const size_type al = ::boost::container::dtl::alignment_of<T>::value;
+      void* const ret = this->allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T), al);
+      reuse = static_cast<T*>(raw_reuse);
+      BOOST_ASSERT(0 == ((std::size_t)ret % al));
+      return static_cast<T*>(ret);
+   }
+
+   //Experimental. Don't use.
+   void *allocation_command ( boost::interprocess::allocation_type command, size_type limit_size
+                            , size_type &prefer_in_recvd_out_size, void *&reuse, size_type sizeof_object, size_type alignof_object )
+   {
+      void *ret = MemoryAlgorithm::allocation_command
+         (command | boost::interprocess::nothrow_allocation, limit_size, prefer_in_recvd_out_size, reuse, sizeof_object, alignof_object);
+      if(!(command & boost::interprocess::nothrow_allocation) && !ret)
+         throw bad_alloc();
+      return ret;
+   }
+
+   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
 };

 //!This object is placed in the beginning of memory segment and
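The template allocation_command above now derives the alignment from ::boost::container::dtl::alignment_of<T> and forwards it as alignof_object, so typed commands on overaligned types receive correctly aligned storage. A hedged sketch of calling it; grow_in_place and the cacheline type are illustrative, not part of the commit:

#include <boost/interprocess/managed_shared_memory.hpp>

//An overaligned element type used for illustration (C++11 alignas).
struct alignas(64) cacheline { unsigned char bytes[64]; };

//Try to expand an existing buffer of 'objects' cachelines in place,
//without throwing; returns 0 if the buffer cannot be expanded.
template<class SegmentManager>
cacheline *grow_in_place(SegmentManager *sm, cacheline *buf
                        ,typename SegmentManager::size_type &objects)
{
   cacheline *reuse = buf;
   return sm->template allocation_command<cacheline>
      ( boost::interprocess::expand_fwd | boost::interprocess::nothrow_allocation
      , objects, objects, reuse);
}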
@@ -149,9 +149,13 @@ bool test_allocation_expand(SegMngr &sm)
 {
    std::vector<void*> buffers;

+   //We will repeat this test for different sized elements and alignments
+   for(std::size_t al = 1; al <= SegMngr::MemAlignment*32u; al *= 2u) {
    //Allocate buffers with extra memory
    for(std::size_t i = 0; true; ++i){
-      void *ptr = sm.allocate(i, std::nothrow);
+      void *ptr = al > SegMngr::MemAlignment
+                ? sm.allocate_aligned(i, al, std::nothrow)
+                : sm.allocate(i, std::nothrow);
       if(!ptr)
          break;
       std::size_t size = sm.size(ptr);
@@ -190,6 +194,7 @@ bool test_allocation_expand(SegMngr &sm)
       sm.deallocate(buffers[pos]);
       buffers.erase(buffers.begin()+std::ptrdiff_t(pos));
    }
+   }

    return sm.all_memory_deallocated() && sm.check_sanity();
 }
@@ -351,13 +356,16 @@ bool test_allocation_deallocation_expand(SegMngr &sm)
 template<class SegMngr>
 bool test_allocation_with_reuse(SegMngr &sm)
 {
-   //We will repeat this test for different sized elements
+   //We will repeat this test for different sized elements and alignments
+   for(std::size_t al = 1; al <= SegMngr::MemAlignment*32u; al *= 2u)
    for(std::size_t sizeof_object = 1; sizeof_object < 20u; ++sizeof_object){
       std::vector<void*> buffers;

       //Allocate buffers with extra memory
       for(std::size_t i = 0; true; ++i){
-         void *ptr = sm.allocate(i*sizeof_object, std::nothrow);
+         void *ptr = al > SegMngr::MemAlignment
+                   ? sm.allocate_aligned(i*sizeof_object, al, std::nothrow)
+                   : sm.allocate(i*sizeof_object, std::nothrow);
          if(!ptr)
            break;
          std::size_t size = sm.size(ptr);
@@ -379,16 +387,17 @@ bool test_allocation_with_reuse(SegMngr &sm)

       //Now allocate with reuse
       typename SegMngr::size_type received_size = 0;
-      // for(std::size_t al = 0; al <= 512u; ++h){
       for(std::size_t i = 0; true; ++i){
          std::size_t min_size = (received_size + 1);
          std::size_t prf_size = (received_size + (i+1)*2);
          void *reuse = ptr;
-         void *ret = sm.raw_allocation_command
+         void *ret = sm.allocation_command
            ( boost::interprocess::expand_bwd | boost::interprocess::nothrow_allocation, min_size
-            , received_size = prf_size, reuse, sizeof_object/*, alignof_object*/);
+            , received_size = prf_size, reuse, sizeof_object, al);
         if(!ret)
            break;
+         if(((std::size_t)ret & (al - 1)) != 0)
+            return 1;
          //If we have memory, this must be a buffer reuse
          if(!reuse)
            return 1;
@@ -992,15 +1001,6 @@ bool test_all_allocation(SegMngr &sm)
       return false;
    }

-   std::cout << "Starting test_allocation_with_reuse. Class: "
-             << typeid(sm).name() << std::endl;
-
-   if(!test_allocation_with_reuse(sm)){
-      std::cout << "test_allocation_with_reuse failed. Class: "
-                << typeid(sm).name() << std::endl;
-      return false;
-   }
-
    std::cout << "Starting test_aligned_allocation. Class: "
              << typeid(sm).name() << std::endl;
@@ -1019,6 +1019,15 @@ bool test_all_allocation(SegMngr &sm)
       return false;
    }

+   std::cout << "Starting test_allocation_with_reuse. Class: "
+             << typeid(sm).name() << std::endl;
+
+   if(!test_allocation_with_reuse(sm)){
+      std::cout << "test_allocation_with_reuse failed. Class: "
+                << typeid(sm).name() << std::endl;
+      return false;
+   }
+
    std::cout << "Starting test_clear_free_memory. Class: "
              << typeid(sm).name() << std::endl;