Updated Interprocess and Intrusive:

-> Added linear slist to intrusive
-> Updated all allocators to version 2 allocators in Interprocess
-> Optimized rbtree_best_fit size overhead to 1 std::size_t.

[SVN r42878]
This commit is contained in:
Ion Gaztañaga
2008-01-20 11:54:47 +00:00
parent 715769871f
commit 1a240759d3
127 changed files with 4675 additions and 2294 deletions

View File

@@ -26,6 +26,7 @@ doxygen autodoc
<doxygen:param>HIDE_UNDOC_MEMBERS=YES
<doxygen:param>EXTRACT_PRIVATE=NO
<doxygen:param>EXPAND_ONLY_PREDEF=YES
<doxygen:param>PREDEFINED=BOOST_INTERPROCESS_DOXYGEN_INVOKED
<xsl:param>"boost.doxygen.reftitle=Boost.Interprocess Reference"
;

View File

@@ -6418,7 +6418,7 @@ warranty.
[classref boost::interprocess::named_mutex named_mutex].
* Reduced template bloat for node and adaptive allocators extracting node
implementation to a class taht only depends on the memory algorithm, instead of
implementation to a class that only depends on the memory algorithm, instead of
the segment manager + node size + node number...
* Fixed bug in `mapped_region` in UNIX when mapping address was provided but

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -25,6 +25,7 @@
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/allocators/detail/adaptive_node_pool.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/allocators/detail/allocator_common.hpp>
#include <memory>
#include <algorithm>
#include <cstddef>
@@ -35,35 +36,38 @@
namespace boost {
namespace interprocess {
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This node allocator shares a segregated storage between all instances
//!of adaptive_pool with equal sizeof(T) placed in the same segment
//!group. NodesPerChunk is the number of nodes allocated at once when the allocator
//!needs runs out of nodes. MaxFreeChunks is the number of free nodes
//!in the adaptive node pool that will trigger the deallocation of
template<class T, class SegmentManager, std::size_t NodesPerChunk, std::size_t MaxFreeChunks>
class adaptive_pool
/// @cond
namespace detail{
template < unsigned int Version
, class T
, class SegmentManager
, std::size_t NodesPerChunk
, std::size_t MaxFreeChunks
, unsigned char OverheadPercent
>
class adaptive_pool_base
: public node_pool_allocation_impl
< adaptive_pool_base
< Version, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent>
, Version
, T
, SegmentManager
>
{
public:
typedef typename SegmentManager::void_pointer void_pointer;
typedef typename detail::
pointer_to_other<void_pointer, const void>::type cvoid_pointer;
typedef SegmentManager segment_manager;
typedef typename detail::
pointer_to_other<void_pointer, char>::type char_pointer;
typedef typename SegmentManager::
mutex_family::mutex_type mutex_type;
typedef adaptive_pool
<T, SegmentManager, NodesPerChunk, MaxFreeChunks> self_t;
typedef adaptive_pool_base
<Version, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> self_t;
typedef detail::shared_adaptive_node_pool
< SegmentManager, mutex_type
, sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
< SegmentManager, sizeof(T), NodesPerChunk, MaxFreeChunks, OverheadPercent> node_pool_t;
typedef typename detail::
pointer_to_other<void_pointer, node_pool_t>::type node_pool_ptr;
BOOST_STATIC_ASSERT((Version <=2));
public:
//-------
typedef typename detail::
@@ -78,52 +82,60 @@ class adaptive_pool
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
//!Obtains adaptive_pool from
//!adaptive_pool
typedef detail::version_type<adaptive_pool_base, Version> version;
typedef transform_iterator
< typename SegmentManager::
multiallocation_iterator
, detail::cast_functor <T> > multiallocation_iterator;
typedef typename SegmentManager::
multiallocation_chain multiallocation_chain;
//!Obtains adaptive_pool_base from
//!adaptive_pool_base
template<class T2>
struct rebind
{
typedef adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> other;
typedef adaptive_pool_base<Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
/// @cond
private:
//!Not assignable from related adaptive_pool
template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2>
adaptive_pool& operator=
(const adaptive_pool<T2, SegmentManager2, N2, F2>&);
//!Not assignable from related adaptive_pool_base
template<unsigned int Version2, class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char O2>
adaptive_pool_base& operator=
(const adaptive_pool_base<Version2, T2, SegmentManager2, N2, F2, O2>&);
//!Not assignable from other adaptive_pool
adaptive_pool& operator=(const adaptive_pool&);
//!Not assignable from other adaptive_pool_base
adaptive_pool_base& operator=(const adaptive_pool_base&);
/// @endcond
public:
//!Constructor from a segment manager. If not present, constructs a node
//!pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
adaptive_pool(segment_manager *segment_mngr)
: mp_node_pool(priv_get_or_create(segment_mngr)) { }
adaptive_pool_base(segment_manager *segment_mngr)
: mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(segment_mngr)) { }
//!Copy constructor from other adaptive_pool. Increments the reference
//!Copy constructor from other adaptive_pool_base. Increments the reference
//!count of the associated node pool. Never throws
adaptive_pool(const adaptive_pool &other)
adaptive_pool_base(const adaptive_pool_base &other)
: mp_node_pool(other.get_node_pool())
{
mp_node_pool->inc_ref_count();
}
//!Copy constructor from related adaptive_pool. If not present, constructs
//!Copy constructor from related adaptive_pool_base. If not present, constructs
//!a node pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
template<class T2>
adaptive_pool
(const adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> &other)
: mp_node_pool(priv_get_or_create(other.get_segment_manager())) { }
adaptive_pool_base
(const adaptive_pool_base<Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
: mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(other.get_segment_manager())) { }
//!Destructor, removes node_pool_t from memory
//!if its reference count reaches to zero. Never throws
~adaptive_pool()
{ priv_destroy_if_last_link(); }
~adaptive_pool_base()
{ detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool)); }
//!Returns a pointer to the node pool.
//!Never throws
@@ -135,156 +147,300 @@ class adaptive_pool
segment_manager* get_segment_manager()const
{ return mp_node_pool->get_segment_manager(); }
//!Returns the number of elements that could be allocated.
//!Never throws
size_type max_size() const
{ return this->get_segment_manager()->get_size()/sizeof(value_type); }
//!Allocate memory for an array of count elements.
//!Throws boost::interprocess::bad_alloc if there is not enough memory
pointer allocate(size_type count, cvoid_pointer = 0)
{
if(count > ((size_type)-1)/sizeof(value_type))
throw bad_alloc();
return pointer(static_cast<T*>(mp_node_pool->allocate(count)));
}
//!Deallocate allocated memory.
//!Never throws
void deallocate(const pointer &ptr, size_type count)
{ mp_node_pool->deallocate(detail::get_pointer(ptr), count); }
//!Deallocates all free chunks of the pool
void deallocate_free_chunks()
{ mp_node_pool->deallocate_free_chunks(); }
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different memory segment, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2)
{ detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); }
//These functions are obsolete. These are here to conserve
//backwards compatibility with containers using them...
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const
{ return pointer(boost::addressof(value)); }
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const
{ return const_pointer(boost::addressof(value)); }
//!Default construct an object.
//!Throws if T's default constructor throws
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr)
{ BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
/// @cond
private:
//!Object function that creates the node allocator if it is not created and
//!increments reference count if it is already created
struct get_or_create_func
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
//!This connects or constructs the unique instance of node_pool_t
//!Can throw boost::interprocess::bad_alloc
void operator()()
{
//Find or create the node_pool_t
mp_node_pool = mp_named_alloc->template find_or_construct
<node_pool_t>(unique_instance)(mp_named_alloc);
//If valid, increment link count
if(mp_node_pool != 0)
mp_node_pool->inc_ref_count();
}
//!Constructor. Initializes function
//!object parameters
get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
node_pool_t *mp_node_pool;
segment_manager *mp_named_alloc;
};
//!Initialization function, creates an executes atomically the
//!initialization object functions. Can throw boost::interprocess::bad_alloc
node_pool_t *priv_get_or_create(segment_manager *named_alloc)
{
get_or_create_func func(named_alloc);
named_alloc->atomic_func(func);
return func.mp_node_pool;
}
//!Object function that decrements the reference count. If the count
//!reaches to zero destroys the node allocator from memory.
//!Never throws
struct destroy_if_last_link_func
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
//!Decrements reference count and destroys the object if there is no
//!more attached allocators. Never throws
void operator()()
{
//If not the last link return
if(mp_node_pool->dec_ref_count() != 0) return;
//Last link, let's destroy the segment_manager
mp_named_alloc->template destroy<node_pool_t>(unique_instance);
}
//!Constructor. Initializes function
//!object parameters
destroy_if_last_link_func(segment_manager *nhdr,
node_pool_t *phdr)
: mp_named_alloc(nhdr), mp_node_pool(phdr){}
segment_manager *mp_named_alloc;
node_pool_t *mp_node_pool;
};
//!Destruction function, initializes and executes destruction function
//!object. Never throws
void priv_destroy_if_last_link()
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
//Get segment manager
segment_manager *named_segment_mngr = this->get_segment_manager();
//Execute destruction functor atomically
destroy_if_last_link_func func(named_segment_mngr, detail::get_pointer(mp_node_pool));
named_segment_mngr->atomic_func(func);
}
private:
node_pool_ptr mp_node_pool;
/// @endcond
};
//!Equality test for same type
//!of adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
bool operator==(const adaptive_pool<T, S, NodesPerChunk, F> &alloc1,
const adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
//!of adaptive_pool_base
template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator==(const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1,
const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
{ return alloc1.get_node_pool() == alloc2.get_node_pool(); }
//!Inequality test for same type
//!of adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
bool operator!=(const adaptive_pool<T, S, NodesPerChunk, F> &alloc1,
const adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
//!of adaptive_pool_base
template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1,
const adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
{ return alloc1.get_node_pool() != alloc2.get_node_pool(); }
template < class T
, class SegmentManager
, std::size_t NodesPerChunk = 64
, std::size_t MaxFreeChunks = 2
, unsigned char OverheadPercent = 5
>
class adaptive_pool_v1
: public adaptive_pool_base
< 1
, T
, SegmentManager
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
>
{
public:
typedef detail::adaptive_pool_base
< 1, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
template<class T2>
struct rebind
{
typedef adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
adaptive_pool_v1(SegmentManager *segment_mngr)
: base_t(segment_mngr)
{}
template<class T2>
adaptive_pool_v1
(const adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
: base_t(other)
{}
};
} //namespace detail{
/// @endcond
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will be of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!
//!This node allocator shares a segregated storage between all instances
//!of adaptive_pool with equal sizeof(T) placed in the same segment
//!group. NodesPerChunk is the number of nodes allocated at once when the allocator
//!runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks
//!that the adaptive node pool will hold. The rest of the totally free chunks will be
//!deallocated with the segment manager.
//!
//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator:
//!(memory usable for nodes / total memory allocated from the segment manager)
template < class T
, class SegmentManager
, std::size_t NodesPerChunk
, std::size_t MaxFreeChunks
, unsigned char OverheadPercent
>
class adaptive_pool
/// @cond
: public detail::adaptive_pool_base
< 2
, T
, SegmentManager
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
>
/// @endcond
{
#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
typedef detail::adaptive_pool_base
< 2, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
public:
typedef detail::version_type<adaptive_pool, 2> version;
template<class T2>
struct rebind
{
typedef adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
adaptive_pool(SegmentManager *segment_mngr)
: base_t(segment_mngr)
{}
template<class T2>
adaptive_pool
(const adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
: base_t(other)
{}
#else //BOOST_INTERPROCESS_DOXYGEN_INVOKED
public:
typedef implementation_defined::segment_manager segment_manager;
typedef segment_manager::void_pointer void_pointer;
typedef implementation_defined::pointer pointer;
typedef implementation_defined::const_pointer const_pointer;
typedef T value_type;
typedef typename detail::add_reference
<value_type>::type reference;
typedef typename detail::add_reference
<const value_type>::type const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
//!Obtains adaptive_pool from
//!adaptive_pool
template<class T2>
struct rebind
{
typedef adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
private:
//!Not assignable from
//!related adaptive_pool
template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char OP2>
adaptive_pool& operator=
(const adaptive_pool<T2, SegmentManager2, N2, F2, OP2>&);
//!Not assignable from
//!other adaptive_pool
adaptive_pool& operator=(const adaptive_pool&);
public:
//!Constructor from a segment manager. If not present, constructs a node
//!pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
adaptive_pool(segment_manager *segment_mngr);
//!Copy constructor from other adaptive_pool. Increments the reference
//!count of the associated node pool. Never throws
adaptive_pool(const adaptive_pool &other);
//!Copy constructor from related adaptive_pool. If not present, constructs
//!a node pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
template<class T2>
adaptive_pool
(const adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other);
//!Destructor, removes node_pool_t from memory
//!if its reference count reaches to zero. Never throws
~adaptive_pool();
//!Returns a pointer to the node pool.
//!Never throws
node_pool_t* get_node_pool() const;
//!Returns the segment manager.
//!Never throws
segment_manager* get_segment_manager()const;
//!Returns the number of elements that could be allocated.
//!Never throws
size_type max_size() const;
//!Allocate memory for an array of count elements.
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate(size_type count, cvoid_pointer hint = 0);
//!Deallocate allocated memory.
//!Never throws
void deallocate(const pointer &ptr, size_type count);
//!Deallocates all free chunks
//!of the pool
void deallocate_free_chunks();
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different memory segment, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2);
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const;
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const;
//!Default construct an object.
//!Throws if T's default constructor throws
void construct(const pointer &ptr);
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr);
//!Returns maximum the number of objects the previously allocated memory
//!pointed by p can hold. This size only works for memory allocated with
//!allocate, allocation_command and allocate_many.
size_type size(const pointer &p) const;
std::pair<pointer, bool>
allocation_command(allocation_type command,
size_type limit_size,
size_type preferred_size,
size_type &received_size, const pointer &reuse = 0);
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
//!Allocates n_elements elements, each one of size elem_sizes[i]in a
//!contiguous chunk
//!of memory. The elements must be deallocated
multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
void deallocate_many(multiallocation_iterator it);
//!Allocates just one object. Memory allocated with this function
//!must be deallocated only with deallocate_one().
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate_one();
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
multiallocation_iterator allocate_individual(std::size_t num_elements);
//!Deallocates memory previously allocated with allocate_one().
//!You should never use deallocate_one to deallocate memory allocated
//!with other functions different from allocate_one(). Never throws
void deallocate_one(const pointer &p);
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
void deallocate_individual(multiallocation_iterator it);
#endif
};
#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!Equality test for same type
//!of adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator==(const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1,
const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
//!Inequality test for same type
//!of adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1,
const adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
#endif
} //namespace interprocess {
} //namespace boost {

View File

@@ -1,6 +1,6 @@
///////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
///////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -51,13 +51,6 @@ class allocator
/// @cond
private:
struct cast_functor
{
typedef typename detail::add_reference<T>::type result_type;
result_type operator()(char &ptr) const
{ return *static_cast<T*>(static_cast<void*>(&ptr)); }
};
//Self type
typedef allocator<T, SegmentManager> self_t;
@@ -108,7 +101,9 @@ class allocator
typedef transform_iterator
< typename SegmentManager::
multiallocation_iterator
, cast_functor> multiallocation_iterator;
, detail::cast_functor <T> > multiallocation_iterator;
typedef typename SegmentManager::
multiallocation_chain multiallocation_chain;
/// @endcond
@@ -146,7 +141,7 @@ class allocator
pointer allocate(size_type count, cvoid_ptr hint = 0)
{
(void)hint;
if(count > ((size_type)-1)/sizeof(T))
if(count > this->max_size())
throw bad_alloc();
return pointer((value_type*)mp_mngr->allocate(count*sizeof(T)));
}
@@ -166,7 +161,13 @@ class allocator
friend void swap(self_t &alloc1, self_t &alloc2)
{ detail::do_swap(alloc1.mp_mngr, alloc2.mp_mngr); }
//Experimental version 2 allocator functions
//!Returns maximum the number of objects the previously allocated memory
//!pointed by p can hold. This size only works for memory allocated with
//!allocate, allocation_command and allocate_many.
size_type size(const pointer &p) const
{
return (size_type)mp_mngr->size(detail::get_pointer(p))/sizeof(T);
}
std::pair<pointer, bool>
allocation_command(allocation_type command,
@@ -178,42 +179,6 @@ class allocator
(command, limit_size, preferred_size, received_size, detail::get_pointer(reuse));
}
//!Returns maximum the number of objects the previously allocated memory
//!pointed by p can hold.
size_type size(const pointer &p) const
{
return (size_type)mp_mngr->size(detail::get_pointer(p))/sizeof(T);
}
//!Allocates just one object. Memory allocated with this function
//!must be deallocated only with deallocate_one().
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate_one()
{ return this->allocate(1); }
/// @cond
//Experimental. Don't use.
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
multiallocation_iterator allocate_individual(std::size_t num_elements)
{ return this->allocate_many(1, num_elements); }
/// @endcond
//!Deallocates memory previously allocated with allocate_one().
//!You should never use deallocate_one to deallocate memory allocated
//!with other functions different from allocate_one(). Never throws
void deallocate_one(const pointer &p)
{ return this->deallocate(p, 1); }
/// @cond
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
@@ -235,10 +200,44 @@ class allocator
(mp_mngr->allocate_many(elem_sizes, n_elements, sizeof(T)));
}
/// @endcond
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
void deallocate_many(multiallocation_iterator it)
{ return mp_mngr->deallocate_many(it.base()); }
//These functions are obsolete. These are here to conserve
//backwards compatibility with containers using them...
//!Allocates just one object. Memory allocated with this function
//!must be deallocated only with deallocate_one().
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate_one()
{ return this->allocate(1); }
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
multiallocation_iterator allocate_individual(std::size_t num_elements)
{ return this->allocate_many(1, num_elements); }
//!Deallocates memory previously allocated with allocate_one().
//!You should never use deallocate_one to deallocate memory allocated
//!with other functions different from allocate_one(). Never throws
void deallocate_one(const pointer &p)
{ return this->deallocate(p, 1); }
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
void deallocate_individual(multiallocation_iterator it)
{ return this->deallocate_many(it); }
//!Returns address of mutable object.
//!Never throws
@@ -251,7 +250,7 @@ class allocator
{ return const_pointer(boost::addressof(value)); }
//!Default construct an object.
//!Throws if T's default constructor throws*/
//!Throws if T's default constructor throws
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -19,56 +19,155 @@
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/assert.hpp>
#include <boost/utility/addressof.hpp>
#include <boost/interprocess/allocators/detail/adaptive_node_pool.hpp>
#include <boost/interprocess/allocators/detail/allocator_common.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/version_type.hpp>
#include <boost/interprocess/allocators/detail/node_tools.hpp>
#include <memory>
#include <algorithm>
#include <cstddef>
//!\file
//!Describes cached_cached_node_allocator pooled shared memory STL compatible allocator
//!Describes cached_adaptive_pool pooled shared memory STL compatible allocator
namespace boost {
namespace interprocess {
/// @cond
namespace detail {
template < class T
, class SegmentManager
, std::size_t NodesPerChunk = 64
, std::size_t MaxFreeChunks = 2
, unsigned char OverheadPercent = 5
>
class cached_adaptive_pool_v1
: public detail::cached_allocator_impl
< T
, detail::shared_adaptive_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
>
, 1>
{
public:
typedef detail::cached_allocator_impl
< T
, detail::shared_adaptive_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
>
, 1> base_t;
template<class T2>
struct rebind
{
typedef cached_adaptive_pool_v1
<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
cached_adaptive_pool_v1(SegmentManager *segment_mngr,
std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES)
: base_t(segment_mngr, max_cached_nodes)
{}
template<class T2>
cached_adaptive_pool_v1
(const cached_adaptive_pool_v1
<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
: base_t(other)
{}
};
} //namespace detail{
/// @endcond
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will be of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!
//!This node allocator shares a segregated storage between all instances of
//!cached_adaptive_pool with equal sizeof(T) placed in the same fixed size
//!cached_adaptive_pool with equal sizeof(T) placed in the same
//!memory segment. But also caches some nodes privately to
//!avoid some synchronization overhead.
template<class T, class SegmentManager, std::size_t NodesPerChunk, std::size_t MaxFreeChunks>
//!
//!NodesPerChunk is the minimum number of nodes allocated at once when
//!the allocator runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks
//!that the adaptive node pool will hold. The rest of the totally free chunks will be
//!deallocated with the segment manager.
//!
//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator:
//!(memory usable for nodes / total memory allocated from the segment manager)
template < class T
, class SegmentManager
, std::size_t NodesPerChunk
, std::size_t MaxFreeChunks
, unsigned char OverheadPercent
>
class cached_adaptive_pool
{
/// @cond
typedef typename SegmentManager::void_pointer void_pointer;
typedef typename detail::
pointer_to_other<void_pointer, const void>::type cvoid_pointer;
typedef SegmentManager segment_manager;
typedef typename detail::
pointer_to_other<void_pointer, char>::type char_pointer;
typedef typename SegmentManager::mutex_family::mutex_type mutex_type;
typedef cached_adaptive_pool
<T, SegmentManager, NodesPerChunk, MaxFreeChunks> self_t;
enum { DEFAULT_MAX_CACHED_NODES = 64 };
typedef typename detail::node_slist<void_pointer>::node_t node_t;
typedef typename detail::node_slist<void_pointer>::node_slist_t cached_list_t;
: public detail::cached_allocator_impl
< T
, detail::shared_adaptive_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
>
, 2>
/// @endcond
{
#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
public:
typedef detail::cached_allocator_impl
< T
, detail::shared_adaptive_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
>
, 2> base_t;
public:
//-------
typedef typename detail::
pointer_to_other<void_pointer, T>::type pointer;
typedef typename detail::
pointer_to_other<void_pointer, const T>::type const_pointer;
typedef detail::version_type<cached_adaptive_pool, 2> version;
template<class T2>
struct rebind
{
typedef cached_adaptive_pool
<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
cached_adaptive_pool(SegmentManager *segment_mngr,
std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES)
: base_t(segment_mngr, max_cached_nodes)
{}
template<class T2>
cached_adaptive_pool
(const cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
: base_t(other)
{}
#else
public:
typedef implementation_defined::segment_manager segment_manager;
typedef segment_manager::void_pointer void_pointer;
typedef implementation_defined::pointer pointer;
typedef implementation_defined::const_pointer const_pointer;
typedef T value_type;
typedef typename detail::add_reference
<value_type>::type reference;
@@ -76,312 +175,178 @@ class cached_adaptive_pool
<const value_type>::type const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef detail::shared_adaptive_node_pool
< SegmentManager, mutex_type
, sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
typedef typename detail::
pointer_to_other<void_pointer, node_pool_t>::type node_pool_ptr;
//!Obtains cached_adaptive_pool from other
//!Obtains cached_adaptive_pool from
//!cached_adaptive_pool
template<class T2>
struct rebind
{
typedef cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> other;
typedef cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
/// @cond
private:
//!Not assignable from related cached_adaptive_pool
template<class T2, class SegmentManager2, std::size_t N2, std::size_t MaxFreeChunks2>
//!Not assignable from
//!related cached_adaptive_pool
template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char OP2>
cached_adaptive_pool& operator=
(const cached_adaptive_pool<T2, SegmentManager2, N2, MaxFreeChunks2>&);
(const cached_adaptive_pool<T2, SegmentManager2, N2, F2, OP2>&);
//!Not assignable from other cached_adaptive_pool
//!Not assignable from
//!other cached_adaptive_pool
cached_adaptive_pool& operator=(const cached_adaptive_pool&);
/// @endcond
public:
//!Constructor from a segment manager. If not present, constructs
//!a node pool. Increments the reference count of the node pool.
//!Can throw boost::interprocess::bad_alloc
cached_adaptive_pool(segment_manager *segment_mngr,
std::size_t max_cached_nodes = DEFAULT_MAX_CACHED_NODES)
: mp_node_pool(priv_get_or_create(segment_mngr)),
m_max_cached_nodes(max_cached_nodes)
{}
//!Copy constructor from other cached_adaptive_pool. Increments the
//!reference count of the associated node pool. Never throws
cached_adaptive_pool(const cached_adaptive_pool &other)
: mp_node_pool(other.get_node_pool()),
m_max_cached_nodes(other.get_max_cached_nodes())
{ mp_node_pool->inc_ref_count(); }
public:
//!Constructor from a segment manager. If not present, constructs a node
//!pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
cached_adaptive_pool(segment_manager *segment_mngr);
//!Copy constructor from other cached_adaptive_pool. Increments the reference
//!count of the associated node pool. Never throws
cached_adaptive_pool(const cached_adaptive_pool &other);
//!Copy constructor from related cached_adaptive_pool. If not present, constructs
//!a node pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
template<class T2>
cached_adaptive_pool
(const cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> &other)
: mp_node_pool(priv_get_or_create(other.get_segment_manager())),
m_max_cached_nodes(other.get_max_cached_nodes())
{ }
(const cached_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other);
//!Destructor, removes node_pool_t from memory
//!if its reference count reaches to zero. Never throws
~cached_adaptive_pool()
{
priv_deallocate_all_cached_nodes();
priv_destroy_if_last_link();
}
~cached_adaptive_pool();
//!Returns a pointer to the node pool.
//!Never throws
node_pool_t* get_node_pool() const
{ return detail::get_pointer(mp_node_pool); }
node_pool_t* get_node_pool() const;
//!Returns the segment manager.
//!Never throws
segment_manager* get_segment_manager()const
{ return mp_node_pool->get_segment_manager(); }
segment_manager* get_segment_manager()const;
//!Sets the new max cached nodes value. This can provoke deallocations
//!if "newmax" is less than current cached nodes. Never throws
void set_max_cached_nodes(std::size_t newmax)
{
m_max_cached_nodes = newmax;
priv_deallocate_remaining_nodes();
}
//!Returns the max cached nodes parameter.
//!Returns the number of elements that could be allocated.
//!Never throws
std::size_t get_max_cached_nodes() const
{ return m_max_cached_nodes; }
//!Returns the number of elements that could be
//!allocated. Never throws
size_type max_size() const
{ return this->get_segment_manager()->get_size()/sizeof(value_type); }
size_type max_size() const;
//!Allocate memory for an array of count elements.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
pointer allocate(size_type count, cvoid_pointer hint = 0)
{
(void)hint;
if(count > ((size_type)-1)/sizeof(value_type))
throw bad_alloc();
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
void * ret;
if(count == 1){
//If don't have any cached node, we have to get a new list of free nodes from the pool
if(m_cached_nodes.empty()){
mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes);
}
ret = &m_cached_nodes.front();
m_cached_nodes.pop_front();
}
else{
ret = mp_node_pool->allocate(count);
}
return pointer(static_cast<T*>(ret));
}
pointer allocate(size_type count, cvoid_pointer hint = 0);
//!Deallocate allocated memory. Never throws
void deallocate(const pointer &ptr, size_type count)
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
//!Deallocate allocated memory.
//!Never throws
void deallocate(const pointer &ptr, size_type count);
if(count == 1){
//Check if cache is full
if(m_cached_nodes.size() >= m_max_cached_nodes){
//This only occurs if this allocator deallocate memory allocated
//with other equal allocator. Since the cache is full, and more
//deallocations are probably coming, we'll make some room in cache
//in a single, efficient multi node deallocation.
priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
}
m_cached_nodes.push_front(*(node_t*)detail::char_ptr_cast(detail::get_pointer(ptr)));
}
else{
mp_node_pool->deallocate(detail::get_pointer(ptr), count);
}
}
//!Deallocates all free chunks of the pool
void deallocate_free_chunks()
{ mp_node_pool->deallocate_free_chunks(); }
//!Deallocates all free chunks
//!of the pool
void deallocate_free_chunks();
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different shared memory segments, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2)
{
detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);
alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes);
detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes);
}
void deallocate_cache()
{ this->priv_deallocate_all_cached_nodes(); }
//These functions are obsolete. These are here to conserve
//backwards compatibility with containers using them...
//!different memory segment, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2);
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const
{ return pointer(boost::addressof(value)); }
pointer address(reference value) const;
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const
{ return const_pointer(boost::addressof(value)); }
const_pointer address(const_reference value) const;
//!Default construct an object.
   //!Throws if T's default constructor throws
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }
//!Throws if T's default constructor throws
void construct(const pointer &ptr);
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr)
{ BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
void destroy(const pointer &ptr);
/// @cond
private:
   //!Returns the maximum number of objects the previously allocated memory
//!pointed by p can hold. This size only works for memory allocated with
//!allocate, allocation_command and allocate_many.
size_type size(const pointer &p) const;
//!Object function that creates the node allocator if it is not created and
//!increments reference count if it is already created
struct get_or_create_func
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
std::pair<pointer, bool>
allocation_command(allocation_type command,
size_type limit_size,
size_type preferred_size,
size_type &received_size, const pointer &reuse = 0);
//!This connects or constructs the unique instance of node_pool_t
//!Can throw boost::interprocess::bad_alloc
void operator()()
{
//Find or create the node_pool_t
mp_node_pool = mp_named_alloc->template find_or_construct
<node_pool_t>(unique_instance)(mp_named_alloc);
//If valid, increment link count
if(mp_node_pool != 0)
mp_node_pool->inc_ref_count();
}
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
   //!preferred_elements. The number of actually allocated elements
   //!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
//!Constructor. Initializes function
//!object parameters
get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
node_pool_t *mp_node_pool;
segment_manager *mp_named_alloc;
};
   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
//!contiguous chunk
//!of memory. The elements must be deallocated
multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
//!Frees all cached nodes.
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
void deallocate_many(multiallocation_iterator it);
//!Allocates just one object. Memory allocated with this function
//!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
pointer allocate_one();
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
multiallocation_iterator allocate_individual(std::size_t num_elements);
//!Deallocates memory previously allocated with allocate_one().
//!You should never use deallocate_one to deallocate memory allocated
//!with other functions different from allocate_one(). Never throws
void deallocate_one(const pointer &p);
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
void deallocate_individual(multiallocation_iterator it);
//!Sets the new max cached nodes value. This can provoke deallocations
//!if "newmax" is less than current cached nodes. Never throws
void set_max_cached_nodes(std::size_t newmax);
//!Returns the max cached nodes parameter.
//!Never throws
void priv_deallocate_all_cached_nodes()
{
if(m_cached_nodes.empty()) return;
mp_node_pool->deallocate_nodes(m_cached_nodes);
}
//!Frees all cached nodes at once.
//!Never throws
void priv_deallocate_remaining_nodes()
{
if(m_cached_nodes.size() > m_max_cached_nodes){
priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
}
}
//!Frees n cached nodes at once. Never throws
void priv_deallocate_n_nodes(std::size_t n)
{
//Deallocate all new linked list at once
mp_node_pool->deallocate_nodes(m_cached_nodes, n);
}
//!Initialization function, creates an executes atomically the
//!initialization object functions. Can throw boost::interprocess::bad_alloc
node_pool_t *priv_get_or_create(segment_manager *named_alloc)
{
get_or_create_func func(named_alloc);
named_alloc->atomic_func(func);
return func.mp_node_pool;
}
//!Object function that decrements the reference count. If the count
//!reaches to zero destroys the node allocator from memory.
//!Never throws
struct destroy_if_last_link_func
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
//!Decrements reference count and destroys the object if there is no
//!more attached allocators. Never throws
void operator()()
{
//If not the last link return
if(mp_node_pool->dec_ref_count() != 0) return;
//Last link, let's destroy the segment_manager
mp_named_alloc->template destroy<node_pool_t>(unique_instance);
}
//!Constructor. Initializes function
//!object parameters
destroy_if_last_link_func(segment_manager *nhdr,
node_pool_t *phdr)
: mp_named_alloc(nhdr), mp_node_pool(phdr){}
segment_manager *mp_named_alloc;
node_pool_t *mp_node_pool;
};
//!Destruction function, initializes and executes destruction function
//!object. Never throws
void priv_destroy_if_last_link()
{
typedef detail::shared_adaptive_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t;
//Get segment manager
segment_manager *segment_mngr = this->get_segment_manager();
//Execute destruction functor atomically
destroy_if_last_link_func func(segment_mngr, detail::get_pointer(mp_node_pool));
segment_mngr->atomic_func(func);
}
private:
node_pool_ptr mp_node_pool;
cached_list_t m_cached_nodes;
std::size_t m_max_cached_nodes;
/// @endcond
std::size_t get_max_cached_nodes() const;
#endif
};
//!Equality test for same type of
//!cached_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t M> inline
bool operator==(const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc1,
const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc2)
{ return alloc1.get_node_pool() == alloc2.get_node_pool(); }
#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!Inequality test for same type of
//!cached_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t M> inline
bool operator!=(const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc1,
const cached_adaptive_pool<T, S, NodesPerChunk, M> &alloc2)
{ return alloc1.get_node_pool() != alloc2.get_node_pool(); }
//!Equality test for same type
//!of cached_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, std::size_t OP> inline
bool operator==(const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1,
const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
//!Inequality test for same type
//!of cached_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, std::size_t OP> inline
bool operator!=(const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1,
const cached_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
#endif
} //namespace interprocess {
} //namespace boost {
#include <boost/interprocess/detail/config_end.hpp>
#endif //#ifndef BOOST_INTERPROCESS_CACHED_ADAPTIVE_POOL_HPP

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -19,14 +19,11 @@
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/assert.hpp>
#include <boost/utility/addressof.hpp>
#include <boost/interprocess/allocators/detail/node_pool.hpp>
#include <boost/interprocess/allocators/detail/allocator_common.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <memory>
#include <algorithm>
#include <boost/interprocess/detail/version_type.hpp>
#include <boost/interprocess/allocators/detail/node_tools.hpp>
#include <cstddef>
//!\file
@@ -35,37 +32,113 @@
namespace boost {
namespace interprocess {
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will be of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This node allocator shares a segregated storage between all instances of
//!cached_node_allocator with equal sizeof(T) placed in the same fixed size
//!memory segment. But also caches some nodes privately to
//!avoid some synchronization overhead.
template<class T, class SegmentManager, std::size_t NodesPerChunk>
class cached_node_allocator
/// @cond
namespace detail {
template < class T
, class SegmentManager
, std::size_t NodesPerChunk = 64
>
class cached_node_allocator_v1
: public detail::cached_allocator_impl
< T
, detail::shared_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
>
, 1>
{
public:
typedef detail::cached_allocator_impl
< T
, detail::shared_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
>
, 1> base_t;
template<class T2>
struct rebind
{
typedef cached_node_allocator_v1
<T2, SegmentManager, NodesPerChunk> other;
};
cached_node_allocator_v1(SegmentManager *segment_mngr,
std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES)
: base_t(segment_mngr, max_cached_nodes)
{}
template<class T2>
cached_node_allocator_v1
(const cached_node_allocator_v1
<T2, SegmentManager, NodesPerChunk> &other)
: base_t(other)
{}
};
} //namespace detail{
/// @endcond
template < class T
, class SegmentManager
, std::size_t NodesPerChunk
>
class cached_node_allocator
/// @cond
typedef typename SegmentManager::void_pointer void_pointer;
typedef typename detail::
pointer_to_other<void_pointer, const void>::type cvoid_pointer;
typedef SegmentManager segment_manager;
typedef typename detail::
pointer_to_other<void_pointer, char>::type char_pointer;
typedef typename SegmentManager::mutex_family::mutex_type mutex_type;
typedef cached_node_allocator<T, SegmentManager, NodesPerChunk> self_t;
enum { DEFAULT_MAX_CACHED_NODES = 64 };
typedef typename detail::node_slist<void_pointer>::node_t node_t;
typedef typename detail::node_slist<void_pointer>::node_slist_t cached_list_t;
: public detail::cached_allocator_impl
< T
, detail::shared_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
>
, 2>
/// @endcond
{
#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
public:
typedef detail::cached_allocator_impl
< T
, detail::shared_node_pool
< SegmentManager
, sizeof(T)
, NodesPerChunk
>
, 2> base_t;
public:
//-------
typedef typename detail::
pointer_to_other<void_pointer, T>::type pointer;
typedef typename detail::
pointer_to_other<void_pointer, const T>::type const_pointer;
typedef detail::version_type<cached_node_allocator, 2> version;
template<class T2>
struct rebind
{
typedef cached_node_allocator<T2, SegmentManager, NodesPerChunk> other;
};
cached_node_allocator(SegmentManager *segment_mngr,
std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES)
: base_t(segment_mngr, max_cached_nodes)
{}
template<class T2>
cached_node_allocator
(const cached_node_allocator<T2, SegmentManager, NodesPerChunk> &other)
: base_t(other)
{}
#else
public:
typedef implementation_defined::segment_manager segment_manager;
typedef segment_manager::void_pointer void_pointer;
typedef implementation_defined::pointer pointer;
typedef implementation_defined::const_pointer const_pointer;
typedef T value_type;
typedef typename detail::add_reference
<value_type>::type reference;
@@ -73,302 +146,173 @@ class cached_node_allocator
<const value_type>::type const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef detail::shared_node_pool
< SegmentManager, mutex_type
, sizeof(T), NodesPerChunk> node_pool_t;
typedef typename detail::
pointer_to_other<void_pointer, node_pool_t>::type node_pool_ptr;
//!Obtains cached_node_allocator from other cached_node_allocator
//!Obtains cached_node_allocator from
//!cached_node_allocator
template<class T2>
struct rebind
{
typedef cached_node_allocator<T2, SegmentManager, NodesPerChunk> other;
typedef cached_node_allocator<T2, SegmentManager> other;
};
/// @cond
private:
//!Not assignable from related cached_node_allocator
//!Not assignable from
//!related cached_node_allocator
template<class T2, class SegmentManager2, std::size_t N2>
cached_node_allocator& operator=
(const cached_node_allocator<T2, SegmentManager2, N2>&);
//!Not assignable from other cached_node_allocator
//!Not assignable from
//!other cached_node_allocator
cached_node_allocator& operator=(const cached_node_allocator&);
/// @endcond
public:
//!Constructor from a segment manager. If not present, constructs
//!a node pool. Increments the reference count of the node pool.
//!Constructor from a segment manager. If not present, constructs a node
//!pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
cached_node_allocator(segment_manager *segment_mngr,
std::size_t max_cached_nodes = DEFAULT_MAX_CACHED_NODES)
: mp_node_pool(priv_get_or_create(segment_mngr)),
m_max_cached_nodes(max_cached_nodes)
{}
cached_node_allocator(segment_manager *segment_mngr);
//!Copy constructor from other cached_node_allocator. Increments the
//!reference count of the associated node pool. Never throws
cached_node_allocator(const cached_node_allocator &other)
: mp_node_pool(other.get_node_pool()),
m_max_cached_nodes(other.get_max_cached_nodes())
{ mp_node_pool->inc_ref_count(); }
//!Copy constructor from other cached_node_allocator. Increments the reference
//!count of the associated node pool. Never throws
cached_node_allocator(const cached_node_allocator &other);
//!Copy constructor from related cached_node_allocator. If not present, constructs
//!a node pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
template<class T2>
cached_node_allocator
(const cached_node_allocator<T2, SegmentManager, NodesPerChunk> &other)
: mp_node_pool(priv_get_or_create(other.get_segment_manager())),
m_max_cached_nodes(other.get_max_cached_nodes())
{ }
(const cached_node_allocator<T2, SegmentManager, NodesPerChunk> &other);
//!Destructor, removes node_pool_t from memory
//!if its reference count reaches to zero. Never throws
~cached_node_allocator()
{
priv_deallocate_all_cached_nodes();
priv_destroy_if_last_link();
}
~cached_node_allocator();
//!Returns a pointer to the node pool.
//!Never throws
node_pool_t* get_node_pool() const
{ return detail::get_pointer(mp_node_pool); }
node_pool_t* get_node_pool() const;
//!Returns the segment manager.
//!Never throws
segment_manager* get_segment_manager()const
{ return mp_node_pool->get_segment_manager(); }
segment_manager* get_segment_manager()const;
//!Sets the new max cached nodes value. This can provoke deallocations
//!if "newmax" is less than current cached nodes. Never throws
void set_max_cached_nodes(std::size_t newmax)
{
m_max_cached_nodes = newmax;
priv_deallocate_remaining_nodes();
}
//!Returns the max cached nodes parameter.
//!Returns the number of elements that could be allocated.
//!Never throws
std::size_t get_max_cached_nodes() const
{ return m_max_cached_nodes; }
//!Returns the number of elements that could be allocated. Never throws
size_type max_size() const
{ return this->get_segment_manager()->get_size()/sizeof(value_type); }
size_type max_size() const;
//!Allocate memory for an array of count elements.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
pointer allocate(size_type count, cvoid_pointer hint = 0)
{
(void)hint;
if(count > ((size_type)-1)/sizeof(value_type))
throw bad_alloc();
typedef detail::shared_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk> node_pool_t;
void * ret;
if(count == 1){
//If don't have any cached node, we have to get a new list of free nodes from the pool
if(m_cached_nodes.empty()){
mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes);
}
ret = &m_cached_nodes.front();
m_cached_nodes.pop_front();
}
else{
ret = mp_node_pool->allocate(count);
}
return pointer(static_cast<T*>(ret));
}
pointer allocate(size_type count, cvoid_pointer hint = 0);
//!Deallocate allocated memory.
//!Never throws
void deallocate(const pointer &ptr, size_type count)
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk> node_pool_t;
void deallocate(const pointer &ptr, size_type count);
if(count == 1){
//Check if cache is full
if(m_cached_nodes.size() >= m_max_cached_nodes){
//This only occurs if this allocator deallocate memory allocated
//with other equal allocator. Since the cache is full, and more
//deallocations are probably coming, we'll make some room in cache
//in a single, efficient multi node deallocation.
priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
}
m_cached_nodes.push_front(*(node_t*)detail::char_ptr_cast(detail::get_pointer(ptr)));
}
else{
mp_node_pool->deallocate(detail::get_pointer(ptr), count);
}
}
//!Deallocates all free chunks
//!of the pool
void deallocate_free_chunks();
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different shared memory segments, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2)
{
detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);
alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes);
detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes);
}
//!Returns the cached nodes to the shared pool
void deallocate_cache()
{ this->priv_deallocate_all_cached_nodes(); }
//!Deallocates all free chunks of the pool
void deallocate_free_chunks()
{ mp_node_pool->deallocate_free_chunks(); }
//These functions are obsolete. These are here to conserve
//backwards compatibility with containers using them...
//!different memory segment, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2);
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const
{ return pointer(boost::addressof(value)); }
pointer address(reference value) const;
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const
{ return const_pointer(boost::addressof(value)); }
const_pointer address(const_reference value) const;
//!Default construct an object.
   //!Throws if T's default constructor throws
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }
//!Throws if T's default constructor throws
void construct(const pointer &ptr);
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr)
{ BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
void destroy(const pointer &ptr);
/// @cond
private:
   //!Returns the maximum number of objects the previously allocated memory
//!pointed by p can hold. This size only works for memory allocated with
//!allocate, allocation_command and allocate_many.
size_type size(const pointer &p) const;
//!Object function that creates the node allocator if it is not created and
//!increments reference count if it is already created
struct get_or_create_func
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk> node_pool_t;
std::pair<pointer, bool>
allocation_command(allocation_type command,
size_type limit_size,
size_type preferred_size,
size_type &received_size, const pointer &reuse = 0);
//!This connects or constructs the unique instance of node_pool_t
//!Can throw boost::interprocess::bad_alloc
void operator()()
{
//Find or create the node_pool_t
mp_node_pool = mp_named_alloc->template find_or_construct
<node_pool_t>(unique_instance)(mp_named_alloc);
//If valid, increment link count
if(mp_node_pool != 0)
mp_node_pool->inc_ref_count();
}
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
   //!preferred_elements. The number of actually allocated elements
   //!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
//!Constructor. Initializes function
//!object parameters
get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
node_pool_t *mp_node_pool;
segment_manager *mp_named_alloc;
};
   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
//!contiguous chunk
//!of memory. The elements must be deallocated
multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
//!Frees all cached nodes.
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
void deallocate_many(multiallocation_iterator it);
//!Allocates just one object. Memory allocated with this function
//!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
pointer allocate_one();
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
multiallocation_iterator allocate_individual(std::size_t num_elements);
//!Deallocates memory previously allocated with allocate_one().
//!You should never use deallocate_one to deallocate memory allocated
//!with other functions different from allocate_one(). Never throws
void deallocate_one(const pointer &p);
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
void deallocate_individual(multiallocation_iterator it);
//!Sets the new max cached nodes value. This can provoke deallocations
//!if "newmax" is less than current cached nodes. Never throws
void set_max_cached_nodes(std::size_t newmax);
//!Returns the max cached nodes parameter.
//!Never throws
void priv_deallocate_all_cached_nodes()
{ mp_node_pool->deallocate_nodes(m_cached_nodes); }
//!Frees all cached nodes at once.
//!Never throws
void priv_deallocate_remaining_nodes()
{
if(m_cached_nodes.size() > m_max_cached_nodes){
priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
}
}
//!Frees n cached nodes at once.
//!Never throws
void priv_deallocate_n_nodes(std::size_t n)
{ mp_node_pool->deallocate_nodes(m_cached_nodes, n); }
//!Initialization function, creates and executes atomically the
//!initialization object functions. Can throw boost::interprocess::bad_alloc
node_pool_t *priv_get_or_create(segment_manager *named_alloc)
{
get_or_create_func func(named_alloc);
named_alloc->atomic_func(func);
return func.mp_node_pool;
}
//!Object function that decrements the reference count. If the count
//!reaches to zero destroys the node allocator from memory.
//!Never throws
struct destroy_if_last_link_func
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk> node_pool_t;
//!Decrements reference count and destroys the object if there is no
//!more attached allocators. Never throws
void operator()()
{
//If not the last link return
if(mp_node_pool->dec_ref_count() != 0) return;
//Last link, let's destroy the shared node pool object
mp_named_alloc->template destroy<node_pool_t>(unique_instance);
}
//!Constructor. Initializes function object
//!parameters
destroy_if_last_link_func(segment_manager *nhdr,
node_pool_t *phdr)
: mp_named_alloc(nhdr), mp_node_pool(phdr){}
segment_manager *mp_named_alloc;
node_pool_t *mp_node_pool;
};
//!Destruction function, initializes and executes destruction function
//!object. Never throws
void priv_destroy_if_last_link()
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk> node_pool_t;
//Get segment manager
segment_manager *segment_mngr = this->get_segment_manager();
//Execute destruction functor atomically
destroy_if_last_link_func func(segment_mngr, detail::get_pointer(mp_node_pool));
segment_mngr->atomic_func(func);
}
private:
node_pool_ptr mp_node_pool;
cached_list_t m_cached_nodes;
std::size_t m_max_cached_nodes;
/// @endcond
std::size_t get_max_cached_nodes() const;
#endif
};
//!Equality test for same type of
//!cached_node_allocator
#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!Equality test for same type
//!of cached_node_allocator
template<class T, class S, std::size_t NodesPerChunk> inline
bool operator==(const cached_node_allocator<T, S, NodesPerChunk> &alloc1,
const cached_node_allocator<T, S, NodesPerChunk> &alloc2)
{ return alloc1.get_node_pool() == alloc2.get_node_pool(); }
const cached_node_allocator<T, S, NodesPerChunk> &alloc2);
//!Inequality test for same type of
//!cached_node_allocator
//!Inequality test for same type
//!of cached_node_allocator
template<class T, class S, std::size_t NodesPerChunk> inline
bool operator!=(const cached_node_allocator<T, S, NodesPerChunk> &alloc1,
const cached_node_allocator<T, S, NodesPerChunk> &alloc2)
{ return alloc1.get_node_pool() != alloc2.get_node_pool(); }
const cached_node_allocator<T, S, NodesPerChunk> &alloc2);
#endif
} //namespace interprocess {
} //namespace boost {

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -17,20 +17,21 @@
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/slist.hpp>
#include <boost/math/common_factor_ct.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/allocators/detail/node_tools.hpp>
#include <boost/interprocess/allocators/detail/allocator_common.hpp>
#include <cstddef>
#include <cmath>
#include <cassert>
#include <cassert>
//!\file
//!Describes the real adaptive pool shared by many Interprocess pool allocators
@@ -39,10 +40,6 @@ namespace boost {
namespace interprocess {
namespace detail {
//!Pooled shared memory allocator using a smart adaptive pool. Includes
//!a reference count but the class does not delete itself, this is
//!responsibility of user classes. Node size (NodeSize) and the number of
//!nodes allocated per chunk (NodesPerChunk) are known at compile time.
template<class SegmentManagerBase>
class private_adaptive_node_pool_impl
{
@@ -56,19 +53,85 @@ class private_adaptive_node_pool_impl
public:
typedef typename node_slist<void_pointer>::node_t node_t;
typedef typename node_slist<void_pointer>::node_slist_t free_nodes_t;
typedef typename SegmentManagerBase::multiallocation_iterator multiallocation_iterator;
typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain;
private:
//This hook will be used to chain the memory chunks
typedef typename bi::make_list_base_hook
<bi::void_pointer<void_pointer>, bi::link_mode<bi::normal_link> >::type list_hook_t;
typedef typename bi::make_set_base_hook
< bi::void_pointer<void_pointer>
, bi::optimize_size<true>
, bi::constant_time_size<false>
, bi::link_mode<bi::normal_link> >::type multiset_hook_t;
struct hdr_offset_holder
{
hdr_offset_holder(std::size_t offset = 0)
: hdr_offset(offset)
{}
std::size_t hdr_offset;
};
struct chunk_info_t
: public list_hook_t
:
public hdr_offset_holder,
public multiset_hook_t
{
//An intrusive list of free nodes from this chunk
free_nodes_t free_nodes;
friend bool operator <(const chunk_info_t &l, const chunk_info_t &r)
{
// { return l.free_nodes.size() < r.free_nodes.size(); }
//Let's order blocks first by free nodes and then by address
//so that highest address fully free chunks are deallocated.
//This improves returning memory to the OS (trimming).
const bool is_less = l.free_nodes.size() < r.free_nodes.size();
const bool is_equal = l.free_nodes.size() == r.free_nodes.size();
return is_less || (is_equal && (&l < &r));
}
};
typedef typename bi::make_list<chunk_info_t, bi::base_hook<list_hook_t> >::type chunk_list_t;
typedef typename bi::make_multiset
<chunk_info_t, bi::base_hook<multiset_hook_t> >::type chunk_multiset_t;
typedef typename chunk_multiset_t::iterator chunk_iterator;
static const std::size_t MaxAlign = alignment_of<node_t>::value;
static const std::size_t HdrSize = ((sizeof(chunk_info_t)-1)/MaxAlign+1)*MaxAlign;
static const std::size_t HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign;
static std::size_t calculate_alignment
(std::size_t overhead_percent, std::size_t real_node_size)
{
//to-do: handle real_node_size != node_size
const std::size_t divisor = overhead_percent*real_node_size;
const std::size_t dividend = HdrOffsetSize*100;
std::size_t elements_per_subchunk = (dividend - 1)/divisor + 1;
std::size_t candidate_power_of_2 =
upper_power_of_2(elements_per_subchunk*real_node_size + HdrOffsetSize);
bool overhead_satisfied = false;
while(!overhead_satisfied){
elements_per_subchunk = (candidate_power_of_2 - HdrOffsetSize)/real_node_size;
std::size_t overhead_size = candidate_power_of_2 - elements_per_subchunk*real_node_size;
if(overhead_size*100/candidate_power_of_2 < overhead_percent){
overhead_satisfied = true;
}
else{
candidate_power_of_2 <<= 1;
}
}
return candidate_power_of_2;
}
static void calculate_num_subchunks
(std::size_t alignment, std::size_t real_node_size, std::size_t elements_per_chunk
,std::size_t &num_subchunks, std::size_t &real_num_node)
{
std::size_t elements_per_subchunk = (alignment - HdrOffsetSize)/real_node_size;
std::size_t possible_num_subchunk = (elements_per_chunk - 1)/elements_per_subchunk + 1;
std::size_t hdr_subchunk_elements = (alignment - HdrSize - SegmentManagerBase::PayloadPerAllocation)/real_node_size;
while(((possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements) < elements_per_chunk){
++possible_num_subchunk;
}
num_subchunks = possible_num_subchunk;
real_num_node = (possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements;
}
public:
//!Segment manager typedef
@@ -77,26 +140,25 @@ class private_adaptive_node_pool_impl
//!Constructor from a segment manager. Never throws
private_adaptive_node_pool_impl
( segment_manager_base_type *segment_mngr_base, std::size_t node_size
, std::size_t nodes_per_chunk, std::size_t max_free_chunks)
: m_node_size(node_size)
, m_max_free_chunks(max_free_chunks)
, m_real_node_size(lcm(m_node_size, sizeof(node_t)))
, m_header_size(min_value(get_rounded_size(sizeof(chunk_info_t), alignment_of<max_align>::value)
,get_rounded_size(sizeof(chunk_info_t), m_real_node_size)))
//Round the size to a power of two value.
//This is the total memory size (including payload) that we want to
//allocate from the general-purpose allocator
, m_real_chunk_alignment(upper_power_of_2(m_header_size + m_real_node_size*nodes_per_chunk))
, std::size_t nodes_per_chunk, std::size_t max_free_chunks
, unsigned char overhead_percent
)
: m_max_free_chunks(max_free_chunks)
, m_real_node_size(lcm(node_size, std::size_t(alignment_of<node_t>::value)))
//Round the size to a power of two value.
//This is the total memory size (including payload) that we want to
//allocate from the general-purpose allocator
, m_real_chunk_alignment(calculate_alignment(overhead_percent, m_real_node_size))
//This is the real number of nodes per chunk
, m_real_num_node((m_real_chunk_alignment - SegmentManagerBase::PayloadPerAllocation - m_header_size)/m_real_node_size)
, m_num_subchunks(0)
, m_real_num_node(0)
//General purpose allocator
, mp_segment_mngr_base(segment_mngr_base)
, m_chunklist()
, m_first_free_chunk(m_chunklist.end())
//Debug node count
, m_allocated(0)
, m_free_chunks(0)
{}
, m_chunk_multiset()
, m_totally_free_chunks(0)
{
calculate_num_subchunks(m_real_chunk_alignment, m_real_node_size, nodes_per_chunk, m_num_subchunks, m_real_num_node);
}
//!Destructor. Deallocates all allocated chunks. Never throws
~private_adaptive_node_pool_impl()
@@ -110,60 +172,118 @@ class private_adaptive_node_pool_impl
{ return detail::get_pointer(mp_segment_mngr_base); }
//!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
void *allocate(std::size_t count)
void *allocate_node()
{
std::size_t bytes = count*m_node_size;
if(bytes > m_real_node_size){//Normal allocation, no pooling used
void *addr = mp_segment_mngr_base->allocate(bytes);
if(!addr) throw bad_alloc();
return addr;
priv_invariants();
//If there are no free nodes we allocate a new block
if (m_chunk_multiset.empty()){
priv_alloc_chunk(1);
}
else //Node allocation, pooling used
return priv_alloc_node();
//We take the first free node the multiset can't be empty
return priv_take_first_node();
}
//!Deallocates an array pointed by ptr. Never throws
void deallocate(void *ptr, std::size_t count)
void deallocate_node(void *pElem)
{
std::size_t bytes = count*m_node_size;
if(bytes > m_real_node_size)//Normal allocation was used
mp_segment_mngr_base->deallocate(ptr);
else //Node allocation was used
priv_dealloc_node(ptr);
priv_invariants();
chunk_info_t *chunk_info = priv_chunk_from_node(pElem);
assert(chunk_info->free_nodes.size() < m_real_num_node);
//We put the node at the beginning of the free node list
node_t * to_deallocate = static_cast<node_t*>(pElem);
chunk_info->free_nodes.push_front(*to_deallocate);
chunk_iterator this_chunk(chunk_multiset_t::s_iterator_to(*chunk_info));
chunk_iterator next_chunk(this_chunk);
++next_chunk;
//Cache the free nodes from the chunk
std::size_t this_chunk_free_nodes = this_chunk->free_nodes.size();
if(this_chunk_free_nodes == 1){
m_chunk_multiset.insert(m_chunk_multiset.begin(), *chunk_info);
}
else{
chunk_iterator next_chunk(this_chunk);
++next_chunk;
if(next_chunk != m_chunk_multiset.end()){
std::size_t next_free_nodes = next_chunk->free_nodes.size();
if(this_chunk_free_nodes > next_free_nodes){
//Now move the chunk to the new position
m_chunk_multiset.erase(this_chunk);
m_chunk_multiset.insert(*chunk_info);
}
}
}
//Update free chunk count
if(this_chunk_free_nodes == m_real_num_node){
++m_totally_free_chunks;
priv_deallocate_free_chunks(m_max_free_chunks);
}
priv_invariants();
}
//!Allocates a singly linked list of n nodes ending in null pointer.
//!can throw boost::interprocess::bad_alloc
void allocate_nodes(const std::size_t n, free_nodes_t &nodes)
void allocate_nodes(multiallocation_chain &nodes, const std::size_t n)
{
std::size_t i = 0;
std::size_t old_node_count = nodes.size();
try{
for(; i < n; ++i){
nodes.push_front(*priv_alloc_node());
priv_invariants();
for(std::size_t i = 0; i != n; ++i){
//If there are no free nodes we allocate all needed chunks
if (m_chunk_multiset.empty()){
priv_alloc_chunk(((n - i) - 1)/m_real_num_node + 1);
}
nodes.push_front(priv_take_first_node());
}
}
catch(...){
priv_deallocate_nodes(nodes, i);
priv_deallocate_nodes(nodes, nodes.size());
priv_deallocate_free_chunks(m_max_free_chunks);
throw;
}
//remove me
assert((n+old_node_count) == (std::size_t)std::distance(nodes.get_it(), multiallocation_iterator()));
priv_invariants();
}
//!Allocates n nodes, pointed by the multiallocation_iterator.
//!Can throw boost::interprocess::bad_alloc
multiallocation_iterator allocate_nodes(const std::size_t n)
{
multiallocation_chain chain;
this->allocate_nodes(chain, n);
return chain.get_it();
}
//!Deallocates a linked list of nodes. Never throws
void deallocate_nodes(free_nodes_t &nodes)
void deallocate_nodes(multiallocation_chain &nodes)
{ priv_deallocate_nodes(nodes, nodes.size()); }
//!Deallocates the first n nodes of a linked list of nodes. Never throws
void deallocate_nodes(free_nodes_t &nodes, std::size_t n)
void deallocate_nodes(multiallocation_chain &nodes, std::size_t n)
{ priv_deallocate_nodes(nodes, n); }
//!Deallocates the nodes pointed by the multiallocation iterator. Never throws
void deallocate_nodes(multiallocation_iterator it)
{
multiallocation_iterator itend;
while(it != itend){
void *addr = &*it;
++it;
deallocate_node(addr);
}
}
void deallocate_free_chunks()
{ priv_deallocate_free_chunks(0); }
std::size_t num_free_nodes()
{
typedef typename chunk_list_t::const_iterator citerator;
typedef typename chunk_multiset_t::const_iterator citerator;
std::size_t count = 0;
citerator it (m_first_free_chunk), itend(m_chunklist.end());
citerator it (m_chunk_multiset.begin()), itend(m_chunk_multiset.end());
for(; it != itend; ++it){
count += it->free_nodes.size();
}
@@ -172,22 +292,40 @@ class private_adaptive_node_pool_impl
void swap(private_adaptive_node_pool_impl &other)
{
assert(m_max_free_chunks == other.m_max_free_chunks);
assert(m_real_node_size == other.m_real_node_size);
assert(m_real_chunk_alignment == other.m_real_chunk_alignment);
assert(m_real_num_node == other.m_real_num_node);
std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
m_chunklist.swap(other.m_chunklist);
std::swap(m_first_free_chunk, other.m_first_free_chunk);
std::swap(m_allocated, other.m_allocated);
std::swap(m_free_chunks, other.m_allocated);
std::swap(m_totally_free_chunks, other.m_totally_free_chunks);
m_chunk_multiset.swap(other.m_chunk_multiset);
}
private:
node_t *priv_take_first_node()
{
assert(m_chunk_multiset.begin() != m_chunk_multiset.end());
//We take the first free node the multiset can't be empty
free_nodes_t &free_nodes = m_chunk_multiset.begin()->free_nodes;
node_t *first_node = &free_nodes.front();
const std::size_t free_nodes_count = free_nodes.size();
assert(0 != free_nodes_count);
free_nodes.pop_front();
if(free_nodes_count == 1){
m_chunk_multiset.erase(m_chunk_multiset.begin());
}
else if(free_nodes_count == m_real_num_node){
--m_totally_free_chunks;
}
priv_invariants();
return first_node;
}
void priv_deallocate_nodes(free_nodes_t &nodes, const std::size_t num)
void priv_deallocate_nodes(multiallocation_chain &nodes, const std::size_t num)
{
assert(nodes.size() >= num);
for(std::size_t i = 0; i < num; ++i){
node_t *to_deallocate = &nodes.front();
nodes.pop_front();
deallocate(to_deallocate, 1);
deallocate_node(nodes.pop_front());
}
}
@@ -197,71 +335,75 @@ class private_adaptive_node_pool_impl
class chunk_destroyer
{
public:
chunk_destroyer(segment_manager_base_type *mngr, std::size_t real_num_node)
: mngr_(mngr), m_real_num_node(real_num_node)
chunk_destroyer(const private_adaptive_node_pool_impl *impl)
: mp_impl(impl)
{}
void operator()(typename chunk_list_t::pointer to_deallocate)
void operator()(typename chunk_multiset_t::pointer to_deallocate)
{
std::size_t free_nodes = to_deallocate->free_nodes.size();
(void)free_nodes;
assert(free_nodes == m_real_num_node);
mngr_->deallocate(detail::get_pointer(to_deallocate));
assert(free_nodes == mp_impl->m_real_num_node);
assert(0 == to_deallocate->hdr_offset);
hdr_offset_holder *hdr_off_holder = mp_impl->priv_first_subchunk_from_chunk((chunk_info_t*)detail::get_pointer(to_deallocate));
mp_impl->mp_segment_mngr_base->deallocate(hdr_off_holder);
}
segment_manager_base_type *mngr_;
const std::size_t m_real_num_node;
const private_adaptive_node_pool_impl *mp_impl;
};
//This macro will activate invariant checking. Slow, but helpful for debugging the code.
//#define BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
#define BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
void priv_invariants()
#ifdef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
#undef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
{
typedef typename chunk_list_t::iterator chunk_iterator;
//We iterate though the chunk list to free the memory
chunk_iterator it(m_chunklist.begin()),
itend(m_chunklist.end()), to_deallocate;
for(++it; it != itend; ++it){
chunk_iterator prev(it);
--prev;
std::size_t sp = prev->free_nodes.size(),
si = it->free_nodes.size();
assert(sp <= si);
(void)sp; (void)si;
//We iterate through the chunk list to free the memory
chunk_iterator it(m_chunk_multiset.begin()),
itend(m_chunk_multiset.end()), to_deallocate;
if(it != itend){
for(++it; it != itend; ++it){
chunk_iterator prev(it);
--prev;
std::size_t sp = prev->free_nodes.size(),
si = it->free_nodes.size();
assert(sp <= si);
(void)sp; (void)si;
}
}
//Check that the total free nodes are correct
it = m_chunklist.begin();
itend = m_chunklist.end();
std::size_t total_free = 0;
for(; it != itend; ++it){
total_free += it->free_nodes.size();
}
assert(total_free >= m_free_chunks*m_real_num_node);
//Check that the total totally free chunks are correct
it = m_chunklist.begin();
itend = m_chunklist.end();
total_free = 0;
for(; it != itend; ++it){
total_free += it->free_nodes.size() == m_real_num_node;
}
assert(total_free >= m_free_chunks);
//The chunk pointed by m_first_free_chunk should point
//to end or to a non-empty chunk
if(m_first_free_chunk != m_chunklist.end()){
std::size_t s = m_first_free_chunk->free_nodes.size();
assert(s != 0);
{
//Check that the total free nodes are correct
it = m_chunk_multiset.begin();
itend = m_chunk_multiset.end();
std::size_t total_free_nodes = 0;
for(; it != itend; ++it){
total_free_nodes += it->free_nodes.size();
}
assert(total_free_nodes >= m_totally_free_chunks*m_real_num_node);
}
//All previous nodes of m_first_free_chunk should be 0
it = m_chunklist.begin();
itend = m_first_free_chunk;
{
//Check that the total totally free chunks are correct
it = m_chunk_multiset.begin();
itend = m_chunk_multiset.end();
std::size_t total_free_chunks = 0;
for(; it != itend; ++it){
total_free_chunks += (it->free_nodes.size() == m_real_num_node);
}
assert(total_free_chunks == m_totally_free_chunks);
}
{
//Check that header offsets are correct
it = m_chunk_multiset.begin();
for(; it != itend; ++it){
std::size_t s = it->free_nodes.size();
assert(s == 0);
hdr_offset_holder *hdr_off_holder = priv_first_subchunk_from_chunk(&*it);
for(std::size_t i = 0, max = m_num_subchunks; i < max; ++i){
assert(hdr_off_holder->hdr_offset == std::size_t((char*)&*it- (char*)hdr_off_holder));
assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
hdr_off_holder = (hdr_offset_holder *)((char*)hdr_off_holder + m_real_chunk_alignment);
}
}
}
}
#else
@@ -271,165 +413,136 @@ class private_adaptive_node_pool_impl
//!Deallocates all used memory. Never throws
void priv_clear()
{
//Check for memory leaks
assert(m_allocated==0);
#ifndef NDEBUG
chunk_iterator it = m_chunk_multiset.begin();
chunk_iterator itend = m_chunk_multiset.end();
std::size_t num_free_nodes = 0;
for(; it != itend; ++it){
//Check for memory leak
assert(it->free_nodes.size() == m_real_num_node);
++num_free_nodes;
}
assert(num_free_nodes == m_totally_free_chunks);
#endif
priv_invariants();
m_first_free_chunk = m_chunklist.end();
m_chunklist.clear_and_dispose
(chunk_destroyer(detail::get_pointer(mp_segment_mngr_base), m_real_num_node));
m_free_chunks = 0;
m_chunk_multiset.clear_and_dispose
(chunk_destroyer(this));
m_totally_free_chunks = 0;
}
chunk_info_t *priv_chunk_from_node(void *node)
chunk_info_t *priv_chunk_from_node(void *node) const
{
return (chunk_info_t *)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1)));
hdr_offset_holder *hdr_off_holder =
(hdr_offset_holder*)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1)));
assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
chunk_info_t *chunk = (chunk_info_t *)(((char*)hdr_off_holder) + hdr_off_holder->hdr_offset);
assert(chunk->hdr_offset == 0);
return chunk;
}
//!Allocates one node, using the adaptive pool algorithm.
//!Never throws
node_t *priv_alloc_node()
hdr_offset_holder *priv_first_subchunk_from_chunk(chunk_info_t *chunk) const
{
priv_invariants();
//If there are no free nodes we allocate a new block
if (m_first_free_chunk == m_chunklist.end()){
priv_alloc_chunk();
--m_first_free_chunk;
}
//We take the first free node since m_first_free_chunk can't be end()
chunk_info_t &chunk_info = *m_first_free_chunk;
assert(!chunk_info.free_nodes.empty());
node_t *first_node = &chunk_info.free_nodes.front();
if(chunk_info.free_nodes.size() == 1){
++m_first_free_chunk;
}
else if(chunk_info.free_nodes.size() == m_real_num_node){
--m_free_chunks;
}
chunk_info.free_nodes.pop_front();
++m_allocated;
priv_invariants();
return detail::get_pointer(first_node);
}
//!Deallocates one node, using the adaptive pool algorithm.
//!Never throws
void priv_dealloc_node(void *pElem)
{
typedef typename chunk_list_t::iterator chunk_iterator;
priv_invariants();
chunk_info_t *chunk_info = priv_chunk_from_node(pElem);
assert(chunk_info->free_nodes.size() < m_real_num_node);
//We put the node at the beginning of the free node list
node_t * to_deallocate = static_cast<node_t*>(pElem);
chunk_info->free_nodes.push_front(*to_deallocate);
chunk_iterator this_chunk(chunk_list_t::s_iterator_to(*chunk_info));
chunk_iterator next_chunk(this_chunk);
++next_chunk;
//If this chunk has more free nodes than the next ones,
//we have to move the chunk in the list to keep it ordered.
//Check if we have to move it
while(next_chunk != m_chunklist.end() &&
this_chunk->free_nodes.size() > next_chunk->free_nodes.size()){
++next_chunk;
}
//Check if the chunk must be moved
if(++chunk_iterator(this_chunk) != next_chunk){
//Update m_first_free_chunk iterator if it was pointing to this_chunk
if(m_first_free_chunk == this_chunk){
++m_first_free_chunk;
}
//Update m_first_free_chunk if the moved chunk crosses the empty boundary
else if(this_chunk->free_nodes.size() == 1){
m_first_free_chunk = chunk_list_t::s_iterator_to(*chunk_info);
}
//Now move the chunk to the new position
m_chunklist.erase(this_chunk);
m_chunklist.insert(next_chunk, *chunk_info);
}
//Update m_first_free_chunk if the chunk crosses the empty boundary
else if(this_chunk->free_nodes.size() == 1){
--m_first_free_chunk;
}
if(this_chunk->free_nodes.size() == m_real_num_node){
++m_free_chunks;
}
assert(m_allocated>0);
--m_allocated;
priv_invariants();
priv_deallocate_free_chunks(m_max_free_chunks);
priv_invariants();
hdr_offset_holder *hdr_off_holder = (hdr_offset_holder*)
(((char*)chunk) - (m_num_subchunks-1)*m_real_chunk_alignment);
assert(hdr_off_holder->hdr_offset == std::size_t((char*)chunk - (char*)hdr_off_holder));
assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
return hdr_off_holder;
}
void priv_deallocate_free_chunks(std::size_t max_free_chunks)
{
typedef typename chunk_list_t::iterator chunk_iterator;
priv_invariants();
//Now check if we've reached the free nodes limit
//and check if we have free chunks. If so, deallocate as much
//as we can to stay below the limit
while(m_free_chunks > max_free_chunks &&
m_chunklist.back().free_nodes.size() == m_real_num_node){
chunk_iterator it(--m_chunklist.end());
if(it == m_first_free_chunk)
++m_first_free_chunk; //m_first_free_chunk is now equal to end()
m_chunklist.erase_and_dispose(it, chunk_destroyer(detail::get_pointer(mp_segment_mngr_base),m_real_num_node));
--m_free_chunks;
for( chunk_iterator itend = m_chunk_multiset.end()
; m_totally_free_chunks > max_free_chunks
; --m_totally_free_chunks
){
assert(!m_chunk_multiset.empty());
chunk_iterator it = itend;
--it;
std::size_t num_nodes = it->free_nodes.size();
assert(num_nodes == m_real_num_node);
(void)num_nodes;
m_chunk_multiset.erase_and_dispose
(it, chunk_destroyer(this));
}
}
//!Allocates a chunk of nodes. Can throw boost::interprocess::bad_alloc
void priv_alloc_chunk()
//!Allocates several chunks of nodes. Can throw boost::interprocess::bad_alloc
void priv_alloc_chunk(std::size_t n)
{
//We allocate a new NodeBlock and put it as first
//element in the free Node list
std::size_t real_chunk_size = m_real_chunk_alignment - SegmentManagerBase::PayloadPerAllocation;
char *pNode = detail::char_ptr_cast
(mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment));
if(!pNode) throw bad_alloc();
chunk_info_t *c_info = new(pNode)chunk_info_t;
m_chunklist.push_back(*c_info);
pNode += m_header_size;
//We initialize all Nodes in Node Block to insert
//them in the free Node list
for(std::size_t i = 0; i < m_real_num_node; ++i){
c_info->free_nodes.push_front(*new (pNode) node_t);
pNode += m_real_node_size;
std::size_t real_chunk_size = m_real_chunk_alignment*m_num_subchunks - SegmentManagerBase::PayloadPerAllocation;
std::size_t elements_per_subchunk = (m_real_chunk_alignment - HdrOffsetSize)/m_real_node_size;
std::size_t hdr_subchunk_elements = (m_real_chunk_alignment - HdrSize - SegmentManagerBase::PayloadPerAllocation)/m_real_node_size;
for(std::size_t i = 0; i != n; ++i){
//We allocate a new NodeBlock and put it as the last
//element of the tree
char *mem_address = detail::char_ptr_cast
(mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment));
if(!mem_address) throw std::bad_alloc();
++m_totally_free_chunks;
//First initialize header information on the last subchunk
char *hdr_addr = mem_address + m_real_chunk_alignment*(m_num_subchunks-1);
chunk_info_t *c_info = new(hdr_addr)chunk_info_t;
//Some structural checks
assert(static_cast<void*>(&static_cast<hdr_offset_holder*>(c_info)->hdr_offset) ==
static_cast<void*>(c_info));
typename free_nodes_t::iterator prev_insert_pos = c_info->free_nodes.before_begin();
for( std::size_t subchunk = 0, maxsubchunk = m_num_subchunks - 1
; subchunk < maxsubchunk
; ++subchunk, mem_address += m_real_chunk_alignment){
//Initialize header offset mark
new(mem_address) hdr_offset_holder(std::size_t(hdr_addr - mem_address));
char *pNode = mem_address + HdrOffsetSize;
for(std::size_t i = 0; i < elements_per_subchunk; ++i){
prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t);
pNode += m_real_node_size;
}
}
{
char *pNode = hdr_addr + HdrSize;
//We initialize all Nodes in Node Block to insert
//them in the free Node list
for(std::size_t i = 0; i < hdr_subchunk_elements; ++i){
prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t);
pNode += m_real_node_size;
}
}
//Insert the chunk after the free node list is full
m_chunk_multiset.insert(m_chunk_multiset.end(), *c_info);
}
++m_free_chunks;
}
private:
typedef typename pointer_to_other
<void_pointer, segment_manager_base_type>::type segment_mngr_base_ptr_t;
const std::size_t m_node_size;
const std::size_t m_max_free_chunks;
const std::size_t m_real_node_size;
const std::size_t m_header_size;
//Round the size to a power of two value.
//This is the total memory size (including payload) that we want to
//allocate from the general-purpose allocator
const std::size_t m_real_chunk_alignment;
std::size_t m_num_subchunks;
//This is the real number of nodes per chunk
const std::size_t m_real_num_node;
//const
std::size_t m_real_num_node;
segment_mngr_base_ptr_t mp_segment_mngr_base;//Segment manager
chunk_list_t m_chunklist; //Intrusive chunk list
typename chunk_list_t::iterator m_first_free_chunk; //Iterator to the active chunk
std::size_t m_allocated; //Used nodes for debugging
std::size_t m_free_chunks; //Free chunks
chunk_multiset_t m_chunk_multiset; //Intrusive chunk list
std::size_t m_totally_free_chunks; //Free chunks
};
//!Pooled shared memory allocator using a smart adaptive pool. Includes
//!a reference count but the class does not delete itself, this is
//!responsibility of user classes. Node size (NodeSize) and the number of
//!nodes allocated per chunk (NodesPerChunk) are known at compile time.
template< class SegmentManager
, std::size_t NodeSize
, std::size_t NodesPerChunk
, std::size_t MaxFreeChunks
, unsigned char OverheadPercent
>
class private_adaptive_node_pool
: public private_adaptive_node_pool_impl
@@ -448,8 +561,8 @@ class private_adaptive_node_pool
static const std::size_t nodes_per_chunk = NodesPerChunk;
//!Constructor from a segment manager. Never throws
private_adaptive_node_pool(segment_manager *segmeng_mngr)
: base_t(segmeng_mngr, NodeSize, NodesPerChunk, MaxFreeChunks)
private_adaptive_node_pool(segment_manager *segment_mngr)
: base_t(segment_mngr, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent)
{}
//!Returns the segment manager. Never throws
@@ -462,117 +575,25 @@ class private_adaptive_node_pool
//!responsibility of user classes. Node size (NodeSize) and the number of
//!nodes allocated per chunk (NodesPerChunk) are known at compile time
template< class SegmentManager
, class Mutex
, std::size_t NodeSize
, std::size_t NodesPerChunk
, std::size_t MaxFreeChunks
, unsigned char OverheadPercent
>
class shared_adaptive_node_pool
: public private_adaptive_node_pool
<SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks>
: public detail::shared_pool_impl
< private_adaptive_node_pool
<SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent>
>
{
private:
typedef typename SegmentManager::void_pointer void_pointer;
typedef private_adaptive_node_pool
<SegmentManager,
NodeSize, NodesPerChunk, MaxFreeChunks> private_node_allocator_t;
public:
//!Segment manager typedef
typedef SegmentManager segment_manager;
typedef typename private_node_allocator_t::free_nodes_t free_nodes_t;
//!Constructor from a segment manager. Never throws
shared_adaptive_node_pool(segment_manager *segment_mgnr)
: private_node_allocator_t(segment_mgnr){}
//!Destructor. Deallocates all allocated chunks. Never throws
~shared_adaptive_node_pool()
typedef detail::shared_pool_impl
< private_adaptive_node_pool
<SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent>
> base_t;
public:
shared_adaptive_node_pool(SegmentManager *segment_mgnr)
: base_t(segment_mgnr)
{}
//!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
void *allocate(std::size_t count)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
return private_node_allocator_t::allocate(count);
}
//!Deallocates an array pointed by ptr. Never throws
void deallocate(void *ptr, std::size_t count)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate(ptr, count);
}
//!Allocates a singly linked list of n nodes ending in null pointer.
//!can throw boost::interprocess::bad_alloc
void allocate_nodes(std::size_t n, free_nodes_t &nodes)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
return private_node_allocator_t::allocate_nodes(n, nodes);
}
//!Deallocates a linked list of nodes ending in null pointer. Never throws
void deallocate_nodes(free_nodes_t &nodes, std::size_t num)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate_nodes(nodes, num);
}
//!Deallocates a linked list of nodes ending in null pointer. Never throws
void deallocate_nodes(free_nodes_t &nodes)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate_nodes(nodes);
}
//!Deallocates all the free chunks of memory. Never throws
void deallocate_free_chunks()
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate_free_chunks();
}
//!Increments internal reference count and returns new count. Never throws
std::size_t inc_ref_count()
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
return ++m_header.m_usecount;
}
//!Decrements internal reference count and returns new count. Never throws
std::size_t dec_ref_count()
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
assert(m_header.m_usecount > 0);
return --m_header.m_usecount;
}
private:
//!This struct includes needed data and derives from
//!interprocess_mutex to allow EBO when using null_mutex
struct header_t : Mutex
{
std::size_t m_usecount; //Number of attached allocators
header_t()
: m_usecount(0) {}
} m_header;
};
} //namespace detail {

View File

@@ -0,0 +1,760 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP
#define BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/segment_manager.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/detail/utilities.hpp> //pointer_to_other, get_pointer
#include <utility> //std::pair
#include <boost/utility/addressof.hpp> //boost::addressof
#include <boost/assert.hpp> //BOOST_ASSERT
#include <boost/interprocess/exceptions.hpp> //bad_alloc
#include <boost/interprocess/sync/scoped_lock.hpp> //scoped_lock
#include <boost/interprocess/allocators/allocation_type.hpp> //allocation_type
#include <algorithm> //std::swap
namespace boost {
namespace interprocess {
namespace detail {
//!Function object that atomically finds the unique node pool of a segment,
//!constructing it if it does not exist yet, and registers one more user by
//!incrementing the pool's reference count.
template<class NodePool>
struct get_or_create_node_pool_func
{
   //!Finds or constructs the unique NodePool instance of the segment.
   //!Can throw boost::interprocess::bad_alloc
   void operator()()
   {
      //Locate (or build) the unique pool object inside the segment
      NodePool *pool = mp_segment_manager->template find_or_construct
         <NodePool>(unique_instance)(mp_segment_manager);
      //Register this user in the pool's reference count
      if(pool != 0)
         pool->inc_ref_count();
      mp_node_pool = pool;
   }

   //!Stores the segment manager the functor
   //!will operate on
   get_or_create_node_pool_func(typename NodePool::segment_manager *mngr)
      :  mp_segment_manager(mngr){}

   NodePool                            *mp_node_pool;
   typename NodePool::segment_manager  *mp_segment_manager;
};
//!Atomically finds or creates the unique NodePool of the segment managed by
//!mgnr and increments its reference count.
//!Can throw boost::interprocess::bad_alloc
template<class NodePool>
inline NodePool *get_or_create_node_pool(typename NodePool::segment_manager *mgnr)
{
   //Run the find-or-construct functor under the segment's atomic lock
   detail::get_or_create_node_pool_func<NodePool> func(mgnr);
   mgnr->atomic_func(func);
   return func.mp_node_pool;
}
//!Function object that drops one reference from a node pool and, when the
//!count reaches zero, destroys the pool object inside the segment.
//!Never throws
template<class NodePool>
struct destroy_if_last_link_func
{
   //!Decrements the reference count; the last detaching user
   //!destroys the pool. Never throws
   void operator()()
   {
      //Only the last owner tears the shared pool down
      if(mp_node_pool->dec_ref_count() == 0){
         mp_node_pool->get_segment_manager()->template destroy<NodePool>(unique_instance);
      }
   }

   //!Stores the pool the functor
   //!will operate on
   destroy_if_last_link_func(NodePool *pool)
      :  mp_node_pool(pool)
   {}

   NodePool *mp_node_pool;
};
//!Drops one reference from the pool under the segment's atomic lock,
//!destroying the pool when this was the last attached user. Never throws
template<class NodePool>
inline void destroy_node_pool_if_last_link(NodePool *pool)
{
   //The segment manager owning the pool provides the atomic execution
   typename NodePool::segment_manager *segment_mngr = pool->get_segment_manager();
   //Build the decrement-and-maybe-destroy functor and run it atomically
   destroy_if_last_link_func<NodePool> func(pool);
   segment_mngr->atomic_func(func);
}
//!Per-allocator cache of free nodes layered on top of a shared,
//!reference-counted NodePool. Most single-node allocations and
//!deallocations are served from the local chain, avoiding pool locking.
template<class NodePool>
class cache_impl
{
   typedef typename NodePool::segment_manager::
      void_pointer                                      void_pointer;
   typedef typename pointer_to_other
      <void_pointer, NodePool>::type                    node_pool_ptr;
   typedef typename NodePool::multiallocation_chain     multiallocation_chain;

   node_pool_ptr           mp_node_pool;        //Shared node pool (ref-counted)
   multiallocation_chain   m_cached_nodes;      //Locally cached free nodes
   std::size_t             m_max_cached_nodes;  //Cache size limit

   public:
   typedef typename NodePool::multiallocation_iterator  multiallocation_iterator;
   typedef typename NodePool::segment_manager           segment_manager;

   //!Finds or creates the shared node pool (adding one reference) and
   //!fixes the cache limit. Can throw boost::interprocess::bad_alloc
   cache_impl(segment_manager *segment_mngr, std::size_t max_cached_nodes)
      : mp_node_pool(get_or_create_node_pool<NodePool>(segment_mngr))
      , m_max_cached_nodes(max_cached_nodes)
   {}

   //!Copy constructor: shares the same pool (adding one reference) and
   //!copies the cache limit. The cached node chain itself is NOT copied.
   cache_impl(const cache_impl &other)
      : mp_node_pool(other.get_node_pool())
      , m_max_cached_nodes(other.get_max_cached_nodes())
   {
      mp_node_pool->inc_ref_count();
   }

   //!Returns every cached node to the pool, then drops our pool reference,
   //!destroying the pool if this was the last user. Never throws
   ~cache_impl()
   {
      this->deallocate_all_cached_nodes();
      detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool));
   }

   //!Returns a raw pointer to the shared node pool.
   //!Never throws
   NodePool *get_node_pool() const
   {  return detail::get_pointer(mp_node_pool); }

   //!Returns the segment manager the pool allocates from.
   //!Never throws
   segment_manager *get_segment_manager() const
   {  return mp_node_pool->get_segment_manager(); }

   //!Returns the maximum number of nodes this cache may hold.
   //!Never throws
   std::size_t get_max_cached_nodes() const
   {  return m_max_cached_nodes; }

   //!Pops one node from the cache, refilling the cache from the pool first
   //!if it is empty. Can throw boost::interprocess::bad_alloc
   //!NOTE(review): the refill requests m_max_cached_nodes/2 nodes, so a
   //!cache limit < 2 requests 0 nodes and pop_front() then runs on an
   //!empty chain -- confirm callers never configure such a limit.
   void *cached_allocation()
   {
      //If don't have any cached node, we have to get a new list of free nodes from the pool
      if(m_cached_nodes.empty()){
         mp_node_pool->allocate_nodes(m_cached_nodes, m_max_cached_nodes/2);
      }
      return m_cached_nodes.pop_front();
   }

   //!Builds a chain of n nodes, taking as many as possible from the cache
   //!and allocating the rest from the pool. On exception the partially
   //!built chain is pushed back through cached_deallocation before
   //!rethrowing. Can throw boost::interprocess::bad_alloc
   multiallocation_iterator cached_allocation(std::size_t n)
   {
      multiallocation_chain chain;
      std::size_t count = n;
      BOOST_TRY{
         //First drain the cache into the result chain, up to n nodes
         while(!m_cached_nodes.empty() && count--){
            void *ret = m_cached_nodes.pop_front();
            chain.push_back(ret);
         }

         //Ask the pool for whatever the cache could not provide
         if(chain.size() != n){
            mp_node_pool->allocate_nodes(chain, n - chain.size());
         }
         assert(chain.size() == n);
         //NOTE(review): this splices the remaining cached nodes relative to
         //"chain" just before taking the iterator -- verify against
         //multiallocation_chain::splice_back semantics (receiver vs argument)
         //that exactly n nodes end up in the returned range.
         chain.splice_back(m_cached_nodes);
         return multiallocation_iterator(chain.get_it());
      }
      BOOST_CATCH(...){
         this->cached_deallocation(multiallocation_iterator(chain.get_it()));
         throw;
      }
      BOOST_CATCH_END
   }

   //!Returns one node to the cache; if the cache is already full, first
   //!flushes half of it to the pool in one multi-node deallocation.
   //!Never throws
   void cached_deallocation(void *ptr)
   {
      //Check if cache is full
      if(m_cached_nodes.size() >= m_max_cached_nodes){
         //This only occurs if this allocator deallocate memory allocated
         //with other equal allocator. Since the cache is full, and more
         //deallocations are probably coming, we'll make some room in cache
         //in a single, efficient multi node deallocation.
         this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
      }
      m_cached_nodes.push_front(ptr);
   }

   //!Returns a whole range of nodes to the cache, then trims the cache to
   //!half its limit if it overflowed. Never throws
   void cached_deallocation(multiallocation_iterator it)
   {
      multiallocation_iterator itend;
      while(it != itend){
         void *addr = &*it;
         ++it;
         m_cached_nodes.push_front(addr);
      }

      //Check if cache is full
      if(m_cached_nodes.size() >= m_max_cached_nodes){
         //This only occurs if this allocator deallocate memory allocated
         //with other equal allocator. Since the cache is full, and more
         //deallocations are probably coming, we'll make some room in cache
         //in a single, efficient multi node deallocation.
         this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
      }
   }

   //!Sets the new max cached nodes value. This can provoke deallocations
   //!if "newmax" is less than current cached nodes. Never throws
   void set_max_cached_nodes(std::size_t newmax)
   {
      m_max_cached_nodes = newmax;
      this->priv_deallocate_remaining_nodes();
   }

   //!Frees all cached nodes.
   //!Never throws
   void deallocate_all_cached_nodes()
   {
      if(m_cached_nodes.empty()) return;
      mp_node_pool->deallocate_nodes(m_cached_nodes);
   }

   private:
   //!Trims the cache down to the configured limit.
   //!Never throws
   void priv_deallocate_remaining_nodes()
   {
      if(m_cached_nodes.size() > m_max_cached_nodes){
         priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
      }
   }

   //!Frees n cached nodes at once. Never throws
   void priv_deallocate_n_nodes(std::size_t n)
   {
      //Deallocate all new linked list at once
      mp_node_pool->deallocate_nodes(m_cached_nodes, n);
   }
};
//!CRTP base class implementing the array-oriented part of the allocator
//!interface (allocate_many, allocation_command, size, address,
//!construct/destroy) by delegating to Derived::get_segment_manager().
template<class Derived, class T, class SegmentManager>
class array_allocation_impl
{
   //CRTP downcasts to reach Derived::get_segment_manager()
   const Derived *derived() const
   {  return static_cast<const Derived*>(this); }
   Derived *derived()
   {  return static_cast<Derived*>(this); }

   typedef typename SegmentManager::void_pointer         void_pointer;

   public:
   typedef typename detail::
      pointer_to_other<void_pointer, T>::type            pointer;
   typedef typename detail::
      pointer_to_other<void_pointer, const T>::type      const_pointer;
   typedef T                                             value_type;
   typedef typename detail::add_reference
                     <value_type>::type                  reference;
   typedef typename detail::add_reference
                     <const value_type>::type            const_reference;
   typedef std::size_t                                   size_type;
   typedef std::ptrdiff_t                                difference_type;
   //Iterator over multiple allocations, casting each raw node to T
   typedef transform_iterator
      < typename SegmentManager::
         multiallocation_iterator
      , detail::cast_functor <T> >                       multiallocation_iterator;
   typedef typename SegmentManager::
      multiallocation_chain                              multiallocation_chain;

   public:
   //!Returns maximum the number of objects the previously allocated memory
   //!pointed by p can hold. This size only works for memory allocated with
   //!allocate, allocation_command and allocate_many.
   size_type size(const pointer &p) const
   {
      return (size_type)this->derived()->get_segment_manager()->size(detail::get_pointer(p))/sizeof(T);
   }

   //!Forwards an expand/shrink/allocate request to the segment manager.
   //!NOTE(review): limit/preferred/received sizes are forwarded without
   //!scaling by sizeof(T) -- confirm the expected units for this
   //!version-2 allocator interface against the callers.
   std::pair<pointer, bool>
      allocation_command(allocation_type command,
                         size_type limit_size,
                         size_type preferred_size,
                         size_type &received_size, const pointer &reuse = 0)
   {
      return this->derived()->get_segment_manager()->allocation_command
         (command, limit_size, preferred_size, received_size, detail::get_pointer(reuse));
   }

   //!Allocates num_elements chunks, each one holding elem_size objects of
   //!type T. The elements must be deallocated
   //!with deallocate(...)
   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements)
   {
      return multiallocation_iterator
         (this->derived()->get_segment_manager()->allocate_many(sizeof(T)*elem_size, num_elements));
   }

   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
   //!contiguous chunk
   //!of memory. The elements must be deallocated
   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements)
   {
      return multiallocation_iterator
         (this->derived()->get_segment_manager()->allocate_many(elem_sizes, n_elements, sizeof(T)));
   }

   //!Deallocates the elements pointed by the multiallocation iterator,
   //!previously obtained from allocate_many. Never throws
   void deallocate_many(multiallocation_iterator it)
   {  return this->derived()->get_segment_manager()->deallocate_many(it.base()); }

   //!Returns the number of elements that could be
   //!allocated. Never throws
   size_type max_size() const
   {  return this->derived()->get_segment_manager()->get_size()/sizeof(T); }

   //!Returns address of mutable object.
   //!Never throws
   pointer address(reference value) const
   {  return pointer(boost::addressof(value)); }

   //!Returns address of non mutable object.
   //!Never throws
   const_pointer address(const_reference value) const
   {  return const_pointer(boost::addressof(value)); }

   //!Default construct an object.
   //!Throws if T's default constructor throws
   void construct(const pointer &ptr)
   {  new(detail::get_pointer(ptr)) value_type; }

   //!Destroys object. Throws if object's
   //!destructor throws
   void destroy(const pointer &ptr)
   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
};
//!CRTP base class implementing single-node allocation on top of
//!array_allocation_impl. Version 1 allocators serve count == 1 requests
//!from Derived::get_node_pool(); everything else goes straight to the
//!segment manager.
template<class Derived, unsigned int Version, class T, class SegmentManager>
class node_pool_allocation_impl
   :  public array_allocation_impl
      < Derived
      , T
      , SegmentManager>
{
   //CRTP downcasts to reach Derived::get_node_pool()
   const Derived *derived() const
   {  return static_cast<const Derived*>(this); }
   Derived *derived()
   {  return static_cast<Derived*>(this); }

   typedef typename SegmentManager::void_pointer         void_pointer;
   typedef typename detail::
      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;

   public:
   typedef typename detail::
      pointer_to_other<void_pointer, T>::type            pointer;
   typedef typename detail::
      pointer_to_other<void_pointer, const T>::type      const_pointer;
   typedef T                                             value_type;
   typedef typename detail::add_reference
                     <value_type>::type                  reference;
   typedef typename detail::add_reference
                     <const value_type>::type            const_reference;
   typedef std::size_t                                   size_type;
   typedef std::ptrdiff_t                                difference_type;
   typedef transform_iterator
      < typename SegmentManager::
         multiallocation_iterator
      , detail::cast_functor <T> >                       multiallocation_iterator;
   typedef typename SegmentManager::
      multiallocation_chain                              multiallocation_chain;

   public:
   //!Allocate memory for an array of count elements. Version 1 allocators
   //!take single nodes from the node pool; other requests come from the
   //!segment manager.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate(size_type count, cvoid_pointer hint = 0)
   {
      (void)hint;
      if(count > this->max_size())
         throw bad_alloc();
      else if(Version == 1 && count == 1)
         return pointer(static_cast<value_type*>(this->derived()->get_node_pool()->allocate_node()));
      else
         return pointer(static_cast<value_type*>
            (this->derived()->get_node_pool()->get_segment_manager()->allocate(sizeof(T)*count)));
   }

   //!Deallocate allocated memory, mirroring the allocate() dispatch.
   //!Never throws
   void deallocate(const pointer &ptr, size_type count)
   {
      (void)count;
      if(Version == 1 && count == 1)
         this->derived()->get_node_pool()->deallocate_node(detail::get_pointer(ptr));
      else
         this->derived()->get_node_pool()->get_segment_manager()->deallocate(detail::get_pointer(ptr));
   }

   //!Allocates just one object. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate_one()
   {  return pointer(static_cast<value_type*>(this->derived()->get_node_pool()->allocate_node())); }

   //!Allocates num_elements nodes of size == 1. Memory allocated with this
   //!function must be deallocated only with deallocate_one() or
   //!deallocate_individual().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   multiallocation_iterator allocate_individual(std::size_t num_elements)
   {  return multiallocation_iterator(this->derived()->get_node_pool()->allocate_nodes(num_elements)); }

   //!Deallocates memory previously allocated with allocate_one().
   //!You should never use deallocate_one to deallocate memory allocated
   //!with other functions different from allocate_one(). Never throws
   void deallocate_one(const pointer &p)
   {  this->derived()->get_node_pool()->deallocate_node(detail::get_pointer(p)); }

   //!Deallocates the nodes pointed by the multiallocation iterator,
   //!previously obtained from allocate_individual(). Never throws
   void deallocate_individual(multiallocation_iterator it)
   {  this->derived()->get_node_pool()->deallocate_nodes(it.base()); }

   //!Deallocates all free chunks of the pool
   void deallocate_free_chunks()
   {  this->derived()->get_node_pool()->deallocate_free_chunks(); }
};
//!Allocator implementation that routes single-node requests through a
//!per-allocator cache (cache_impl) backed by a shared, reference-counted
//!node pool. Array requests go straight to the segment manager.
template<class T, class NodePool, unsigned int Version>
class cached_allocator_impl
   :  public array_allocation_impl
      <cached_allocator_impl<T, NodePool, Version>, T, typename NodePool::segment_manager>
{
   //Non-assignable: declared private and left undefined
   cached_allocator_impl & operator=(const cached_allocator_impl& other);

   typedef array_allocation_impl
         < cached_allocator_impl
            <T, NodePool, Version>
         , T
         , typename NodePool::segment_manager> base_t;

   public:
   typedef NodePool                                      node_pool_t;
   typedef typename NodePool::segment_manager            segment_manager;
   typedef typename segment_manager::void_pointer        void_pointer;
   typedef typename detail::
      pointer_to_other<void_pointer, const void>::type   cvoid_pointer;
   typedef typename base_t::pointer                      pointer;
   typedef typename base_t::size_type                    size_type;
   typedef typename base_t::multiallocation_iterator     multiallocation_iterator;
   typedef typename base_t::multiallocation_chain        multiallocation_chain;
   typedef typename base_t::value_type                   value_type;

   public:
   //Default cache limit used by derived allocators
   enum { DEFAULT_MAX_CACHED_NODES = 64 };

   //!Constructor from a segment manager and cache limit. Finds or creates
   //!the shared node pool. Can throw boost::interprocess::bad_alloc
   cached_allocator_impl(segment_manager *segment_mngr, std::size_t max_cached_nodes)
      : m_cache(segment_mngr, max_cached_nodes)
   {}

   //!Copy constructor. Shares the node pool of the source (incrementing
   //!its reference count). Never throws
   cached_allocator_impl(const cached_allocator_impl &other)
      : m_cache(other.m_cache)
   {}

   //!Copy constructor from related cached_adaptive_pool_base. If not present, constructs
   //!a node pool. Increments the reference count of the associated node pool.
   //!Can throw boost::interprocess::bad_alloc
   template<class T2, class NodePool2>
   cached_allocator_impl
      (const cached_allocator_impl
         <T2, NodePool2, Version> &other)
      : m_cache(other.get_segment_manager(), other.get_max_cached_nodes())
   {}

   //!Returns a pointer to the node pool.
   //!Never throws
   node_pool_t* get_node_pool() const
   {  return m_cache.get_node_pool(); }

   //!Returns the segment manager.
   //!Never throws
   segment_manager* get_segment_manager()const
   {  return m_cache.get_segment_manager(); }

   //!Sets the new max cached nodes value. This can provoke deallocations
   //!if "newmax" is less than current cached nodes. Never throws
   void set_max_cached_nodes(std::size_t newmax)
   {  m_cache.set_max_cached_nodes(newmax); }

   //!Returns the max cached nodes parameter.
   //!Never throws
   std::size_t get_max_cached_nodes() const
   {  return m_cache.get_max_cached_nodes(); }

   //!Allocate memory for an array of count elements. Version 1 single-node
   //!requests come from the cache; everything else from the segment manager.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate(size_type count, cvoid_pointer hint = 0)
   {
      (void)hint;
      void * ret;
      if(count > this->max_size())
         throw bad_alloc();
      else if(Version == 1 && count == 1){
         ret = m_cache.cached_allocation();
      }
      else{
         ret = this->get_segment_manager()->allocate(sizeof(T)*count);
      }
      return pointer(static_cast<T*>(ret));
   }

   //!Deallocate allocated memory, mirroring the allocate() dispatch.
   //!Never throws
   void deallocate(const pointer &ptr, size_type count)
   {
      (void)count;
      if(Version == 1 && count == 1){
         m_cache.cached_deallocation(detail::get_pointer(ptr));
      }
      else{
         this->get_segment_manager()->deallocate(detail::get_pointer(ptr));
      }
   }

   //!Allocates just one object from the cache. Memory allocated with this
   //!function must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate_one()
   {  return pointer(static_cast<value_type*>(this->m_cache.cached_allocation())); }

   //!Allocates num_elements nodes of size == 1 through the cache. Memory
   //!allocated with this function must be deallocated only with
   //!deallocate_one() or deallocate_individual().
   multiallocation_iterator allocate_individual(std::size_t num_elements)
   {  return multiallocation_iterator(this->m_cache.cached_allocation(num_elements)); }

   //!Deallocates memory previously allocated with allocate_one().
   //!You should never use deallocate_one to deallocate memory allocated
   //!with other functions different from allocate_one(). Never throws
   void deallocate_one(const pointer &p)
   {  this->m_cache.cached_deallocation(detail::get_pointer(p)); }

   //!Returns the nodes pointed by the multiallocation iterator to the
   //!cache. Never throws
   void deallocate_individual(multiallocation_iterator it)
   {  m_cache.cached_deallocation(it.base()); }

   //!Deallocates all free chunks of the pool
   void deallocate_free_chunks()
   {  m_cache.get_node_pool()->deallocate_free_chunks(); }

   //!Swaps allocators. Does not throw. If each allocator is placed in a
   //!different shared memory segments, the result is undefined.
   //!NOTE(review): this body references mp_node_pool, m_cached_nodes and
   //!m_max_cached_nodes, which are NOT members of this class -- all state
   //!lives inside m_cache. As a friend defined in-class it is only compiled
   //!when called, so this will fail to compile on first use; it should swap
   //!the m_cache members instead (requires a swap operation on cache_impl).
   friend void swap(cached_allocator_impl &alloc1, cached_allocator_impl &alloc2)
   {
      detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool);
      alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes);
      detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes);
   }

   //!Returns all cached nodes to the shared pool.
   //!Never throws
   void deallocate_cache()
   {  m_cache.deallocate_all_cached_nodes(); }

   /// @cond
   private:
   cache_impl<node_pool_t> m_cache;
};
//!Equality: two cached_allocator_impl instances compare equal
//!when they share the same underlying node pool
template<class T, class N, unsigned int V> inline
bool operator==(const cached_allocator_impl<T, N, V> &left,
                const cached_allocator_impl<T, N, V> &right)
{  return left.get_node_pool() == right.get_node_pool();  }
//!Inequality: defined as the negation of equality
//!on the underlying node pools
template<class T, class N, unsigned int V> inline
bool operator!=(const cached_allocator_impl<T, N, V> &left,
                const cached_allocator_impl<T, N, V> &right)
{  return !(left.get_node_pool() == right.get_node_pool());  }
//!Wrapper that turns a private (unsynchronized) node pool into a shared
//!one: every operation is serialized with the segment's mutex type and a
//!use count of attached allocators is maintained. The class does not
//!delete itself; lifetime is driven by inc_ref_count()/dec_ref_count()
//!through destroy_node_pool_if_last_link().
template<class private_node_allocator_t>
class shared_pool_impl
   : public private_node_allocator_t
{
   public:
   //!Segment manager typedef
   typedef typename private_node_allocator_t::segment_manager   segment_manager;
   typedef typename private_node_allocator_t::
      multiallocation_iterator                                  multiallocation_iterator;
   typedef typename private_node_allocator_t::
      multiallocation_chain                                     multiallocation_chain;

   private:
   //Mutex type chosen by the segment's mutex family (null_mutex allows EBO)
   typedef typename segment_manager::mutex_family::mutex_type   mutex_type;

   public:
   //!Constructor from a segment manager. Never throws
   shared_pool_impl(segment_manager *segment_mngr)
      : private_node_allocator_t(segment_mngr)
   {}

   //!Destructor. Deallocates all allocated chunks. Never throws
   ~shared_pool_impl()
   {}

   //!Allocates one node under the pool lock.
   //!Can throw boost::interprocess::bad_alloc
   void *allocate_node()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      return private_node_allocator_t::allocate_node();
   }

   //!Deallocates one node under the pool lock. Never throws
   void deallocate_node(void *ptr)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_node(ptr);
   }

   //!Allocates n nodes pushing them in the chain, under the pool lock.
   //!Can throw boost::interprocess::bad_alloc
   void allocate_nodes(multiallocation_chain &nodes, std::size_t n)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      return private_node_allocator_t::allocate_nodes(nodes, n);
   }

   //!Allocates n nodes, pointed by the multiallocation_iterator,
   //!under the pool lock. Can throw boost::interprocess::bad_alloc
   multiallocation_iterator allocate_nodes(const std::size_t n)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      return private_node_allocator_t::allocate_nodes(n);
   }

   //!Deallocates the first num nodes of the chain, under the pool lock.
   //!Never throws
   void deallocate_nodes(multiallocation_chain &nodes, std::size_t num)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_nodes(nodes, num);
   }

   //!Deallocates a whole chain of nodes, under the pool lock.
   //!Never throws
   void deallocate_nodes(multiallocation_chain &nodes)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_nodes(nodes);
   }

   //!Deallocates the nodes pointed by the multiallocation iterator,
   //!under the pool lock. Never throws
   void deallocate_nodes(multiallocation_iterator it)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_nodes(it);
   }

   //!Deallocates all the free chunks of memory, under the pool lock.
   //!Never throws
   void deallocate_free_chunks()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_free_chunks();
   }

   //!Deallocates all used memory from the common pool.
   //!Precondition: all nodes allocated from this pool should
   //!already be deallocated. Otherwise, undefined behavior. Never throws
   void purge_chunks()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::purge_chunks();
   }

   //!Increments internal reference count and returns new count. Never throws
   std::size_t inc_ref_count()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      return ++m_header.m_usecount;
   }

   //!Decrements internal reference count and returns new count. Never throws
   std::size_t dec_ref_count()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      assert(m_header.m_usecount > 0);
      return --m_header.m_usecount;
   }

   private:
   //!This struct includes needed data and derives from
   //!the mutex type to allow EBO when using null_mutex
   struct header_t : mutex_type
   {
      std::size_t m_usecount;    //Number of attached allocators
      header_t()
         :  m_usecount(0) {}
   } m_header;
};
} //namespace detail {
} //namespace interprocess {
} //namespace boost {
#include <boost/interprocess/detail/config_end.hpp>
#endif //#ifndef BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -24,7 +24,9 @@
#include <boost/intrusive/slist.hpp>
#include <boost/math/common_factor_ct.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/allocators/detail/node_tools.hpp>
#include <boost/interprocess/allocators/detail/allocator_common.hpp>
#include <cstddef>
#include <functional>
#include <algorithm>
@@ -51,10 +53,14 @@ class private_node_pool_impl
typedef typename node_slist<void_pointer>::slist_hook_t slist_hook_t;
typedef typename node_slist<void_pointer>::node_t node_t;
typedef typename node_slist<void_pointer>::node_slist_t free_nodes_t;
typedef typename SegmentManagerBase::multiallocation_iterator multiallocation_iterator;
typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain;
private:
typedef typename bi::make_slist < node_t, bi::base_hook<slist_hook_t>
, bi::constant_time_size<false> >::type chunkslist_t;
typedef typename bi::make_slist
< node_t, bi::base_hook<slist_hook_t>
, bi::linear<true>
, bi::constant_time_size<false> >::type chunkslist_t;
public:
//!Segment manager typedef
@@ -62,10 +68,8 @@ class private_node_pool_impl
//!Constructor from a segment manager. Never throws
private_node_pool_impl(segment_manager_base_type *segment_mngr_base, std::size_t node_size, std::size_t nodes_per_chunk)
: m_node_size(node_size)
, m_nodes_per_chunk(nodes_per_chunk)
, m_real_node_size(detail::lcm(node_size, sizeof(node_t)))
, m_block_size(detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, sizeof(node_t)))
: m_nodes_per_chunk(nodes_per_chunk)
, m_real_node_size(detail::lcm(node_size, std::size_t(alignment_of<node_t>::value)))
//General purpose allocator
, mp_segment_mngr_base(segment_mngr_base)
, m_chunklist()
@@ -76,7 +80,7 @@ class private_node_pool_impl
//!Destructor. Deallocates all allocated chunks. Never throws
~private_node_pool_impl()
{ priv_clear(); }
{ this->purge_chunks(); }
std::size_t get_real_num_node() const
{ return m_nodes_per_chunk; }
@@ -86,116 +90,73 @@ class private_node_pool_impl
{ return detail::get_pointer(mp_segment_mngr_base); }
//!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
void *allocate(std::size_t count)
{
std::size_t bytes = count*m_node_size;
if(bytes > m_real_node_size){//Normal allocation, no pooling used
void *addr = mp_segment_mngr_base->allocate(bytes);
if(!addr) throw bad_alloc();
return addr;
}
else //Node allocation, pooling used
return priv_alloc_node();
}
void *allocate_node()
{ return priv_alloc_node(); }
//!Deallocates an array pointed by ptr. Never throws
void deallocate(void *ptr, std::size_t count)
{
std::size_t bytes = count*m_node_size;
if(bytes > m_real_node_size)//Normal allocation was used
mp_segment_mngr_base->deallocate(ptr);
else //Node allocation was used
priv_dealloc_node(ptr);
}
void deallocate_node(void *ptr)
{ priv_dealloc_node(ptr); }
//!Allocates a singly linked list of n nodes ending in null pointer.
//!Allocates a singly linked list of n nodes ending in null pointer and pushes them in the chain.
//!can throw boost::interprocess::bad_alloc
void allocate_nodes(const std::size_t n, free_nodes_t &nodes)
void allocate_nodes(multiallocation_chain &nodes, const std::size_t n)
{
std::size_t i = 0;
try{
for(; i < n; ++i){
nodes.push_front(*priv_alloc_node());
nodes.push_front(priv_alloc_node());
}
}
catch(...){
priv_deallocate_nodes(nodes, i);
this->deallocate_nodes(nodes, i);
throw;
}
}
//!Deallocates a linked list of nodes. Never throws
void deallocate_nodes(free_nodes_t &nodes)
{ priv_deallocate_nodes(nodes, nodes.size()); }
//!Deallocates the first n nodes of a linked list of nodes. Never throws
void deallocate_nodes(free_nodes_t &nodes, std::size_t n)
{ priv_deallocate_nodes(nodes, n); }
//!Deallocates all the free chunks of memory. Never throws
void deallocate_free_chunks()
{ priv_deallocate_free_chunks(); }
std::size_t num_free_nodes()
{ return m_freelist.size(); }
void swap(private_node_pool_impl &other)
//!Allocates a singly linked list of n nodes ending in null pointer
//!can throw boost::interprocess::bad_alloc
multiallocation_iterator allocate_nodes(const std::size_t n)
{
std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
m_chunklist.swap(other.m_chunklist);
m_freelist.swap(other.m_freelist);
std::swap(m_allocated, other.m_allocated);
multiallocation_chain nodes;
std::size_t i = 0;
try{
for(; i < n; ++i){
nodes.push_front(priv_alloc_node());
}
}
catch(...){
this->deallocate_nodes(nodes, i);
throw;
}
return nodes.get_it();
}
private:
//!Deallocates a linked list of nodes. Never throws
void deallocate_nodes(multiallocation_chain &nodes)
{ this->deallocate_nodes(nodes.get_it()); }
void priv_deallocate_nodes(free_nodes_t &nodes, const std::size_t num)
//!Deallocates the first n nodes of a linked list of nodes. Never throws
void deallocate_nodes(multiallocation_chain &nodes, std::size_t num)
{
assert(nodes.size() >= num);
for(std::size_t i = 0; i < num; ++i){
node_t *to_deallocate = &nodes.front();
nodes.pop_front();
deallocate(to_deallocate, 1);
deallocate_node(nodes.pop_front());
}
}
struct push_in_list
//!Deallocates the nodes pointed by the multiallocation iterator. Never throws
void deallocate_nodes(multiallocation_iterator it)
{
push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it)
: slist_(l), last_it_(it)
{}
void operator()(typename free_nodes_t::pointer p) const
{
slist_.push_front(*p);
if(slist_.size() == 1){ //Cache last element
++last_it_ = slist_.begin();
}
multiallocation_iterator itend;
while(it != itend){
void *addr = &*it;
++it;
deallocate_node(addr);
}
}
private:
free_nodes_t &slist_;
typename free_nodes_t::iterator &last_it_;
};
struct is_between
: std::unary_function<typename free_nodes_t::value_type, bool>
{
is_between(const void *addr, std::size_t size)
: beg_((const char *)addr), end_(beg_+size)
{}
bool operator()(typename free_nodes_t::const_reference v) const
{
return (beg_ <= (const char *)&v &&
end_ > (const char *)&v);
}
private:
const char * beg_;
const char * end_;
};
void priv_deallocate_free_chunks()
//!Deallocates all the free chunks of memory. Never throws
void deallocate_free_chunks()
{
typedef typename free_nodes_t::iterator nodelist_iterator;
typename chunkslist_t::iterator bit(m_chunklist.before_begin()),
@@ -204,16 +165,19 @@ class private_node_pool_impl
free_nodes_t backup_list;
nodelist_iterator backup_list_last = backup_list.before_begin();
//Execute the algorithm and get an iterator to the last value
std::size_t blocksize = detail::get_rounded_size
(m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
while(it != itend){
//Collect all the nodes from the chunk pointed by it
//and push them in the list
free_nodes_t free_nodes;
nodelist_iterator last_it = free_nodes.before_begin();
const void *addr = get_chunk_from_hook(&*it);
const void *addr = get_chunk_from_hook(&*it, blocksize);
//Execute the algorithm and get an iterator to the last value
m_freelist.remove_and_dispose_if
(is_between(addr, m_block_size), push_in_list(free_nodes, last_it));
(is_between(addr, blocksize), push_in_list(free_nodes, last_it));
//If the number of nodes is equal to m_nodes_per_chunk
//this means that the block can be deallocated
@@ -253,23 +217,76 @@ class private_node_pool_impl
, backup_list.size());
}
//!Deallocates all used memory. Never throws
void priv_clear()
std::size_t num_free_nodes()
{ return m_freelist.size(); }
//!Deallocates all used memory. Precondition: all nodes allocated from this pool should
//!already be deallocated. Otherwise, undefined behaviour. Never throws
void purge_chunks()
{
//check for memory leaks
assert(m_allocated==0);
std::size_t blocksize = detail::get_rounded_size
(m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
typename chunkslist_t::iterator
it(m_chunklist.begin()), itend(m_chunklist.end()), aux;
//We iterate though the NodeBlock list to free the memory
while(!m_chunklist.empty()){
void *addr = get_chunk_from_hook(&m_chunklist.front());
void *addr = get_chunk_from_hook(&m_chunklist.front(), blocksize);
m_chunklist.pop_front();
mp_segment_mngr_base->deallocate(addr);
}
}
//Just clear free node list
m_freelist.clear();
}
void swap(private_node_pool_impl &other)
{
std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
m_chunklist.swap(other.m_chunklist);
m_freelist.swap(other.m_freelist);
std::swap(m_allocated, other.m_allocated);
}
private:
struct push_in_list
{
push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it)
: slist_(l), last_it_(it)
{}
void operator()(typename free_nodes_t::pointer p) const
{
slist_.push_front(*p);
if(slist_.size() == 1){ //Cache last element
++last_it_ = slist_.begin();
}
}
private:
free_nodes_t &slist_;
typename free_nodes_t::iterator &last_it_;
};
struct is_between
: std::unary_function<typename free_nodes_t::value_type, bool>
{
is_between(const void *addr, std::size_t size)
: beg_((const char *)addr), end_(beg_+size)
{}
bool operator()(typename free_nodes_t::const_reference v) const
{
return (beg_ <= (const char *)&v &&
end_ > (const char *)&v);
}
private:
const char * beg_;
const char * end_;
};
//!Allocates one node, using single segregated storage algorithm.
//!Never throws
node_t *priv_alloc_node()
@@ -300,10 +317,13 @@ class private_node_pool_impl
{
//We allocate a new NodeBlock and put it as first
//element in the free Node list
char *pNode = detail::char_ptr_cast(mp_segment_mngr_base->allocate(m_block_size + sizeof(node_t)));
std::size_t blocksize =
detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
char *pNode = detail::char_ptr_cast
(mp_segment_mngr_base->allocate(blocksize + sizeof(node_t)));
if(!pNode) throw bad_alloc();
char *pBlock = pNode;
m_chunklist.push_front(get_chunk_hook(pBlock));
m_chunklist.push_front(get_chunk_hook(pBlock, blocksize));
//We initialize all Nodes in Node Block to insert
//them in the free Node list
@@ -314,26 +334,24 @@ class private_node_pool_impl
private:
//!Returns a reference to the chunk hook placed in the end of the chunk
inline node_t & get_chunk_hook (void *chunk)
static inline node_t & get_chunk_hook (void *chunk, std::size_t blocksize)
{
return *static_cast<node_t*>(
static_cast<void*>((detail::char_ptr_cast(chunk)+m_block_size)));
static_cast<void*>((detail::char_ptr_cast(chunk) + blocksize)));
}
//!Returns the starting address of the chunk reference to the chunk hook placed in the end of the chunk
inline void *get_chunk_from_hook (node_t *hook)
inline void *get_chunk_from_hook (node_t *hook, std::size_t blocksize)
{
return static_cast<void*>((detail::char_ptr_cast(hook) - m_block_size));
return static_cast<void*>((detail::char_ptr_cast(hook) - blocksize));
}
private:
typedef typename pointer_to_other
<void_pointer, segment_manager_base_type>::type segment_mngr_base_ptr_t;
const std::size_t m_node_size;
const std::size_t m_nodes_per_chunk;
const std::size_t m_real_node_size;
const std::size_t m_block_size;
segment_mngr_base_ptr_t mp_segment_mngr_base; //Segment manager
chunkslist_t m_chunklist; //Intrusive container of chunks
free_nodes_t m_freelist; //Intrusive container of free nods
@@ -376,114 +394,28 @@ class private_node_pool
//!a reference count but the class does not delete itself, this is
//!responsibility of user classes. Node size (NodeSize) and the number of
//!nodes allocated per chunk (NodesPerChunk) are known at compile time
//!Pooled shared memory allocator using adaptive pool. Includes
//!a reference count but the class does not delete itself, this is
//!responsibility of user classes. Node size (NodeSize) and the number of
//!nodes allocated per chunk (NodesPerChunk) are known at compile time
template< class SegmentManager
, class Mutex
, std::size_t NodeSize
, std::size_t NodesPerChunk
>
class shared_node_pool
: public private_node_pool<SegmentManager, NodeSize, NodesPerChunk>
: public detail::shared_pool_impl
< private_node_pool
<SegmentManager, NodeSize, NodesPerChunk>
>
{
private:
typedef typename SegmentManager::void_pointer void_pointer;
typedef private_node_pool
<SegmentManager, NodeSize, NodesPerChunk> private_node_allocator_t;
typedef detail::shared_pool_impl
< private_node_pool
<SegmentManager, NodeSize, NodesPerChunk>
> base_t;
public:
typedef SegmentManager segment_manager;
typedef typename private_node_allocator_t::free_nodes_t free_nodes_t;
//!Constructor from a segment manager. Never throws
shared_node_pool(segment_manager *segment_mngr)
: private_node_allocator_t(segment_mngr){}
//!Destructor. Deallocates all allocated chunks. Never throws
~shared_node_pool()
shared_node_pool(SegmentManager *segment_mgnr)
: base_t(segment_mgnr)
{}
//!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
void *allocate(std::size_t count)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
return private_node_allocator_t::allocate(count);
}
//!Deallocates an array pointed by ptr. Never throws
void deallocate(void *ptr, std::size_t count)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate(ptr, count);
}
//!Allocates a singly linked list of n nodes ending in null pointer.
//!can throw boost::interprocess::bad_alloc
void allocate_nodes(const std::size_t n, free_nodes_t &nodes)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::allocate_nodes(n, nodes);
}
//!Deallocates a linked list of nodes ending in null pointer. Never throws
void deallocate_nodes(free_nodes_t &nodes, std::size_t n)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate_nodes(nodes, n);
}
void deallocate_nodes(free_nodes_t &nodes)
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate_nodes(nodes);
}
//!Deallocates all the free chunks of memory. Never throws
void deallocate_free_chunks()
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
private_node_allocator_t::deallocate_free_chunks();
}
//!Increments internal reference count and returns new count. Never throws
std::size_t inc_ref_count()
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
return ++m_header.m_usecount;
}
//!Decrements internal reference count and returns new count. Never throws
std::size_t dec_ref_count()
{
//-----------------------
boost::interprocess::scoped_lock<Mutex> guard(m_header);
//-----------------------
assert(m_header.m_usecount > 0);
return --m_header.m_usecount;
}
private:
//!This struct includes needed data and derives from
//!interprocess_mutex to allow EBO when using null_mutex
struct header_t : Mutex
{
std::size_t m_usecount; //Number of attached allocators
header_t()
: m_usecount(0) {}
} m_header;
};
} //namespace detail {

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -37,7 +37,8 @@ struct node_slist
: public slist_hook_t
{};
typedef typename bi::make_slist<node_t, bi::base_hook<slist_hook_t> >::type node_slist_t;
typedef typename bi::make_slist
<node_t, bi::linear<true>, bi::base_hook<slist_hook_t> >::type node_slist_t;
};
} //namespace detail {

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -8,8 +8,8 @@
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP
#define BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP
#ifndef BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP
#define BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP
#if (defined _MSC_VER) && (_MSC_VER >= 1200)
# pragma once
@@ -22,9 +22,10 @@
#include <boost/assert.hpp>
#include <boost/utility/addressof.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/allocators/detail/node_pool.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/allocators/detail/allocator_common.hpp>
#include <memory>
#include <algorithm>
#include <cstddef>
@@ -35,26 +36,35 @@
namespace boost {
namespace interprocess {
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This node allocator shares a segregated storage between all instances
//!of node_allocator with equal sizeof(T) placed in the same segment
//!group. NodesPerChunk is the number of nodes allocated at once when the allocator
//!needs runs out of nodes
template<class T, class SegmentManager, std::size_t NodesPerChunk>
class node_allocator
/// @cond
namespace detail{
template < unsigned int Version
, class T
, class SegmentManager
, std::size_t NodesPerChunk
>
class node_allocator_base
: public node_pool_allocation_impl
< node_allocator_base
< Version, T, SegmentManager, NodesPerChunk>
, Version
, T
, SegmentManager
>
{
public:
typedef typename SegmentManager::void_pointer void_pointer;
typedef typename detail::
pointer_to_other<void_pointer, const void>::type cvoid_pointer;
typedef SegmentManager segment_manager;
typedef typename SegmentManager::
mutex_family::mutex_type mutex_type;
typedef node_allocator
<T, SegmentManager, NodesPerChunk> self_t;
typedef node_allocator_base
<Version, T, SegmentManager, NodesPerChunk> self_t;
typedef detail::shared_node_pool
< SegmentManager, sizeof(T), NodesPerChunk> node_pool_t;
typedef typename detail::
pointer_to_other<void_pointer, node_pool_t>::type node_pool_ptr;
BOOST_STATIC_ASSERT((Version <=2));
public:
//-------
@@ -69,61 +79,61 @@ class node_allocator
<const value_type>::type const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef detail::shared_node_pool
< SegmentManager, mutex_type
, sizeof(T), NodesPerChunk> node_pool_t;
typedef typename detail::
pointer_to_other<void_pointer, node_pool_t>::type node_pool_ptr;
//!Obtains node_allocator from other
//!node_allocator
typedef detail::version_type<node_allocator_base, Version> version;
typedef transform_iterator
< typename SegmentManager::
multiallocation_iterator
, detail::cast_functor <T> > multiallocation_iterator;
typedef typename SegmentManager::
multiallocation_chain multiallocation_chain;
//!Obtains node_allocator_base from
//!node_allocator_base
template<class T2>
struct rebind
{
typedef node_allocator<T2, SegmentManager, NodesPerChunk> other;
typedef node_allocator_base<Version, T2, SegmentManager, NodesPerChunk> other;
};
/// @cond
private:
//!Not assignable from related
//!node_allocator
template<class T2, class SegmentManager2, std::size_t N2>
node_allocator& operator=
(const node_allocator<T2, SegmentManager2, N2>&);
//!Not assignable from related node_allocator_base
template<unsigned int Version2, class T2, class SegmentManager2, std::size_t N2>
node_allocator_base& operator=
(const node_allocator_base<Version2, T2, SegmentManager2, N2>&);
//!Not assignable from other
//!node_allocator
node_allocator& operator=(const node_allocator&);
//!Not assignable from other node_allocator_base
node_allocator_base& operator=(const node_allocator_base&);
/// @endcond
public:
//!Constructor from a segment manager. If not present, constructs a node
//!pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
node_allocator(segment_manager *segment_mngr)
: mp_node_pool(priv_get_or_create(segment_mngr))
{}
node_allocator_base(segment_manager *segment_mngr)
: mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(segment_mngr)) { }
//!Copy constructor from other node_allocator. Increments the reference
//!Copy constructor from other node_allocator_base. Increments the reference
//!count of the associated node pool. Never throws
node_allocator(const node_allocator &other)
node_allocator_base(const node_allocator_base &other)
: mp_node_pool(other.get_node_pool())
{ mp_node_pool->inc_ref_count(); }
{
mp_node_pool->inc_ref_count();
}
//!Copy constructor from related node_allocator. If not present, constructs
//!Copy constructor from related node_allocator_base. If not present, constructs
//!a node pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
template<class T2>
node_allocator
(const node_allocator<T2, SegmentManager, NodesPerChunk> &other)
: mp_node_pool(priv_get_or_create(other.get_segment_manager()))
{}
node_allocator_base
(const node_allocator_base<Version, T2, SegmentManager, NodesPerChunk> &other)
: mp_node_pool(detail::get_or_create_node_pool<node_pool_t>(other.get_segment_manager())) { }
//!Destructor, removes node_pool_t from memory
//!if its reference count reaches to zero. Never throws
~node_allocator()
{ priv_destroy_if_last_link(); }
~node_allocator_base()
{ detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool)); }
//!Returns a pointer to the node pool.
//!Never throws
@@ -135,159 +145,290 @@ class node_allocator
segment_manager* get_segment_manager()const
{ return mp_node_pool->get_segment_manager(); }
//!Returns the number of elements that could be allocated. Never throws
size_type max_size() const
{ return this->get_segment_manager()->get_size()/sizeof(value_type); }
//!Allocate memory for an array of count elements.
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate(size_type count, cvoid_pointer = 0)
{
if(count > ((size_type)-1)/sizeof(value_type))
throw bad_alloc();
return pointer(static_cast<T*>(mp_node_pool->allocate(count)));
}
//!Deallocate allocated memory.
//!Never throws
void deallocate(const pointer &ptr, size_type count)
{ mp_node_pool->deallocate(detail::get_pointer(ptr), count); }
//!Deallocates all free chunks of the pool
void deallocate_free_chunks()
{ mp_node_pool->deallocate_free_chunks(); }
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different memory segment, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2)
{ detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); }
//These functions are obsolete. These are here to conserve
//backwards compatibility with containers using them...
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const
{ return pointer(boost::addressof(value)); }
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const
{ return const_pointer(boost::addressof(value)); }
//!Default construct an object.
//!Throws if T's default constructor throws*/
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr)
{ BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
/// @cond
private:
//!Object function that creates the node allocator if it is not created and
//!increments reference count if it is already created
struct get_or_create_func
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type, sizeof(T), NodesPerChunk> node_pool_t;
//!This connects or constructs the unique instance of node_pool_t
//!Can throw boost::interprocess::bad_alloc
void operator()()
{
//Find or create the node_pool_t
mp_node_pool = mp_named_alloc->template find_or_construct
<node_pool_t>(unique_instance)(mp_named_alloc);
//If valid, increment link count
if(mp_node_pool != 0)
mp_node_pool->inc_ref_count();
}
//!Constructor. Initializes function
//!object parameters
get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){}
node_pool_t *mp_node_pool;
segment_manager *mp_named_alloc;
};
//!Initialization function, creates an executes atomically the
//!initialization object functions. Can throw boost::interprocess::bad_alloc
node_pool_t *priv_get_or_create(segment_manager *named_alloc)
{
get_or_create_func func(named_alloc);
named_alloc->atomic_func(func);
return func.mp_node_pool;
}
//!Object function that decrements the reference count. If the count
//!reaches to zero destroys the node allocator from memory.
//!Never throws
struct destroy_if_last_link_func
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk> node_pool_t;
//!Decrements reference count and destroys the object if there is no
//!more attached allocators. Never throws
void operator()()
{
//If not the last link return
if(mp_node_pool->dec_ref_count() != 0) return;
//Last link, let's destroy the segment_manager
mp_named_alloc->template destroy<node_pool_t>(unique_instance);
}
//!Constructor. Initializes function
//!object parameters
destroy_if_last_link_func(segment_manager *nhdr,
node_pool_t *phdr)
: mp_named_alloc(nhdr), mp_node_pool(phdr)
{}
segment_manager *mp_named_alloc;
node_pool_t *mp_node_pool;
};
//!Destruction function, initializes and executes destruction function
//!object. Never throws
void priv_destroy_if_last_link()
{
typedef detail::shared_node_pool
<SegmentManager, mutex_type,sizeof(T), NodesPerChunk> node_pool_t;
//Get segment manager
segment_manager *named_segment_mngr = this->get_segment_manager();
//Execute destruction functor atomically
destroy_if_last_link_func func(named_segment_mngr, detail::get_pointer(mp_node_pool));
named_segment_mngr->atomic_func(func);
}
private:
node_pool_ptr mp_node_pool;
/// @endcond
};
//!Equality test for same type of
//!node_allocator
template<class T, class S, std::size_t NodesPerChunk> inline
bool operator==(const node_allocator<T, S, NodesPerChunk> &alloc1,
const node_allocator<T, S, NodesPerChunk> &alloc2)
//!Equality test for same type
//!of node_allocator_base
template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
bool operator==(const node_allocator_base<V, T, S, NodesPerChunk> &alloc1,
const node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
{ return alloc1.get_node_pool() == alloc2.get_node_pool(); }
//!Inequality test for same type of
//!node_allocator
template<class T, class S, std::size_t NodesPerChunk> inline
bool operator!=(const node_allocator<T, S, NodesPerChunk> &alloc1,
const node_allocator<T, S, NodesPerChunk> &alloc2)
//!Inequality test for same type
//!of node_allocator_base
template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const node_allocator_base<V, T, S, NodesPerChunk> &alloc1,
const node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
{ return alloc1.get_node_pool() != alloc2.get_node_pool(); }
template < class T
, class SegmentManager
, std::size_t NodesPerChunk = 64
>
class node_allocator_v1
: public node_allocator_base
< 1
, T
, SegmentManager
, NodesPerChunk
>
{
public:
typedef detail::node_allocator_base
< 1, T, SegmentManager, NodesPerChunk> base_t;
template<class T2>
struct rebind
{
typedef node_allocator_v1<T2, SegmentManager, NodesPerChunk> other;
};
node_allocator_v1(SegmentManager *segment_mngr)
: base_t(segment_mngr)
{}
template<class T2>
node_allocator_v1
(const node_allocator_v1<T2, SegmentManager, NodesPerChunk> &other)
: base_t(other)
{}
};
} //namespace detail{
/// @endcond
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This node allocator shares a segregated storage between all instances
//!of node_allocator with equal sizeof(T) placed in the same segment
//!group. NodesPerChunk is the number of nodes allocated at once when the allocator
//!needs runs out of nodes
template < class T
, class SegmentManager
, std::size_t NodesPerChunk
>
class node_allocator
/// @cond
: public detail::node_allocator_base
< 2
, T
, SegmentManager
, NodesPerChunk
>
/// @endcond
{
#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
typedef detail::node_allocator_base
< 2, T, SegmentManager, NodesPerChunk> base_t;
public:
typedef detail::version_type<node_allocator, 2> version;
template<class T2>
struct rebind
{
typedef node_allocator<T2, SegmentManager, NodesPerChunk> other;
};
node_allocator(SegmentManager *segment_mngr)
: base_t(segment_mngr)
{}
template<class T2>
node_allocator
(const node_allocator<T2, SegmentManager, NodesPerChunk> &other)
: base_t(other)
{}
#else //BOOST_INTERPROCESS_DOXYGEN_INVOKED
public:
typedef implementation_defined::segment_manager segment_manager;
typedef segment_manager::void_pointer void_pointer;
typedef implementation_defined::pointer pointer;
typedef implementation_defined::const_pointer const_pointer;
typedef T value_type;
typedef typename detail::add_reference
<value_type>::type reference;
typedef typename detail::add_reference
<const value_type>::type const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
//!Obtains node_allocator from
//!node_allocator
template<class T2>
struct rebind
{
typedef node_allocator<T2, SegmentManager, NodesPerChunk> other;
};
private:
//!Not assignable from
//!related node_allocator
template<class T2, class SegmentManager2, std::size_t N2>
node_allocator& operator=
(const node_allocator<T2, SegmentManager2, N2>&);
//!Not assignable from
//!other node_allocator
node_allocator& operator=(const node_allocator&);
public:
//!Constructor from a segment manager. If not present, constructs a node
//!pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
node_allocator(segment_manager *segment_mngr);
//!Copy constructor from other node_allocator. Increments the reference
//!count of the associated node pool. Never throws
node_allocator(const node_allocator &other);
//!Copy constructor from related node_allocator. If not present, constructs
//!a node pool. Increments the reference count of the associated node pool.
//!Can throw boost::interprocess::bad_alloc
template<class T2>
node_allocator
(const node_allocator<T2, SegmentManager, NodesPerChunk> &other);
//!Destructor, removes node_pool_t from memory
//!if its reference count reaches to zero. Never throws
~node_allocator();
//!Returns a pointer to the node pool.
//!Never throws
node_pool_t* get_node_pool() const;
//!Returns the segment manager.
//!Never throws
segment_manager* get_segment_manager()const;
//!Returns the number of elements that could be allocated.
//!Never throws
size_type max_size() const;
//!Allocate memory for an array of count elements.
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate(size_type count, cvoid_pointer hint = 0);
//!Deallocate allocated memory.
//!Never throws
void deallocate(const pointer &ptr, size_type count);
//!Deallocates all free chunks
//!of the pool
void deallocate_free_chunks();
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different memory segment, the result is undefined.
friend void swap(self_t &alloc1, self_t &alloc2);
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const;
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const;
//!Default construct an object.
//!Throws if T's default constructor throws
void construct(const pointer &ptr);
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr);
//!Returns maximum the number of objects the previously allocated memory
//!pointed by p can hold. This size only works for memory allocated with
//!allocate, allocation_command and allocate_many.
size_type size(const pointer &p) const;
std::pair<pointer, bool>
allocation_command(allocation_type command,
size_type limit_size,
size_type preferred_size,
size_type &received_size, const pointer &reuse = 0);
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);
//!Allocates n_elements elements, each one of size elem_sizes[i]in a
//!contiguous chunk
//!of memory. The elements must be deallocated
multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);
//!Allocates many elements of size elem_size in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. The elements must be deallocated
//!with deallocate(...)
void deallocate_many(multiallocation_iterator it);
//!Allocates just one object. Memory allocated with this function
//!must be deallocated only with deallocate_one().
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate_one();
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
multiallocation_iterator allocate_individual(std::size_t num_elements);
//!Deallocates memory previously allocated with allocate_one().
//!You should never use deallocate_one to deallocate memory allocated
//!with other functions different from allocate_one(). Never throws
void deallocate_one(const pointer &p);
//!Allocates many elements of size == 1 in a contiguous chunk
//!of memory. The minimum number to be allocated is min_elements,
//!the preferred and maximum number is
//!preferred_elements. The number of actually allocated elements is
//!will be assigned to received_size. Memory allocated with this function
//!must be deallocated only with deallocate_one().
void deallocate_individual(multiallocation_iterator it);
#endif
};
#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!Equality test for same type
//!of node_allocator
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator==(const node_allocator<T, S, NodesPerChunk, F, OP> &alloc1,
const node_allocator<T, S, NodesPerChunk, F, OP> &alloc2);
//!Inequality test for same type
//!of node_allocator
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const node_allocator<T, S, NodesPerChunk, F, OP> &alloc1,
const node_allocator<T, S, NodesPerChunk, F, OP> &alloc2);
#endif
} //namespace interprocess {
} //namespace boost {
#include <boost/interprocess/detail/config_end.hpp>
#endif //#ifndef BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP
#endif //#ifndef BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -30,38 +30,47 @@
#include <cstddef>
//!\file
//!Describes private_adaptive_pool pooled shared memory STL compatible allocator
//!Describes private_adaptive_pool_base pooled shared memory STL compatible allocator
namespace boost {
namespace interprocess {
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This allocator has its own node pool. NodesPerChunk is the minimum number of nodes
//!allocated at once when the allocator needs runs out of nodes.
template<class T, class SegmentManager, std::size_t NodesPerChunk, std::size_t MaxFreeChunks>
class private_adaptive_pool
/// @cond
namespace detail {
template < unsigned int Version
, class T
, class SegmentManager
, std::size_t NodesPerChunk
, std::size_t MaxFreeChunks
, unsigned char OverheadPercent
>
class private_adaptive_pool_base
: public node_pool_allocation_impl
< private_adaptive_pool_base < Version, T, SegmentManager, NodesPerChunk
, MaxFreeChunks, OverheadPercent>
, Version
, T
, SegmentManager
>
{
/// @cond
private:
typedef typename SegmentManager::void_pointer void_pointer;
typedef typename detail::
pointer_to_other<void_pointer, const void>::type cvoid_pointer;
typedef SegmentManager segment_manager;
typedef typename detail::
pointer_to_other<void_pointer, char>::type char_pointer;
typedef typename detail::pointer_to_other
<void_pointer, segment_manager>::type segment_mngr_ptr_t;
typedef typename SegmentManager::
mutex_family::mutex_type mutex_type;
typedef private_adaptive_pool
<T, SegmentManager, NodesPerChunk, MaxFreeChunks> self_t;
typedef private_adaptive_pool_base
< Version, T, SegmentManager, NodesPerChunk
, MaxFreeChunks, OverheadPercent> self_t;
typedef detail::private_adaptive_node_pool
<SegmentManager, sizeof(T)
, NodesPerChunk, MaxFreeChunks> priv_node_pool_t;
<SegmentManager
, sizeof(T)
, NodesPerChunk
, MaxFreeChunks
, OverheadPercent
> node_pool_t;
BOOST_STATIC_ASSERT((Version <=2));
/// @endcond
@@ -77,120 +86,358 @@ class private_adaptive_pool
<const value_type>::type const_reference;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef detail::version_type
<private_adaptive_pool_base, Version> version;
typedef transform_iterator
< typename SegmentManager::
multiallocation_iterator
, detail::cast_functor <T> > multiallocation_iterator;
typedef typename SegmentManager::
multiallocation_chain multiallocation_chain;
//!Obtains node_allocator from other node_allocator
template<class T2>
struct rebind
{
typedef private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> other;
typedef private_adaptive_pool_base
<Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
};
/// @cond
private:
//!Not assignable from related private_adaptive_pool
template<class T2, class MemoryAlgorithm2, std::size_t N2, std::size_t F2>
private_adaptive_pool& operator=
(const private_adaptive_pool<T2, MemoryAlgorithm2, N2, F2>&);
//!Not assignable from related private_adaptive_pool_base
template<unsigned int Version2, class T2, class MemoryAlgorithm2, std::size_t N2, std::size_t F2, unsigned char OP2>
private_adaptive_pool_base& operator=
(const private_adaptive_pool_base<Version2, T2, MemoryAlgorithm2, N2, F2, OP2>&);
//!Not assignable from other private_adaptive_pool
private_adaptive_pool& operator=(const private_adaptive_pool&);
//!Not assignable from other private_adaptive_pool_base
private_adaptive_pool_base& operator=(const private_adaptive_pool_base&);
/// @endcond
public:
//!Constructor from a segment manager
private_adaptive_pool(segment_manager *segment_mngr)
private_adaptive_pool_base(segment_manager *segment_mngr)
: m_node_pool(segment_mngr)
{}
//!Copy constructor from other private_adaptive_pool. Never throws
private_adaptive_pool(const private_adaptive_pool &other)
//!Copy constructor from other private_adaptive_pool_base. Never throws
private_adaptive_pool_base(const private_adaptive_pool_base &other)
: m_node_pool(other.get_segment_manager())
{}
//!Copy constructor from related private_adaptive_pool. Never throws.
//!Copy constructor from related private_adaptive_pool_base. Never throws.
template<class T2>
private_adaptive_pool
(const private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks> &other)
private_adaptive_pool_base
(const private_adaptive_pool_base
<Version, T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
: m_node_pool(other.get_segment_manager())
{}
//!Destructor, frees all used memory. Never throws
~private_adaptive_pool()
~private_adaptive_pool_base()
{}
//!Returns the segment manager. Never throws
segment_manager* get_segment_manager()const
{ return m_node_pool.get_segment_manager(); }
//!Returns the number of elements that could be allocated. Never throws
size_type max_size() const
{ return this->get_segment_manager()/sizeof(value_type); }
//!Allocate memory for an array of count elements.
//!Throws boost::interprocess::bad_alloc if there is no enough memory
pointer allocate(size_type count, cvoid_pointer hint = 0)
{
(void)hint;
if(count > ((size_type)-1)/sizeof(value_type))
throw bad_alloc();
return pointer(static_cast<value_type*>(m_node_pool.allocate(count)));
}
//!Deallocate allocated memory. Never throws
void deallocate(const pointer &ptr, size_type count)
{ m_node_pool.deallocate(detail::get_pointer(ptr), count); }
//!Deallocates all free chunks of the pool
void deallocate_free_chunks()
{ m_node_pool.deallocate_free_chunks(); }
//!Returns the internal node pool. Never throws
node_pool_t* get_node_pool() const
{ return const_cast<node_pool_t*>(&m_node_pool); }
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different shared memory segments, the result is undefined.*/
//!different shared memory segments, the result is undefined.
friend void swap(self_t &alloc1,self_t &alloc2)
{ alloc1.m_node_pool.swap(alloc2.m_node_pool); }
//These functions are obsolete. These are here to conserve
//backwards compatibility with containers using them...
//!Returns address of mutable object.
//!Never throws
pointer address(reference value) const
{ return pointer(boost::addressof(value)); }
//!Returns address of non mutable object.
//!Never throws
const_pointer address(const_reference value) const
{ return const_pointer(boost::addressof(value)); }
//!Default construct an object.
//!Throws if T's default constructor throws*/
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }
//!Destroys object. Throws if object's
//!destructor throws
void destroy(const pointer &ptr)
{ BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
/// @cond
private:
priv_node_pool_t m_node_pool;
node_pool_t m_node_pool;
/// @endcond
};
//!Equality test for same type of private_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
bool operator==(const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc1,
const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
//!Equality test for same type of private_adaptive_pool_base
template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator==(const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1,
const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
{ return &alloc1 == &alloc2; }
//!Inequality test for same type of private_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F> inline
bool operator!=(const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc1,
const private_adaptive_pool<T, S, NodesPerChunk, F> &alloc2)
{
return &alloc1 != &alloc2;
}
//!Inequality test for same type of private_adaptive_pool_base
template<unsigned int V, class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc1,
const private_adaptive_pool_base<V, T, S, NodesPerChunk, F, OP> &alloc2)
{ return &alloc1 != &alloc2; }
//!STL-conforming (version 1) private adaptive-pool allocator:
//!a thin wrapper that fixes the Version parameter of
//!private_adaptive_pool_base to 1, exposing only the classic
//!std::allocator interface.
template < class T
         , class SegmentManager
         , std::size_t NodesPerChunk = 64
         , std::size_t MaxFreeChunks = 2
         , unsigned char OverheadPercent = 5
         >
class private_adaptive_pool_v1
   :  public private_adaptive_pool_base
         < 1
         , T
         , SegmentManager
         , NodesPerChunk
         , MaxFreeChunks
         , OverheadPercent
         >
{
   public:
   typedef detail::private_adaptive_pool_base
         < 1, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;

   //!Obtains the same allocator rebound to another value_type T2
   template<class T2>
   struct rebind
   {
      typedef private_adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
   };

   //!Constructor from a segment manager
   private_adaptive_pool_v1(SegmentManager *segment_mngr)
      : base_t(segment_mngr)
   {}

   //!Copy constructor from a related allocator rebound to another type
   template<class T2>
   private_adaptive_pool_v1
      (const private_adaptive_pool_v1<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
      : base_t(other)
   {}
};
} //namespace detail {
/// @endcond
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will be of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This allocator has its own node pool.
//!
//!NodesPerChunk is the minimum number of nodes allocated at once when
//!the allocator runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks
//!that the adaptive node pool will hold. The rest of the totally free chunks will be
//!deallocated with the segment manager.
//!
//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator:
//!(memory usable for nodes / total memory allocated from the segment manager)
template < class T
         , class SegmentManager
         , std::size_t NodesPerChunk
         , std::size_t MaxFreeChunks
         , unsigned char OverheadPercent
         >
class private_adaptive_pool
   /// @cond
   :  public detail::private_adaptive_pool_base
         < 2
         , T
         , SegmentManager
         , NodesPerChunk
         , MaxFreeChunks
         , OverheadPercent
         >
   /// @endcond
{

   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   //Real implementation: everything is inherited from the version-2 base
   typedef detail::private_adaptive_pool_base
         < 2, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t;
   public:
   //Tags this allocator as version 2 (supports allocate_many, allocate_one...)
   typedef detail::version_type<private_adaptive_pool, 2> version;

   //!Obtains the same allocator rebound to another value_type T2
   template<class T2>
   struct rebind
   {
      typedef private_adaptive_pool
         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
   };

   //!Constructor from a segment manager
   private_adaptive_pool(SegmentManager *segment_mngr)
      : base_t(segment_mngr)
   {}

   //!Copy constructor from a related allocator rebound to another type
   template<class T2>
   private_adaptive_pool
      (const private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other)
      : base_t(other)
   {}

   #else //BOOST_INTERPROCESS_DOXYGEN_INVOKED
   //Documentation-only section describing the interface inherited from
   //private_adaptive_pool_base; this branch is never compiled.
   public:
   typedef implementation_defined::segment_manager       segment_manager;
   typedef segment_manager::void_pointer                 void_pointer;
   typedef implementation_defined::pointer               pointer;
   typedef implementation_defined::const_pointer         const_pointer;
   typedef T                                             value_type;
   typedef typename detail::add_reference
                     <value_type>::type                  reference;
   typedef typename detail::add_reference
                     <const value_type>::type            const_reference;
   typedef std::size_t                                   size_type;
   typedef std::ptrdiff_t                                difference_type;

   //!Obtains private_adaptive_pool from
   //!private_adaptive_pool
   template<class T2>
   struct rebind
   {
      typedef private_adaptive_pool
         <T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> other;
   };

   private:
   //!Not assignable from
   //!related private_adaptive_pool
   //NOTE(review): OP2 is missing from the argument list below — doxygen-only
   //declaration, confirm against the compiled branch
   template<class T2, class SegmentManager2, std::size_t N2, std::size_t F2, unsigned char OP2>
   private_adaptive_pool& operator=
      (const private_adaptive_pool<T2, SegmentManager2, N2, F2>&);

   //!Not assignable from
   //!other private_adaptive_pool
   private_adaptive_pool& operator=(const private_adaptive_pool&);

   public:
   //!Constructor from a segment manager. If not present, constructs a node
   //!pool. Increments the reference count of the associated node pool.
   //!Can throw boost::interprocess::bad_alloc
   private_adaptive_pool(segment_manager *segment_mngr);

   //!Copy constructor from other private_adaptive_pool. Increments the reference
   //!count of the associated node pool. Never throws
   private_adaptive_pool(const private_adaptive_pool &other);

   //!Copy constructor from related private_adaptive_pool. If not present, constructs
   //!a node pool. Increments the reference count of the associated node pool.
   //!Can throw boost::interprocess::bad_alloc
   template<class T2>
   private_adaptive_pool
      (const private_adaptive_pool<T2, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> &other);

   //!Destructor, removes node_pool_t from memory
   //!if its reference count reaches to zero. Never throws
   ~private_adaptive_pool();

   //!Returns a pointer to the node pool.
   //!Never throws
   node_pool_t* get_node_pool() const;

   //!Returns the segment manager.
   //!Never throws
   segment_manager* get_segment_manager()const;

   //!Returns the number of elements that could be allocated.
   //!Never throws
   size_type max_size() const;

   //!Allocate memory for an array of count elements.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate(size_type count, cvoid_pointer hint = 0);

   //!Deallocate allocated memory.
   //!Never throws
   void deallocate(const pointer &ptr, size_type count);

   //!Deallocates all free chunks
   //!of the pool
   void deallocate_free_chunks();

   //!Swaps allocators. Does not throw. If each allocator is placed in a
   //!different memory segment, the result is undefined.
   friend void swap(self_t &alloc1, self_t &alloc2);

   //!Returns address of mutable object.
   //!Never throws
   pointer address(reference value) const;

   //!Returns address of non mutable object.
   //!Never throws
   const_pointer address(const_reference value) const;

   //!Default construct an object.
   //!Throws if T's default constructor throws
   void construct(const pointer &ptr);

   //!Destroys object. Throws if object's
   //!destructor throws
   void destroy(const pointer &ptr);

   //!Returns the maximum number of objects the previously allocated memory
   //!pointed by p can hold. This size only works for memory allocated with
   //!allocate, allocation_command and allocate_many.
   size_type size(const pointer &p) const;

   //!Expansion/shrink/allocation in a single call; received_size reports the
   //!obtained size and the bool tells whether the reuse buffer was used
   std::pair<pointer, bool>
      allocation_command(allocation_type command,
                         size_type limit_size,
                         size_type preferred_size,
                         size_type &received_size, const pointer &reuse = 0);

   //!Allocates many elements of size elem_size in a contiguous chunk
   //!of memory. The minimum number to be allocated is min_elements,
   //!the preferred and maximum number is
   //!preferred_elements. The number of actually allocated elements
   //!will be assigned to received_size. The elements must be deallocated
   //!with deallocate(...)
   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);

   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
   //!contiguous chunk
   //!of memory. The elements must be deallocated
   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);

   //!Deallocates the elements referenced by the multiallocation iterator.
   //!Never use this function on memory that was not obtained
   //!from allocate_many(...)
   void deallocate_many(multiallocation_iterator it);

   //!Allocates just one object. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate_one();

   //!Allocates many elements of size == 1 in a contiguous chunk
   //!of memory. The minimum number to be allocated is min_elements,
   //!the preferred and maximum number is
   //!preferred_elements. The number of actually allocated elements
   //!will be assigned to received_size. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   multiallocation_iterator allocate_individual(std::size_t num_elements);

   //!Deallocates memory previously allocated with allocate_one().
   //!You should never use deallocate_one to deallocate memory allocated
   //!with other functions different from allocate_one(). Never throws
   void deallocate_one(const pointer &p);

   //!Deallocates the individual elements referenced by the multiallocation
   //!iterator. Never use this function on memory that was not obtained
   //!from allocate_individual(...)
   void deallocate_individual(multiallocation_iterator it);
   #endif
};
#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!Equality test for same type
//!of private_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator==(const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1,
const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
//!Inequality test for same type
//!of private_adaptive_pool
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc1,
const private_adaptive_pool<T, S, NodesPerChunk, F, OP> &alloc2);
#endif
} //namespace interprocess {
} //namespace boost {

View File

@@ -1,13 +1,13 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
/*
#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
#define BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
@@ -50,12 +50,8 @@ class private_node_allocator
typedef typename detail::
pointer_to_other<void_pointer, const void>::type cvoid_pointer;
typedef SegmentManager segment_manager;
typedef typename detail::
pointer_to_other<void_pointer, char>::type char_pointer;
typedef typename detail::pointer_to_other
<void_pointer, segment_manager>::type segment_mngr_ptr_t;
typedef typename SegmentManager::
mutex_family::mutex_type mutex_type;
typedef private_node_allocator
<T, SegmentManager, NodesPerChunk> self_t;
typedef detail::private_node_pool
@@ -128,21 +124,30 @@ class private_node_allocator
pointer allocate(size_type count, cvoid_pointer hint = 0)
{
(void)hint;
if(count > ((size_type)-1)/sizeof(value_type))
if(count > this->max_size())
throw bad_alloc();
return pointer(static_cast<value_type*>(m_node_pool.allocate(count)));
else if(count == 1)
return pointer(static_cast<value_type*>(m_node_pool.allocate_node()));
else
return pointer(static_cast<value_type*>
(m_node_pool.get_segment_manager()->allocate(sizeof(T)*count)));
}
//!Deallocate allocated memory. Never throws
void deallocate(const pointer &ptr, size_type count)
{ m_node_pool.deallocate(detail::get_pointer(ptr), count); }
{
if(count == 1)
m_node_pool.deallocate_node(detail::get_pointer(ptr));
else
m_node_pool.get_segment_manager()->deallocate(detail::get_pointer(ptr));
}
//!Deallocates all free chunks of the pool
void deallocate_free_chunks()
{ m_node_pool.deallocate_free_chunks(); }
//!Swaps allocators. Does not throw. If each allocator is placed in a
//!different shared memory segments, the result is undefined.*/
//!different shared memory segments, the result is undefined.
friend void swap(self_t &alloc1,self_t &alloc2)
{ alloc1.m_node_pool.swap(alloc2.m_node_pool); }
@@ -160,7 +165,7 @@ class private_node_allocator
{ return const_pointer(boost::addressof(value)); }
//!Default construct an object.
//!Throws if T's default constructor throws*/
//!Throws if T's default constructor throws
void construct(const pointer &ptr)
{ new(detail::get_pointer(ptr)) value_type; }
@@ -196,3 +201,432 @@ bool operator!=(const private_node_allocator<T, S, NodesPerChunk> &alloc1,
#endif //#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
*/
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
#define BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP
#if (defined _MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/assert.hpp>
#include <boost/utility/addressof.hpp>
#include <boost/interprocess/allocators/detail/node_pool.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <memory>
#include <algorithm>
#include <cstddef>
//!\file
//!Describes private_node_allocator_base pooled shared memory STL compatible allocator
namespace boost {
namespace interprocess {
/// @cond
namespace detail {
//!Common implementation for the private node allocators: holds its own
//!(non-shared) node pool built on top of the given segment manager.
//!Version selects the exposed interface (1 = plain STL allocator,
//!2 = extended Interprocess allocator).
template < unsigned int Version
         , class T
         , class SegmentManager
         , std::size_t NodesPerChunk
         >
class private_node_allocator_base
   : public node_pool_allocation_impl
   < private_node_allocator_base < Version, T, SegmentManager, NodesPerChunk>
   , Version
   , T
   , SegmentManager
   >
{
   /// @cond
   private:
   typedef typename SegmentManager::void_pointer         void_pointer;
   typedef SegmentManager                                segment_manager;
   typedef private_node_allocator_base
      < Version, T, SegmentManager, NodesPerChunk>       self_t;
   //Pool of nodes of sizeof(T) bytes, grown NodesPerChunk nodes at a time
   typedef detail::private_node_pool
      <SegmentManager
      , sizeof(T)
      , NodesPerChunk
      >                                                  node_pool_t;

   //Only interface versions 1 and 2 exist
   BOOST_STATIC_ASSERT((Version <=2));
   /// @endcond

   public:
   typedef typename detail::
      pointer_to_other<void_pointer, T>::type            pointer;
   typedef typename detail::
      pointer_to_other<void_pointer, const T>::type      const_pointer;
   typedef T                                             value_type;
   typedef typename detail::add_reference
                     <value_type>::type                  reference;
   typedef typename detail::add_reference
                     <const value_type>::type            const_reference;
   typedef std::size_t                                   size_type;
   typedef std::ptrdiff_t                                difference_type;
   //Version tag used to dispatch between v1 and v2 interfaces
   typedef detail::version_type
      <private_node_allocator_base, Version>             version;
   typedef transform_iterator
      < typename SegmentManager::
         multiallocation_iterator
      , detail::cast_functor <T> >                       multiallocation_iterator;
   typedef typename SegmentManager::
      multiallocation_chain                              multiallocation_chain;

   //!Obtains the same allocator rebound to another value_type T2
   template<class T2>
   struct rebind
   {
      typedef private_node_allocator_base
         <Version, T2, SegmentManager, NodesPerChunk>   other;
   };

   /// @cond
   private:
   //!Not assignable from related private_node_allocator_base
   template<unsigned int Version2, class T2, class MemoryAlgorithm2, std::size_t N2>
   private_node_allocator_base& operator=
      (const private_node_allocator_base<Version2, T2, MemoryAlgorithm2, N2>&);

   //!Not assignable from other private_node_allocator_base
   private_node_allocator_base& operator=(const private_node_allocator_base&);
   /// @endcond

   public:
   //!Constructor from a segment manager
   private_node_allocator_base(segment_manager *segment_mngr)
      : m_node_pool(segment_mngr)
   {}

   //!Copy constructor from other private_node_allocator_base. Never throws.
   //!The pool itself is not shared: a new pool is built on the same segment manager
   private_node_allocator_base(const private_node_allocator_base &other)
      : m_node_pool(other.get_segment_manager())
   {}

   //!Copy constructor from related private_node_allocator_base. Never throws.
   template<class T2>
   private_node_allocator_base
      (const private_node_allocator_base
         <Version, T2, SegmentManager, NodesPerChunk> &other)
      : m_node_pool(other.get_segment_manager())
   {}

   //!Destructor, frees all used memory. Never throws
   ~private_node_allocator_base()
   {}

   //!Returns the segment manager. Never throws
   segment_manager* get_segment_manager()const
   {  return m_node_pool.get_segment_manager(); }

   //!Returns the internal node pool. Never throws
   node_pool_t* get_node_pool() const
   {  return const_cast<node_pool_t*>(&m_node_pool); }

   //!Swaps allocators. Does not throw. If each allocator is placed in
   //!different shared memory segments, the result is undefined.
   friend void swap(self_t &alloc1,self_t &alloc2)
   {  alloc1.m_node_pool.swap(alloc2.m_node_pool);  }

   /// @cond
   private:
   //The pool owned exclusively by this allocator instance
   node_pool_t m_node_pool;
   /// @endcond
};
//!Equality test for same type of private_node_allocator_base.
//!Each allocator owns its own pool, so two instances compare
//!equal only when they are the very same object.
template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
bool operator==(const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc1,
                const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
{
   const bool same_object = (&alloc1 == &alloc2);
   return same_object;
}
//!Inequality test for same type of private_node_allocator_base.
//!Two instances are unequal whenever they are distinct objects.
template<unsigned int V, class T, class S, std::size_t NodesPerChunk> inline
bool operator!=(const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc1,
                const private_node_allocator_base<V, T, S, NodesPerChunk> &alloc2)
{
   return !(&alloc1 == &alloc2);
}
//!STL-conforming (version 1) private node allocator:
//!a thin wrapper that fixes the Version parameter of
//!private_node_allocator_base to 1.
template < class T
         , class SegmentManager
         , std::size_t NodesPerChunk = 64
         >
class private_node_allocator_v1
   :  public private_node_allocator_base
         < 1
         , T
         , SegmentManager
         , NodesPerChunk
         >
{
   public:
   typedef detail::private_node_allocator_base
         < 1, T, SegmentManager, NodesPerChunk> base_t;

   //!Obtains the same allocator rebound to another value_type T2
   template<class T2>
   struct rebind
   {
      typedef private_node_allocator_v1<T2, SegmentManager, NodesPerChunk> other;
   };

   //!Constructor from a segment manager
   private_node_allocator_v1(SegmentManager *segment_mngr)
      : base_t(segment_mngr)
   {}

   //!Copy constructor from a related allocator rebound to another type
   template<class T2>
   private_node_allocator_v1
      (const private_node_allocator_v1<T2, SegmentManager, NodesPerChunk> &other)
      : base_t(other)
   {}
};
} //namespace detail {
/// @endcond
//!An STL node allocator that uses a segment manager as memory
//!source. The internal pointer type will be of the same type (raw, smart) as
//!"typename SegmentManager::void_pointer" type. This allows
//!placing the allocator in shared memory, memory mapped-files, etc...
//!This allocator has its own node pool. NodesPerChunk is the number of nodes allocated
//!at once when the allocator runs out of nodes
template < class T
         , class SegmentManager
         , std::size_t NodesPerChunk
         >
class private_node_allocator
   /// @cond
   :  public detail::private_node_allocator_base
         < 2
         , T
         , SegmentManager
         , NodesPerChunk
         >
   /// @endcond
{

   #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   //Real implementation: everything is inherited from the version-2 base
   typedef detail::private_node_allocator_base
         < 2, T, SegmentManager, NodesPerChunk> base_t;
   public:
   //Tags this allocator as version 2 (supports allocate_many, allocate_one...)
   typedef detail::version_type<private_node_allocator, 2> version;

   //!Obtains the same allocator rebound to another value_type T2
   template<class T2>
   struct rebind
   {
      typedef private_node_allocator
         <T2, SegmentManager, NodesPerChunk> other;
   };

   //!Constructor from a segment manager
   private_node_allocator(SegmentManager *segment_mngr)
      : base_t(segment_mngr)
   {}

   //!Copy constructor from a related allocator rebound to another type
   template<class T2>
   private_node_allocator
      (const private_node_allocator<T2, SegmentManager, NodesPerChunk> &other)
      : base_t(other)
   {}

   #else //BOOST_INTERPROCESS_DOXYGEN_INVOKED
   //Documentation-only section describing the interface inherited from
   //private_node_allocator_base; this branch is never compiled.
   public:
   typedef implementation_defined::segment_manager       segment_manager;
   typedef segment_manager::void_pointer                 void_pointer;
   typedef implementation_defined::pointer               pointer;
   typedef implementation_defined::const_pointer         const_pointer;
   typedef T                                             value_type;
   typedef typename detail::add_reference
                     <value_type>::type                  reference;
   typedef typename detail::add_reference
                     <const value_type>::type            const_reference;
   typedef std::size_t                                   size_type;
   typedef std::ptrdiff_t                                difference_type;

   //!Obtains private_node_allocator from
   //!private_node_allocator
   template<class T2>
   struct rebind
   {
      typedef private_node_allocator
         <T2, SegmentManager, NodesPerChunk> other;
   };

   private:
   //!Not assignable from
   //!related private_node_allocator
   template<class T2, class SegmentManager2, std::size_t N2>
   private_node_allocator& operator=
      (const private_node_allocator<T2, SegmentManager2, N2>&);

   //!Not assignable from
   //!other private_node_allocator
   private_node_allocator& operator=(const private_node_allocator&);

   public:
   //!Constructor from a segment manager. If not present, constructs a node
   //!pool. Increments the reference count of the associated node pool.
   //!Can throw boost::interprocess::bad_alloc
   private_node_allocator(segment_manager *segment_mngr);

   //!Copy constructor from other private_node_allocator. Increments the reference
   //!count of the associated node pool. Never throws
   private_node_allocator(const private_node_allocator &other);

   //!Copy constructor from related private_node_allocator. If not present, constructs
   //!a node pool. Increments the reference count of the associated node pool.
   //!Can throw boost::interprocess::bad_alloc
   template<class T2>
   private_node_allocator
      (const private_node_allocator<T2, SegmentManager, NodesPerChunk> &other);

   //!Destructor, removes node_pool_t from memory
   //!if its reference count reaches to zero. Never throws
   ~private_node_allocator();

   //!Returns a pointer to the node pool.
   //!Never throws
   node_pool_t* get_node_pool() const;

   //!Returns the segment manager.
   //!Never throws
   segment_manager* get_segment_manager()const;

   //!Returns the number of elements that could be allocated.
   //!Never throws
   size_type max_size() const;

   //!Allocate memory for an array of count elements.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate(size_type count, cvoid_pointer hint = 0);

   //!Deallocate allocated memory.
   //!Never throws
   void deallocate(const pointer &ptr, size_type count);

   //!Deallocates all free chunks
   //!of the pool
   void deallocate_free_chunks();

   //!Swaps allocators. Does not throw. If each allocator is placed in a
   //!different memory segment, the result is undefined.
   friend void swap(self_t &alloc1, self_t &alloc2);

   //!Returns address of mutable object.
   //!Never throws
   pointer address(reference value) const;

   //!Returns address of non mutable object.
   //!Never throws
   const_pointer address(const_reference value) const;

   //!Default construct an object.
   //!Throws if T's default constructor throws
   void construct(const pointer &ptr);

   //!Destroys object. Throws if object's
   //!destructor throws
   void destroy(const pointer &ptr);

   //!Returns the maximum number of objects the previously allocated memory
   //!pointed by p can hold. This size only works for memory allocated with
   //!allocate, allocation_command and allocate_many.
   size_type size(const pointer &p) const;

   //!Expansion/shrink/allocation in a single call; received_size reports the
   //!obtained size and the bool tells whether the reuse buffer was used
   std::pair<pointer, bool>
      allocation_command(allocation_type command,
                         size_type limit_size,
                         size_type preferred_size,
                         size_type &received_size, const pointer &reuse = 0);

   //!Allocates many elements of size elem_size in a contiguous chunk
   //!of memory. The minimum number to be allocated is min_elements,
   //!the preferred and maximum number is
   //!preferred_elements. The number of actually allocated elements
   //!will be assigned to received_size. The elements must be deallocated
   //!with deallocate(...)
   multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements);

   //!Allocates n_elements elements, each one of size elem_sizes[i] in a
   //!contiguous chunk
   //!of memory. The elements must be deallocated
   multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements);

   //!Deallocates the elements referenced by the multiallocation iterator.
   //!Never use this function on memory that was not obtained
   //!from allocate_many(...)
   void deallocate_many(multiallocation_iterator it);

   //!Allocates just one object. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate_one();

   //!Allocates many elements of size == 1 in a contiguous chunk
   //!of memory. The minimum number to be allocated is min_elements,
   //!the preferred and maximum number is
   //!preferred_elements. The number of actually allocated elements
   //!will be assigned to received_size. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   multiallocation_iterator allocate_individual(std::size_t num_elements);

   //!Deallocates memory previously allocated with allocate_one().
   //!You should never use deallocate_one to deallocate memory allocated
   //!with other functions different from allocate_one(). Never throws
   void deallocate_one(const pointer &p);

   //!Deallocates the individual elements referenced by the multiallocation
   //!iterator. Never use this function on memory that was not obtained
   //!from allocate_individual(...)
   void deallocate_individual(multiallocation_iterator it);
   #endif
};
#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED
//!Equality test for same type
//!of private_node_allocator
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator==(const private_node_allocator<T, S, NodesPerChunk, F, OP> &alloc1,
const private_node_allocator<T, S, NodesPerChunk, F, OP> &alloc2);
//!Inequality test for same type
//!of private_node_allocator
template<class T, class S, std::size_t NodesPerChunk, std::size_t F, unsigned char OP> inline
bool operator!=(const private_node_allocator<T, S, NodesPerChunk, F, OP> &alloc1,
const private_node_allocator<T, S, NodesPerChunk, F, OP> &alloc2);
#endif
} //namespace interprocess {
} //namespace boost {
#include <boost/interprocess/detail/config_end.hpp>
#endif //#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP

View File

@@ -1,6 +1,6 @@
////////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -74,6 +74,9 @@ struct node_alloc_holder
typedef detail::integral_constant<unsigned,
boost::interprocess::detail::
version<NodeAlloc>::value> alloc_version;
typedef typename ICont::iterator icont_iterator;
typedef typename ICont::const_iterator icont_citerator;
typedef allocator_destroyer<NodeAlloc> Destroyer;
node_alloc_holder(const ValAlloc &a)
: members_(a)
@@ -292,18 +295,41 @@ struct node_alloc_holder
if(constructed){
this->destroy(p);
}
this->deallocate_one(p);
multiallocation_iterator itend;
while(itbeg != itend){
Node *n = &*itbeg;
++itbeg;
this->deallocate_one(n);
}
this->node_alloc().deallocate_many(itbeg);
}
BOOST_CATCH_END
return beg;
}
void clear(allocator_v1)
{ this->icont().clear_and_dispose(Destroyer(this->node_alloc())); }
void clear(allocator_v2)
{
allocator_multialloc_chain_node_deallocator<NodeAlloc> chain_holder(this->node_alloc());
this->icont().clear_and_dispose(chain_holder.get_chain_builder());
}
icont_iterator erase_range(icont_iterator first, icont_iterator last, allocator_v1)
{ return this->icont().erase_and_dispose(first, last, Destroyer(this->node_alloc())); }
icont_iterator erase_range(icont_iterator first, icont_iterator last, allocator_v2)
{
allocator_multialloc_chain_node_deallocator<NodeAlloc> chain_holder(this->node_alloc());
return this->icont().erase_and_dispose(first, last, chain_holder.get_chain_builder());
}
template<class Key, class Comparator>
size_type erase_key(const Key& k, const Comparator &comp, allocator_v1)
{ return this->icont().erase_and_dispose(k, comp, Destroyer(this->node_alloc())); }
template<class Key, class Comparator>
size_type erase_key(const Key& k, const Comparator &comp, allocator_v2)
{
allocator_multialloc_chain_node_deallocator<NodeAlloc> chain_holder(this->node_alloc());
return this->icont().erase_and_dispose(k, comp, chain_holder.get_chain_builder());
}
protected:
struct cloner
{
@@ -359,10 +385,10 @@ struct node_alloc_holder
{ return this->members_.m_icont; }
NodeAlloc &node_alloc()
{ return this->members_; }
{ return static_cast<NodeAlloc &>(this->members_); }
const NodeAlloc &node_alloc() const
{ return this->members_; }
{ return static_cast<const NodeAlloc &>(this->members_); }
};
} //namespace detail {

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -732,13 +732,13 @@ class rbtree
{ return iterator(this->icont().erase_and_dispose(position.get(), Destroyer(this->node_alloc()))); }
size_type erase(const key_type& k)
{ return this->icont().erase_and_dispose(k, KeyNodeCompare(value_comp()), Destroyer(this->node_alloc())); }
{ return AllocHolder::erase_key(k, KeyNodeCompare(value_comp()), alloc_version()); }
iterator erase(const_iterator first, const_iterator last)
{ return iterator(this->icont().erase_and_dispose(first.get(), last.get(), Destroyer(this->node_alloc()))); }
{ return iterator(AllocHolder::erase_range(first.get(), last.get(), alloc_version())); }
void clear()
{ this->icont().clear_and_dispose(Destroyer(this->node_alloc())); }
{ AllocHolder::clear(alloc_version()); }
// set operations:
iterator find(const key_type& k)

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -404,7 +404,7 @@ class list
//!
//! <b>Complexity</b>: Linear to the number of elements in the list.
void clear()
{ this->icont().clear_and_dispose(Destroyer(this->node_alloc())); }
{ AllocHolder::clear(alloc_version()); }
//! <b>Effects</b>: Returns an iterator to the first element contained in the list.
//!
@@ -786,7 +786,7 @@ class list
//!
//! <b>Complexity</b>: Linear to the distance between first and last.
iterator erase(iterator first, iterator last)
{ return iterator(this->icont().erase_and_dispose(first.get(), last.get(), Destroyer(this->node_alloc()))); }
{ return iterator(AllocHolder::erase_range(first.get(), last.get(), alloc_version())); }
//! <b>Effects</b>: Assigns the n copies of val to *this.
//!
@@ -1085,6 +1085,7 @@ class list
/// @cond
private:
//Iterator range version
template<class InpIterator>
void priv_create_and_insert_nodes
@@ -1160,7 +1161,7 @@ class list
template<class Integer>
void priv_insert_dispatch(iterator p, Integer n, Integer x, detail::true_)
{ this->priv_create_and_insert_nodes(p, n, x); }
{ this->insert(p, (size_type)n, x); }
void priv_fill_assign(size_type n, const T& val)
{

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2004-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -8,7 +8,7 @@
//
//////////////////////////////////////////////////////////////////////////////
//
// This file comes from SGI's stl_slist.h file. Modified by Ion Gaztanaga 2004-2007
// This file comes from SGI's stl_slist.h file. Modified by Ion Gaztanaga 2004-2008
// Renaming, isolating and porting to generic algorithms. Pointer typedef
// set to allocator::pointer to allow placing it in shared memory.
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -8,7 +8,7 @@
//
//////////////////////////////////////////////////////////////////////////////
//
// This file comes from SGI's string file. Modified by Ion Gaztanaga 2004-2007
// This file comes from SGI's string file. Modified by Ion Gaztanaga 2004-2008
// Renaming, isolating and porting to generic algorithms. Pointer typedef
// set to allocator::pointer to allow placing it in shared memory.
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -740,6 +740,9 @@ class vector : private detail::vector_alloc_holder<A>
//Check for forward expansion
same_buffer_start = ret.second && this->members_.m_start == ret.first;
if(same_buffer_start){
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
++this->num_expand_fwd;
#endif
this->members_.m_capacity = real_cap;
}
//If there is no forward expansion, move objects
@@ -748,6 +751,9 @@ class vector : private detail::vector_alloc_holder<A>
copy_move_it dummy_it(detail::get_pointer(this->members_.m_start));
//Backwards (and possibly forward) expansion
if(ret.second){
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
++this->num_expand_bwd;
#endif
this->priv_range_insert_expand_backwards
( detail::get_pointer(ret.first)
, real_cap
@@ -758,6 +764,9 @@ class vector : private detail::vector_alloc_holder<A>
}
//New buffer
else{
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
++this->num_alloc;
#endif
this->priv_range_insert_new_allocation
( detail::get_pointer(ret.first)
, real_cap
@@ -1184,11 +1193,17 @@ class vector : private detail::vector_alloc_holder<A>
//If we had room or we have expanded forward
if (same_buffer_start){
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
++this->num_expand_fwd;
#endif
this->priv_range_insert_expand_forward
(detail::get_pointer(pos), first, last, n);
}
//Backwards (and possibly forward) expansion
else if(ret.second){
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
++this->num_expand_bwd;
#endif
this->priv_range_insert_expand_backwards
( detail::get_pointer(ret.first)
, real_cap
@@ -1199,6 +1214,9 @@ class vector : private detail::vector_alloc_holder<A>
}
//New buffer
else{
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
++this->num_alloc;
#endif
this->priv_range_insert_new_allocation
( detail::get_pointer(ret.first)
, real_cap
@@ -1778,6 +1796,15 @@ class vector : private detail::vector_alloc_holder<A>
if (n >= size())
throw std::out_of_range("vector::at");
}
#ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
public:
unsigned int num_expand_fwd;
unsigned int num_expand_bwd;
unsigned int num_alloc;
void reset_alloc_stats()
{ num_expand_fwd = num_expand_bwd = num_alloc = 0; }
#endif
/// @endcond
};

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2006-2007
// (C) Copyright Ion Gaztanaga 2006-2008
// (C) Copyright Markus Schoepflin 2007
//
// Distributed under the Boost Software License, Version 1.0. (See

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -9,6 +9,7 @@
#define _CRT_SECURE_NO_DEPRECATE
#endif
#pragma warning (push)
#pragma warning (disable : 4702) // unreachable code
#pragma warning (disable : 4706) // assignment within conditional expression
#pragma warning (disable : 4127) // conditional expression is constant
#pragma warning (disable : 4146) // unary minus operator applied to unsigned type, result still unsigned

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
// (C) Copyright Gennaro Prota 2003 - 2004.
//
// Distributed under the Boost Software License, Version 1.0.
@@ -428,6 +428,12 @@ class transform_iterator
operator->() const
{ return operator_arrow_proxy<typename UnaryFunction::result_type>(dereference()); }
Iterator & base()
{ return m_it; }
const Iterator & base() const
{ return m_it; }
private:
Iterator m_it;

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,7 +1,7 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Stephen Cleary 2000.
// (C) Copyright Ion Gaztanaga 2007.
// (C) Copyright Ion Gaztanaga 2007-2008.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
// (C) Copyright Gennaro Prota 2003 - 2004.
//
// Distributed under the Boost Software License, Version 1.0.

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
// (C) Copyright John Maddock 2000.
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007.
// (C) Copyright Ion Gaztanaga 2005-2008.
// (C) Copyright Gennaro Prota 2003 - 2004.
//
// Distributed under the Boost Software License, Version 1.0.
@@ -25,6 +25,9 @@
#include <boost/interprocess/detail/move.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/version_type.hpp>
#include <utility>
#include <algorithm>
@@ -70,14 +73,27 @@ template <class Allocator>
struct scoped_ptr_dealloc_functor
{
typedef typename Allocator::pointer pointer;
typedef detail::integral_constant<unsigned,
boost::interprocess::detail::
version<Allocator>::value> alloc_version;
typedef detail::integral_constant<unsigned, 1> allocator_v1;
typedef detail::integral_constant<unsigned, 2> allocator_v2;
private:
void priv_deallocate(const typename Allocator::pointer &p, allocator_v1)
{ m_alloc.deallocate(p, 1); }
void priv_deallocate(const typename Allocator::pointer &p, allocator_v2)
{ m_alloc.deallocate_one(p); }
public:
Allocator& m_alloc;
scoped_ptr_dealloc_functor(Allocator& a)
: m_alloc(a) {}
: m_alloc(a) {}
void operator()(pointer ptr)
{ if (ptr) m_alloc.deallocate(ptr, 1); }
{ if (ptr) priv_deallocate(ptr, alloc_version()); }
};
//!A deleter for scoped_ptr that deallocates the memory
@@ -86,7 +102,20 @@ template <class Allocator>
struct scoped_deallocator
{
typedef typename Allocator::pointer pointer;
typedef detail::integral_constant<unsigned,
boost::interprocess::detail::
version<Allocator>::value> alloc_version;
typedef detail::integral_constant<unsigned, 1> allocator_v1;
typedef detail::integral_constant<unsigned, 2> allocator_v2;
private:
void priv_deallocate(allocator_v1)
{ m_alloc.deallocate(m_ptr, 1); }
void priv_deallocate(allocator_v2)
{ m_alloc.deallocate_one(m_ptr); }
public:
pointer m_ptr;
Allocator& m_alloc;
@@ -94,7 +123,7 @@ struct scoped_deallocator
: m_ptr(p), m_alloc(a) {}
~scoped_deallocator()
{ if (m_ptr) m_alloc.deallocate(m_ptr, 1); }
{ if (m_ptr)priv_deallocate(alloc_version()); }
void release()
{ m_ptr = 0; }
@@ -189,9 +218,22 @@ template <class A>
class allocator_destroyer
{
typedef typename A::value_type value_type;
typedef detail::integral_constant<unsigned,
boost::interprocess::detail::
version<A>::value> alloc_version;
typedef detail::integral_constant<unsigned, 1> allocator_v1;
typedef detail::integral_constant<unsigned, 2> allocator_v2;
private:
A & a_;
private:
void priv_deallocate(const typename A::pointer &p, allocator_v1)
{ a_.deallocate(p, 1); }
void priv_deallocate(const typename A::pointer &p, allocator_v2)
{ a_.deallocate_one(p); }
public:
allocator_destroyer(A &a)
: a_(a)
@@ -200,35 +242,86 @@ class allocator_destroyer
void operator()(const typename A::pointer &p)
{
detail::get_pointer(p)->~value_type();
a_.deallocate(p, 1);
priv_deallocate(p, alloc_version());
}
};
//!A class used for exception-safe multi-allocation + construction.
template <class Allocator>
struct multiallocation_deallocator
template <class A>
class allocator_destroyer_and_chain_builder
{
typedef typename Allocator::multiallocation_iterator multiallocation_iterator;
typedef typename A::value_type value_type;
typedef typename A::multiallocation_iterator multiallocation_iterator;
typedef typename A::multiallocation_chain multiallocation_chain;
multiallocation_iterator m_itbeg;
Allocator& m_alloc;
A & a_;
multiallocation_chain &c_;
multiallocation_deallocator(multiallocation_iterator itbeg, Allocator& a)
: m_itbeg(itbeg), m_alloc(a) {}
public:
allocator_destroyer_and_chain_builder(A &a, multiallocation_chain &c)
: a_(a), c_(c)
{}
~multiallocation_deallocator()
{
multiallocation_iterator endit;
while(m_itbeg != endit){
m_alloc.deallocate(&*m_itbeg, 1);
++m_itbeg;
}
void operator()(const typename A::pointer &p)
{
value_type *vp = detail::get_pointer(p);
vp->~value_type();
c_.push_back(vp);
}
void increment()
{ ++m_itbeg; }
};
template <class A>
class allocator_multialloc_chain_node_deallocator
{
typedef typename A::value_type value_type;
typedef typename A::multiallocation_iterator multiallocation_iterator;
typedef typename A::multiallocation_chain multiallocation_chain;
typedef allocator_destroyer_and_chain_builder<A> chain_builder;
A & a_;
multiallocation_chain c_;
public:
allocator_multialloc_chain_node_deallocator(A &a)
: a_(a), c_()
{}
chain_builder get_chain_builder()
{ return chain_builder(a_, c_); }
~allocator_multialloc_chain_node_deallocator()
{
multiallocation_iterator it(c_.get_it());
if(it != multiallocation_iterator())
a_.deallocate_individual(it);
}
};
template <class A>
class allocator_multialloc_chain_array_deallocator
{
typedef typename A::value_type value_type;
typedef typename A::multiallocation_iterator multiallocation_iterator;
typedef typename A::multiallocation_chain multiallocation_chain;
typedef allocator_destroyer_and_chain_builder<A> chain_builder;
A & a_;
multiallocation_chain c_;
public:
allocator_multialloc_chain_array_deallocator(A &a)
: a_(a), c_()
{}
chain_builder get_chain_builder()
{ return chain_builder(a_, c_); }
~allocator_multialloc_chain_array_deallocator()
{
multiallocation_iterator it(c_.get_it());
if(it != multiallocation_iterator())
a_.deallocate_many(it);
}
};
//!A class used for exception-safe multi-allocation + construction.
template <class Allocator>
@@ -577,6 +670,14 @@ inline void swap(pair<T1, T2>&&x, pair<T1, T2>&&y)
}
#endif
template<class T>
struct cast_functor
{
typedef typename detail::add_reference<T>::type result_type;
result_type operator()(char &ptr) const
{ return *static_cast<T*>(static_cast<void*>(&ptr)); }
};
} //namespace detail {
//!The pair is movable if any of its members is movable

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -19,8 +19,8 @@
#if defined(_POSIX_THREAD_PROCESS_SHARED)
# if !((_XOPEN_VERSION >= 600) && (_POSIX_THREAD_PROCESS_SHARED - 0 <= 0))
// Cygwin defines _POSIX_THREAD_PROCESS_SHARED but does not support it.
// Mac Os X >= Leopard defines _POSIX_THREAD_PROCESS_SHARED but it does not seem to work
//Cygwin defines _POSIX_THREAD_PROCESS_SHARED but does not implement it.
//Mac Os X >= Leopard defines _POSIX_THREAD_PROCESS_SHARED but does not seems to work.
# if !defined(__CYGWIN__) && !defined(__APPLE__)
# define BOOST_INTERPROCESS_POSIX_PROCESS_SHARED
# endif

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -129,13 +129,19 @@ class private_node_allocator;
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64>
class cached_node_allocator;
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2>
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2
, unsigned char OverheadPercent = 5
>
class adaptive_pool;
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2>
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2
, unsigned char OverheadPercent = 5
>
class private_adaptive_pool;
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2>
template<class T, class SegmentManager, std::size_t NodesPerChunk = 64, std::size_t MaxFreeChunks = 2
, unsigned char OverheadPercent = 5
>
class cached_adaptive_pool;
@@ -151,10 +157,10 @@ class offset_ptr;
//////////////////////////////////////////////////////////////////////////////
//Single segment memory allocation algorithms
template<class MutexFamily, class VoidMutex = void*>//offset_ptr<void> >
template<class MutexFamily, class VoidMutex = offset_ptr<void> >
class simple_seq_fit;
template<class MutexFamily, class VoidMutex = offset_ptr<void> >
template<class MutexFamily, class VoidMutex = offset_ptr<void>, std::size_t MemAlignment = 0>
class rbtree_best_fit;
//////////////////////////////////////////////////////////////////////////////

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -20,6 +20,7 @@
#include <boost/interprocess/creation_tags.hpp>
#include <boost/interprocess/detail/managed_memory_impl.hpp>
#include <boost/interprocess/detail/move.hpp>
#include <cassert>
//!\file
//!Describes a named user memory allocation user class.
@@ -49,6 +50,8 @@ class basic_managed_external_buffer
basic_managed_external_buffer
(create_only_t, void *addr, std::size_t size)
{
//Check if alignment is correct
assert((0 == (((std::size_t)addr) & (AllocationAlgorithm::Alignment - std::size_t(1u)))));
if(!base_t::create_impl(addr, size)){
throw interprocess_exception();
}
@@ -58,6 +61,8 @@ class basic_managed_external_buffer
basic_managed_external_buffer
(open_only_t, void *addr, std::size_t size)
{
//Check if alignment is correct
assert((0 == (((std::size_t)addr) & (AllocationAlgorithm::Alignment - std::size_t(1u)))));
if(!base_t::open_impl(addr, size)){
throw interprocess_exception();
}

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -104,6 +104,167 @@ class basic_multiallocation_iterator
multi_allocation_next<VoidPointer> next_alloc_;
};
template<class VoidPointer>
class basic_multiallocation_chain
{
private:
basic_multiallocation_iterator<VoidPointer> it_;
VoidPointer last_mem_;
std::size_t num_mem_;
basic_multiallocation_chain(const basic_multiallocation_chain &);
basic_multiallocation_chain &operator=(const basic_multiallocation_chain &);
public:
typedef basic_multiallocation_iterator<VoidPointer> multiallocation_iterator;
basic_multiallocation_chain()
: it_(0), last_mem_(0), num_mem_(0)
{}
void push_back(void *mem)
{
typedef multi_allocation_next<VoidPointer> next_impl_t;
next_impl_t * tmp_mem = static_cast<next_impl_t*>(mem);
if(!this->last_mem_){
this->it_ = basic_multiallocation_iterator<VoidPointer>(tmp_mem);
}
else{
static_cast<next_impl_t*>(detail::get_pointer(this->last_mem_))->next_ = tmp_mem;
}
tmp_mem->next_ = 0;
this->last_mem_ = tmp_mem;
++num_mem_;
}
void push_back(multiallocation_iterator it, std::size_t n)
{
typedef multi_allocation_next<VoidPointer> next_impl_t;
next_impl_t * tmp_mem = (next_impl_t*)(&*it);
if(!this->last_mem_){
this->it_ = it;
}
else{
static_cast<next_impl_t*>(detail::get_pointer(this->last_mem_))->next_ = tmp_mem;
}
tmp_mem->next_ = 0;
this->last_mem_ = tmp_mem;
++num_mem_;
}
void push_front(void *mem)
{
typedef multi_allocation_next<VoidPointer> next_impl_t;
if(!this->last_mem_){
push_back(mem);
}
else{
next_impl_t * tmp_mem = static_cast<next_impl_t*>(mem);
next_impl_t * old_first = (next_impl_t*)(&*this->it_);
static_cast<next_impl_t*>(mem)->next_ = old_first;
this->it_ = basic_multiallocation_iterator<VoidPointer>(tmp_mem);
++num_mem_;
}
}
void swap(basic_multiallocation_chain &other_chain)
{
std::swap(this->it_, other_chain.it_);
std::swap(this->last_mem_, other_chain.last_mem_);
std::swap(this->num_mem_, other_chain.num_mem_);
}
void splice_back(basic_multiallocation_chain &other_chain)
{
typedef multi_allocation_next<VoidPointer> next_impl_t;
multiallocation_iterator end_it;
multiallocation_iterator other_it = other_chain.get_it();
multiallocation_iterator this_it = this->get_it();
if(end_it == other_it){
return;
}
else if(end_it == other_it){
this->swap(other_chain);
}
static_cast<next_impl_t*>(detail::get_pointer(this->last_mem_))->next_
= (next_impl_t*)&*this->it_;
this->last_mem_ = other_chain.last_mem_;
this->num_mem_ += other_chain.num_mem_;
}
void *pop_front()
{
multiallocation_iterator itend;
if(this->it_ == itend){
this->last_mem_= 0;
this->num_mem_ = 0;
return 0;
}
else{
void *addr = &*it_;
++it_;
--num_mem_;
if(!num_mem_){
this->last_mem_ = 0;
this->it_ = multiallocation_iterator();
}
return addr;
}
}
bool empty() const
{ return !num_mem_; }
multiallocation_iterator get_it() const
{ return it_; }
std::size_t size() const
{ return num_mem_; }
};
template<class Allocator>
class allocator_multiallocation_chain
{
typedef typename detail::
pointer_to_other<typename Allocator::pointer, void>::type
void_ptr;
typedef typename Allocator::multiallocation_iterator multiallocation_iterator;
basic_multiallocation_chain<void_ptr> chain_;
public:
allocator_multiallocation_chain()
: chain_()
{}
void push_back(void *mem)
{ chain_.push_back(mem); }
multiallocation_iterator get_it() const
{ return multiallocation_iterator(chain_.get_it()); }
};
#define BOOST_MULTIALLOC_IT_CHAIN_INIT(IT_CHAIN) ((IT_CHAIN).it.next = 0, (IT_CHAIN).last_mem = 0)
#define BOOST_MULTIALLOC_IT_CHAIN_ADD(IT_CHAIN, MEM)\
do{\
multialloc_it_t *____tmp_mem____ = (multialloc_it_t*)(MEM);\
if(!IT_CHAIN.last_mem){\
(IT_CHAIN).it.next = ____tmp_mem____;\
}else{\
((multialloc_it_t*)(IT_CHAIN.last_mem))->next = ____tmp_mem____;\
}\
____tmp_mem____->next = 0;\
IT_CHAIN.last_mem = ____tmp_mem____;\
}while(0)
#define BOOST_MULTIALLOC_IT_CHAIN_IT(IT_CHAIN) ((IT_CHAIN).it)
//!This class implements several allocation functions shared by different algorithms
//!(aligned allocation, multiple allocation...).
@@ -125,6 +286,7 @@ class memory_algorithm_common
static const std::size_t AllocatedCtrlUnits = MemoryAlgorithm::AllocatedCtrlUnits;
static const std::size_t BlockCtrlBytes = MemoryAlgorithm::BlockCtrlBytes;
static const std::size_t BlockCtrlUnits = MemoryAlgorithm::BlockCtrlUnits;
static const std::size_t UsableByPreviousChunk = MemoryAlgorithm::UsableByPreviousChunk;
static void assert_alignment(const void *ptr)
{ assert_alignment((std::size_t)ptr); }
@@ -165,10 +327,11 @@ class memory_algorithm_common
static void* allocate_aligned
(MemoryAlgorithm *memory_algo, std::size_t nbytes, std::size_t alignment)
{
//Ensure power of 2
if ((alignment & (alignment - std::size_t(1u))) != 0){
//Alignment is not power of two
BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) != 0);
BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) == 0);
return 0;
}
@@ -176,6 +339,9 @@ class memory_algorithm_common
if(alignment <= Alignment){
return memory_algo->priv_allocate(allocate_new, nbytes, nbytes, real_size).first;
}
if(nbytes > UsableByPreviousChunk)
nbytes -= UsableByPreviousChunk;
//We can find a aligned portion if we allocate a chunk that has alignment
//nbytes + alignment bytes or more.
@@ -191,7 +357,9 @@ class memory_algorithm_common
// | MBU |
// -----------------------------------------------------
std::size_t request =
minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes);
minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
//prevsize - UsableByPreviousChunk
);
//Now allocate the buffer
void *buffer = memory_algo->priv_allocate(allocate_new, request, request, real_size).first;
@@ -207,7 +375,8 @@ class memory_algorithm_common
max_value(ceil_units(nbytes) + AllocatedCtrlUnits, std::size_t(MinBlockUnits));
//We can create a new block in the end of the segment
if(old_size >= (first_min_units + MinBlockUnits)){
block_ctrl *second = new((char*)first + Alignment*first_min_units) block_ctrl;
//block_ctrl *second = new((char*)first + Alignment*first_min_units) block_ctrl;
block_ctrl *second = (block_ctrl *)((char*)first + Alignment*first_min_units);
first->m_size = first_min_units;
second->m_size = old_size - first->m_size;
BOOST_ASSERT(second->m_size >= MinBlockUnits);
@@ -285,6 +454,7 @@ class memory_algorithm_common
,const std::size_t max_size, const std::size_t preferred_size
,std::size_t &received_size)
{
(void)memory_algo;
//Obtain the real block
block_ctrl *block = memory_algo->priv_get_block(ptr);
std::size_t old_block_units = block->m_size;
@@ -296,11 +466,11 @@ class memory_algorithm_common
assert_alignment(ptr);
//Put this to a safe value
received_size = (old_block_units - AllocatedCtrlUnits)*Alignment;
received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
//Now translate it to Alignment units
const std::size_t max_user_units = floor_units(max_size);
const std::size_t preferred_user_units = ceil_units(preferred_size);
const std::size_t max_user_units = floor_units(max_size - UsableByPreviousChunk);
const std::size_t preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);
//Check if rounded max and preferred are possible correct
if(max_user_units < preferred_user_units)
@@ -331,7 +501,7 @@ class memory_algorithm_common
}
//Update new size
received_size = shrunk_user_units*Alignment;
received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
return true;
}
@@ -350,22 +520,23 @@ class memory_algorithm_common
}
//Check if the old size was just the shrunk size (no splitting)
if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size))
if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
return true;
//Now we can just rewrite the size of the old buffer
block->m_size = received_size/Alignment + AllocatedCtrlUnits;
block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits;
BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
memory_algo->priv_mark_new_allocated_block(block);
//We create the new block
block_ctrl *new_block = new(reinterpret_cast<block_ctrl*>
(detail::char_ptr_cast(block) + block->m_size*Alignment)) block_ctrl;
// block_ctrl *new_block = new(reinterpret_cast<block_ctrl*>
// (detail::char_ptr_cast(block) + block->m_size*Alignment)) block_ctrl;
block_ctrl *new_block = reinterpret_cast<block_ctrl*>
(detail::char_ptr_cast(block) + block->m_size*Alignment);
//Write control data to simulate this new block was previously allocated
//and deallocate it
new_block->m_size = old_block_units - block->m_size;
BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
memory_algo->priv_mark_new_allocated_block(block);
memory_algo->priv_mark_new_allocated_block(new_block);
memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
return true;
@@ -401,11 +572,11 @@ class memory_algorithm_common
multi_allocation_next_ptr first = 0, previous = 0;
std::size_t low_idx = 0;
while(low_idx < n_elements){
std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes;
std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
std::size_t min_allocation = (!sizeof_element)
? elem_units
: memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
min_allocation = min_allocation*Alignment - AllocatedCtrlBytes;
min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
std::size_t received_size;
std::pair<void *, bool> ret = memory_algo->priv_allocate
@@ -419,6 +590,7 @@ class memory_algorithm_common
char *block_address = (char*)block;
std::size_t total_used_units = 0;
// block_ctrl *prev_block = 0;
while(total_used_units < received_units){
if(sizeof_element){
elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
@@ -428,7 +600,10 @@ class memory_algorithm_common
break;
total_request_units -= elem_units;
//This is the position where the new block must be created
block_ctrl *new_block = new(block_address)block_ctrl;
// if(prev_block)
// memory_algo->priv_mark_new_allocated_block(prev_block);
block_ctrl *new_block = (block_ctrl *)(block_address);
// block_ctrl *new_block = new(block_address)block_ctrl;
assert_alignment(new_block);
//The last block should take all the remaining space
@@ -446,7 +621,7 @@ class memory_algorithm_common
//split it obtaining a new free memory block do it.
if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
std::size_t shrunk_received;
std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes;
std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
bool ret = shrink
(memory_algo
,memory_algo->priv_get_user_buffer(new_block)
@@ -457,7 +632,7 @@ class memory_algorithm_common
BOOST_ASSERT(ret);
//Some sanity checks
BOOST_ASSERT(shrunk_request == shrunk_received);
BOOST_ASSERT(elem_units == (shrunk_request/Alignment + AllocatedCtrlUnits));
BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
//"new_block->m_size" must have been reduced to elem_units by "shrink"
BOOST_ASSERT(new_block->m_size == elem_units);
//Now update the total received units with the reduction
@@ -483,6 +658,7 @@ class memory_algorithm_common
}
previous = p;
++low_idx;
//prev_block = new_block;
}
//Sanity check
BOOST_ASSERT(total_used_units == received_units);

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -64,6 +64,8 @@ class simple_seq_fit_impl
typedef detail::basic_multiallocation_iterator
<void_pointer> multiallocation_iterator;
typedef detail::basic_multiallocation_chain
<void_pointer> multiallocation_chain;
private:
class block_ctrl;
@@ -137,6 +139,9 @@ class simple_seq_fit_impl
//!Multiple element allocation, different size
multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element);
//!Multiple element deallocation
void deallocate_many(multiallocation_iterator it);
/// @endcond
//!Deallocates previously allocated bytes
@@ -170,8 +175,13 @@ class simple_seq_fit_impl
std::size_t preferred_size,std::size_t &received_size,
T *reuse_ptr = 0);
std::pair<void *, bool>
raw_allocation_command (allocation_type command, std::size_t limit_size,
std::size_t preferred_size,std::size_t &received_size,
void *reuse_ptr = 0, std::size_t sizeof_object = 1);
//!Returns the size of the buffer previously allocated pointed by ptr
std::size_t size(void *ptr) const;
std::size_t size(const void *ptr) const;
//!Allocates aligned bytes, returns 0 if there is not more memory.
//!Alignment must be power of 2
@@ -247,13 +257,16 @@ class simple_seq_fit_impl
void priv_mark_new_allocated_block(block_ctrl *block);
public:
static const std::size_t Alignment = detail::alignment_of<detail::max_align>::value;
private:
static const std::size_t BlockCtrlBytes = detail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment;
static const std::size_t MinBlockUnits = BlockCtrlUnits;
static const std::size_t MinBlockSize = MinBlockUnits*Alignment;
static const std::size_t AllocatedCtrlBytes = BlockCtrlBytes;
static const std::size_t AllocatedCtrlUnits = BlockCtrlUnits;
static const std::size_t UsableByPreviousChunk = 0;
public:
static const std::size_t PayloadPerAllocation = BlockCtrlBytes;
@@ -549,17 +562,32 @@ inline std::pair<T*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
std::size_t preferred_size,std::size_t &received_size,
T *reuse_ptr)
{
if(command & try_shrink_in_place){
bool success =
algo_impl_t::try_shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
return std::pair<T *, bool> ((success ? reuse_ptr : 0), true);
}
std::pair<void*, bool> ret = priv_allocation_command
(command, limit_size, preferred_size, received_size, reuse_ptr, sizeof(T));
BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of<T>::value));
return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
}
template<class MutexFamily, class VoidPointer>
inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
raw_allocation_command (allocation_type command, std::size_t limit_objects,
std::size_t preferred_objects,std::size_t &received_objects,
void *reuse_ptr, std::size_t sizeof_object)
{
if(!sizeof_object)
return std::pair<void *, bool>(0, 0);
if(command & try_shrink_in_place){
bool success = algo_impl_t::try_shrink
( this, reuse_ptr, limit_objects*sizeof_object
, preferred_objects*sizeof_object, received_objects);
received_objects /= sizeof_object;
return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
}
return priv_allocation_command
(command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);
}
template<class MutexFamily, class VoidPointer>
inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
priv_allocation_command (allocation_type command, std::size_t limit_size,
@@ -589,13 +617,13 @@ inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::
size(void *ptr) const
size(const void *ptr) const
{
//We need no synchronization since this block is not going
//to be modified
//Obtain the real size of the block
block_ctrl *block = reinterpret_cast<block_ctrl*>
(priv_get_block(detail::char_ptr_cast(ptr)));
(priv_get_block(detail::char_ptr_cast(const_cast<void*>(ptr))));
return block->get_user_bytes();
}
@@ -689,6 +717,20 @@ inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_i
allocate_many(this, elem_bytes, num_elements);
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_iterator it)
{
//-----------------------
boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
//-----------------------
while(it){
void *addr = &*it;
++it;
this->priv_deallocate(addr);
}
}
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_iterator
simple_seq_fit_impl<MutexFamily, VoidPointer>::

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -61,7 +61,7 @@ class offset_ptr
typedef void (self_t::*unspecified_bool_type)() const;
#if defined(_MSC_VER) && (_MSC_VER >= 1400)
__declspec(noinline)
__declspec(noinline) //this workaround is needed for msvc-8.0 and msvc-9.0
#endif
void set_offset(const volatile void *ptr)
{
@@ -77,7 +77,7 @@ class offset_ptr
}
#if defined(_MSC_VER) && (_MSC_VER >= 1400)
__declspec(noinline)
__declspec(noinline) //this workaround is needed for msvc-8.0 and msvc-9.0
#endif
void* get_pointer() const
{ return (m_offset == 1) ? 0 : (detail::char_ptr_cast(this) + m_offset); }

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
@@ -70,7 +70,8 @@ class segment_manager_base
/// @cond
//Experimental. Don't use
typedef typename MemoryAlgorithm::multiallocation_iterator multiallocation_iterator;
typedef typename MemoryAlgorithm::multiallocation_iterator multiallocation_iterator;
typedef typename MemoryAlgorithm::multiallocation_chain multiallocation_chain;
/// @endcond
@@ -148,6 +149,11 @@ class segment_manager_base
multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element, std::nothrow_t)
{ return MemoryAlgorithm::allocate_many(elem_sizes, n_elements, sizeof_element); }
//!Deallocates elements pointed by the
//!multiallocation iterator range.
void deallocate_many(multiallocation_iterator it)
{ MemoryAlgorithm::deallocate_many(it); }
/// @endcond
//!Allocates nbytes bytes. Throws boost::interprocess::bad_alloc
@@ -189,6 +195,19 @@ class segment_manager_base
return ret;
}
std::pair<void *, bool>
raw_allocation_command (allocation_type command, std::size_t limit_objects,
std::size_t preferred_objects,std::size_t &received_objects,
void *reuse_ptr = 0, std::size_t sizeof_object = 1)
{
std::pair<void *, bool> ret = MemoryAlgorithm::raw_allocation_command
( command | nothrow_allocation, limit_objects, preferred_objects, received_objects
, reuse_ptr, sizeof_object);
if(!(command & nothrow_allocation) && !ret.first)
throw bad_alloc();
return ret;
}
//!Deallocates the bytes allocated with allocate/allocate_many()
//!pointed by addr
void deallocate (void *addr)
@@ -219,6 +238,10 @@ class segment_manager_base
void zero_free_memory()
{ MemoryAlgorithm::zero_free_memory(); }
//!Returns the size of the buffer previously allocated pointed by ptr
std::size_t size(const void *ptr) const
{ return MemoryAlgorithm::size(ptr); }
/// @cond
protected:
void * prot_anonymous_construct

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2007.
// (C) Copyright Ion Gaztanaga 2007-2008.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at

View File

@@ -4,7 +4,7 @@
//
// (C) Copyright Peter Dimov and Multi Media Ltd. 2001, 2002, 2003
// (C) Copyright Peter Dimov 2004-2005
// (C) Copyright Ion Gaztanaga 2006-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2006-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -9,7 +9,7 @@
// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
// Copyright 2004-2005 Peter Dimov
// Copyright 2007 Ion Gaztanaga
// Copyright 2007-2008 Ion Gaztanaga
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at

View File

@@ -4,7 +4,7 @@
//
// (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
// (C) Copyright Peter Dimov 2001, 2002, 2003
// (C) Copyright Ion Gaztanaga 2006-2007.
// (C) Copyright Ion Gaztanaga 2006-2008.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

View File

@@ -3,7 +3,7 @@
// This file is the adaptation for Interprocess of boost/weak_ptr.hpp
//
// (C) Copyright Peter Dimov 2001, 2002, 2003
// (C) Copyright Ion Gaztanaga 2006-2007.
// (C) Copyright Ion Gaztanaga 2006-2008.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

View File

@@ -1,6 +1,6 @@
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

Some files were not shown because too many files have changed in this diff Show More