From 1a240759d3bb985ac111d006049dbb3a96ca26de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ion=20Gazta=C3=B1aga?= Date: Sun, 20 Jan 2008 11:54:47 +0000 Subject: [PATCH] Updated Interprocess and Intrusive: -> Added linear slist to intrusive -> Updated all allocators to version 2 allocators in Interprocess -> Optimized rbtree_best_fit size overhead to 1 std:size_t. [SVN r42878] --- doc/Jamfile.v2 | 1 + doc/interprocess.qbk | 2 +- .../interprocess/allocators/adaptive_pool.hpp | 504 ++++++++---- .../allocators/allocation_type.hpp | 2 +- .../interprocess/allocators/allocator.hpp | 101 ++- .../allocators/cached_adaptive_pool.hpp | 515 ++++++------ .../allocators/cached_node_allocator.hpp | 470 +++++------ .../allocators/detail/adaptive_node_pool.hpp | 689 ++++++++-------- .../allocators/detail/allocator_common.hpp | 760 ++++++++++++++++++ .../allocators/detail/node_pool.hpp | 344 ++++---- .../allocators/detail/node_tools.hpp | 5 +- .../allocators/node_allocator.hpp | 513 +++++++----- .../allocators/private_adaptive_pool.hpp | 433 +++++++--- .../allocators/private_node_allocator.hpp | 456 ++++++++++- .../containers/detail/flat_tree.hpp | 2 +- .../containers/detail/node_alloc_holder.hpp | 46 +- .../interprocess/containers/detail/tree.hpp | 8 +- .../interprocess/containers/flat_map.hpp | 2 +- .../interprocess/containers/flat_set.hpp | 2 +- .../boost/interprocess/containers/list.hpp | 9 +- include/boost/interprocess/containers/map.hpp | 2 +- include/boost/interprocess/containers/set.hpp | 2 +- .../boost/interprocess/containers/slist.hpp | 4 +- .../boost/interprocess/containers/string.hpp | 4 +- .../boost/interprocess/containers/vector.hpp | 29 +- include/boost/interprocess/creation_tags.hpp | 2 +- .../boost/interprocess/detail/algorithms.hpp | 2 +- include/boost/interprocess/detail/atomic.hpp | 2 +- .../boost/interprocess/detail/cast_tags.hpp | 2 +- .../interprocess/detail/config_begin.hpp | 1 + .../detail/in_place_interface.hpp | 2 +- .../detail/interprocess_tester.hpp | 2 +- 
.../boost/interprocess/detail/iterators.hpp | 8 +- .../detail/managed_memory_impl.hpp | 2 +- .../interprocess/detail/math_functions.hpp | 2 +- include/boost/interprocess/detail/min_max.hpp | 2 +- include/boost/interprocess/detail/mpl.hpp | 2 +- .../boost/interprocess/detail/named_proxy.hpp | 2 +- .../interprocess/detail/os_file_functions.hpp | 2 +- .../detail/os_thread_functions.hpp | 2 +- .../interprocess/detail/pointer_type.hpp | 2 +- .../detail/posix_time_types_wrk.hpp | 2 +- .../detail/segment_manager_helper.hpp | 2 +- .../interprocess/detail/tmp_dir_helpers.hpp | 2 +- .../boost/interprocess/detail/type_traits.hpp | 2 +- .../boost/interprocess/detail/utilities.hpp | 147 +++- .../interprocess/detail/version_type.hpp | 2 +- .../boost/interprocess/detail/win32_api.hpp | 2 +- .../boost/interprocess/detail/workaround.hpp | 6 +- include/boost/interprocess/errors.hpp | 2 +- include/boost/interprocess/exceptions.hpp | 2 +- include/boost/interprocess/file_mapping.hpp | 2 +- .../interprocess/indexes/flat_map_index.hpp | 2 +- .../boost/interprocess/indexes/iset_index.hpp | 2 +- .../indexes/iunordered_set_index.hpp | 2 +- .../boost/interprocess/indexes/map_index.hpp | 2 +- .../boost/interprocess/indexes/null_index.hpp | 2 +- .../indexes/unordered_map_index.hpp | 2 +- .../boost/interprocess/interprocess_fwd.hpp | 18 +- .../boost/interprocess/ipc/message_queue.hpp | 2 +- .../interprocess/managed_external_buffer.hpp | 7 +- .../interprocess/managed_heap_memory.hpp | 2 +- .../interprocess/managed_mapped_file.hpp | 2 +- .../interprocess/managed_shared_memory.hpp | 2 +- .../managed_windows_shared_memory.hpp | 2 +- include/boost/interprocess/mapped_region.hpp | 2 +- .../mem_algo/detail/mem_algo_common.hpp | 214 ++++- .../mem_algo/detail/simple_seq_fit_impl.hpp | 60 +- .../interprocess/mem_algo/rbtree_best_fit.hpp | 609 ++++++++------ .../interprocess/mem_algo/simple_seq_fit.hpp | 2 +- include/boost/interprocess/offset_ptr.hpp | 6 +- .../boost/interprocess/segment_manager.hpp | 27 
+- .../interprocess/shared_memory_object.hpp | 2 +- .../boost/interprocess/smart_ptr/deleter.hpp | 2 +- .../smart_ptr/detail/shared_count.hpp | 2 +- .../detail/sp_counted_base_atomic.hpp | 2 +- .../interprocess/smart_ptr/shared_ptr.hpp | 2 +- .../boost/interprocess/smart_ptr/weak_ptr.hpp | 2 +- .../interprocess/streams/bufferstream.hpp | 2 +- .../interprocess/streams/vectorstream.hpp | 2 +- .../sync/emulation/interprocess_condition.hpp | 2 +- .../sync/emulation/interprocess_mutex.hpp | 2 +- .../interprocess_recursive_mutex.hpp | 2 +- .../sync/emulation/interprocess_semaphore.hpp | 2 +- .../sync/emulation/named_creation_functor.hpp | 2 +- include/boost/interprocess/sync/file_lock.hpp | 2 +- .../sync/interprocess_barrier.hpp | 2 +- .../sync/interprocess_condition.hpp | 2 +- .../interprocess/sync/interprocess_mutex.hpp | 2 +- .../sync/interprocess_recursive_mutex.hpp | 2 +- .../sync/interprocess_semaphore.hpp | 2 +- .../sync/interprocess_upgradable_mutex.hpp | 2 +- .../boost/interprocess/sync/lock_options.hpp | 2 +- .../boost/interprocess/sync/mutex_family.hpp | 2 +- .../interprocess/sync/named_condition.hpp | 2 +- .../boost/interprocess/sync/named_mutex.hpp | 2 +- .../sync/named_recursive_mutex.hpp | 2 +- .../interprocess/sync/named_semaphore.hpp | 2 +- .../sync/named_upgradable_mutex.hpp | 2 +- .../boost/interprocess/sync/null_mutex.hpp | 2 +- .../sync/posix/interprocess_condition.hpp | 2 +- .../sync/posix/interprocess_mutex.hpp | 2 +- .../posix/interprocess_recursive_mutex.hpp | 2 +- .../sync/posix/interprocess_semaphore.hpp | 2 +- .../sync/posix/pthread_helpers.hpp | 2 +- .../sync/posix/ptime_to_timespec.hpp | 2 +- .../sync/posix/semaphore_wrapper.hpp | 2 +- .../boost/interprocess/sync/scoped_lock.hpp | 2 +- .../boost/interprocess/sync/sharable_lock.hpp | 2 +- .../interprocess/sync/upgradable_lock.hpp | 2 +- .../interprocess/windows_shared_memory.hpp | 2 +- proj/vc7ide/interprocesslib.vcproj | 6 + test/adaptive_node_pool_test.cpp | 2 +- test/adaptive_pool_test.cpp 
| 24 + test/cached_adaptive_pool_test.cpp | 24 + test/cached_node_allocator_test.cpp | 23 +- test/file_mapping_test.cpp | 1 - test/map_test.hpp | 2 + test/memory_algorithm_test.cpp | 94 ++- test/memory_algorithm_test_template.hpp | 110 ++- test/node_allocator_test.cpp | 19 +- test/node_pool_test.hpp | 8 +- test/private_adaptive_pool_test.cpp | 19 +- test/private_node_allocator_test.cpp | 21 +- test/set_test.hpp | 2 + test/vector_test.cpp | 237 +----- test/vector_test.hpp | 219 +++++ 127 files changed, 4675 insertions(+), 2294 deletions(-) create mode 100644 include/boost/interprocess/allocators/detail/allocator_common.hpp create mode 100644 test/vector_test.hpp diff --git a/doc/Jamfile.v2 b/doc/Jamfile.v2 index c935497..9cb17de 100644 --- a/doc/Jamfile.v2 +++ b/doc/Jamfile.v2 @@ -26,6 +26,7 @@ doxygen autodoc HIDE_UNDOC_MEMBERS=YES EXTRACT_PRIVATE=NO EXPAND_ONLY_PREDEF=YES + PREDEFINED=BOOST_INTERPROCESS_DOXYGEN_INVOKED "boost.doxygen.reftitle=Boost.Interprocess Reference" ; diff --git a/doc/interprocess.qbk b/doc/interprocess.qbk index e444b72..a05d77a 100644 --- a/doc/interprocess.qbk +++ b/doc/interprocess.qbk @@ -6418,7 +6418,7 @@ warranty. [classref boost::interprocess::named_mutex named_mutex]. * Reduced template bloat for node and adaptive allocators extracting node - implementation to a class taht only depends on the memory algorithm, instead of + implementation to a class that only depends on the memory algorithm, instead of the segment manager + node size + node number... 
* Fixed bug in `mapped_region` in UNIX when mapping address was provided but diff --git a/include/boost/interprocess/allocators/adaptive_pool.hpp b/include/boost/interprocess/allocators/adaptive_pool.hpp index f480651..0c00006 100644 --- a/include/boost/interprocess/allocators/adaptive_pool.hpp +++ b/include/boost/interprocess/allocators/adaptive_pool.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -35,35 +36,38 @@ namespace boost { namespace interprocess { -//!An STL node allocator that uses a segment manager as memory -//!source. The internal pointer type will of the same type (raw, smart) as -//!"typename SegmentManager::void_pointer" type. This allows -//!placing the allocator in shared memory, memory mapped-files, etc... -//!This node allocator shares a segregated storage between all instances -//!of adaptive_pool with equal sizeof(T) placed in the same segment -//!group. NodesPerChunk is the number of nodes allocated at once when the allocator -//!needs runs out of nodes. 
MaxFreeChunks is the number of free nodes -//!in the adaptive node pool that will trigger the deallocation of -template -class adaptive_pool +/// @cond + +namespace detail{ + +template < unsigned int Version + , class T + , class SegmentManager + , std::size_t NodesPerChunk + , std::size_t MaxFreeChunks + , unsigned char OverheadPercent + > +class adaptive_pool_base + : public node_pool_allocation_impl + < adaptive_pool_base + < Version, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> + , Version + , T + , SegmentManager + > { public: typedef typename SegmentManager::void_pointer void_pointer; - typedef typename detail:: - pointer_to_other::type cvoid_pointer; typedef SegmentManager segment_manager; - typedef typename detail:: - pointer_to_other::type char_pointer; - typedef typename SegmentManager:: - mutex_family::mutex_type mutex_type; - typedef adaptive_pool - self_t; + typedef adaptive_pool_base + self_t; typedef detail::shared_adaptive_node_pool - < SegmentManager, mutex_type - , sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t; + < SegmentManager, sizeof(T), NodesPerChunk, MaxFreeChunks, OverheadPercent> node_pool_t; typedef typename detail:: pointer_to_other::type node_pool_ptr; + BOOST_STATIC_ASSERT((Version <=2)); + public: //------- typedef typename detail:: @@ -78,52 +82,60 @@ class adaptive_pool typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; - //!Obtains adaptive_pool from - //!adaptive_pool + typedef detail::version_type version; + typedef transform_iterator + < typename SegmentManager:: + multiallocation_iterator + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; + + //!Obtains adaptive_pool_base from + //!adaptive_pool_base template struct rebind { - typedef adaptive_pool other; + typedef adaptive_pool_base other; }; /// @cond private: - //!Not assignable from related adaptive_pool - template - adaptive_pool& operator= - 
(const adaptive_pool&); + //!Not assignable from related adaptive_pool_base + template + adaptive_pool_base& operator= + (const adaptive_pool_base&); - //!Not assignable from other adaptive_pool - adaptive_pool& operator=(const adaptive_pool&); + //!Not assignable from other adaptive_pool_base + adaptive_pool_base& operator=(const adaptive_pool_base&); /// @endcond public: //!Constructor from a segment manager. If not present, constructs a node //!pool. Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc - adaptive_pool(segment_manager *segment_mngr) - : mp_node_pool(priv_get_or_create(segment_mngr)) { } + adaptive_pool_base(segment_manager *segment_mngr) + : mp_node_pool(detail::get_or_create_node_pool(segment_mngr)) { } - //!Copy constructor from other adaptive_pool. Increments the reference + //!Copy constructor from other adaptive_pool_base. Increments the reference //!count of the associated node pool. Never throws - adaptive_pool(const adaptive_pool &other) + adaptive_pool_base(const adaptive_pool_base &other) : mp_node_pool(other.get_node_pool()) { mp_node_pool->inc_ref_count(); } - //!Copy constructor from related adaptive_pool. If not present, constructs + //!Copy constructor from related adaptive_pool_base. If not present, constructs //!a node pool. Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc template - adaptive_pool - (const adaptive_pool &other) - : mp_node_pool(priv_get_or_create(other.get_segment_manager())) { } + adaptive_pool_base + (const adaptive_pool_base &other) + : mp_node_pool(detail::get_or_create_node_pool(other.get_segment_manager())) { } //!Destructor, removes node_pool_t from memory //!if its reference count reaches to zero. 
Never throws - ~adaptive_pool() - { priv_destroy_if_last_link(); } + ~adaptive_pool_base() + { detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool)); } //!Returns a pointer to the node pool. //!Never throws @@ -135,156 +147,300 @@ class adaptive_pool segment_manager* get_segment_manager()const { return mp_node_pool->get_segment_manager(); } - //!Returns the number of elements that could be allocated. - //!Never throws - size_type max_size() const - { return this->get_segment_manager()->get_size()/sizeof(value_type); } - - //!Allocate memory for an array of count elements. - //!Throws boost::interprocess::bad_alloc if there is no enough memory*/ - pointer allocate(size_type count, cvoid_pointer = 0) - { - if(count > ((size_type)-1)/sizeof(value_type)) - throw bad_alloc(); - return pointer(static_cast(mp_node_pool->allocate(count))); - } - - //!Deallocate allocated memory. - //!Never throws - void deallocate(const pointer &ptr, size_type count) - { mp_node_pool->deallocate(detail::get_pointer(ptr), count); } - - //!Deallocates all free chunks of the pool - void deallocate_free_chunks() - { mp_node_pool->deallocate_free_chunks(); } - //!Swaps allocators. Does not throw. If each allocator is placed in a //!different memory segment, the result is undefined. friend void swap(self_t &alloc1, self_t &alloc2) { detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); } - //These functions are obsolete. These are here to conserve - //backwards compatibility with containers using them... - - //!Returns address of mutable object. - //!Never throws - pointer address(reference value) const - { return pointer(boost::addressof(value)); } - - //!Returns address of non mutable object. - //!Never throws - const_pointer address(const_reference value) const - { return const_pointer(boost::addressof(value)); } - - //!Default construct an object. 
- //!Throws if T's default constructor throws*/ - void construct(const pointer &ptr) - { new(detail::get_pointer(ptr)) value_type; } - - //!Destroys object. Throws if object's - //!destructor throws - void destroy(const pointer &ptr) - { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); } - /// @cond - private: - //!Object function that creates the node allocator if it is not created and - //!increments reference count if it is already created - struct get_or_create_func - { - typedef detail::shared_adaptive_node_pool - node_pool_t; - - //!This connects or constructs the unique instance of node_pool_t - //!Can throw boost::interprocess::bad_alloc - void operator()() - { - //Find or create the node_pool_t - mp_node_pool = mp_named_alloc->template find_or_construct - (unique_instance)(mp_named_alloc); - //If valid, increment link count - if(mp_node_pool != 0) - mp_node_pool->inc_ref_count(); - } - - //!Constructor. Initializes function - //!object parameters - get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){} - - node_pool_t *mp_node_pool; - segment_manager *mp_named_alloc; - }; - - //!Initialization function, creates an executes atomically the - //!initialization object functions. Can throw boost::interprocess::bad_alloc - node_pool_t *priv_get_or_create(segment_manager *named_alloc) - { - get_or_create_func func(named_alloc); - named_alloc->atomic_func(func); - return func.mp_node_pool; - } - - //!Object function that decrements the reference count. If the count - //!reaches to zero destroys the node allocator from memory. - //!Never throws - struct destroy_if_last_link_func - { - typedef detail::shared_adaptive_node_pool - node_pool_t; - - //!Decrements reference count and destroys the object if there is no - //!more attached allocators. 
Never throws - void operator()() - { - //If not the last link return - if(mp_node_pool->dec_ref_count() != 0) return; - - //Last link, let's destroy the segment_manager - mp_named_alloc->template destroy(unique_instance); - } - - //!Constructor. Initializes function - //!object parameters - destroy_if_last_link_func(segment_manager *nhdr, - node_pool_t *phdr) - : mp_named_alloc(nhdr), mp_node_pool(phdr){} - - segment_manager *mp_named_alloc; - node_pool_t *mp_node_pool; - }; - - //!Destruction function, initializes and executes destruction function - //!object. Never throws - void priv_destroy_if_last_link() - { - typedef detail::shared_adaptive_node_pool - node_pool_t; - //Get segment manager - segment_manager *named_segment_mngr = this->get_segment_manager(); - //Execute destruction functor atomically - destroy_if_last_link_func func(named_segment_mngr, detail::get_pointer(mp_node_pool)); - named_segment_mngr->atomic_func(func); - } - private: node_pool_ptr mp_node_pool; /// @endcond }; //!Equality test for same type -//!of adaptive_pool -template inline -bool operator==(const adaptive_pool &alloc1, - const adaptive_pool &alloc2) +//!of adaptive_pool_base +template inline +bool operator==(const adaptive_pool_base &alloc1, + const adaptive_pool_base &alloc2) { return alloc1.get_node_pool() == alloc2.get_node_pool(); } //!Inequality test for same type -//!of adaptive_pool -template inline -bool operator!=(const adaptive_pool &alloc1, - const adaptive_pool &alloc2) +//!of adaptive_pool_base +template inline +bool operator!=(const adaptive_pool_base &alloc1, + const adaptive_pool_base &alloc2) { return alloc1.get_node_pool() != alloc2.get_node_pool(); } +template < class T + , class SegmentManager + , std::size_t NodesPerChunk = 64 + , std::size_t MaxFreeChunks = 2 + , unsigned char OverheadPercent = 5 + > +class adaptive_pool_v1 + : public adaptive_pool_base + < 1 + , T + , SegmentManager + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > +{ + public: + 
typedef detail::adaptive_pool_base + < 1, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t; + + template + struct rebind + { + typedef adaptive_pool_v1 other; + }; + + adaptive_pool_v1(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + adaptive_pool_v1 + (const adaptive_pool_v1 &other) + : base_t(other) + {} +}; + +} //namespace detail{ + +/// @endcond + +//!An STL node allocator that uses a segment manager as memory +//!source. The internal pointer type will of the same type (raw, smart) as +//!"typename SegmentManager::void_pointer" type. This allows +//!placing the allocator in shared memory, memory mapped-files, etc... +//! +//!This node allocator shares a segregated storage between all instances +//!of adaptive_pool with equal sizeof(T) placed in the same segment +//!group. NodesPerChunk is the number of nodes allocated at once when the allocator +//!needs runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks +//!that the adaptive node pool will hold. The rest of the totally free chunks will be +//!deallocated with the segment manager. +//! 
+//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator: +//!(memory usable for nodes / total memory allocated from the segment manager) +template < class T + , class SegmentManager + , std::size_t NodesPerChunk + , std::size_t MaxFreeChunks + , unsigned char OverheadPercent + > +class adaptive_pool + /// @cond + : public detail::adaptive_pool_base + < 2 + , T + , SegmentManager + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > + /// @endcond +{ + + #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED + typedef detail::adaptive_pool_base + < 2, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t; + public: + typedef detail::version_type version; + + template + struct rebind + { + typedef adaptive_pool other; + }; + + adaptive_pool(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + adaptive_pool + (const adaptive_pool &other) + : base_t(other) + {} + + #else //BOOST_INTERPROCESS_DOXYGEN_INVOKED + public: + typedef implementation_defined::segment_manager segment_manager; + typedef segment_manager::void_pointer void_pointer; + typedef implementation_defined::pointer pointer; + typedef implementation_defined::const_pointer const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + //!Obtains adaptive_pool from + //!adaptive_pool + template + struct rebind + { + typedef adaptive_pool other; + }; + + private: + //!Not assignable from + //!related adaptive_pool + template + adaptive_pool& operator= + (const adaptive_pool&); + + //!Not assignable from + //!other adaptive_pool + adaptive_pool& operator=(const adaptive_pool&); + + public: + //!Constructor from a segment manager. If not present, constructs a node + //!pool. Increments the reference count of the associated node pool. 
+ //!Can throw boost::interprocess::bad_alloc + adaptive_pool(segment_manager *segment_mngr); + + //!Copy constructor from other adaptive_pool. Increments the reference + //!count of the associated node pool. Never throws + adaptive_pool(const adaptive_pool &other); + + //!Copy constructor from related adaptive_pool. If not present, constructs + //!a node pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + template + adaptive_pool + (const adaptive_pool &other); + + //!Destructor, removes node_pool_t from memory + //!if its reference count reaches to zero. Never throws + ~adaptive_pool(); + + //!Returns a pointer to the node pool. + //!Never throws + node_pool_t* get_node_pool() const; + + //!Returns the segment manager. + //!Never throws + segment_manager* get_segment_manager()const; + + //!Returns the number of elements that could be allocated. + //!Never throws + size_type max_size() const; + + //!Allocate memory for an array of count elements. + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate(size_type count, cvoid_pointer hint = 0); + + //!Deallocate allocated memory. + //!Never throws + void deallocate(const pointer &ptr, size_type count); + + //!Deallocates all free chunks + //!of the pool + void deallocate_free_chunks(); + + //!Swaps allocators. Does not throw. If each allocator is placed in a + //!different memory segment, the result is undefined. + friend void swap(self_t &alloc1, self_t &alloc2); + + //!Returns address of mutable object. + //!Never throws + pointer address(reference value) const; + + //!Returns address of non mutable object. + //!Never throws + const_pointer address(const_reference value) const; + + //!Default construct an object. + //!Throws if T's default constructor throws + void construct(const pointer &ptr); + + //!Destroys object. 
Throws if object's + //!destructor throws + void destroy(const pointer &ptr); + + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const; + + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements); + + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it); + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one(); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. 
The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements); + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it); + #endif +}; + +#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED + +//!Equality test for same type +//!of adaptive_pool +template inline +bool operator==(const adaptive_pool &alloc1, + const adaptive_pool &alloc2); + +//!Inequality test for same type +//!of adaptive_pool +template inline +bool operator!=(const adaptive_pool &alloc1, + const adaptive_pool &alloc2); + +#endif } //namespace interprocess { } //namespace boost { diff --git a/include/boost/interprocess/allocators/allocation_type.hpp b/include/boost/interprocess/allocators/allocation_type.hpp index a24d789..1894d11 100644 --- a/include/boost/interprocess/allocators/allocation_type.hpp +++ b/include/boost/interprocess/allocators/allocation_type.hpp @@ -1,6 +1,6 @@ /////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/allocators/allocator.hpp b/include/boost/interprocess/allocators/allocator.hpp index e87b557..9d7ce2d 100644 --- a/include/boost/interprocess/allocators/allocator.hpp +++ b/include/boost/interprocess/allocators/allocator.hpp @@ -1,6 +1,6 @@ /////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -51,13 +51,6 @@ class allocator /// @cond private: - struct cast_functor - { - typedef typename detail::add_reference::type result_type; - result_type operator()(char &ptr) const - { return *static_cast(static_cast(&ptr)); } - }; - //Self type typedef allocator self_t; @@ -108,7 +101,9 @@ class allocator typedef transform_iterator < typename SegmentManager:: multiallocation_iterator - , cast_functor> multiallocation_iterator; + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; /// @endcond @@ -146,7 +141,7 @@ class allocator pointer allocate(size_type count, cvoid_ptr hint = 0) { (void)hint; - if(count > ((size_type)-1)/sizeof(T)) + if(count > this->max_size()) throw bad_alloc(); return pointer((value_type*)mp_mngr->allocate(count*sizeof(T))); } @@ -166,7 +161,13 @@ class allocator friend void swap(self_t &alloc1, self_t &alloc2) { detail::do_swap(alloc1.mp_mngr, alloc2.mp_mngr); } - //Experimental version 2 allocator functions + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. 
+ size_type size(const pointer &p) const + { + return (size_type)mp_mngr->size(detail::get_pointer(p))/sizeof(T); + } std::pair allocation_command(allocation_type command, @@ -178,42 +179,6 @@ class allocator (command, limit_size, preferred_size, received_size, detail::get_pointer(reuse)); } - //!Returns maximum the number of objects the previously allocated memory - //!pointed by p can hold. - size_type size(const pointer &p) const - { - return (size_type)mp_mngr->size(detail::get_pointer(p))/sizeof(T); - } - - //!Allocates just one object. Memory allocated with this function - //!must be deallocated only with deallocate_one(). - //!Throws boost::interprocess::bad_alloc if there is no enough memory - pointer allocate_one() - { return this->allocate(1); } - - /// @cond - - //Experimental. Don't use. - - //!Allocates many elements of size == 1 in a contiguous chunk - //!of memory. The minimum number to be allocated is min_elements, - //!the preferred and maximum number is - //!preferred_elements. The number of actually allocated elements is - //!will be assigned to received_size. Memory allocated with this function - //!must be deallocated only with deallocate_one(). - multiallocation_iterator allocate_individual(std::size_t num_elements) - { return this->allocate_many(1, num_elements); } - - /// @endcond - - //!Deallocates memory previously allocated with allocate_one(). - //!You should never use deallocate_one to deallocate memory allocated - //!with other functions different from allocate_one(). Never throws - void deallocate_one(const pointer &p) - { return this->deallocate(p, 1); } - - /// @cond - //!Allocates many elements of size elem_size in a contiguous chunk //!of memory. 
The minimum number to be allocated is min_elements, //!the preferred and maximum number is @@ -235,10 +200,44 @@ class allocator (mp_mngr->allocate_many(elem_sizes, n_elements, sizeof(T))); } - /// @endcond + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it) + { return mp_mngr->deallocate_many(it.base()); } - //These functions are obsolete. These are here to conserve - //backwards compatibility with containers using them... + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one() + { return this->allocate(1); } + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements) + { return this->allocate_many(1, num_elements); } + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p) + { return this->deallocate(p, 1); } + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. 
The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it) + { return this->deallocate_many(it); } //!Returns address of mutable object. //!Never throws @@ -251,7 +250,7 @@ class allocator { return const_pointer(boost::addressof(value)); } //!Default construct an object. - //!Throws if T's default constructor throws*/ + //!Throws if T's default constructor throws void construct(const pointer &ptr) { new(detail::get_pointer(ptr)) value_type; } diff --git a/include/boost/interprocess/allocators/cached_adaptive_pool.hpp b/include/boost/interprocess/allocators/cached_adaptive_pool.hpp index 9930209..ec75948 100644 --- a/include/boost/interprocess/allocators/cached_adaptive_pool.hpp +++ b/include/boost/interprocess/allocators/cached_adaptive_pool.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -19,56 +19,155 @@ #include #include -#include -#include -#include #include +#include #include -#include +#include #include -#include -#include #include //!\file -//!Describes cached_cached_node_allocator pooled shared memory STL compatible allocator +//!Describes cached_adaptive_pool pooled shared memory STL compatible allocator namespace boost { namespace interprocess { +/// @cond + +namespace detail { + +template < class T + , class SegmentManager + , std::size_t NodesPerChunk = 64 + , std::size_t MaxFreeChunks = 2 + , unsigned char OverheadPercent = 5 + > +class cached_adaptive_pool_v1 + : public detail::cached_allocator_impl + < T + , detail::shared_adaptive_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > + , 1> +{ + public: + typedef detail::cached_allocator_impl + < T + , detail::shared_adaptive_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > + , 1> base_t; + + template + struct rebind + { + typedef cached_adaptive_pool_v1 + other; + }; + + cached_adaptive_pool_v1(SegmentManager *segment_mngr, + std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) + : base_t(segment_mngr, max_cached_nodes) + {} + + template + cached_adaptive_pool_v1 + (const cached_adaptive_pool_v1 + &other) + : base_t(other) + {} +}; + +} //namespace detail{ + +/// @endcond + //!An STL node allocator that uses a segment manager as memory //!source. The internal pointer type will of the same type (raw, smart) as //!"typename SegmentManager::void_pointer" type. This allows //!placing the allocator in shared memory, memory mapped-files, etc... +//! //!This node allocator shares a segregated storage between all instances of -//!cached_adaptive_pool with equal sizeof(T) placed in the same fixed size +//!cached_adaptive_pool with equal sizeof(T) placed in the same //!memory segment. 
But also caches some nodes privately to //!avoid some synchronization overhead. -template +//! +//!NodesPerChunk is the minimum number of nodes of nodes allocated at once when +//!the allocator needs runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks +//!that the adaptive node pool will hold. The rest of the totally free chunks will be +//!deallocated with the segment manager. +//! +//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator: +//!(memory usable for nodes / total memory allocated from the segment manager) +template < class T + , class SegmentManager + , std::size_t NodesPerChunk + , std::size_t MaxFreeChunks + , unsigned char OverheadPercent + > class cached_adaptive_pool -{ /// @cond - typedef typename SegmentManager::void_pointer void_pointer; - typedef typename detail:: - pointer_to_other::type cvoid_pointer; - typedef SegmentManager segment_manager; - typedef typename detail:: - pointer_to_other::type char_pointer; - typedef typename SegmentManager::mutex_family::mutex_type mutex_type; - typedef cached_adaptive_pool - self_t; - enum { DEFAULT_MAX_CACHED_NODES = 64 }; - - typedef typename detail::node_slist::node_t node_t; - typedef typename detail::node_slist::node_slist_t cached_list_t; + : public detail::cached_allocator_impl + < T + , detail::shared_adaptive_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > + , 2> /// @endcond +{ + + #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED + public: + typedef detail::cached_allocator_impl + < T + , detail::shared_adaptive_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > + , 2> base_t; public: - //------- - typedef typename detail:: - pointer_to_other::type pointer; - typedef typename detail:: - pointer_to_other::type const_pointer; + typedef detail::version_type version; + + template + struct rebind + { + typedef cached_adaptive_pool + other; + }; 
+ + cached_adaptive_pool(SegmentManager *segment_mngr, + std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) + : base_t(segment_mngr, max_cached_nodes) + {} + + template + cached_adaptive_pool + (const cached_adaptive_pool &other) + : base_t(other) + {} + + #else + public: + typedef implementation_defined::segment_manager segment_manager; + typedef segment_manager::void_pointer void_pointer; + typedef implementation_defined::pointer pointer; + typedef implementation_defined::const_pointer const_pointer; typedef T value_type; typedef typename detail::add_reference ::type reference; @@ -76,312 +175,178 @@ class cached_adaptive_pool ::type const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; - typedef detail::shared_adaptive_node_pool - < SegmentManager, mutex_type - , sizeof(T), NodesPerChunk, MaxFreeChunks> node_pool_t; - typedef typename detail:: - pointer_to_other::type node_pool_ptr; - //!Obtains cached_adaptive_pool from other + //!Obtains cached_adaptive_pool from //!cached_adaptive_pool template struct rebind { - typedef cached_adaptive_pool other; + typedef cached_adaptive_pool other; }; - /// @cond private: - - //!Not assignable from related cached_adaptive_pool - template + //!Not assignable from + //!related cached_adaptive_pool + template cached_adaptive_pool& operator= - (const cached_adaptive_pool&); + (const cached_adaptive_pool&); - //!Not assignable from other cached_adaptive_pool + //!Not assignable from + //!other cached_adaptive_pool cached_adaptive_pool& operator=(const cached_adaptive_pool&); - /// @endcond - - public: - //!Constructor from a segment manager. If not present, constructs - //!a node pool. Increments the reference count of the node pool. 
- //!Can throw boost::interprocess::bad_alloc - cached_adaptive_pool(segment_manager *segment_mngr, - std::size_t max_cached_nodes = DEFAULT_MAX_CACHED_NODES) - : mp_node_pool(priv_get_or_create(segment_mngr)), - m_max_cached_nodes(max_cached_nodes) - {} - //!Copy constructor from other cached_adaptive_pool. Increments the - //!reference count of the associated node pool. Never throws - cached_adaptive_pool(const cached_adaptive_pool &other) - : mp_node_pool(other.get_node_pool()), - m_max_cached_nodes(other.get_max_cached_nodes()) - { mp_node_pool->inc_ref_count(); } + public: + //!Constructor from a segment manager. If not present, constructs a node + //!pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + cached_adaptive_pool(segment_manager *segment_mngr); + + //!Copy constructor from other cached_adaptive_pool. Increments the reference + //!count of the associated node pool. Never throws + cached_adaptive_pool(const cached_adaptive_pool &other); //!Copy constructor from related cached_adaptive_pool. If not present, constructs //!a node pool. Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc template cached_adaptive_pool - (const cached_adaptive_pool &other) - : mp_node_pool(priv_get_or_create(other.get_segment_manager())), - m_max_cached_nodes(other.get_max_cached_nodes()) - { } + (const cached_adaptive_pool &other); //!Destructor, removes node_pool_t from memory //!if its reference count reaches to zero. Never throws - ~cached_adaptive_pool() - { - priv_deallocate_all_cached_nodes(); - priv_destroy_if_last_link(); - } + ~cached_adaptive_pool(); //!Returns a pointer to the node pool. //!Never throws - node_pool_t* get_node_pool() const - { return detail::get_pointer(mp_node_pool); } + node_pool_t* get_node_pool() const; //!Returns the segment manager. 
//!Never throws - segment_manager* get_segment_manager()const - { return mp_node_pool->get_segment_manager(); } + segment_manager* get_segment_manager()const; - //!Sets the new max cached nodes value. This can provoke deallocations - //!if "newmax" is less than current cached nodes. Never throws - void set_max_cached_nodes(std::size_t newmax) - { - m_max_cached_nodes = newmax; - priv_deallocate_remaining_nodes(); - } - - //!Returns the max cached nodes parameter. + //!Returns the number of elements that could be allocated. //!Never throws - std::size_t get_max_cached_nodes() const - { return m_max_cached_nodes; } - - //!Returns the number of elements that could be - //!allocated. Never throws - size_type max_size() const - { return this->get_segment_manager()->get_size()/sizeof(value_type); } + size_type max_size() const; //!Allocate memory for an array of count elements. //!Throws boost::interprocess::bad_alloc if there is no enough memory - pointer allocate(size_type count, cvoid_pointer hint = 0) - { - (void)hint; - if(count > ((size_type)-1)/sizeof(value_type)) - throw bad_alloc(); - typedef detail::shared_adaptive_node_pool - node_pool_t; - - void * ret; - - if(count == 1){ - //If don't have any cached node, we have to get a new list of free nodes from the pool - if(m_cached_nodes.empty()){ - mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes); - } - ret = &m_cached_nodes.front(); - m_cached_nodes.pop_front(); - } - else{ - ret = mp_node_pool->allocate(count); - } - return pointer(static_cast(ret)); - } + pointer allocate(size_type count, cvoid_pointer hint = 0); - //!Deallocate allocated memory. Never throws - void deallocate(const pointer &ptr, size_type count) - { - typedef detail::shared_adaptive_node_pool - node_pool_t; + //!Deallocate allocated memory. 
+ //!Never throws + void deallocate(const pointer &ptr, size_type count); - if(count == 1){ - //Check if cache is full - if(m_cached_nodes.size() >= m_max_cached_nodes){ - //This only occurs if this allocator deallocate memory allocated - //with other equal allocator. Since the cache is full, and more - //deallocations are probably coming, we'll make some room in cache - //in a single, efficient multi node deallocation. - priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2); - } - m_cached_nodes.push_front(*(node_t*)detail::char_ptr_cast(detail::get_pointer(ptr))); - } - else{ - mp_node_pool->deallocate(detail::get_pointer(ptr), count); - } - } - - //!Deallocates all free chunks of the pool - void deallocate_free_chunks() - { mp_node_pool->deallocate_free_chunks(); } + //!Deallocates all free chunks + //!of the pool + void deallocate_free_chunks(); //!Swaps allocators. Does not throw. If each allocator is placed in a - //!different shared memory segments, the result is undefined. - friend void swap(self_t &alloc1, self_t &alloc2) - { - detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); - alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes); - detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes); - } - - void deallocate_cache() - { this->priv_deallocate_all_cached_nodes(); } - - //These functions are obsolete. These are here to conserve - //backwards compatibility with containers using them... + //!different memory segment, the result is undefined. + friend void swap(self_t &alloc1, self_t &alloc2); //!Returns address of mutable object. //!Never throws - pointer address(reference value) const - { return pointer(boost::addressof(value)); } + pointer address(reference value) const; //!Returns address of non mutable object. //!Never throws - const_pointer address(const_reference value) const - { return const_pointer(boost::addressof(value)); } + const_pointer address(const_reference value) const; //!Default construct an object. 
- //!Throws if T's default constructor throws*/ - void construct(const pointer &ptr) - { new(detail::get_pointer(ptr)) value_type; } + //!Throws if T's default constructor throws + void construct(const pointer &ptr); //!Destroys object. Throws if object's //!destructor throws - void destroy(const pointer &ptr) - { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); } + void destroy(const pointer &ptr); - /// @cond - private: + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const; - //!Object function that creates the node allocator if it is not created and - //!increments reference count if it is already created - struct get_or_create_func - { - typedef detail::shared_adaptive_node_pool - node_pool_t; + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0); - //!This connects or constructs the unique instance of node_pool_t - //!Can throw boost::interprocess::bad_alloc - void operator()() - { - //Find or create the node_pool_t - mp_node_pool = mp_named_alloc->template find_or_construct - (unique_instance)(mp_named_alloc); - //If valid, increment link count - if(mp_node_pool != 0) - mp_node_pool->inc_ref_count(); - } + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements); - //!Constructor. 
Initializes function - //!object parameters - get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){} - - node_pool_t *mp_node_pool; - segment_manager *mp_named_alloc; - }; + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements); - //!Frees all cached nodes. + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it); + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one(); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements); + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. 
The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it); + //!Sets the new max cached nodes value. This can provoke deallocations + //!if "newmax" is less than current cached nodes. Never throws + void set_max_cached_nodes(std::size_t newmax); + + //!Returns the max cached nodes parameter. //!Never throws - void priv_deallocate_all_cached_nodes() - { - if(m_cached_nodes.empty()) return; - mp_node_pool->deallocate_nodes(m_cached_nodes); - } - - //!Frees all cached nodes at once. - //!Never throws - void priv_deallocate_remaining_nodes() - { - if(m_cached_nodes.size() > m_max_cached_nodes){ - priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes); - } - } - - //!Frees n cached nodes at once. Never throws - void priv_deallocate_n_nodes(std::size_t n) - { - //Deallocate all new linked list at once - mp_node_pool->deallocate_nodes(m_cached_nodes, n); - } - - //!Initialization function, creates an executes atomically the - //!initialization object functions. Can throw boost::interprocess::bad_alloc - node_pool_t *priv_get_or_create(segment_manager *named_alloc) - { - get_or_create_func func(named_alloc); - named_alloc->atomic_func(func); - return func.mp_node_pool; - } - - //!Object function that decrements the reference count. If the count - //!reaches to zero destroys the node allocator from memory. - //!Never throws - struct destroy_if_last_link_func - { - typedef detail::shared_adaptive_node_pool - node_pool_t; - - //!Decrements reference count and destroys the object if there is no - //!more attached allocators. Never throws - void operator()() - { - //If not the last link return - if(mp_node_pool->dec_ref_count() != 0) return; - - //Last link, let's destroy the segment_manager - mp_named_alloc->template destroy(unique_instance); - } - - //!Constructor. 
Initializes function - //!object parameters - destroy_if_last_link_func(segment_manager *nhdr, - node_pool_t *phdr) - : mp_named_alloc(nhdr), mp_node_pool(phdr){} - - segment_manager *mp_named_alloc; - node_pool_t *mp_node_pool; - }; - - //!Destruction function, initializes and executes destruction function - //!object. Never throws - void priv_destroy_if_last_link() - { - typedef detail::shared_adaptive_node_pool - node_pool_t; - //Get segment manager - segment_manager *segment_mngr = this->get_segment_manager(); - //Execute destruction functor atomically - destroy_if_last_link_func func(segment_mngr, detail::get_pointer(mp_node_pool)); - segment_mngr->atomic_func(func); - } - - private: - node_pool_ptr mp_node_pool; - cached_list_t m_cached_nodes; - std::size_t m_max_cached_nodes; - /// @endcond + std::size_t get_max_cached_nodes() const; + #endif }; -//!Equality test for same type of -//!cached_adaptive_pool -template inline -bool operator==(const cached_adaptive_pool &alloc1, - const cached_adaptive_pool &alloc2) - { return alloc1.get_node_pool() == alloc2.get_node_pool(); } +#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED -//!Inequality test for same type of -//!cached_adaptive_pool -template inline -bool operator!=(const cached_adaptive_pool &alloc1, - const cached_adaptive_pool &alloc2) - { return alloc1.get_node_pool() != alloc2.get_node_pool(); } +//!Equality test for same type +//!of cached_adaptive_pool +template inline +bool operator==(const cached_adaptive_pool &alloc1, + const cached_adaptive_pool &alloc2); + +//!Inequality test for same type +//!of cached_adaptive_pool +template inline +bool operator!=(const cached_adaptive_pool &alloc1, + const cached_adaptive_pool &alloc2); + +#endif } //namespace interprocess { - } //namespace boost { + #include #endif //#ifndef BOOST_INTERPROCESS_CACHED_ADAPTIVE_POOL_HPP diff --git a/include/boost/interprocess/allocators/cached_node_allocator.hpp b/include/boost/interprocess/allocators/cached_node_allocator.hpp index 
2f7d1f4..4ee4767 100644 --- a/include/boost/interprocess/allocators/cached_node_allocator.hpp +++ b/include/boost/interprocess/allocators/cached_node_allocator.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -19,14 +19,11 @@ #include #include -#include -#include -#include #include +#include #include -#include -#include -#include +#include +#include #include //!\file @@ -35,37 +32,113 @@ namespace boost { namespace interprocess { -//!An STL node allocator that uses a segment manager as memory -//!source. The internal pointer type will of the same type (raw, smart) as -//!"typename SegmentManager::void_pointer" type. This allows -//!placing the allocator in shared memory, memory mapped-files, etc... -//!This node allocator shares a segregated storage between all instances of -//!cached_node_allocator with equal sizeof(T) placed in the same fixed size -//!memory segment. But also caches some nodes privately to -//!avoid some synchronization overhead. 
-template -class cached_node_allocator + +/// @cond + +namespace detail { + +template < class T + , class SegmentManager + , std::size_t NodesPerChunk = 64 + > +class cached_node_allocator_v1 + : public detail::cached_allocator_impl + < T + , detail::shared_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + > + , 1> { + public: + typedef detail::cached_allocator_impl + < T + , detail::shared_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + > + , 1> base_t; + + template + struct rebind + { + typedef cached_node_allocator_v1 + other; + }; + + cached_node_allocator_v1(SegmentManager *segment_mngr, + std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) + : base_t(segment_mngr, max_cached_nodes) + {} + + template + cached_node_allocator_v1 + (const cached_node_allocator_v1 + &other) + : base_t(other) + {} +}; + +} //namespace detail{ + +/// @endcond + +template < class T + , class SegmentManager + , std::size_t NodesPerChunk + > +class cached_node_allocator /// @cond - typedef typename SegmentManager::void_pointer void_pointer; - typedef typename detail:: - pointer_to_other::type cvoid_pointer; - typedef SegmentManager segment_manager; - typedef typename detail:: - pointer_to_other::type char_pointer; - typedef typename SegmentManager::mutex_family::mutex_type mutex_type; - typedef cached_node_allocator self_t; - enum { DEFAULT_MAX_CACHED_NODES = 64 }; - typedef typename detail::node_slist::node_t node_t; - typedef typename detail::node_slist::node_slist_t cached_list_t; + : public detail::cached_allocator_impl + < T + , detail::shared_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + > + , 2> /// @endcond +{ + + #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED + public: + typedef detail::cached_allocator_impl + < T + , detail::shared_node_pool + < SegmentManager + , sizeof(T) + , NodesPerChunk + > + , 2> base_t; public: - //------- - typedef typename detail:: - pointer_to_other::type pointer; - typedef typename detail:: - 
pointer_to_other::type const_pointer; + typedef detail::version_type version; + + template + struct rebind + { + typedef cached_node_allocator other; + }; + + cached_node_allocator(SegmentManager *segment_mngr, + std::size_t max_cached_nodes = base_t::DEFAULT_MAX_CACHED_NODES) + : base_t(segment_mngr, max_cached_nodes) + {} + + template + cached_node_allocator + (const cached_node_allocator &other) + : base_t(other) + {} + + #else + public: + typedef implementation_defined::segment_manager segment_manager; + typedef segment_manager::void_pointer void_pointer; + typedef implementation_defined::pointer pointer; + typedef implementation_defined::const_pointer const_pointer; typedef T value_type; typedef typename detail::add_reference ::type reference; @@ -73,302 +146,173 @@ class cached_node_allocator ::type const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; - typedef detail::shared_node_pool - < SegmentManager, mutex_type - , sizeof(T), NodesPerChunk> node_pool_t; - typedef typename detail:: - pointer_to_other::type node_pool_ptr; - //!Obtains cached_node_allocator from other cached_node_allocator + //!Obtains cached_node_allocator from + //!cached_node_allocator template struct rebind { - typedef cached_node_allocator other; + typedef cached_node_allocator other; }; - /// @cond private: - - //!Not assignable from related cached_node_allocator + //!Not assignable from + //!related cached_node_allocator template cached_node_allocator& operator= (const cached_node_allocator&); - //!Not assignable from other cached_node_allocator + //!Not assignable from + //!other cached_node_allocator cached_node_allocator& operator=(const cached_node_allocator&); - /// @endcond public: - //!Constructor from a segment manager. If not present, constructs - //!a node pool. Increments the reference count of the node pool. + //!Constructor from a segment manager. If not present, constructs a node + //!pool. 
Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc - cached_node_allocator(segment_manager *segment_mngr, - std::size_t max_cached_nodes = DEFAULT_MAX_CACHED_NODES) - : mp_node_pool(priv_get_or_create(segment_mngr)), - m_max_cached_nodes(max_cached_nodes) - {} + cached_node_allocator(segment_manager *segment_mngr); - //!Copy constructor from other cached_node_allocator. Increments the - //!reference count of the associated node pool. Never throws - cached_node_allocator(const cached_node_allocator &other) - : mp_node_pool(other.get_node_pool()), - m_max_cached_nodes(other.get_max_cached_nodes()) - { mp_node_pool->inc_ref_count(); } + //!Copy constructor from other cached_node_allocator. Increments the reference + //!count of the associated node pool. Never throws + cached_node_allocator(const cached_node_allocator &other); //!Copy constructor from related cached_node_allocator. If not present, constructs //!a node pool. Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc template cached_node_allocator - (const cached_node_allocator &other) - : mp_node_pool(priv_get_or_create(other.get_segment_manager())), - m_max_cached_nodes(other.get_max_cached_nodes()) - { } + (const cached_node_allocator &other); //!Destructor, removes node_pool_t from memory //!if its reference count reaches to zero. Never throws - ~cached_node_allocator() - { - priv_deallocate_all_cached_nodes(); - priv_destroy_if_last_link(); - } + ~cached_node_allocator(); //!Returns a pointer to the node pool. //!Never throws - node_pool_t* get_node_pool() const - { return detail::get_pointer(mp_node_pool); } + node_pool_t* get_node_pool() const; //!Returns the segment manager. //!Never throws - segment_manager* get_segment_manager()const - { return mp_node_pool->get_segment_manager(); } + segment_manager* get_segment_manager()const; - //!Sets the new max cached nodes value. 
This can provoke deallocations - //!if "newmax" is less than current cached nodes. Never throws - void set_max_cached_nodes(std::size_t newmax) - { - m_max_cached_nodes = newmax; - priv_deallocate_remaining_nodes(); - } - - //!Returns the max cached nodes parameter. + //!Returns the number of elements that could be allocated. //!Never throws - std::size_t get_max_cached_nodes() const - { return m_max_cached_nodes; } - - //!Returns the number of elements that could be allocated. Never throws - size_type max_size() const - { return this->get_segment_manager()->get_size()/sizeof(value_type); } + size_type max_size() const; //!Allocate memory for an array of count elements. //!Throws boost::interprocess::bad_alloc if there is no enough memory - pointer allocate(size_type count, cvoid_pointer hint = 0) - { - (void)hint; - if(count > ((size_type)-1)/sizeof(value_type)) - throw bad_alloc(); - typedef detail::shared_node_pool - node_pool_t; - - void * ret; - - if(count == 1){ - //If don't have any cached node, we have to get a new list of free nodes from the pool - if(m_cached_nodes.empty()){ - mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes); - } - ret = &m_cached_nodes.front(); - m_cached_nodes.pop_front(); - } - else{ - ret = mp_node_pool->allocate(count); - } - return pointer(static_cast(ret)); - } + pointer allocate(size_type count, cvoid_pointer hint = 0); //!Deallocate allocated memory. //!Never throws - void deallocate(const pointer &ptr, size_type count) - { - typedef detail::shared_node_pool - node_pool_t; + void deallocate(const pointer &ptr, size_type count); - if(count == 1){ - //Check if cache is full - if(m_cached_nodes.size() >= m_max_cached_nodes){ - //This only occurs if this allocator deallocate memory allocated - //with other equal allocator. Since the cache is full, and more - //deallocations are probably coming, we'll make some room in cache - //in a single, efficient multi node deallocation. 
- priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2); - } - m_cached_nodes.push_front(*(node_t*)detail::char_ptr_cast(detail::get_pointer(ptr))); - } - else{ - mp_node_pool->deallocate(detail::get_pointer(ptr), count); - } - } + //!Deallocates all free chunks + //!of the pool + void deallocate_free_chunks(); //!Swaps allocators. Does not throw. If each allocator is placed in a - //!different shared memory segments, the result is undefined. - friend void swap(self_t &alloc1, self_t &alloc2) - { - detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); - alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes); - detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes); - } - - //!Returns the cached nodes to the shared pool - void deallocate_cache() - { this->priv_deallocate_all_cached_nodes(); } - - //!Deallocates all free chunks of the pool - void deallocate_free_chunks() - { mp_node_pool->deallocate_free_chunks(); } - - //These functions are obsolete. These are here to conserve - //backwards compatibility with containers using them... + //!different memory segment, the result is undefined. + friend void swap(self_t &alloc1, self_t &alloc2); //!Returns address of mutable object. //!Never throws - pointer address(reference value) const - { return pointer(boost::addressof(value)); } + pointer address(reference value) const; //!Returns address of non mutable object. //!Never throws - const_pointer address(const_reference value) const - { return const_pointer(boost::addressof(value)); } + const_pointer address(const_reference value) const; //!Default construct an object. - //!Throws if T's default constructor throws*/ - void construct(const pointer &ptr) - { new(detail::get_pointer(ptr)) value_type; } + //!Throws if T's default constructor throws + void construct(const pointer &ptr); //!Destroys object. 
Throws if object's //!destructor throws - void destroy(const pointer &ptr) - { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); } + void destroy(const pointer &ptr); - /// @cond - private: + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const; - //!Object function that creates the node allocator if it is not created and - //!increments reference count if it is already created - struct get_or_create_func - { - typedef detail::shared_node_pool - node_pool_t; + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0); - //!This connects or constructs the unique instance of node_pool_t - //!Can throw boost::interprocess::bad_alloc - void operator()() - { - //Find or create the node_pool_t - mp_node_pool = mp_named_alloc->template find_or_construct - (unique_instance)(mp_named_alloc); - //If valid, increment link count - if(mp_node_pool != 0) - mp_node_pool->inc_ref_count(); - } + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements); - //!Constructor. Initializes function - //!object parameters - get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){} - - node_pool_t *mp_node_pool; - segment_manager *mp_named_alloc; - }; + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. 
The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements); - //!Frees all cached nodes. + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it); + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one(); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements); + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it); + //!Sets the new max cached nodes value. 
This can provoke deallocations + //!if "newmax" is less than current cached nodes. Never throws + void set_max_cached_nodes(std::size_t newmax); + + //!Returns the max cached nodes parameter. //!Never throws - void priv_deallocate_all_cached_nodes() - { mp_node_pool->deallocate_nodes(m_cached_nodes); } - - //!Frees all cached nodes at once. - //!Never throws - void priv_deallocate_remaining_nodes() - { - if(m_cached_nodes.size() > m_max_cached_nodes){ - priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes); - } - } - - //!Frees n cached nodes at once. - //!Never throws - void priv_deallocate_n_nodes(std::size_t n) - { mp_node_pool->deallocate_nodes(m_cached_nodes, n); } - - //!Initialization function, creates an executes atomically the - //!initialization object functions. Can throw boost::interprocess::bad_alloc - node_pool_t *priv_get_or_create(segment_manager *named_alloc) - { - get_or_create_func func(named_alloc); - named_alloc->atomic_func(func); - return func.mp_node_pool; - } - - //!Object function that decrements the reference count. If the count - //!reaches to zero destroys the node allocator from memory. - //!Never throws - struct destroy_if_last_link_func - { - typedef detail::shared_node_pool - node_pool_t; - - //!Decrements reference count and destroys the object if there is no - //!more attached allocators. Never throws - void operator()() - { - //If not the last link return - if(mp_node_pool->dec_ref_count() != 0) return; - - //Last link, let's destroy the segment_manager - mp_named_alloc->template destroy(unique_instance); - } - - //!Constructor. Initializes function object - //!parameters - destroy_if_last_link_func(segment_manager *nhdr, - node_pool_t *phdr) - : mp_named_alloc(nhdr), mp_node_pool(phdr){} - - segment_manager *mp_named_alloc; - node_pool_t *mp_node_pool; - }; - - //!Destruction function, initializes and executes destruction function - //!object. 
Never throws - void priv_destroy_if_last_link() - { - typedef detail::shared_node_pool - node_pool_t; - //Get segment manager - segment_manager *segment_mngr = this->get_segment_manager(); - //Execute destruction functor atomically - destroy_if_last_link_func func(segment_mngr, detail::get_pointer(mp_node_pool)); - segment_mngr->atomic_func(func); - } - - private: - node_pool_ptr mp_node_pool; - cached_list_t m_cached_nodes; - std::size_t m_max_cached_nodes; - /// @endcond + std::size_t get_max_cached_nodes() const; + #endif }; -//!Equality test for same type of -//!cached_node_allocator +#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED + +//!Equality test for same type +//!of cached_node_allocator template inline bool operator==(const cached_node_allocator &alloc1, - const cached_node_allocator &alloc2) - { return alloc1.get_node_pool() == alloc2.get_node_pool(); } + const cached_node_allocator &alloc2); -//!Inequality test for same type of -//!cached_node_allocator +//!Inequality test for same type +//!of cached_node_allocator template inline bool operator!=(const cached_node_allocator &alloc1, - const cached_node_allocator &alloc2) - { return alloc1.get_node_pool() != alloc2.get_node_pool(); } + const cached_node_allocator &alloc2); + +#endif } //namespace interprocess { } //namespace boost { diff --git a/include/boost/interprocess/allocators/detail/adaptive_node_pool.hpp b/include/boost/interprocess/allocators/detail/adaptive_node_pool.hpp index b9280f3..02dca7a 100644 --- a/include/boost/interprocess/allocators/detail/adaptive_node_pool.hpp +++ b/include/boost/interprocess/allocators/detail/adaptive_node_pool.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -17,20 +17,21 @@ #include #include +#include #include #include #include #include #include -#include +#include #include #include #include #include +#include #include #include #include -#include //!\file //!Describes the real adaptive pool shared by many Interprocess pool allocators @@ -39,10 +40,6 @@ namespace boost { namespace interprocess { namespace detail { -//!Pooled shared memory allocator using an smart adaptive pool. Includes -//!a reference count but the class does not delete itself, this is -//!responsibility of user classes. Node size (NodeSize) and the number of -//!nodes allocated per chunk (NodesPerChunk) are known at compile time. template class private_adaptive_node_pool_impl { @@ -56,19 +53,85 @@ class private_adaptive_node_pool_impl public: typedef typename node_slist::node_t node_t; typedef typename node_slist::node_slist_t free_nodes_t; + typedef typename SegmentManagerBase::multiallocation_iterator multiallocation_iterator; + typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain; private: - //This hook will be used to chain the memory chunks - typedef typename bi::make_list_base_hook - , bi::link_mode >::type list_hook_t; + typedef typename bi::make_set_base_hook + < bi::void_pointer + , bi::optimize_size + , bi::constant_time_size + , bi::link_mode >::type multiset_hook_t; + + struct hdr_offset_holder + { + hdr_offset_holder(std::size_t offset = 0) + : hdr_offset(offset) + {} + std::size_t hdr_offset; + }; struct chunk_info_t - : public list_hook_t + : + public hdr_offset_holder, + public multiset_hook_t { //An intrusive list of free node from this chunk free_nodes_t free_nodes; + friend bool operator <(const chunk_info_t &l, const chunk_info_t &r) + { +// { return l.free_nodes.size() < r.free_nodes.size(); } + //Let's order blocks first by free nodes and then by address + //so that highest address fully free chunks are 
deallocated. + //This improves returning memory to the OS (trimming). + const bool is_less = l.free_nodes.size() < r.free_nodes.size(); + const bool is_equal = l.free_nodes.size() == r.free_nodes.size(); + return is_less || (is_equal && (&l < &r)); + } }; - typedef typename bi::make_list >::type chunk_list_t; + typedef typename bi::make_multiset + >::type chunk_multiset_t; + typedef typename chunk_multiset_t::iterator chunk_iterator; + + static const std::size_t MaxAlign = alignment_of::value; + static const std::size_t HdrSize = ((sizeof(chunk_info_t)-1)/MaxAlign+1)*MaxAlign; + static const std::size_t HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign; + static std::size_t calculate_alignment + (std::size_t overhead_percent, std::size_t real_node_size) + { + //to-do: handle real_node_size != node_size + const std::size_t divisor = overhead_percent*real_node_size; + const std::size_t dividend = HdrOffsetSize*100; + std::size_t elements_per_subchunk = (dividend - 1)/divisor + 1; + std::size_t candidate_power_of_2 = + upper_power_of_2(elements_per_subchunk*real_node_size + HdrOffsetSize); + bool overhead_satisfied = false; + while(!overhead_satisfied){ + elements_per_subchunk = (candidate_power_of_2 - HdrOffsetSize)/real_node_size; + std::size_t overhead_size = candidate_power_of_2 - elements_per_subchunk*real_node_size; + if(overhead_size*100/candidate_power_of_2 < overhead_percent){ + overhead_satisfied = true; + } + else{ + candidate_power_of_2 <<= 1; + } + } + return candidate_power_of_2; + } + + static void calculate_num_subchunks + (std::size_t alignment, std::size_t real_node_size, std::size_t elements_per_chunk + ,std::size_t &num_subchunks, std::size_t &real_num_node) + { + std::size_t elements_per_subchunk = (alignment - HdrOffsetSize)/real_node_size; + std::size_t possible_num_subchunk = (elements_per_chunk - 1)/elements_per_subchunk + 1; + std::size_t hdr_subchunk_elements = (alignment - HdrSize - 
SegmentManagerBase::PayloadPerAllocation)/real_node_size; + while(((possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements) < elements_per_chunk){ + ++possible_num_subchunk; + } + num_subchunks = possible_num_subchunk; + real_num_node = (possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements; + } public: //!Segment manager typedef @@ -77,26 +140,25 @@ class private_adaptive_node_pool_impl //!Constructor from a segment manager. Never throws private_adaptive_node_pool_impl ( segment_manager_base_type *segment_mngr_base, std::size_t node_size - , std::size_t nodes_per_chunk, std::size_t max_free_chunks) - : m_node_size(node_size) - , m_max_free_chunks(max_free_chunks) - , m_real_node_size(lcm(m_node_size, sizeof(node_t))) - , m_header_size(min_value(get_rounded_size(sizeof(chunk_info_t), alignment_of::value) - ,get_rounded_size(sizeof(chunk_info_t), m_real_node_size))) - //Round the size to a power of two value. - //This is the total memory size (including payload) that we want to - //allocate from the general-purpose allocator - , m_real_chunk_alignment(upper_power_of_2(m_header_size + m_real_node_size*nodes_per_chunk)) + , std::size_t nodes_per_chunk, std::size_t max_free_chunks + , unsigned char overhead_percent + ) + : m_max_free_chunks(max_free_chunks) + , m_real_node_size(lcm(node_size, std::size_t(alignment_of::value))) + //Round the size to a power of two value. 
+ //This is the total memory size (including payload) that we want to + //allocate from the general-purpose allocator + , m_real_chunk_alignment(calculate_alignment(overhead_percent, m_real_node_size)) //This is the real number of nodes per chunk - , m_real_num_node((m_real_chunk_alignment - SegmentManagerBase::PayloadPerAllocation - m_header_size)/m_real_node_size) + , m_num_subchunks(0) + , m_real_num_node(0) //General purpose allocator , mp_segment_mngr_base(segment_mngr_base) - , m_chunklist() - , m_first_free_chunk(m_chunklist.end()) - //Debug node count - , m_allocated(0) - , m_free_chunks(0) - {} + , m_chunk_multiset() + , m_totally_free_chunks(0) + { + calculate_num_subchunks(m_real_chunk_alignment, m_real_node_size, nodes_per_chunk, m_num_subchunks, m_real_num_node); + } //!Destructor. Deallocates all allocated chunks. Never throws ~private_adaptive_node_pool_impl() @@ -110,60 +172,118 @@ class private_adaptive_node_pool_impl { return detail::get_pointer(mp_segment_mngr_base); } //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc - void *allocate(std::size_t count) + void *allocate_node() { - std::size_t bytes = count*m_node_size; - if(bytes > m_real_node_size){//Normal allocation, no pooling used - void *addr = mp_segment_mngr_base->allocate(bytes); - if(!addr) throw bad_alloc(); - return addr; + priv_invariants(); + //If there are no free nodes we allocate a new block + if (m_chunk_multiset.empty()){ + priv_alloc_chunk(1); } - else //Node allocation, pooling used - return priv_alloc_node(); + //We take the first free node the multiset can't be empty + return priv_take_first_node(); } - + //!Deallocates an array pointed by ptr. 
Never throws - void deallocate(void *ptr, std::size_t count) + void deallocate_node(void *pElem) { - std::size_t bytes = count*m_node_size; - if(bytes > m_real_node_size)//Normal allocation was used - mp_segment_mngr_base->deallocate(ptr); - else //Node allocation was used - priv_dealloc_node(ptr); + priv_invariants(); + chunk_info_t *chunk_info = priv_chunk_from_node(pElem); + assert(chunk_info->free_nodes.size() < m_real_num_node); + //We put the node at the beginning of the free node list + node_t * to_deallocate = static_cast(pElem); + chunk_info->free_nodes.push_front(*to_deallocate); + + chunk_iterator this_chunk(chunk_multiset_t::s_iterator_to(*chunk_info)); + chunk_iterator next_chunk(this_chunk); + ++next_chunk; + + //Cache the free nodes from the chunk + std::size_t this_chunk_free_nodes = this_chunk->free_nodes.size(); + + if(this_chunk_free_nodes == 1){ + m_chunk_multiset.insert(m_chunk_multiset.begin(), *chunk_info); + } + else{ + chunk_iterator next_chunk(this_chunk); + ++next_chunk; + if(next_chunk != m_chunk_multiset.end()){ + std::size_t next_free_nodes = next_chunk->free_nodes.size(); + if(this_chunk_free_nodes > next_free_nodes){ + //Now move the chunk to the new position + m_chunk_multiset.erase(this_chunk); + m_chunk_multiset.insert(*chunk_info); + } + } + } + //Update free chunk count + if(this_chunk_free_nodes == m_real_num_node){ + ++m_totally_free_chunks; + priv_deallocate_free_chunks(m_max_free_chunks); + } + priv_invariants(); } //!Allocates a singly linked list of n nodes ending in null pointer. 
//!can throw boost::interprocess::bad_alloc - void allocate_nodes(const std::size_t n, free_nodes_t &nodes) + void allocate_nodes(multiallocation_chain &nodes, const std::size_t n) { - std::size_t i = 0; + std::size_t old_node_count = nodes.size(); try{ - for(; i < n; ++i){ - nodes.push_front(*priv_alloc_node()); + priv_invariants(); + for(std::size_t i = 0; i != n; ++i){ + //If there are no free nodes we allocate all needed chunks + if (m_chunk_multiset.empty()){ + priv_alloc_chunk(((n - i) - 1)/m_real_num_node + 1); + } + nodes.push_front(priv_take_first_node()); } } catch(...){ - priv_deallocate_nodes(nodes, i); + priv_deallocate_nodes(nodes, nodes.size()); + priv_deallocate_free_chunks(m_max_free_chunks); throw; } + //remove me + assert((n+old_node_count) == (std::size_t)std::distance(nodes.get_it(), multiallocation_iterator())); + priv_invariants(); + } + + //!Allocates n nodes, pointed by the multiallocation_iterator. + //!Can throw boost::interprocess::bad_alloc + multiallocation_iterator allocate_nodes(const std::size_t n) + { + multiallocation_chain chain; + this->allocate_nodes(chain, n); + return chain.get_it(); } //!Deallocates a linked list of nodes. Never throws - void deallocate_nodes(free_nodes_t &nodes) + void deallocate_nodes(multiallocation_chain &nodes) { priv_deallocate_nodes(nodes, nodes.size()); } //!Deallocates the first n nodes of a linked list of nodes. Never throws - void deallocate_nodes(free_nodes_t &nodes, std::size_t n) + void deallocate_nodes(multiallocation_chain &nodes, std::size_t n) { priv_deallocate_nodes(nodes, n); } + //!Deallocates the nodes pointed by the multiallocation iterator. 
Never throws + void deallocate_nodes(multiallocation_iterator it) + { + multiallocation_iterator itend; + while(it != itend){ + void *addr = &*it; + ++it; + deallocate_node(addr); + } + } + void deallocate_free_chunks() { priv_deallocate_free_chunks(0); } std::size_t num_free_nodes() { - typedef typename chunk_list_t::const_iterator citerator; + typedef typename chunk_multiset_t::const_iterator citerator; std::size_t count = 0; - citerator it (m_first_free_chunk), itend(m_chunklist.end()); + citerator it (m_chunk_multiset.begin()), itend(m_chunk_multiset.end()); for(; it != itend; ++it){ count += it->free_nodes.size(); } @@ -172,22 +292,40 @@ class private_adaptive_node_pool_impl void swap(private_adaptive_node_pool_impl &other) { + assert(m_max_free_chunks == other.m_max_free_chunks); + assert(m_real_node_size == other.m_real_node_size); + assert(m_real_chunk_alignment == other.m_real_chunk_alignment); + assert(m_real_num_node == other.m_real_num_node); std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base); - m_chunklist.swap(other.m_chunklist); - std::swap(m_first_free_chunk, other.m_first_free_chunk); - std::swap(m_allocated, other.m_allocated); - std::swap(m_free_chunks, other.m_allocated); + std::swap(m_totally_free_chunks, other.m_totally_free_chunks); + m_chunk_multiset.swap(other.m_chunk_multiset); } private: + node_t *priv_take_first_node() + { + assert(m_chunk_multiset.begin() != m_chunk_multiset.end()); + //We take the first free node the multiset can't be empty + free_nodes_t &free_nodes = m_chunk_multiset.begin()->free_nodes; + node_t *first_node = &free_nodes.front(); + const std::size_t free_nodes_count = free_nodes.size(); + assert(0 != free_nodes_count); + free_nodes.pop_front(); + if(free_nodes_count == 1){ + m_chunk_multiset.erase(m_chunk_multiset.begin()); + } + else if(free_nodes_count == m_real_num_node){ + --m_totally_free_chunks; + } + priv_invariants(); + return first_node; + } - void priv_deallocate_nodes(free_nodes_t &nodes, const 
std::size_t num) + void priv_deallocate_nodes(multiallocation_chain &nodes, const std::size_t num) { assert(nodes.size() >= num); for(std::size_t i = 0; i < num; ++i){ - node_t *to_deallocate = &nodes.front(); - nodes.pop_front(); - deallocate(to_deallocate, 1); + deallocate_node(nodes.pop_front()); } } @@ -197,71 +335,75 @@ class private_adaptive_node_pool_impl class chunk_destroyer { public: - chunk_destroyer(segment_manager_base_type *mngr, std::size_t real_num_node) - : mngr_(mngr), m_real_num_node(real_num_node) + chunk_destroyer(const private_adaptive_node_pool_impl *impl) + : mp_impl(impl) {} - void operator()(typename chunk_list_t::pointer to_deallocate) + void operator()(typename chunk_multiset_t::pointer to_deallocate) { std::size_t free_nodes = to_deallocate->free_nodes.size(); (void)free_nodes; - assert(free_nodes == m_real_num_node); - mngr_->deallocate(detail::get_pointer(to_deallocate)); + assert(free_nodes == mp_impl->m_real_num_node); + assert(0 == to_deallocate->hdr_offset); + hdr_offset_holder *hdr_off_holder = mp_impl->priv_first_subchunk_from_chunk((chunk_info_t*)detail::get_pointer(to_deallocate)); + mp_impl->mp_segment_mngr_base->deallocate(hdr_off_holder); } - segment_manager_base_type *mngr_; - const std::size_t m_real_num_node; + const private_adaptive_node_pool_impl *mp_impl; }; //This macro will activate invariant checking. Slow, but helpful for debugging the code. 
- //#define BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS + #define BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS void priv_invariants() #ifdef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS #undef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS { - typedef typename chunk_list_t::iterator chunk_iterator; - //We iterate though the chunk list to free the memory - chunk_iterator it(m_chunklist.begin()), - itend(m_chunklist.end()), to_deallocate; - for(++it; it != itend; ++it){ - chunk_iterator prev(it); - --prev; - std::size_t sp = prev->free_nodes.size(), - si = it->free_nodes.size(); - assert(sp <= si); - (void)sp; (void)si; + //We iterate through the chunk list to free the memory + chunk_iterator it(m_chunk_multiset.begin()), + itend(m_chunk_multiset.end()), to_deallocate; + if(it != itend){ + for(++it; it != itend; ++it){ + chunk_iterator prev(it); + --prev; + std::size_t sp = prev->free_nodes.size(), + si = it->free_nodes.size(); + assert(sp <= si); + (void)sp; (void)si; + } } - //Check that the total free nodes are correct - it = m_chunklist.begin(); - itend = m_chunklist.end(); - std::size_t total_free = 0; - for(; it != itend; ++it){ - total_free += it->free_nodes.size(); - } - assert(total_free >= m_free_chunks*m_real_num_node); - - //Check that the total totally free chunks are correct - it = m_chunklist.begin(); - itend = m_chunklist.end(); - total_free = 0; - for(; it != itend; ++it){ - total_free += it->free_nodes.size() == m_real_num_node; - } - assert(total_free >= m_free_chunks); - - //The chunk pointed by m_first_free_chunk should point - //to end or to a non-empty chunk - if(m_first_free_chunk != m_chunklist.end()){ - std::size_t s = m_first_free_chunk->free_nodes.size(); - assert(s != 0); + { + //Check that the total free nodes are correct + it = m_chunk_multiset.begin(); + itend = m_chunk_multiset.end(); + std::size_t total_free_nodes = 0; + for(; it != itend; ++it){ + total_free_nodes += it->free_nodes.size(); + } 
+ assert(total_free_nodes >= m_totally_free_chunks*m_real_num_node); } - //All previous nodes of m_first_free_chunk should be 0 - it = m_chunklist.begin(); - itend = m_first_free_chunk; + { + //Check that the total totally free chunks are correct + it = m_chunk_multiset.begin(); + itend = m_chunk_multiset.end(); + std::size_t total_free_chunks = 0; + for(; it != itend; ++it){ + total_free_chunks += (it->free_nodes.size() == m_real_num_node); + } + assert(total_free_chunks == m_totally_free_chunks); + } + { + //Check that header offsets are correct + it = m_chunk_multiset.begin(); for(; it != itend; ++it){ - std::size_t s = it->free_nodes.size(); - assert(s == 0); + hdr_offset_holder *hdr_off_holder = priv_first_subchunk_from_chunk(&*it); + for(std::size_t i = 0, max = m_num_subchunks; i < max; ++i){ + assert(hdr_off_holder->hdr_offset == std::size_t((char*)&*it- (char*)hdr_off_holder)); + assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1))); + assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1))); + hdr_off_holder = (hdr_offset_holder *)((char*)hdr_off_holder + m_real_chunk_alignment); + } + } } } #else @@ -271,165 +413,136 @@ class private_adaptive_node_pool_impl //!Deallocates all used memory. 
Never throws void priv_clear() { - //Check for memory leaks - assert(m_allocated==0); + #ifndef NDEBUG + chunk_iterator it = m_chunk_multiset.begin(); + chunk_iterator itend = m_chunk_multiset.end(); + std::size_t num_free_nodes = 0; + for(; it != itend; ++it){ + //Check for memory leak + assert(it->free_nodes.size() == m_real_num_node); + ++num_free_nodes; + } + assert(num_free_nodes == m_totally_free_chunks); + #endif priv_invariants(); - m_first_free_chunk = m_chunklist.end(); - m_chunklist.clear_and_dispose - (chunk_destroyer(detail::get_pointer(mp_segment_mngr_base), m_real_num_node)); - m_free_chunks = 0; + m_chunk_multiset.clear_and_dispose + (chunk_destroyer(this)); + m_totally_free_chunks = 0; } - chunk_info_t *priv_chunk_from_node(void *node) + chunk_info_t *priv_chunk_from_node(void *node) const { - return (chunk_info_t *)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1))); + hdr_offset_holder *hdr_off_holder = + (hdr_offset_holder*)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1))); + assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1))); + assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1))); + chunk_info_t *chunk = (chunk_info_t *)(((char*)hdr_off_holder) + hdr_off_holder->hdr_offset); + assert(chunk->hdr_offset == 0); + return chunk; } - //!Allocates one node, using the adaptive pool algorithm. 
- //!Never throws - node_t *priv_alloc_node() + hdr_offset_holder *priv_first_subchunk_from_chunk(chunk_info_t *chunk) const { - priv_invariants(); - //If there are no free nodes we allocate a new block - if (m_first_free_chunk == m_chunklist.end()){ - priv_alloc_chunk(); - --m_first_free_chunk; - } - //We take the first free node since m_first_free_chunk can't be end() - chunk_info_t &chunk_info = *m_first_free_chunk; - assert(!chunk_info.free_nodes.empty()); - node_t *first_node = &chunk_info.free_nodes.front(); - if(chunk_info.free_nodes.size() == 1){ - ++m_first_free_chunk; - } - else if(chunk_info.free_nodes.size() == m_real_num_node){ - --m_free_chunks; - } - chunk_info.free_nodes.pop_front(); - ++m_allocated; - priv_invariants(); - return detail::get_pointer(first_node); - } - - //!Deallocates one node, using the adaptive pool algorithm. - //!Never throws - void priv_dealloc_node(void *pElem) - { - typedef typename chunk_list_t::iterator chunk_iterator; - priv_invariants(); - chunk_info_t *chunk_info = priv_chunk_from_node(pElem); - assert(chunk_info->free_nodes.size() < m_real_num_node); - //We put the node at the beginning of the free node list - node_t * to_deallocate = static_cast(pElem); - chunk_info->free_nodes.push_front(*to_deallocate); - chunk_iterator this_chunk(chunk_list_t::s_iterator_to(*chunk_info)); - chunk_iterator next_chunk(this_chunk); - ++next_chunk; - - //If this chunk has more free nodes than the next ones, - //we have to move the chunk in the list to maintain it ordered. 
- //Check if we have to move it - while(next_chunk != m_chunklist.end() && - this_chunk->free_nodes.size() > next_chunk->free_nodes.size()){ - ++next_chunk; - } - //Check if the chunk must be moved - if(++chunk_iterator(this_chunk) != next_chunk){ - //Update m_first_free_chunk iterator if it was pointing to this_chunk - if(m_first_free_chunk == this_chunk){ - ++m_first_free_chunk; - } - //Update m_first_free_chunk if the moved chunk crosses the empty boundary - else if(this_chunk->free_nodes.size() == 1){ - m_first_free_chunk = chunk_list_t::s_iterator_to(*chunk_info); - } - //Now move the chunk to the new position - m_chunklist.erase(this_chunk); - m_chunklist.insert(next_chunk, *chunk_info); - } - //Update m_first_free_chunk if the chunk crosses the empty boundary - else if(this_chunk->free_nodes.size() == 1){ - --m_first_free_chunk; - } - - if(this_chunk->free_nodes.size() == m_real_num_node){ - ++m_free_chunks; - } - - assert(m_allocated>0); - --m_allocated; - priv_invariants(); - priv_deallocate_free_chunks(m_max_free_chunks); - priv_invariants(); + hdr_offset_holder *hdr_off_holder = (hdr_offset_holder*) + (((char*)chunk) - (m_num_subchunks-1)*m_real_chunk_alignment); + assert(hdr_off_holder->hdr_offset == std::size_t((char*)chunk - (char*)hdr_off_holder)); + assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1))); + assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1))); + return hdr_off_holder; } void priv_deallocate_free_chunks(std::size_t max_free_chunks) { - typedef typename chunk_list_t::iterator chunk_iterator; + priv_invariants(); //Now check if we've reached the free nodes limit //and check if we have free chunks. 
If so, deallocate as much //as we can to stay below the limit - while(m_free_chunks > max_free_chunks && - m_chunklist.back().free_nodes.size() == m_real_num_node){ - chunk_iterator it(--m_chunklist.end()); - if(it == m_first_free_chunk) - ++m_first_free_chunk; //m_first_free_chunk is now equal to end() - m_chunklist.erase_and_dispose(it, chunk_destroyer(detail::get_pointer(mp_segment_mngr_base),m_real_num_node)); - --m_free_chunks; + for( chunk_iterator itend = m_chunk_multiset.end() + ; m_totally_free_chunks > max_free_chunks + ; --m_totally_free_chunks + ){ + assert(!m_chunk_multiset.empty()); + chunk_iterator it = itend; + --it; + std::size_t num_nodes = it->free_nodes.size(); + assert(num_nodes == m_real_num_node); + (void)num_nodes; + m_chunk_multiset.erase_and_dispose + (it, chunk_destroyer(this)); } } - //!Allocates a chunk of nodes. Can throw boost::interprocess::bad_alloc - void priv_alloc_chunk() + //!Allocates a several chunks of nodes. Can throw boost::interprocess::bad_alloc + void priv_alloc_chunk(std::size_t n) { - //We allocate a new NodeBlock and put it as first - //element in the free Node list - std::size_t real_chunk_size = m_real_chunk_alignment - SegmentManagerBase::PayloadPerAllocation; - char *pNode = detail::char_ptr_cast - (mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment)); - if(!pNode) throw bad_alloc(); - chunk_info_t *c_info = new(pNode)chunk_info_t; - m_chunklist.push_back(*c_info); - - pNode += m_header_size; - //We initialize all Nodes in Node Block to insert - //them in the free Node list - for(std::size_t i = 0; i < m_real_num_node; ++i){ - c_info->free_nodes.push_front(*new (pNode) node_t); - pNode += m_real_node_size; + std::size_t real_chunk_size = m_real_chunk_alignment*m_num_subchunks - SegmentManagerBase::PayloadPerAllocation; + std::size_t elements_per_subchunk = (m_real_chunk_alignment - HdrOffsetSize)/m_real_node_size; + std::size_t hdr_subchunk_elements = (m_real_chunk_alignment - HdrSize - 
SegmentManagerBase::PayloadPerAllocation)/m_real_node_size; + + for(std::size_t i = 0; i != n; ++i){ + //We allocate a new NodeBlock and put it the last + //element of the tree + char *mem_address = detail::char_ptr_cast + (mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment)); + if(!mem_address) throw std::bad_alloc(); + ++m_totally_free_chunks; + + //First initialize header information on the last subchunk + char *hdr_addr = mem_address + m_real_chunk_alignment*(m_num_subchunks-1); + chunk_info_t *c_info = new(hdr_addr)chunk_info_t; + //Some structural checks + assert(static_cast(&static_cast(c_info)->hdr_offset) == + static_cast(c_info)); + typename free_nodes_t::iterator prev_insert_pos = c_info->free_nodes.before_begin(); + for( std::size_t subchunk = 0, maxsubchunk = m_num_subchunks - 1 + ; subchunk < maxsubchunk + ; ++subchunk, mem_address += m_real_chunk_alignment){ + //Initialize header offset mark + new(mem_address) hdr_offset_holder(std::size_t(hdr_addr - mem_address)); + char *pNode = mem_address + HdrOffsetSize; + for(std::size_t i = 0; i < elements_per_subchunk; ++i){ + prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t); + pNode += m_real_node_size; + } + } + { + char *pNode = hdr_addr + HdrSize; + //We initialize all Nodes in Node Block to insert + //them in the free Node list + for(std::size_t i = 0; i < hdr_subchunk_elements; ++i){ + prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t); + pNode += m_real_node_size; + } + } + //Insert the chunk after the free node list is full + m_chunk_multiset.insert(m_chunk_multiset.end(), *c_info); } - ++m_free_chunks; } private: typedef typename pointer_to_other ::type segment_mngr_base_ptr_t; - const std::size_t m_node_size; const std::size_t m_max_free_chunks; const std::size_t m_real_node_size; - const std::size_t m_header_size; //Round the size to a power of two value. 
//This is the total memory size (including payload) that we want to //allocate from the general-purpose allocator const std::size_t m_real_chunk_alignment; + std::size_t m_num_subchunks; //This is the real number of nodes per chunk - const std::size_t m_real_num_node; + //const + std::size_t m_real_num_node; segment_mngr_base_ptr_t mp_segment_mngr_base;//Segment manager - chunk_list_t m_chunklist; //Intrusive chunk list - typename chunk_list_t::iterator m_first_free_chunk; //Iterator to the active chunk - std::size_t m_allocated; //Used nodes for debugging - std::size_t m_free_chunks; //Free chunks + chunk_multiset_t m_chunk_multiset; //Intrusive chunk list + std::size_t m_totally_free_chunks; //Free chunks }; -//!Pooled shared memory allocator using an smart adaptive pool. Includes -//!a reference count but the class does not delete itself, this is -//!responsibility of user classes. Node size (NodeSize) and the number of -//!nodes allocated per chunk (NodesPerChunk) are known at compile time. template< class SegmentManager , std::size_t NodeSize , std::size_t NodesPerChunk , std::size_t MaxFreeChunks + , unsigned char OverheadPercent > class private_adaptive_node_pool : public private_adaptive_node_pool_impl @@ -448,8 +561,8 @@ class private_adaptive_node_pool static const std::size_t nodes_per_chunk = NodesPerChunk; //!Constructor from a segment manager. Never throws - private_adaptive_node_pool(segment_manager *segmeng_mngr) - : base_t(segmeng_mngr, NodeSize, NodesPerChunk, MaxFreeChunks) + private_adaptive_node_pool(segment_manager *segment_mngr) + : base_t(segment_mngr, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent) {} //!Returns the segment manager. Never throws @@ -462,117 +575,25 @@ class private_adaptive_node_pool //!responsibility of user classes. 
Node size (NodeSize) and the number of //!nodes allocated per chunk (NodesPerChunk) are known at compile time template< class SegmentManager - , class Mutex , std::size_t NodeSize , std::size_t NodesPerChunk , std::size_t MaxFreeChunks + , unsigned char OverheadPercent > class shared_adaptive_node_pool - : public private_adaptive_node_pool - + : public detail::shared_pool_impl + < private_adaptive_node_pool + + > { - private: - typedef typename SegmentManager::void_pointer void_pointer; - typedef private_adaptive_node_pool - private_node_allocator_t; - public: - //!Segment manager typedef - typedef SegmentManager segment_manager; - typedef typename private_node_allocator_t::free_nodes_t free_nodes_t; - - //!Constructor from a segment manager. Never throws - shared_adaptive_node_pool(segment_manager *segment_mgnr) - : private_node_allocator_t(segment_mgnr){} - - //!Destructor. Deallocates all allocated chunks. Never throws - ~shared_adaptive_node_pool() + typedef detail::shared_pool_impl + < private_adaptive_node_pool + + > base_t; + public: + shared_adaptive_node_pool(SegmentManager *segment_mgnr) + : base_t(segment_mgnr) {} - - //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc - void *allocate(std::size_t count) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - return private_node_allocator_t::allocate(count); - } - - //!Deallocates an array pointed by ptr. Never throws - void deallocate(void *ptr, std::size_t count) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate(ptr, count); - } - - //!Allocates a singly linked list of n nodes ending in null pointer. 
- //!can throw boost::interprocess::bad_alloc - void allocate_nodes(std::size_t n, free_nodes_t &nodes) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - return private_node_allocator_t::allocate_nodes(n, nodes); - } - - //!Deallocates a linked list of nodes ending in null pointer. Never throws - void deallocate_nodes(free_nodes_t &nodes, std::size_t num) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate_nodes(nodes, num); - } - - //!Deallocates a linked list of nodes ending in null pointer. Never throws - void deallocate_nodes(free_nodes_t &nodes) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate_nodes(nodes); - } - - //!Deallocates all the free chunks of memory. Never throws - void deallocate_free_chunks() - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate_free_chunks(); - } - - //!Increments internal reference count and returns new count. Never throws - std::size_t inc_ref_count() - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - return ++m_header.m_usecount; - } - - //!Decrements internal reference count and returns new count. 
Never throws - std::size_t dec_ref_count() - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - assert(m_header.m_usecount > 0); - return --m_header.m_usecount; - } - - private: - //!This struct includes needed data and derives from - //!interprocess_mutex to allow EBO when using null_mutex - struct header_t : Mutex - { - std::size_t m_usecount; //Number of attached allocators - - header_t() - : m_usecount(0) {} - } m_header; }; } //namespace detail { diff --git a/include/boost/interprocess/allocators/detail/allocator_common.hpp b/include/boost/interprocess/allocators/detail/allocator_common.hpp new file mode 100644 index 0000000..7c5bbcc --- /dev/null +++ b/include/boost/interprocess/allocators/detail/allocator_common.hpp @@ -0,0 +1,760 @@ +////////////////////////////////////////////////////////////////////////////// +// +// (C) Copyright Ion Gaztanaga 2008. Distributed under the Boost +// Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// See http://www.boost.org/libs/interprocess for documentation. 
+// +////////////////////////////////////////////////////////////////////////////// + +#ifndef BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP +#define BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP + +#include +#include +#include +#include +#include //pointer_to_other, get_pointer +#include //std::pair +#include //boost::addressof +#include //BOOST_ASSERT +#include //bad_alloc +#include //scoped_lock +#include //allocation_type +#include //std::swap + + +namespace boost { +namespace interprocess { +namespace detail { + +//!Object function that creates the node allocator if it is not created and +//!increments reference count if it is already created +template +struct get_or_create_node_pool_func +{ + + //!This connects or constructs the unique instance of node_pool_t + //!Can throw boost::interprocess::bad_alloc + void operator()() + { + //Find or create the node_pool_t + mp_node_pool = mp_segment_manager->template find_or_construct + (unique_instance)(mp_segment_manager); + //If valid, increment link count + if(mp_node_pool != 0) + mp_node_pool->inc_ref_count(); + } + + //!Constructor. Initializes function + //!object parameters + get_or_create_node_pool_func(typename NodePool::segment_manager *mngr) + : mp_segment_manager(mngr){} + + NodePool *mp_node_pool; + typename NodePool::segment_manager *mp_segment_manager; +}; + +template +inline NodePool *get_or_create_node_pool(typename NodePool::segment_manager *mgnr) +{ + detail::get_or_create_node_pool_func func(mgnr); + mgnr->atomic_func(func); + return func.mp_node_pool; +} + +//!Object function that decrements the reference count. If the count +//!reaches to zero destroys the node allocator from memory. +//!Never throws +template +struct destroy_if_last_link_func +{ + //!Decrements reference count and destroys the object if there is no + //!more attached allocators. 
Never throws + void operator()() + { + //If not the last link return + if(mp_node_pool->dec_ref_count() != 0) return; + + //Last link, let's destroy the segment_manager + mp_node_pool->get_segment_manager()->template destroy(unique_instance); + } + + //!Constructor. Initializes function + //!object parameters + destroy_if_last_link_func(NodePool *pool) + : mp_node_pool(pool) + {} + + NodePool *mp_node_pool; +}; + +//!Destruction function, initializes and executes destruction function +//!object. Never throws +template +inline void destroy_node_pool_if_last_link(NodePool *pool) +{ + //Get segment manager + typename NodePool::segment_manager *mngr = pool->get_segment_manager(); + //Execute destruction functor atomically + destroy_if_last_link_funcfunc(pool); + mngr->atomic_func(func); +} + +template +class cache_impl +{ + typedef typename NodePool::segment_manager:: + void_pointer void_pointer; + typedef typename pointer_to_other + ::type node_pool_ptr; + typedef typename NodePool::multiallocation_chain multiallocation_chain; + node_pool_ptr mp_node_pool; + multiallocation_chain m_cached_nodes; + std::size_t m_max_cached_nodes; + + public: + typedef typename NodePool::multiallocation_iterator multiallocation_iterator; + typedef typename NodePool::segment_manager segment_manager; + + cache_impl(segment_manager *segment_mngr, std::size_t max_cached_nodes) + : mp_node_pool(get_or_create_node_pool(segment_mngr)) + , m_max_cached_nodes(max_cached_nodes) + {} + + cache_impl(const cache_impl &other) + : mp_node_pool(other.get_node_pool()) + , m_max_cached_nodes(other.get_max_cached_nodes()) + { + mp_node_pool->inc_ref_count(); + } + + ~cache_impl() + { + this->deallocate_all_cached_nodes(); + detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool)); + } + + NodePool *get_node_pool() const + { return detail::get_pointer(mp_node_pool); } + + segment_manager *get_segment_manager() const + { return mp_node_pool->get_segment_manager(); } + + std::size_t 
get_max_cached_nodes() const + { return m_max_cached_nodes; } + + void *cached_allocation() + { + //If don't have any cached node, we have to get a new list of free nodes from the pool + if(m_cached_nodes.empty()){ + mp_node_pool->allocate_nodes(m_cached_nodes, m_max_cached_nodes/2); + } + return m_cached_nodes.pop_front(); + } + + multiallocation_iterator cached_allocation(std::size_t n) + { + multiallocation_chain chain; + std::size_t count = n; + BOOST_TRY{ + //If don't have any cached node, we have to get a new list of free nodes from the pool + while(!m_cached_nodes.empty() && count--){ + void *ret = m_cached_nodes.pop_front(); + chain.push_back(ret); + } + + if(chain.size() != n){ + mp_node_pool->allocate_nodes(chain, n - chain.size()); + } + assert(chain.size() == n); + chain.splice_back(m_cached_nodes); + return multiallocation_iterator(chain.get_it()); + } + BOOST_CATCH(...){ + this->cached_deallocation(multiallocation_iterator(chain.get_it())); + throw; + } + BOOST_CATCH_END + } + + void cached_deallocation(void *ptr) + { + //Check if cache is full + if(m_cached_nodes.size() >= m_max_cached_nodes){ + //This only occurs if this allocator deallocate memory allocated + //with other equal allocator. Since the cache is full, and more + //deallocations are probably coming, we'll make some room in cache + //in a single, efficient multi node deallocation. + this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2); + } + m_cached_nodes.push_front(ptr); + } + + void cached_deallocation(multiallocation_iterator it) + { + multiallocation_iterator itend; + + while(it != itend){ + void *addr = &*it; + ++it; + m_cached_nodes.push_front(addr); + } + + //Check if cache is full + if(m_cached_nodes.size() >= m_max_cached_nodes){ + //This only occurs if this allocator deallocate memory allocated + //with other equal allocator. 
Since the cache is full, and more + //deallocations are probably coming, we'll make some room in cache + //in a single, efficient multi node deallocation. + this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2); + } + } + + //!Sets the new max cached nodes value. This can provoke deallocations + //!if "newmax" is less than current cached nodes. Never throws + void set_max_cached_nodes(std::size_t newmax) + { + m_max_cached_nodes = newmax; + this->priv_deallocate_remaining_nodes(); + } + + //!Frees all cached nodes. + //!Never throws + void deallocate_all_cached_nodes() + { + if(m_cached_nodes.empty()) return; + mp_node_pool->deallocate_nodes(m_cached_nodes); + } + + private: + //!Frees all cached nodes at once. + //!Never throws + void priv_deallocate_remaining_nodes() + { + if(m_cached_nodes.size() > m_max_cached_nodes){ + priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes); + } + } + + //!Frees n cached nodes at once. Never throws + void priv_deallocate_n_nodes(std::size_t n) + { + //Deallocate all new linked list at once + mp_node_pool->deallocate_nodes(m_cached_nodes, n); + } +}; + +template +class array_allocation_impl +{ + const Derived *derived() const + { return static_cast(this); } + Derived *derived() + { return static_cast(this); } + + typedef typename SegmentManager::void_pointer void_pointer; + + public: + typedef typename detail:: + pointer_to_other::type pointer; + typedef typename detail:: + pointer_to_other::type const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef transform_iterator + < typename SegmentManager:: + multiallocation_iterator + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; + + public: + //!Returns maximum the number 
of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const + { + return (size_type)this->derived()->get_segment_manager()->size(detail::get_pointer(p))/sizeof(T); + } + + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0) + { + return this->derived()->get_segment_manager()->allocation_command + (command, limit_size, preferred_size, received_size, detail::get_pointer(reuse)); + } + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements) + { + return multiallocation_iterator + (this->derived()->get_segment_manager()->allocate_many(sizeof(T)*elem_size, num_elements)); + } + + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements) + { + return multiallocation_iterator + (this->derived()->get_segment_manager()->allocate_many(elem_sizes, n_elements, sizeof(T))); + } + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) 
+ void deallocate_many(multiallocation_iterator it) + { return this->derived()->get_segment_manager()->deallocate_many(it.base()); } + + //!Returns the number of elements that could be + //!allocated. Never throws + size_type max_size() const + { return this->derived()->get_segment_manager()->get_size()/sizeof(T); } + + //!Returns address of mutable object. + //!Never throws + pointer address(reference value) const + { return pointer(boost::addressof(value)); } + + //!Returns address of non mutable object. + //!Never throws + const_pointer address(const_reference value) const + { return const_pointer(boost::addressof(value)); } + + //!Default construct an object. + //!Throws if T's default constructor throws + void construct(const pointer &ptr) + { new(detail::get_pointer(ptr)) value_type; } + + //!Destroys object. Throws if object's + //!destructor throws + void destroy(const pointer &ptr) + { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); } +}; + + +template +class node_pool_allocation_impl + : public array_allocation_impl + < Derived + , T + , SegmentManager> +{ + const Derived *derived() const + { return static_cast(this); } + Derived *derived() + { return static_cast(this); } + + typedef typename SegmentManager::void_pointer void_pointer; + typedef typename detail:: + pointer_to_other::type cvoid_pointer; + + public: + typedef typename detail:: + pointer_to_other::type pointer; + typedef typename detail:: + pointer_to_other::type const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef transform_iterator + < typename SegmentManager:: + multiallocation_iterator + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; + + public: + //!Allocate memory for an array of count elements. 
+ //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate(size_type count, cvoid_pointer hint = 0) + { + (void)hint; + if(count > this->max_size()) + throw bad_alloc(); + else if(Version == 1 && count == 1) + return pointer(static_cast(this->derived()->get_node_pool()->allocate_node())); + else + return pointer(static_cast + (this->derived()->get_node_pool()->get_segment_manager()->allocate(sizeof(T)*count))); + } + + //!Deallocate allocated memory. Never throws + void deallocate(const pointer &ptr, size_type count) + { + (void)count; + if(Version == 1 && count == 1) + this->derived()->get_node_pool()->deallocate_node(detail::get_pointer(ptr)); + else + this->derived()->get_node_pool()->get_segment_manager()->deallocate(detail::get_pointer(ptr)); + } + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one() + { return pointer(static_cast(this->derived()->get_node_pool()->allocate_node())); } + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements) + { return multiallocation_iterator(this->derived()->get_node_pool()->allocate_nodes(num_elements)); } + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). 
Never throws + void deallocate_one(const pointer &p) + { this->derived()->get_node_pool()->deallocate_node(detail::get_pointer(p)); } + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it) + { this->derived()->get_node_pool()->deallocate_nodes(it.base()); } + + //!Deallocates all free chunks of the pool + void deallocate_free_chunks() + { this->derived()->get_node_pool()->deallocate_free_chunks(); } +}; + +template +class cached_allocator_impl + : public array_allocation_impl + , T, typename NodePool::segment_manager> +{ + cached_allocator_impl & operator=(const cached_allocator_impl& other); + typedef array_allocation_impl + < cached_allocator_impl + + , T + , typename NodePool::segment_manager> base_t; + + public: + typedef NodePool node_pool_t; + typedef typename NodePool::segment_manager segment_manager; + typedef typename segment_manager::void_pointer void_pointer; + typedef typename detail:: + pointer_to_other::type cvoid_pointer; + typedef typename base_t::pointer pointer; + typedef typename base_t::size_type size_type; + typedef typename base_t::multiallocation_iterator multiallocation_iterator; + typedef typename base_t::multiallocation_chain multiallocation_chain; + typedef typename base_t::value_type value_type; + + public: + enum { DEFAULT_MAX_CACHED_NODES = 64 }; + + cached_allocator_impl(segment_manager *segment_mngr, std::size_t max_cached_nodes) + : m_cache(segment_mngr, max_cached_nodes) + {} + + cached_allocator_impl(const cached_allocator_impl &other) + : m_cache(other.m_cache) + {} + + //!Copy constructor from related cached_adaptive_pool_base. 
If not present, constructs + //!a node pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + template + cached_allocator_impl + (const cached_allocator_impl + &other) + : m_cache(other.get_segment_manager(), other.get_max_cached_nodes()) + {} + + //!Returns a pointer to the node pool. + //!Never throws + node_pool_t* get_node_pool() const + { return m_cache.get_node_pool(); } + + //!Returns the segment manager. + //!Never throws + segment_manager* get_segment_manager()const + { return m_cache.get_segment_manager(); } + + //!Sets the new max cached nodes value. This can provoke deallocations + //!if "newmax" is less than current cached nodes. Never throws + void set_max_cached_nodes(std::size_t newmax) + { m_cache.set_max_cached_nodes(newmax); } + + //!Returns the max cached nodes parameter. + //!Never throws + std::size_t get_max_cached_nodes() const + { return m_cache.get_max_cached_nodes(); } + + //!Allocate memory for an array of count elements. + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate(size_type count, cvoid_pointer hint = 0) + { + (void)hint; + void * ret; + if(count > this->max_size()) + throw bad_alloc(); + else if(Version == 1 && count == 1){ + ret = m_cache.cached_allocation(); + } + else{ + ret = this->get_segment_manager()->allocate(sizeof(T)*count); + } + return pointer(static_cast(ret)); + } + + //!Deallocate allocated memory. Never throws + void deallocate(const pointer &ptr, size_type count) + { + (void)count; + if(Version == 1 && count == 1){ + m_cache.cached_deallocation(detail::get_pointer(ptr)); + } + else{ + this->get_segment_manager()->deallocate(detail::get_pointer(ptr)); + } + } + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). 
+ //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one() + { return pointer(static_cast(this->m_cache.cached_allocation())); } + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements) + { return multiallocation_iterator(this->m_cache.cached_allocation(num_elements)); } + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p) + { this->m_cache.cached_deallocation(detail::get_pointer(p)); } + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it) + { m_cache.cached_deallocation(it.base()); } + + //!Deallocates all free chunks of the pool + void deallocate_free_chunks() + { m_cache.get_node_pool()->deallocate_free_chunks(); } + + //!Swaps allocators. Does not throw. If each allocator is placed in a + //!different shared memory segments, the result is undefined. 
+ friend void swap(cached_allocator_impl &alloc1, cached_allocator_impl &alloc2) + { + detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); + alloc1.m_cached_nodes.swap(alloc2.m_cached_nodes); + detail::do_swap(alloc1.m_max_cached_nodes, alloc2.m_max_cached_nodes); + } + + void deallocate_cache() + { m_cache.deallocate_all_cached_nodes(); } + + /// @cond + private: + cache_impl m_cache; +}; + +//!Equality test for same type of +//!cached_allocator_impl +template inline +bool operator==(const cached_allocator_impl &alloc1, + const cached_allocator_impl &alloc2) + { return alloc1.get_node_pool() == alloc2.get_node_pool(); } + +//!Inequality test for same type of +//!cached_allocator_impl +template inline +bool operator!=(const cached_allocator_impl &alloc1, + const cached_allocator_impl &alloc2) + { return alloc1.get_node_pool() != alloc2.get_node_pool(); } + + +//!Pooled shared memory allocator using adaptive pool. Includes +//!a reference count but the class does not delete itself, this is +//!responsibility of user classes. Node size (NodeSize) and the number of +//!nodes allocated per chunk (NodesPerChunk) are known at compile time +template +class shared_pool_impl + : public private_node_allocator_t +{ + public: + //!Segment manager typedef + typedef typename private_node_allocator_t::segment_manager segment_manager; + typedef typename private_node_allocator_t:: + multiallocation_iterator multiallocation_iterator; + typedef typename private_node_allocator_t:: + multiallocation_chain multiallocation_chain; + + private: + typedef typename segment_manager::mutex_family::mutex_type mutex_type; + + public: + //!Constructor from a segment manager. Never throws + shared_pool_impl(segment_manager *segment_mngr) + : private_node_allocator_t(segment_mngr) + {} + + //!Destructor. Deallocates all allocated chunks. Never throws + ~shared_pool_impl() + {} + + //!Allocates array of count elements. 
Can throw boost::interprocess::bad_alloc + void *allocate_node() + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + return private_node_allocator_t::allocate_node(); + } + + //!Deallocates an array pointed by ptr. Never throws + void deallocate_node(void *ptr) + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + private_node_allocator_t::deallocate_node(ptr); + } + + //!Allocates a singly linked list of n nodes ending in null pointer. + //!can throw boost::interprocess::bad_alloc + void allocate_nodes(multiallocation_chain &nodes, std::size_t n) + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + return private_node_allocator_t::allocate_nodes(nodes, n); + } + + //!Allocates n nodes, pointed by the multiallocation_iterator. + //!Can throw boost::interprocess::bad_alloc + multiallocation_iterator allocate_nodes(const std::size_t n) + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + return private_node_allocator_t::allocate_nodes(n); + } + + //!Deallocates a linked list of nodes ending in null pointer. Never throws + void deallocate_nodes(multiallocation_chain &nodes, std::size_t num) + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + private_node_allocator_t::deallocate_nodes(nodes, num); + } + + //!Deallocates a linked list of nodes ending in null pointer. Never throws + void deallocate_nodes(multiallocation_chain &nodes) + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + private_node_allocator_t::deallocate_nodes(nodes); + } + + //!Deallocates the nodes pointed by the multiallocation iterator. 
Never throws + void deallocate_nodes(multiallocation_iterator it) + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + private_node_allocator_t::deallocate_nodes(it); + } + + //!Deallocates all the free chunks of memory. Never throws + void deallocate_free_chunks() + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + private_node_allocator_t::deallocate_free_chunks(); + } + + //!Deallocates all used memory from the common pool. + //!Precondition: all nodes allocated from this pool should + //!already be deallocated. Otherwise, undefined behavior. Never throws + void purge_chunks() + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + private_node_allocator_t::purge_chunks(); + } + + //!Increments internal reference count and returns new count. Never throws + std::size_t inc_ref_count() + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + return ++m_header.m_usecount; + } + + //!Decrements internal reference count and returns new count. 
Never throws + std::size_t dec_ref_count() + { + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + assert(m_header.m_usecount > 0); + return --m_header.m_usecount; + } + + private: + //!This struct includes needed data and derives from + //!interprocess_mutex to allow EBO when using null_mutex + struct header_t : mutex_type + { + std::size_t m_usecount; //Number of attached allocators + + header_t() + : m_usecount(0) {} + } m_header; +}; + +} //namespace detail { +} //namespace interprocess { +} //namespace boost { + +#include + +#endif //#ifndef BOOST_INTERPROCESS_DETAIL_NODE_ALLOCATOR_COMMON_HPP diff --git a/include/boost/interprocess/allocators/detail/node_pool.hpp b/include/boost/interprocess/allocators/detail/node_pool.hpp index 726b99f..ce2c74b 100644 --- a/include/boost/interprocess/allocators/detail/node_pool.hpp +++ b/include/boost/interprocess/allocators/detail/node_pool.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -24,7 +24,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -51,10 +53,14 @@ class private_node_pool_impl typedef typename node_slist::slist_hook_t slist_hook_t; typedef typename node_slist::node_t node_t; typedef typename node_slist::node_slist_t free_nodes_t; + typedef typename SegmentManagerBase::multiallocation_iterator multiallocation_iterator; + typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain; private: - typedef typename bi::make_slist < node_t, bi::base_hook - , bi::constant_time_size >::type chunkslist_t; + typedef typename bi::make_slist + < node_t, bi::base_hook + , bi::linear + , bi::constant_time_size >::type chunkslist_t; public: //!Segment manager typedef @@ -62,10 +68,8 @@ class private_node_pool_impl //!Constructor from a segment manager. Never throws private_node_pool_impl(segment_manager_base_type *segment_mngr_base, std::size_t node_size, std::size_t nodes_per_chunk) - : m_node_size(node_size) - , m_nodes_per_chunk(nodes_per_chunk) - , m_real_node_size(detail::lcm(node_size, sizeof(node_t))) - , m_block_size(detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, sizeof(node_t))) + : m_nodes_per_chunk(nodes_per_chunk) + , m_real_node_size(detail::lcm(node_size, std::size_t(alignment_of::value))) //General purpose allocator , mp_segment_mngr_base(segment_mngr_base) , m_chunklist() @@ -76,7 +80,7 @@ class private_node_pool_impl //!Destructor. Deallocates all allocated chunks. Never throws ~private_node_pool_impl() - { priv_clear(); } + { this->purge_chunks(); } std::size_t get_real_num_node() const { return m_nodes_per_chunk; } @@ -86,116 +90,73 @@ class private_node_pool_impl { return detail::get_pointer(mp_segment_mngr_base); } //!Allocates array of count elements. 
Can throw boost::interprocess::bad_alloc - void *allocate(std::size_t count) - { - std::size_t bytes = count*m_node_size; - if(bytes > m_real_node_size){//Normal allocation, no pooling used - void *addr = mp_segment_mngr_base->allocate(bytes); - if(!addr) throw bad_alloc(); - return addr; - } - else //Node allocation, pooling used - return priv_alloc_node(); - } + void *allocate_node() + { return priv_alloc_node(); } //!Deallocates an array pointed by ptr. Never throws - void deallocate(void *ptr, std::size_t count) - { - std::size_t bytes = count*m_node_size; - if(bytes > m_real_node_size)//Normal allocation was used - mp_segment_mngr_base->deallocate(ptr); - else //Node allocation was used - priv_dealloc_node(ptr); - } + void deallocate_node(void *ptr) + { priv_dealloc_node(ptr); } - //!Allocates a singly linked list of n nodes ending in null pointer. + //!Allocates a singly linked list of n nodes ending in null pointer and pushes them in the chain. //!can throw boost::interprocess::bad_alloc - void allocate_nodes(const std::size_t n, free_nodes_t &nodes) + void allocate_nodes(multiallocation_chain &nodes, const std::size_t n) { std::size_t i = 0; try{ for(; i < n; ++i){ - nodes.push_front(*priv_alloc_node()); + nodes.push_front(priv_alloc_node()); } } catch(...){ - priv_deallocate_nodes(nodes, i); + this->deallocate_nodes(nodes, i); throw; } } - //!Deallocates a linked list of nodes. Never throws - void deallocate_nodes(free_nodes_t &nodes) - { priv_deallocate_nodes(nodes, nodes.size()); } - - //!Deallocates the first n nodes of a linked list of nodes. Never throws - void deallocate_nodes(free_nodes_t &nodes, std::size_t n) - { priv_deallocate_nodes(nodes, n); } - - //!Deallocates all the free chunks of memory. 
Never throws - void deallocate_free_chunks() - { priv_deallocate_free_chunks(); } - - std::size_t num_free_nodes() - { return m_freelist.size(); } - - void swap(private_node_pool_impl &other) + //!Allocates a singly linked list of n nodes ending in null pointer + //!can throw boost::interprocess::bad_alloc + multiallocation_iterator allocate_nodes(const std::size_t n) { - std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base); - m_chunklist.swap(other.m_chunklist); - m_freelist.swap(other.m_freelist); - std::swap(m_allocated, other.m_allocated); + multiallocation_chain nodes; + std::size_t i = 0; + try{ + for(; i < n; ++i){ + nodes.push_front(priv_alloc_node()); + } + } + catch(...){ + this->deallocate_nodes(nodes, i); + throw; + } + return nodes.get_it(); } - private: + //!Deallocates a linked list of nodes. Never throws + void deallocate_nodes(multiallocation_chain &nodes) + { this->deallocate_nodes(nodes.get_it()); } - void priv_deallocate_nodes(free_nodes_t &nodes, const std::size_t num) + //!Deallocates the first n nodes of a linked list of nodes. Never throws + void deallocate_nodes(multiallocation_chain &nodes, std::size_t num) { assert(nodes.size() >= num); for(std::size_t i = 0; i < num; ++i){ - node_t *to_deallocate = &nodes.front(); - nodes.pop_front(); - deallocate(to_deallocate, 1); + deallocate_node(nodes.pop_front()); } } - struct push_in_list + //!Deallocates the nodes pointed by the multiallocation iterator. 
Never throws + void deallocate_nodes(multiallocation_iterator it) { - push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it) - : slist_(l), last_it_(it) - {} - - void operator()(typename free_nodes_t::pointer p) const - { - slist_.push_front(*p); - if(slist_.size() == 1){ //Cache last element - ++last_it_ = slist_.begin(); - } + multiallocation_iterator itend; + while(it != itend){ + void *addr = &*it; + ++it; + deallocate_node(addr); } + } - private: - free_nodes_t &slist_; - typename free_nodes_t::iterator &last_it_; - }; - - struct is_between - : std::unary_function - { - is_between(const void *addr, std::size_t size) - : beg_((const char *)addr), end_(beg_+size) - {} - - bool operator()(typename free_nodes_t::const_reference v) const - { - return (beg_ <= (const char *)&v && - end_ > (const char *)&v); - } - private: - const char * beg_; - const char * end_; - }; - - void priv_deallocate_free_chunks() + //!Deallocates all the free chunks of memory. Never throws + void deallocate_free_chunks() { typedef typename free_nodes_t::iterator nodelist_iterator; typename chunkslist_t::iterator bit(m_chunklist.before_begin()), @@ -204,16 +165,19 @@ class private_node_pool_impl free_nodes_t backup_list; nodelist_iterator backup_list_last = backup_list.before_begin(); + //Execute the algorithm and get an iterator to the last value + std::size_t blocksize = detail::get_rounded_size + (m_real_node_size*m_nodes_per_chunk, alignment_of::value); + while(it != itend){ //Collect all the nodes from the chunk pointed by it //and push them in the list free_nodes_t free_nodes; nodelist_iterator last_it = free_nodes.before_begin(); - const void *addr = get_chunk_from_hook(&*it); + const void *addr = get_chunk_from_hook(&*it, blocksize); - //Execute the algorithm and get an iterator to the last value m_freelist.remove_and_dispose_if - (is_between(addr, m_block_size), push_in_list(free_nodes, last_it)); + (is_between(addr, blocksize), push_in_list(free_nodes, last_it)); //If 
the number of nodes is equal to m_nodes_per_chunk //this means that the block can be deallocated @@ -253,23 +217,76 @@ class private_node_pool_impl , backup_list.size()); } - //!Deallocates all used memory. Never throws - void priv_clear() + std::size_t num_free_nodes() + { return m_freelist.size(); } + + //!Deallocates all used memory. Precondition: all nodes allocated from this pool should + //!already be deallocated. Otherwise, undefined behaviour. Never throws + void purge_chunks() { //check for memory leaks assert(m_allocated==0); - + std::size_t blocksize = detail::get_rounded_size + (m_real_node_size*m_nodes_per_chunk, alignment_of::value); typename chunkslist_t::iterator it(m_chunklist.begin()), itend(m_chunklist.end()), aux; //We iterate though the NodeBlock list to free the memory while(!m_chunklist.empty()){ - void *addr = get_chunk_from_hook(&m_chunklist.front()); + void *addr = get_chunk_from_hook(&m_chunklist.front(), blocksize); m_chunklist.pop_front(); mp_segment_mngr_base->deallocate(addr); - } + } + //Just clear free node list + m_freelist.clear(); } + void swap(private_node_pool_impl &other) + { + std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base); + m_chunklist.swap(other.m_chunklist); + m_freelist.swap(other.m_freelist); + std::swap(m_allocated, other.m_allocated); + } + + private: + + struct push_in_list + { + push_in_list(free_nodes_t &l, typename free_nodes_t::iterator &it) + : slist_(l), last_it_(it) + {} + + void operator()(typename free_nodes_t::pointer p) const + { + slist_.push_front(*p); + if(slist_.size() == 1){ //Cache last element + ++last_it_ = slist_.begin(); + } + } + + private: + free_nodes_t &slist_; + typename free_nodes_t::iterator &last_it_; + }; + + struct is_between + : std::unary_function + { + is_between(const void *addr, std::size_t size) + : beg_((const char *)addr), end_(beg_+size) + {} + + bool operator()(typename free_nodes_t::const_reference v) const + { + return (beg_ <= (const char *)&v && + end_ > 
(const char *)&v); + } + private: + const char * beg_; + const char * end_; + }; + //!Allocates one node, using single segregated storage algorithm. //!Never throws node_t *priv_alloc_node() @@ -300,10 +317,13 @@ class private_node_pool_impl { //We allocate a new NodeBlock and put it as first //element in the free Node list - char *pNode = detail::char_ptr_cast(mp_segment_mngr_base->allocate(m_block_size + sizeof(node_t))); + std::size_t blocksize = + detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, alignment_of::value); + char *pNode = detail::char_ptr_cast + (mp_segment_mngr_base->allocate(blocksize + sizeof(node_t))); if(!pNode) throw bad_alloc(); char *pBlock = pNode; - m_chunklist.push_front(get_chunk_hook(pBlock)); + m_chunklist.push_front(get_chunk_hook(pBlock, blocksize)); //We initialize all Nodes in Node Block to insert //them in the free Node list @@ -314,26 +334,24 @@ class private_node_pool_impl private: //!Returns a reference to the chunk hook placed in the end of the chunk - inline node_t & get_chunk_hook (void *chunk) + static inline node_t & get_chunk_hook (void *chunk, std::size_t blocksize) { return *static_cast( - static_cast((detail::char_ptr_cast(chunk)+m_block_size))); + static_cast((detail::char_ptr_cast(chunk) + blocksize))); } //!Returns the starting address of the chunk reference to the chunk hook placed in the end of the chunk - inline void *get_chunk_from_hook (node_t *hook) + inline void *get_chunk_from_hook (node_t *hook, std::size_t blocksize) { - return static_cast((detail::char_ptr_cast(hook) - m_block_size)); + return static_cast((detail::char_ptr_cast(hook) - blocksize)); } private: typedef typename pointer_to_other ::type segment_mngr_base_ptr_t; - const std::size_t m_node_size; const std::size_t m_nodes_per_chunk; const std::size_t m_real_node_size; - const std::size_t m_block_size; segment_mngr_base_ptr_t mp_segment_mngr_base; //Segment manager chunkslist_t m_chunklist; //Intrusive container of chunks free_nodes_t 
m_freelist; //Intrusive container of free nods @@ -376,114 +394,28 @@ class private_node_pool //!a reference count but the class does not delete itself, this is //!responsibility of user classes. Node size (NodeSize) and the number of //!nodes allocated per chunk (NodesPerChunk) are known at compile time +//!Pooled shared memory allocator using adaptive pool. Includes +//!a reference count but the class does not delete itself, this is +//!responsibility of user classes. Node size (NodeSize) and the number of +//!nodes allocated per chunk (NodesPerChunk) are known at compile time template< class SegmentManager - , class Mutex , std::size_t NodeSize , std::size_t NodesPerChunk > class shared_node_pool - : public private_node_pool + : public detail::shared_pool_impl + < private_node_pool + + > { - private: - typedef typename SegmentManager::void_pointer void_pointer; - typedef private_node_pool - private_node_allocator_t; - + typedef detail::shared_pool_impl + < private_node_pool + + > base_t; public: - typedef SegmentManager segment_manager; - typedef typename private_node_allocator_t::free_nodes_t free_nodes_t; - - //!Constructor from a segment manager. Never throws - shared_node_pool(segment_manager *segment_mngr) - : private_node_allocator_t(segment_mngr){} - - //!Destructor. Deallocates all allocated chunks. Never throws - ~shared_node_pool() + shared_node_pool(SegmentManager *segment_mgnr) + : base_t(segment_mgnr) {} - - //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc - void *allocate(std::size_t count) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - return private_node_allocator_t::allocate(count); - } - - //!Deallocates an array pointed by ptr. 
Never throws - void deallocate(void *ptr, std::size_t count) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate(ptr, count); - } - - //!Allocates a singly linked list of n nodes ending in null pointer. - //!can throw boost::interprocess::bad_alloc - void allocate_nodes(const std::size_t n, free_nodes_t &nodes) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::allocate_nodes(n, nodes); - } - - //!Deallocates a linked list of nodes ending in null pointer. Never throws - void deallocate_nodes(free_nodes_t &nodes, std::size_t n) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate_nodes(nodes, n); - } - - void deallocate_nodes(free_nodes_t &nodes) - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate_nodes(nodes); - } - - //!Deallocates all the free chunks of memory. Never throws - void deallocate_free_chunks() - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - private_node_allocator_t::deallocate_free_chunks(); - } - - //!Increments internal reference count and returns new count. Never throws - std::size_t inc_ref_count() - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - return ++m_header.m_usecount; - } - - //!Decrements internal reference count and returns new count. 
Never throws - std::size_t dec_ref_count() - { - //----------------------- - boost::interprocess::scoped_lock guard(m_header); - //----------------------- - assert(m_header.m_usecount > 0); - return --m_header.m_usecount; - } - - private: - //!This struct includes needed data and derives from - //!interprocess_mutex to allow EBO when using null_mutex - struct header_t : Mutex - { - std::size_t m_usecount; //Number of attached allocators - - header_t() - : m_usecount(0) {} - } m_header; }; } //namespace detail { diff --git a/include/boost/interprocess/allocators/detail/node_tools.hpp b/include/boost/interprocess/allocators/detail/node_tools.hpp index bf6860a..048eb7c 100644 --- a/include/boost/interprocess/allocators/detail/node_tools.hpp +++ b/include/boost/interprocess/allocators/detail/node_tools.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -37,7 +37,8 @@ struct node_slist : public slist_hook_t {}; - typedef typename bi::make_slist >::type node_slist_t; + typedef typename bi::make_slist + , bi::base_hook >::type node_slist_t; }; } //namespace detail { diff --git a/include/boost/interprocess/allocators/node_allocator.hpp b/include/boost/interprocess/allocators/node_allocator.hpp index f6a0aa5..eed6e9e 100644 --- a/include/boost/interprocess/allocators/node_allocator.hpp +++ b/include/boost/interprocess/allocators/node_allocator.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -8,8 +8,8 @@ // ////////////////////////////////////////////////////////////////////////////// -#ifndef BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP -#define BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP +#ifndef BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP +#define BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP #if (defined _MSC_VER) && (_MSC_VER >= 1200) # pragma once @@ -22,9 +22,10 @@ #include #include #include -#include +#include #include #include +#include #include #include #include @@ -35,26 +36,35 @@ namespace boost { namespace interprocess { -//!An STL node allocator that uses a segment manager as memory -//!source. The internal pointer type will of the same type (raw, smart) as -//!"typename SegmentManager::void_pointer" type. This allows -//!placing the allocator in shared memory, memory mapped-files, etc... -//!This node allocator shares a segregated storage between all instances -//!of node_allocator with equal sizeof(T) placed in the same segment -//!group. 
NodesPerChunk is the number of nodes allocated at once when the allocator -//!needs runs out of nodes -template -class node_allocator +/// @cond + +namespace detail{ + +template < unsigned int Version + , class T + , class SegmentManager + , std::size_t NodesPerChunk + > +class node_allocator_base + : public node_pool_allocation_impl + < node_allocator_base + < Version, T, SegmentManager, NodesPerChunk> + , Version + , T + , SegmentManager + > { public: typedef typename SegmentManager::void_pointer void_pointer; - typedef typename detail:: - pointer_to_other::type cvoid_pointer; typedef SegmentManager segment_manager; - typedef typename SegmentManager:: - mutex_family::mutex_type mutex_type; - typedef node_allocator - self_t; + typedef node_allocator_base + self_t; + typedef detail::shared_node_pool + < SegmentManager, sizeof(T), NodesPerChunk> node_pool_t; + typedef typename detail:: + pointer_to_other::type node_pool_ptr; + + BOOST_STATIC_ASSERT((Version <=2)); public: //------- @@ -69,61 +79,61 @@ class node_allocator ::type const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; - typedef detail::shared_node_pool - < SegmentManager, mutex_type - , sizeof(T), NodesPerChunk> node_pool_t; - typedef typename detail:: - pointer_to_other::type node_pool_ptr; - //!Obtains node_allocator from other - //!node_allocator + typedef detail::version_type version; + typedef transform_iterator + < typename SegmentManager:: + multiallocation_iterator + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; + + //!Obtains node_allocator_base from + //!node_allocator_base template struct rebind { - typedef node_allocator other; + typedef node_allocator_base other; }; /// @cond private: - //!Not assignable from related - //!node_allocator - template - node_allocator& operator= - (const node_allocator&); + //!Not assignable from related node_allocator_base + template + 
node_allocator_base& operator= + (const node_allocator_base&); - //!Not assignable from other - //!node_allocator - node_allocator& operator=(const node_allocator&); + //!Not assignable from other node_allocator_base + node_allocator_base& operator=(const node_allocator_base&); /// @endcond public: - //!Constructor from a segment manager. If not present, constructs a node //!pool. Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc - node_allocator(segment_manager *segment_mngr) - : mp_node_pool(priv_get_or_create(segment_mngr)) - {} + node_allocator_base(segment_manager *segment_mngr) + : mp_node_pool(detail::get_or_create_node_pool(segment_mngr)) { } - //!Copy constructor from other node_allocator. Increments the reference + //!Copy constructor from other node_allocator_base. Increments the reference //!count of the associated node pool. Never throws - node_allocator(const node_allocator &other) + node_allocator_base(const node_allocator_base &other) : mp_node_pool(other.get_node_pool()) - { mp_node_pool->inc_ref_count(); } + { + mp_node_pool->inc_ref_count(); + } - //!Copy constructor from related node_allocator. If not present, constructs + //!Copy constructor from related node_allocator_base. If not present, constructs //!a node pool. Increments the reference count of the associated node pool. //!Can throw boost::interprocess::bad_alloc template - node_allocator - (const node_allocator &other) - : mp_node_pool(priv_get_or_create(other.get_segment_manager())) - {} + node_allocator_base + (const node_allocator_base &other) + : mp_node_pool(detail::get_or_create_node_pool(other.get_segment_manager())) { } //!Destructor, removes node_pool_t from memory //!if its reference count reaches to zero. Never throws - ~node_allocator() - { priv_destroy_if_last_link(); } + ~node_allocator_base() + { detail::destroy_node_pool_if_last_link(detail::get_pointer(mp_node_pool)); } //!Returns a pointer to the node pool. 
//!Never throws @@ -135,159 +145,290 @@ class node_allocator segment_manager* get_segment_manager()const { return mp_node_pool->get_segment_manager(); } - //!Returns the number of elements that could be allocated. Never throws - size_type max_size() const - { return this->get_segment_manager()->get_size()/sizeof(value_type); } - - //!Allocate memory for an array of count elements. - //!Throws boost::interprocess::bad_alloc if there is no enough memory - pointer allocate(size_type count, cvoid_pointer = 0) - { - if(count > ((size_type)-1)/sizeof(value_type)) - throw bad_alloc(); - return pointer(static_cast(mp_node_pool->allocate(count))); - } - - //!Deallocate allocated memory. - //!Never throws - void deallocate(const pointer &ptr, size_type count) - { mp_node_pool->deallocate(detail::get_pointer(ptr), count); } - - //!Deallocates all free chunks of the pool - void deallocate_free_chunks() - { mp_node_pool->deallocate_free_chunks(); } - //!Swaps allocators. Does not throw. If each allocator is placed in a //!different memory segment, the result is undefined. friend void swap(self_t &alloc1, self_t &alloc2) { detail::do_swap(alloc1.mp_node_pool, alloc2.mp_node_pool); } - //These functions are obsolete. These are here to conserve - //backwards compatibility with containers using them... - - //!Returns address of mutable object. - //!Never throws - pointer address(reference value) const - { return pointer(boost::addressof(value)); } - - //!Returns address of non mutable object. - //!Never throws - const_pointer address(const_reference value) const - { return const_pointer(boost::addressof(value)); } - - //!Default construct an object. - //!Throws if T's default constructor throws*/ - void construct(const pointer &ptr) - { new(detail::get_pointer(ptr)) value_type; } - - //!Destroys object. 
Throws if object's - //!destructor throws - void destroy(const pointer &ptr) - { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); } - /// @cond - private: - //!Object function that creates the node allocator if it is not created and - //!increments reference count if it is already created - struct get_or_create_func - { - typedef detail::shared_node_pool - node_pool_t; - - //!This connects or constructs the unique instance of node_pool_t - //!Can throw boost::interprocess::bad_alloc - void operator()() - { - //Find or create the node_pool_t - mp_node_pool = mp_named_alloc->template find_or_construct - (unique_instance)(mp_named_alloc); - //If valid, increment link count - if(mp_node_pool != 0) - mp_node_pool->inc_ref_count(); - } - - //!Constructor. Initializes function - //!object parameters - get_or_create_func(segment_manager *hdr) : mp_named_alloc(hdr){} - - node_pool_t *mp_node_pool; - segment_manager *mp_named_alloc; - }; - - //!Initialization function, creates an executes atomically the - //!initialization object functions. Can throw boost::interprocess::bad_alloc - node_pool_t *priv_get_or_create(segment_manager *named_alloc) - { - get_or_create_func func(named_alloc); - named_alloc->atomic_func(func); - return func.mp_node_pool; - } - - //!Object function that decrements the reference count. If the count - //!reaches to zero destroys the node allocator from memory. - //!Never throws - struct destroy_if_last_link_func - { - typedef detail::shared_node_pool - node_pool_t; - - //!Decrements reference count and destroys the object if there is no - //!more attached allocators. Never throws - void operator()() - { - //If not the last link return - if(mp_node_pool->dec_ref_count() != 0) return; - - //Last link, let's destroy the segment_manager - mp_named_alloc->template destroy(unique_instance); - } - - //!Constructor. 
Initializes function - //!object parameters - destroy_if_last_link_func(segment_manager *nhdr, - node_pool_t *phdr) - : mp_named_alloc(nhdr), mp_node_pool(phdr) - {} - - segment_manager *mp_named_alloc; - node_pool_t *mp_node_pool; - }; - - //!Destruction function, initializes and executes destruction function - //!object. Never throws - void priv_destroy_if_last_link() - { - typedef detail::shared_node_pool - node_pool_t; - //Get segment manager - segment_manager *named_segment_mngr = this->get_segment_manager(); - //Execute destruction functor atomically - destroy_if_last_link_func func(named_segment_mngr, detail::get_pointer(mp_node_pool)); - named_segment_mngr->atomic_func(func); - } - private: node_pool_ptr mp_node_pool; /// @endcond }; -//!Equality test for same type of -//!node_allocator -template inline -bool operator==(const node_allocator &alloc1, - const node_allocator &alloc2) +//!Equality test for same type +//!of node_allocator_base +template inline +bool operator==(const node_allocator_base &alloc1, + const node_allocator_base &alloc2) { return alloc1.get_node_pool() == alloc2.get_node_pool(); } -//!Inequality test for same type of -//!node_allocator -template inline -bool operator!=(const node_allocator &alloc1, - const node_allocator &alloc2) +//!Inequality test for same type +//!of node_allocator_base +template inline +bool operator!=(const node_allocator_base &alloc1, + const node_allocator_base &alloc2) { return alloc1.get_node_pool() != alloc2.get_node_pool(); } +template < class T + , class SegmentManager + , std::size_t NodesPerChunk = 64 + > +class node_allocator_v1 + : public node_allocator_base + < 1 + , T + , SegmentManager + , NodesPerChunk + > +{ + public: + typedef detail::node_allocator_base + < 1, T, SegmentManager, NodesPerChunk> base_t; + + template + struct rebind + { + typedef node_allocator_v1 other; + }; + + node_allocator_v1(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + node_allocator_v1 + (const 
node_allocator_v1 &other) + : base_t(other) + {} +}; + +} //namespace detail{ + +/// @endcond + +//!An STL node allocator that uses a segment manager as memory +//!source. The internal pointer type will of the same type (raw, smart) as +//!"typename SegmentManager::void_pointer" type. This allows +//!placing the allocator in shared memory, memory mapped-files, etc... +//!This node allocator shares a segregated storage between all instances +//!of node_allocator with equal sizeof(T) placed in the same segment +//!group. NodesPerChunk is the number of nodes allocated at once when the allocator +//!needs runs out of nodes +template < class T + , class SegmentManager + , std::size_t NodesPerChunk + > +class node_allocator + /// @cond + : public detail::node_allocator_base + < 2 + , T + , SegmentManager + , NodesPerChunk + > + /// @endcond +{ + + #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED + typedef detail::node_allocator_base + < 2, T, SegmentManager, NodesPerChunk> base_t; + public: + typedef detail::version_type version; + + template + struct rebind + { + typedef node_allocator other; + }; + + node_allocator(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + node_allocator + (const node_allocator &other) + : base_t(other) + {} + + #else //BOOST_INTERPROCESS_DOXYGEN_INVOKED + public: + typedef implementation_defined::segment_manager segment_manager; + typedef segment_manager::void_pointer void_pointer; + typedef implementation_defined::pointer pointer; + typedef implementation_defined::const_pointer const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + //!Obtains node_allocator from + //!node_allocator + template + struct rebind + { + typedef node_allocator other; + }; + + private: + //!Not assignable from + //!related node_allocator + template + 
node_allocator& operator= + (const node_allocator&); + + //!Not assignable from + //!other node_allocator + node_allocator& operator=(const node_allocator&); + + public: + //!Constructor from a segment manager. If not present, constructs a node + //!pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + node_allocator(segment_manager *segment_mngr); + + //!Copy constructor from other node_allocator. Increments the reference + //!count of the associated node pool. Never throws + node_allocator(const node_allocator &other); + + //!Copy constructor from related node_allocator. If not present, constructs + //!a node pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + template + node_allocator + (const node_allocator &other); + + //!Destructor, removes node_pool_t from memory + //!if its reference count reaches to zero. Never throws + ~node_allocator(); + + //!Returns a pointer to the node pool. + //!Never throws + node_pool_t* get_node_pool() const; + + //!Returns the segment manager. + //!Never throws + segment_manager* get_segment_manager()const; + + //!Returns the number of elements that could be allocated. + //!Never throws + size_type max_size() const; + + //!Allocate memory for an array of count elements. + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate(size_type count, cvoid_pointer hint = 0); + + //!Deallocate allocated memory. + //!Never throws + void deallocate(const pointer &ptr, size_type count); + + //!Deallocates all free chunks + //!of the pool + void deallocate_free_chunks(); + + //!Swaps allocators. Does not throw. If each allocator is placed in a + //!different memory segment, the result is undefined. + friend void swap(self_t &alloc1, self_t &alloc2); + + //!Returns address of mutable object. 
+ //!Never throws + pointer address(reference value) const; + + //!Returns address of non mutable object. + //!Never throws + const_pointer address(const_reference value) const; + + //!Default construct an object. + //!Throws if T's default constructor throws + void construct(const pointer &ptr); + + //!Destroys object. Throws if object's + //!destructor throws + void destroy(const pointer &ptr); + + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const; + + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements); + + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it); + + //!Allocates just one object. 
Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one(); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements); + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). 
+ void deallocate_individual(multiallocation_iterator it); + #endif +}; + +#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED + +//!Equality test for same type +//!of node_allocator +template inline +bool operator==(const node_allocator &alloc1, + const node_allocator &alloc2); + +//!Inequality test for same type +//!of node_allocator +template inline +bool operator!=(const node_allocator &alloc1, + const node_allocator &alloc2); + +#endif + } //namespace interprocess { } //namespace boost { #include -#endif //#ifndef BOOST_INTERPROCESS_POOLED_NODE_ALLOCATOR_HPP +#endif //#ifndef BOOST_INTERPROCESS_NODE_ALLOCATOR_HPP diff --git a/include/boost/interprocess/allocators/private_adaptive_pool.hpp b/include/boost/interprocess/allocators/private_adaptive_pool.hpp index 1c743ce..5552348 100644 --- a/include/boost/interprocess/allocators/private_adaptive_pool.hpp +++ b/include/boost/interprocess/allocators/private_adaptive_pool.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -30,38 +30,47 @@ #include //!\file -//!Describes private_adaptive_pool pooled shared memory STL compatible allocator +//!Describes private_adaptive_pool_base pooled shared memory STL compatible allocator namespace boost { namespace interprocess { -//!An STL node allocator that uses a segment manager as memory -//!source. The internal pointer type will of the same type (raw, smart) as -//!"typename SegmentManager::void_pointer" type. This allows -//!placing the allocator in shared memory, memory mapped-files, etc... -//!This allocator has its own node pool. NodesPerChunk is the minimum number of nodes -//!allocated at once when the allocator needs runs out of nodes. 
-template -class private_adaptive_pool +/// @cond + +namespace detail { + +template < unsigned int Version + , class T + , class SegmentManager + , std::size_t NodesPerChunk + , std::size_t MaxFreeChunks + , unsigned char OverheadPercent + > +class private_adaptive_pool_base + : public node_pool_allocation_impl + < private_adaptive_pool_base < Version, T, SegmentManager, NodesPerChunk + , MaxFreeChunks, OverheadPercent> + , Version + , T + , SegmentManager + > { /// @cond private: typedef typename SegmentManager::void_pointer void_pointer; - typedef typename detail:: - pointer_to_other::type cvoid_pointer; typedef SegmentManager segment_manager; - typedef typename detail:: - pointer_to_other::type char_pointer; - typedef typename detail::pointer_to_other - ::type segment_mngr_ptr_t; - typedef typename SegmentManager:: - mutex_family::mutex_type mutex_type; - typedef private_adaptive_pool - self_t; + typedef private_adaptive_pool_base + < Version, T, SegmentManager, NodesPerChunk + , MaxFreeChunks, OverheadPercent> self_t; typedef detail::private_adaptive_node_pool - priv_node_pool_t; + node_pool_t; + BOOST_STATIC_ASSERT((Version <=2)); /// @endcond @@ -77,120 +86,358 @@ class private_adaptive_pool ::type const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; + typedef detail::version_type + version; + typedef transform_iterator + < typename SegmentManager:: + multiallocation_iterator + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; //!Obtains node_allocator from other node_allocator template struct rebind { - typedef private_adaptive_pool other; + typedef private_adaptive_pool_base + other; }; /// @cond private: - //!Not assignable from related private_adaptive_pool - template - private_adaptive_pool& operator= - (const private_adaptive_pool&); + //!Not assignable from related private_adaptive_pool_base + template + private_adaptive_pool_base& 
operator= + (const private_adaptive_pool_base&); - //!Not assignable from other private_adaptive_pool - private_adaptive_pool& operator=(const private_adaptive_pool&); + //!Not assignable from other private_adaptive_pool_base + private_adaptive_pool_base& operator=(const private_adaptive_pool_base&); /// @endcond public: //!Constructor from a segment manager - private_adaptive_pool(segment_manager *segment_mngr) + private_adaptive_pool_base(segment_manager *segment_mngr) : m_node_pool(segment_mngr) {} - //!Copy constructor from other private_adaptive_pool. Never throws - private_adaptive_pool(const private_adaptive_pool &other) + //!Copy constructor from other private_adaptive_pool_base. Never throws + private_adaptive_pool_base(const private_adaptive_pool_base &other) : m_node_pool(other.get_segment_manager()) {} - //!Copy constructor from related private_adaptive_pool. Never throws. + //!Copy constructor from related private_adaptive_pool_base. Never throws. template - private_adaptive_pool - (const private_adaptive_pool &other) + private_adaptive_pool_base + (const private_adaptive_pool_base + &other) : m_node_pool(other.get_segment_manager()) {} //!Destructor, frees all used memory. Never throws - ~private_adaptive_pool() + ~private_adaptive_pool_base() {} //!Returns the segment manager. Never throws segment_manager* get_segment_manager()const { return m_node_pool.get_segment_manager(); } - //!Returns the number of elements that could be allocated. Never throws - size_type max_size() const - { return this->get_segment_manager()/sizeof(value_type); } - - //!Allocate memory for an array of count elements. - //!Throws boost::interprocess::bad_alloc if there is no enough memory - pointer allocate(size_type count, cvoid_pointer hint = 0) - { - (void)hint; - if(count > ((size_type)-1)/sizeof(value_type)) - throw bad_alloc(); - return pointer(static_cast(m_node_pool.allocate(count))); - } - - //!Deallocate allocated memory. 
Never throws - void deallocate(const pointer &ptr, size_type count) - { m_node_pool.deallocate(detail::get_pointer(ptr), count); } - - //!Deallocates all free chunks of the pool - void deallocate_free_chunks() - { m_node_pool.deallocate_free_chunks(); } + //!Returns the internal node pool. Never throws + node_pool_t* get_node_pool() const + { return const_cast(&m_node_pool); } //!Swaps allocators. Does not throw. If each allocator is placed in a - //!different shared memory segments, the result is undefined.*/ + //!different shared memory segments, the result is undefined. friend void swap(self_t &alloc1,self_t &alloc2) { alloc1.m_node_pool.swap(alloc2.m_node_pool); } - //These functions are obsolete. These are here to conserve - //backwards compatibility with containers using them... - - //!Returns address of mutable object. - //!Never throws - pointer address(reference value) const - { return pointer(boost::addressof(value)); } - - //!Returns address of non mutable object. - //!Never throws - const_pointer address(const_reference value) const - { return const_pointer(boost::addressof(value)); } - - //!Default construct an object. - //!Throws if T's default constructor throws*/ - void construct(const pointer &ptr) - { new(detail::get_pointer(ptr)) value_type; } - - //!Destroys object. 
Throws if object's - //!destructor throws - void destroy(const pointer &ptr) - { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); } - /// @cond private: - priv_node_pool_t m_node_pool; + node_pool_t m_node_pool; /// @endcond }; -//!Equality test for same type of private_adaptive_pool -template inline -bool operator==(const private_adaptive_pool &alloc1, - const private_adaptive_pool &alloc2) +//!Equality test for same type of private_adaptive_pool_base +template inline +bool operator==(const private_adaptive_pool_base &alloc1, + const private_adaptive_pool_base &alloc2) { return &alloc1 == &alloc2; } -//!Inequality test for same type of private_adaptive_pool -template inline -bool operator!=(const private_adaptive_pool &alloc1, - const private_adaptive_pool &alloc2) -{ - return &alloc1 != &alloc2; -} +//!Inequality test for same type of private_adaptive_pool_base +template inline +bool operator!=(const private_adaptive_pool_base &alloc1, + const private_adaptive_pool_base &alloc2) +{ return &alloc1 != &alloc2; } + +template < class T + , class SegmentManager + , std::size_t NodesPerChunk = 64 + , std::size_t MaxFreeChunks = 2 + , unsigned char OverheadPercent = 5 + > +class private_adaptive_pool_v1 + : public private_adaptive_pool_base + < 1 + , T + , SegmentManager + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > +{ + public: + typedef detail::private_adaptive_pool_base + < 1, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t; + + template + struct rebind + { + typedef private_adaptive_pool_v1 other; + }; + + private_adaptive_pool_v1(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + private_adaptive_pool_v1 + (const private_adaptive_pool_v1 &other) + : base_t(other) + {} +}; + +} //namespace detail { + +/// @endcond + +//!An STL node allocator that uses a segment manager as memory +//!source. The internal pointer type will of the same type (raw, smart) as +//!"typename SegmentManager::void_pointer" type. 
This allows +//!placing the allocator in shared memory, memory mapped-files, etc... +//!This allocator has its own node pool. +//! +//!NodesPerChunk is the minimum number of nodes of nodes allocated at once when +//!the allocator needs runs out of nodes. MaxFreeChunks is the maximum number of totally free chunks +//!that the adaptive node pool will hold. The rest of the totally free chunks will be +//!deallocated with the segment manager. +//! +//!OverheadPercent is the (approximated) maximum size overhead (1-20%) of the allocator: +//!(memory usable for nodes / total memory allocated from the segment manager) +template < class T + , class SegmentManager + , std::size_t NodesPerChunk + , std::size_t MaxFreeChunks + , unsigned char OverheadPercent + > +class private_adaptive_pool + /// @cond + : public detail::private_adaptive_pool_base + < 2 + , T + , SegmentManager + , NodesPerChunk + , MaxFreeChunks + , OverheadPercent + > + /// @endcond +{ + + #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED + typedef detail::private_adaptive_pool_base + < 2, T, SegmentManager, NodesPerChunk, MaxFreeChunks, OverheadPercent> base_t; + public: + typedef detail::version_type version; + + template + struct rebind + { + typedef private_adaptive_pool + other; + }; + + private_adaptive_pool(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + private_adaptive_pool + (const private_adaptive_pool &other) + : base_t(other) + {} + + #else + public: + typedef implementation_defined::segment_manager segment_manager; + typedef segment_manager::void_pointer void_pointer; + typedef implementation_defined::pointer pointer; + typedef implementation_defined::const_pointer const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + //!Obtains private_adaptive_pool from + //!private_adaptive_pool + 
template + struct rebind + { + typedef private_adaptive_pool + other; + }; + + private: + //!Not assignable from + //!related private_adaptive_pool + template + private_adaptive_pool& operator= + (const private_adaptive_pool&); + + //!Not assignable from + //!other private_adaptive_pool + private_adaptive_pool& operator=(const private_adaptive_pool&); + + public: + //!Constructor from a segment manager. If not present, constructs a node + //!pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + private_adaptive_pool(segment_manager *segment_mngr); + + //!Copy constructor from other private_adaptive_pool. Increments the reference + //!count of the associated node pool. Never throws + private_adaptive_pool(const private_adaptive_pool &other); + + //!Copy constructor from related private_adaptive_pool. If not present, constructs + //!a node pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + template + private_adaptive_pool + (const private_adaptive_pool &other); + + //!Destructor, removes node_pool_t from memory + //!if its reference count reaches to zero. Never throws + ~private_adaptive_pool(); + + //!Returns a pointer to the node pool. + //!Never throws + node_pool_t* get_node_pool() const; + + //!Returns the segment manager. + //!Never throws + segment_manager* get_segment_manager()const; + + //!Returns the number of elements that could be allocated. + //!Never throws + size_type max_size() const; + + //!Allocate memory for an array of count elements. + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate(size_type count, cvoid_pointer hint = 0); + + //!Deallocate allocated memory. + //!Never throws + void deallocate(const pointer &ptr, size_type count); + + //!Deallocates all free chunks + //!of the pool + void deallocate_free_chunks(); + + //!Swaps allocators. Does not throw. 
If each allocator is placed in a + //!different memory segment, the result is undefined. + friend void swap(self_t &alloc1, self_t &alloc2); + + //!Returns address of mutable object. + //!Never throws + pointer address(reference value) const; + + //!Returns address of non mutable object. + //!Never throws + const_pointer address(const_reference value) const; + + //!Default construct an object. + //!Throws if T's default constructor throws + void construct(const pointer &ptr); + + //!Destroys object. Throws if object's + //!destructor throws + void destroy(const pointer &ptr); + + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const; + + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements); + + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. 
The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it); + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one(); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements); + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). 
+ void deallocate_individual(multiallocation_iterator it); + #endif +}; + +#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED + +//!Equality test for same type +//!of private_adaptive_pool +template inline +bool operator==(const private_adaptive_pool &alloc1, + const private_adaptive_pool &alloc2); + +//!Inequality test for same type +//!of private_adaptive_pool +template inline +bool operator!=(const private_adaptive_pool &alloc1, + const private_adaptive_pool &alloc2); + +#endif } //namespace interprocess { } //namespace boost { diff --git a/include/boost/interprocess/allocators/private_node_allocator.hpp b/include/boost/interprocess/allocators/private_node_allocator.hpp index be7d491..608bacc 100644 --- a/include/boost/interprocess/allocators/private_node_allocator.hpp +++ b/include/boost/interprocess/allocators/private_node_allocator.hpp @@ -1,13 +1,13 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // See http://www.boost.org/libs/interprocess for documentation. 
// ////////////////////////////////////////////////////////////////////////////// - +/* #ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP #define BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP @@ -50,12 +50,8 @@ class private_node_allocator typedef typename detail:: pointer_to_other::type cvoid_pointer; typedef SegmentManager segment_manager; - typedef typename detail:: - pointer_to_other::type char_pointer; typedef typename detail::pointer_to_other ::type segment_mngr_ptr_t; - typedef typename SegmentManager:: - mutex_family::mutex_type mutex_type; typedef private_node_allocator self_t; typedef detail::private_node_pool @@ -128,21 +124,30 @@ class private_node_allocator pointer allocate(size_type count, cvoid_pointer hint = 0) { (void)hint; - if(count > ((size_type)-1)/sizeof(value_type)) + if(count > this->max_size()) throw bad_alloc(); - return pointer(static_cast(m_node_pool.allocate(count))); + else if(count == 1) + return pointer(static_cast(m_node_pool.allocate_node())); + else + return pointer(static_cast + (m_node_pool.get_segment_manager()->allocate(sizeof(T)*count))); } //!Deallocate allocated memory. Never throws void deallocate(const pointer &ptr, size_type count) - { m_node_pool.deallocate(detail::get_pointer(ptr), count); } + { + if(count == 1) + m_node_pool.deallocate_node(detail::get_pointer(ptr)); + else + m_node_pool.get_segment_manager()->deallocate(detail::get_pointer(ptr)); + } //!Deallocates all free chunks of the pool void deallocate_free_chunks() { m_node_pool.deallocate_free_chunks(); } //!Swaps allocators. Does not throw. If each allocator is placed in a - //!different shared memory segments, the result is undefined.*/ + //!different shared memory segments, the result is undefined. friend void swap(self_t &alloc1,self_t &alloc2) { alloc1.m_node_pool.swap(alloc2.m_node_pool); } @@ -160,7 +165,7 @@ class private_node_allocator { return const_pointer(boost::addressof(value)); } //!Default construct an object. 
- //!Throws if T's default constructor throws*/ + //!Throws if T's default constructor throws void construct(const pointer &ptr) { new(detail::get_pointer(ptr)) value_type; } @@ -196,3 +201,432 @@ bool operator!=(const private_node_allocator &alloc1, #endif //#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP +*/ + +////////////////////////////////////////////////////////////////////////////// +// +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost +// Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// See http://www.boost.org/libs/interprocess for documentation. +// +////////////////////////////////////////////////////////////////////////////// + +#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP +#define BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP + +#if (defined _MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//!\file +//!Describes private_node_allocator_base pooled shared memory STL compatible allocator + +namespace boost { +namespace interprocess { + +/// @cond + +namespace detail { + +template < unsigned int Version + , class T + , class SegmentManager + , std::size_t NodesPerChunk + > +class private_node_allocator_base + : public node_pool_allocation_impl + < private_node_allocator_base < Version, T, SegmentManager, NodesPerChunk> + , Version + , T + , SegmentManager + > +{ + /// @cond + private: + typedef typename SegmentManager::void_pointer void_pointer; + typedef SegmentManager segment_manager; + typedef private_node_allocator_base + < Version, T, SegmentManager, NodesPerChunk> self_t; + typedef detail::private_node_pool + node_pool_t; + + BOOST_STATIC_ASSERT((Version <=2)); + + /// @endcond + + public: + typedef typename detail:: + pointer_to_other::type pointer; + typedef typename detail:: + pointer_to_other::type 
const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef detail::version_type + version; + typedef transform_iterator + < typename SegmentManager:: + multiallocation_iterator + , detail::cast_functor > multiallocation_iterator; + typedef typename SegmentManager:: + multiallocation_chain multiallocation_chain; + + //!Obtains node_allocator from other node_allocator + template + struct rebind + { + typedef private_node_allocator_base + other; + }; + + /// @cond + private: + //!Not assignable from related private_node_allocator_base + template + private_node_allocator_base& operator= + (const private_node_allocator_base&); + + //!Not assignable from other private_node_allocator_base + private_node_allocator_base& operator=(const private_node_allocator_base&); + /// @endcond + + public: + //!Constructor from a segment manager + private_node_allocator_base(segment_manager *segment_mngr) + : m_node_pool(segment_mngr) + {} + + //!Copy constructor from other private_node_allocator_base. Never throws + private_node_allocator_base(const private_node_allocator_base &other) + : m_node_pool(other.get_segment_manager()) + {} + + //!Copy constructor from related private_node_allocator_base. Never throws. + template + private_node_allocator_base + (const private_node_allocator_base + &other) + : m_node_pool(other.get_segment_manager()) + {} + + //!Destructor, frees all used memory. Never throws + ~private_node_allocator_base() + {} + + //!Returns the segment manager. Never throws + segment_manager* get_segment_manager()const + { return m_node_pool.get_segment_manager(); } + + //!Returns the internal node pool. Never throws + node_pool_t* get_node_pool() const + { return const_cast(&m_node_pool); } + + //!Swaps allocators. Does not throw. 
If each allocator is placed in a + //!different shared memory segments, the result is undefined. + friend void swap(self_t &alloc1,self_t &alloc2) + { alloc1.m_node_pool.swap(alloc2.m_node_pool); } + + /// @cond + private: + node_pool_t m_node_pool; + /// @endcond +}; + +//!Equality test for same type of private_node_allocator_base +template inline +bool operator==(const private_node_allocator_base &alloc1, + const private_node_allocator_base &alloc2) +{ return &alloc1 == &alloc2; } + +//!Inequality test for same type of private_node_allocator_base +template inline +bool operator!=(const private_node_allocator_base &alloc1, + const private_node_allocator_base &alloc2) +{ return &alloc1 != &alloc2; } + +template < class T + , class SegmentManager + , std::size_t NodesPerChunk = 64 + > +class private_node_allocator_v1 + : public private_node_allocator_base + < 1 + , T + , SegmentManager + , NodesPerChunk + > +{ + public: + typedef detail::private_node_allocator_base + < 1, T, SegmentManager, NodesPerChunk> base_t; + + template + struct rebind + { + typedef private_node_allocator_v1 other; + }; + + private_node_allocator_v1(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + private_node_allocator_v1 + (const private_node_allocator_v1 &other) + : base_t(other) + {} +}; + +} //namespace detail { + +/// @endcond + +//!An STL node allocator that uses a segment manager as memory +//!source. The internal pointer type will of the same type (raw, smart) as +//!"typename SegmentManager::void_pointer" type. This allows +//!placing the allocator in shared memory, memory mapped-files, etc... +//!This allocator has its own node pool. 
NodesPerChunk is the number of nodes allocated +//!at once when the allocator needs runs out of nodes +template < class T + , class SegmentManager + , std::size_t NodesPerChunk + > +class private_node_allocator + /// @cond + : public detail::private_node_allocator_base + < 2 + , T + , SegmentManager + , NodesPerChunk + > + /// @endcond +{ + + #ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED + typedef detail::private_node_allocator_base + < 2, T, SegmentManager, NodesPerChunk> base_t; + public: + typedef detail::version_type version; + + template + struct rebind + { + typedef private_node_allocator + other; + }; + + private_node_allocator(SegmentManager *segment_mngr) + : base_t(segment_mngr) + {} + + template + private_node_allocator + (const private_node_allocator &other) + : base_t(other) + {} + + #else + public: + typedef implementation_defined::segment_manager segment_manager; + typedef segment_manager::void_pointer void_pointer; + typedef implementation_defined::pointer pointer; + typedef implementation_defined::const_pointer const_pointer; + typedef T value_type; + typedef typename detail::add_reference + ::type reference; + typedef typename detail::add_reference + ::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + //!Obtains private_node_allocator from + //!private_node_allocator + template + struct rebind + { + typedef private_node_allocator + other; + }; + + private: + //!Not assignable from + //!related private_node_allocator + template + private_node_allocator& operator= + (const private_node_allocator&); + + //!Not assignable from + //!other private_node_allocator + private_node_allocator& operator=(const private_node_allocator&); + + public: + //!Constructor from a segment manager. If not present, constructs a node + //!pool. Increments the reference count of the associated node pool. 
+ //!Can throw boost::interprocess::bad_alloc + private_node_allocator(segment_manager *segment_mngr); + + //!Copy constructor from other private_node_allocator. Increments the reference + //!count of the associated node pool. Never throws + private_node_allocator(const private_node_allocator &other); + + //!Copy constructor from related private_node_allocator. If not present, constructs + //!a node pool. Increments the reference count of the associated node pool. + //!Can throw boost::interprocess::bad_alloc + template + private_node_allocator + (const private_node_allocator &other); + + //!Destructor, removes node_pool_t from memory + //!if its reference count reaches to zero. Never throws + ~private_node_allocator(); + + //!Returns a pointer to the node pool. + //!Never throws + node_pool_t* get_node_pool() const; + + //!Returns the segment manager. + //!Never throws + segment_manager* get_segment_manager()const; + + //!Returns the number of elements that could be allocated. + //!Never throws + size_type max_size() const; + + //!Allocate memory for an array of count elements. + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate(size_type count, cvoid_pointer hint = 0); + + //!Deallocate allocated memory. + //!Never throws + void deallocate(const pointer &ptr, size_type count); + + //!Deallocates all free chunks + //!of the pool + void deallocate_free_chunks(); + + //!Swaps allocators. Does not throw. If each allocator is placed in a + //!different memory segment, the result is undefined. + friend void swap(self_t &alloc1, self_t &alloc2); + + //!Returns address of mutable object. + //!Never throws + pointer address(reference value) const; + + //!Returns address of non mutable object. + //!Never throws + const_pointer address(const_reference value) const; + + //!Default construct an object. + //!Throws if T's default constructor throws + void construct(const pointer &ptr); + + //!Destroys object. 
Throws if object's + //!destructor throws + void destroy(const pointer &ptr); + + //!Returns maximum the number of objects the previously allocated memory + //!pointed by p can hold. This size only works for memory allocated with + //!allocate, allocation_command and allocate_many. + size_type size(const pointer &p) const; + + std::pair + allocation_command(allocation_type command, + size_type limit_size, + size_type preferred_size, + size_type &received_size, const pointer &reuse = 0); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + multiallocation_iterator allocate_many(size_type elem_size, std::size_t num_elements); + + //!Allocates n_elements elements, each one of size elem_sizes[i]in a + //!contiguous chunk + //!of memory. The elements must be deallocated + multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements); + + //!Allocates many elements of size elem_size in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. The elements must be deallocated + //!with deallocate(...) + void deallocate_many(multiallocation_iterator it); + + //!Allocates just one object. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + //!Throws boost::interprocess::bad_alloc if there is no enough memory + pointer allocate_one(); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. 
The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + multiallocation_iterator allocate_individual(std::size_t num_elements); + + //!Deallocates memory previously allocated with allocate_one(). + //!You should never use deallocate_one to deallocate memory allocated + //!with other functions different from allocate_one(). Never throws + void deallocate_one(const pointer &p); + + //!Allocates many elements of size == 1 in a contiguous chunk + //!of memory. The minimum number to be allocated is min_elements, + //!the preferred and maximum number is + //!preferred_elements. The number of actually allocated elements is + //!will be assigned to received_size. Memory allocated with this function + //!must be deallocated only with deallocate_one(). + void deallocate_individual(multiallocation_iterator it); + #endif +}; + +#ifdef BOOST_INTERPROCESS_DOXYGEN_INVOKED + +//!Equality test for same type +//!of private_node_allocator +template inline +bool operator==(const private_node_allocator &alloc1, + const private_node_allocator &alloc2); + +//!Inequality test for same type +//!of private_node_allocator +template inline +bool operator!=(const private_node_allocator &alloc1, + const private_node_allocator &alloc2); + +#endif + +} //namespace interprocess { +} //namespace boost { + +#include + +#endif //#ifndef BOOST_INTERPROCESS_PRIVATE_NODE_ALLOCATOR_HPP + diff --git a/include/boost/interprocess/containers/detail/flat_tree.hpp b/include/boost/interprocess/containers/detail/flat_tree.hpp index 001b213..5901a2f 100644 --- a/include/boost/interprocess/containers/detail/flat_tree.hpp +++ b/include/boost/interprocess/containers/detail/flat_tree.hpp @@ -1,6 +1,6 @@ //////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. 
Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/containers/detail/node_alloc_holder.hpp b/include/boost/interprocess/containers/detail/node_alloc_holder.hpp index 339cefc..7352019 100644 --- a/include/boost/interprocess/containers/detail/node_alloc_holder.hpp +++ b/include/boost/interprocess/containers/detail/node_alloc_holder.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -74,6 +74,9 @@ struct node_alloc_holder typedef detail::integral_constant::value> alloc_version; + typedef typename ICont::iterator icont_iterator; + typedef typename ICont::const_iterator icont_citerator; + typedef allocator_destroyer Destroyer; node_alloc_holder(const ValAlloc &a) : members_(a) @@ -292,18 +295,41 @@ struct node_alloc_holder if(constructed){ this->destroy(p); } - this->deallocate_one(p); - multiallocation_iterator itend; - while(itbeg != itend){ - Node *n = &*itbeg; - ++itbeg; - this->deallocate_one(n); - } + this->node_alloc().deallocate_many(itbeg); } BOOST_CATCH_END return beg; } + void clear(allocator_v1) + { this->icont().clear_and_dispose(Destroyer(this->node_alloc())); } + + void clear(allocator_v2) + { + allocator_multialloc_chain_node_deallocator chain_holder(this->node_alloc()); + this->icont().clear_and_dispose(chain_holder.get_chain_builder()); + } + + icont_iterator erase_range(icont_iterator first, icont_iterator last, allocator_v1) + { return this->icont().erase_and_dispose(first, last, Destroyer(this->node_alloc())); } + + icont_iterator erase_range(icont_iterator first, icont_iterator last, 
allocator_v2) + { + allocator_multialloc_chain_node_deallocator chain_holder(this->node_alloc()); + return this->icont().erase_and_dispose(first, last, chain_holder.get_chain_builder()); + } + + template + size_type erase_key(const Key& k, const Comparator &comp, allocator_v1) + { return this->icont().erase_and_dispose(k, comp, Destroyer(this->node_alloc())); } + + template + size_type erase_key(const Key& k, const Comparator &comp, allocator_v2) + { + allocator_multialloc_chain_node_deallocator chain_holder(this->node_alloc()); + return this->icont().erase_and_dispose(k, comp, chain_holder.get_chain_builder()); + } + protected: struct cloner { @@ -359,10 +385,10 @@ struct node_alloc_holder { return this->members_.m_icont; } NodeAlloc &node_alloc() - { return this->members_; } + { return static_cast(this->members_); } const NodeAlloc &node_alloc() const - { return this->members_; } + { return static_cast(this->members_); } }; } //namespace detail { diff --git a/include/boost/interprocess/containers/detail/tree.hpp b/include/boost/interprocess/containers/detail/tree.hpp index ecaae35..b2e4428 100644 --- a/include/boost/interprocess/containers/detail/tree.hpp +++ b/include/boost/interprocess/containers/detail/tree.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -732,13 +732,13 @@ class rbtree { return iterator(this->icont().erase_and_dispose(position.get(), Destroyer(this->node_alloc()))); } size_type erase(const key_type& k) - { return this->icont().erase_and_dispose(k, KeyNodeCompare(value_comp()), Destroyer(this->node_alloc())); } + { return AllocHolder::erase_key(k, KeyNodeCompare(value_comp()), alloc_version()); } iterator erase(const_iterator first, const_iterator last) - { return iterator(this->icont().erase_and_dispose(first.get(), last.get(), Destroyer(this->node_alloc()))); } + { return iterator(AllocHolder::erase_range(first.get(), last.get(), alloc_version())); } void clear() - { this->icont().clear_and_dispose(Destroyer(this->node_alloc())); } + { AllocHolder::clear(alloc_version()); } // set operations: iterator find(const key_type& k) diff --git a/include/boost/interprocess/containers/flat_map.hpp b/include/boost/interprocess/containers/flat_map.hpp index da45184..0478e2c 100644 --- a/include/boost/interprocess/containers/flat_map.hpp +++ b/include/boost/interprocess/containers/flat_map.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/containers/flat_set.hpp b/include/boost/interprocess/containers/flat_set.hpp index 00ee12b..d98677a 100644 --- a/include/boost/interprocess/containers/flat_set.hpp +++ b/include/boost/interprocess/containers/flat_set.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/containers/list.hpp b/include/boost/interprocess/containers/list.hpp index 902d89f..b9e0801 100644 --- a/include/boost/interprocess/containers/list.hpp +++ b/include/boost/interprocess/containers/list.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -404,7 +404,7 @@ class list //! //! Complexity: Linear to the number of elements in the list. void clear() - { this->icont().clear_and_dispose(Destroyer(this->node_alloc())); } + { AllocHolder::clear(alloc_version()); } //! Effects: Returns an iterator to the first element contained in the list. //! @@ -786,7 +786,7 @@ class list //! //! Complexity: Linear to the distance between first and last. iterator erase(iterator first, iterator last) - { return iterator(this->icont().erase_and_dispose(first.get(), last.get(), Destroyer(this->node_alloc()))); } + { return iterator(AllocHolder::erase_range(first.get(), last.get(), alloc_version())); } //! Effects: Assigns the n copies of val to *this. //! 
@@ -1085,6 +1085,7 @@ class list /// @cond private: + //Iterator range version template void priv_create_and_insert_nodes @@ -1160,7 +1161,7 @@ class list template void priv_insert_dispatch(iterator p, Integer n, Integer x, detail::true_) - { this->priv_create_and_insert_nodes(p, n, x); } + { this->insert(p, (size_type)n, x); } void priv_fill_assign(size_type n, const T& val) { diff --git a/include/boost/interprocess/containers/map.hpp b/include/boost/interprocess/containers/map.hpp index 896262b..b8d45f3 100644 --- a/include/boost/interprocess/containers/map.hpp +++ b/include/boost/interprocess/containers/map.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/containers/set.hpp b/include/boost/interprocess/containers/set.hpp index e9d7cb0..bc4b364 100644 --- a/include/boost/interprocess/containers/set.hpp +++ b/include/boost/interprocess/containers/set.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/containers/slist.hpp b/include/boost/interprocess/containers/slist.hpp index 195910f..9f57c24 100644 --- a/include/boost/interprocess/containers/slist.hpp +++ b/include/boost/interprocess/containers/slist.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2004-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2004-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -8,7 +8,7 @@ // ////////////////////////////////////////////////////////////////////////////// // -// This file comes from SGI's stl_slist.h file. Modified by Ion Gaztanaga 2004-2007 +// This file comes from SGI's stl_slist.h file. Modified by Ion Gaztanaga 2004-2008 // Renaming, isolating and porting to generic algorithms. Pointer typedef // set to allocator::pointer to allow placing it in shared memory. // diff --git a/include/boost/interprocess/containers/string.hpp b/include/boost/interprocess/containers/string.hpp index ac4ce46..37f3269 100644 --- a/include/boost/interprocess/containers/string.hpp +++ b/include/boost/interprocess/containers/string.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -8,7 +8,7 @@ // ////////////////////////////////////////////////////////////////////////////// // -// This file comes from SGI's string file. Modified by Ion Gaztanaga 2004-2007 +// This file comes from SGI's string file. Modified by Ion Gaztanaga 2004-2008 // Renaming, isolating and porting to generic algorithms. Pointer typedef // set to allocator::pointer to allow placing it in shared memory. 
// diff --git a/include/boost/interprocess/containers/vector.hpp b/include/boost/interprocess/containers/vector.hpp index a49e2a4..bd7fb5d 100644 --- a/include/boost/interprocess/containers/vector.hpp +++ b/include/boost/interprocess/containers/vector.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -740,6 +740,9 @@ class vector : private detail::vector_alloc_holder //Check for forward expansion same_buffer_start = ret.second && this->members_.m_start == ret.first; if(same_buffer_start){ + #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + ++this->num_expand_fwd; + #endif this->members_.m_capacity = real_cap; } //If there is no forward expansion, move objects @@ -748,6 +751,9 @@ class vector : private detail::vector_alloc_holder copy_move_it dummy_it(detail::get_pointer(this->members_.m_start)); //Backwards (and possibly forward) expansion if(ret.second){ + #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + ++this->num_expand_bwd; + #endif this->priv_range_insert_expand_backwards ( detail::get_pointer(ret.first) , real_cap @@ -758,6 +764,9 @@ class vector : private detail::vector_alloc_holder } //New buffer else{ + #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + ++this->num_alloc; + #endif this->priv_range_insert_new_allocation ( detail::get_pointer(ret.first) , real_cap @@ -1184,11 +1193,17 @@ class vector : private detail::vector_alloc_holder //If we had room or we have expanded forward if (same_buffer_start){ + #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + ++this->num_expand_fwd; + #endif this->priv_range_insert_expand_forward (detail::get_pointer(pos), first, last, n); } //Backwards (and possibly forward) expansion else if(ret.second){ + #ifdef 
BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + ++this->num_expand_bwd; + #endif this->priv_range_insert_expand_backwards ( detail::get_pointer(ret.first) , real_cap @@ -1199,6 +1214,9 @@ class vector : private detail::vector_alloc_holder } //New buffer else{ + #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + ++this->num_alloc; + #endif this->priv_range_insert_new_allocation ( detail::get_pointer(ret.first) , real_cap @@ -1778,6 +1796,15 @@ class vector : private detail::vector_alloc_holder if (n >= size()) throw std::out_of_range("vector::at"); } + + #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS + public: + unsigned int num_expand_fwd; + unsigned int num_expand_bwd; + unsigned int num_alloc; + void reset_alloc_stats() + { num_expand_fwd = num_expand_bwd = num_alloc = 0; } + #endif /// @endcond }; diff --git a/include/boost/interprocess/creation_tags.hpp b/include/boost/interprocess/creation_tags.hpp index 521a9b5..190c6bd 100644 --- a/include/boost/interprocess/creation_tags.hpp +++ b/include/boost/interprocess/creation_tags.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/algorithms.hpp b/include/boost/interprocess/detail/algorithms.hpp index cc303f4..b95d301 100644 --- a/include/boost/interprocess/detail/algorithms.hpp +++ b/include/boost/interprocess/detail/algorithms.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // // Distributed under the Boost Software License, Version 1.0. 
// (See accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/detail/atomic.hpp b/include/boost/interprocess/detail/atomic.hpp index e41248e..944d1a8 100644 --- a/include/boost/interprocess/detail/atomic.hpp +++ b/include/boost/interprocess/detail/atomic.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2006-2007 +// (C) Copyright Ion Gaztanaga 2006-2008 // (C) Copyright Markus Schoepflin 2007 // // Distributed under the Boost Software License, Version 1.0. (See diff --git a/include/boost/interprocess/detail/cast_tags.hpp b/include/boost/interprocess/detail/cast_tags.hpp index 9099c0f..6d28a24 100644 --- a/include/boost/interprocess/detail/cast_tags.hpp +++ b/include/boost/interprocess/detail/cast_tags.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/config_begin.hpp b/include/boost/interprocess/detail/config_begin.hpp index abde766..4db523a 100644 --- a/include/boost/interprocess/detail/config_begin.hpp +++ b/include/boost/interprocess/detail/config_begin.hpp @@ -9,6 +9,7 @@ #define _CRT_SECURE_NO_DEPRECATE #endif #pragma warning (push) + #pragma warning (disable : 4702) // unreachable code #pragma warning (disable : 4706) // assignment within conditional expression #pragma warning (disable : 4127) // conditional expression is constant #pragma warning (disable : 4146) // unary minus operator applied to unsigned type, result still unsigned diff --git a/include/boost/interprocess/detail/in_place_interface.hpp b/include/boost/interprocess/detail/in_place_interface.hpp index 57aeeba..dd8c4c0 100644 --- a/include/boost/interprocess/detail/in_place_interface.hpp +++ b/include/boost/interprocess/detail/in_place_interface.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/interprocess_tester.hpp b/include/boost/interprocess/detail/interprocess_tester.hpp index 0354a96..ffb66ea 100644 --- a/include/boost/interprocess/detail/interprocess_tester.hpp +++ b/include/boost/interprocess/detail/interprocess_tester.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/iterators.hpp b/include/boost/interprocess/detail/iterators.hpp index 5483de8..c566159 100644 --- a/include/boost/interprocess/detail/iterators.hpp +++ b/include/boost/interprocess/detail/iterators.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // (C) Copyright Gennaro Prota 2003 - 2004. // // Distributed under the Boost Software License, Version 1.0. @@ -428,6 +428,12 @@ class transform_iterator operator->() const { return operator_arrow_proxy(dereference()); } + Iterator & base() + { return m_it; } + + const Iterator & base() const + { return m_it; } + private: Iterator m_it; diff --git a/include/boost/interprocess/detail/managed_memory_impl.hpp b/include/boost/interprocess/detail/managed_memory_impl.hpp index 91c5e0e..c659d9d 100644 --- a/include/boost/interprocess/detail/managed_memory_impl.hpp +++ b/include/boost/interprocess/detail/managed_memory_impl.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/math_functions.hpp b/include/boost/interprocess/detail/math_functions.hpp index c754f9b..362b1cd 100644 --- a/include/boost/interprocess/detail/math_functions.hpp +++ b/include/boost/interprocess/detail/math_functions.hpp @@ -1,7 +1,7 @@ ////////////////////////////////////////////////////////////////////////////// // // (C) Copyright Stephen Cleary 2000. -// (C) Copyright Ion Gaztanaga 2007. 
+// (C) Copyright Ion Gaztanaga 2007-2008. // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/detail/min_max.hpp b/include/boost/interprocess/detail/min_max.hpp index 92eb959..2c4f3e7 100644 --- a/include/boost/interprocess/detail/min_max.hpp +++ b/include/boost/interprocess/detail/min_max.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/detail/mpl.hpp b/include/boost/interprocess/detail/mpl.hpp index 8ab2600..0ae2392 100644 --- a/include/boost/interprocess/detail/mpl.hpp +++ b/include/boost/interprocess/detail/mpl.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/detail/named_proxy.hpp b/include/boost/interprocess/detail/named_proxy.hpp index bb7588a..98a28ee 100644 --- a/include/boost/interprocess/detail/named_proxy.hpp +++ b/include/boost/interprocess/detail/named_proxy.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/os_file_functions.hpp b/include/boost/interprocess/detail/os_file_functions.hpp index bdecd8a..0a81c59 100644 --- a/include/boost/interprocess/detail/os_file_functions.hpp +++ b/include/boost/interprocess/detail/os_file_functions.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/os_thread_functions.hpp b/include/boost/interprocess/detail/os_thread_functions.hpp index 0a26e44..4373152 100644 --- a/include/boost/interprocess/detail/os_thread_functions.hpp +++ b/include/boost/interprocess/detail/os_thread_functions.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/pointer_type.hpp b/include/boost/interprocess/detail/pointer_type.hpp index 52a2274..9f17fa0 100644 --- a/include/boost/interprocess/detail/pointer_type.hpp +++ b/include/boost/interprocess/detail/pointer_type.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // (C) Copyright Gennaro Prota 2003 - 2004. // // Distributed under the Boost Software License, Version 1.0. 
diff --git a/include/boost/interprocess/detail/posix_time_types_wrk.hpp b/include/boost/interprocess/detail/posix_time_types_wrk.hpp index ad077c3..bf6e485 100644 --- a/include/boost/interprocess/detail/posix_time_types_wrk.hpp +++ b/include/boost/interprocess/detail/posix_time_types_wrk.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/segment_manager_helper.hpp b/include/boost/interprocess/detail/segment_manager_helper.hpp index 515516e..4961422 100644 --- a/include/boost/interprocess/detail/segment_manager_helper.hpp +++ b/include/boost/interprocess/detail/segment_manager_helper.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/tmp_dir_helpers.hpp b/include/boost/interprocess/detail/tmp_dir_helpers.hpp index 5b742c4..34db635 100644 --- a/include/boost/interprocess/detail/tmp_dir_helpers.hpp +++ b/include/boost/interprocess/detail/tmp_dir_helpers.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/type_traits.hpp b/include/boost/interprocess/detail/type_traits.hpp index bc3fb1a..d06d041 100644 --- a/include/boost/interprocess/detail/type_traits.hpp +++ b/include/boost/interprocess/detail/type_traits.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // (C) Copyright John Maddock 2000. -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/detail/utilities.hpp b/include/boost/interprocess/detail/utilities.hpp index 0bb4a0f..0e507f8 100644 --- a/include/boost/interprocess/detail/utilities.hpp +++ b/include/boost/interprocess/detail/utilities.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. +// (C) Copyright Ion Gaztanaga 2005-2008. // (C) Copyright Gennaro Prota 2003 - 2004. // // Distributed under the Boost Software License, Version 1.0. 
@@ -25,6 +25,9 @@ #include #include #include +#include +#include +#include #include #include @@ -70,14 +73,27 @@ template struct scoped_ptr_dealloc_functor { typedef typename Allocator::pointer pointer; + typedef detail::integral_constant::value> alloc_version; + typedef detail::integral_constant allocator_v1; + typedef detail::integral_constant allocator_v2; + private: + void priv_deallocate(const typename Allocator::pointer &p, allocator_v1) + { m_alloc.deallocate(p, 1); } + + void priv_deallocate(const typename Allocator::pointer &p, allocator_v2) + { m_alloc.deallocate_one(p); } + + public: Allocator& m_alloc; scoped_ptr_dealloc_functor(Allocator& a) - : m_alloc(a) {} + : m_alloc(a) {} void operator()(pointer ptr) - { if (ptr) m_alloc.deallocate(ptr, 1); } + { if (ptr) priv_deallocate(ptr, alloc_version()); } }; //!A deleter for scoped_ptr that deallocates the memory @@ -86,7 +102,20 @@ template struct scoped_deallocator { typedef typename Allocator::pointer pointer; + typedef detail::integral_constant::value> alloc_version; + typedef detail::integral_constant allocator_v1; + typedef detail::integral_constant allocator_v2; + private: + void priv_deallocate(allocator_v1) + { m_alloc.deallocate(m_ptr, 1); } + + void priv_deallocate(allocator_v2) + { m_alloc.deallocate_one(m_ptr); } + + public: pointer m_ptr; Allocator& m_alloc; @@ -94,7 +123,7 @@ struct scoped_deallocator : m_ptr(p), m_alloc(a) {} ~scoped_deallocator() - { if (m_ptr) m_alloc.deallocate(m_ptr, 1); } + { if (m_ptr)priv_deallocate(alloc_version()); } void release() { m_ptr = 0; } @@ -189,9 +218,22 @@ template class allocator_destroyer { typedef typename A::value_type value_type; + typedef detail::integral_constant::value> alloc_version; + typedef detail::integral_constant allocator_v1; + typedef detail::integral_constant allocator_v2; + private: A & a_; + private: + void priv_deallocate(const typename A::pointer &p, allocator_v1) + { a_.deallocate(p, 1); } + + void priv_deallocate(const typename 
A::pointer &p, allocator_v2) + { a_.deallocate_one(p); } + public: allocator_destroyer(A &a) : a_(a) @@ -200,35 +242,86 @@ class allocator_destroyer void operator()(const typename A::pointer &p) { detail::get_pointer(p)->~value_type(); - a_.deallocate(p, 1); + priv_deallocate(p, alloc_version()); } }; -//!A class used for exception-safe multi-allocation + construction. -template -struct multiallocation_deallocator +template +class allocator_destroyer_and_chain_builder { - typedef typename Allocator::multiallocation_iterator multiallocation_iterator; + typedef typename A::value_type value_type; + typedef typename A::multiallocation_iterator multiallocation_iterator; + typedef typename A::multiallocation_chain multiallocation_chain; - multiallocation_iterator m_itbeg; - Allocator& m_alloc; + A & a_; + multiallocation_chain &c_; - multiallocation_deallocator(multiallocation_iterator itbeg, Allocator& a) - : m_itbeg(itbeg), m_alloc(a) {} + public: + allocator_destroyer_and_chain_builder(A &a, multiallocation_chain &c) + : a_(a), c_(c) + {} - ~multiallocation_deallocator() - { - multiallocation_iterator endit; - while(m_itbeg != endit){ - m_alloc.deallocate(&*m_itbeg, 1); - ++m_itbeg; - } + void operator()(const typename A::pointer &p) + { + value_type *vp = detail::get_pointer(p); + vp->~value_type(); + c_.push_back(vp); } - - void increment() - { ++m_itbeg; } }; +template +class allocator_multialloc_chain_node_deallocator +{ + typedef typename A::value_type value_type; + typedef typename A::multiallocation_iterator multiallocation_iterator; + typedef typename A::multiallocation_chain multiallocation_chain; + typedef allocator_destroyer_and_chain_builder chain_builder; + + A & a_; + multiallocation_chain c_; + + public: + allocator_multialloc_chain_node_deallocator(A &a) + : a_(a), c_() + {} + + chain_builder get_chain_builder() + { return chain_builder(a_, c_); } + + ~allocator_multialloc_chain_node_deallocator() + { + multiallocation_iterator it(c_.get_it()); + if(it 
!= multiallocation_iterator()) + a_.deallocate_individual(it); + } +}; + +template +class allocator_multialloc_chain_array_deallocator +{ + typedef typename A::value_type value_type; + typedef typename A::multiallocation_iterator multiallocation_iterator; + typedef typename A::multiallocation_chain multiallocation_chain; + typedef allocator_destroyer_and_chain_builder chain_builder; + + A & a_; + multiallocation_chain c_; + + public: + allocator_multialloc_chain_array_deallocator(A &a) + : a_(a), c_() + {} + + chain_builder get_chain_builder() + { return chain_builder(a_, c_); } + + ~allocator_multialloc_chain_array_deallocator() + { + multiallocation_iterator it(c_.get_it()); + if(it != multiallocation_iterator()) + a_.deallocate_many(it); + } +}; //!A class used for exception-safe multi-allocation + construction. template @@ -577,6 +670,14 @@ inline void swap(pair&&x, pair&&y) } #endif +template +struct cast_functor +{ + typedef typename detail::add_reference::type result_type; + result_type operator()(char &ptr) const + { return *static_cast(static_cast(&ptr)); } +}; + } //namespace detail { //!The pair is movable if any of its members is movable diff --git a/include/boost/interprocess/detail/version_type.hpp b/include/boost/interprocess/detail/version_type.hpp index 7b71268..ed43623 100644 --- a/include/boost/interprocess/detail/version_type.hpp +++ b/include/boost/interprocess/detail/version_type.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/win32_api.hpp b/include/boost/interprocess/detail/win32_api.hpp index 1800ea5..b6771d0 100644 --- a/include/boost/interprocess/detail/win32_api.hpp +++ b/include/boost/interprocess/detail/win32_api.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/detail/workaround.hpp b/include/boost/interprocess/detail/workaround.hpp index 6265d8d..b22e283 100644 --- a/include/boost/interprocess/detail/workaround.hpp +++ b/include/boost/interprocess/detail/workaround.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -19,8 +19,8 @@ #if defined(_POSIX_THREAD_PROCESS_SHARED) # if !((_XOPEN_VERSION >= 600) && (_POSIX_THREAD_PROCESS_SHARED - 0 <= 0)) - // Cygwin defines _POSIX_THREAD_PROCESS_SHARED but does not support it. - // Mac Os X >= Leopard defines _POSIX_THREAD_PROCESS_SHARED but it does not seem to work + //Cygwin defines _POSIX_THREAD_PROCESS_SHARED but does not implement it. + //Mac Os X >= Leopard defines _POSIX_THREAD_PROCESS_SHARED but does not seem to work. 
# if !defined(__CYGWIN__) && !defined(__APPLE__) # define BOOST_INTERPROCESS_POSIX_PROCESS_SHARED # endif diff --git a/include/boost/interprocess/errors.hpp b/include/boost/interprocess/errors.hpp index d7ba469..1374c8e 100644 --- a/include/boost/interprocess/errors.hpp +++ b/include/boost/interprocess/errors.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/exceptions.hpp b/include/boost/interprocess/exceptions.hpp index dcc4f6d..8bbb211 100644 --- a/include/boost/interprocess/exceptions.hpp +++ b/include/boost/interprocess/exceptions.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/file_mapping.hpp b/include/boost/interprocess/file_mapping.hpp index ac81ca0..a55ff03 100644 --- a/include/boost/interprocess/file_mapping.hpp +++ b/include/boost/interprocess/file_mapping.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/indexes/flat_map_index.hpp b/include/boost/interprocess/indexes/flat_map_index.hpp index 7ea1600..ad338da 100644 --- a/include/boost/interprocess/indexes/flat_map_index.hpp +++ b/include/boost/interprocess/indexes/flat_map_index.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/indexes/iset_index.hpp b/include/boost/interprocess/indexes/iset_index.hpp index f29a771..5a4a1e6 100644 --- a/include/boost/interprocess/indexes/iset_index.hpp +++ b/include/boost/interprocess/indexes/iset_index.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/indexes/iunordered_set_index.hpp b/include/boost/interprocess/indexes/iunordered_set_index.hpp index dd5e1c1..d247adc 100644 --- a/include/boost/interprocess/indexes/iunordered_set_index.hpp +++ b/include/boost/interprocess/indexes/iunordered_set_index.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/indexes/map_index.hpp b/include/boost/interprocess/indexes/map_index.hpp index df568dc..024c647 100644 --- a/include/boost/interprocess/indexes/map_index.hpp +++ b/include/boost/interprocess/indexes/map_index.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/indexes/null_index.hpp b/include/boost/interprocess/indexes/null_index.hpp index 5d3be0d..1cc2908 100644 --- a/include/boost/interprocess/indexes/null_index.hpp +++ b/include/boost/interprocess/indexes/null_index.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/indexes/unordered_map_index.hpp b/include/boost/interprocess/indexes/unordered_map_index.hpp index 44f226c..8c2f670 100644 --- a/include/boost/interprocess/indexes/unordered_map_index.hpp +++ b/include/boost/interprocess/indexes/unordered_map_index.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/interprocess_fwd.hpp b/include/boost/interprocess/interprocess_fwd.hpp index 4244e62..acb1b3e 100644 --- a/include/boost/interprocess/interprocess_fwd.hpp +++ b/include/boost/interprocess/interprocess_fwd.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -129,13 +129,19 @@ class private_node_allocator; template class cached_node_allocator; -template +template class adaptive_pool; -template +template class private_adaptive_pool; -template +template class cached_adaptive_pool; @@ -151,10 +157,10 @@ class offset_ptr; ////////////////////////////////////////////////////////////////////////////// //Single segment memory allocation algorithms -template//offset_ptr > +template > class simple_seq_fit; -template > +template, std::size_t MemAlignment = 0> class rbtree_best_fit; ////////////////////////////////////////////////////////////////////////////// diff --git a/include/boost/interprocess/ipc/message_queue.hpp b/include/boost/interprocess/ipc/message_queue.hpp index b6366f8..b28ba68 100644 --- a/include/boost/interprocess/ipc/message_queue.hpp +++ b/include/boost/interprocess/ipc/message_queue.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/managed_external_buffer.hpp b/include/boost/interprocess/managed_external_buffer.hpp index 23fe0ac..be18e4d 100644 --- a/include/boost/interprocess/managed_external_buffer.hpp +++ b/include/boost/interprocess/managed_external_buffer.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -20,6 +20,7 @@ #include #include #include +#include //!\file //!Describes a named user memory allocation user class. @@ -49,6 +50,8 @@ class basic_managed_external_buffer basic_managed_external_buffer (create_only_t, void *addr, std::size_t size) { + //Check if alignment is correct + assert((0 == (((std::size_t)addr) & (AllocationAlgorithm::Alignment - std::size_t(1u))))); if(!base_t::create_impl(addr, size)){ throw interprocess_exception(); } @@ -58,6 +61,8 @@ class basic_managed_external_buffer basic_managed_external_buffer (open_only_t, void *addr, std::size_t size) { + //Check if alignment is correct + assert((0 == (((std::size_t)addr) & (AllocationAlgorithm::Alignment - std::size_t(1u))))); if(!base_t::open_impl(addr, size)){ throw interprocess_exception(); } diff --git a/include/boost/interprocess/managed_heap_memory.hpp b/include/boost/interprocess/managed_heap_memory.hpp index 77843f5..36e97bd 100644 --- a/include/boost/interprocess/managed_heap_memory.hpp +++ b/include/boost/interprocess/managed_heap_memory.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. 
Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/managed_mapped_file.hpp b/include/boost/interprocess/managed_mapped_file.hpp index bc0355b..cdd6267 100644 --- a/include/boost/interprocess/managed_mapped_file.hpp +++ b/include/boost/interprocess/managed_mapped_file.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/managed_shared_memory.hpp b/include/boost/interprocess/managed_shared_memory.hpp index e49fb22..551a7d3 100644 --- a/include/boost/interprocess/managed_shared_memory.hpp +++ b/include/boost/interprocess/managed_shared_memory.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/managed_windows_shared_memory.hpp b/include/boost/interprocess/managed_windows_shared_memory.hpp index 105e74b..b4acdf0 100644 --- a/include/boost/interprocess/managed_windows_shared_memory.hpp +++ b/include/boost/interprocess/managed_windows_shared_memory.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/mapped_region.hpp b/include/boost/interprocess/mapped_region.hpp index d859a52..71a28fb 100644 --- a/include/boost/interprocess/mapped_region.hpp +++ b/include/boost/interprocess/mapped_region.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/mem_algo/detail/mem_algo_common.hpp b/include/boost/interprocess/mem_algo/detail/mem_algo_common.hpp index a9b6219..acd2c16 100644 --- a/include/boost/interprocess/mem_algo/detail/mem_algo_common.hpp +++ b/include/boost/interprocess/mem_algo/detail/mem_algo_common.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -104,6 +104,167 @@ class basic_multiallocation_iterator multi_allocation_next next_alloc_; }; +template +class basic_multiallocation_chain +{ + private: + basic_multiallocation_iterator it_; + VoidPointer last_mem_; + std::size_t num_mem_; + + basic_multiallocation_chain(const basic_multiallocation_chain &); + basic_multiallocation_chain &operator=(const basic_multiallocation_chain &); + + public: + typedef basic_multiallocation_iterator multiallocation_iterator; + + basic_multiallocation_chain() + : it_(0), last_mem_(0), num_mem_(0) + {} + + void push_back(void *mem) + { + typedef multi_allocation_next next_impl_t; + next_impl_t * tmp_mem = static_cast(mem); + + if(!this->last_mem_){ + this->it_ = basic_multiallocation_iterator(tmp_mem); + } + else{ + static_cast(detail::get_pointer(this->last_mem_))->next_ = tmp_mem; + } + tmp_mem->next_ = 0; + this->last_mem_ = tmp_mem; + ++num_mem_; + } + + void push_back(multiallocation_iterator it, std::size_t n) + { + typedef multi_allocation_next next_impl_t; + next_impl_t * tmp_mem = (next_impl_t*)(&*it); + + if(!this->last_mem_){ + this->it_ = it; + } + else{ + static_cast(detail::get_pointer(this->last_mem_))->next_ = tmp_mem; + } + tmp_mem->next_ = 0; + this->last_mem_ = tmp_mem; + ++num_mem_; + } + + void push_front(void *mem) + { + typedef multi_allocation_next next_impl_t; + + if(!this->last_mem_){ + push_back(mem); + } + else{ + next_impl_t * tmp_mem = static_cast(mem); + next_impl_t * old_first = (next_impl_t*)(&*this->it_); + static_cast(mem)->next_ = old_first; + this->it_ = basic_multiallocation_iterator(tmp_mem); + ++num_mem_; + } + } + + void swap(basic_multiallocation_chain &other_chain) + { + std::swap(this->it_, other_chain.it_); + std::swap(this->last_mem_, other_chain.last_mem_); + std::swap(this->num_mem_, other_chain.num_mem_); + } + + void splice_back(basic_multiallocation_chain &other_chain) + { + typedef 
multi_allocation_next next_impl_t; + multiallocation_iterator end_it; + multiallocation_iterator other_it = other_chain.get_it(); + multiallocation_iterator this_it = this->get_it(); + if(end_it == other_it){ + return; + } + else if(end_it == this_it){ + this->swap(other_chain); return; + } + + static_cast(detail::get_pointer(this->last_mem_))->next_ + = (next_impl_t*)&*this->it_; + this->last_mem_ = other_chain.last_mem_; + this->num_mem_ += other_chain.num_mem_; + } + + void *pop_front() + { + multiallocation_iterator itend; + if(this->it_ == itend){ + this->last_mem_= 0; + this->num_mem_ = 0; + return 0; + } + else{ + void *addr = &*it_; + ++it_; + --num_mem_; + if(!num_mem_){ + this->last_mem_ = 0; + this->it_ = multiallocation_iterator(); + } + return addr; + } + } + + bool empty() const + { return !num_mem_; } + + multiallocation_iterator get_it() const + { return it_; } + + std::size_t size() const + { return num_mem_; } +}; + +template +class allocator_multiallocation_chain +{ + typedef typename detail:: + pointer_to_other::type + void_ptr; + + typedef typename Allocator::multiallocation_iterator multiallocation_iterator; + basic_multiallocation_chain chain_; + + public: + + allocator_multiallocation_chain() + : chain_() + {} + + void push_back(void *mem) + { chain_.push_back(mem); } + + multiallocation_iterator get_it() const + { return multiallocation_iterator(chain_.get_it()); } +}; + + +#define BOOST_MULTIALLOC_IT_CHAIN_INIT(IT_CHAIN) ((IT_CHAIN).it.next = 0, (IT_CHAIN).last_mem = 0) +#define BOOST_MULTIALLOC_IT_CHAIN_ADD(IT_CHAIN, MEM)\ + do{\ + multialloc_it_t *____tmp_mem____ = (multialloc_it_t*)(MEM);\ + if(!IT_CHAIN.last_mem){\ + (IT_CHAIN).it.next = ____tmp_mem____;\ + }else{\ + ((multialloc_it_t*)(IT_CHAIN.last_mem))->next = ____tmp_mem____;\ + }\ + ____tmp_mem____->next = 0;\ + IT_CHAIN.last_mem = ____tmp_mem____;\ + }while(0) + +#define BOOST_MULTIALLOC_IT_CHAIN_IT(IT_CHAIN) ((IT_CHAIN).it) + +//!This class implements several allocation functions 
shared by different algorithms //!(aligned allocation, multiple allocation...). @@ -125,6 +286,7 @@ class memory_algorithm_common static const std::size_t AllocatedCtrlUnits = MemoryAlgorithm::AllocatedCtrlUnits; static const std::size_t BlockCtrlBytes = MemoryAlgorithm::BlockCtrlBytes; static const std::size_t BlockCtrlUnits = MemoryAlgorithm::BlockCtrlUnits; + static const std::size_t UsableByPreviousChunk = MemoryAlgorithm::UsableByPreviousChunk; static void assert_alignment(const void *ptr) { assert_alignment((std::size_t)ptr); } @@ -165,10 +327,11 @@ class memory_algorithm_common static void* allocate_aligned (MemoryAlgorithm *memory_algo, std::size_t nbytes, std::size_t alignment) { + //Ensure power of 2 if ((alignment & (alignment - std::size_t(1u))) != 0){ //Alignment is not power of two - BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) != 0); + BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) == 0); return 0; } @@ -176,6 +339,9 @@ class memory_algorithm_common if(alignment <= Alignment){ return memory_algo->priv_allocate(allocate_new, nbytes, nbytes, real_size).first; } + + if(nbytes > UsableByPreviousChunk) + nbytes -= UsableByPreviousChunk; //We can find a aligned portion if we allocate a chunk that has alignment //nbytes + alignment bytes or more. 
@@ -191,7 +357,9 @@ class memory_algorithm_common // | MBU | // ----------------------------------------------------- std::size_t request = - minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes); + minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes + //prevsize - UsableByPreviousChunk + ); //Now allocate the buffer void *buffer = memory_algo->priv_allocate(allocate_new, request, request, real_size).first; @@ -207,7 +375,8 @@ class memory_algorithm_common max_value(ceil_units(nbytes) + AllocatedCtrlUnits, std::size_t(MinBlockUnits)); //We can create a new block in the end of the segment if(old_size >= (first_min_units + MinBlockUnits)){ - block_ctrl *second = new((char*)first + Alignment*first_min_units) block_ctrl; + //block_ctrl *second = new((char*)first + Alignment*first_min_units) block_ctrl; + block_ctrl *second = (block_ctrl *)((char*)first + Alignment*first_min_units); first->m_size = first_min_units; second->m_size = old_size - first->m_size; BOOST_ASSERT(second->m_size >= MinBlockUnits); @@ -285,6 +454,7 @@ class memory_algorithm_common ,const std::size_t max_size, const std::size_t preferred_size ,std::size_t &received_size) { + (void)memory_algo; //Obtain the real block block_ctrl *block = memory_algo->priv_get_block(ptr); std::size_t old_block_units = block->m_size; @@ -296,11 +466,11 @@ class memory_algorithm_common assert_alignment(ptr); //Put this to a safe value - received_size = (old_block_units - AllocatedCtrlUnits)*Alignment; + received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk; //Now translate it to Alignment units - const std::size_t max_user_units = floor_units(max_size); - const std::size_t preferred_user_units = ceil_units(preferred_size); + const std::size_t max_user_units = floor_units(max_size - UsableByPreviousChunk); + const std::size_t preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk); //Check if rounded max and preferred are possible 
correct if(max_user_units < preferred_user_units) @@ -331,7 +501,7 @@ class memory_algorithm_common } //Update new size - received_size = shrunk_user_units*Alignment; + received_size = shrunk_user_units*Alignment + UsableByPreviousChunk; return true; } @@ -350,22 +520,23 @@ class memory_algorithm_common } //Check if the old size was just the shrunk size (no splitting) - if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size)) + if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk)) return true; //Now we can just rewrite the size of the old buffer - block->m_size = received_size/Alignment + AllocatedCtrlUnits; + block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits; BOOST_ASSERT(block->m_size >= BlockCtrlUnits); - memory_algo->priv_mark_new_allocated_block(block); //We create the new block - block_ctrl *new_block = new(reinterpret_cast - (detail::char_ptr_cast(block) + block->m_size*Alignment)) block_ctrl; - +// block_ctrl *new_block = new(reinterpret_cast +// (detail::char_ptr_cast(block) + block->m_size*Alignment)) block_ctrl; + block_ctrl *new_block = reinterpret_cast + (detail::char_ptr_cast(block) + block->m_size*Alignment); //Write control data to simulate this new block was previously allocated //and deallocate it new_block->m_size = old_block_units - block->m_size; BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits); + memory_algo->priv_mark_new_allocated_block(block); memory_algo->priv_mark_new_allocated_block(new_block); memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block)); return true; @@ -401,11 +572,11 @@ class memory_algorithm_common multi_allocation_next_ptr first = 0, previous = 0; std::size_t low_idx = 0; while(low_idx < n_elements){ - std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes; + std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk; std::size_t min_allocation = 
(!sizeof_element) ? elem_units : memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element); - min_allocation = min_allocation*Alignment - AllocatedCtrlBytes; + min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk; std::size_t received_size; std::pair ret = memory_algo->priv_allocate @@ -419,6 +590,7 @@ class memory_algorithm_common char *block_address = (char*)block; std::size_t total_used_units = 0; +// block_ctrl *prev_block = 0; while(total_used_units < received_units){ if(sizeof_element){ elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element); @@ -428,7 +600,10 @@ class memory_algorithm_common break; total_request_units -= elem_units; //This is the position where the new block must be created - block_ctrl *new_block = new(block_address)block_ctrl; +// if(prev_block) +// memory_algo->priv_mark_new_allocated_block(prev_block); + block_ctrl *new_block = (block_ctrl *)(block_address); +// block_ctrl *new_block = new(block_address)block_ctrl; assert_alignment(new_block); //The last block should take all the remaining space @@ -446,7 +621,7 @@ class memory_algorithm_common //split it obtaining a new free memory block do it. 
if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){ std::size_t shrunk_received; - std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes; + std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk; bool ret = shrink (memory_algo ,memory_algo->priv_get_user_buffer(new_block) @@ -457,7 +632,7 @@ class memory_algorithm_common BOOST_ASSERT(ret); //Some sanity checks BOOST_ASSERT(shrunk_request == shrunk_received); - BOOST_ASSERT(elem_units == (shrunk_request/Alignment + AllocatedCtrlUnits)); + BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits)); //"new_block->m_size" must have been reduced to elem_units by "shrink" BOOST_ASSERT(new_block->m_size == elem_units); //Now update the total received units with the reduction @@ -483,6 +658,7 @@ class memory_algorithm_common } previous = p; ++low_idx; + //prev_block = new_block; } //Sanity check BOOST_ASSERT(total_used_units == received_units); diff --git a/include/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp b/include/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp index 7300a7b..a724189 100644 --- a/include/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp +++ b/include/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -64,6 +64,8 @@ class simple_seq_fit_impl typedef detail::basic_multiallocation_iterator multiallocation_iterator; + typedef detail::basic_multiallocation_chain + multiallocation_chain; private: class block_ctrl; @@ -137,6 +139,9 @@ class simple_seq_fit_impl //!Multiple element allocation, different size multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element); + //!Multiple element deallocation + void deallocate_many(multiallocation_iterator it); + /// @endcond //!Deallocates previously allocated bytes @@ -170,8 +175,13 @@ class simple_seq_fit_impl std::size_t preferred_size,std::size_t &received_size, T *reuse_ptr = 0); + std::pair + raw_allocation_command (allocation_type command, std::size_t limit_size, + std::size_t preferred_size,std::size_t &received_size, + void *reuse_ptr = 0, std::size_t sizeof_object = 1); + //!Returns the size of the buffer previously allocated pointed by ptr - std::size_t size(void *ptr) const; + std::size_t size(const void *ptr) const; //!Allocates aligned bytes, returns 0 if there is not more memory. 
//!Alignment must be power of 2 @@ -247,13 +257,16 @@ class simple_seq_fit_impl void priv_mark_new_allocated_block(block_ctrl *block); + public: static const std::size_t Alignment = detail::alignment_of::value; + private: static const std::size_t BlockCtrlBytes = detail::ct_rounded_size::value; static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment; static const std::size_t MinBlockUnits = BlockCtrlUnits; static const std::size_t MinBlockSize = MinBlockUnits*Alignment; static const std::size_t AllocatedCtrlBytes = BlockCtrlBytes; static const std::size_t AllocatedCtrlUnits = BlockCtrlUnits; + static const std::size_t UsableByPreviousChunk = 0; public: static const std::size_t PayloadPerAllocation = BlockCtrlBytes; @@ -549,17 +562,32 @@ inline std::pair simple_seq_fit_impl:: std::size_t preferred_size,std::size_t &received_size, T *reuse_ptr) { - if(command & try_shrink_in_place){ - bool success = - algo_impl_t::try_shrink(this, reuse_ptr, limit_size, preferred_size, received_size); - return std::pair ((success ? reuse_ptr : 0), true); - } std::pair ret = priv_allocation_command (command, limit_size, preferred_size, received_size, reuse_ptr, sizeof(T)); + BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of::value)); return std::pair(static_cast(ret.first), ret.second); } +template +inline std::pair simple_seq_fit_impl:: + raw_allocation_command (allocation_type command, std::size_t limit_objects, + std::size_t preferred_objects,std::size_t &received_objects, + void *reuse_ptr, std::size_t sizeof_object) +{ + if(!sizeof_object) + return std::pair(0, 0); + if(command & try_shrink_in_place){ + bool success = algo_impl_t::try_shrink + ( this, reuse_ptr, limit_objects*sizeof_object + , preferred_objects*sizeof_object, received_objects); + received_objects /= sizeof_object; + return std::pair ((success ? 
reuse_ptr : 0), true); + } + return priv_allocation_command + (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object); +} + template inline std::pair simple_seq_fit_impl:: priv_allocation_command (allocation_type command, std::size_t limit_size, @@ -589,13 +617,13 @@ inline std::pair simple_seq_fit_impl:: template inline std::size_t simple_seq_fit_impl:: - size(void *ptr) const + size(const void *ptr) const { //We need no synchronization since this block is not going //to be modified //Obtain the real size of the block block_ctrl *block = reinterpret_cast - (priv_get_block(detail::char_ptr_cast(ptr))); + (priv_get_block(detail::char_ptr_cast(const_cast(ptr)))); return block->get_user_bytes(); } @@ -689,6 +717,20 @@ inline typename simple_seq_fit_impl::multiallocation_i allocate_many(this, elem_bytes, num_elements); } +template +inline void simple_seq_fit_impl:: + deallocate_many(typename simple_seq_fit_impl::multiallocation_iterator it) +{ + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + while(it){ + void *addr = &*it; + ++it; + this->priv_deallocate(addr); + } +} + template inline typename simple_seq_fit_impl::multiallocation_iterator simple_seq_fit_impl:: diff --git a/include/boost/interprocess/mem_algo/rbtree_best_fit.hpp b/include/boost/interprocess/mem_algo/rbtree_best_fit.hpp index 19100ab..0d7bab9 100644 --- a/include/boost/interprocess/mem_algo/rbtree_best_fit.hpp +++ b/include/boost/interprocess/mem_algo/rbtree_best_fit.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -60,7 +60,7 @@ namespace interprocess { //!This class implements an algorithm that stores the free nodes in a red-black tree //!to have logarithmic search/insert times. -template +template class rbtree_best_fit { /// @cond @@ -77,6 +77,8 @@ class rbtree_best_fit typedef VoidPointer void_pointer; typedef detail::basic_multiallocation_iterator multiallocation_iterator; + typedef detail::basic_multiallocation_chain + multiallocation_chain; /// @cond @@ -106,9 +108,9 @@ class rbtree_best_fit { //!This block's memory size (including block_ctrl //!header) in Alignment units - std::size_t m_prev_size : sizeof(std::size_t)*CHAR_BIT - 1; - std::size_t m_end : 1; - std::size_t m_size : sizeof(std::size_t)*CHAR_BIT - 1; + std::size_t m_prev_size : sizeof(std::size_t)*CHAR_BIT; + std::size_t m_size : sizeof(std::size_t)*CHAR_BIT - 2; + std::size_t m_prev_allocated : 1; std::size_t m_allocated : 1; }; @@ -117,7 +119,7 @@ class rbtree_best_fit : public SizeHolder, public TreeHook { block_ctrl() - { this->m_end = 0; this->m_size = 0; this->m_allocated = 0; } + { this->m_size = 0; this->m_allocated = 0, this->m_prev_allocated = 0; } friend bool operator<(const block_ctrl &a, const block_ctrl &b) { return a.m_size < b.m_size; } @@ -195,6 +197,9 @@ class rbtree_best_fit //!Multiple element allocation, different size multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element); + //!Multiple element allocation, different size + void deallocate_many(multiallocation_iterator it); + /// @endcond //!Deallocates previously allocated bytes @@ -230,6 +235,11 @@ class rbtree_best_fit std::size_t preferred_size,std::size_t &received_size, T *reuse_ptr = 0); + std::pair + raw_allocation_command (allocation_type command, std::size_t limit_object, + std::size_t preferred_object,std::size_t &received_object, + void *reuse_ptr = 0, 
std::size_t sizeof_object = 1); + //!Returns the size of the buffer previously allocated pointed by ptr std::size_t size(const void *ptr) const; @@ -279,18 +289,15 @@ class rbtree_best_fit ,bool only_preferred_backwards ,std::size_t backwards_multiple); - //!Set the size in the tail of the block - void priv_tail_size(block_ctrl *ptr, std::size_t size); - - //!Real private aligned allocation function - //void* priv_allocate_aligned (std::size_t nbytes, std::size_t alignment); - - //!Get the size in the tail of the block - std::size_t priv_tail_size(block_ctrl *ptr); - - //!Get the size in the tail of the previous block + //!Get poitner of the previous block (previous block must be free) block_ctrl * priv_prev_block(block_ctrl *ptr); + //!Returns true if the previous block is allocated + bool priv_is_prev_allocated(block_ctrl *ptr); + + //!Get a pointer of the "end" block from the first block of the segment + block_ctrl * priv_end_block(block_ctrl *first_segment_block); + //!Get the size in the tail of the previous block block_ctrl * priv_next_block(block_ctrl *ptr); @@ -316,44 +323,50 @@ class rbtree_best_fit void priv_mark_new_allocated_block(block_ctrl *block); - static const std::size_t Alignment = detail::alignment_of::value; + public: + + static const std::size_t Alignment = !MemAlignment + ? 
detail::alignment_of::value + : MemAlignment + ; + + private: //Due to embedded bits in size, Alignment must be at least 2 - BOOST_STATIC_ASSERT((Alignment >= 2)); + BOOST_STATIC_ASSERT((Alignment >= 4)); //Due to rbtree size optimizations, Alignment must have at least pointer alignment BOOST_STATIC_ASSERT((Alignment >= detail::alignment_of::value)); static const std::size_t AlignmentMask = (Alignment - 1); static const std::size_t BlockCtrlBytes = detail::ct_rounded_size::value; static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment; - static const std::size_t AllocatedCtrlBytes = detail::ct_rounded_size::value; - static const std::size_t AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment; + static const std::size_t AllocatedCtrlBytes = detail::ct_rounded_size::value; + static const std::size_t AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment; static const std::size_t EndCtrlBlockBytes = detail::ct_rounded_size::value; - static const std::size_t EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment; - static const std::size_t MinBlockUnits = BlockCtrlUnits; + static const std::size_t EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment; + static const std::size_t MinBlockUnits = BlockCtrlUnits; + static const std::size_t UsableByPreviousChunk = sizeof(std::size_t); //Make sure the maximum alignment is power of two BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - std::size_t(1u))))); /// @endcond public: - static const std::size_t PayloadPerAllocation = AllocatedCtrlBytes; + static const std::size_t PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk; }; -template -inline std::size_t rbtree_best_fit +template +inline std::size_t rbtree_best_fit ::priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes) { - //First align "this" pointer - std::size_t uint_this = (std::size_t)this_ptr; - std::size_t uint_aligned_this = uint_this/Alignment*Alignment; - std::size_t this_disalignment = (uint_this - uint_aligned_this); - std::size_t 
block1_off = - detail::get_rounded_size(sizeof(rbtree_best_fit) + extra_hdr_bytes + this_disalignment, Alignment) - - this_disalignment; - algo_impl_t::assert_alignment(this_disalignment + block1_off); + std::size_t uint_this = (std::size_t)this_ptr; + std::size_t main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes; + std::size_t aligned_main_hdr_end = detail::get_rounded_size(main_hdr_end, Alignment); + std::size_t block1_off = aligned_main_hdr_end - uint_this; + algo_impl_t::assert_alignment(aligned_main_hdr_end); + algo_impl_t::assert_alignment(uint_this + block1_off); return block1_off; } -template -inline rbtree_best_fit:: +template +inline rbtree_best_fit:: rbtree_best_fit(std::size_t size, std::size_t extra_hdr_bytes) { //Initialize the header @@ -368,26 +381,25 @@ inline rbtree_best_fit:: priv_add_segment(detail::char_ptr_cast(this) + block1_off, size - block1_off); } -template -inline rbtree_best_fit::~rbtree_best_fit() +template +inline rbtree_best_fit::~rbtree_best_fit() { //There is a memory leak! 
// assert(m_header.m_allocated == 0); // assert(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root)); } -template -void rbtree_best_fit::grow(std::size_t extra_size) -{ +template +void rbtree_best_fit::grow(std::size_t extra_size) +{ //Get the address of the first block std::size_t block1_off = priv_first_block_offset(this, m_header.m_extra_hdr_bytes); block_ctrl *first_block = reinterpret_cast (detail::char_ptr_cast(this) + block1_off); - block_ctrl *old_end_block = priv_prev_block(first_block); + block_ctrl *old_end_block = priv_end_block(first_block); assert(priv_is_allocated_block(old_end_block)); - assert(old_end_block->m_end); std::size_t old_border_offset = (detail::char_ptr_cast(old_end_block) - detail::char_ptr_cast(this)) + EndCtrlBlockBytes; @@ -406,18 +418,16 @@ void rbtree_best_fit::grow(std::size_t extra_size) (detail::char_ptr_cast(old_end_block) + align_offset*Alignment); new_end_block->m_size = (detail::char_ptr_cast(first_block) - detail::char_ptr_cast(new_end_block))/Alignment; + first_block->m_prev_size = new_end_block->m_size; assert(first_block == priv_next_block(new_end_block)); - new_end_block->m_end = 1; priv_mark_new_allocated_block(new_end_block); - - assert(new_end_block == priv_prev_block(first_block)); + + assert(new_end_block == priv_end_block(first_block)); //The old end block is the new block - std::size_t old_end_prev = old_end_block->m_prev_size; - block_ctrl *new_block = new(old_end_block)block_ctrl; + block_ctrl *new_block = old_end_block; new_block->m_size = (detail::char_ptr_cast(new_end_block) - detail::char_ptr_cast(new_block))/Alignment; - new_block->m_prev_size = old_end_prev; assert(new_block->m_size >= BlockCtrlUnits); priv_mark_new_allocated_block(new_block); assert(priv_next_block(new_block) == new_end_block); @@ -428,8 +438,8 @@ void rbtree_best_fit::grow(std::size_t extra_size) this->priv_deallocate(priv_get_user_buffer(new_block)); } -template -void rbtree_best_fit::shrink_to_fit() +template +void 
rbtree_best_fit::shrink_to_fit() { //Get the address of the first block std::size_t block1_off = @@ -439,32 +449,36 @@ void rbtree_best_fit::shrink_to_fit() (detail::char_ptr_cast(this) + block1_off); algo_impl_t::assert_alignment(first_block); - block_ctrl *old_end_block = priv_prev_block(first_block); + block_ctrl *old_end_block = priv_end_block(first_block); algo_impl_t::assert_alignment(old_end_block); assert(priv_is_allocated_block(old_end_block)); - assert(old_end_block->m_end); - block_ctrl *last_block = priv_prev_block(old_end_block); algo_impl_t::assert_alignment(old_end_block); - std::size_t old_end_block_size = old_end_block->m_size; + std::size_t old_end_block_size = old_end_block->m_size; - void *unique_block = 0; - if(last_block == first_block){ + void *unique_buffer = 0; + block_ctrl *last_block; + if(priv_next_block(first_block) == old_end_block){ std::size_t ignore; - unique_block = priv_allocate(allocate_new, 0, 0, ignore).first; - if(!unique_block) + unique_buffer = priv_allocate(allocate_new, 0, 0, ignore).first; + if(!unique_buffer) return; + algo_impl_t::assert_alignment(unique_buffer); + block_ctrl *unique_block = priv_get_block(unique_buffer); + assert(priv_is_allocated_block(unique_block)); algo_impl_t::assert_alignment(unique_block); - last_block = priv_prev_block(old_end_block); + last_block = priv_next_block(unique_block); + assert(!priv_is_allocated_block(last_block)); algo_impl_t::assert_alignment(last_block); } + else{ + if(priv_is_prev_allocated(old_end_block)) + return; + last_block = priv_prev_block(old_end_block); + } - //The last block must be free to be able to shrink - if(priv_is_allocated_block(last_block)) - return; - - std::size_t last_block_size = last_block->m_size; + std::size_t last_block_size = last_block->m_size; //Erase block from the free tree, since we will erase it m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block)); @@ -474,20 +488,23 @@ void rbtree_best_fit::shrink_to_fit() block_ctrl *new_end_block 
= last_block; algo_impl_t::assert_alignment(new_end_block); - priv_mark_as_allocated_block(new_end_block); - new_end_block->m_end = 1; new_end_block->m_size = old_end_block_size + last_block_size; - priv_tail_size(new_end_block, new_end_block->m_size); - assert(priv_prev_block(first_block) == new_end_block); + priv_mark_as_allocated_block(new_end_block); + + //Although the first block might be allocated, we'll + //store the offset to the end block since in the previous + //offset can't be overwritten by a previous block + first_block->m_prev_size = new_end_block->m_size; + assert(priv_end_block(first_block) == new_end_block); //Update managed buffer's size m_header.m_size = shrunk_border_offset; - if(unique_block) - priv_deallocate(unique_block); + if(unique_buffer) + priv_deallocate(unique_buffer); } -template -void rbtree_best_fit:: +template +void rbtree_best_fit:: priv_add_segment(void *addr, std::size_t size) { //Check alignment @@ -506,17 +523,15 @@ void rbtree_best_fit:: (detail::char_ptr_cast(addr) + first_big_block->m_size*Alignment))SizeHolder); //This will overwrite the prev part of the "end" node - priv_tail_size(first_big_block, first_big_block->m_size); priv_mark_as_free_block (first_big_block); first_big_block->m_prev_size = end_block->m_size = (detail::char_ptr_cast(first_big_block) - detail::char_ptr_cast(end_block))/Alignment; - end_block->m_end = 1; - end_block->m_allocated = 1; + priv_mark_as_allocated_block(end_block); assert(priv_next_block(first_big_block) == end_block); - assert(priv_prev_block(end_block) == first_big_block); assert(priv_next_block(end_block) == first_big_block); - assert(priv_prev_block(first_big_block) == end_block); + assert(priv_end_block(first_big_block) == end_block); + assert(priv_prev_block(end_block) == first_big_block); //Some check to validate the algorithm, since it makes some assumptions //to optimize the space wasted in bookkeeping: @@ -530,27 +545,24 @@ void rbtree_best_fit:: 
m_header.m_imultiset.insert(*first_big_block); } -template -inline void rbtree_best_fit:: +template +inline void rbtree_best_fit:: priv_mark_new_allocated_block(block_ctrl *new_block) -{ - priv_tail_size(new_block, new_block->m_size); - priv_mark_as_allocated_block(new_block); -} +{ priv_mark_as_allocated_block(new_block); } -template -inline std::size_t rbtree_best_fit::get_size() const +template +inline std::size_t rbtree_best_fit::get_size() const { return m_header.m_size; } -template -inline std::size_t rbtree_best_fit::get_free_memory() const +template +inline std::size_t rbtree_best_fit::get_free_memory() const { return m_header.m_size - m_header.m_allocated - priv_first_block_offset(this, m_header.m_extra_hdr_bytes); } -template -inline std::size_t rbtree_best_fit:: +template +inline std::size_t rbtree_best_fit:: get_min_size (std::size_t extra_hdr_bytes) { return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) + @@ -558,8 +570,8 @@ inline std::size_t rbtree_best_fit:: MinBlockUnits + EndCtrlBlockUnits)*Alignment; } -template -inline bool rbtree_best_fit:: +template +inline bool rbtree_best_fit:: all_memory_deallocated() { //----------------------- @@ -575,8 +587,8 @@ inline bool rbtree_best_fit:: (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment; } -template -bool rbtree_best_fit:: +template +bool rbtree_best_fit:: check_sanity() { //----------------------- @@ -609,8 +621,8 @@ bool rbtree_best_fit:: return true; } -template -inline void* rbtree_best_fit:: +template +inline void* rbtree_best_fit:: allocate(std::size_t nbytes) { //----------------------- @@ -621,8 +633,8 @@ inline void* rbtree_best_fit:: return ret; } -template -inline void* rbtree_best_fit:: +template +inline void* rbtree_best_fit:: allocate_aligned(std::size_t nbytes, std::size_t alignment) { //----------------------- @@ -631,26 +643,42 @@ inline void* rbtree_best_fit:: return algo_impl_t::allocate_aligned(this, nbytes, alignment); } -template +template template -inline 
std::pair rbtree_best_fit:: +inline std::pair rbtree_best_fit:: allocation_command (allocation_type command, std::size_t limit_size, std::size_t preferred_size,std::size_t &received_size, T *reuse_ptr) { - if(command & try_shrink_in_place){ - bool success = - algo_impl_t::try_shrink(this, reuse_ptr, limit_size, preferred_size, received_size); - return std::pair ((success ? reuse_ptr : 0), true); - } std::pair ret = priv_allocation_command (command, limit_size, preferred_size, received_size, reuse_ptr, sizeof(T)); + BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of::value)); return std::pair(static_cast(ret.first), ret.second); } -template -inline std::pair rbtree_best_fit:: +template +inline std::pair rbtree_best_fit:: + raw_allocation_command (allocation_type command, std::size_t limit_objects, + std::size_t preferred_objects,std::size_t &received_objects, + void *reuse_ptr, std::size_t sizeof_object) +{ + if(!sizeof_object) + return std::pair(0, 0); + if(command & try_shrink_in_place){ + bool success = algo_impl_t::try_shrink + ( this, reuse_ptr, limit_objects*sizeof_object + , preferred_objects*sizeof_object, received_objects); + received_objects /= sizeof_object; + return std::pair ((success ? 
reuse_ptr : 0), true); + } + return priv_allocation_command + (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object); +} + + +template +inline std::pair rbtree_best_fit:: priv_allocation_command (allocation_type command, std::size_t limit_size, std::size_t preferred_size,std::size_t &received_size, void *reuse_ptr, std::size_t sizeof_object) @@ -673,18 +701,18 @@ inline std::pair rbtree_best_fit:: return ret; } -template -inline std::size_t rbtree_best_fit:: +template +inline std::size_t rbtree_best_fit:: size(const void *ptr) const { //We need no synchronization since this block's size is not going //to be modified by anyone else //Obtain the real size of the block - return (priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment; + return (priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk; } -template -inline void rbtree_best_fit::zero_free_memory() +template +inline void rbtree_best_fit::zero_free_memory() { //----------------------- boost::interprocess::scoped_lock guard(m_header); @@ -700,8 +728,8 @@ inline void rbtree_best_fit::zero_free_memory() } } -template -void* rbtree_best_fit:: +template +void* rbtree_best_fit:: priv_expand_both_sides(allocation_type command ,std::size_t min_size ,std::size_t preferred_size @@ -717,7 +745,7 @@ void* rbtree_best_fit:: } else{ received_size = this->size(reuse_ptr); - if(received_size >= preferred_size) + if(received_size >= preferred_size || received_size >= min_size) return reuse_ptr; } @@ -731,18 +759,21 @@ void* rbtree_best_fit:: block_ctrl *reuse = priv_get_block(reuse_ptr); //Sanity check - assert(reuse->m_size == priv_tail_size(reuse)); + //assert(reuse->m_size == priv_tail_size(reuse)); algo_impl_t::assert_alignment(reuse); block_ctrl *prev_block; //If the previous block is not free, there is nothing to do - if(priv_is_allocated_block(prev_block = priv_prev_block(reuse))){ + if(priv_is_prev_allocated(reuse)){ return 0; } + prev_block = 
priv_prev_block(reuse); + assert(!priv_is_allocated_block(prev_block)); + //Some sanity checks - assert(prev_block->m_size == priv_tail_size(prev_block)); + assert(prev_block->m_size == reuse->m_prev_size); algo_impl_t::assert_alignment(prev_block); //Let's calculate the number of extra bytes of data before the current @@ -769,32 +800,41 @@ void* rbtree_best_fit:: if(std::size_t(prev_block->m_size*Alignment) >= needs_backwards_aligned){ //Now take all next space. This will succeed if(command & expand_fwd){ - if(!priv_expand(reuse_ptr, received_size, received_size, received_size)){ + std::size_t received_size2; + if(!priv_expand(reuse_ptr, received_size, received_size, received_size2)){ assert(0); } + assert(received_size = received_size2); } //We need a minimum size to split the previous one if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){ block_ctrl *new_block = reinterpret_cast (detail::char_ptr_cast(reuse) - needs_backwards_aligned); - //Erase old previous block, since we will change it - m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block)); - //Free old previous buffer new_block->m_size = - AllocatedCtrlUnits + (needs_backwards_aligned + received_size)/Alignment; + AllocatedCtrlUnits + (needs_backwards_aligned + (received_size - UsableByPreviousChunk))/Alignment; assert(new_block->m_size >= BlockCtrlUnits); priv_mark_new_allocated_block(new_block); prev_block->m_size = (detail::char_ptr_cast(new_block) - detail::char_ptr_cast(prev_block))/Alignment; assert(prev_block->m_size >= BlockCtrlUnits); - priv_tail_size(prev_block, prev_block->m_size); priv_mark_as_free_block(prev_block); - //Insert the remaining previous block in the free tree - m_header.m_imultiset.insert( m_header.m_imultiset.begin(), *prev_block); + //Update the old previous block in the free chunks tree + //If the new size fulfills tree invariants do nothing, + //otherwise erase() + insert() + { + imultiset_iterator 
prev_block_it(Imultiset::s_iterator_to(*prev_block)); + imultiset_iterator was_smaller_it(prev_block_it); + if(prev_block_it != m_header.m_imultiset.begin() && + (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){ + m_header.m_imultiset.erase(prev_block_it); + m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block); + } + } + received_size = needs_backwards_aligned + received_size; m_header.m_allocated += needs_backwards_aligned; @@ -812,14 +852,15 @@ void* rbtree_best_fit:: //Check if there is no place to create a new block and //the whole new block is multiple of the backwards expansion multiple else if(prev_block->m_size >= needs_backwards_aligned/Alignment && - 0 == (prev_block->m_size % lcm)) { + 0 == ((prev_block->m_size*Alignment) % lcm)) { //Erase old previous block, since we will change it m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block)); //Just merge the whole previous block - const std::size_t needs_backwards_aligned = prev_block->m_size*Alignment; - const std::size_t needs_backwards = detail::get_truncated_size(needs_backwards_aligned, backwards_multiple); - received_size = received_size/backwards_multiple*backwards_multiple + needs_backwards; + needs_backwards = detail::get_truncated_size + (prev_block->m_size*Alignment, backwards_multiple); + //received_size = received_size/backwards_multiple*backwards_multiple + needs_backwards; + received_size = received_size + needs_backwards; m_header.m_allocated += prev_block->m_size*Alignment; //Now update sizes @@ -843,9 +884,9 @@ void* rbtree_best_fit:: return 0; } -template -inline typename rbtree_best_fit::multiallocation_iterator - rbtree_best_fit:: +template +inline typename rbtree_best_fit::multiallocation_iterator + rbtree_best_fit:: allocate_many(std::size_t elem_bytes, std::size_t num_elements) { //----------------------- @@ -854,9 +895,23 @@ inline typename rbtree_best_fit::multiallocation_itera return algo_impl_t::allocate_many(this, elem_bytes, 
num_elements); } -template -inline typename rbtree_best_fit::multiallocation_iterator - rbtree_best_fit:: +template +inline void rbtree_best_fit:: + deallocate_many(typename rbtree_best_fit::multiallocation_iterator it) +{ + //----------------------- + boost::interprocess::scoped_lock guard(m_header); + //----------------------- + while(it){ + void *addr = &*it; + ++it; + this->priv_deallocate(addr); + } +} + +template +inline typename rbtree_best_fit::multiallocation_iterator + rbtree_best_fit:: allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element) { //----------------------- @@ -865,8 +920,8 @@ inline typename rbtree_best_fit::multiallocation_itera return algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element); } -template -std::pair rbtree_best_fit:: +template +std::pair rbtree_best_fit:: priv_allocate(allocation_type command ,std::size_t limit_size ,std::size_t preferred_size @@ -929,32 +984,34 @@ std::pair rbtree_best_fit:: return return_type(0, false); } -template +template inline -typename rbtree_best_fit::block_ctrl * - rbtree_best_fit::priv_get_block(const void *ptr) +typename rbtree_best_fit::block_ctrl * + rbtree_best_fit::priv_get_block(const void *ptr) { return reinterpret_cast(detail::char_ptr_cast(ptr) - AllocatedCtrlBytes); } -template +template inline -void *rbtree_best_fit:: - priv_get_user_buffer(const typename rbtree_best_fit::block_ctrl *block) +void *rbtree_best_fit:: + priv_get_user_buffer(const typename rbtree_best_fit::block_ctrl *block) { return detail::char_ptr_cast(block) + AllocatedCtrlBytes; } -template +template inline -std::size_t rbtree_best_fit:: +std::size_t rbtree_best_fit:: priv_get_total_units(std::size_t userbytes) { - std::size_t units = detail::get_rounded_size(userbytes, Alignment)/Alignment + AllocatedCtrlUnits; + if(userbytes < UsableByPreviousChunk) + userbytes = UsableByPreviousChunk; + std::size_t units = detail::get_rounded_size(userbytes - 
UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits; if(units < BlockCtrlUnits) units = BlockCtrlUnits; return units; } -template -bool rbtree_best_fit:: +template +bool rbtree_best_fit:: priv_expand (void *ptr ,const std::size_t min_size ,const std::size_t preferred_size @@ -966,16 +1023,16 @@ bool rbtree_best_fit:: //The block must be marked as allocated and the sizes must be equal assert(priv_is_allocated_block(block)); - assert(old_block_units == priv_tail_size(block)); + //assert(old_block_units == priv_tail_size(block)); //Put this to a safe value - received_size = (old_block_units - AllocatedCtrlUnits)*Alignment; - if(received_size > preferred_size) + received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk; + if(received_size >= preferred_size || received_size >= min_size) return true; //Now translate it to Alignment units - const std::size_t min_user_units = algo_impl_t::ceil_units(min_size); - const std::size_t preferred_user_units = algo_impl_t::ceil_units(preferred_size); + const std::size_t min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk); + const std::size_t preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk); //Some parameter checks assert(min_user_units <= preferred_user_units); @@ -994,7 +1051,7 @@ bool rbtree_best_fit:: const std::size_t merged_user_units = merged_units - AllocatedCtrlUnits; if(merged_user_units < min_user_units){ - received_size = merged_user_units*Alignment; + received_size = merged_units*Alignment - UsableByPreviousChunk; return false; } @@ -1007,30 +1064,44 @@ bool rbtree_best_fit:: //Check if we can split the next one in two parts if((merged_units - intended_units) >= BlockCtrlUnits){ - //Now we have to update the data in the tree - m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block)); - //This block is bigger than needed, split it in //two blocks, the first one will be merged and //the second's size will be 
the remaining space - assert(next_block->m_size == priv_tail_size(next_block)); + assert(next_block->m_size == priv_next_block(next_block)->m_prev_size); + const std::size_t rem_units = merged_units - intended_units; + //Check if we we need to update the old next block in the free chunks tree + //If the new size fulfills tree invariants, we just need to replace the node + //(the block start has been displaced), otherwise erase() + insert(). + // + //This fixup must be done in two parts, because the new next chunk might + //overwrite the tree hook of the old next chunk. So we first erase the + //old if needed and we'll insert the new one after creating the new next + imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block)); + const bool size_invariants_broken = + (next_block->m_size - rem_units ) < BlockCtrlUnits || + (old_next_block_it != m_header.m_imultiset.begin() && + (--imultiset_iterator(old_next_block_it))->m_size > rem_units); + if(size_invariants_broken){ + m_header.m_imultiset.erase(old_next_block_it); + } //This is the remaining block - block_ctrl *new_block = new(reinterpret_cast + block_ctrl *rem_block = new(reinterpret_cast (detail::char_ptr_cast(block) + intended_units*Alignment))block_ctrl; - new_block->m_size = merged_units - intended_units; - algo_impl_t::assert_alignment(new_block); - assert(new_block->m_size >= BlockCtrlUnits); - priv_tail_size(new_block, new_block->m_size); - priv_mark_as_free_block(new_block); + rem_block->m_size = rem_units; + algo_impl_t::assert_alignment(rem_block); + assert(rem_block->m_size >= BlockCtrlUnits); + priv_mark_as_free_block(rem_block); - //Insert the new block in the container - m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *new_block); + //Now the second part of the fixup + if(size_invariants_broken) + m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block); + else + m_header.m_imultiset.replace_node(old_next_block_it, *rem_block); //Write the new length 
block->m_size = intended_user_units + AllocatedCtrlUnits; assert(block->m_size >= BlockCtrlUnits); - priv_tail_size(block, block->m_size); m_header.m_allocated += (intended_units - old_block_units)*Alignment; } //There is no free space to create a new node: just merge both blocks @@ -1041,61 +1112,95 @@ bool rbtree_best_fit:: //Write the new length block->m_size = merged_units; assert(block->m_size >= BlockCtrlUnits); - priv_tail_size(block, merged_units); m_header.m_allocated += (merged_units - old_block_units)*Alignment; } - - received_size = (block->m_size - AllocatedCtrlUnits)*Alignment; + priv_mark_as_allocated_block(block); + received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk; return true; } -template inline -void rbtree_best_fit::priv_tail_size - (typename rbtree_best_fit::block_ctrl *ptr, std::size_t size) -{ priv_next_block(ptr)->m_prev_size = size; } - -template inline -std::size_t rbtree_best_fit::priv_tail_size - (typename rbtree_best_fit::block_ctrl *ptr) -{ return priv_next_block(ptr)->m_prev_size; } - -template inline -typename rbtree_best_fit::block_ctrl * - rbtree_best_fit::priv_prev_block - (typename rbtree_best_fit::block_ctrl *ptr) +template inline +typename rbtree_best_fit::block_ctrl * + rbtree_best_fit::priv_prev_block + (typename rbtree_best_fit::block_ctrl *ptr) { + assert(!ptr->m_prev_allocated); return reinterpret_cast (detail::char_ptr_cast(ptr) - ptr->m_prev_size*Alignment); } -template inline -typename rbtree_best_fit::block_ctrl * - rbtree_best_fit::priv_next_block - (typename rbtree_best_fit::block_ctrl *ptr) +template inline +bool rbtree_best_fit::priv_is_prev_allocated + (typename rbtree_best_fit::block_ctrl *ptr) +{ + if(ptr->m_prev_allocated){ + return true; + } + else{ + block_ctrl *prev = priv_prev_block(ptr); + assert(!priv_is_allocated_block(prev)); + return false; + } +} + +template inline +typename rbtree_best_fit::block_ctrl * + rbtree_best_fit::priv_end_block + (typename 
rbtree_best_fit::block_ctrl *first_segment_block) +{ + assert(first_segment_block->m_prev_allocated); + block_ctrl *end_block = reinterpret_cast + (detail::char_ptr_cast(first_segment_block) - first_segment_block->m_prev_size*Alignment); + assert(priv_is_allocated_block(end_block)); + assert(end_block > first_segment_block); + return end_block; +} + +template inline +typename rbtree_best_fit::block_ctrl * + rbtree_best_fit::priv_next_block + (typename rbtree_best_fit::block_ctrl *ptr) { return reinterpret_cast (detail::char_ptr_cast(ptr) + ptr->m_size*Alignment); } -template inline -bool rbtree_best_fit::priv_is_allocated_block - (typename rbtree_best_fit::block_ctrl *block) -{ return block->m_allocated != 0; } +template inline +bool rbtree_best_fit::priv_is_allocated_block + (typename rbtree_best_fit::block_ctrl *block) +{ + bool allocated = block->m_allocated != 0; + block_ctrl *next_block = (block_ctrl *) + (detail::char_ptr_cast(block) + block->m_size*Alignment); + bool next_block_prev_allocated = next_block->m_prev_allocated != 0; + (void)next_block_prev_allocated; + assert(allocated == next_block_prev_allocated); + return allocated; +} -template inline -void rbtree_best_fit::priv_mark_as_allocated_block - (typename rbtree_best_fit::block_ctrl *block) -{ block->m_allocated = 1; } +template inline +void rbtree_best_fit::priv_mark_as_allocated_block + (typename rbtree_best_fit::block_ctrl *block) +{ + //assert(!priv_is_allocated_block(block)); + block->m_allocated = 1; + ((block_ctrl *)(((char*)block) + block->m_size*Alignment))->m_prev_allocated = 1; +} -template inline -void rbtree_best_fit::priv_mark_as_free_block - (typename rbtree_best_fit::block_ctrl *block) -{ block->m_allocated = 0; } +template inline +void rbtree_best_fit::priv_mark_as_free_block + (typename rbtree_best_fit::block_ctrl *block) +{ + block->m_allocated = 0; + ((block_ctrl *)(((char*)block) + block->m_size*Alignment))->m_prev_allocated = 0; + //assert(!priv_is_allocated_block(ptr)); + 
priv_next_block(block)->m_prev_size = block->m_size; +} -template inline -void* rbtree_best_fit::priv_check_and_allocate +template inline +void* rbtree_best_fit::priv_check_and_allocate (std::size_t nunits - ,typename rbtree_best_fit::block_ctrl* block + ,typename rbtree_best_fit::block_ctrl* block ,std::size_t &received_size) { std::size_t upper_nunits = nunits + BlockCtrlUnits; @@ -1109,32 +1214,30 @@ void* rbtree_best_fit::priv_check_and_allocate std::size_t block_old_size = block->m_size; block->m_size = nunits; assert(block->m_size >= BlockCtrlUnits); - priv_tail_size(block, block->m_size); //This is the remaining block - block_ctrl *new_block = new(reinterpret_cast + block_ctrl *rem_block = new(reinterpret_cast (detail::char_ptr_cast(block) + Alignment*nunits))block_ctrl; - algo_impl_t::assert_alignment(new_block); - new_block->m_size = block_old_size - nunits; - assert(new_block->m_size >= BlockCtrlUnits); - priv_tail_size(new_block, new_block->m_size); - priv_mark_as_free_block(new_block); + algo_impl_t::assert_alignment(rem_block); + rem_block->m_size = block_old_size - nunits; + assert(rem_block->m_size >= BlockCtrlUnits); + priv_mark_as_free_block(rem_block); imultiset_iterator it_hint; if(it_old == m_header.m_imultiset.begin() - || (--imultiset_iterator(it_old))->m_size < new_block->m_size){ + || (--imultiset_iterator(it_old))->m_size < rem_block->m_size){ //option a: slow but secure - //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *new_block); + //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block); //option b: Construct an empty node and swap - //Imultiset::init_node(*new_block); - //block->swap_nodes(*new_block); + //Imultiset::init_node(*rem_block); + //block->swap_nodes(*rem_block); //option c: replace the node directly - m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *new_block); + m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block); } else{ //Now we have 
to update the data in the tree m_header.m_imultiset.erase(it_old); - m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *new_block); + m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block); } } @@ -1148,7 +1251,7 @@ void* rbtree_best_fit::priv_check_and_allocate //We need block_ctrl for deallocation stuff, so //return memory user can overwrite m_header.m_allocated += block->m_size*Alignment; - received_size = (block->m_size - AllocatedCtrlUnits)*Alignment; + received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk; //Mark the block as allocated priv_mark_as_allocated_block(block); @@ -1157,11 +1260,12 @@ void* rbtree_best_fit::priv_check_and_allocate //cleared with zero_free_memory TreeHook *t = static_cast(block); std::memset(t, 0, sizeof(*t)); + this->priv_next_block(block)->m_prev_size = 0; return priv_get_user_buffer(block); } -template -void rbtree_best_fit::deallocate(void* addr) +template +void rbtree_best_fit::deallocate(void* addr) { if(!addr) return; //----------------------- @@ -1170,8 +1274,8 @@ void rbtree_best_fit::deallocate(void* addr) return this->priv_deallocate(addr); } -template -void rbtree_best_fit::priv_deallocate(void* addr) +template +void rbtree_best_fit::priv_deallocate(void* addr) { if(!addr) return; @@ -1179,7 +1283,7 @@ void rbtree_best_fit::priv_deallocate(void* addr) //The blocks must be marked as allocated and the sizes must be equal assert(priv_is_allocated_block(block)); - assert(block->m_size == priv_tail_size(block)); +// assert(block->m_size == priv_tail_size(block)); //Check if alignment and block size are right algo_impl_t::assert_alignment(addr); @@ -1194,33 +1298,48 @@ void rbtree_best_fit::priv_deallocate(void* addr) block_ctrl *block_to_insert = block; //Get the next block - block_ctrl *next_block = priv_next_block(block); + block_ctrl *next_block = priv_next_block(block); + bool merge_with_prev = !priv_is_prev_allocated(block); + bool merge_with_next = 
!priv_is_allocated_block(next_block); - //Merge if the next is free - if(!priv_is_allocated_block(next_block)){ - block->m_size += next_block->m_size; - assert(block->m_size >= BlockCtrlUnits); - priv_tail_size(block, block->m_size); - m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block)); + //Merge logic. First just update block sizes, then fix free chunks tree + if(merge_with_prev || merge_with_next){ + //Merge if the previous is free + if(merge_with_prev){ + //Get the previous block + block_ctrl *prev_block = priv_prev_block(block); + prev_block->m_size += block->m_size; + assert(prev_block->m_size >= BlockCtrlUnits); + block_to_insert = prev_block; + } + //Merge if the next is free + if(merge_with_next){ + block_to_insert->m_size += next_block->m_size; + assert(block_to_insert->m_size >= BlockCtrlUnits); + if(merge_with_prev) + m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block)); + } + + bool only_merge_next = !merge_with_prev && merge_with_next; + imultiset_iterator free_block_to_check_it + (Imultiset::s_iterator_to(only_merge_next ? 
*next_block : *block_to_insert)); + imultiset_iterator was_bigger_it(free_block_to_check_it); + + //Now try to shortcut erasure + insertion (O(log(N))) with + //a O(1) operation if merging does not alter tree positions + if(++was_bigger_it != m_header.m_imultiset.end() && + block_to_insert->m_size > was_bigger_it->m_size ){ + m_header.m_imultiset.erase(free_block_to_check_it); + m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert); + } + else if(only_merge_next){ + m_header.m_imultiset.replace_node(free_block_to_check_it, *block_to_insert); + } } - - //Get the previous block - block_ctrl *prev_block = priv_prev_block(block); - - //Now check that tail size and control size are equal - assert(prev_block->m_size == priv_tail_size(prev_block)); - - //Merge if the previous is free - if(!priv_is_allocated_block(prev_block)){ - prev_block->m_size += block->m_size; - assert(prev_block->m_size >= BlockCtrlUnits); - priv_tail_size(prev_block, prev_block->m_size); - m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block)); - block_to_insert = prev_block; + else{ + m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert); } - priv_mark_as_free_block(block_to_insert); - m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert); } } //namespace interprocess { diff --git a/include/boost/interprocess/mem_algo/simple_seq_fit.hpp b/include/boost/interprocess/mem_algo/simple_seq_fit.hpp index 05dd128..dfa2a0e 100644 --- a/include/boost/interprocess/mem_algo/simple_seq_fit.hpp +++ b/include/boost/interprocess/mem_algo/simple_seq_fit.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/offset_ptr.hpp b/include/boost/interprocess/offset_ptr.hpp index 510f61b..c3853a1 100644 --- a/include/boost/interprocess/offset_ptr.hpp +++ b/include/boost/interprocess/offset_ptr.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -61,7 +61,7 @@ class offset_ptr typedef void (self_t::*unspecified_bool_type)() const; #if defined(_MSC_VER) && (_MSC_VER >= 1400) - __declspec(noinline) + __declspec(noinline) //this workaround is needed for msvc-8.0 and msvc-9.0 #endif void set_offset(const volatile void *ptr) { @@ -77,7 +77,7 @@ class offset_ptr } #if defined(_MSC_VER) && (_MSC_VER >= 1400) - __declspec(noinline) + __declspec(noinline) //this workaround is needed for msvc-8.0 and msvc-9.0 #endif void* get_pointer() const { return (m_offset == 1) ? 0 : (detail::char_ptr_cast(this) + m_offset); } diff --git a/include/boost/interprocess/segment_manager.hpp b/include/boost/interprocess/segment_manager.hpp index b602b85..daf61a8 100644 --- a/include/boost/interprocess/segment_manager.hpp +++ b/include/boost/interprocess/segment_manager.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -70,7 +70,8 @@ class segment_manager_base /// @cond //Experimental. 
Don't use - typedef typename MemoryAlgorithm::multiallocation_iterator multiallocation_iterator; + typedef typename MemoryAlgorithm::multiallocation_iterator multiallocation_iterator; + typedef typename MemoryAlgorithm::multiallocation_chain multiallocation_chain; /// @endcond @@ -148,6 +149,11 @@ class segment_manager_base multiallocation_iterator allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element, std::nothrow_t) { return MemoryAlgorithm::allocate_many(elem_sizes, n_elements, sizeof_element); } + //!Deallocates elements pointed by the + //!multiallocation iterator range. + void deallocate_many(multiallocation_iterator it) + { MemoryAlgorithm::deallocate_many(it); } + /// @endcond //!Allocates nbytes bytes. Throws boost::interprocess::bad_alloc @@ -189,6 +195,19 @@ class segment_manager_base return ret; } + std::pair + raw_allocation_command (allocation_type command, std::size_t limit_objects, + std::size_t preferred_objects,std::size_t &received_objects, + void *reuse_ptr = 0, std::size_t sizeof_object = 1) + { + std::pair ret = MemoryAlgorithm::raw_allocation_command + ( command | nothrow_allocation, limit_objects, preferred_objects, received_objects + , reuse_ptr, sizeof_object); + if(!(command & nothrow_allocation) && !ret.first) + throw bad_alloc(); + return ret; + } + //!Deallocates the bytes allocated with allocate/allocate_many() //!pointed by addr void deallocate (void *addr) @@ -219,6 +238,10 @@ class segment_manager_base void zero_free_memory() { MemoryAlgorithm::zero_free_memory(); } + //!Returns the size of the buffer previously allocated pointed by ptr + std::size_t size(const void *ptr) const + { return MemoryAlgorithm::size(ptr); } + /// @cond protected: void * prot_anonymous_construct diff --git a/include/boost/interprocess/shared_memory_object.hpp b/include/boost/interprocess/shared_memory_object.hpp index 59d25f6..9d2eda3 100644 --- a/include/boost/interprocess/shared_memory_object.hpp +++ 
b/include/boost/interprocess/shared_memory_object.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/smart_ptr/deleter.hpp b/include/boost/interprocess/smart_ptr/deleter.hpp index 993f879..5909808 100644 --- a/include/boost/interprocess/smart_ptr/deleter.hpp +++ b/include/boost/interprocess/smart_ptr/deleter.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2007. +// (C) Copyright Ion Gaztanaga 2007-2008. // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/smart_ptr/detail/shared_count.hpp b/include/boost/interprocess/smart_ptr/detail/shared_count.hpp index 0ca7a00..1b26426 100644 --- a/include/boost/interprocess/smart_ptr/detail/shared_count.hpp +++ b/include/boost/interprocess/smart_ptr/detail/shared_count.hpp @@ -4,7 +4,7 @@ // // (C) Copyright Peter Dimov and Multi Media Ltd. 2001, 2002, 2003 // (C) Copyright Peter Dimov 2004-2005 -// (C) Copyright Ion Gaztanaga 2006-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2006-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp b/include/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp index 40cacfe..3fee4e7 100644 --- a/include/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp +++ b/include/boost/interprocess/smart_ptr/detail/sp_counted_base_atomic.hpp @@ -9,7 +9,7 @@ // Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd. // Copyright 2004-2005 Peter Dimov -// Copyright 2007 Ion Gaztanaga +// Copyright 2007-2008 Ion Gaztanaga // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/interprocess/smart_ptr/shared_ptr.hpp b/include/boost/interprocess/smart_ptr/shared_ptr.hpp index 2cc31bf..9566224 100644 --- a/include/boost/interprocess/smart_ptr/shared_ptr.hpp +++ b/include/boost/interprocess/smart_ptr/shared_ptr.hpp @@ -4,7 +4,7 @@ // // (C) Copyright Greg Colvin and Beman Dawes 1998, 1999. // (C) Copyright Peter Dimov 2001, 2002, 2003 -// (C) Copyright Ion Gaztanaga 2006-2007. +// (C) Copyright Ion Gaztanaga 2006-2008. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) diff --git a/include/boost/interprocess/smart_ptr/weak_ptr.hpp b/include/boost/interprocess/smart_ptr/weak_ptr.hpp index 979958f..7e48cd7 100644 --- a/include/boost/interprocess/smart_ptr/weak_ptr.hpp +++ b/include/boost/interprocess/smart_ptr/weak_ptr.hpp @@ -3,7 +3,7 @@ // This file is the adaptation for Interprocess of boost/weak_ptr.hpp // // (C) Copyright Peter Dimov 2001, 2002, 2003 -// (C) Copyright Ion Gaztanaga 2006-2007. +// (C) Copyright Ion Gaztanaga 2006-2008. // Distributed under the Boost Software License, Version 1.0. 
// (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) diff --git a/include/boost/interprocess/streams/bufferstream.hpp b/include/boost/interprocess/streams/bufferstream.hpp index c29e98b..91cb548 100644 --- a/include/boost/interprocess/streams/bufferstream.hpp +++ b/include/boost/interprocess/streams/bufferstream.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/streams/vectorstream.hpp b/include/boost/interprocess/streams/vectorstream.hpp index c75b172..64795b7 100644 --- a/include/boost/interprocess/streams/vectorstream.hpp +++ b/include/boost/interprocess/streams/vectorstream.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/emulation/interprocess_condition.hpp b/include/boost/interprocess/sync/emulation/interprocess_condition.hpp index d7bf137..d967809 100644 --- a/include/boost/interprocess/sync/emulation/interprocess_condition.hpp +++ b/include/boost/interprocess/sync/emulation/interprocess_condition.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/emulation/interprocess_mutex.hpp b/include/boost/interprocess/sync/emulation/interprocess_mutex.hpp index 036cd04..4ef74c2 100644 --- a/include/boost/interprocess/sync/emulation/interprocess_mutex.hpp +++ b/include/boost/interprocess/sync/emulation/interprocess_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp b/include/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp index fff61b1..83235be 100644 --- a/include/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp +++ b/include/boost/interprocess/sync/emulation/interprocess_recursive_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/emulation/interprocess_semaphore.hpp b/include/boost/interprocess/sync/emulation/interprocess_semaphore.hpp index 23d5963..6c75bfd 100644 --- a/include/boost/interprocess/sync/emulation/interprocess_semaphore.hpp +++ b/include/boost/interprocess/sync/emulation/interprocess_semaphore.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/emulation/named_creation_functor.hpp b/include/boost/interprocess/sync/emulation/named_creation_functor.hpp index aa651a2..1714de7 100644 --- a/include/boost/interprocess/sync/emulation/named_creation_functor.hpp +++ b/include/boost/interprocess/sync/emulation/named_creation_functor.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2007-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/file_lock.hpp b/include/boost/interprocess/sync/file_lock.hpp index 1de1407..d9ef89f 100644 --- a/include/boost/interprocess/sync/file_lock.hpp +++ b/include/boost/interprocess/sync/file_lock.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/interprocess_barrier.hpp b/include/boost/interprocess/sync/interprocess_barrier.hpp index 635aa6b..6ba5a66 100644 --- a/include/boost/interprocess/sync/interprocess_barrier.hpp +++ b/include/boost/interprocess/sync/interprocess_barrier.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/interprocess_condition.hpp b/include/boost/interprocess/sync/interprocess_condition.hpp index 6237c29..1ec3f74 100644 --- a/include/boost/interprocess/sync/interprocess_condition.hpp +++ b/include/boost/interprocess/sync/interprocess_condition.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/interprocess_mutex.hpp b/include/boost/interprocess/sync/interprocess_mutex.hpp index ee375a8..c7a1ee1 100644 --- a/include/boost/interprocess/sync/interprocess_mutex.hpp +++ b/include/boost/interprocess/sync/interprocess_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/interprocess_recursive_mutex.hpp b/include/boost/interprocess/sync/interprocess_recursive_mutex.hpp index 0c902b3..d84be12 100644 --- a/include/boost/interprocess/sync/interprocess_recursive_mutex.hpp +++ b/include/boost/interprocess/sync/interprocess_recursive_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/interprocess_semaphore.hpp b/include/boost/interprocess/sync/interprocess_semaphore.hpp index a23128b..ccd5ec4 100644 --- a/include/boost/interprocess/sync/interprocess_semaphore.hpp +++ b/include/boost/interprocess/sync/interprocess_semaphore.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/interprocess_upgradable_mutex.hpp b/include/boost/interprocess/sync/interprocess_upgradable_mutex.hpp index ee22e84..c600e4c 100644 --- a/include/boost/interprocess/sync/interprocess_upgradable_mutex.hpp +++ b/include/boost/interprocess/sync/interprocess_upgradable_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/lock_options.hpp b/include/boost/interprocess/sync/lock_options.hpp index 5161182..dad8f67 100644 --- a/include/boost/interprocess/sync/lock_options.hpp +++ b/include/boost/interprocess/sync/lock_options.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/mutex_family.hpp b/include/boost/interprocess/sync/mutex_family.hpp index 9a7caa6..f31d42c 100644 --- a/include/boost/interprocess/sync/mutex_family.hpp +++ b/include/boost/interprocess/sync/mutex_family.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/named_condition.hpp b/include/boost/interprocess/sync/named_condition.hpp index 4551025..cfdbe13 100644 --- a/include/boost/interprocess/sync/named_condition.hpp +++ b/include/boost/interprocess/sync/named_condition.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/named_mutex.hpp b/include/boost/interprocess/sync/named_mutex.hpp index e897485..a1f5e21 100644 --- a/include/boost/interprocess/sync/named_mutex.hpp +++ b/include/boost/interprocess/sync/named_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. 
Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/named_recursive_mutex.hpp b/include/boost/interprocess/sync/named_recursive_mutex.hpp index 84f6018..3d62284 100644 --- a/include/boost/interprocess/sync/named_recursive_mutex.hpp +++ b/include/boost/interprocess/sync/named_recursive_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/named_semaphore.hpp b/include/boost/interprocess/sync/named_semaphore.hpp index 0378d3f..7754750 100644 --- a/include/boost/interprocess/sync/named_semaphore.hpp +++ b/include/boost/interprocess/sync/named_semaphore.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/named_upgradable_mutex.hpp b/include/boost/interprocess/sync/named_upgradable_mutex.hpp index 3c4d97b..853ced4 100644 --- a/include/boost/interprocess/sync/named_upgradable_mutex.hpp +++ b/include/boost/interprocess/sync/named_upgradable_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. 
Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/null_mutex.hpp b/include/boost/interprocess/sync/null_mutex.hpp index fa7a9cf..fac5243 100644 --- a/include/boost/interprocess/sync/null_mutex.hpp +++ b/include/boost/interprocess/sync/null_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/interprocess_condition.hpp b/include/boost/interprocess/sync/posix/interprocess_condition.hpp index 2cc76f0..8ac8c8e 100644 --- a/include/boost/interprocess/sync/posix/interprocess_condition.hpp +++ b/include/boost/interprocess/sync/posix/interprocess_condition.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/interprocess_mutex.hpp b/include/boost/interprocess/sync/posix/interprocess_mutex.hpp index 06b36b2..692542f 100644 --- a/include/boost/interprocess/sync/posix/interprocess_mutex.hpp +++ b/include/boost/interprocess/sync/posix/interprocess_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. 
Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp b/include/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp index be92550..0701a3e 100644 --- a/include/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp +++ b/include/boost/interprocess/sync/posix/interprocess_recursive_mutex.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/interprocess_semaphore.hpp b/include/boost/interprocess/sync/posix/interprocess_semaphore.hpp index 9cf1d12..e55b0fd 100644 --- a/include/boost/interprocess/sync/posix/interprocess_semaphore.hpp +++ b/include/boost/interprocess/sync/posix/interprocess_semaphore.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/pthread_helpers.hpp b/include/boost/interprocess/sync/posix/pthread_helpers.hpp index bd580df..82e61c2 100644 --- a/include/boost/interprocess/sync/posix/pthread_helpers.hpp +++ b/include/boost/interprocess/sync/posix/pthread_helpers.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/ptime_to_timespec.hpp b/include/boost/interprocess/sync/posix/ptime_to_timespec.hpp index 67f18d4..80a2f09 100644 --- a/include/boost/interprocess/sync/posix/ptime_to_timespec.hpp +++ b/include/boost/interprocess/sync/posix/ptime_to_timespec.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/posix/semaphore_wrapper.hpp b/include/boost/interprocess/sync/posix/semaphore_wrapper.hpp index c6c9663..51d1e57 100644 --- a/include/boost/interprocess/sync/posix/semaphore_wrapper.hpp +++ b/include/boost/interprocess/sync/posix/semaphore_wrapper.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/scoped_lock.hpp b/include/boost/interprocess/sync/scoped_lock.hpp index 9792f4d..2e4442c 100644 --- a/include/boost/interprocess/sync/scoped_lock.hpp +++ b/include/boost/interprocess/sync/scoped_lock.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. 
Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/sharable_lock.hpp b/include/boost/interprocess/sync/sharable_lock.hpp index 80ed1cb..1fbe7d1 100644 --- a/include/boost/interprocess/sync/sharable_lock.hpp +++ b/include/boost/interprocess/sync/sharable_lock.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/sync/upgradable_lock.hpp b/include/boost/interprocess/sync/upgradable_lock.hpp index 75ef211..6399400 100644 --- a/include/boost/interprocess/sync/upgradable_lock.hpp +++ b/include/boost/interprocess/sync/upgradable_lock.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/include/boost/interprocess/windows_shared_memory.hpp b/include/boost/interprocess/windows_shared_memory.hpp index 23b2350..c8b3c1a 100644 --- a/include/boost/interprocess/windows_shared_memory.hpp +++ b/include/boost/interprocess/windows_shared_memory.hpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2005-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2005-2008. 
Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // diff --git a/proj/vc7ide/interprocesslib.vcproj b/proj/vc7ide/interprocesslib.vcproj index 8c0bd92..5ccbb82 100644 --- a/proj/vc7ide/interprocesslib.vcproj +++ b/proj/vc7ide/interprocesslib.vcproj @@ -172,6 +172,9 @@ + + @@ -571,6 +574,9 @@ + + node_pool_t; + node_pool_t; if(!test::test_all_node_pool()) return 1; diff --git a/test/adaptive_pool_test.cpp b/test/adaptive_pool_test.cpp index 728c3bf..7b36131 100644 --- a/test/adaptive_pool_test.cpp +++ b/test/adaptive_pool_test.cpp @@ -11,11 +11,13 @@ #include #include #include +#include #include #include "print_container.hpp" #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include "list_test.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; @@ -24,14 +26,36 @@ using namespace boost::interprocess; typedef adaptive_pool shmem_node_allocator_t; +typedef detail::adaptive_pool_v1 + shmem_node_allocator_v1_t; + +//Explicit instantiations to catch compilation errors +template class adaptive_pool; +template class detail::adaptive_pool_v1; + //Alias list types typedef list MyShmList; +typedef list MyShmListV1; + +//Alias vector types +typedef vector MyShmVector; +typedef vector MyShmVectorV1; + int main () { if(test::list_test()) return 1; + if(test::list_test()) + return 1; + + if(test::vector_test()) + return 1; + + if(test::vector_test()) + return 1; + return 0; } diff --git a/test/cached_adaptive_pool_test.cpp b/test/cached_adaptive_pool_test.cpp index 96dcee4..0a0574e 100644 --- a/test/cached_adaptive_pool_test.cpp +++ b/test/cached_adaptive_pool_test.cpp @@ -16,6 +16,7 @@ #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include "list_test.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; @@ -25,14 +26,37 @@ typedef cached_adaptive_pool cached_node_allocator_t; +typedef 
detail::cached_adaptive_pool_v1 + + cached_node_allocator_v1_t; + +//Explicit instantiations to catch compilation errors +template class cached_adaptive_pool; +template class detail::cached_adaptive_pool_v1; + + //Alias list types typedef list MyShmList; +typedef list MyShmListV1; + +//Alias vector types +typedef vector MyShmVector; +typedef vector MyShmVectorV1; int main () { if(test::list_test()) return 1; + if(test::list_test()) + return 1; + + if(test::vector_test()) + return 1; + + if(test::vector_test()) + return 1; + return 0; } diff --git a/test/cached_node_allocator_test.cpp b/test/cached_node_allocator_test.cpp index 62ff407..597c90d 100644 --- a/test/cached_node_allocator_test.cpp +++ b/test/cached_node_allocator_test.cpp @@ -1,6 +1,6 @@ ////////////////////////////////////////////////////////////////////////////// // -// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost +// (C) Copyright Ion Gaztanaga 2004-2008. Distributed under the Boost // Software License, Version 1.0. 
(See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // @@ -16,23 +16,40 @@ #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include "list_test.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; -//We will work with wide characters for shared memory objects //Alias a integer node allocator type typedef cached_node_allocator cached_node_allocator_t; +typedef detail::cached_node_allocator_v1 + + cached_node_allocator_v1_t; + +//Explicit instantiations to catch compilation errors +template class cached_node_allocator; +template class detail::cached_node_allocator_v1; //Alias list types typedef list MyShmList; +typedef list MyShmListV1; + +//Alias vector types +typedef vector MyShmVector; +typedef vector MyShmVectorV1; int main () { if(test::list_test()) return 1; - + if(test::list_test()) + return 1; + if(test::vector_test()) + return 1; + if(test::vector_test()) + return 1; return 0; } diff --git a/test/file_mapping_test.cpp b/test/file_mapping_test.cpp index 53182d1..b9851c9 100644 --- a/test/file_mapping_test.cpp +++ b/test/file_mapping_test.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/test/map_test.hpp b/test/map_test.hpp index 899773c..4d9902f 100644 --- a/test/map_test.hpp +++ b/test/map_test.hpp @@ -296,6 +296,8 @@ int map_test () } //Now do count exercise + shmmap->erase(shmmap->begin(), shmmap->end()); + shmmultimap->erase(shmmultimap->begin(), shmmultimap->end()); shmmap->clear(); shmmultimap->clear(); diff --git a/test/memory_algorithm_test.cpp b/test/memory_algorithm_test.cpp index 036d5b7..fa93ec2 100644 --- a/test/memory_algorithm_test.cpp +++ b/test/memory_algorithm_test.cpp @@ -19,47 +19,67 @@ #include #include "get_process_id_name.hpp" +using namespace boost::interprocess; + +const int memsize = 16384; +const char *const shMemName = test::get_process_id_name(); + +int test_simple_seq_fit() +{ + //A shared memory with simple 
sequential fit algorithm + typedef basic_managed_shared_memory + + ,null_index + > my_managed_shared_memory; + + //Create shared memory + shared_memory_object::remove(shMemName); + my_managed_shared_memory segment(create_only, shMemName, memsize); + + //Now take the segment manager and launch memory test + if(!test::test_all_allocation(*segment.get_segment_manager())){ + return 1; + } + return 0; +} + +template +int test_rbtree_best_fit() +{ + //A shared memory with red-black tree best fit algorithm + typedef basic_managed_shared_memory + , Alignment> + ,null_index + > my_managed_shared_memory; + + //Create shared memory + shared_memory_object::remove(shMemName); + my_managed_shared_memory segment(create_only, shMemName, memsize); + + //Now take the segment manager and launch memory test + if(!test::test_all_allocation(*segment.get_segment_manager())){ + return 1; + } + return 0; +} + int main () { - using namespace boost::interprocess; - const int memsize = 16384; - const char *const shMemName = test::get_process_id_name(); - - { - //A shared memory with simple sequential fit algorithm - typedef basic_managed_shared_memory - - ,null_index - > my_managed_shared_memory; - - //Create shared memory - shared_memory_object::remove(shMemName); - my_managed_shared_memory segment(create_only, shMemName, memsize); - - //Now take the segment manager and launch memory test - if(!test::test_all_allocation(*segment.get_segment_manager())){ - return 1; - } + if(test_simple_seq_fit()){ + return 1; + } + if(test_rbtree_best_fit<4>()){ + return 1; + } + if(test_rbtree_best_fit<8>()){ + return 1; + } + if(test_rbtree_best_fit<16>()){ + return 1; } - { - //A shared memory with red-black tree best fit algorithm - typedef basic_managed_shared_memory - - ,null_index - > my_managed_shared_memory; - - //Create shared memory - shared_memory_object::remove(shMemName); - my_managed_shared_memory segment(create_only, shMemName, memsize); - - //Now take the segment manager and launch memory 
test - if(!test::test_all_allocation(*segment.get_segment_manager())){ - return 1; - } - } shared_memory_object::remove(shMemName); return 0; } diff --git a/test/memory_algorithm_test_template.hpp b/test/memory_algorithm_test_template.hpp index 817283f..29a85ef 100644 --- a/test/memory_algorithm_test_template.hpp +++ b/test/memory_algorithm_test_template.hpp @@ -16,7 +16,7 @@ #include #include #include -#include +#include //std::memset #include //std::remove namespace boost { namespace interprocess { namespace test { @@ -38,6 +38,8 @@ bool test_allocation(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers.push_back(ptr); } @@ -94,6 +96,8 @@ bool test_allocation_shrink(Allocator &a) void *ptr = a.allocate(i*2, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers.push_back(ptr); } @@ -111,6 +115,7 @@ bool test_allocation_shrink(Allocator &a) if(received_size < std::size_t(i)){ return false; } + std::memset(buffers[i], 0, a.size(buffers[i])); } } @@ -139,6 +144,8 @@ bool test_allocation_expand(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers.push_back(ptr); } @@ -203,7 +210,7 @@ bool test_allocation_shrink_and_expand(Allocator &a) //Now shrink to half for(int i = 0, max = (int)buffers.size() - ;i < max + ; i < max ; ++i){ std::size_t received_size; if(a.template allocation_command @@ -224,9 +231,10 @@ bool test_allocation_shrink_and_expand(Allocator &a) ;i < max ;++i){ std::size_t received_size; + std::size_t request_size = received_sizes[i]; if(a.template allocation_command - ( expand_fwd | nothrow_allocation, received_sizes[i] - , received_sizes[i], received_size, (char*)buffers[i]).first){ + ( expand_fwd | nothrow_allocation, request_size + , request_size, received_size, (char*)buffers[i]).first){ if(received_size != received_sizes[i]){ 
return false; } @@ -262,6 +270,8 @@ bool test_allocation_deallocation_expand(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers.push_back(ptr); } @@ -327,19 +337,21 @@ template bool test_allocation_with_reuse(Allocator &a) { //We will repeat this test for different sized elements - for(int size = 1; size < 20; ++size){ + for(int sizeof_object = 1; sizeof_object < 20; ++sizeof_object){ std::vector buffers; //Allocate buffers with extra memory for(int i = 0; true; ++i){ - void *ptr = a.allocate(i*size, std::nothrow); + void *ptr = a.allocate(i*sizeof_object, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers.push_back(ptr); } //Now deallocate all except the latest - //Now try to expand to the double of the size + //Now try to expand to the double of the sizeof_object for(int i = 0, max = (int)buffers.size() - 1 ;i < max ;++i){ @@ -353,14 +365,18 @@ bool test_allocation_with_reuse(Allocator &a) //Now allocate with reuse std::size_t received_size = 0; for(int i = 0; true; ++i){ - std::pair ret = a.template allocation_command - ( expand_bwd | nothrow_allocation, received_size/size*size + size - , received_size/size*size+(i+1)*size*2, received_size, (char*)ptr); + std::size_t min_size = (received_size + 1); + std::size_t prf_size = (received_size + (i+1)*2); + std::pair ret = a.raw_allocation_command + ( expand_bwd | nothrow_allocation, min_size + , prf_size, received_size, (char*)ptr, sizeof_object); if(!ret.first) break; //If we have memory, this must be a buffer reuse if(!ret.second) return 1; + if(received_size < min_size) + return 1; ptr = ret.first; } //There is only a single block so deallocate it @@ -456,6 +472,8 @@ bool test_clear_free_memory(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 1, size); buffers.push_back(ptr); } @@ -544,6 +562,8 @@ 
bool test_grow_shrink_to_fit(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers.push_back(ptr); } @@ -564,7 +584,11 @@ bool test_grow_shrink_to_fit(Allocator &a) for(int j = 0, max = (int)buffers.size() ;j < max ;++j){ - int pos = (j%4)*((int)buffers.size())/4; + int pos = (j%5)*((int)buffers.size())/4; + if(pos == int(buffers.size())) + --pos; + a.deallocate(buffers[pos]); + buffers.erase(buffers.begin()+pos); std::size_t old_free = a.get_free_memory(); a.shrink_to_fit(); if(!a.check_sanity()) return false; @@ -576,9 +600,6 @@ bool test_grow_shrink_to_fit(Allocator &a) if(!a.check_sanity()) return false; if(original_size != a.get_size()) return false; if(old_free != a.get_free_memory()) return false; - - a.deallocate(buffers[pos]); - buffers.erase(buffers.begin()+pos); } //Now shrink it to the maximum @@ -623,6 +644,8 @@ bool test_many_equal_allocation(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); if(!a.check_sanity()) return false; buffers2.push_back(ptr); @@ -736,6 +759,8 @@ bool test_many_different_allocation(Allocator &a) void *ptr = a.allocate(i, std::nothrow); if(!ptr) break; + std::size_t size = a.size(ptr); + std::memset(ptr, 0, size); buffers2.push_back(ptr); } @@ -816,6 +841,57 @@ bool test_many_different_allocation(Allocator &a) return true; } +//This test allocates multiple values until there is no more memory +//and after that deallocates all in the inverse order +template +bool test_many_deallocation(Allocator &a) +{ + typedef typename Allocator::multiallocation_iterator multiallocation_iterator; + const std::size_t ArraySize = 11; + std::vector buffers; + std::size_t requested_sizes[ArraySize]; + for(std::size_t i = 0; i < ArraySize; ++i){ + requested_sizes[i] = 4*i; + } + std::size_t free_memory = a.get_free_memory(); + + { + for(int i = 0; true; ++i){ + 
multiallocation_iterator it = a.allocate_many(requested_sizes, ArraySize, 1, std::nothrow); + if(!it) + break; + buffers.push_back(it); + } + for(int i = 0, max = (int)buffers.size(); i != max; ++i){ + a.deallocate_many(buffers[i]); + } + buffers.clear(); + bool ok = free_memory == a.get_free_memory() && + a.all_memory_deallocated() && a.check_sanity(); + if(!ok) return ok; + } + + { + for(int i = 0; true; ++i){ + multiallocation_iterator it = a.allocate_many(i*4, ArraySize, std::nothrow); + if(!it) + break; + buffers.push_back(it); + } + for(int i = 0, max = (int)buffers.size(); i != max; ++i){ + a.deallocate_many(buffers[i]); + } + buffers.clear(); + + bool ok = free_memory == a.get_free_memory() && + a.all_memory_deallocated() && a.check_sanity(); + if(!ok) return ok; + } + + return true; +} + + //This function calls all tests template bool test_all_allocation(Allocator &a) @@ -847,6 +923,12 @@ bool test_all_allocation(Allocator &a) return false; } + if(!test_many_deallocation(a)){ + std::cout << "test_many_deallocation failed. Class: " + << typeid(a).name() << std::endl; + return false; + } + std::cout << "Starting test_allocation_shrink. 
Class: " << typeid(a).name() << std::endl; diff --git a/test/node_allocator_test.cpp b/test/node_allocator_test.cpp index 486f4b6..dbe7b5d 100644 --- a/test/node_allocator_test.cpp +++ b/test/node_allocator_test.cpp @@ -16,6 +16,7 @@ #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include "list_test.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; @@ -23,15 +24,31 @@ using namespace boost::interprocess; //Alias a integer node allocator type typedef node_allocator shmem_node_allocator_t; +typedef detail::node_allocator_v1 + shmem_node_allocator_v1_t; + +//Explicit instantiations to catch compilation errors +template class node_allocator; +template class detail::node_allocator_v1; //Alias list types typedef list MyShmList; +typedef list MyShmListV1; + +//Alias vector types +typedef vector MyShmVector; +typedef vector MyShmVectorV1; int main () { if(test::list_test()) return 1; - + if(test::list_test()) + return 1; + if(test::vector_test()) + return 1; + if(test::vector_test()) + return 1; return 0; } diff --git a/test/node_pool_test.hpp b/test/node_pool_test.hpp index e4bc832..2e2b608 100644 --- a/test/node_pool_test.hpp +++ b/test/node_pool_test.hpp @@ -42,7 +42,7 @@ bool test_node_pool::allocate_then_deallocate(NodePool &pool) //First allocate nodes for(std::size_t i = 0; i < num_alloc; ++i){ - nodes.push_back(pool.allocate(1)); + nodes.push_back(pool.allocate_node()); } //Check that the free count is correct @@ -52,7 +52,7 @@ bool test_node_pool::allocate_then_deallocate(NodePool &pool) //Now deallocate all and check again for(std::size_t i = 0; i < num_alloc; ++i){ - pool.deallocate(nodes[i], 1); + pool.deallocate_node(nodes[i]); } //Check that the free count is correct @@ -85,7 +85,7 @@ bool test_node_pool::deallocate_free_chunks(NodePool &pool) //First allocate nodes for(std::size_t i = 0; i < max_nodes; ++i){ - nodes.push_back(pool.allocate(1)); + nodes.push_back(pool.allocate_node()); } //Check that the free count is 
correct @@ -97,7 +97,7 @@ bool test_node_pool::deallocate_free_chunks(NodePool &pool) for(std::size_t node_i = 0; node_i < nodes_per_chunk; ++node_i){ //Deallocate a node per chunk for(std::size_t i = 0; i < max_chunks; ++i){ - pool.deallocate(nodes[i*nodes_per_chunk + node_i], 1); + pool.deallocate_node(nodes[i*nodes_per_chunk + node_i]); } //Check that the free count is correct diff --git a/test/private_adaptive_pool_test.cpp b/test/private_adaptive_pool_test.cpp index c46f9ae..a914e83 100644 --- a/test/private_adaptive_pool_test.cpp +++ b/test/private_adaptive_pool_test.cpp @@ -16,6 +16,7 @@ #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include "list_test.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; @@ -23,15 +24,31 @@ using namespace boost::interprocess; //Alias a private adaptive pool that allocates ints typedef private_adaptive_pool priv_node_allocator_t; +typedef detail::private_adaptive_pool_v1 + priv_node_allocator_v1_t; + +//Explicit instantiations to catch compilation errors +template class private_adaptive_pool; +template class detail::private_adaptive_pool_v1; //Alias list types typedef list MyShmList; +typedef list MyShmListV1; + +//Alias vector types +typedef vector MyShmVector; +typedef vector MyShmVectorV1; int main () { if(test::list_test(false)) return 1; - + if(test::list_test(false)) + return 1; + if(test::vector_test()) + return 1; + if(test::vector_test()) + return 1; return 0; } diff --git a/test/private_node_allocator_test.cpp b/test/private_node_allocator_test.cpp index fd125c2..4190c6a 100644 --- a/test/private_node_allocator_test.cpp +++ b/test/private_node_allocator_test.cpp @@ -16,6 +16,7 @@ #include "dummy_test_allocator.hpp" #include "movable_int.hpp" #include "list_test.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; @@ -23,15 +24,31 @@ using namespace boost::interprocess; //Alias a integer node allocator type typedef private_node_allocator priv_node_allocator_t; 
+typedef detail::private_node_allocator_v1 + priv_node_allocator_v1_t; + +//Explicit instantiations to catch compilation errors +template class private_node_allocator; +template class detail::private_node_allocator_v1; //Alias list types -typedef list MyShmList; +typedef list MyShmList; +typedef list MyShmListV1; + +//Alias vector types +typedef vector MyShmVector; +typedef vector MyShmVectorV1; int main () { if(test::list_test(false)) return 1; - + if(test::list_test(false)) + return 1; + if(test::vector_test()) + return 1; + if(test::vector_test()) + return 1; return 0; } diff --git a/test/set_test.hpp b/test/set_test.hpp index 01c90f6..f35ccee 100644 --- a/test/set_test.hpp +++ b/test/set_test.hpp @@ -384,6 +384,8 @@ int set_test () } //Now do count exercise + shmset->erase(shmset->begin(), shmset->end()); + shmmultiset->erase(shmmultiset->begin(), shmmultiset->end()); shmset->clear(); shmmultiset->clear(); diff --git a/test/vector_test.cpp b/test/vector_test.cpp index f6e9031..0e76115 100644 --- a/test/vector_test.cpp +++ b/test/vector_test.cpp @@ -17,227 +17,22 @@ #include #include -#include #include #include "allocator_v1.hpp" -#include -#include -#include -#include "print_container.hpp" #include "check_equal_containers.hpp" #include "movable_int.hpp" #include "expand_bwd_test_allocator.hpp" #include "expand_bwd_test_template.hpp" #include "dummy_test_allocator.hpp" -#include -#include "get_process_id_name.hpp" +#include "vector_test.hpp" using namespace boost::interprocess; -typedef basic_managed_shared_memory - , - flat_map_index - > managed_shared_memory_t; - //Explicit instantiation to detect compilation errors template class boost::interprocess::vector >; -template -bool copyable_only(V1 *, V2 *, detail::false_type) -{ - return true; -} - -//Function to check if both sets are equal -template -bool copyable_only(V1 *shmvector, V2 *stdvector, detail::true_type) -{ - typedef typename V1::value_type IntType; - std::size_t size = shmvector->size(); - 
stdvector->insert(stdvector->end(), 50, 1); - shmvector->insert(shmvector->end(), 50, 1); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - { - IntType move_me(1); - stdvector->insert(stdvector->begin()+size/2, 50, 1); - shmvector->insert(shmvector->begin()+size/2, 50, move(move_me)); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - } - { - IntType move_me(2); - shmvector->assign(shmvector->size()/2, move(move_me)); - stdvector->assign(stdvector->size()/2, 2); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - } - { - IntType move_me(3); - shmvector->assign(shmvector->size()*3-1, move(move_me)); - stdvector->assign(stdvector->size()*3-1, 3); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - } - return true; -} - -template class AllocatorType > -bool do_test() -{ - //Customize managed_shared_memory class - typedef basic_managed_shared_memory - , - rbtree_best_fit, - flat_map_index - > my_managed_shared_memory; - - //Alias AllocatorType type - typedef AllocatorType - shmem_allocator_t; - - //Alias vector types - typedef vector MyShmVector; - typedef std::vector MyStdVector; - - std::string process_name; - test::get_process_id_name(process_name); - - const int Memsize = 65536; - const char *const shMemName = process_name.c_str(); - const int max = 100; - - { - //Compare several shared memory vector operations with std::vector - //Create shared memory - shared_memory_object::remove(shMemName); - try{ - my_managed_shared_memory segment(create_only, shMemName, Memsize); - - segment.reserve_named_objects(100); - - //Shared memory allocator must be always be initialized - //since it has no default constructor - MyShmVector *shmvector = segment.template construct("MyShmVector") - (segment.get_segment_manager()); - MyStdVector *stdvector = new MyStdVector; - - shmvector->resize(100); - stdvector->resize(100); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - 
shmvector->resize(200); - stdvector->resize(200); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - shmvector->resize(0); - stdvector->resize(0); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - for(int i = 0; i < max; ++i){ - IntType new_int(i); - shmvector->insert(shmvector->end(), move(new_int)); - stdvector->insert(stdvector->end(), i); - } - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - typename MyShmVector::iterator shmit(shmvector->begin()); - typename MyStdVector::iterator stdit(stdvector->begin()); - typename MyShmVector::const_iterator cshmit = shmit; - ++shmit; ++stdit; - shmvector->erase(shmit); - stdvector->erase(stdit); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - shmvector->erase(shmvector->begin()); - stdvector->erase(stdvector->begin()); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - { - //Initialize values - IntType aux_vect[50]; - for(int i = 0; i < 50; ++i){ - IntType new_int(-1); - aux_vect[i] = move(new_int); - } - int aux_vect2[50]; - for(int i = 0; i < 50; ++i){ - aux_vect2[i] = -1; - } - - shmvector->insert(shmvector->end() - ,detail::make_move_iterator(&aux_vect[0]) - ,detail::make_move_iterator(aux_vect + 50)); - stdvector->insert(stdvector->end(), aux_vect2, aux_vect2 + 50); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - for(int i = 0, j = static_cast(shmvector->size()); i < j; ++i){ - shmvector->erase(shmvector->begin()); - stdvector->erase(stdvector->begin()); - } - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - } - { - IntType aux_vect[50]; - for(int i = 0; i < 50; ++i){ - IntType new_int(-1); - aux_vect[i] = move(new_int); - } - int aux_vect2[50]; - for(int i = 0; i < 50; ++i){ - aux_vect2[i] = -1; - } - shmvector->insert(shmvector->begin() - ,detail::make_move_iterator(&aux_vect[0]) - ,detail::make_move_iterator(aux_vect + 50)); - 
stdvector->insert(stdvector->begin(), aux_vect2, aux_vect2 + 50); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - } - - shmvector->reserve(shmvector->size()*2); - stdvector->reserve(stdvector->size()*2); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - IntType push_back_this(1); - shmvector->push_back(move(push_back_this)); - stdvector->push_back(int(1)); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - if(!copyable_only(shmvector, stdvector - ,detail::bool_::value>())){ - return false; - } - - shmvector->erase(shmvector->begin()); - stdvector->erase(stdvector->begin()); - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - for(int i = 0; i < max; ++i){ - IntType insert_this(i); - shmvector->insert(shmvector->begin(), move(insert_this)); - stdvector->insert(stdvector->begin(), i); - } - if(!test::CheckEqualContainers(shmvector, stdvector)) return false; - - delete stdvector; - segment.template destroy("MyShmVector"); - segment.shrink_to_fit_indexes(); - - if(!segment.all_memory_deallocated()) - return false; - } - catch(std::exception &ex){ - shared_memory_object::remove(shMemName); - std::cout << ex.what() << std::endl; - return false; - } - } - shared_memory_object::remove(shMemName); - std::cout << std::endl << "Test OK!" 
<< std::endl; - return true; -} - -bool test_expand_bwd() +int test_expand_bwd() { //Now test all back insertion possibilities @@ -248,7 +43,7 @@ bool test_expand_bwd() int_vector; if(!test::test_all_expand_bwd()) - return false; + return 1; //Now user defined wrapped int typedef test::expand_bwd_test_allocator @@ -257,7 +52,7 @@ bool test_expand_bwd() int_holder_vector; if(!test::test_all_expand_bwd()) - return false; + return 1; //Now user defined bigger wrapped int typedef test::expand_bwd_test_allocator @@ -267,26 +62,32 @@ bool test_expand_bwd() triple_int_holder_vector; if(!test::test_all_expand_bwd()) - return false; + return 1; - return true; + return 0; } int main() { - if(!do_test()) + typedef allocator ShmemAllocator; + typedef vector MyVector; + + typedef allocator ShmemMoveAllocator; + typedef vector MyMoveVector; + + typedef allocator ShmemCopyMoveAllocator; + typedef vector MyCopyMoveVector; + + if(test::vector_test()) return 1; - if(!do_test()) + if(test::vector_test()) return 1; - if(!do_test()) + if(test::vector_test()) return 1; - if(!do_test()) - return 1; - - if(!test_expand_bwd()) + if(test_expand_bwd()) return 1; return 0; diff --git a/test/vector_test.hpp b/test/vector_test.hpp new file mode 100644 index 0000000..3d76790 --- /dev/null +++ b/test/vector_test.hpp @@ -0,0 +1,219 @@ +////////////////////////////////////////////////////////////////////////////// +// +// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost +// Software License, Version 1.0. (See accompanying file +// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// See http://www.boost.org/libs/interprocess for documentation. 
+// +////////////////////////////////////////////////////////////////////////////// + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "print_container.hpp" +#include "check_equal_containers.hpp" +#include "movable_int.hpp" +#include +#include "get_process_id_name.hpp" + +namespace boost{ +namespace interprocess{ +namespace test{ + +template +bool copyable_only(V1 *, V2 *, detail::false_type) +{ + return true; +} + +//Function to check if both sets are equal +template +bool copyable_only(V1 *shmvector, V2 *stdvector, detail::true_type) +{ + typedef typename V1::value_type IntType; + std::size_t size = shmvector->size(); + stdvector->insert(stdvector->end(), 50, 1); + shmvector->insert(shmvector->end(), 50, 1); + if(!test::CheckEqualContainers(shmvector, stdvector)) return false; + + { + IntType move_me(1); + stdvector->insert(stdvector->begin()+size/2, 50, 1); + shmvector->insert(shmvector->begin()+size/2, 50, move(move_me)); + if(!test::CheckEqualContainers(shmvector, stdvector)) return false; + } + { + IntType move_me(2); + shmvector->assign(shmvector->size()/2, move(move_me)); + stdvector->assign(stdvector->size()/2, 2); + if(!test::CheckEqualContainers(shmvector, stdvector)) return false; + } + { + IntType move_me(3); + shmvector->assign(shmvector->size()*3-1, move(move_me)); + stdvector->assign(stdvector->size()*3-1, 3); + if(!test::CheckEqualContainers(shmvector, stdvector)) return false; + } + return true; +} + +template +int vector_test() +{ + typedef std::vector MyStdVector; + typedef typename MyShmVector::value_type IntType; + + std::string process_name; + test::get_process_id_name(process_name); + + const int Memsize = 65536; + const char *const shMemName = process_name.c_str(); + const int max = 100; + + { + //Compare several shared memory vector operations with std::vector + //Create shared memory + shared_memory_object::remove(shMemName); + try{ + ManagedSharedMemory 
segment(create_only, shMemName, Memsize); + + segment.reserve_named_objects(100); + + //Shared memory allocator must be always be initialized + //since it has no default constructor + MyShmVector *shmvector = segment.template construct("MyShmVector") + (segment.get_segment_manager()); + MyStdVector *stdvector = new MyStdVector; + + shmvector->resize(100); + stdvector->resize(100); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + shmvector->resize(200); + stdvector->resize(200); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + shmvector->resize(0); + stdvector->resize(0); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + for(int i = 0; i < max; ++i){ + IntType new_int(i); + shmvector->insert(shmvector->end(), move(new_int)); + stdvector->insert(stdvector->end(), i); + } + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + typename MyShmVector::iterator shmit(shmvector->begin()); + typename MyStdVector::iterator stdit(stdvector->begin()); + typename MyShmVector::const_iterator cshmit = shmit; + ++shmit; ++stdit; + shmvector->erase(shmit); + stdvector->erase(stdit); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + shmvector->erase(shmvector->begin()); + stdvector->erase(stdvector->begin()); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + { + //Initialize values + IntType aux_vect[50]; + for(int i = 0; i < 50; ++i){ + IntType new_int(-1); + aux_vect[i] = move(new_int); + } + int aux_vect2[50]; + for(int i = 0; i < 50; ++i){ + aux_vect2[i] = -1; + } + + shmvector->insert(shmvector->end() + ,detail::make_move_iterator(&aux_vect[0]) + ,detail::make_move_iterator(aux_vect + 50)); + stdvector->insert(stdvector->end(), aux_vect2, aux_vect2 + 50); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + for(int i = 0, j = static_cast(shmvector->size()); i < j; ++i){ + shmvector->erase(shmvector->begin()); + 
stdvector->erase(stdvector->begin()); + } + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + } + { + IntType aux_vect[50]; + for(int i = 0; i < 50; ++i){ + IntType new_int(-1); + aux_vect[i] = move(new_int); + } + int aux_vect2[50]; + for(int i = 0; i < 50; ++i){ + aux_vect2[i] = -1; + } + shmvector->insert(shmvector->begin() + ,detail::make_move_iterator(&aux_vect[0]) + ,detail::make_move_iterator(aux_vect + 50)); + stdvector->insert(stdvector->begin(), aux_vect2, aux_vect2 + 50); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + } + + shmvector->reserve(shmvector->size()*2); + stdvector->reserve(stdvector->size()*2); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + IntType push_back_this(1); + shmvector->push_back(move(push_back_this)); + stdvector->push_back(int(1)); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + if(!copyable_only(shmvector, stdvector + ,detail::bool_::value>())){ + return 1; + } + + shmvector->erase(shmvector->begin()); + stdvector->erase(stdvector->begin()); + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + for(int i = 0; i < max; ++i){ + IntType insert_this(i); + shmvector->insert(shmvector->begin(), move(insert_this)); + stdvector->insert(stdvector->begin(), i); + } + if(!test::CheckEqualContainers(shmvector, stdvector)) return 1; + + delete stdvector; + segment.template destroy("MyShmVector"); + segment.shrink_to_fit_indexes(); + + if(!segment.all_memory_deallocated()) + return 1; + } + catch(std::exception &ex){ + shared_memory_object::remove(shMemName); + std::cout << ex.what() << std::endl; + return 1; + } + } + shared_memory_object::remove(shMemName); + std::cout << std::endl << "Test OK!" << std::endl; + return 0; +} + +} //namespace test{ +} //namespace interprocess{ +} //namespace boost{ + +#include