/* Measuring performance of concurrent hashmaps under several
 * workload configurations.
 *
 * Copyright 2023-2024 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 */

#include <algorithm>
#include <array>
#include <chrono>
#include <numeric>

std::chrono::high_resolution_clock::time_point measure_start,measure_pause;

/* trimmed mean of num_trials timing trials; pause_timing/resume_timing
 * exclude setup and teardown from the measured interval
 */
template<typename F>
double measure(F f)
{
  using namespace std::chrono;

  static const int              num_trials=10;
  static const milliseconds     min_time_per_trial(10);
  std::array<double,num_trials> trials;

  for(int i=0;i<num_trials;++i){
    int                               runs=0;
    high_resolution_clock::time_point t2;
    volatile decltype(f())            res; /* keep result alive */

    measure_start=high_resolution_clock::now();
    do{
      res=f();
      ++runs;
      t2=high_resolution_clock::now();
    }while(t2-measure_start<min_time_per_trial);
    (void)res;
    trials[i]=duration_cast<duration<double>>(t2-measure_start).count()/runs;
  }

  std::sort(trials.begin(),trials.end());
  return std::accumulate(
    trials.begin()+2,trials.end()-2,0.0)/(trials.size()-4);
}

void pause_timing()
{
  measure_pause=std::chrono::high_resolution_clock::now();
}

void resume_timing()
{
  measure_start+=std::chrono::high_resolution_clock::now()-measure_pause;
}

#include <boost/bind/bind.hpp>
#include <boost/config.hpp>
#include <boost/core/detail/splitmix64.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_node_map.hpp>
#include <cstdint>
#include <iostream>
#include <latch>
#include <limits>
#include <memory>
#include <mutex>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
#include "gtl/phmap.hpp"
#include "oneapi/tbb/concurrent_hash_map.h"
#include "zipfian_int_distribution.h"

using boost_flat_map=boost::concurrent_flat_map<int,int>;
using boost_node_map=boost::concurrent_node_map<int,int>;
using tbb_map=tbb::concurrent_hash_map<int,int>;
using gtl_map=gtl::parallel_flat_hash_map<
  int,int,gtl::priv::hash_default_hash<int>,gtl::priv::hash_default_eq<int>,
  std::allocator<std::pair<const int,int>>,
  8,std::mutex>;

/* distinct types so that lookups can be routed through bulk visitation */

struct bulk_flat_map:boost::concurrent_flat_map<int,int>{};
struct bulk_node_map:boost::concurrent_node_map<int,int>{};

/* uniform update/lookup interface over the different map APIs */

template<typename... Args>
inline void map_update(boost_flat_map& m,Args&&... args)
{
  m.emplace_or_visit(std::forward<Args>(args)...,[](auto& x){++x.second;});
}

template<typename Key>
inline bool map_find(const boost_flat_map& m,const Key& x)
{
  return m.contains(x);
}

template<typename... Args>
inline void map_update(boost_node_map& m,Args&&... args)
{
  m.emplace_or_visit(std::forward<Args>(args)...,[](auto& x){++x.second;});
}

template<typename Key>
inline bool map_find(const boost_node_map& m,const Key& x)
{
  return m.contains(x);
}

template<typename... Args>
inline void map_update(tbb_map& m,Args&&... args)
{
  tbb_map::accessor acc;
  if(!m.emplace(acc,std::forward<Args>(args)...))++acc->second;
}

template<typename Key>
inline bool map_find(const tbb_map& m,const Key& x)
{
  return m.count(x);
}

template<typename Key,typename... Args>
inline void map_update(gtl_map& m,Key&& k,Args&&...
  args)
{
  m.lazy_emplace_l(
    k,
    [](auto& x){++x.second;},
    [&](const auto& ctor){
      ctor(std::forward<Key>(k),std::forward<Args>(args)...);});
}

template<typename Key>
inline bool map_find(const gtl_map& m,const Key& x)
{
  return m.contains(x);
}

template<typename Distribution>
class updater
{
public:
  updater(const Distribution& dist_):dist{dist_}{}

  template<typename Map,typename URNG>
  void operator()(Map& m,URNG& gen)
  {
    map_update(m,dist(gen),0);
  }

private:
  Distribution dist;
};

template<typename Distribution>
class finder
{
public:
  finder(const Distribution& dist_):dist{dist_}{}

  template<typename Map,typename URNG>
  void operator()(const Map& m,URNG& gen)
  {
    if(map_find(m,dist(gen)))++res;
  }

  int res=0;

private:
  Distribution dist;
};

template<typename BulkMap,typename Distribution>
class bulk_finder
{
public:
  bulk_finder(const Distribution& dist_):dist{dist_}{}

  template<typename URNG>
  void operator()(const BulkMap& m,URNG& gen)
  {
    keys[i++]=dist(gen);
    if(i==N){
      flush(m);
      i=0;
    }
  }

  void flush(const BulkMap& m)
  {
    res+=(int)m.visit(keys.begin(),keys.begin()+i,[](const auto&){});
  }

  int res=0;

private:
  static constexpr std::size_t N=BulkMap::bulk_visit_size;

  Distribution      dist;
  int               i=0;
  std::array<int,N> keys;
};

/* contributed by Martin Leitner-Ankerl */

template<std::size_t N>
class simple_discrete_distribution
{
public:
  /* cumulative thresholds normalized to 2^64; only the first N-1 are computed
   * from the input, the last one is pinned to the maximum
   */
  std::array<uint64_t,N> m_cummulative{};

public:
  simple_discrete_distribution(std::initializer_list<double> l)
  {
    std::array<double,N> sums{};
    double               sum=0.0;
    auto                 i=0;
    for(auto x:l){
      sum+=x;
      sums[i]=sum;
      ++i;
    }

    /* normalize to 2^64 */
    for(std::size_t i=0;i<N-1;++i){
      m_cummulative[i]=static_cast<uint64_t>(
        sums[i]/sum*(double)(std::numeric_limits<uint64_t>::max)());
    }
    m_cummulative.back()=(std::numeric_limits<uint64_t>::max)();
  }

  std::size_t operator()(uint64_t r01)const noexcept
  {
    for(std::size_t i=0;i<m_cummulative.size();++i){
      if(r01<m_cummulative[i])return i;
    }
    return N-1;
  }

  template<typename URNG>
  std::size_t operator()(URNG& rng)const noexcept
  {
    static_assert((URNG::min)()==0,"URNG::min must be 0");
    static_assert(
      (URNG::max)()==(std::numeric_limits<uint64_t>::max)(),
      "URNG::max must be max of uint64_t");
    return operator()(rng());
  }
};

struct splitmix64_urng:boost::detail::splitmix64
{
  using boost::detail::splitmix64::splitmix64;
  using result_type=boost::uint64_t;

  static constexpr result_type (min)(){return 0u;}
  static constexpr result_type (max)()
  {return (std::numeric_limits<result_type>::max)();}
};

template<typename Map>
struct parallel_load
{
  using result_type=std::size_t;

  BOOST_NOINLINE result_type operator()(int N,double theta,int num_threads)const
  {
    int res=0;
    pause_timing();
    {
      Map                           m;
      std::vector<std::thread>      threads;
      std::vector<int>              results(num_threads);
      zipfian_int_distribution<int> zipf1{1,N,theta},
                                    zipf2{N+1,2*N,theta};
      std::latch                    ready(num_threads),start(1),
                                    completed(num_threads),finish(1);

      for(int i=0;i<num_threads;++i){
        threads.emplace_back([&,i]{
          constexpr bool is_bulk=
            std::is_same<Map,bulk_flat_map>::value||
            std::is_same<Map,bulk_node_map>::value;
          using finder_type=typename std::conditional<
            is_bulk,
            bulk_finder<Map,zipfian_int_distribution<int>>,
            finder<zipfian_int_distribution<int>>
          >::type;

          /* 10% updates, 45% successful and 45% unsuccessful lookups */
          simple_discrete_distribution<3> dist({10,45,45});

          splitmix64_urng gen(std::size_t(282472+i*213731));
          updater         update{zipf1};
          finder_type     successful_find{zipf1},
                          unsuccessful_find{zipf2};

          int n=i==0?(N+num_threads-1)/num_threads:N/num_threads;
          n*=10; /* so that sum(#updates(i)) = N */

          ready.count_down();
          start.wait();

          for(int j=0;j<n;++j){
            switch(dist(gen)){
              case 0:  update(m,gen); break;
              case 1:  successful_find(m,gen); break;
              case 2:
              default: unsuccessful_find(m,gen); break;
            }
          }
          if constexpr(is_bulk){ /* visit any keys still buffered */
            successful_find.flush(m);
            unsuccessful_find.flush(m);
          }

          completed.count_down();
          finish.wait();
          results[i]=successful_find.res+unsuccessful_find.res;
        });
      }

      ready.wait();
      resume_timing();
      start.count_down();
      completed.wait();
      pause_timing();
      finish.count_down();
      for(auto& t:threads)t.join();
      for(auto r:results)res+=r;
    }
    resume_timing();
    return (result_type)res;
  }
};

template<
  template<typename> class Tester,
  typename Container1,typename Container2,
  typename Container3,typename Container4,
  typename Container5,typename Container6
>
BOOST_NOINLINE void test(
  const char* title,int N,double theta,
  const char* name1,const char* name2,const char* name3,const char* name4,
  const char* name5,const char* name6)
{
#ifdef NUM_THREADS
  const int num_threads=NUM_THREADS;
#else
  const int num_threads=16;
#endif

  std::cout<<title<<" (N="<<N<<", theta="<<theta<<"):"<<std::endl;
  std::cout<<"threads;"<<name1<<";"<<name2<<";"<<name3<<";"<<name4<<";"
           <<name5<<";"<<name6<<std::endl;

  for(int n=1;n<=num_threads;++n){
    double t;
    std::cout<<n<<";";
    t=measure(boost::bind(Tester<Container1>(),N,theta,n));
    std::cout<<10*N/t/1E6<<";";
    t=measure(boost::bind(Tester<Container2>(),N,theta,n));
    std::cout<<10*N/t/1E6<<";";
    t=measure(boost::bind(Tester<Container3>(),N,theta,n));
    std::cout<<10*N/t/1E6<<";";
    t=measure(boost::bind(Tester<Container4>(),N,theta,n));
std::cout<<10*N/t/1E6<<";"; t=measure(boost::bind(Tester(),N,theta,n)); std::cout<<10*N/t/1E6<<";"; t=measure(boost::bind(Tester(),N,theta,n)); std::cout<<10*N/t/1E6< ( "Parallel load",N,theta, "tbb::concurrent_hash_map", "gtl::parallel_flat_hash_map", "boost::concurrent_flat_map", "boost::concurrent_flat_map bulk", "boost::concurrent_node_map", "boost::concurrent_node_map bulk" ); } } }